Ajay Tripathy пре 5 година
родитељ
комит
b431850a15

+ 3 - 3
cmd/costmodel/main.go

@@ -17,11 +17,11 @@ func Healthz(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
 }
 
 func main() {
-	costmodel.Initialize()
+	a := costmodel.Initialize()
 
 	rootMux := http.NewServeMux()
-	costmodel.Router.GET("/healthz", Healthz)
-	rootMux.Handle("/", costmodel.Router)
+	a.Router.GET("/healthz", Healthz)
+	rootMux.Handle("/", a.Router)
 	rootMux.Handle("/metrics", promhttp.Handler())
 	klog.Fatal(http.ListenAndServe(":9003", errors.PanicHandlerMiddleware(rootMux)))
 }

+ 3 - 140
go.sum

@@ -1,15 +1,11 @@
-cloud.google.com/go v0.0.0-20160913182117-3b1ae45394a2/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ=
 contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
-git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
 github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
 github.com/Azure/azure-sdk-for-go v24.1.0+incompatible h1:P7GocB7bhkyGbRL1tCy0m9FDqb1V/dqssch3jZieUHk=
 github.com/Azure/azure-sdk-for-go v24.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-autorest v11.1.0+incompatible h1:9DfMsQdUMEtg1jKRTjtkNZsvOuZXJOMl4dN1kiQwAc8=
-github.com/Azure/go-autorest v11.1.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v11.3.2+incompatible h1:2bRmoaLvtIXW5uWpZVoIkc0C1z7c84rVGnP+3mpyCRg=
 github.com/Azure/go-autorest v11.3.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -19,19 +15,13 @@ github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mo
 github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
 github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/aws/aws-sdk-go v1.19.10 h1:WHIaUrU98WsWIXxlxeMCmbuB5HowxuUnk8eBH4iGl/g=
-github.com/aws/aws-sdk-go v1.19.10/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.28.9 h1:grIuBQc+p3dTRXerh5+2OxSuWFi0iXuxbFdTSg0jaW0=
 github.com/aws/aws-sdk-go v1.28.9/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
@@ -41,15 +31,11 @@ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
 github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -61,14 +47,10 @@ github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -89,10 +71,10 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
 github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
 github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
 github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
+github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
@@ -105,29 +87,20 @@ github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm
 github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
 github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
 github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
 github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
@@ -142,7 +115,6 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/uuid v0.0.0-20171113160352-8c31c18f31ed/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -150,24 +122,15 @@ github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp
 github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
-github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8 h1:L9JPKrtsHMQ4VCRQfHvbbHBfB2Urn8xf6QZeXZ+OrN4=
 github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
 github.com/gophercloud/gophercloud v0.2.0 h1:lD2Bce2xBAMNNcFZ0dObTpXkGLlVIb33RPVUNVpw6ic=
 github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
 github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
 github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -178,8 +141,6 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -189,7 +150,6 @@ github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62
 github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
 github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -210,7 +170,6 @@ github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4
 github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
 github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
 github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
@@ -223,14 +182,11 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kubecost/cost-model v0.0.0-20190415210323-992655b79eac/go.mod h1:NxiMjOpYdrBQBjo3bGcJOpB+KZd1NWpTbWaWlMq3f+Q=
 github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
 github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
 github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@@ -261,25 +217,20 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
 github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
 github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
 github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
 github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
 github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
 github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -289,32 +240,16 @@ github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
-github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -327,8 +262,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
@@ -343,18 +276,14 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
-github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -367,74 +296,48 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
 github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
 github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
 github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
-go.opencensus.io v0.19.2 h1:ZZpq6xI6kv/LuE/5s5UQvBU5vMjvRnPb8PvJrIntAnc=
-go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
 go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20180808211826-de0752318171/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a h1:Igim7XhdOpBnWPuYJ70XcNpq8q3BCACtVgNfoJxOV7g=
-golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
 golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190415214537-1da14a5a36f2 h1:iC0Y6EDq+rhnAePxGvJs2kzUAYcwESqdcGRPzEUfzTU=
-golang.org/x/net v0.0.0-20190415214537-1da14a5a36f2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68=
 golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
 golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
 golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -454,16 +357,12 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -481,14 +380,9 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g=
 golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -499,29 +393,19 @@ golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU=
-google.golang.org/api v0.3.0 h1:UIJY20OEo3+tK5MBlcdx37kmdH6EnRjGkW78mc6+EeA=
-google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw=
 google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
@@ -531,8 +415,6 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8
 gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
 gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
 gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
-gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@@ -542,38 +424,21 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.0.0-20190404065945-709cf190c7b7 h1:s6+su3184vqq9jlmBa1UHf/JXGlx6mR5/Wn1mgGgho0=
-k8s.io/api v0.0.0-20190404065945-709cf190c7b7/go.mod h1:qa6Gt7knwxwD/MXwV8iaEoTpniVksiix/r9hpLWYgTA=
 k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50=
 k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A=
 k8s.io/api v0.0.0-20190913080256-21721929cffa h1:5HxstS7zbT60CcA8qiFOeJtUxIwenu0dVIR5Ne0BUI8=
 k8s.io/api v0.0.0-20190913080256-21721929cffa/go.mod h1:jESdJL4e7Q+sDnEXOZ1ysc1WBxR4I34RbRh5QqGT9kQ=
-k8s.io/apimachinery v0.0.0-20190404065847-4a4abcd45006 h1:Jiue4qiNBoiq1GxPx33Kjw7KOyqYVEcZyl4y4rIXXLU=
-k8s.io/apimachinery v0.0.0-20190404065847-4a4abcd45006/go.mod h1:65NCMCFo27j/Cv2DAQSfKd70SAtu/hwoqasuXFCDNvY=
 k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 h1:uV4S5IB5g4Nvi+TBVNf3e9L4wrirlwYJ6w88jUQxTUw=
 k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
 k8s.io/apimachinery v0.0.0-20190913075812-e119e5e154b6 h1:tGU1C/vMoUV2ZakSH6wQq2shk9KiFtjoH2vDDHlhpA4=
 k8s.io/apimachinery v0.0.0-20190913075812-e119e5e154b6/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4=
-k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
-k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ=
-k8s.io/apimachinery v0.19.1 h1:cwsxZazM/LA9aUsBaL4bRS5ygoM6bYp8dFk22DSYQa4=
-k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
-k8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc=
-k8s.io/client-go v0.0.0-20190404172613-2e1a3ed22ac5 h1:BwY2C//EoWktJi74O6R2REBonrhsfhRI0qfVwOjOPp8=
-k8s.io/client-go v0.0.0-20190404172613-2e1a3ed22ac5/go.mod h1:bIEHXHbykaOlj+pgLllzLJ2RPGdzkjtqdk0Il07KPEM=
 k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g=
 k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k=
 k8s.io/client-go v1.5.1 h1:XaX/lo2/u3/pmFau8HN+sB5C/b4dc4Dmm2eXjBH4p1E=
 k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA=
-k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ=
 k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
@@ -582,8 +447,6 @@ k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH
 k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
 k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4=
 k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
-k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c=
-k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
 sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

+ 60 - 0
pkg/cloud/provider.go

@@ -253,6 +253,66 @@ func CustomPricesEnabled(p Provider) bool {
 	return config.CustomPricesEnabled == "true"
 }
 
+// AllocateIdleByDefault returns true if the application settings specify to allocate idle by default
+func AllocateIdleByDefault(p Provider) bool {
+	config, err := p.GetConfig()
+	if err != nil {
+		// If the config cannot be read, fall back to not allocating idle.
+		return false
+	}
+
+	return config.DefaultIdle == "true"
+}
+
+// SharedNamespaces returns a list of names of shared namespaces, as defined in the application settings.
+// It returns an empty (non-nil) slice if the config cannot be read or the setting is unset.
+func SharedNamespaces(p Provider) []string {
+	namespaces := []string{}
+
+	config, err := p.GetConfig()
+	if err != nil {
+		return namespaces
+	}
+	if config.SharedNamespaces == "" {
+		return namespaces
+	}
+	// trim spaces so that "kube-system, kubecost" is equivalent to "kube-system,kubecost"
+	for _, ns := range strings.Split(config.SharedNamespaces, ",") {
+		namespaces = append(namespaces, strings.Trim(ns, " "))
+	}
+
+	return namespaces
+}
+
+// SharedLabels returns the configured set of shared labels as a parallel tuple of keys to values; e.g.
+// for app:kubecost,type:staging this returns (["app", "type"], ["kubecost", "staging"]) in order to
+// match the signature of the NewSharedResourceInfo
+func SharedLabels(p Provider) ([]string, []string) {
+	names := []string{}
+	values := []string{}
+
+	config, err := p.GetConfig()
+	if err != nil {
+		return names, values
+	}
+
+	if config.SharedLabelNames == "" || config.SharedLabelValues == "" {
+		return names, values
+	}
+
+	ks := strings.Split(config.SharedLabelNames, ",")
+	vs := strings.Split(config.SharedLabelValues, ",")
+	// Names and values are parallel slices; a length mismatch means the config
+	// is malformed, so return empty rather than mis-pairing labels.
+	if len(ks) != len(vs) {
+		klog.V(2).Infof("[Warning] shared labels have mis-matched lengths: %d names, %d values", len(ks), len(vs))
+		return names, values
+	}
+
+	for i := range ks {
+		names = append(names, strings.Trim(ks[i], " "))
+		values = append(values, strings.Trim(vs[i], " "))
+	}
+
+	return names, values
+}
+
 func NewCrossClusterProvider(ctype string, overrideConfigPath string, cache clustercache.ClusterCache) (Provider, error) {
 	if ctype == "aws" {
 		return &AWS{

+ 2000 - 0
pkg/costmodel/aggregation.go

@@ -0,0 +1,2000 @@
+package costmodel
+
+import (
+	"fmt"
+	"math"
+	"net/http"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/julienschmidt/httprouter"
+	"github.com/kubecost/cost-model/pkg/cloud"
+	"github.com/kubecost/cost-model/pkg/env"
+	"github.com/kubecost/cost-model/pkg/errors"
+	"github.com/kubecost/cost-model/pkg/kubecost"
+	"github.com/kubecost/cost-model/pkg/log"
+	"github.com/kubecost/cost-model/pkg/prom"
+	"github.com/kubecost/cost-model/pkg/thanos"
+	"github.com/kubecost/cost-model/pkg/util"
+	"github.com/patrickmn/go-cache"
+	prometheusClient "github.com/prometheus/client_golang/api"
+	"k8s.io/klog"
+)
+
+const (
+	// SplitTypeWeighted signals that shared costs should be shared
+	// proportionally, rather than evenly
+	SplitTypeWeighted = "weighted"
+
+	// UnallocatedSubfield indicates an allocation datum that does not have the
+	// chosen Aggregator; e.g. during aggregation by some label, there may be
+	// cost data that do not have the given label.
+	UnallocatedSubfield = "__unallocated__"
+
+	// clusterCostsCacheMinutes is presumably the TTL, in minutes, for entries
+	// in the cluster-costs cache — no reader is visible in this file; TODO confirm.
+	clusterCostsCacheMinutes = 5.0
+)
+
+// Aggregation describes aggregated cost data, containing cumulative cost and
+// allocation data per resource, vectors of rate data per resource, efficiency
+// data, and metadata describing the type of aggregation operation.
+// Fields tagged json:"-" are intermediate values used during aggregation and
+// are omitted from serialized API responses.
+type Aggregation struct {
+	Aggregator                 string               `json:"aggregation"`
+	Subfields                  []string             `json:"subfields,omitempty"`
+	Environment                string               `json:"environment"`
+	Cluster                    string               `json:"cluster,omitempty"`
+	Properties                 *kubecost.Properties `json:"-"`
+	CPUAllocationHourlyAverage float64              `json:"cpuAllocationAverage"`
+	CPUAllocationVectors       []*util.Vector       `json:"-"`
+	CPUAllocationTotal         float64              `json:"-"`
+	CPUCost                    float64              `json:"cpuCost"`
+	CPUCostVector              []*util.Vector       `json:"cpuCostVector,omitempty"`
+	CPUEfficiency              float64              `json:"cpuEfficiency"`
+	CPURequestedVectors        []*util.Vector       `json:"-"`
+	CPUUsedVectors             []*util.Vector       `json:"-"`
+	Efficiency                 float64              `json:"efficiency"`
+	GPUAllocationHourlyAverage float64              `json:"gpuAllocationAverage"`
+	GPUAllocationVectors       []*util.Vector       `json:"-"`
+	GPUCost                    float64              `json:"gpuCost"`
+	GPUCostVector              []*util.Vector       `json:"gpuCostVector,omitempty"`
+	GPUAllocationTotal         float64              `json:"-"`
+	RAMAllocationHourlyAverage float64              `json:"ramAllocationAverage"`
+	RAMAllocationVectors       []*util.Vector       `json:"-"`
+	RAMAllocationTotal         float64              `json:"-"`
+	RAMCost                    float64              `json:"ramCost"`
+	RAMCostVector              []*util.Vector       `json:"ramCostVector,omitempty"`
+	RAMEfficiency              float64              `json:"ramEfficiency"`
+	RAMRequestedVectors        []*util.Vector       `json:"-"`
+	RAMUsedVectors             []*util.Vector       `json:"-"`
+	PVAllocationHourlyAverage  float64              `json:"pvAllocationAverage"`
+	PVAllocationVectors        []*util.Vector       `json:"-"`
+	PVAllocationTotal          float64              `json:"-"`
+	PVCost                     float64              `json:"pvCost"`
+	PVCostVector               []*util.Vector       `json:"pvCostVector,omitempty"`
+	NetworkCost                float64              `json:"networkCost"`
+	NetworkCostVector          []*util.Vector       `json:"networkCostVector,omitempty"`
+	SharedCost                 float64              `json:"sharedCost"`
+	TotalCost                  float64              `json:"totalCost"`
+	TotalCostVector            []*util.Vector       `json:"totalCostVector,omitempty"`
+}
+
+// TotalHours determines the amount of hours the Aggregation covers, as a
+// function of the cost vectors and the resolution of those vectors' data.
+// The longest cost vector (minimum 1) defines the covered span.
+func (a *Aggregation) TotalHours(resolutionHours float64) float64 {
+	longest := 1
+	for _, n := range []int{
+		len(a.CPUCostVector),
+		len(a.RAMCostVector),
+		len(a.PVCostVector),
+		len(a.GPUCostVector),
+		len(a.NetworkCostVector),
+	} {
+		if n > longest {
+			longest = n
+		}
+	}
+	return float64(longest) * resolutionHours
+}
+
+// RateCoefficient computes the coefficient by which the total cost needs to be
+// multiplied in order to convert totals costs into per-rate costs.
+func (a *Aggregation) RateCoefficient(rateStr string, resolutionHours float64) float64 {
+	// monthly rate = (730.0)*(total cost)/(total hours)
+	// daily rate = (24.0)*(total cost)/(total hours)
+	// hourly rate = (1.0)*(total cost)/(total hours)
+
+	// default to hourly rate; any unrecognized rateStr (including "hourly"
+	// and "") falls through to the hourly coefficient of 1.0
+	coeff := 1.0
+	switch rateStr {
+	case "daily":
+		coeff = util.HoursPerDay
+	case "monthly":
+		coeff = util.HoursPerMonth
+	}
+
+	return coeff / a.TotalHours(resolutionHours)
+}
+
+// SharedResourceInfo describes which resources should be treated as shared
+// across all allocations, selected either by namespace or by label key/value.
+type SharedResourceInfo struct {
+	ShareResources  bool
+	SharedNamespace map[string]bool
+	LabelSelectors  map[string]map[string]bool
+}
+
+// SharedCostInfo describes a named external cost to be distributed across
+// aggregations, along with the type of split to apply.
+type SharedCostInfo struct {
+	Name      string
+	Cost      float64
+	ShareType string
+}
+
+// IsSharedResource reports whether costDatum should be treated as a shared
+// resource: either its namespace is a shared namespace, or it carries at
+// least one matching shared label (multiple label selectors are OR'd).
+func (s *SharedResourceInfo) IsSharedResource(costDatum *CostData) bool {
+	if _, inSharedNamespace := s.SharedNamespace[costDatum.Namespace]; inSharedNamespace {
+		return true
+	}
+	for name, allowedValues := range s.LabelSelectors {
+		value, present := costDatum.Labels[name]
+		if present && allowedValues[value] {
+			return true
+		}
+	}
+	return false
+}
+
+// NewSharedResourceInfo constructs a SharedResourceInfo from lists of shared
+// namespaces and parallel label name/value slices. Label selectors are only
+// recorded when the two slices have equal lengths; entries are trimmed of
+// surrounding spaces and label names are sanitized for Prometheus.
+func NewSharedResourceInfo(shareResources bool, sharedNamespaces []string, labelNames []string, labelValues []string) *SharedResourceInfo {
+	sr := &SharedResourceInfo{
+		ShareResources:  shareResources,
+		SharedNamespace: make(map[string]bool),
+		LabelSelectors:  make(map[string]map[string]bool),
+	}
+
+	for _, ns := range sharedNamespaces {
+		sr.SharedNamespace[strings.Trim(ns, " ")] = true
+	}
+
+	// Creating a map of label name to label value, but only if
+	// the cardinality matches
+	if len(labelNames) == len(labelValues) {
+		for i := range labelNames {
+			cleanedLname := prom.SanitizeLabelName(strings.Trim(labelNames[i], " "))
+			if values, ok := sr.LabelSelectors[cleanedLname]; ok {
+				values[strings.Trim(labelValues[i], " ")] = true
+			} else {
+				sr.LabelSelectors[cleanedLname] = map[string]bool{strings.Trim(labelValues[i], " "): true}
+			}
+		}
+	}
+
+	return sr
+}
+
+// GetTotalContainerCost sums the CPU, RAM, GPU, PV, and network costs across
+// all given cost data, applying the given rate, discounts, and per-cluster
+// idle coefficients via getPriceVectors.
+func GetTotalContainerCost(costData map[string]*CostData, rate string, cp cloud.Provider, discount float64, customDiscount float64, idleCoefficients map[string]float64) float64 {
+	totalContainerCost := 0.0
+	for _, costDatum := range costData {
+		clusterID := costDatum.ClusterID
+		cpuv, ramv, gpuv, pvvs, netv := getPriceVectors(cp, costDatum, rate, discount, customDiscount, idleCoefficients[clusterID])
+		totalContainerCost += totalVectors(cpuv)
+		totalContainerCost += totalVectors(ramv)
+		totalContainerCost += totalVectors(gpuv)
+		for _, pv := range pvvs {
+			totalContainerCost += totalVectors(pv)
+		}
+		totalContainerCost += totalVectors(netv)
+	}
+	return totalContainerCost
+}
+
+// ComputeIdleCoefficient computes, per cluster, the ratio of total container
+// cost to total cluster cost over the given window/offset. A coefficient of
+// 1.0 is used for clusters with no ClusterCosts data; an error is returned if
+// a cluster's TotalCumulative cost is zero.
+func (a *Accesses) ComputeIdleCoefficient(costData map[string]*CostData, cli prometheusClient.Client, cp cloud.Provider, discount float64, customDiscount float64, windowString, offset string) (map[string]float64, error) {
+	coefficients := make(map[string]float64)
+
+	profileName := "ComputeIdleCoefficient: ComputeClusterCosts"
+	profileStart := time.Now()
+
+	var clusterCosts map[string]*ClusterCosts
+	var err error
+
+	key := fmt.Sprintf("%s:%s", windowString, offset)
+	if data, valid := a.ClusterCostsCache.Get(key); valid {
+		clusterCosts = data.(map[string]*ClusterCosts)
+	} else {
+		// NOTE(review): the freshly computed clusterCosts are not written back
+		// to ClusterCostsCache here — confirm the cache is populated elsewhere
+		// (clusterCostsCacheMinutes is declared but unused in visible code).
+		clusterCosts, err = a.ComputeClusterCosts(cli, cp, windowString, offset, false)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	measureTime(profileStart, profileThreshold, profileName)
+
+	for cid, costs := range clusterCosts {
+		if costs.CPUCumulative == 0 && costs.RAMCumulative == 0 && costs.StorageCumulative == 0 {
+			klog.V(1).Infof("[Warning] No ClusterCosts data for cluster '%s'. Is it emitting data?", cid)
+			coefficients[cid] = 1.0
+			continue
+		}
+
+		if costs.TotalCumulative == 0 {
+			return nil, fmt.Errorf("TotalCumulative cluster cost for cluster '%s' returned 0 over window '%s' offset '%s'", cid, windowString, offset)
+		}
+
+		// Sum the non-network container costs for this cluster with no rate
+		// conversion and an idle coefficient of 1 (i.e. raw cumulative cost).
+		totalContainerCost := 0.0
+		for _, costDatum := range costData {
+			if costDatum.ClusterID == cid {
+				cpuv, ramv, gpuv, pvvs, _ := getPriceVectors(cp, costDatum, "", discount, customDiscount, 1)
+				totalContainerCost += totalVectors(cpuv)
+				totalContainerCost += totalVectors(ramv)
+				totalContainerCost += totalVectors(gpuv)
+				for _, pv := range pvvs {
+					totalContainerCost += totalVectors(pv)
+				}
+			}
+		}
+
+		coeff := totalContainerCost / costs.TotalCumulative
+		coefficients[cid] = coeff
+	}
+
+	return coefficients, nil
+}
+
+// AggregationOptions provides optional parameters to AggregateCostData, allowing callers to perform more complex operations
+type AggregationOptions struct {
+	Discount               float64            // percent by which to discount CPU, RAM, and GPU cost
+	CustomDiscount         float64            // additional custom discount applied to all prices
+	IdleCoefficients       map[string]float64 // scales costs by amount of idle resources on a per-cluster basis
+	IncludeEfficiency      bool               // set to true to receive efficiency/usage data
+	IncludeTimeSeries      bool               // set to true to receive time series data
+	Rate                   string             // set to "hourly", "daily", or "monthly" to receive cost rate, rather than cumulative cost
+	ResolutionHours        float64            // hours covered by each vector entry; values <= 0 default to 1.0
+	SharedResourceInfo     *SharedResourceInfo // defines which resources are treated as shared
+	SharedCosts            map[string]*SharedCostInfo // named external costs distributed across aggregations
+	FilteredContainerCount int                // count of containers excluded by filtering — not read in visible code; TODO confirm
+	FilteredEnvironments   map[string]int     // environments excluded by filtering; counted into the even shared-cost split
+	SharedSplit            string             // set to SplitTypeWeighted to split shared costs proportionally to cost
+	TotalContainerCost     float64            // total cost of all containers; denominator for the weighted shared split
+}
+
+// Helper method to test request/usage values against allocation averages for efficiency scores. Generates a warning log if
+// a clamp is required, and returns the (possibly clamped) requested and used averages.
+func clampAverage(requestsAvg float64, usedAverage float64, allocationAvg float64, resource string) (float64, float64) {
+	rAvg := requestsAvg
+	if rAvg > allocationAvg {
+		klog.V(4).Infof("[Warning] Average %s Requested (%f) > Average %s Allocated (%f). Clamping.", resource, rAvg, resource, allocationAvg)
+		rAvg = allocationAvg
+	}
+
+	uAvg := usedAverage
+	if uAvg > allocationAvg {
+		klog.V(4).Infof("[Warning]: Average %s Used (%f) > Average %s Allocated (%f). Clamping.", resource, uAvg, resource, allocationAvg)
+		uAvg = allocationAvg
+	}
+
+	return rAvg, uAvg
+}
+
+// AggregateCostData aggregates raw cost data by field; e.g. namespace, cluster, service, or label. In the case of label, callers
+// must pass a slice of subfields indicating the labels by which to group. Provider is used to define custom resource pricing.
+// See AggregationOptions for optional parameters.
+func AggregateCostData(costData map[string]*CostData, field string, subfields []string, cp cloud.Provider, opts *AggregationOptions) map[string]*Aggregation {
+	discount := opts.Discount
+	customDiscount := opts.CustomDiscount
+	idleCoefficients := opts.IdleCoefficients
+	includeTimeSeries := opts.IncludeTimeSeries
+	includeEfficiency := opts.IncludeEfficiency
+	rate := opts.Rate
+	sr := opts.SharedResourceInfo
+
+	resolutionHours := 1.0
+	if opts.ResolutionHours > 0.0 {
+		resolutionHours = opts.ResolutionHours
+	}
+
+	if idleCoefficients == nil {
+		idleCoefficients = make(map[string]float64)
+	}
+
+	// aggregations collects key-value pairs of resource group-to-aggregated data
+	// e.g. namespace-to-data or label-value-to-data
+	aggregations := make(map[string]*Aggregation)
+
+	// sharedResourceCost is the running total cost of resources that should be reported
+	// as shared across all other resources, rather than reported as a stand-alone category
+	sharedResourceCost := 0.0
+
+	for _, costDatum := range costData {
+		idleCoefficient, ok := idleCoefficients[costDatum.ClusterID]
+		if !ok {
+			idleCoefficient = 1.0
+		}
+		if sr != nil && sr.ShareResources && sr.IsSharedResource(costDatum) {
+			cpuv, ramv, gpuv, pvvs, netv := getPriceVectors(cp, costDatum, rate, discount, customDiscount, idleCoefficient)
+			sharedResourceCost += totalVectors(cpuv)
+			sharedResourceCost += totalVectors(ramv)
+			sharedResourceCost += totalVectors(gpuv)
+			sharedResourceCost += totalVectors(netv)
+			for _, pv := range pvvs {
+				sharedResourceCost += totalVectors(pv)
+			}
+		} else {
+			if field == "cluster" {
+				aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.ClusterID, discount, customDiscount, idleCoefficient, false)
+			} else if field == "node" {
+				aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.NodeName, discount, customDiscount, idleCoefficient, false)
+			} else if field == "namespace" {
+				aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace, discount, customDiscount, idleCoefficient, false)
+			} else if field == "service" {
+				if len(costDatum.Services) > 0 {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.Services[0], discount, customDiscount, idleCoefficient, false)
+				} else {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
+				}
+			} else if field == "deployment" {
+				if len(costDatum.Deployments) > 0 {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.Deployments[0], discount, customDiscount, idleCoefficient, false)
+				} else {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
+				}
+			} else if field == "statefulset" {
+				if len(costDatum.Statefulsets) > 0 {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.Statefulsets[0], discount, customDiscount, idleCoefficient, false)
+				} else {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
+				}
+			} else if field == "daemonset" {
+				if len(costDatum.Daemonsets) > 0 {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.Daemonsets[0], discount, customDiscount, idleCoefficient, false)
+				} else {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
+				}
+			} else if field == "controller" {
+				if controller, kind, hasController := costDatum.GetController(); hasController {
+					key := fmt.Sprintf("%s/%s:%s", costDatum.Namespace, kind, controller)
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, key, discount, customDiscount, idleCoefficient, false)
+				} else {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
+				}
+			} else if field == "label" {
+				found := false
+				if costDatum.Labels != nil {
+					for _, sf := range subfields {
+						if subfieldName, ok := costDatum.Labels[sf]; ok {
+							aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, subfieldName, discount, customDiscount, idleCoefficient, false)
+							found = true
+							break
+						}
+					}
+				}
+				if !found {
+					aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, UnallocatedSubfield, discount, customDiscount, idleCoefficient, false)
+				}
+			} else if field == "pod" {
+				aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, costDatum.Namespace+"/"+costDatum.PodName, discount, customDiscount, idleCoefficient, false)
+			} else if field == "container" {
+				key := fmt.Sprintf("%s/%s/%s/%s", costDatum.ClusterID, costDatum.Namespace, costDatum.PodName, costDatum.Name)
+				aggregateDatum(cp, aggregations, costDatum, field, subfields, rate, key, discount, customDiscount, idleCoefficient, true)
+			}
+		}
+	}
+
+	for key, agg := range aggregations {
+		sharedCoefficient := 1 / float64(len(opts.FilteredEnvironments)+len(aggregations))
+
+		agg.CPUCost = totalVectors(agg.CPUCostVector)
+		agg.RAMCost = totalVectors(agg.RAMCostVector)
+		agg.GPUCost = totalVectors(agg.GPUCostVector)
+		agg.PVCost = totalVectors(agg.PVCostVector)
+		agg.NetworkCost = totalVectors(agg.NetworkCostVector)
+		if opts.SharedSplit == SplitTypeWeighted {
+			d := opts.TotalContainerCost - sharedResourceCost
+			if d == 0 {
+				klog.V(1).Infof("[Warning] Total container cost '%f' and shared resource cost '%f are the same'. Setting sharedCoefficient to 1", opts.TotalContainerCost, sharedResourceCost)
+				sharedCoefficient = 1.0
+			} else {
+				sharedCoefficient = (agg.CPUCost + agg.RAMCost + agg.GPUCost + agg.PVCost + agg.NetworkCost) / d
+			}
+		}
+		agg.SharedCost = sharedResourceCost * sharedCoefficient
+
+		for _, v := range opts.SharedCosts {
+			agg.SharedCost += v.Cost * sharedCoefficient
+		}
+
+		if rate != "" {
+			rateCoeff := agg.RateCoefficient(rate, resolutionHours)
+			agg.CPUCost *= rateCoeff
+			agg.RAMCost *= rateCoeff
+			agg.GPUCost *= rateCoeff
+			agg.PVCost *= rateCoeff
+			agg.NetworkCost *= rateCoeff
+			agg.SharedCost *= rateCoeff
+		}
+
+		agg.TotalCost = agg.CPUCost + agg.RAMCost + agg.GPUCost + agg.PVCost + agg.NetworkCost + agg.SharedCost
+
+		// Evicted and Completed Pods can still show up here, but have 0 cost.
+		// Filter these by default. Any reason to keep them?
+		if agg.TotalCost == 0 {
+			delete(aggregations, key)
+			continue
+		}
+
+		// CPU, RAM, and PV allocation are cumulative per-datum, whereas GPU is rate per-datum
+		agg.CPUAllocationHourlyAverage = totalVectors(agg.CPUAllocationVectors) / agg.TotalHours(resolutionHours)
+		agg.RAMAllocationHourlyAverage = totalVectors(agg.RAMAllocationVectors) / agg.TotalHours(resolutionHours)
+		agg.GPUAllocationHourlyAverage = averageVectors(agg.GPUAllocationVectors)
+		agg.PVAllocationHourlyAverage = totalVectors(agg.PVAllocationVectors) / agg.TotalHours(resolutionHours)
+
+		// TODO niko/etl does this check out for GPU data? Do we need to rewrite GPU queries to be
+		// culumative?
+		agg.CPUAllocationTotal = totalVectors(agg.CPUAllocationVectors)
+		agg.GPUAllocationTotal = totalVectors(agg.GPUAllocationVectors)
+		agg.PVAllocationTotal = totalVectors(agg.PVAllocationVectors)
+		agg.RAMAllocationTotal = totalVectors(agg.RAMAllocationVectors)
+
+		if includeEfficiency {
+			// Default both RAM and CPU to 0% efficiency so that a 0-requested, 0-allocated, 0-used situation
+			// returns 0% efficiency, which should be a red-flag.
+			//
+			// If non-zero numbers are available, then efficiency is defined as:
+			//   idlePercentage =  (requested - used) / allocated
+			//   efficiency = (1.0 - idlePercentage)
+			//
+			// It is possible to score > 100% efficiency, which is meant to be interpreted as a red flag.
+			// It is not possible to score < 0% efficiency.
+
+			agg.CPUEfficiency = 0.0
+			CPUIdle := 0.0
+			if agg.CPUAllocationHourlyAverage > 0.0 {
+				avgCPURequested := averageVectors(agg.CPURequestedVectors)
+				avgCPUUsed := averageVectors(agg.CPUUsedVectors)
+
+				// Clamp averages, log range violations
+				avgCPURequested, avgCPUUsed = clampAverage(avgCPURequested, avgCPUUsed, agg.CPUAllocationHourlyAverage, "CPU")
+
+				CPUIdle = ((avgCPURequested - avgCPUUsed) / agg.CPUAllocationHourlyAverage)
+				agg.CPUEfficiency = 1.0 - CPUIdle
+			}
+
+			agg.RAMEfficiency = 0.0
+			RAMIdle := 0.0
+			if agg.RAMAllocationHourlyAverage > 0.0 {
+				avgRAMRequested := averageVectors(agg.RAMRequestedVectors)
+				avgRAMUsed := averageVectors(agg.RAMUsedVectors)
+
+				// Clamp averages, log range violations
+				avgRAMRequested, avgRAMUsed = clampAverage(avgRAMRequested, avgRAMUsed, agg.RAMAllocationHourlyAverage, "RAM")
+
+				RAMIdle = ((avgRAMRequested - avgRAMUsed) / agg.RAMAllocationHourlyAverage)
+				agg.RAMEfficiency = 1.0 - RAMIdle
+			}
+
+			// Score total efficiency by the sum of CPU and RAM efficiency, weighted by their
+			// respective total costs.
+			agg.Efficiency = 0.0
+			if (agg.CPUCost + agg.RAMCost) > 0 {
+				agg.Efficiency = ((agg.CPUCost * agg.CPUEfficiency) + (agg.RAMCost * agg.RAMEfficiency)) / (agg.CPUCost + agg.RAMCost)
+			}
+		}
+
+		// convert RAM from bytes to GiB
+		agg.RAMAllocationHourlyAverage = agg.RAMAllocationHourlyAverage / 1024 / 1024 / 1024
+		// convert storage from bytes to GiB
+		agg.PVAllocationHourlyAverage = agg.PVAllocationHourlyAverage / 1024 / 1024 / 1024
+
+		// remove time series data if it is not explicitly requested
+		if !includeTimeSeries {
+			agg.CPUCostVector = nil
+			agg.RAMCostVector = nil
+			agg.GPUCostVector = nil
+			agg.PVCostVector = nil
+			agg.NetworkCostVector = nil
+			agg.TotalCostVector = nil
+		} else { // otherwise compute a totalcostvector
+			v1 := addVectors(agg.CPUCostVector, agg.RAMCostVector)
+			v2 := addVectors(v1, agg.GPUCostVector)
+			v3 := addVectors(v2, agg.PVCostVector)
+			v4 := addVectors(v3, agg.NetworkCostVector)
+			agg.TotalCostVector = v4
+		}
+		// Typesafety checks
+		if math.IsNaN(agg.CPUAllocationHourlyAverage) || math.IsInf(agg.CPUAllocationHourlyAverage, 0) {
+			klog.V(1).Infof("[Warning] CPUAllocationHourlyAverage is %f for '%s: %s/%s'", agg.CPUAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.CPUAllocationHourlyAverage = 0
+		}
+		if math.IsNaN(agg.CPUCost) || math.IsInf(agg.CPUCost, 0) {
+			klog.V(1).Infof("[Warning] CPUCost is %f for '%s: %s/%s'", agg.CPUCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.CPUCost = 0
+		}
+		if math.IsNaN(agg.CPUEfficiency) || math.IsInf(agg.CPUEfficiency, 0) {
+			klog.V(1).Infof("[Warning] CPUEfficiency is %f for '%s: %s/%s'", agg.CPUEfficiency, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.CPUEfficiency = 0
+		}
+		if math.IsNaN(agg.Efficiency) || math.IsInf(agg.Efficiency, 0) {
+			klog.V(1).Infof("[Warning] Efficiency is %f for '%s: %s/%s'", agg.Efficiency, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.Efficiency = 0
+		}
+		if math.IsNaN(agg.GPUAllocationHourlyAverage) || math.IsInf(agg.GPUAllocationHourlyAverage, 0) {
+			klog.V(1).Infof("[Warning] GPUAllocationHourlyAverage is %f for '%s: %s/%s'", agg.GPUAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.GPUAllocationHourlyAverage = 0
+		}
+		if math.IsNaN(agg.GPUCost) || math.IsInf(agg.GPUCost, 0) {
+			klog.V(1).Infof("[Warning] GPUCost is %f for '%s: %s/%s'", agg.GPUCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.GPUCost = 0
+		}
+		if math.IsNaN(agg.RAMAllocationHourlyAverage) || math.IsInf(agg.RAMAllocationHourlyAverage, 0) {
+			klog.V(1).Infof("[Warning] RAMAllocationHourlyAverage is %f for '%s: %s/%s'", agg.RAMAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.RAMAllocationHourlyAverage = 0
+		}
+		if math.IsNaN(agg.RAMCost) || math.IsInf(agg.RAMCost, 0) {
+			klog.V(1).Infof("[Warning] RAMCost is %f for '%s: %s/%s'", agg.RAMCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.RAMCost = 0
+		}
+		if math.IsNaN(agg.RAMEfficiency) || math.IsInf(agg.RAMEfficiency, 0) {
+			klog.V(1).Infof("[Warning] RAMEfficiency is %f for '%s: %s/%s'", agg.RAMEfficiency, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.RAMEfficiency = 0
+		}
+		if math.IsNaN(agg.PVAllocationHourlyAverage) || math.IsInf(agg.PVAllocationHourlyAverage, 0) {
+			klog.V(1).Infof("[Warning] PVAllocationHourlyAverage is %f for '%s: %s/%s'", agg.PVAllocationHourlyAverage, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.PVAllocationHourlyAverage = 0
+		}
+		if math.IsNaN(agg.PVCost) || math.IsInf(agg.PVCost, 0) {
+			klog.V(1).Infof("[Warning] PVCost is %f for '%s: %s/%s'", agg.PVCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.PVCost = 0
+		}
+		if math.IsNaN(agg.NetworkCost) || math.IsInf(agg.NetworkCost, 0) {
+			klog.V(1).Infof("[Warning] NetworkCost is %f for '%s: %s/%s'", agg.NetworkCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.NetworkCost = 0
+		}
+		if math.IsNaN(agg.SharedCost) || math.IsInf(agg.SharedCost, 0) {
+			klog.V(1).Infof("[Warning] SharedCost is %f for '%s: %s/%s'", agg.SharedCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.SharedCost = 0
+		}
+		if math.IsNaN(agg.TotalCost) || math.IsInf(agg.TotalCost, 0) {
+			klog.V(1).Infof("[Warning] TotalCost is %f for '%s: %s/%s'", agg.TotalCost, agg.Cluster, agg.Aggregator, agg.Environment)
+			agg.TotalCost = 0
+		}
+	}
+
+	return aggregations
+}
+
+func aggregateDatum(cp cloud.Provider, aggregations map[string]*Aggregation, costDatum *CostData, field string, subfields []string, rate string, key string, discount float64, customDiscount float64, idleCoefficient float64, includeProperties bool) {
+	// add new entry to aggregation results if a new key is encountered
+	if _, ok := aggregations[key]; !ok {
+		agg := &Aggregation{
+			Aggregator:  field,
+			Environment: key,
+		}
+		if len(subfields) > 0 {
+			agg.Subfields = subfields
+		}
+		if includeProperties {
+			props := &kubecost.Properties{}
+			props.SetCluster(costDatum.ClusterID)
+			props.SetNode(costDatum.NodeName)
+			if controller, kind, hasController := costDatum.GetController(); hasController {
+				props.SetController(controller)
+				props.SetControllerKind(kind)
+			}
+			props.SetLabels(costDatum.Labels)
+			props.SetNamespace(costDatum.Namespace)
+			props.SetPod(costDatum.PodName)
+			props.SetServices(costDatum.Services)
+			props.SetContainer(costDatum.Name)
+			agg.Properties = props
+		}
+
+		aggregations[key] = agg
+	}
+
+	mergeVectors(cp, costDatum, aggregations[key], rate, discount, customDiscount, idleCoefficient)
+}
+
+func mergeVectors(cp cloud.Provider, costDatum *CostData, aggregation *Aggregation, rate string, discount float64, customDiscount float64, idleCoefficient float64) {
+	aggregation.CPUAllocationVectors = addVectors(costDatum.CPUAllocation, aggregation.CPUAllocationVectors)
+	aggregation.CPURequestedVectors = addVectors(costDatum.CPUReq, aggregation.CPURequestedVectors)
+	aggregation.CPUUsedVectors = addVectors(costDatum.CPUUsed, aggregation.CPUUsedVectors)
+
+	aggregation.RAMAllocationVectors = addVectors(costDatum.RAMAllocation, aggregation.RAMAllocationVectors)
+	aggregation.RAMRequestedVectors = addVectors(costDatum.RAMReq, aggregation.RAMRequestedVectors)
+	aggregation.RAMUsedVectors = addVectors(costDatum.RAMUsed, aggregation.RAMUsedVectors)
+
+	aggregation.GPUAllocationVectors = addVectors(costDatum.GPUReq, aggregation.GPUAllocationVectors)
+
+	for _, pvcd := range costDatum.PVCData {
+		aggregation.PVAllocationVectors = addVectors(pvcd.Values, aggregation.PVAllocationVectors)
+	}
+
+	cpuv, ramv, gpuv, pvvs, netv := getPriceVectors(cp, costDatum, rate, discount, customDiscount, idleCoefficient)
+	aggregation.CPUCostVector = addVectors(cpuv, aggregation.CPUCostVector)
+	aggregation.RAMCostVector = addVectors(ramv, aggregation.RAMCostVector)
+	aggregation.GPUCostVector = addVectors(gpuv, aggregation.GPUCostVector)
+	aggregation.NetworkCostVector = addVectors(netv, aggregation.NetworkCostVector)
+	for _, vectorList := range pvvs {
+		aggregation.PVCostVector = addVectors(aggregation.PVCostVector, vectorList)
+	}
+}
+
+// Returns the blended discounts applied to the node as a result of global discounts and reserved instance
+// discounts
+func getDiscounts(costDatum *CostData, cpuCost float64, ramCost float64, discount float64) (float64, float64) {
+	if costDatum.NodeData == nil {
+		return discount, discount
+	}
+	if costDatum.NodeData.IsSpot() {
+		return 0, 0
+	}
+
+	reserved := costDatum.NodeData.Reserved
+
+	// blended discounts
+	blendedCPUDiscount := discount
+	blendedRAMDiscount := discount
+
+	if reserved != nil && reserved.CPUCost > 0 && reserved.RAMCost > 0 {
+		reservedCPUDiscount := 0.0
+		if cpuCost == 0 {
+			klog.V(1).Infof("[Warning] No cpu cost found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
+		} else {
+			reservedCPUDiscount = 1.0 - (reserved.CPUCost / cpuCost)
+		}
+		reservedRAMDiscount := 0.0
+		if ramCost == 0 {
+			klog.V(1).Infof("[Warning] No ram cost found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
+		} else {
+			reservedRAMDiscount = 1.0 - (reserved.RAMCost / ramCost)
+		}
+
+		// AWS passes the # of reserved CPU and RAM as -1 to represent "All"
+		if reserved.ReservedCPU < 0 && reserved.ReservedRAM < 0 {
+			blendedCPUDiscount = reservedCPUDiscount
+			blendedRAMDiscount = reservedRAMDiscount
+		} else {
+			nodeCPU, ierr := strconv.ParseInt(costDatum.NodeData.VCPU, 10, 64)
+			nodeRAM, ferr := strconv.ParseFloat(costDatum.NodeData.RAMBytes, 64)
+			if ierr == nil && ferr == nil {
+				nodeRAMGB := nodeRAM / 1024 / 1024 / 1024
+				reservedRAMGB := float64(reserved.ReservedRAM) / 1024 / 1024 / 1024
+				nonReservedCPU := nodeCPU - reserved.ReservedCPU
+				nonReservedRAM := nodeRAMGB - reservedRAMGB
+
+				if nonReservedCPU == 0 {
+					blendedCPUDiscount = reservedCPUDiscount
+				} else {
+					if nodeCPU == 0 {
+						klog.V(1).Infof("[Warning] No ram found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
+					} else {
+						blendedCPUDiscount = (float64(reserved.ReservedCPU) * reservedCPUDiscount) + (float64(nonReservedCPU)*discount)/float64(nodeCPU)
+					}
+				}
+
+				if nonReservedRAM == 0 {
+					blendedRAMDiscount = reservedRAMDiscount
+				} else {
+					if nodeRAMGB == 0 {
+						klog.V(1).Infof("[Warning] No ram found for cluster '%s' node '%s'", costDatum.ClusterID, costDatum.NodeName)
+					} else {
+						blendedRAMDiscount = (reservedRAMGB * reservedRAMDiscount) + (nonReservedRAM*discount)/nodeRAMGB
+					}
+				}
+			}
+		}
+	}
+
+	return blendedCPUDiscount, blendedRAMDiscount
+}
+
+func parseVectorPricing(cfg *cloud.CustomPricing, costDatum *CostData, cpuCostStr, ramCostStr, gpuCostStr, pvCostStr string) (float64, float64, float64, float64, bool) {
+	usesCustom := false
+	cpuCost, err := strconv.ParseFloat(cpuCostStr, 64)
+	if err != nil || math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) || cpuCost == 0 {
+		cpuCost, err = strconv.ParseFloat(cfg.CPU, 64)
+		usesCustom = true
+		if err != nil || math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
+			cpuCost = 0
+		}
+	}
+	ramCost, err := strconv.ParseFloat(ramCostStr, 64)
+	if err != nil || math.IsNaN(ramCost) || math.IsInf(ramCost, 0) || ramCost == 0 {
+		ramCost, err = strconv.ParseFloat(cfg.RAM, 64)
+		usesCustom = true
+		if err != nil || math.IsNaN(ramCost) || math.IsInf(ramCost, 0) {
+			ramCost = 0
+		}
+	}
+	gpuCost, err := strconv.ParseFloat(gpuCostStr, 64)
+	if err != nil || math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
+		gpuCost, err = strconv.ParseFloat(cfg.GPU, 64)
+		if err != nil || math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
+			gpuCost = 0
+		}
+	}
+	pvCost, err := strconv.ParseFloat(pvCostStr, 64)
+	if err != nil || math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
+		pvCost, err = strconv.ParseFloat(cfg.Storage, 64)
+		if err != nil || math.IsNaN(pvCost) || math.IsInf(pvCost, 0) {
+			pvCost = 0
+		}
+	}
+	return cpuCost, ramCost, gpuCost, pvCost, usesCustom
+}
+
// getPriceVectors computes priced cost vector series for a single cost datum,
// returning (cpu, ram, gpu, pv, network) where pv is one series per PVC claim.
// Per-unit prices come from custom pricing config when enabled, otherwise from
// the datum's node data; blended discounts, the custom discount, and the idle
// coefficient are then applied per sample. Timestamps are rounded to the
// nearest ten seconds to allow later vector matching (see addVectors).
func getPriceVectors(cp cloud.Provider, costDatum *CostData, rate string, discount float64, customDiscount float64, idleCoefficient float64) ([]*util.Vector, []*util.Vector, []*util.Vector, [][]*util.Vector, []*util.Vector) {

	var cpuCost float64
	var ramCost float64
	var gpuCost float64
	var pvCost float64
	var usesCustom bool

	// If custom pricing is enabled and can be retrieved, replace
	// default cost values with custom values
	customPricing, err := cp.GetConfig()
	if err != nil {
		klog.Errorf("failed to load custom pricing: %s", err)
	}
	if cloud.CustomPricesEnabled(cp) && err == nil {
		// NOTE(review): this branch calls costDatum.NodeData.IsSpot() without a
		// nil check, while the branch below implies NodeData can be nil —
		// confirm IsSpot is safe on a nil receiver.
		var cpuCostStr string
		var ramCostStr string
		var gpuCostStr string
		var pvCostStr string
		if costDatum.NodeData.IsSpot() {
			cpuCostStr = customPricing.SpotCPU
			ramCostStr = customPricing.SpotRAM
			gpuCostStr = customPricing.SpotGPU
		} else {
			cpuCostStr = customPricing.CPU
			ramCostStr = customPricing.RAM
			gpuCostStr = customPricing.GPU
		}
		pvCostStr = customPricing.Storage
		cpuCost, ramCost, gpuCost, pvCost, usesCustom = parseVectorPricing(customPricing, costDatum, cpuCostStr, ramCostStr, gpuCostStr, pvCostStr)
	} else if costDatum.NodeData == nil && err == nil {
		// No node pricing data: fall back entirely to configured prices.
		cpuCostStr := customPricing.CPU
		ramCostStr := customPricing.RAM
		gpuCostStr := customPricing.GPU
		pvCostStr := customPricing.Storage
		cpuCost, ramCost, gpuCost, pvCost, usesCustom = parseVectorPricing(customPricing, costDatum, cpuCostStr, ramCostStr, gpuCostStr, pvCostStr)
	} else {
		// Normal path: use the prices observed on the datum's node.
		cpuCostStr := costDatum.NodeData.VCPUCost
		ramCostStr := costDatum.NodeData.RAMCost
		gpuCostStr := costDatum.NodeData.GPUCost
		pvCostStr := costDatum.NodeData.StorageCost
		cpuCost, ramCost, gpuCost, pvCost, usesCustom = parseVectorPricing(customPricing, costDatum, cpuCostStr, ramCostStr, gpuCostStr, pvCostStr)
	}

	if usesCustom {
		log.DedupedWarningf(5, "No pricing data found for node `%s` , using custom pricing", costDatum.NodeName)
	}

	cpuDiscount, ramDiscount := getDiscounts(costDatum, cpuCost, ramCost, discount)

	klog.V(4).Infof("Node Name: %s", costDatum.NodeName)
	klog.V(4).Infof("Blended CPU Discount: %f", cpuDiscount)
	klog.V(4).Infof("Blended RAM Discount: %f", ramDiscount)

	// TODO should we try to apply the rate coefficient here or leave it as a totals-only metric?
	rateCoeff := 1.0

	// A zero idle coefficient means "not provided"; treat as no idle scaling
	// (dividing by zero would otherwise produce Inf values).
	if idleCoefficient == 0 {
		idleCoefficient = 1.0
	}

	// CPU cost per sample: cores * $/core, discounted, divided by idle share.
	cpuv := make([]*util.Vector, 0, len(costDatum.CPUAllocation))
	for _, val := range costDatum.CPUAllocation {
		cpuv = append(cpuv, &util.Vector{
			Timestamp: math.Round(val.Timestamp/10) * 10,
			Value:     (val.Value * cpuCost * (1 - cpuDiscount) * (1 - customDiscount) / idleCoefficient) * rateCoeff,
		})
	}

	// RAM cost per sample: bytes converted to GiB * $/GiB.
	ramv := make([]*util.Vector, 0, len(costDatum.RAMAllocation))
	for _, val := range costDatum.RAMAllocation {
		ramv = append(ramv, &util.Vector{
			Timestamp: math.Round(val.Timestamp/10) * 10,
			Value:     ((val.Value / 1024 / 1024 / 1024) * ramCost * (1 - ramDiscount) * (1 - customDiscount) / idleCoefficient) * rateCoeff,
		})
	}

	// GPU cost uses the global (unblended) discount rather than a blended one.
	gpuv := make([]*util.Vector, 0, len(costDatum.GPUReq))
	for _, val := range costDatum.GPUReq {
		gpuv = append(gpuv, &util.Vector{
			Timestamp: math.Round(val.Timestamp/10) * 10,
			Value:     (val.Value * gpuCost * (1 - discount) * (1 - customDiscount) / idleCoefficient) * rateCoeff,
		})
	}

	// PV cost: one series per claim; claims without volume data are skipped.
	pvvs := make([][]*util.Vector, 0, len(costDatum.PVCData))
	for _, pvcData := range costDatum.PVCData {
		pvv := make([]*util.Vector, 0, len(pvcData.Values))
		if pvcData.Volume != nil {
			// parse error intentionally ignored: cost defaults to 0
			cost, _ := strconv.ParseFloat(pvcData.Volume.Cost, 64)

			// override with custom pricing if enabled
			if cloud.CustomPricesEnabled(cp) {
				cost = pvCost
			}

			for _, val := range pvcData.Values {
				pvv = append(pvv, &util.Vector{
					Timestamp: math.Round(val.Timestamp/10) * 10,
					Value:     ((val.Value / 1024 / 1024 / 1024) * cost * (1 - customDiscount) / idleCoefficient) * rateCoeff,
				})
			}
			pvvs = append(pvvs, pvv)
		}
	}

	// Network cost values are passed through as-is: no discount or idle
	// coefficient is applied, only timestamp rounding.
	netv := make([]*util.Vector, 0, len(costDatum.NetworkData))
	for _, val := range costDatum.NetworkData {
		netv = append(netv, &util.Vector{
			Timestamp: math.Round(val.Timestamp/10) * 10,
			Value:     val.Value,
		})
	}

	return cpuv, ramv, gpuv, pvvs, netv
}
+
+func averageVectors(vectors []*util.Vector) float64 {
+	if len(vectors) == 0 {
+		return 0.0
+	}
+	return totalVectors(vectors) / float64(len(vectors))
+}
+
+func totalVectors(vectors []*util.Vector) float64 {
+	total := 0.0
+	for _, vector := range vectors {
+		total += vector.Value
+	}
+	return total
+}
+
+// addVectors adds two slices of Vectors. Vector timestamps are rounded to the
+// nearest ten seconds to allow matching of Vectors within a delta allowance.
+// Matching Vectors are summed, while unmatched Vectors are passed through.
+// e.g. [(t=1, 1), (t=2, 2)] + [(t=2, 2), (t=3, 3)] = [(t=1, 1), (t=2, 4), (t=3, 3)]
+func addVectors(xvs []*util.Vector, yvs []*util.Vector) []*util.Vector {
+	sumOp := func(result *util.Vector, x *float64, y *float64) bool {
+		if x != nil && y != nil {
+			result.Value = *x + *y
+		} else if y != nil {
+			result.Value = *y
+		} else if x != nil {
+			result.Value = *x
+		}
+
+		return true
+	}
+
+	return util.ApplyVectorOp(xvs, yvs, sumOp)
+}
+
// minCostDataLength sets the minimum number of time series data points
// required to cache both raw and aggregated cost data.
const minCostDataLength = 2
+
// EmptyDataError describes an error caused by empty cost data for some
// defined interval
type EmptyDataError struct {
	err    error           // underlying cause, if any (may be nil)
	window kubecost.Window // the queried time range that produced no data
}
+
+// Error implements the error interface
+func (ede *EmptyDataError) Error() string {
+	err := fmt.Sprintf("empty data for range: %s", ede.window)
+	if ede.err != nil {
+		err += fmt.Sprintf(": %s", ede.err)
+	}
+	return err
+}
+
+func costDataTimeSeriesLength(costData map[string]*CostData) int {
+	l := 0
+	for _, cd := range costData {
+		if l < len(cd.RAMAllocation) {
+			l = len(cd.RAMAllocation)
+		}
+		if l < len(cd.CPUAllocation) {
+			l = len(cd.CPUAllocation)
+		}
+	}
+	return l
+}
+
+// ScaleHourlyCostData converts per-hour cost data to per-resolution data. If the target resolution is higher (i.e. < 1.0h)
+// then we can do simple multiplication by the fraction-of-an-hour and retain accuracy. If the target resolution is
+// lower (i.e. > 1.0h) then we sum groups of hourly data by resolution to maintain fidelity.
+// e.g. (100 hours of per-hour hourly data, resolutionHours=10) => 10 data points, grouped and summed by 10-hour window
+// e.g. (20 minutes of per-minute hourly data, resolutionHours=1/60) => 20 data points, scaled down by a factor of 60
+func ScaleHourlyCostData(data map[string]*CostData, resolutionHours float64) map[string]*CostData {
+	scaled := map[string]*CostData{}
+
+	for key, datum := range data {
+		datum.RAMReq = scaleVectorSeries(datum.RAMReq, resolutionHours)
+		datum.RAMUsed = scaleVectorSeries(datum.RAMUsed, resolutionHours)
+		datum.RAMAllocation = scaleVectorSeries(datum.RAMAllocation, resolutionHours)
+		datum.CPUReq = scaleVectorSeries(datum.CPUReq, resolutionHours)
+		datum.CPUUsed = scaleVectorSeries(datum.CPUUsed, resolutionHours)
+		datum.CPUAllocation = scaleVectorSeries(datum.CPUAllocation, resolutionHours)
+		datum.GPUReq = scaleVectorSeries(datum.GPUReq, resolutionHours)
+		datum.NetworkData = scaleVectorSeries(datum.NetworkData, resolutionHours)
+
+		for _, pvcDatum := range datum.PVCData {
+			pvcDatum.Values = scaleVectorSeries(pvcDatum.Values, resolutionHours)
+		}
+
+		scaled[key] = datum
+	}
+
+	return scaled
+}
+
+func scaleVectorSeries(vs []*util.Vector, resolutionHours float64) []*util.Vector {
+	// if scaling to a lower resolution, compress the hourly data for maximum accuracy
+	if resolutionHours > 1.0 {
+		return compressVectorSeries(vs, resolutionHours)
+	}
+
+	// if scaling to a higher resolution, simply scale each value down by the fraction of an hour
+	for _, v := range vs {
+		v.Value *= resolutionHours
+	}
+	return vs
+}
+
+func compressVectorSeries(vs []*util.Vector, resolutionHours float64) []*util.Vector {
+	if len(vs) == 0 {
+		return vs
+	}
+
+	compressed := []*util.Vector{}
+
+	threshold := float64(60 * 60 * resolutionHours)
+	var acc *util.Vector
+
+	for i, v := range vs {
+		if acc == nil {
+			// start a new accumulation from current datum
+			acc = &util.Vector{
+				Value:     vs[i].Value,
+				Timestamp: vs[i].Timestamp,
+			}
+			continue
+		}
+		if v.Timestamp-acc.Timestamp < threshold {
+			// v should be accumulated in current datum
+			acc.Value += v.Value
+		} else {
+			// v falls outside current datum's threshold; append and start a new one
+			compressed = append(compressed, acc)
+			acc = &util.Vector{
+				Value:     vs[i].Value,
+				Timestamp: vs[i].Timestamp,
+			}
+		}
+	}
+	// append any remaining, incomplete accumulation
+	if acc != nil {
+		compressed = append(compressed, acc)
+	}
+
+	return compressed
+}
+
// AggregateQueryOpts carries the parameters controlling an aggregate cost
// model query: rate conversion, cost data filters, shared-resource handling,
// result shape, and cache behavior.
type AggregateQueryOpts struct {
	Rate                  string              // rate conversion applied to cost totals; empty string means no conversion
	Filters               map[string]string   // cost data filters keyed by name, e.g. "namespace", "node", "cluster", "labels", "podprefix"
	SharedResources       *SharedResourceInfo // resources to treat as shared; shared data is retained even when filtered
	ShareSplit            string              // strategy for splitting shared cost across aggregations, e.g. SplitTypeWeighted
	AllocateIdle          bool                // whether to allocate idle cost (applied outside this view — confirm semantics at call site)
	IncludeTimeSeries     bool                // retain per-timestamp cost vectors on each aggregation result
	IncludeEfficiency     bool                // compute CPU/RAM/total efficiency scores on each aggregation
	DisableCache          bool                // bypass the aggregate cache read for this query
	ClearCache            bool                // flush both aggregate and cost data caches before computing
	NoCache               bool                // also skips the aggregate cache read (distinction from DisableCache not visible here — verify)
	NoExpireCache         bool                // cache the result with no expiration
	RemoteEnabled         bool                // remote read enablement; part of the cost-data cache key
	DisableSharedOverhead bool                // disable shared overhead cost (applied outside this view — confirm semantics at call site)
	UseETLAdapter         bool                // route through the ETL adapter (applied outside this view — confirm semantics at call site)
}
+
+func DefaultAggregateQueryOpts() *AggregateQueryOpts {
+	return &AggregateQueryOpts{
+		Rate:                  "",
+		Filters:               map[string]string{},
+		SharedResources:       nil,
+		ShareSplit:            SplitTypeWeighted,
+		AllocateIdle:          false,
+		IncludeTimeSeries:     true,
+		IncludeEfficiency:     true,
+		DisableCache:          false,
+		ClearCache:            false,
+		NoCache:               false,
+		NoExpireCache:         false,
+		RemoteEnabled:         env.IsRemoteEnabled(),
+		DisableSharedOverhead: false,
+		UseETLAdapter:         false,
+	}
+}
+
+// ComputeAggregateCostModel computes cost data for the given window, then aggregates it by the given fields.
+// Data is cached on two levels: the aggregation is cached as well as the underlying cost data.
+func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client, window kubecost.Window, field string, subfields []string, opts *AggregateQueryOpts) (map[string]*Aggregation, string, error) {
+	// Window is the range of the query, i.e. (start, end)
+	// It must be closed, i.e. neither start nor end can be nil
+	if window.IsOpen() {
+		return nil, "", fmt.Errorf("illegal window: %s", window)
+	}
+
+	// Resolution is the duration of each datum in the cost model range query,
+	// which corresponds to both the step size given to Prometheus query_range
+	// and to the window passed to the range queries.
+	// i.e. by default, we support 1h resolution for queries of windows defined
+	// in terms of days or integer multiples of hours (e.g. 1d, 12h)
+	resolution := time.Hour
+
+	// Determine resolution by size of duration and divisibility of window.
+	// By default, resolution is 1hr. If the window is smaller than 1hr, then
+	// resolution goes down to 1m. If the window is not a multiple of 1hr, then
+	// resolution goes down to 1m. If the window is greater than 1d, then
+	// resolution gets scaled up to improve performance by reducing the amount
+	// of data being computed.
+	durMins := int64(math.Trunc(window.Minutes()))
+	if durMins < 24*60 { // less than 1d
+		// TODO should we have additional options for going by
+		// e.g. 30m? 10m? 5m?
+		if durMins%60 != 0 || durMins < 3*60 { // not divisible by 1h or less than 3h
+			resolution = time.Minute
+		}
+	} else { // greater than 1d
+		if durMins >= 7*24*60 { // greater than (or equal to) 7 days
+			resolution = 24.0 * time.Hour
+		} else if durMins >= 2*24*60 { // greater than (or equal to) 2 days
+			resolution = 2.0 * time.Hour
+		}
+	}
+
+	// Parse options
+	if opts == nil {
+		opts = DefaultAggregateQueryOpts()
+	}
+	rate := opts.Rate
+	filters := opts.Filters
+	sri := opts.SharedResources
+	shared := opts.ShareSplit
+	allocateIdle := opts.AllocateIdle
+	includeTimeSeries := opts.IncludeTimeSeries
+	includeEfficiency := opts.IncludeEfficiency
+	disableCache := opts.DisableCache
+	clearCache := opts.ClearCache
+	noCache := opts.NoCache
+	noExpireCache := opts.NoExpireCache
+	remoteEnabled := opts.RemoteEnabled
+	disableSharedOverhead := opts.DisableSharedOverhead
+
+	// retainFuncs override filterFuncs. Make sure shared resources do not
+	// get filtered out.
+	retainFuncs := []FilterFunc{}
+	retainFuncs = append(retainFuncs, func(cd *CostData) (bool, string) {
+		if sri != nil {
+			return sri.IsSharedResource(cd), ""
+		}
+		return false, ""
+	})
+
+	// Parse cost data filters into FilterFuncs
+	filterFuncs := []FilterFunc{}
+
+	aggregateEnvironment := func(costDatum *CostData) string {
+		if field == "cluster" {
+			return costDatum.ClusterID
+		} else if field == "node" {
+			return costDatum.NodeName
+		} else if field == "namespace" {
+			return costDatum.Namespace
+		} else if field == "service" {
+			if len(costDatum.Services) > 0 {
+				return costDatum.Namespace + "/" + costDatum.Services[0]
+			}
+		} else if field == "deployment" {
+			if len(costDatum.Deployments) > 0 {
+				return costDatum.Namespace + "/" + costDatum.Deployments[0]
+			}
+		} else if field == "daemonset" {
+			if len(costDatum.Daemonsets) > 0 {
+				return costDatum.Namespace + "/" + costDatum.Daemonsets[0]
+			}
+		} else if field == "statefulset" {
+			if len(costDatum.Statefulsets) > 0 {
+				return costDatum.Namespace + "/" + costDatum.Statefulsets[0]
+			}
+		} else if field == "label" {
+			if costDatum.Labels != nil {
+				for _, sf := range subfields {
+					if subfieldName, ok := costDatum.Labels[sf]; ok {
+						return fmt.Sprintf("%s=%s", sf, subfieldName)
+					}
+				}
+			}
+		} else if field == "pod" {
+			return costDatum.Namespace + "/" + costDatum.PodName
+		} else if field == "container" {
+			return costDatum.Namespace + "/" + costDatum.PodName + "/" + costDatum.Name
+		}
+		return ""
+	}
+
+	if filters["podprefix"] != "" {
+		pps := []string{}
+		for _, fp := range strings.Split(filters["podprefix"], ",") {
+			if fp != "" {
+				cleanedFilter := strings.TrimSpace(fp)
+				pps = append(pps, cleanedFilter)
+			}
+		}
+		filterFuncs = append(filterFuncs, func(cd *CostData) (bool, string) {
+			aggEnv := aggregateEnvironment(cd)
+			for _, pp := range pps {
+				cleanedFilter := strings.TrimSpace(pp)
+				if strings.HasPrefix(cd.PodName, cleanedFilter) {
+					return true, aggEnv
+				}
+			}
+			return false, aggEnv
+		})
+	}
+
+	if filters["namespace"] != "" {
+		// namespaces may be comma-separated, e.g. kubecost,default
+		// multiple namespaces are evaluated as an OR relationship
+		nss := strings.Split(filters["namespace"], ",")
+		filterFuncs = append(filterFuncs, func(cd *CostData) (bool, string) {
+			aggEnv := aggregateEnvironment(cd)
+			for _, ns := range nss {
+				nsTrim := strings.TrimSpace(ns)
+				if cd.Namespace == nsTrim {
+					return true, aggEnv
+				} else if strings.HasSuffix(nsTrim, "*") { // trigger wildcard prefix filtering
+					nsTrimAsterisk := strings.TrimSuffix(nsTrim, "*")
+					if strings.HasPrefix(cd.Namespace, nsTrimAsterisk) {
+						return true, aggEnv
+					}
+				}
+			}
+			return false, aggEnv
+		})
+	}
+
+	if filters["node"] != "" {
+		// nodes may be comma-separated, e.g. aws-node-1,aws-node-2
+		// multiple nodes are evaluated as an OR relationship
+		nodes := strings.Split(filters["node"], ",")
+		filterFuncs = append(filterFuncs, func(cd *CostData) (bool, string) {
+			aggEnv := aggregateEnvironment(cd)
+			for _, node := range nodes {
+				nodeTrim := strings.TrimSpace(node)
+				if cd.NodeName == nodeTrim {
+					return true, aggEnv
+				} else if strings.HasSuffix(nodeTrim, "*") { // trigger wildcard prefix filtering
+					nodeTrimAsterisk := strings.TrimSuffix(nodeTrim, "*")
+					if strings.HasPrefix(cd.NodeName, nodeTrimAsterisk) {
+						return true, aggEnv
+					}
+				}
+			}
+			return false, aggEnv
+		})
+	}
+
+	if filters["cluster"] != "" {
+		// clusters may be comma-separated, e.g. cluster-one,cluster-two
+		// multiple clusters are evaluated as an OR relationship
+		cs := strings.Split(filters["cluster"], ",")
+		filterFuncs = append(filterFuncs, func(cd *CostData) (bool, string) {
+			aggEnv := aggregateEnvironment(cd)
+			for _, c := range cs {
+				cTrim := strings.TrimSpace(c)
+				id, name := cd.ClusterID, cd.ClusterName
+				if id == cTrim || name == cTrim {
+					return true, aggEnv
+				} else if strings.HasSuffix(cTrim, "*") { // trigger wildcard prefix filtering
+					cTrimAsterisk := strings.TrimSuffix(cTrim, "*")
+					if strings.HasPrefix(id, cTrimAsterisk) || strings.HasPrefix(name, cTrimAsterisk) {
+						return true, aggEnv
+					}
+				}
+			}
+			return false, aggEnv
+		})
+	}
+
+	if filters["labels"] != "" {
+		// labels are expected to be comma-separated and to take the form key=value
+		// e.g. app=cost-analyzer,app.kubernetes.io/instance=kubecost
+		// each different label will be applied as an AND
+		// multiple values for a single label will be evaluated as an OR
+		labelValues := map[string][]string{}
+		ls := strings.Split(filters["labels"], ",")
+		for _, l := range ls {
+			lTrim := strings.TrimSpace(l)
+			label := strings.Split(lTrim, "=")
+			if len(label) == 2 {
+				ln := prom.SanitizeLabelName(strings.TrimSpace(label[0]))
+				lv := strings.TrimSpace(label[1])
+				labelValues[ln] = append(labelValues[ln], lv)
+			} else {
+				// label is not of the form name=value, so log it and move on
+				log.Warningf("ComputeAggregateCostModel: skipping illegal label filter: %s", l)
+			}
+		}
+
+		// Generate FilterFunc for each set of label filters by invoking a function instead of accessing
+		// values by closure to prevent reference-type looping bug.
+		// (see https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable)
+		for label, values := range labelValues {
+			ff := (func(l string, vs []string) FilterFunc {
+				return func(cd *CostData) (bool, string) {
+					ae := aggregateEnvironment(cd)
+					for _, v := range vs {
+						if v == "__unallocated__" { // Special case. __unallocated__ means return all pods without the attached label
+							if _, ok := cd.Labels[label]; !ok {
+								return true, ae
+							}
+						}
+						if cd.Labels[label] == v {
+							return true, ae
+						} else if strings.HasSuffix(v, "*") { // trigger wildcard prefix filtering
+							vTrim := strings.TrimSuffix(v, "*")
+							if strings.HasPrefix(cd.Labels[label], vTrim) {
+								return true, ae
+							}
+						}
+					}
+					return false, ae
+				}
+			})(label, values)
+			filterFuncs = append(filterFuncs, ff)
+		}
+	}
+
+	// clear cache prior to checking the cache so that a clearCache=true
+	// request always returns a freshly computed value
+	if clearCache {
+		a.AggregateCache.Flush()
+		a.CostDataCache.Flush()
+	}
+
+	cacheExpiry := a.GetCacheExpiration(window.Duration())
+	if noExpireCache {
+		cacheExpiry = cache.NoExpiration
+	}
+
+	// parametrize cache key by all request parameters
+	aggKey := GenerateAggKey(window, field, subfields, opts)
+
+	thanosOffset := time.Now().Add(-thanos.OffsetDuration())
+	if a.ThanosClient != nil && window.End().After(thanosOffset) {
+		log.Infof("ComputeAggregateCostModel: setting end time backwards to first present data")
+
+		// Apply offsets to both end and start times to maintain correct time range
+		deltaDuration := window.End().Sub(thanosOffset)
+		s := window.Start().Add(-1 * deltaDuration)
+		e := time.Now().Add(-thanos.OffsetDuration())
+		window.Set(&s, &e)
+	}
+
+	dur, off := window.ToDurationOffset()
+	key := fmt.Sprintf(`%s:%s:%fh:%t`, dur, off, resolution.Hours(), remoteEnabled)
+
+	// report message about which of the two caches hit. by default report a miss
+	cacheMessage := fmt.Sprintf("ComputeAggregateCostModel: L1 cache miss: %s L2 cache miss: %s", aggKey, key)
+
+	// check the cache for aggregated response; if cache is hit and not disabled, return response
+	if value, found := a.AggregateCache.Get(aggKey); found && !disableCache && !noCache {
+		result, ok := value.(map[string]*Aggregation)
+		if !ok {
+			// disable cache and recompute if type cast fails
+			log.Errorf("ComputeAggregateCostModel: caching error: failed to cast aggregate data to struct: %s", aggKey)
+			return a.ComputeAggregateCostModel(promClient, window, field, subfields, opts)
+		}
+		return result, fmt.Sprintf("aggregate cache hit: %s", aggKey), nil
+	}
+
+	if window.Hours() >= 1.0 {
+		// exclude the last window of the time frame to match Prometheus definitions of range, offset, and resolution
+		start := window.Start().Add(resolution)
+		window.Set(&start, window.End())
+	} else {
+		// don't cache requests for durations of less than one hour
+		disableCache = true
+	}
+
+	// attempt to retrieve cost data from cache
+	var costData map[string]*CostData
+	var err error
+	cacheData, found := a.CostDataCache.Get(key)
+	if found && !disableCache && !noCache {
+		ok := false
+		costData, ok = cacheData.(map[string]*CostData)
+		cacheMessage = fmt.Sprintf("ComputeAggregateCostModel: L1 cache miss: %s, L2 cost data cache hit: %s", aggKey, key)
+		if !ok {
+			log.Errorf("ComputeAggregateCostModel: caching error: failed to cast cost data to struct: %s", key)
+		}
+	} else {
+		log.Infof("ComputeAggregateCostModel: missed cache: %s (found %t, disableCache %t, noCache %t)", key, found, disableCache, noCache)
+
+		costData, err = a.Model.ComputeCostDataRange(promClient, a.CloudProvider, window, resolution, "", "", remoteEnabled)
+		if err != nil {
+			if prom.IsErrorCollection(err) {
+				return nil, "", err
+			}
+			if pce, ok := err.(prom.CommError); ok {
+				return nil, "", pce
+			}
+			if strings.Contains(err.Error(), "data is empty") {
+				return nil, "", &EmptyDataError{err: err, window: window}
+			}
+			return nil, "", err
+		}
+
+		// compute length of the time series in the cost data and only compute
+		// aggregates and cache if the length is sufficiently high
+		costDataLen := costDataTimeSeriesLength(costData)
+
+		if costDataLen == 0 {
+			return nil, "", &EmptyDataError{window: window}
+		}
+		if costDataLen >= minCostDataLength && !noCache {
+			log.Infof("ComputeAggregateCostModel: setting L2 cache: %s", key)
+			a.CostDataCache.Set(key, costData, cacheExpiry)
+		}
+	}
+
+	c, err := a.CloudProvider.GetConfig()
+	if err != nil {
+		return nil, "", err
+	}
+	discount, err := ParsePercentString(c.Discount)
+	if err != nil {
+		return nil, "", err
+	}
+	customDiscount, err := ParsePercentString(c.NegotiatedDiscount)
+	if err != nil {
+		return nil, "", err
+	}
+
+	sc := make(map[string]*SharedCostInfo)
+	if !disableSharedOverhead {
+		for key, val := range c.SharedCosts {
+			cost, err := strconv.ParseFloat(val, 64)
+			durationCoefficient := window.Hours() / util.HoursPerMonth
+			if err != nil {
+				return nil, "", fmt.Errorf("unable to parse shared cost %s: %s", val, err)
+			}
+			sc[key] = &SharedCostInfo{
+				Name: key,
+				Cost: cost * durationCoefficient,
+			}
+		}
+	}
+
+	idleCoefficients := make(map[string]float64)
+	if allocateIdle {
+		duration, offset := window.ToDurationOffset()
+
+		idleDurationCalcHours := window.Hours()
+		if window.Hours() < 1 {
+			idleDurationCalcHours = 1
+		}
+		duration = fmt.Sprintf("%dh", int(idleDurationCalcHours))
+
+		if a.ThanosClient != nil {
+			offset = thanos.Offset()
+			log.Infof("ComputeAggregateCostModel: setting offset to %s", offset)
+		}
+
+		idleCoefficients, err = a.ComputeIdleCoefficient(costData, promClient, a.CloudProvider, discount, customDiscount, duration, offset)
+		if err != nil {
+			log.Errorf("ComputeAggregateCostModel: error computing idle coefficient: duration=%s, offset=%s, err=%s", duration, offset, err)
+			return nil, "", err
+		}
+	}
+
+	totalContainerCost := 0.0
+	if shared == SplitTypeWeighted {
+		totalContainerCost = GetTotalContainerCost(costData, rate, a.CloudProvider, discount, customDiscount, idleCoefficients)
+	}
+
+	// filter cost data by namespace and cluster after caching for maximal cache hits
+	costData, filteredContainerCount, filteredEnvironments := FilterCostData(costData, retainFuncs, filterFuncs)
+
+	// aggregate cost model data by given fields and cache the result for the default expiration
+	aggOpts := &AggregationOptions{
+		Discount:               discount,
+		CustomDiscount:         customDiscount,
+		IdleCoefficients:       idleCoefficients,
+		IncludeEfficiency:      includeEfficiency,
+		IncludeTimeSeries:      includeTimeSeries,
+		Rate:                   rate,
+		ResolutionHours:        resolution.Hours(),
+		SharedResourceInfo:     sri,
+		SharedCosts:            sc,
+		FilteredContainerCount: filteredContainerCount,
+		FilteredEnvironments:   filteredEnvironments,
+		TotalContainerCost:     totalContainerCost,
+		SharedSplit:            shared,
+	}
+	result := AggregateCostData(costData, field, subfields, a.CloudProvider, aggOpts)
+
+	// If sending time series data back, switch scale back to hourly data. At this point,
+	// resolutionHours may have converted our hourly data to more- or less-than hourly data.
+	if includeTimeSeries {
+		for _, aggs := range result {
+			ScaleAggregationTimeSeries(aggs, resolution.Hours())
+		}
+	}
+
+	// compute length of the time series in the cost data and only cache
+	// aggregation results if the length is sufficiently high
+	costDataLen := costDataTimeSeriesLength(costData)
+	if costDataLen >= minCostDataLength && window.Hours() > 1.0 && !noCache {
+		// Set the result map (rather than a pointer to it) because map is a reference type
+		log.Infof("ComputeAggregateCostModel: setting aggregate cache: %s", aggKey)
+		a.AggregateCache.Set(aggKey, result, cacheExpiry)
+	} else {
+		log.Infof("ComputeAggregateCostModel: not setting aggregate cache: %s (not enough data: %t; duration less than 1h: %t; noCache: %t)", key, costDataLen < minCostDataLength, window.Hours() < 1, noCache)
+	}
+
+	return result, cacheMessage, nil
+}
+
+// ScaleAggregationTimeSeries reverses the scaling done by ScaleHourlyCostData,
+// returning the aggregation's time series to hourly data: every entry of every
+// cost vector is divided by resolutionHours.
+func ScaleAggregationTimeSeries(aggregation *Aggregation, resolutionHours float64) {
+	// Each cost vector receives identical treatment, so iterate over the set
+	// of vectors instead of repeating the same loop six times. (The previous
+	// version also ended with a redundant bare return; staticcheck S1023.)
+	for _, vectors := range [][]*util.Vector{
+		aggregation.CPUCostVector,
+		aggregation.GPUCostVector,
+		aggregation.RAMCostVector,
+		aggregation.PVCostVector,
+		aggregation.NetworkCostVector,
+		aggregation.TotalCostVector,
+	} {
+		for _, v := range vectors {
+			v.Value /= resolutionHours
+		}
+	}
+}
+
+// String returns a canonical string identifier for the encapsulated shared
+// resources. Namespaces and label selectors are each sorted before joining,
+// so any permutation of the same combination yields the same string.
+func (s *SharedResourceInfo) String() string {
+	if s == nil {
+		return ""
+	}
+
+	namespaces := make([]string, 0, len(s.SharedNamespace))
+	for ns := range s.SharedNamespace {
+		namespaces = append(namespaces, ns)
+	}
+	sort.Strings(namespaces)
+
+	// Collect "name=value" selector pairs, skipping entries with an empty
+	// name or value.
+	selectors := []string{}
+	for name, values := range s.LabelSelectors {
+		if name == "" {
+			continue
+		}
+		for value := range values {
+			if value != "" {
+				selectors = append(selectors, name+"="+value)
+			}
+		}
+	}
+	sort.Strings(selectors)
+
+	return strings.Join(namespaces, ",") + ":" + strings.Join(selectors, ",")
+}
+
+// aggKeyParams enumerates the request parameters that uniquely identify an
+// aggregation query for cache-key purposes.
+// NOTE(review): this struct is not referenced by any code visible in this
+// file — presumably it is constructed elsewhere or mirrors the fields folded
+// into GenerateAggKey's string; confirm it is actually used before relying
+// on it.
+type aggKeyParams struct {
+	duration   string
+	offset     string
+	filters    map[string]string
+	field      string
+	subfields  []string
+	rate       string
+	sri        *SharedResourceInfo
+	shareType  string
+	idle       bool
+	timeSeries bool
+	efficiency bool
+}
+
+// GenerateAggKey generates a parameter-unique key for caching the aggregate
+// cost model. Filter values are trimmed and sorted so that requests that are
+// equivalent up to whitespace and ordering share a single cache entry.
+func GenerateAggKey(window kubecost.Window, field string, subfields []string, opts *AggregateQueryOpts) string {
+	if opts == nil {
+		opts = DefaultAggregateQueryOpts()
+	}
+
+	// Covert to duration, offset so that cache hits occur, even when timestamps have
+	// shifted slightly.
+	duration, offset := window.ToDurationOffset()
+
+	// canonicalFilter splits a comma-separated filter value, trims whitespace
+	// from each element, sorts, and rejoins — the canonical form shared by the
+	// podprefix, namespace, node, and cluster filters. (Replaces four
+	// copy-pasted blocks.)
+	canonicalFilter := func(filter string) string {
+		if filter == "" {
+			return ""
+		}
+		elems := []string{}
+		for _, e := range strings.Split(filter, ",") {
+			elems = append(elems, strings.TrimSpace(e))
+		}
+		sort.Strings(elems)
+		return strings.Join(elems, ",")
+	}
+
+	podPrefixFiltersStr := canonicalFilter(opts.Filters["podprefix"])
+	nsFilterStr := canonicalFilter(opts.Filters["namespace"])
+	nodeFilterStr := canonicalFilter(opts.Filters["node"])
+	cFilterStr := canonicalFilter(opts.Filters["cluster"])
+
+	// Label filters additionally require the name=value form: trim whitespace
+	// from the label name and the label value of each pair, then reconstruct.
+	// e.g. "tier = frontend, app = kubecost" == "app=kubecost,tier=frontend"
+	lFilters := []string{}
+	if lfs := opts.Filters["labels"]; lfs != "" {
+		for _, lf := range strings.Split(lfs, ",") {
+			lfa := strings.Split(lf, "=")
+			if len(lfa) == 2 {
+				lfn := strings.TrimSpace(lfa[0])
+				lfv := strings.TrimSpace(lfa[1])
+				lFilters = append(lFilters, fmt.Sprintf("%s=%s", lfn, lfv))
+			} else {
+				// label is not of the form name=value, so log it and move on
+				klog.V(2).Infof("[Warning] GenerateAggKey: skipping illegal label filter: %s", lf)
+			}
+		}
+	}
+	sort.Strings(lFilters)
+	lFilterStr := strings.Join(lFilters, ",")
+
+	filterStr := fmt.Sprintf("%s:%s:%s:%s:%s", nsFilterStr, nodeFilterStr, cFilterStr, lFilterStr, podPrefixFiltersStr)
+
+	// Sort a copy of subfields: the previous implementation sorted the
+	// caller's slice in place, a surprising side effect for a key generator.
+	sortedSubfields := append([]string{}, subfields...)
+	sort.Strings(sortedSubfields)
+	fieldStr := fmt.Sprintf("%s:%s", field, strings.Join(sortedSubfields, ","))
+
+	return fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s:%t:%t:%t", duration, offset, filterStr, fieldStr, opts.Rate,
+		opts.SharedResources, opts.ShareSplit, opts.AllocateIdle, opts.IncludeTimeSeries,
+		opts.IncludeEfficiency)
+}
+
+// Aggregator is capable of computing the aggregated cost model. This is
+// a brutal interface, which should be cleaned up, but it's necessary for
+// being able to swap in an ETL-backed implementation.
+type Aggregator interface {
+	// ComputeAggregateCostModel computes cost aggregations for the given
+	// window, aggregation field, and options, returning the aggregations,
+	// a human-readable cache-status message, and any error.
+	ComputeAggregateCostModel(promClient prometheusClient.Client, window kubecost.Window, field string, subfields []string, opts *AggregateQueryOpts) (map[string]*Aggregation, string, error)
+}
+
+// warmAggregateCostModelCache launches background goroutines that repeatedly
+// pre-compute and cache the aggregate cost model and cluster costs for common
+// query windows (1d, 2d, and — when ETL is disabled — 7d and 30d), so user
+// requests for those windows hit a warm cache.
+func (a *Accesses) warmAggregateCostModelCache() {
+	// Only allow one concurrent cache-warming operation
+	sem := util.NewSemaphore(1)
+
+	// Set default values, pulling them from application settings where applicable, and warm the cache
+	// for the given duration. Cache is intentionally set to expire (i.e. noExpireCache=false) so that
+	// if the default parameters change, the old cached defaults with eventually expire. Thus, the
+	// timing of the cache expiry/refresh is the only mechanism ensuring 100% cache warmth.
+	// warmFunc returns the aggregate-computation error and the cluster-costs
+	// error, in that order, so callers can decide whether to back off.
+	warmFunc := func(duration, durationHrs, offset string, cacheEfficiencyData bool) (error, error) {
+		promClient := a.GetPrometheusClient(true)
+
+		windowStr := fmt.Sprintf("%s offset %s", duration, offset)
+		window, err := kubecost.ParseWindowUTC(windowStr)
+		if err != nil {
+			return nil, fmt.Errorf("invalid window from window string: %s", windowStr)
+		}
+
+		// Warm the namespace aggregation with no subfields.
+		field := "namespace"
+		subfields := []string{}
+
+		aggOpts := DefaultAggregateQueryOpts()
+		aggOpts.Rate = ""
+		aggOpts.Filters = map[string]string{}
+		aggOpts.IncludeTimeSeries = false
+		aggOpts.IncludeEfficiency = true
+		// DisableCache forces recomputation so the cached entry is refreshed.
+		aggOpts.DisableCache = true
+		aggOpts.ClearCache = false
+		aggOpts.NoCache = false
+		aggOpts.NoExpireCache = false
+		aggOpts.ShareSplit = SplitTypeWeighted
+		aggOpts.RemoteEnabled = env.IsRemoteEnabled()
+		aggOpts.AllocateIdle = cloud.AllocateIdleByDefault(a.CloudProvider)
+
+		sharedNamespaces := cloud.SharedNamespaces(a.CloudProvider)
+		sharedLabelNames, sharedLabelValues := cloud.SharedLabels(a.CloudProvider)
+
+		if len(sharedNamespaces) > 0 || len(sharedLabelNames) > 0 {
+			aggOpts.SharedResources = NewSharedResourceInfo(true, sharedNamespaces, sharedLabelNames, sharedLabelValues)
+		}
+
+		aggKey := GenerateAggKey(window, field, subfields, aggOpts)
+		log.Infof("aggregation: cache warming defaults: %s", aggKey)
+		key := fmt.Sprintf("%s:%s", durationHrs, offset)
+
+		_, _, aggErr := a.ComputeAggregateCostModel(promClient, window, field, subfields, aggOpts)
+		if aggErr != nil {
+			log.Infof("Error building cache %s: %s", window, aggErr)
+		}
+		// Thanos data lags real time, so shift the offset back before
+		// computing cluster costs against a Thanos store.
+		if a.ThanosClient != nil {
+			offset = thanos.Offset()
+			log.Infof("Setting offset to %s", offset)
+		}
+		totals, err := a.ComputeClusterCosts(promClient, a.CloudProvider, durationHrs, offset, cacheEfficiencyData)
+		if err != nil {
+			log.Infof("Error building cluster costs cache %s", key)
+		}
+		// Only cache cluster costs when at least one cluster has more than
+		// clusterCostsCacheMinutes of data, to avoid caching empty results.
+		maxMinutesWithData := 0.0
+		for _, cluster := range totals {
+			if cluster.DataMinutes > maxMinutesWithData {
+				maxMinutesWithData = cluster.DataMinutes
+			}
+		}
+		if len(totals) > 0 && maxMinutesWithData > clusterCostsCacheMinutes {
+			a.ClusterCostsCache.Set(key, totals, a.GetCacheExpiration(window.Duration()))
+			log.Infof("caching %s cluster costs for %s", duration, a.GetCacheExpiration(window.Duration()))
+		} else {
+			log.Warningf("not caching %s cluster costs: no data or less than %f minutes data ", duration, clusterCostsCacheMinutes)
+		}
+		return aggErr, err
+	}
+
+	// 1 day
+	go func(sem *util.Semaphore) {
+		defer errors.HandlePanic()
+
+		duration := "1d"
+		offset := "1m"
+		durHrs := "24h"
+		dur := 24 * time.Hour
+
+		for {
+			sem.Acquire()
+			// NOTE(review): warmFunc's errors are discarded here, so the 1d
+			// warmer never backs off on failure — unlike the 7d/30d warmers
+			// below, which sleep 5 minutes on error. Confirm this asymmetry
+			// is intentional.
+			warmFunc(duration, durHrs, offset, true)
+			sem.Return()
+
+			log.Infof("aggregation: warm cache: %s", duration)
+			time.Sleep(a.GetCacheRefresh(dur))
+		}
+	}(sem)
+
+	// 2 day
+	go func(sem *util.Semaphore) {
+		defer errors.HandlePanic()
+
+		duration := "2d"
+		offset := "1m"
+		durHrs := "48h"
+		dur := 2 * 24 * time.Hour
+
+		for {
+			sem.Acquire()
+			// errors ignored; no error backoff (see note in the 1d warmer)
+			warmFunc(duration, durHrs, offset, false)
+			sem.Return()
+
+			log.Infof("aggregation: warm cache: %s", duration)
+			time.Sleep(a.GetCacheRefresh(dur))
+		}
+	}(sem)
+
+	// Longer windows are only warmed when ETL is disabled; ETL serves them
+	// otherwise.
+	if !env.IsETLEnabled() {
+		// 7 day
+		go func(sem *util.Semaphore) {
+			defer errors.HandlePanic()
+
+			duration := "7d"
+			offset := "1m"
+			durHrs := "168h"
+			dur := 7 * 24 * time.Hour
+
+			for {
+				sem.Acquire()
+				aggErr, err := warmFunc(duration, durHrs, offset, false)
+				sem.Return()
+
+				log.Infof("aggregation: warm cache: %s", duration)
+				// On any failure, retry after a short delay instead of
+				// waiting a full refresh interval.
+				if aggErr == nil && err == nil {
+					time.Sleep(a.GetCacheRefresh(dur))
+				} else {
+					time.Sleep(5 * time.Minute)
+				}
+			}
+		}(sem)
+
+		// 30 day
+		go func(sem *util.Semaphore) {
+			defer errors.HandlePanic()
+
+			for {
+				duration := "30d"
+				offset := "1m"
+				durHrs := "720h"
+				dur := 30 * 24 * time.Hour
+
+				sem.Acquire()
+				aggErr, err := warmFunc(duration, durHrs, offset, false)
+				sem.Return()
+				// On any failure, retry after a short delay instead of
+				// waiting a full refresh interval.
+				if aggErr == nil && err == nil {
+					time.Sleep(a.GetCacheRefresh(dur))
+				} else {
+					time.Sleep(5 * time.Minute)
+				}
+			}
+		}(sem)
+	}
+}
+
+// AggregateCostModelHandler handles requests to the aggregated cost model API. See
+// ComputeAggregateCostModel for details. It parses window, filter, sharing,
+// and caching parameters from the query string, invokes the aggregator, and
+// writes a JSON response (wrapping empty-data and ETL-boundary errors).
+func (a *Accesses) AggregateCostModelHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+
+	windowStr := r.URL.Query().Get("window")
+
+	// Convert UTC-RFC3339 pairs to configured UTC offset
+	// e.g. with UTC offset of -0600, 2020-07-01T00:00:00Z becomes
+	// 2020-07-01T06:00:00Z == 2020-07-01T00:00:00-0600
+	// TODO niko/etl fix the frontend because this is confusing if you're
+	// actually asking for UTC time (...Z) and we swap that "Z" out for the
+	// configured UTC offset without asking
+	rfc3339 := `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ`
+	regex := regexp.MustCompile(fmt.Sprintf(`(%s),(%s)`, rfc3339, rfc3339))
+	match := regex.FindStringSubmatch(windowStr)
+	if match != nil {
+		start, _ := time.Parse(time.RFC3339, match[1])
+		start = start.Add(-env.GetParsedUTCOffset()).In(time.UTC)
+		end, _ := time.Parse(time.RFC3339, match[2])
+		end = end.Add(-env.GetParsedUTCOffset()).In(time.UTC)
+		// Both layouts omit the trailing "Z" because the format string below
+		// appends one. The previous end layout included a literal "Z" as
+		// well, producing a malformed "...ZZ" end timestamp.
+		windowStr = fmt.Sprintf("%sZ,%sZ", start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
+	}
+
+	// determine duration and offset from query parameters
+	window, err := kubecost.ParseWindowWithOffset(windowStr, env.GetParsedUTCOffset())
+	if err != nil || window.Start() == nil {
+		http.Error(w, fmt.Sprintf("invalid window: %s", err), http.StatusBadRequest)
+		return
+	}
+
+	durRegex := regexp.MustCompile(`^(\d+)(m|h|d|s)$`)
+	isDurationStr := durRegex.MatchString(windowStr)
+
+	// legacy offset option should override window offset
+	if r.URL.Query().Get("offset") != "" {
+		offset := r.URL.Query().Get("offset")
+		// Shift window by offset, but only when manually set with separate
+		// parameter and window was provided as a duration string. Otherwise,
+		// do not alter the (duration, offset) from ParseWindowWithOffset.
+		if offset != "1m" && isDurationStr {
+			match := durRegex.FindStringSubmatch(offset)
+			if match != nil && len(match) == 3 {
+				dur := time.Minute
+				if match[2] == "h" {
+					dur = time.Hour
+				}
+				if match[2] == "d" {
+					dur = 24 * time.Hour
+				}
+				if match[2] == "s" {
+					dur = time.Second
+				}
+
+				num, _ := strconv.ParseInt(match[1], 10, 64)
+				window = window.Shift(-time.Duration(num) * dur)
+			}
+		}
+	}
+
+	opts := DefaultAggregateQueryOpts()
+
+	// parse remaining query parameters
+	namespace := r.URL.Query().Get("namespace")
+	cluster := r.URL.Query().Get("cluster")
+	labels := r.URL.Query().Get("labels")
+	podprefix := r.URL.Query().Get("podprefix")
+	// Sanitize the name of every comma-separated label pair: dashes are not
+	// legal in Prometheus label names, so convert them to underscores. The
+	// previous implementation split on "=" only once and therefore sanitized
+	// only the first pair's name.
+	if labels != "" {
+		pairs := strings.Split(labels, ",")
+		for i, pair := range pairs {
+			parts := strings.SplitN(pair, "=", 2)
+			if len(parts) == 2 {
+				pairs[i] = strings.ReplaceAll(parts[0], "-", "_") + "=" + parts[1]
+			}
+		}
+		labels = strings.Join(pairs, ",")
+	}
+	field := r.URL.Query().Get("aggregation")
+	sharedNamespaces := r.URL.Query().Get("sharedNamespaces")
+	sharedLabelNames := r.URL.Query().Get("sharedLabelNames")
+	sharedLabelValues := r.URL.Query().Get("sharedLabelValues")
+	remote := r.URL.Query().Get("remote") != "false"
+	subfieldStr := r.URL.Query().Get("aggregationSubfield")
+	subfields := []string{}
+	if len(subfieldStr) > 0 {
+		s := strings.Split(r.URL.Query().Get("aggregationSubfield"), ",")
+		for _, rawLabel := range s {
+			subfields = append(subfields, prom.SanitizeLabelName(rawLabel))
+		}
+	}
+
+	// allocateIdle=default defers to the cloud provider's configuration
+	idleFlag := r.URL.Query().Get("allocateIdle")
+	if idleFlag == "default" {
+		c, _ := a.CloudProvider.GetConfig()
+		opts.AllocateIdle = (c.DefaultIdle == "true")
+	} else {
+		opts.AllocateIdle = (idleFlag == "true")
+	}
+
+	opts.Rate = r.URL.Query().Get("rate")
+
+	opts.ShareSplit = r.URL.Query().Get("sharedSplit")
+
+	// timeSeries == true maintains the time series dimension of the data,
+	// which by default gets summed over the entire interval
+	opts.IncludeTimeSeries = r.URL.Query().Get("timeSeries") == "true"
+
+	// efficiency has been deprecated in favor of a default to always send efficiency
+	opts.IncludeEfficiency = true
+
+	// TODO niko/caching rename "recomputeCache"
+	// disableCache, if set to "true", tells this function to recompute and
+	// cache the requested data
+	opts.DisableCache = r.URL.Query().Get("disableCache") == "true"
+
+	// clearCache, if set to "true", tells this function to flush the cache,
+	// then recompute and cache the requested data
+	opts.ClearCache = r.URL.Query().Get("clearCache") == "true"
+
+	// noCache avoids the cache altogether, both reading from and writing to
+	opts.NoCache = r.URL.Query().Get("noCache") == "true"
+
+	// noExpireCache should only be used by cache warming to set non-expiring caches
+	opts.NoExpireCache = false
+
+	// etl triggers ETL adapter
+	opts.UseETLAdapter = r.URL.Query().Get("etl") == "true"
+
+	// aggregation field is required
+	if field == "" {
+		http.Error(w, "Missing aggregation field parameter", http.StatusBadRequest)
+		return
+	}
+
+	// aggregation subfield is required when aggregation field is "label"
+	if field == "label" && len(subfields) == 0 {
+		http.Error(w, "Missing aggregation subfield parameter for aggregation by label", http.StatusBadRequest)
+		return
+	}
+
+	// enforce one of four available rate options
+	if opts.Rate != "" && opts.Rate != "hourly" && opts.Rate != "daily" && opts.Rate != "monthly" {
+		http.Error(w, "If set, rate parameter must be one of: 'hourly', 'daily', 'monthly'", http.StatusBadRequest)
+		return
+	}
+
+	// parse cost data filters
+	// namespace and cluster are exact-string-matches
+	// labels are expected to be comma-separated and to take the form key=value
+	// e.g. app=cost-analyzer,app.kubernetes.io/instance=kubecost
+	opts.Filters = map[string]string{
+		"namespace": namespace,
+		"cluster":   cluster,
+		"labels":    labels,
+		"podprefix": podprefix,
+	}
+
+	// parse shared resources
+	sn := []string{}
+	sln := []string{}
+	slv := []string{}
+	if sharedNamespaces != "" {
+		sn = strings.Split(sharedNamespaces, ",")
+	}
+	if sharedLabelNames != "" {
+		sln = strings.Split(sharedLabelNames, ",")
+		slv = strings.Split(sharedLabelValues, ",")
+		if len(sln) != len(slv) || slv[0] == "" {
+			http.Error(w, "Supply exactly one shared label value per shared label name", http.StatusBadRequest)
+			return
+		}
+	}
+	if len(sn) > 0 || len(sln) > 0 {
+		opts.SharedResources = NewSharedResourceInfo(true, sn, sln, slv)
+	}
+
+	// enable remote if it is available and not disabled
+	opts.RemoteEnabled = remote && env.IsRemoteEnabled()
+
+	promClient := a.GetPrometheusClient(remote)
+
+	var data map[string]*Aggregation
+	var message string
+	data, message, err = a.AggAPI.ComputeAggregateCostModel(promClient, window, field, subfields, opts)
+
+	// Find any warnings in http request context
+	warning, _ := util.GetWarning(r)
+
+	if err != nil {
+		// Empty data is reported as a successful, empty payload rather than
+		// an HTTP error.
+		if emptyErr, ok := err.(*EmptyDataError); ok {
+			if warning == "" {
+				w.Write(WrapData(map[string]interface{}{}, emptyErr))
+			} else {
+				w.Write(WrapDataWithWarning(map[string]interface{}{}, emptyErr, warning))
+			}
+			return
+		}
+		if boundaryErr, ok := err.(*kubecost.BoundaryError); ok {
+			if window.Start() != nil && window.Start().After(time.Now().Add(-90*24*time.Hour)) {
+				// Asking for data within a 90 day period: it will be available
+				// after the pipeline builds
+				msg := "Data will be available after ETL is built"
+
+				// Surface the ETL build completion percentage if the boundary
+				// error message contains one.
+				rex := regexp.MustCompile(`(\d+\.*\d*)%`)
+				match := rex.FindStringSubmatch(boundaryErr.Message)
+				if len(match) > 1 {
+					completionPct, err := strconv.ParseFloat(match[1], 64)
+					if err == nil {
+						msg = fmt.Sprintf("%s (%.1f%% complete)", msg, completionPct)
+					}
+				}
+
+				http.Error(w, msg, http.StatusInternalServerError)
+			} else {
+				// Boundary error outside of 90 day period; may not be available
+				http.Error(w, boundaryErr.Error(), http.StatusInternalServerError)
+			}
+			return
+		}
+		errStr := fmt.Sprintf("error computing aggregate cost model: %s", err)
+		http.Error(w, errStr, http.StatusInternalServerError)
+		return
+	}
+
+	if warning == "" {
+		w.Write(WrapDataWithMessage(data, nil, message))
+	} else {
+		w.Write(WrapDataWithMessageAndWarning(data, nil, message, warning))
+	}
+}

+ 195 - 0
pkg/costmodel/aggregation_test.go

@@ -0,0 +1,195 @@
+package costmodel
+
+import (
+	"testing"
+
+	"github.com/kubecost/cost-model/pkg/util"
+)
+
+// TestScaleHourlyCostData verifies that ScaleHourlyCostData both compresses
+// (resolution > 1) and expands (resolution < 1) hourly cost vectors: each
+// Vector's Value is scaled by the resolution and series lengths shrink or
+// remain per-hour accordingly.
+func TestScaleHourlyCostData(t *testing.T) {
+	costData := map[string]*CostData{}
+
+	start := 1570000000
+	oneHour := 60 * 60
+
+	generateVectorSeries := func(start, count, interval int, value float64) []*util.Vector {
+		vs := []*util.Vector{}
+		for i := 0; i < count; i++ {
+			v := &util.Vector{
+				// Use the interval parameter rather than the captured oneHour
+				// so that callers can generate non-hourly series. (The
+				// previous version ignored interval; all call sites pass
+				// oneHour, so observed behavior is unchanged.)
+				Timestamp: float64(start + (i * interval)),
+				Value:     value,
+			}
+			vs = append(vs, v)
+		}
+		return vs
+	}
+
+	costData["default"] = &CostData{
+		RAMReq:        generateVectorSeries(start, 100, oneHour, 1.0),
+		RAMUsed:       generateVectorSeries(start, 0, oneHour, 1.0),
+		RAMAllocation: generateVectorSeries(start, 100, oneHour, 107.226),
+		CPUReq:        generateVectorSeries(start, 100, oneHour, 0.00317),
+		CPUUsed:       generateVectorSeries(start, 95, oneHour, 1.0),
+		CPUAllocation: generateVectorSeries(start, 2, oneHour, 123.456),
+		PVCData: []*PersistentVolumeClaimData{
+			{Values: generateVectorSeries(start, 100, oneHour, 1.34)},
+		},
+	}
+
+	// Compress 100 hourly points at resolution 10: expect 10 points, each
+	// summing 10 hours of values.
+	compressedData := ScaleHourlyCostData(costData, 10)
+
+	act, ok := compressedData["default"]
+	if !ok {
+		t.Errorf("compressed data should have key \"default\"")
+	}
+
+	// RAMReq
+	if len(act.RAMReq) != 10 {
+		t.Errorf("expected RAMReq to have length %d, was actually %d", 10, len(act.RAMReq))
+	}
+	for _, val := range act.RAMReq {
+		if val.Value != 10.0 {
+			t.Errorf("expected each RAMReq Vector to have Value %f, was actually %f", 10.0, val.Value)
+		}
+	}
+
+	// RAMUsed: empty input stays empty
+	if len(act.RAMUsed) != 0 {
+		t.Errorf("expected RAMUsed to have length %d, was actually %d", 0, len(act.RAMUsed))
+	}
+
+	// RAMAllocation
+	if len(act.RAMAllocation) != 10 {
+		t.Errorf("expected RAMAllocation to have length %d, was actually %d", 10, len(act.RAMAllocation))
+	}
+	for _, val := range act.RAMAllocation {
+		if val.Value != 1072.26 {
+			t.Errorf("expected each RAMAllocation Vector to have Value %f, was actually %f", 1072.26, val.Value)
+		}
+	}
+
+	// CPUReq
+	if len(act.CPUReq) != 10 {
+		t.Errorf("expected CPUReq to have length %d, was actually %d", 10, len(act.CPUReq))
+	}
+	for _, val := range act.CPUReq {
+		if val.Value != 0.0317 {
+			t.Errorf("expected each CPUReq Vector to have Value %f, was actually %f", 0.0317, val.Value)
+		}
+	}
+
+	// CPUUsed: 95 points compress to 10, with a partial final bucket (5 hours)
+	if len(act.CPUUsed) != 10 {
+		t.Errorf("expected CPUUsed to have length %d, was actually %d", 10, len(act.CPUUsed))
+	}
+	for _, val := range act.CPUUsed[:len(act.CPUUsed)-1] {
+		if val.Value != 10.0 {
+			t.Errorf("expected each CPUUsed Vector to have Value %f, was actually %f", 10.0, val.Value)
+		}
+	}
+	if act.CPUUsed[len(act.CPUUsed)-1].Value != 5.0 {
+		t.Errorf("expected each CPUUsed Vector to have Value %f, was actually %f", 5.0, act.CPUUsed[len(act.CPUUsed)-1].Value)
+	}
+
+	// CPUAllocation: 2 points compress into a single bucket
+	if len(act.CPUAllocation) != 1 {
+		t.Errorf("expected CPUAllocation to have length %d, was actually %d", 1, len(act.CPUAllocation))
+	}
+	if act.CPUAllocation[0].Value != 246.912 {
+		t.Errorf("expected each CPUAllocation Vector to have Value %f, was actually %f", 246.912, act.CPUAllocation[0].Value)
+	}
+
+	// PVCData
+	if len(act.PVCData[0].Values) != 10 {
+		t.Errorf("expected PVCData[0] to have length %d, was actually %d", 10, len(act.PVCData[0].Values))
+	}
+	for _, val := range act.PVCData[0].Values {
+		if val.Value != 13.4 {
+			t.Errorf("expected each PVCData[0] Vector to have Value %f, was actually %f", 13.4, val.Value)
+		}
+	}
+
+	costData["default"] = &CostData{
+		RAMReq:        generateVectorSeries(start, 100, oneHour, 1.0),
+		RAMUsed:       generateVectorSeries(start, 0, oneHour, 1.0),
+		RAMAllocation: generateVectorSeries(start, 100, oneHour, 107.226),
+		CPUReq:        generateVectorSeries(start, 100, oneHour, 0.00317),
+		CPUUsed:       generateVectorSeries(start, 95, oneHour, 1.0),
+		CPUAllocation: generateVectorSeries(start, 2, oneHour, 124.6),
+		PVCData: []*PersistentVolumeClaimData{
+			{Values: generateVectorSeries(start, 100, oneHour, 1.34)},
+		},
+	}
+
+	// Expand at resolution 0.1: lengths are preserved and values shrink 10x.
+	scaledData := ScaleHourlyCostData(costData, 0.1)
+
+	act, ok = scaledData["default"]
+	if !ok {
+		t.Errorf("scaled data should have key \"default\"")
+	}
+
+	// RAMReq
+	if len(act.RAMReq) != 100 {
+		t.Errorf("expected RAMReq to have length %d, was actually %d", 100, len(act.RAMReq))
+	}
+	for _, val := range act.RAMReq {
+		if val.Value != 0.1 {
+			t.Errorf("expected each RAMReq Vector to have Value %f, was actually %f", 0.1, val.Value)
+		}
+	}
+
+	// RAMUsed
+	if len(act.RAMUsed) != 0 {
+		t.Errorf("expected RAMUsed to have length %d, was actually %d", 0, len(act.RAMUsed))
+	}
+
+	// RAMAllocation
+	if len(act.RAMAllocation) != 100 {
+		t.Errorf("expected RAMAllocation to have length %d, was actually %d", 100, len(act.RAMAllocation))
+	}
+	for _, val := range act.RAMAllocation {
+		if val.Value != 10.7226 {
+			t.Errorf("expected each RAMAllocation Vector to have Value %f, was actually %f", 10.7226, val.Value)
+		}
+	}
+
+	// CPUReq
+	if len(act.CPUReq) != 100 {
+		t.Errorf("expected CPUReq to have length %d, was actually %d", 100, len(act.CPUReq))
+	}
+	for _, val := range act.CPUReq {
+		if val.Value != 0.000317 {
+			t.Errorf("expected each CPUReq Vector to have Value %f, was actually %f", 0.000317, val.Value)
+		}
+	}
+
+	// CPUUsed
+	if len(act.CPUUsed) != 95 {
+		t.Errorf("expected CPUUsed to have length %d, was actually %d", 95, len(act.CPUUsed))
+	}
+	for _, val := range act.CPUUsed {
+		if val.Value != 0.1 {
+			t.Errorf("expected each CPUUsed Vector to have Value %f, was actually %f", 0.1, val.Value)
+		}
+	}
+
+	// CPUAllocation
+	if len(act.CPUAllocation) != 2 {
+		t.Errorf("expected CPUAllocation to have length %d, was actually %d", 2, len(act.CPUAllocation))
+	}
+	for _, val := range act.CPUAllocation {
+		if val.Value != 12.46 {
+			t.Errorf("expected each CPUAllocation Vector to have Value %f, was actually %f", 12.46, val.Value)
+		}
+	}
+
+	// PVCData
+	if len(act.PVCData[0].Values) != 100 {
+		t.Errorf("expected PVCData[0] to have length %d, was actually %d", 100, len(act.PVCData[0].Values))
+	}
+	for _, val := range act.PVCData[0].Values {
+		if val.Value != .134 {
+			t.Errorf("expected each PVCData[0] Vector to have Value %f, was actually %f", .134, val.Value)
+		}
+	}
+}

+ 26 - 22
pkg/costmodel/cluster.go

@@ -25,13 +25,13 @@ const (
 	  ) by (cluster_id)`
 
 	queryStorage = `sum(
-		avg(avg_over_time(pv_hourly_cost[%s] %s)) by (persistentvolume, cluster_id) * 730 
+		avg(avg_over_time(pv_hourly_cost[%s] %s)) by (persistentvolume, cluster_id) * 730
 		* avg(avg_over_time(kube_persistentvolume_capacity_bytes[%s] %s)) by (persistentvolume, cluster_id) / 1024 / 1024 / 1024
 	  ) by (cluster_id) %s`
 
 	queryTotal = `sum(avg(node_total_hourly_cost) by (node, cluster_id)) * 730 +
 	  sum(
-		avg(avg_over_time(pv_hourly_cost[1h])) by (persistentvolume, cluster_id) * 730 
+		avg(avg_over_time(pv_hourly_cost[1h])) by (persistentvolume, cluster_id) * 730
 		* avg(avg_over_time(kube_persistentvolume_capacity_bytes[1h])) by (persistentvolume, cluster_id) / 1024 / 1024 / 1024
 	  ) by (cluster_id) %s`
 
@@ -117,7 +117,7 @@ type Disk struct {
 	Breakdown  *ClusterCostsBreakdown
 }
 
-func ClusterDisks(client prometheus.Client, provider cloud.Provider, duration, offset time.Duration) (map[string]*Disk, []error) {
+func ClusterDisks(client prometheus.Client, provider cloud.Provider, duration, offset time.Duration) (map[string]*Disk, error) {
 	durationStr := fmt.Sprintf("%dm", int64(duration.Minutes()))
 	offsetStr := fmt.Sprintf(" offset %dm", int64(offset.Minutes()))
 	if offset < time.Minute {
@@ -163,8 +163,8 @@ func ClusterDisks(client prometheus.Client, provider cloud.Provider, duration, o
 	resLocalStorageUsedCost, _ := resChLocalStorageUsedCost.Await()
 	resLocalStorageBytes, _ := resChLocalStorageBytes.Await()
 	resLocalActiveMins, _ := resChLocalActiveMins.Await()
-	if ctx.ErrorCollector.IsError() {
-		return nil, ctx.Errors()
+	if ctx.HasErrors() {
+		return nil, ctx.ErrorCollection()
 	}
 
 	diskMap := map[string]*Disk{}
@@ -399,7 +399,7 @@ var partialCPUMap = map[string]float64{
 	"e2-medium": 1.0,
 }
 
-func ClusterNodes(cp cloud.Provider, client prometheus.Client, duration, offset time.Duration) (map[string]*Node, []error) {
+func ClusterNodes(cp cloud.Provider, client prometheus.Client, duration, offset time.Duration) (map[string]*Node, error) {
 	durationStr := fmt.Sprintf("%dm", int64(duration.Minutes()))
 	offsetStr := fmt.Sprintf(" offset %dm", int64(offset.Minutes()))
 	if offset < time.Minute {
@@ -456,16 +456,17 @@ func ClusterNodes(cp cloud.Provider, client prometheus.Client, duration, offset
 	resNodeRAMUserPct, _ := resChNodeRAMUserPct.Await()
 	resActiveMins, _ := resChActiveMins.Await()
 
-	if optionalCtx.ErrorCollector.IsError() {
+	if optionalCtx.HasErrors() {
 		for _, err := range optionalCtx.Errors() {
 			log.Warningf("ClusterNodes: %s", err)
 		}
 	}
-	if requiredCtx.ErrorCollector.IsError() {
+	if requiredCtx.HasErrors() {
 		for _, err := range requiredCtx.Errors() {
 			log.Errorf("ClusterNodes: %s", err)
 		}
-		return nil, requiredCtx.Errors()
+
+		return nil, requiredCtx.ErrorCollection()
 	}
 
 	nodeMap := map[string]*Node{}
@@ -808,17 +809,17 @@ func ClusterNodes(cp cloud.Provider, client prometheus.Client, duration, offset
 
 	c, err := cp.GetConfig()
 	if err != nil {
-		return nil, []error{err}
+		return nil, err
 	}
 
 	discount, err := ParsePercentString(c.Discount)
 	if err != nil {
-		return nil, []error{err}
+		return nil, err
 	}
 
 	negotiatedDiscount, err := ParsePercentString(c.NegotiatedDiscount)
 	if err != nil {
-		return nil, []error{err}
+		return nil, err
 	}
 
 	for _, node := range nodeMap {
@@ -842,7 +843,7 @@ type LoadBalancer struct {
 	Minutes    float64
 }
 
-func ClusterLoadBalancers(cp cloud.Provider, client prometheus.Client, duration, offset time.Duration) (map[string]*LoadBalancer, []error) {
+func ClusterLoadBalancers(cp cloud.Provider, client prometheus.Client, duration, offset time.Duration) (map[string]*LoadBalancer, error) {
 	durationStr := fmt.Sprintf("%dm", int64(duration.Minutes()))
 	offsetStr := fmt.Sprintf(" offset %dm", int64(offset.Minutes()))
 	if offset < time.Minute {
@@ -869,8 +870,8 @@ func ClusterLoadBalancers(cp cloud.Provider, client prometheus.Client, duration,
 	resLBCost, _ := resChLBCost.Await()
 	resActiveMins, _ := resChActiveMins.Await()
 
-	if ctx.ErrorCollector.IsError() {
-		return nil, ctx.Errors()
+	if ctx.HasErrors() {
+		return nil, ctx.ErrorCollection()
 	}
 
 	loadBalancerMap := map[string]*LoadBalancer{}
@@ -938,7 +939,7 @@ func ClusterLoadBalancers(cp cloud.Provider, client prometheus.Client, duration,
 }
 
 // ComputeClusterCosts gives the cumulative and monthly-rate cluster costs over a window of time for all clusters.
-func ComputeClusterCosts(client prometheus.Client, provider cloud.Provider, window, offset string, withBreakdown bool) (map[string]*ClusterCosts, error) {
+func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider cloud.Provider, window, offset string, withBreakdown bool) (map[string]*ClusterCosts, error) {
 	// Compute number of minutes in the full interval, for use interpolating missed scrapes or scaling missing data
 	start, end, err := util.ParseTimeRange(window, offset)
 	if err != nil {
@@ -1072,7 +1073,7 @@ func ComputeClusterCosts(client prometheus.Client, provider cloud.Provider, wind
 	resTotalRAM, _ := resChs[3].Await()
 	resTotalStorage, _ := resChs[4].Await()
 	if ctx.HasErrors() {
-		return nil, ctx.Errors()[0]
+		return nil, ctx.ErrorCollection()
 	}
 
 	defaultClusterID := env.GetClusterID()
@@ -1094,7 +1095,7 @@ func ComputeClusterCosts(client prometheus.Client, provider cloud.Provider, wind
 
 	// Determine combined discount
 	discount, customDiscount := 0.0, 0.0
-	c, err := A.Cloud.GetConfig()
+	c, err := a.CloudProvider.GetConfig()
 	if err == nil {
 		discount, err = ParsePercentString(c.Discount)
 		if err != nil {
@@ -1148,7 +1149,7 @@ func ComputeClusterCosts(client prometheus.Client, provider cloud.Provider, wind
 		resRAMSystemPct, _ := resChs[7].Await()
 		resRAMUserPct, _ := resChs[8].Await()
 		if ctx.HasErrors() {
-			return nil, ctx.Errors()[0]
+			return nil, ctx.ErrorCollection()
 		}
 
 		for _, result := range resCPUModePct {
@@ -1224,11 +1225,11 @@ func ComputeClusterCosts(client prometheus.Client, provider cloud.Provider, wind
 		}
 	}
 
-	if ctx.ErrorCollector.IsError() {
+	if ctx.HasErrors() {
 		for _, err := range ctx.Errors() {
 			log.Errorf("ComputeClusterCosts: %s", err)
 		}
-		return nil, ctx.Errors()[0]
+		return nil, ctx.ErrorCollection()
 	}
 
 	// Convert intermediate structure to Costs instances
@@ -1374,7 +1375,10 @@ func ClusterCostsOverTime(cli prometheus.Client, provider cloud.Provider, startS
 		// If that fails, return an error because something is actually wrong.
 		qNodes := fmt.Sprintf(queryNodes, localStorageQuery)
 
-		resultNodes, err := ctx.QueryRangeSync(qNodes, start, end, window)
+		resultNodes, warnings, err := ctx.QueryRangeSync(qNodes, start, end, window)
+		for _, warning := range warnings {
+			log.Warningf(warning)
+		}
 		if err != nil {
 			return nil, err
 		}

+ 2 - 2
pkg/costmodel/clusters/clustermap.go

@@ -120,7 +120,7 @@ func (pcm *PrometheusClusterMap) loadClusters() (map[string]*ClusterInfo, error)
 	}
 
 	// Execute Query
-	tryQuery := func() ([]*prom.QueryResult, error) {
+	tryQuery := func() ([]*prom.QueryResult, prometheus.Warnings, error) {
 		ctx := prom.NewContext(pcm.client)
 		return ctx.QuerySync(clusterInfoQuery(offset))
 	}
@@ -131,7 +131,7 @@ func (pcm *PrometheusClusterMap) loadClusters() (map[string]*ClusterInfo, error)
 	// Retry on failure
 	delay := LoadRetryDelay
 	for r := LoadRetries; r > 0; r-- {
-		qr, err = tryQuery()
+		qr, _, err = tryQuery()
 
 		// non-error breaks out of loop
 		if err == nil {

+ 123 - 145
pkg/costmodel/costmodel.go

@@ -12,6 +12,7 @@ import (
 	"github.com/kubecost/cost-model/pkg/clustercache"
 	"github.com/kubecost/cost-model/pkg/costmodel/clusters"
 	"github.com/kubecost/cost-model/pkg/env"
+	"github.com/kubecost/cost-model/pkg/kubecost"
 	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/prom"
 	"github.com/kubecost/cost-model/pkg/util"
@@ -19,10 +20,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/client-go/kubernetes"
 	"k8s.io/klog"
 
-	"github.com/google/uuid"
 	"golang.org/x/sync/singleflight"
 )
 
@@ -132,8 +131,8 @@ const (
 		label_replace(
 			label_replace(
 				avg(
-					count_over_time(kube_pod_container_resource_requests_memory_bytes{container!="",container!="POD", node!=""}[%s] %s) 
-					*  
+					count_over_time(kube_pod_container_resource_requests_memory_bytes{container!="",container!="POD", node!=""}[%s] %s)
+					*
 					avg_over_time(kube_pod_container_resource_requests_memory_bytes{container!="",container!="POD", node!=""}[%s] %s)
 				) by (namespace,container,pod,node,cluster_id) , "container_name","$1","container","(.+)"
 			), "pod_name","$1","pod","(.+)"
@@ -141,25 +140,25 @@ const (
 	) by (namespace,container_name,pod_name,node,cluster_id)`
 	queryRAMUsageStr = `sort_desc(
 		avg(
-			label_replace(count_over_time(container_memory_working_set_bytes{container_name!="",container_name!="POD", instance!=""}[%s] %s), "node", "$1", "instance","(.+)") 
-			* 
-			label_replace(avg_over_time(container_memory_working_set_bytes{container_name!="",container_name!="POD", instance!=""}[%s] %s), "node", "$1", "instance","(.+)") 
+			label_replace(count_over_time(container_memory_working_set_bytes{container_name!="",container_name!="POD", instance!=""}[%s] %s), "node", "$1", "instance","(.+)")
+			*
+			label_replace(avg_over_time(container_memory_working_set_bytes{container_name!="",container_name!="POD", instance!=""}[%s] %s), "node", "$1", "instance","(.+)")
 		) by (namespace,container_name,pod_name,node,cluster_id)
 	)`
 	queryCPURequestsStr = `avg(
 		label_replace(
 			label_replace(
 				avg(
-					count_over_time(kube_pod_container_resource_requests_cpu_cores{container!="",container!="POD", node!=""}[%s] %s) 
-					*  
+					count_over_time(kube_pod_container_resource_requests_cpu_cores{container!="",container!="POD", node!=""}[%s] %s)
+					*
 					avg_over_time(kube_pod_container_resource_requests_cpu_cores{container!="",container!="POD", node!=""}[%s] %s)
 				) by (namespace,container,pod,node,cluster_id) , "container_name","$1","container","(.+)"
 			), "pod_name","$1","pod","(.+)"
-		) 
+		)
 	) by (namespace,container_name,pod_name,node,cluster_id)`
 	queryCPUUsageStr = `avg(
 		label_replace(
-		rate( 
+		rate(
 			container_cpu_usage_seconds_total{container_name!="",container_name!="POD",instance!=""}[%s] %s
 		) , "node", "$1", "instance", "(.+)"
 		)
@@ -168,18 +167,18 @@ const (
 		label_replace(
 			label_replace(
 				avg(
-					count_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!=""}[%s] %s) 
-					*  
+					count_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!=""}[%s] %s)
+					*
 					avg_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!=""}[%s] %s)
 					* %f
 				) by (namespace,container,pod,node,cluster_id) , "container_name","$1","container","(.+)"
 			), "pod_name","$1","pod","(.+)"
-		) 
-	) by (namespace,container_name,pod_name,node,cluster_id) 
+		)
+	) by (namespace,container_name,pod_name,node,cluster_id)
 	* on (pod_name, namespace, cluster_id) group_left(container) label_replace(avg(avg_over_time(kube_pod_status_phase{phase="Running"}[%s] %s)) by (pod,namespace,cluster_id), "pod_name","$1","pod","(.+)")`
-	queryPVRequestsStr = `avg(avg(kube_persistentvolumeclaim_info) by (persistentvolumeclaim, storageclass, namespace, volumename, cluster_id) 
-	* 
-	on (persistentvolumeclaim, namespace, cluster_id) group_right(storageclass, volumename) 
+	queryPVRequestsStr = `avg(avg(kube_persistentvolumeclaim_info) by (persistentvolumeclaim, storageclass, namespace, volumename, cluster_id)
+	*
+	on (persistentvolumeclaim, namespace, cluster_id) group_right(storageclass, volumename)
 	sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace, cluster_id, kubernetes_name)) by (persistentvolumeclaim, storageclass, namespace, volumename, cluster_id)`
 	// queryRAMAllocationByteHours yields the total byte-hour RAM allocation over the given
 	// window, aggregated by container.
@@ -222,7 +221,7 @@ const (
 	normalizationStr          = `max(count_over_time(kube_pod_container_resource_requests_memory_bytes{}[%s] %s))`
 )
 
-func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, clientset kubernetes.Interface, cp costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
+func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
 	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, window, offset, window, offset)
 	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, window, offset, window, offset)
 	queryCPURequests := fmt.Sprintf(queryCPURequestsStr, window, offset, window, offset)
@@ -280,13 +279,22 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, clientset kube
 	resNetInternetRequests, _ := resChNetInternetRequests.Await()
 	resNormalization, _ := resChNormalization.Await()
 
+	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
+	// NOTE: will not propagate unless coupled with errors.
 	if ctx.HasErrors() {
+		// To keep the context of where the errors are occurring, we log the errors here and pass the error
+		// back to the caller. The caller should handle the specific case where the error is an ErrorCollection
 		for _, promErr := range ctx.Errors() {
-			log.Errorf("ComputeCostData: Prometheus error: %s", promErr.Error())
+			if promErr.Error != nil {
+				log.Errorf("ComputeCostData: Request Error: %s", promErr.Error)
+			}
+			if promErr.ParseError != nil {
+				log.Errorf("ComputeCostData: Parsing Error: %s", promErr.ParseError)
+			}
 		}
 
-		// TODO: Categorize fatal prometheus query failures
-		// return nil, fmt.Errorf("Error querying prometheus: %s", promErr.Error())
+		// ErrorCollection is a collection of errors wrapped in a single error implementation
+		return nil, ctx.ErrorCollection()
 	}
 
 	defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")
@@ -671,10 +679,11 @@ func findDeletedPodInfo(cli prometheusClient.Client, missingContainers map[strin
 	if len(missingContainers) > 0 {
 		queryHistoricalPodLabels := fmt.Sprintf(`kube_pod_labels{}[%s]`, window)
 
-		podLabelsResult, err := prom.NewContext(cli).QuerySync(queryHistoricalPodLabels)
+		podLabelsResult, _, err := prom.NewContext(cli).QuerySync(queryHistoricalPodLabels)
 		if err != nil {
 			log.Errorf("failed to parse historical pod labels: %s", err.Error())
 		}
+
 		podLabels := make(map[string]map[string]string)
 		if podLabelsResult != nil {
 			podLabels, err = parsePodLabels(podLabelsResult)
@@ -720,7 +729,7 @@ func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*c
 		ramCostRes, _ := ramCostResCh.Await()
 		gpuCostRes, _ := gpuCostResCh.Await()
 		if ctx.HasErrors() {
-			return ctx.Errors()[0]
+			return ctx.ErrorCollection()
 		}
 
 		cpuCosts, err := getCost(cpuCostRes)
@@ -1418,52 +1427,43 @@ func floorMultiple(value int64, multiple int64) int64 {
 
 // Attempt to create a key for the request. Reduce the times to minutes in order to more easily group requests based on
 // real time ranges. If for any reason, the key generation fails, return a uuid to ensure uniqueness.
-func requestKeyFor(startString string, endString string, windowString string, filterNamespace string, filterCluster string, remoteEnabled bool) string {
-	fullLayout := "2006-01-02T15:04:05.000Z"
+func requestKeyFor(window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) string {
 	keyLayout := "2006-01-02T15:04Z"
 
-	sTime, err := time.Parse(fullLayout, startString)
-	if err != nil {
-		klog.V(1).Infof("[Warning] Start=%s failed to parse when generating request key: %s", startString, err.Error())
-		return uuid.New().String()
-	}
-	eTime, err := time.Parse(fullLayout, endString)
-	if err != nil {
-		klog.V(1).Infof("[Warning] End=%s failed to parse when generating request key: %s", endString, err.Error())
-		return uuid.New().String()
-	}
-
 	// We "snap" start time and duration to their closest 5 min multiple less than itself, by
 	// applying a snapped duration to a snapped start time.
-	durMins := int64(eTime.Sub(sTime).Minutes())
+	durMins := int64(window.Minutes())
 	durMins = floorMultiple(durMins, 5)
 
-	sMins := int64(sTime.Minute())
+	sMins := int64(window.Start().Minute())
 	sOffset := sMins - floorMultiple(sMins, 5)
 
-	sTime = sTime.Add(-time.Duration(sOffset) * time.Minute)
-	eTime = sTime.Add(time.Duration(durMins) * time.Minute)
+	sTime := window.Start().Add(-time.Duration(sOffset) * time.Minute)
+	eTime := window.Start().Add(time.Duration(durMins) * time.Minute)
 
 	startKey := sTime.Format(keyLayout)
 	endKey := eTime.Format(keyLayout)
 
-	return fmt.Sprintf("%s,%s,%s,%s,%s,%t", startKey, endKey, windowString, filterNamespace, filterCluster, remoteEnabled)
+	return fmt.Sprintf("%s,%s,%s,%s,%s,%t", startKey, endKey, resolution.String(), filterNamespace, filterCluster, remoteEnabled)
 }
 
+// func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider,
+// 	startString, endString, windowString string, resolutionHours float64, filterNamespace string,
+// 	filterCluster string, remoteEnabled bool, offset string) (map[string]*CostData, error)
+
 // ComputeCostDataRange executes a range query for cost data.
 // Note that "offset" represents the time between the function call and "endString", and is also passed for convenience
-func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, clientset kubernetes.Interface, cp costAnalyzerCloud.Provider,
-	startString, endString, windowString string, resolutionHours float64, filterNamespace string, filterCluster string, remoteEnabled bool, offset string) (map[string]*CostData, error) {
+func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
 	// Create a request key for request grouping. This key will be used to represent the cost-model result
 	// for the specific inputs to prevent multiple queries for identical data.
-	key := requestKeyFor(startString, endString, windowString, filterNamespace, filterCluster, remoteEnabled)
+	key := requestKeyFor(window, resolution, filterNamespace, filterCluster, remoteEnabled)
 
 	klog.V(4).Infof("ComputeCostDataRange with Key: %s", key)
 
 	// If there is already a request out that uses the same data, wait for it to return to share the results.
 	// Otherwise, start executing.
 	result, err, _ := cm.RequestGroup.Do(key, func() (interface{}, error) {
-		return cm.costDataRange(cli, clientset, cp, startString, endString, windowString, resolutionHours, filterNamespace, filterCluster, remoteEnabled, offset)
+		return cm.costDataRange(cli, cp, window, resolution, filterNamespace, filterCluster, remoteEnabled)
 	})
 
 	data, ok := result.(map[string]*CostData)
@@ -1474,82 +1474,84 @@ func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, clientset
 	return data, err
 }
 
-func (cm *CostModel) costDataRange(cli prometheusClient.Client, clientset kubernetes.Interface, cp costAnalyzerCloud.Provider, startString, endString, windowString string, resolutionHours float64, filterNamespace string, filterCluster string, remoteEnabled bool, offset string) (map[string]*CostData, error) {
-	layout := "2006-01-02T15:04:05.000Z"
+func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
+	clusterID := env.GetClusterID()
 
-	start, err := time.Parse(layout, startString)
-	if err != nil {
-		klog.V(1).Infof("Error parsing time " + startString + ". Error: " + err.Error())
-		return nil, err
-	}
+	// durHrs := end.Sub(start).Hours() + 1
 
-	end, err := time.Parse(layout, endString)
-	if err != nil {
-		klog.V(1).Infof("Error parsing time " + endString + ". Error: " + err.Error())
-		return nil, err
+	if window.IsOpen() {
+		return nil, fmt.Errorf("illegal window: %s", window)
 	}
+	start := *window.Start()
+	end := *window.End()
 
-	window, err := time.ParseDuration(windowString)
-	if err != nil {
-		klog.V(1).Infof("Error parsing time " + windowString + ". Error: " + err.Error())
-		return nil, err
+	// Snap resolution to the nearest minute
+	resMins := int64(math.Trunc(resolution.Minutes()))
+	if resMins == 0 {
+		return nil, fmt.Errorf("resolution must be greater than 0.0")
 	}
+	resolution = time.Duration(resMins) * time.Minute
 
-	clusterID := env.GetClusterID()
+	// Warn if resolution does not evenly divide window
+	if int64(window.Minutes())%int64(resolution.Minutes()) != 0 {
+		log.Warningf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
+	}
 
-	durHrs := end.Sub(start).Hours() + 1
+	// Convert to Prometheus-style duration string in terms of m or h
+	resStr := fmt.Sprintf("%dm", resMins)
+	if resMins%60 == 0 {
+		resStr = fmt.Sprintf("%dh", resMins/60)
+	}
 
-	if remoteEnabled == true {
+	if remoteEnabled {
 		remoteLayout := "2006-01-02T15:04:05Z"
-		remoteStartStr := start.Format(remoteLayout)
-		remoteEndStr := end.Format(remoteLayout)
-		klog.V(1).Infof("Using remote database for query from %s to %s with window %s", startString, endString, windowString)
-		return CostDataRangeFromSQL("", "", windowString, remoteStartStr, remoteEndStr)
+		remoteStartStr := window.Start().Format(remoteLayout)
+		remoteEndStr := window.End().Format(remoteLayout)
+		klog.V(1).Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
+		return CostDataRangeFromSQL("", "", resolution.String(), remoteStartStr, remoteEndStr)
 	}
 
 	scrapeIntervalSeconds := cm.ScrapeInterval.Seconds()
 
 	ctx := prom.NewContext(cli)
 
-	queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, windowString, scrapeIntervalSeconds)
-	queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, windowString, scrapeIntervalSeconds)
-	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, windowString, "", windowString, "")
-	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, windowString, "", windowString, "")
-	queryCPURequests := fmt.Sprintf(queryCPURequestsStr, windowString, "", windowString, "")
-	queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, windowString, "")
-	queryGPURequests := fmt.Sprintf(queryGPURequestsStr, windowString, "", windowString, "", resolutionHours, windowString, "")
+	queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, resStr, scrapeIntervalSeconds)
+	queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, resStr, scrapeIntervalSeconds)
+	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, resStr, "", resStr, "")
+	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, resStr, "", resStr, "")
+	queryCPURequests := fmt.Sprintf(queryCPURequestsStr, resStr, "", resStr, "")
+	queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, resStr, "")
+	queryGPURequests := fmt.Sprintf(queryGPURequestsStr, resStr, "", resStr, "", resolution.Hours(), resStr, "")
 	queryPVRequests := fmt.Sprintf(queryPVRequestsStr)
-	queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, windowString, scrapeIntervalSeconds)
-	queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, windowString)
-	queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, windowString, "")
-	queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, windowString, "")
-	queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, windowString, "")
-	queryNormalization := fmt.Sprintf(normalizationStr, windowString, "")
-
-	queryProfileStart := time.Now()
+	queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, resStr, scrapeIntervalSeconds)
+	queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, resStr)
+	queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, resStr, "")
+	queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, resStr, "")
+	queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, resStr, "")
+	queryNormalization := fmt.Sprintf(normalizationStr, resStr, "")
 
 	// Submit all queries for concurrent evaluation
-	resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, window)
-	resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, window)
-	resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, window)
-	resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, window)
-	resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, window)
-	resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, window)
-	resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, window)
-	resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, window)
-	resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, window)
-	resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, window)
-	resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, window)
-	resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, window)
-	resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, window)
-	resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, windowString), start, end, window)
-	resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, windowString), start, end, window)
-	resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, windowString), start, end, window)
-	resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, windowString), start, end, window)
-	resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, windowString), start, end, window)
-	resChJobs := ctx.QueryRange(queryPodJobs, start, end, window)
-	resChDaemonsets := ctx.QueryRange(queryPodDaemonsets, start, end, window)
-	resChNormalization := ctx.QueryRange(queryNormalization, start, end, window)
+	resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, resolution)
+	resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, resolution)
+	resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, resolution)
+	resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, resolution)
+	resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, resolution)
+	resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, resolution)
+	resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, resolution)
+	resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, resolution)
+	resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, resolution)
+	resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
+	resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
+	resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
+	resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
+	resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, resStr), start, end, resolution)
+	resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, resStr), start, end, resolution)
+	resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, resStr), start, end, resolution)
+	resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, resStr), start, end, resolution)
+	resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, resStr), start, end, resolution)
+	resChJobs := ctx.QueryRange(queryPodJobs, start, end, resolution)
+	resChDaemonsets := ctx.QueryRange(queryPodDaemonsets, start, end, resolution)
+	resChNormalization := ctx.QueryRange(queryNormalization, start, end, resolution)
 
 	// Pull k8s pod, controller, service, and namespace details
 	podlist := cm.Cache.GetAllPods()
@@ -1597,30 +1599,30 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, clientset kubern
 	resJobs, _ := resChJobs.Await()
 	resNormalization, _ := resChNormalization.Await()
 
-	measureTime(queryProfileStart, profileThreshold, fmt.Sprintf("costDataRange(%fh): Prom/k8s Queries", durHrs))
-	defer measureTime(time.Now(), profileThreshold, fmt.Sprintf("costDataRange(%fh): Processing Query Data", durHrs))
-
+	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
+	// NOTE: will not propagate unless coupled with errors.
 	if ctx.HasErrors() {
+		// To keep the context of where the errors are occurring, we log the errors here and pass the error
+		// back to the caller. The caller should handle the specific case where the error is an ErrorCollection
 		for _, promErr := range ctx.Errors() {
-			log.Errorf("CostDataRange: Prometheus error: %s", promErr.Error())
+			if promErr.Error != nil {
+				log.Errorf("CostDataRange: Request Error: %s", promErr.Error)
+			}
+			if promErr.ParseError != nil {
+				log.Errorf("CostDataRange: Parsing Error: %s", promErr.ParseError)
+			}
 		}
 
-		// TODO: Categorize fatal prometheus query failures
-		// return nil, fmt.Errorf("Error querying prometheus: %s", promErr.Error())
+		// ErrorCollection is a collection of errors wrapped in a single error implementation
+		return nil, ctx.ErrorCollection()
 	}
 
-	profileStart := time.Now()
-
 	normalizationValue, err := getNormalizations(resNormalization)
 	if err != nil {
-		msg := fmt.Sprintf("error computing normalization for start=%s, end=%s, window=%s, res=%f", start, end, window, resolutionHours*60*60)
+		msg := fmt.Sprintf("error computing normalization for start=%s, end=%s, res=%s", start, end, resolution)
 		return nil, prom.WrapError(err, msg)
 	}
 
-	measureTime(profileStart, profileThreshold, fmt.Sprintf("costDataRange(%fh): compute normalizations", durHrs))
-
-	profileStart = time.Now()
-
 	pvClaimMapping, err := GetPVInfo(resPVRequests, clusterID)
 	if err != nil {
 		// Just log for compatibility with KSM less than 1.6
@@ -1650,10 +1652,6 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, clientset kubern
 		}
 	}
 
-	measureTime(profileStart, profileThreshold, fmt.Sprintf("costDataRange(%fh): process PV data", durHrs))
-
-	profileStart = time.Now()
-
 	nsLabels, err := GetNamespaceLabelsMetrics(resNSLabels, clusterID)
 	if err != nil {
 		klog.V(1).Infof("Unable to get Namespace Labels for Metrics: %s", err.Error())
@@ -1682,10 +1680,6 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, clientset kubern
 		klog.V(1).Infof("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
 	}
 
-	measureTime(profileStart, profileThreshold, fmt.Sprintf("costDataRange(%fh): process labels", durHrs))
-
-	profileStart = time.Now()
-
 	podStatefulsetMetricsMapping, err := getPodDeploymentsWithMetrics(statefulsetLabels, podLabels)
 	if err != nil {
 		klog.V(1).Infof("Unable to get match Statefulset Labels Metrics to Pods: %s", err.Error())
@@ -1720,10 +1714,6 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, clientset kubern
 		networkUsageMap = make(map[string]*NetworkUsageData)
 	}
 
-	measureTime(profileStart, profileThreshold, fmt.Sprintf("costDataRange(%fh): process deployments, services, and network usage", durHrs))
-
-	profileStart = time.Now()
-
 	containerNameCost := make(map[string]*CostData)
 	containers := make(map[string]bool)
 	otherClusterPVRecorded := make(map[string]bool)
@@ -1786,20 +1776,12 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, clientset kubern
 		containers[key] = true
 	}
 
-	measureTime(profileStart, profileThreshold, fmt.Sprintf("costDataRange(%fh): GetContainerMetricVectors", durHrs))
-
-	profileStart = time.Now()
-
 	// Request metrics can show up after pod eviction and completion.
 	// This method synchronizes requests to allocations such that when
 	// allocation is 0, so are requests
 	applyAllocationToRequests(RAMAllocMap, RAMReqMap)
 	applyAllocationToRequests(CPUAllocMap, CPUReqMap)
 
-	measureTime(profileStart, profileThreshold, fmt.Sprintf("costDataRange(%fh): applyAllocationToRequests", durHrs))
-
-	profileStart = time.Now()
-
 	missingNodes := make(map[string]*costAnalyzerCloud.Node)
 	missingContainers := make(map[string]*CostData)
 	for key := range containers {
@@ -1975,8 +1957,6 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, clientset kubern
 		}
 	}
 
-	measureTime(profileStart, profileThreshold, fmt.Sprintf("costDataRange(%fh): build CostData map", durHrs))
-
 	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping)
 	for k, costs := range unmounted {
 		klog.V(4).Infof("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
@@ -1986,11 +1966,9 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, clientset kubern
 		}
 	}
 
-	w := end.Sub(start)
-	w += window
-	if w.Minutes() > 0 {
-		wStr := fmt.Sprintf("%dm", int(w.Minutes()))
-		err = findDeletedNodeInfo(cli, missingNodes, wStr, offset)
+	if window.Minutes() > 0 {
+		dur, off := window.ToDurationOffset()
+		err = findDeletedNodeInfo(cli, missingNodes, dur, off)
 		if err != nil {
 			klog.V(1).Infof("Error fetching historical node data: %s", err.Error())
 		}
@@ -2077,7 +2055,7 @@ func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[
 	for _, ns := range nss {
 		labels := make(map[string]string)
 		for k, v := range ns.Labels {
-			labels[SanitizeLabelName(k)] = v
+			labels[prom.SanitizeLabelName(k)] = v
 		}
 		nsToLabels[ns.Name+","+clusterID] = labels
 	}

+ 446 - 109
pkg/costmodel/metrics.go

@@ -2,37 +2,33 @@ package costmodel
 
 import (
 	"math"
-	"regexp"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	costAnalyzerCloud "github.com/kubecost/cost-model/pkg/cloud"
+	"github.com/kubecost/cost-model/pkg/cloud"
+	"github.com/kubecost/cost-model/pkg/clustercache"
 	"github.com/kubecost/cost-model/pkg/errors"
 	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/prom"
+
+	promclient "github.com/prometheus/client_golang/api"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 
 	"k8s.io/klog"
 )
 
-var (
-	invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
-)
-
 //--------------------------------------------------------------------------
 //  StatefulsetCollector
 //--------------------------------------------------------------------------
 
 // StatefulsetCollector is a prometheus collector that generates StatefulsetMetrics
 type StatefulsetCollector struct {
-	KubeClientSet kubernetes.Interface
+	KubeClusterCache clustercache.ClusterCache
 }
 
 // Describe sends the super-set of all possible descriptors of metrics
@@ -43,9 +39,9 @@ func (sc StatefulsetCollector) Describe(ch chan<- *prometheus.Desc) {
 
 // Collect is called by the Prometheus registry when collecting metrics.
 func (sc StatefulsetCollector) Collect(ch chan<- prometheus.Metric) {
-	ds, _ := sc.KubeClientSet.AppsV1().StatefulSets("").List(metav1.ListOptions{})
-	for _, statefulset := range ds.Items {
-		labels, values := kubeLabelsToPrometheusLabels(statefulset.Spec.Selector.MatchLabels)
+	ds := sc.KubeClusterCache.GetAllStatefulSets()
+	for _, statefulset := range ds {
+		labels, values := prom.KubeLabelsToLabels(statefulset.Spec.Selector.MatchLabels)
 		m := newStatefulsetMetric(statefulset.GetName(), statefulset.GetNamespace(), "statefulSet_match_labels", labels, values)
 		ch <- m
 	}
@@ -118,7 +114,7 @@ func (s StatefulsetMetric) Write(m *dto.Metric) error {
 
 // DeploymentCollector is a prometheus collector that generates DeploymentMetrics
 type DeploymentCollector struct {
-	KubeClientSet kubernetes.Interface
+	KubeClusterCache clustercache.ClusterCache
 }
 
 // Describe sends the super-set of all possible descriptors of metrics
@@ -129,9 +125,9 @@ func (sc DeploymentCollector) Describe(ch chan<- *prometheus.Desc) {
 
 // Collect is called by the Prometheus registry when collecting metrics.
 func (sc DeploymentCollector) Collect(ch chan<- prometheus.Metric) {
-	ds, _ := sc.KubeClientSet.AppsV1().Deployments("").List(metav1.ListOptions{})
-	for _, deployment := range ds.Items {
-		labels, values := kubeLabelsToPrometheusLabels(deployment.Spec.Selector.MatchLabels)
+	ds := sc.KubeClusterCache.GetAllDeployments()
+	for _, deployment := range ds {
+		labels, values := prom.KubeLabelsToLabels(deployment.Spec.Selector.MatchLabels)
 		m := newDeploymentMetric(deployment.GetName(), deployment.GetNamespace(), "deployment_match_labels", labels, values)
 		ch <- m
 	}
@@ -204,7 +200,7 @@ func (s DeploymentMetric) Write(m *dto.Metric) error {
 
 // ServiceCollector is a prometheus collector that generates ServiceMetrics
 type ServiceCollector struct {
-	KubeClientSet kubernetes.Interface
+	KubeClusterCache clustercache.ClusterCache
 }
 
 // Describe sends the super-set of all possible descriptors of metrics
@@ -215,9 +211,9 @@ func (sc ServiceCollector) Describe(ch chan<- *prometheus.Desc) {
 
 // Collect is called by the Prometheus registry when collecting metrics.
 func (sc ServiceCollector) Collect(ch chan<- prometheus.Metric) {
-	svcs, _ := sc.KubeClientSet.CoreV1().Services("").List(metav1.ListOptions{})
-	for _, svc := range svcs.Items {
-		labels, values := kubeLabelsToPrometheusLabels(svc.Spec.Selector)
+	svcs := sc.KubeClusterCache.GetAllServices()
+	for _, svc := range svcs {
+		labels, values := prom.KubeLabelsToLabels(svc.Spec.Selector)
 		m := newServiceMetric(svc.GetName(), svc.GetNamespace(), "service_selector_labels", labels, values)
 		ch <- m
 	}
@@ -284,13 +280,179 @@ func (s ServiceMetric) Write(m *dto.Metric) error {
 	return nil
 }
 
+//--------------------------------------------------------------------------
+//  NamespaceAnnotationCollector
+//--------------------------------------------------------------------------
+
+// NamespaceAnnotationCollector is a prometheus collector that generates NamespaceAnnotationMetrics
+type NamespaceAnnotationCollector struct {
+	KubeClusterCache clustercache.ClusterCache
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector.
+func (nsac NamespaceAnnotationCollector) Describe(ch chan<- *prometheus.Desc) {
+	ch <- prometheus.NewDesc("kube_namespace_annotations", "namespace annotations", []string{}, nil)
+}
+
+// Collect is called by the Prometheus registry when collecting metrics.
+func (nsac NamespaceAnnotationCollector) Collect(ch chan<- prometheus.Metric) {
+	namespaces := nsac.KubeClusterCache.GetAllNamespaces()
+	for _, namespace := range namespaces {
+		labels, values := prom.KubeAnnotationsToLabels(namespace.Annotations)
+		m := newNamespaceAnnotationsMetric(namespace.GetName(), "kube_namespace_annotations", labels, values)
+		ch <- m
+	}
+}
+
+//--------------------------------------------------------------------------
+//  NamespaceAnnotationsMetric
+//--------------------------------------------------------------------------
+
+// NamespaceAnnotationsMetric is a prometheus.Metric used to encode namespace annotations
+type NamespaceAnnotationsMetric struct {
+	fqName      string
+	help        string
+	labelNames  []string
+	labelValues []string
+	namespace   string
+}
+
+// Creates a new NamespaceAnnotationsMetric, implementation of prometheus.Metric
+func newNamespaceAnnotationsMetric(namespace, fqname string, labelNames []string, labelValues []string) NamespaceAnnotationsMetric {
+	return NamespaceAnnotationsMetric{
+		namespace:   namespace,
+		fqName:      fqname,
+		labelNames:  labelNames,
+		labelValues: labelValues,
+		help:        "kube_namespace_annotations Namespace Annotations",
+	}
+}
+
+// Desc returns the descriptor for the Metric. This method idempotently
+// returns the same descriptor throughout the lifetime of the Metric.
+func (nam NamespaceAnnotationsMetric) Desc() *prometheus.Desc {
+	l := prometheus.Labels{"namespace": nam.namespace}
+	return prometheus.NewDesc(nam.fqName, nam.help, nam.labelNames, l)
+}
+
+// Write encodes the Metric into a "Metric" Protocol Buffer data
+// transmission object.
+func (nam NamespaceAnnotationsMetric) Write(m *dto.Metric) error {
+	h := float64(1)
+	m.Gauge = &dto.Gauge{
+		Value: &h,
+	}
+
+	var labels []*dto.LabelPair
+	for i := range nam.labelNames {
+		labels = append(labels, &dto.LabelPair{
+			Name:  &nam.labelNames[i],
+			Value: &nam.labelValues[i],
+		})
+	}
+	n := "namespace"
+	labels = append(labels, &dto.LabelPair{
+		Name:  &n,
+		Value: &nam.namespace,
+	})
+	m.Label = labels
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  PodAnnotationCollector
+//--------------------------------------------------------------------------
+
+// PodAnnotationCollector is a prometheus collector that generates PodAnnotationMetrics
+type PodAnnotationCollector struct {
+	KubeClusterCache clustercache.ClusterCache
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector.
+func (pac PodAnnotationCollector) Describe(ch chan<- *prometheus.Desc) {
+	ch <- prometheus.NewDesc("kube_pod_annotations", "pod annotations", []string{}, nil)
+}
+
+// Collect is called by the Prometheus registry when collecting metrics.
+func (pac PodAnnotationCollector) Collect(ch chan<- prometheus.Metric) {
+	pods := pac.KubeClusterCache.GetAllPods()
+	for _, pod := range pods {
+		labels, values := prom.KubeAnnotationsToLabels(pod.Annotations)
+		m := newPodAnnotationMetric(pod.GetNamespace(), pod.GetName(), "kube_pod_annotations", labels, values)
+		ch <- m
+	}
+}
+
+//--------------------------------------------------------------------------
+//  PodAnnotationsMetric
+//--------------------------------------------------------------------------
+
+// PodAnnotationsMetric is a prometheus.Metric used to encode namespace annotations
+type PodAnnotationsMetric struct {
+	name        string
+	fqName      string
+	help        string
+	labelNames  []string
+	labelValues []string
+	namespace   string
+}
+
+// Creates a new PodAnnotationsMetric, implementation of prometheus.Metric
+func newPodAnnotationMetric(namespace, name, fqname string, labelNames []string, labelValues []string) PodAnnotationsMetric {
+	return PodAnnotationsMetric{
+		namespace:   namespace,
+		fqName:      fqname,
+		labelNames:  labelNames,
+		labelValues: labelValues,
+		help:        "kube_pod_annotations Pod Annotations",
+	}
+}
+
+// Desc returns the descriptor for the Metric. This method idempotently
+// returns the same descriptor throughout the lifetime of the Metric.
+func (pam PodAnnotationsMetric) Desc() *prometheus.Desc {
+	l := prometheus.Labels{"namespace": pam.namespace, "pod": pam.name}
+	return prometheus.NewDesc(pam.fqName, pam.help, pam.labelNames, l)
+}
+
+// Write encodes the Metric into a "Metric" Protocol Buffer data
+// transmission object.
+func (pam PodAnnotationsMetric) Write(m *dto.Metric) error {
+	h := float64(1)
+	m.Gauge = &dto.Gauge{
+		Value: &h,
+	}
+
+	var labels []*dto.LabelPair
+	for i := range pam.labelNames {
+		labels = append(labels, &dto.LabelPair{
+			Name:  &pam.labelNames[i],
+			Value: &pam.labelValues[i],
+		})
+	}
+	n := "namespace"
+	labels = append(labels, &dto.LabelPair{
+		Name:  &n,
+		Value: &pam.namespace,
+	})
+	r := "pod"
+	labels = append(labels, &dto.LabelPair{
+		Name:  &r,
+		Value: &pam.name,
+	})
+	m.Label = labels
+	return nil
+}
+
 //--------------------------------------------------------------------------
 //  ClusterInfoCollector
 //--------------------------------------------------------------------------
 
 // ClusterInfoCollector is a prometheus collector that generates ClusterInfoMetrics
 type ClusterInfoCollector struct {
-	Cloud         costAnalyzerCloud.Provider
+	Cloud         cloud.Provider
 	KubeClientSet kubernetes.Interface
 }
 
@@ -360,45 +522,227 @@ func toStringPtr(s string) *string {
 }
 
 //--------------------------------------------------------------------------
-//  Package Functions
+//  Cost Model Metrics Initialization
 //--------------------------------------------------------------------------
 
+// Only allow the metrics to be instantiated and registered once
+var metricsInit sync.Once
+
 var (
-	recordingLock     sync.Mutex
+	cpuGv                      *prometheus.GaugeVec
+	ramGv                      *prometheus.GaugeVec
+	gpuGv                      *prometheus.GaugeVec
+	pvGv                       *prometheus.GaugeVec
+	spotGv                     *prometheus.GaugeVec
+	totalGv                    *prometheus.GaugeVec
+	ramAllocGv                 *prometheus.GaugeVec
+	cpuAllocGv                 *prometheus.GaugeVec
+	gpuAllocGv                 *prometheus.GaugeVec
+	pvAllocGv                  *prometheus.GaugeVec
+	networkZoneEgressCostG     prometheus.Gauge
+	networkRegionEgressCostG   prometheus.Gauge
+	networkInternetEgressCostG prometheus.Gauge
+	clusterManagementCostGv    *prometheus.GaugeVec
+	lbCostGv                   *prometheus.GaugeVec
+)
+
+// initCostModelMetrics uses a sync.Once to ensure that these metrics are only created once
+func initCostModelMetrics(clusterCache clustercache.ClusterCache, provider cloud.Provider) {
+	metricsInit.Do(func() {
+		cpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "node_cpu_hourly_cost",
+			Help: "node_cpu_hourly_cost hourly cost for each cpu on this node",
+		}, []string{"instance", "node", "instance_type", "region", "provider_id"})
+
+		ramGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "node_ram_hourly_cost",
+			Help: "node_ram_hourly_cost hourly cost for each gb of ram on this node",
+		}, []string{"instance", "node", "instance_type", "region", "provider_id"})
+
+		gpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "node_gpu_hourly_cost",
+			Help: "node_gpu_hourly_cost hourly cost for each gpu on this node",
+		}, []string{"instance", "node", "instance_type", "region", "provider_id"})
+
+		pvGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "pv_hourly_cost",
+			Help: "pv_hourly_cost Cost per GB per hour on a persistent disk",
+		}, []string{"volumename", "persistentvolume", "provider_id"})
+
+		spotGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "kubecost_node_is_spot",
+			Help: "kubecost_node_is_spot Cloud provider info about node preemptibility",
+		}, []string{"instance", "node", "instance_type", "region", "provider_id"})
+
+		totalGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "node_total_hourly_cost",
+			Help: "node_total_hourly_cost Total node cost per hour",
+		}, []string{"instance", "node", "instance_type", "region", "provider_id"})
+
+		ramAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "container_memory_allocation_bytes",
+			Help: "container_memory_allocation_bytes Bytes of RAM used",
+		}, []string{"namespace", "pod", "container", "instance", "node"})
+
+		cpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "container_cpu_allocation",
+			Help: "container_cpu_allocation Percent of a single CPU used in a minute",
+		}, []string{"namespace", "pod", "container", "instance", "node"})
+
+		gpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "container_gpu_allocation",
+			Help: "container_gpu_allocation GPU used",
+		}, []string{"namespace", "pod", "container", "instance", "node"})
+
+		pvAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "pod_pvc_allocation",
+			Help: "pod_pvc_allocation Bytes used by a PVC attached to a pod",
+		}, []string{"namespace", "pod", "persistentvolumeclaim", "persistentvolume"})
+
+		networkZoneEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
+			Name: "kubecost_network_zone_egress_cost",
+			Help: "kubecost_network_zone_egress_cost Total cost per GB egress across zones",
+		})
+
+		networkRegionEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
+			Name: "kubecost_network_region_egress_cost",
+			Help: "kubecost_network_region_egress_cost Total cost per GB egress across regions",
+		})
+
+		networkInternetEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
+			Name: "kubecost_network_internet_egress_cost",
+			Help: "kubecost_network_internet_egress_cost Total cost per GB of internet egress.",
+		})
+
+		clusterManagementCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name: "kubecost_cluster_management_cost",
+			Help: "kubecost_cluster_management_cost Hourly cost paid as a cluster management fee.",
+		}, []string{"provisioner_name"})
+
+		lbCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // no differentiation between ELB and ALB right now
+			Name: "kubecost_load_balancer_cost",
+			Help: "kubecost_load_balancer_cost Hourly cost of load balancer",
+		}, []string{"ingress_ip", "namespace", "service_name"}) // assumes one ingress IP per load balancer
+
+		// Register cost-model metrics for emission
+		prometheus.MustRegister(cpuGv, ramGv, gpuGv, totalGv, pvGv, spotGv)
+		prometheus.MustRegister(ramAllocGv, cpuAllocGv, gpuAllocGv, pvAllocGv)
+		prometheus.MustRegister(networkZoneEgressCostG, networkRegionEgressCostG, networkInternetEgressCostG)
+		prometheus.MustRegister(clusterManagementCostGv, lbCostGv)
+
+		// General Metric Collectors
+		prometheus.MustRegister(ServiceCollector{
+			KubeClusterCache: clusterCache,
+		})
+		prometheus.MustRegister(DeploymentCollector{
+			KubeClusterCache: clusterCache,
+		})
+		prometheus.MustRegister(StatefulsetCollector{
+			KubeClusterCache: clusterCache,
+		})
+		prometheus.MustRegister(ClusterInfoCollector{
+			KubeClientSet: clusterCache.GetClient(),
+			Cloud:         provider,
+		})
+	})
+}
+
+//--------------------------------------------------------------------------
+//  CostModelMetricsEmitter
+//--------------------------------------------------------------------------
+
+// CostModelMetricsEmitter emits all cost-model specific metrics calculated by
+// the CostModel.ComputeCostData() method.
+type CostModelMetricsEmitter struct {
+	PrometheusClient promclient.Client
+	KubeClusterCache clustercache.ClusterCache
+	CloudProvider    cloud.Provider
+	Model            *CostModel
+
+	// Metrics
+	CPUPriceRecorder              *prometheus.GaugeVec
+	RAMPriceRecorder              *prometheus.GaugeVec
+	PersistentVolumePriceRecorder *prometheus.GaugeVec
+	GPUPriceRecorder              *prometheus.GaugeVec
+	PVAllocationRecorder          *prometheus.GaugeVec
+	NodeSpotRecorder              *prometheus.GaugeVec
+	NodeTotalPriceRecorder        *prometheus.GaugeVec
+	RAMAllocationRecorder         *prometheus.GaugeVec
+	CPUAllocationRecorder         *prometheus.GaugeVec
+	GPUAllocationRecorder         *prometheus.GaugeVec
+	ClusterManagementCostRecorder *prometheus.GaugeVec
+	LBCostRecorder                *prometheus.GaugeVec
+	NetworkZoneEgressRecorder     prometheus.Gauge
+	NetworkRegionEgressRecorder   prometheus.Gauge
+	NetworkInternetEgressRecorder prometheus.Gauge
+
+	// Flow Control
+	recordingLock     *sync.Mutex
 	recordingStopping bool
 	recordingStop     chan bool
-)
+}
+
+// NewCostModelMetricsEmitter creates a new cost-model metrics emitter. Use Start() to begin metric emission.
+func NewCostModelMetricsEmitter(promClient promclient.Client, clusterCache clustercache.ClusterCache, provider cloud.Provider, model *CostModel) *CostModelMetricsEmitter {
+	// init will only actually execute once to register the custom gauges
+	initCostModelMetrics(clusterCache, provider)
+
+	return &CostModelMetricsEmitter{
+		PrometheusClient:              promClient,
+		KubeClusterCache:              clusterCache,
+		CloudProvider:                 provider,
+		Model:                         model,
+		CPUPriceRecorder:              cpuGv,
+		RAMPriceRecorder:              ramGv,
+		GPUPriceRecorder:              gpuGv,
+		PersistentVolumePriceRecorder: pvGv,
+		NodeSpotRecorder:              spotGv,
+		NodeTotalPriceRecorder:        totalGv,
+		RAMAllocationRecorder:         ramAllocGv,
+		CPUAllocationRecorder:         cpuAllocGv,
+		GPUAllocationRecorder:         gpuAllocGv,
+		PVAllocationRecorder:          pvAllocGv,
+		NetworkZoneEgressRecorder:     networkZoneEgressCostG,
+		NetworkRegionEgressRecorder:   networkRegionEgressCostG,
+		NetworkInternetEgressRecorder: networkInternetEgressCostG,
+		ClusterManagementCostRecorder: clusterManagementCostGv,
+		LBCostRecorder:                lbCostGv,
+		recordingLock:                 new(sync.Mutex),
+		recordingStopping:             false,
+		recordingStop:                 nil,
+	}
+}
 
 // Checks to see if there is a metric recording stop channel. If it exists, a new
 // channel is not created and false is returned. If it doesn't exist, a new channel
 // is created and true is returned.
-func checkOrCreateRecordingChan() bool {
-	recordingLock.Lock()
-	defer recordingLock.Unlock()
+func (cmme *CostModelMetricsEmitter) checkOrCreateRecordingChan() bool {
+	cmme.recordingLock.Lock()
+	defer cmme.recordingLock.Unlock()
 
-	if recordingStop != nil {
+	if cmme.recordingStop != nil {
 		return false
 	}
 
-	recordingStop = make(chan bool, 1)
+	cmme.recordingStop = make(chan bool, 1)
 	return true
 }
 
-// IsCostModelMetricRecordingRunning returns true if metric recording is still running.
-func IsCostModelMetricRecordingRunning() bool {
-	recordingLock.Lock()
-	defer recordingLock.Unlock()
+// IsRunning returns true if metric recording is running.
+func (cmme *CostModelMetricsEmitter) IsRunning() bool {
+	cmme.recordingLock.Lock()
+	defer cmme.recordingLock.Unlock()
 
-	return recordingStop != nil
+	return cmme.recordingStop != nil
 }
 
 // StartCostModelMetricRecording starts the go routine that emits metrics used to determine
 // cluster costs.
-func StartCostModelMetricRecording(a *Accesses) bool {
+func (cmme *CostModelMetricsEmitter) Start() bool {
 	// Check to see if we're already recording
 	// This function will create the stop recording channel and return true
 	// if it doesn't exist.
-	if !checkOrCreateRecordingChan() {
+	if !cmme.checkOrCreateRecordingChan() {
 		log.Errorf("Attempted to start cost model metric recording when it's already running.")
 		return false
 	}
@@ -420,45 +764,56 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 		}
 
 		var defaultRegion string = ""
-		nodeList := a.Model.Cache.GetAllNodes()
+		nodeList := cmme.KubeClusterCache.GetAllNodes()
 		if len(nodeList) > 0 {
 			defaultRegion = nodeList[0].Labels[v1.LabelZoneRegion]
 		}
 
 		for {
 			klog.V(4).Info("Recording prices...")
-			podlist := a.Model.Cache.GetAllPods()
+			podlist := cmme.KubeClusterCache.GetAllPods()
 			podStatus := make(map[string]v1.PodPhase)
 			for _, pod := range podlist {
 				podStatus[pod.Name] = pod.Status.Phase
 			}
 
-			cfg, _ := a.Cloud.GetConfig()
+			cfg, _ := cmme.CloudProvider.GetConfig()
 
-			provisioner, clusterManagementCost, err := a.Cloud.ClusterManagementPricing()
+			provisioner, clusterManagementCost, err := cmme.CloudProvider.ClusterManagementPricing()
 			if err != nil {
 				klog.V(1).Infof("Error getting cluster management cost %s", err.Error())
 			}
-			a.ClusterManagementCostRecorder.WithLabelValues(provisioner).Set(clusterManagementCost)
+			cmme.ClusterManagementCostRecorder.WithLabelValues(provisioner).Set(clusterManagementCost)
 
 			// Record network pricing at global scope
-			networkCosts, err := a.Cloud.NetworkPricing()
+			networkCosts, err := cmme.CloudProvider.NetworkPricing()
 			if err != nil {
 				klog.V(4).Infof("Failed to retrieve network costs: %s", err.Error())
 			} else {
-				a.NetworkZoneEgressRecorder.Set(networkCosts.ZoneNetworkEgressCost)
-				a.NetworkRegionEgressRecorder.Set(networkCosts.RegionNetworkEgressCost)
-				a.NetworkInternetEgressRecorder.Set(networkCosts.InternetNetworkEgressCost)
+				cmme.NetworkZoneEgressRecorder.Set(networkCosts.ZoneNetworkEgressCost)
+				cmme.NetworkRegionEgressRecorder.Set(networkCosts.RegionNetworkEgressCost)
+				cmme.NetworkInternetEgressRecorder.Set(networkCosts.InternetNetworkEgressCost)
 			}
 
-			data, err := a.Model.ComputeCostData(a.PrometheusClient, a.KubeClientSet, a.Cloud, "2m", "", "")
+			// TODO: Pass PrometheusClient and CloudProvider into CostModel on instantiation so this isn't so awkward
+			data, err := cmme.Model.ComputeCostData(cmme.PrometheusClient, cmme.CloudProvider, "2m", "", "")
 			if err != nil {
-				klog.V(1).Info("Error in price recording: " + err.Error())
+				// For an error collection, we'll just log the length of the errors (ComputeCostData already logs the
+				// actual errors)
+				if prom.IsErrorCollection(err) {
+					if ec, ok := err.(prom.QueryErrorCollection); ok {
+						klog.V(1).Infof("Error in price recording: %d errors occurred", len(ec.Errors()))
+					}
+				} else {
+					klog.V(1).Info("Error in price recording: " + err.Error())
+				}
+
 				// zero the for loop so the time.Sleep will still work
 				data = map[string]*CostData{}
 			}
 
-			nodes, err := a.Model.GetNodeCost(a.Cloud)
+			// TODO: Pass CloudProvider into CostModel on instantiation so this isn't so awkward
+			nodes, err := cmme.Model.GetNodeCost(cmme.CloudProvider)
 			for nodeName, node := range nodes {
 				// Emit costs, guarding against NaN inputs for custom pricing.
 				cpuCost, _ := strconv.ParseFloat(node.VCPUCost, 64)
@@ -499,20 +854,21 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 
 				totalCost := cpu*cpuCost + ramCost*(ram/1024/1024/1024) + gpu*gpuCost
 
-				a.CPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(cpuCost)
-				a.RAMPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(ramCost)
-				a.GPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(gpuCost)
-				a.NodeTotalPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(totalCost)
+				cmme.CPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(cpuCost)
+				cmme.RAMPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(ramCost)
+				cmme.GPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(gpuCost)
+				cmme.NodeTotalPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(totalCost)
 				if node.IsSpot() {
-					a.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(1.0)
+					cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(1.0)
 				} else {
-					a.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(0.0)
+					cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID).Set(0.0)
 				}
 				labelKey := getKeyFromLabelStrings(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID)
 				nodeSeen[labelKey] = true
 			}
 
-			loadBalancers, err := a.Model.GetLBCost(a.Cloud)
+			// TODO: Pass CloudProvider into CostModel on instantiation so this isn't so awkward
+			loadBalancers, err := cmme.Model.GetLBCost(cmme.CloudProvider)
 			for lbKey, lb := range loadBalancers {
 				// TODO: parse (if necessary) and calculate cost associated with loadBalancer based on dynamic cloud prices fetched into each lb struct on GetLBCost() call
 				keyParts := getLabelStringsFromKey(lbKey)
@@ -522,7 +878,7 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 				if len(lb.IngressIPAddresses) > 0 {
 					ingressIP = lb.IngressIPAddresses[0] // assumes one ingress IP per load balancer
 				}
-				a.LBCostRecorder.WithLabelValues(ingressIP, namespace, serviceName).Set(lb.Cost)
+				cmme.LBCostRecorder.WithLabelValues(ingressIP, namespace, serviceName).Set(lb.Cost)
 
 				labelKey := getKeyFromLabelStrings(namespace, serviceName)
 				loadBalancerSeen[labelKey] = true
@@ -542,7 +898,7 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 							if timesClaimed == 0 {
 								timesClaimed = 1 // unallocated PVs are unclaimed but have a full allocation
 							}
-							a.PVAllocationRecorder.WithLabelValues(namespace, podName, pvc.Claim, pvc.VolumeName).Set(pvc.Values[0].Value / float64(timesClaimed))
+							cmme.PVAllocationRecorder.WithLabelValues(namespace, podName, pvc.Claim, pvc.VolumeName).Set(pvc.Values[0].Value / float64(timesClaimed))
 							labelKey := getKeyFromLabelStrings(namespace, podName, pvc.Claim, pvc.VolumeName)
 							pvcSeen[labelKey] = true
 						}
@@ -550,14 +906,14 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 				}
 
 				if len(costs.RAMAllocation) > 0 {
-					a.RAMAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.RAMAllocation[0].Value)
+					cmme.RAMAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.RAMAllocation[0].Value)
 				}
 				if len(costs.CPUAllocation) > 0 {
-					a.CPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.CPUAllocation[0].Value)
+					cmme.CPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.CPUAllocation[0].Value)
 				}
 				if len(costs.GPUReq) > 0 {
 					// allocation here is set to the request because shared GPU usage not yet supported.
-					a.GPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.GPUReq[0].Value)
+					cmme.GPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.GPUReq[0].Value)
 				}
 				labelKey := getKeyFromLabelStrings(namespace, podName, containerName, nodeName, nodeName)
 				if podStatus[podName] == v1.PodRunning { // Only report data for current pods
@@ -566,7 +922,7 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 					containerSeen[labelKey] = false
 				}
 
-				storageClasses := a.Model.Cache.GetAllStorageClasses()
+				storageClasses := cmme.KubeClusterCache.GetAllStorageClasses()
 				storageClassMap := make(map[string]map[string]string)
 				for _, storageClass := range storageClasses {
 					params := storageClass.Parameters
@@ -577,7 +933,7 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 					}
 				}
 
-				pvs := a.Model.Cache.GetAllPersistentVolumes()
+				pvs := cmme.KubeClusterCache.GetAllPersistentVolumes()
 				for _, pv := range pvs {
 					parameters, ok := storageClassMap[pv.Spec.StorageClassName]
 					if !ok {
@@ -589,14 +945,16 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 					} else {
 						region = defaultRegion
 					}
-					cacPv := &costAnalyzerCloud.PV{
+					cacPv := &cloud.PV{
 						Class:      pv.Spec.StorageClassName,
 						Region:     region,
 						Parameters: parameters,
 					}
-					GetPVCost(cacPv, pv, a.Cloud, region)
+
+					// TODO: GetPVCost should be a method in CostModel?
+					GetPVCost(cacPv, pv, cmme.CloudProvider, region)
 					c, _ := strconv.ParseFloat(cacPv.Cost, 64)
-					a.PersistentVolumePriceRecorder.WithLabelValues(pv.Name, pv.Name, cacPv.ProviderID).Set(c)
+					cmme.PersistentVolumePriceRecorder.WithLabelValues(pv.Name, pv.Name, cacPv.ProviderID).Set(c)
 					labelKey := getKeyFromLabelStrings(pv.Name, pv.Name)
 					pvSeen[labelKey] = true
 				}
@@ -605,31 +963,31 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 				if !seen {
 					klog.V(4).Infof("Removing %s from nodes", labelString)
 					labels := getLabelStringsFromKey(labelString)
-					ok := a.NodeTotalPriceRecorder.DeleteLabelValues(labels...)
+					ok := cmme.NodeTotalPriceRecorder.DeleteLabelValues(labels...)
 					if ok {
 						klog.V(4).Infof("removed %s from totalprice", labelString)
 					} else {
 						klog.Infof("FAILURE TO REMOVE %s from totalprice", labelString)
 					}
-					ok = a.NodeSpotRecorder.DeleteLabelValues(labels...)
+					ok = cmme.NodeSpotRecorder.DeleteLabelValues(labels...)
 					if ok {
 						klog.V(4).Infof("removed %s from spot records", labelString)
 					} else {
 						klog.Infof("FAILURE TO REMOVE %s from spot records", labelString)
 					}
-					ok = a.CPUPriceRecorder.DeleteLabelValues(labels...)
+					ok = cmme.CPUPriceRecorder.DeleteLabelValues(labels...)
 					if ok {
 						klog.V(4).Infof("removed %s from cpuprice", labelString)
 					} else {
 						klog.Infof("FAILURE TO REMOVE %s from cpuprice", labelString)
 					}
-					ok = a.GPUPriceRecorder.DeleteLabelValues(labels...)
+					ok = cmme.GPUPriceRecorder.DeleteLabelValues(labels...)
 					if ok {
 						klog.V(4).Infof("removed %s from gpuprice", labelString)
 					} else {
 						klog.Infof("FAILURE TO REMOVE %s from gpuprice", labelString)
 					}
-					ok = a.RAMPriceRecorder.DeleteLabelValues(labels...)
+					ok = cmme.RAMPriceRecorder.DeleteLabelValues(labels...)
 					if ok {
 						klog.V(4).Infof("removed %s from ramprice", labelString)
 					} else {
@@ -643,7 +1001,7 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 			for labelString, seen := range loadBalancerSeen {
 				if !seen {
 					labels := getLabelStringsFromKey(labelString)
-					a.LBCostRecorder.DeleteLabelValues(labels...)
+					cmme.LBCostRecorder.DeleteLabelValues(labels...)
 				} else {
 					loadBalancerSeen[labelString] = false
 				}
@@ -651,9 +1009,9 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 			for labelString, seen := range containerSeen {
 				if !seen {
 					labels := getLabelStringsFromKey(labelString)
-					a.RAMAllocationRecorder.DeleteLabelValues(labels...)
-					a.CPUAllocationRecorder.DeleteLabelValues(labels...)
-					a.GPUAllocationRecorder.DeleteLabelValues(labels...)
+					cmme.RAMAllocationRecorder.DeleteLabelValues(labels...)
+					cmme.CPUAllocationRecorder.DeleteLabelValues(labels...)
+					cmme.GPUAllocationRecorder.DeleteLabelValues(labels...)
 					delete(containerSeen, labelString)
 				} else {
 					containerSeen[labelString] = false
@@ -662,7 +1020,7 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 			for labelString, seen := range pvSeen {
 				if !seen {
 					labels := getLabelStringsFromKey(labelString)
-					a.PersistentVolumePriceRecorder.DeleteLabelValues(labels...)
+					cmme.PersistentVolumePriceRecorder.DeleteLabelValues(labels...)
 					delete(pvSeen, labelString)
 				} else {
 					pvSeen[labelString] = false
@@ -671,7 +1029,7 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 			for labelString, seen := range pvcSeen {
 				if !seen {
 					labels := getLabelStringsFromKey(labelString)
-					a.PVAllocationRecorder.DeleteLabelValues(labels...)
+					cmme.PVAllocationRecorder.DeleteLabelValues(labels...)
 					delete(pvcSeen, labelString)
 				} else {
 					pvcSeen[labelString] = false
@@ -680,11 +1038,11 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 
 			select {
 			case <-time.After(time.Minute):
-			case <-recordingStop:
-				recordingLock.Lock()
-				recordingStopping = false
-				recordingStop = nil
-				recordingLock.Unlock()
+			case <-cmme.recordingStop:
+				cmme.recordingLock.Lock()
+				cmme.recordingStopping = false
+				cmme.recordingStop = nil
+				cmme.recordingLock.Unlock()
 				return
 			}
 		}
@@ -693,35 +1051,14 @@ func StartCostModelMetricRecording(a *Accesses) bool {
 	return true
 }
 
-// StopCostModelMetricRecording halts the metrics emission loop after the current emission is completed
+// Stop halts the metrics emission loop after the current emission is completed
 // or if the emission is paused.
-func StopCostModelMetricRecording() {
-	recordingLock.Lock()
-	defer recordingLock.Unlock()
-
-	if !recordingStopping && recordingStop != nil {
-		recordingStopping = true
-		close(recordingStop)
-	}
-}
+func (cmme *CostModelMetricsEmitter) Stop() {
+	cmme.recordingLock.Lock()
+	defer cmme.recordingLock.Unlock()
 
-// Converts kubernetes labels into prometheus labels.
-func kubeLabelsToPrometheusLabels(labels map[string]string) ([]string, []string) {
-	labelKeys := make([]string, 0, len(labels))
-	for k := range labels {
-		labelKeys = append(labelKeys, k)
+	if !cmme.recordingStopping && cmme.recordingStop != nil {
+		cmme.recordingStopping = true
+		close(cmme.recordingStop)
 	}
-	sort.Strings(labelKeys)
-
-	labelValues := make([]string, 0, len(labels))
-	for i, k := range labelKeys {
-		labelKeys[i] = "label_" + SanitizeLabelName(k)
-		labelValues = append(labelValues, labels[k])
-	}
-	return labelKeys, labelValues
-}
-
-// Replaces all illegal prometheus label characters with _
-func SanitizeLabelName(s string) string {
-	return invalidLabelCharRE.ReplaceAllString(s, "_")
 }

+ 288 - 276
pkg/costmodel/router.go

@@ -9,6 +9,7 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"k8s.io/klog"
@@ -17,12 +18,14 @@ import (
 
 	sentry "github.com/getsentry/sentry-go"
 
-	costAnalyzerCloud "github.com/kubecost/cost-model/pkg/cloud"
+	"github.com/kubecost/cost-model/pkg/cloud"
 	"github.com/kubecost/cost-model/pkg/clustercache"
 	cm "github.com/kubecost/cost-model/pkg/clustermanager"
 	"github.com/kubecost/cost-model/pkg/costmodel/clusters"
 	"github.com/kubecost/cost-model/pkg/env"
 	"github.com/kubecost/cost-model/pkg/errors"
+	"github.com/kubecost/cost-model/pkg/kubecost"
+	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/prom"
 	"github.com/kubecost/cost-model/pkg/thanos"
 	prometheusClient "github.com/prometheus/client_golang/api"
@@ -31,7 +34,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/patrickmn/go-cache"
-	"github.com/prometheus/client_golang/prometheus"
 
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
@@ -41,6 +43,12 @@ import (
 const (
 	prometheusTroubleshootingEp = "http://docs.kubecost.com/custom-prom#troubleshoot"
 	RFC3339Milli                = "2006-01-02T15:04:05.000Z"
+	maxCacheMinutes1d           = 11
+	maxCacheMinutes2d           = 17
+	maxCacheMinutes7d           = 37
+	maxCacheMinutes30d          = 137
+	CustomPricingSetting        = "CustomPricing"
+	DiscountSetting             = "Discount"
 )
 
 var (
@@ -48,42 +56,92 @@ var (
 	gitCommit string
 )
 
-var Router = httprouter.New()
-var A Accesses
-
+// Accesses defines a singleton application instance, providing access to
+// Prometheus, Kubernetes, the cloud provider, and caches.
 type Accesses struct {
-	PrometheusClient              prometheusClient.Client
-	ThanosClient                  prometheusClient.Client
-	KubeClientSet                 kubernetes.Interface
-	ClusterManager                *cm.ClusterManager
-	ClusterMap                    clusters.ClusterMap
-	Cloud                         costAnalyzerCloud.Provider
-	CPUPriceRecorder              *prometheus.GaugeVec
-	RAMPriceRecorder              *prometheus.GaugeVec
-	PersistentVolumePriceRecorder *prometheus.GaugeVec
-	GPUPriceRecorder              *prometheus.GaugeVec
-	NodeTotalPriceRecorder        *prometheus.GaugeVec
-	NodeSpotRecorder              *prometheus.GaugeVec
-	RAMAllocationRecorder         *prometheus.GaugeVec
-	CPUAllocationRecorder         *prometheus.GaugeVec
-	GPUAllocationRecorder         *prometheus.GaugeVec
-	PVAllocationRecorder          *prometheus.GaugeVec
-	ClusterManagementCostRecorder *prometheus.GaugeVec
-	LBCostRecorder                *prometheus.GaugeVec
-	NetworkZoneEgressRecorder     prometheus.Gauge
-	NetworkRegionEgressRecorder   prometheus.Gauge
-	NetworkInternetEgressRecorder prometheus.Gauge
-	ServiceSelectorRecorder       *prometheus.GaugeVec
-	DeploymentSelectorRecorder    *prometheus.GaugeVec
-	Model                         *CostModel
-	OutOfClusterCache             *cache.Cache
+	Router            *httprouter.Router
+	PrometheusClient  prometheusClient.Client
+	ThanosClient      prometheusClient.Client
+	KubeClientSet     kubernetes.Interface
+	ClusterManager    *cm.ClusterManager
+	ClusterMap        clusters.ClusterMap
+	CloudProvider     cloud.Provider
+	Model             *CostModel
+	MetricsEmitter    *CostModelMetricsEmitter
+	OutOfClusterCache *cache.Cache
+	AggregateCache    *cache.Cache
+	CostDataCache     *cache.Cache
+	ClusterCostsCache *cache.Cache
+	CacheExpiration   map[time.Duration]time.Duration
+	AggAPI            Aggregator
+	// SettingsCache stores the current state of app settings.
+	SettingsCache *cache.Cache
+	// settingsSubscribers tracks channels through which changes to different
+	// settings will be published in a pub/sub model
+	settingsSubscribers map[string][]chan string
+	settingsMutex       sync.Mutex
+}
+
+// GetPrometheusClient decides whether the default Prometheus client or the Thanos client
+// should be used.
+func (a *Accesses) GetPrometheusClient(remote bool) prometheusClient.Client {
+	// Use Thanos Client if it exists (enabled) and remote flag set
+	var pc prometheusClient.Client
+
+	if remote && a.ThanosClient != nil {
+		pc = a.ThanosClient
+	} else {
+		pc = a.PrometheusClient
+	}
+
+	return pc
 }
 
-type DataEnvelope struct {
+// GetCacheExpiration looks up and returns custom cache expiration for the given duration.
+// If one does not exist, it returns the default cache expiration, which is defined by
+// the particular cache.
+func (a *Accesses) GetCacheExpiration(dur time.Duration) time.Duration {
+	if expiration, ok := a.CacheExpiration[dur]; ok {
+		return expiration
+	}
+	return cache.DefaultExpiration
+}
+
+// GetCacheRefresh determines how long to wait before refreshing the cache for the given
+// duration: half the cache expiration for that duration, or 1 minute if the expiration is
+// not found or is 2 minutes or less.
+func (a *Accesses) GetCacheRefresh(dur time.Duration) time.Duration {
+	expiry := a.GetCacheExpiration(dur).Minutes()
+	if expiry <= 2.0 {
+		return time.Minute
+	}
+	mins := time.Duration(expiry/2.0) * time.Minute
+	return mins
+}
+
+func (a *Accesses) ClusterCostsFromCacheHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+
+	durationHrs := "24h"
+	offset := "1m"
+	pClient := a.GetPrometheusClient(true)
+
+	key := fmt.Sprintf("%s:%s", durationHrs, offset)
+	if data, valid := a.ClusterCostsCache.Get(key); valid {
+		clusterCosts := data.(map[string]*ClusterCosts)
+		w.Write(WrapDataWithMessage(clusterCosts, nil, "clusterCosts cache hit"))
+	} else {
+		data, err := a.ComputeClusterCosts(pClient, a.CloudProvider, durationHrs, offset, true)
+		w.Write(WrapDataWithMessage(data, err, fmt.Sprintf("clusterCosts cache miss: %s", key)))
+	}
+}
+
+type Response struct {
 	Code    int         `json:"code"`
 	Status  string      `json:"status"`
 	Data    interface{} `json:"data"`
 	Message string      `json:"message,omitempty"`
+	Warning string      `json:"warning,omitempty"`
 }
 
 // FilterFunc is a filter that returns true iff the given CostData should be filtered out, and the environment that was used as the filter criteria, if it was an aggregate
@@ -243,48 +301,95 @@ func ParseTimeRange(duration, offset string) (*time.Time, *time.Time, error) {
 	return &startTime, &endTime, nil
 }
 
+func WrapData(data interface{}, err error) []byte {
+	var resp []byte
+
+	if err != nil {
+		klog.V(1).Infof("Error returned to client: %s", err.Error())
+		resp, _ = json.Marshal(&Response{
+			Code:    http.StatusInternalServerError,
+			Status:  "error",
+			Message: err.Error(),
+			Data:    data,
+		})
+	} else {
+		resp, _ = json.Marshal(&Response{
+			Code:   http.StatusOK,
+			Status: "success",
+			Data:   data,
+		})
+	}
+
+	return resp
+}
+
 func WrapDataWithMessage(data interface{}, err error, message string) []byte {
 	var resp []byte
 
 	if err != nil {
 		klog.V(1).Infof("Error returned to client: %s", err.Error())
-		resp, _ = json.Marshal(&DataEnvelope{
+		resp, _ = json.Marshal(&Response{
 			Code:    http.StatusInternalServerError,
 			Status:  "error",
 			Message: err.Error(),
 			Data:    data,
 		})
 	} else {
-		resp, _ = json.Marshal(&DataEnvelope{
+		resp, _ = json.Marshal(&Response{
 			Code:    http.StatusOK,
 			Status:  "success",
 			Data:    data,
 			Message: message,
 		})
-
 	}
 
 	return resp
 }
 
-func WrapData(data interface{}, err error) []byte {
+func WrapDataWithWarning(data interface{}, err error, warning string) []byte {
 	var resp []byte
 
 	if err != nil {
 		klog.V(1).Infof("Error returned to client: %s", err.Error())
-		resp, _ = json.Marshal(&DataEnvelope{
+		resp, _ = json.Marshal(&Response{
 			Code:    http.StatusInternalServerError,
 			Status:  "error",
 			Message: err.Error(),
+			Warning: warning,
 			Data:    data,
 		})
 	} else {
-		resp, _ = json.Marshal(&DataEnvelope{
-			Code:   http.StatusOK,
-			Status: "success",
-			Data:   data,
+		resp, _ = json.Marshal(&Response{
+			Code:    http.StatusOK,
+			Status:  "success",
+			Data:    data,
+			Warning: warning,
 		})
+	}
 
+	return resp
+}
+
+func WrapDataWithMessageAndWarning(data interface{}, err error, message, warning string) []byte {
+	var resp []byte
+
+	if err != nil {
+		klog.V(1).Infof("Error returned to client: %s", err.Error())
+		resp, _ = json.Marshal(&Response{
+			Code:    http.StatusInternalServerError,
+			Status:  "error",
+			Message: err.Error(),
+			Warning: warning,
+			Data:    data,
+		})
+	} else {
+		resp, _ = json.Marshal(&Response{
+			Code:    http.StatusOK,
+			Status:  "success",
+			Data:    data,
+			Message: message,
+			Warning: warning,
+		})
 	}
 
 	return resp
@@ -295,7 +400,7 @@ func (a *Accesses) RefreshPricingData(w http.ResponseWriter, r *http.Request, ps
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	err := a.Cloud.DownloadPricingData()
+	err := a.CloudProvider.DownloadPricingData()
 
 	w.Write(WrapData(nil, err))
 }
@@ -313,7 +418,7 @@ func (a *Accesses) CostDataModel(w http.ResponseWriter, r *http.Request, ps http
 		offset = "offset " + offset
 	}
 
-	data, err := a.Model.ComputeCostData(a.PrometheusClient, a.KubeClientSet, a.Cloud, window, offset, namespace)
+	data, err := a.Model.ComputeCostData(a.PrometheusClient, a.CloudProvider, window, offset, namespace)
 
 	if fields != "" {
 		filteredData := filterFields(fields, data)
@@ -346,7 +451,7 @@ func (a *Accesses) ClusterCosts(w http.ResponseWriter, r *http.Request, ps httpr
 		client = a.PrometheusClient
 	}
 
-	data, err := ComputeClusterCosts(client, a.Cloud, window, offset, true)
+	data, err := a.ComputeClusterCosts(client, a.CloudProvider, window, offset, true)
 	w.Write(WrapData(data, err))
 }
 
@@ -359,7 +464,7 @@ func (a *Accesses) ClusterCostsOverTime(w http.ResponseWriter, r *http.Request,
 	window := r.URL.Query().Get("window")
 	offset := r.URL.Query().Get("offset")
 
-	data, err := ClusterCostsOverTime(a.PrometheusClient, a.Cloud, start, end, window, offset)
+	data, err := ClusterCostsOverTime(a.PrometheusClient, a.CloudProvider, start, end, window, offset)
 	w.Write(WrapData(data, err))
 }
 
@@ -367,16 +472,38 @@ func (a *Accesses) CostDataModelRange(w http.ResponseWriter, r *http.Request, ps
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	start := r.URL.Query().Get("start")
-	end := r.URL.Query().Get("end")
-	window := r.URL.Query().Get("window")
+	startStr := r.URL.Query().Get("start")
+	endStr := r.URL.Query().Get("end")
+	windowStr := r.URL.Query().Get("window")
 	fields := r.URL.Query().Get("filterFields")
 	namespace := r.URL.Query().Get("namespace")
 	cluster := r.URL.Query().Get("cluster")
 	remote := r.URL.Query().Get("remote")
-
 	remoteEnabled := env.IsRemoteEnabled() && remote != "false"
 
+	layout := "2006-01-02T15:04:05.000Z"
+	start, err := time.Parse(layout, startStr)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("invalid start date: %s", startStr), http.StatusBadRequest)
+		return
+	}
+	end, err := time.Parse(layout, endStr)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("invalid end date: %s", endStr), http.StatusBadRequest)
+		return
+	}
+
+	window := kubecost.NewWindow(&start, &end)
+	if window.IsOpen() || window.IsEmpty() || window.IsNegative() {
+		http.Error(w, fmt.Sprintf("invalid date range: %s", window), http.StatusBadRequest)
+		return
+	}
+
+	resolution := time.Hour
+	if resDur, err := time.ParseDuration(windowStr); err == nil {
+		resolution = resDur
+	}
+
 	// Use Thanos Client if it exists (enabled) and remote flag set
 	var pClient prometheusClient.Client
 	if remote != "false" && a.ThanosClient != nil {
@@ -385,8 +512,7 @@ func (a *Accesses) CostDataModelRange(w http.ResponseWriter, r *http.Request, ps
 		pClient = a.PrometheusClient
 	}
 
-	resolutionHours := 1.0
-	data, err := a.Model.ComputeCostDataRange(pClient, a.KubeClientSet, a.Cloud, start, end, window, resolutionHours, namespace, cluster, remoteEnabled, "")
+	data, err := a.Model.ComputeCostDataRange(pClient, a.CloudProvider, window, resolution, namespace, cluster, remoteEnabled)
 	if err != nil {
 		w.Write(WrapData(nil, err))
 	}
@@ -398,55 +524,6 @@ func (a *Accesses) CostDataModelRange(w http.ResponseWriter, r *http.Request, ps
 	}
 }
 
-// CostDataModelRangeLarge is experimental multi-cluster and long-term data storage in SQL support.
-func (a *Accesses) CostDataModelRangeLarge(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	startString := r.URL.Query().Get("start")
-	endString := r.URL.Query().Get("end")
-	windowString := r.URL.Query().Get("window")
-
-	var start time.Time
-	var end time.Time
-	var err error
-
-	if windowString == "" {
-		windowString = "1h"
-	}
-	if startString != "" {
-		start, err = time.Parse(RFC3339Milli, startString)
-		if err != nil {
-			klog.V(1).Infof("Error parsing time " + startString + ". Error: " + err.Error())
-			w.Write(WrapData(nil, err))
-		}
-	} else {
-		window, err := time.ParseDuration(windowString)
-		if err != nil {
-			w.Write(WrapData(nil, fmt.Errorf("Invalid duration '%s'", windowString)))
-
-		}
-		start = time.Now().Add(-2 * window)
-	}
-	if endString != "" {
-		end, err = time.Parse(RFC3339Milli, endString)
-		if err != nil {
-			klog.V(1).Infof("Error parsing time " + endString + ". Error: " + err.Error())
-			w.Write(WrapData(nil, err))
-		}
-	} else {
-		end = time.Now()
-	}
-
-	remoteLayout := "2006-01-02T15:04:05Z"
-	remoteStartStr := start.Format(remoteLayout)
-	remoteEndStr := end.Format(remoteLayout)
-	klog.V(1).Infof("Using remote database for query from %s to %s with window %s", startString, endString, windowString)
-
-	data, err := CostDataRangeFromSQL("", "", windowString, remoteStartStr, remoteEndStr)
-	w.Write(WrapData(data, err))
-}
-
 func parseAggregations(customAggregation, aggregator, filterType string) (string, []string, string) {
 	var key string
 	var filter string
@@ -477,10 +554,10 @@ func (a *Accesses) OutofClusterCosts(w http.ResponseWriter, r *http.Request, ps
 	customAggregation := r.URL.Query().Get("customAggregation")
 	filterType := r.URL.Query().Get("filterType")
 	filterValue := r.URL.Query().Get("filterValue")
-	var data []*costAnalyzerCloud.OutOfClusterAllocation
+	var data []*cloud.OutOfClusterAllocation
 	var err error
 	_, aggregations, filter := parseAggregations(customAggregation, aggregator, filterType)
-	data, err = a.Cloud.ExternalAllocations(start, end, aggregations, filter, filterValue, false)
+	data, err = a.CloudProvider.ExternalAllocations(start, end, aggregations, filter, filterValue, false)
 	w.Write(WrapData(data, err))
 }
 
@@ -517,14 +594,14 @@ func (a *Accesses) OutOfClusterCostsWithCache(w http.ResponseWriter, r *http.Req
 	// attempt to retrieve cost data from cache
 	key := fmt.Sprintf(`%s:%s:%s:%s:%s`, start, end, aggregationkey, filter, filterValue)
 	if value, found := a.OutOfClusterCache.Get(key); found && !disableCache {
-		if data, ok := value.([]*costAnalyzerCloud.OutOfClusterAllocation); ok {
+		if data, ok := value.([]*cloud.OutOfClusterAllocation); ok {
 			w.Write(WrapDataWithMessage(data, nil, fmt.Sprintf("out of cluster cache hit: %s", key)))
 			return
 		}
 		klog.Errorf("caching error: failed to type cast data: %s", key)
 	}
 
-	data, err := a.Cloud.ExternalAllocations(start, end, aggregation, filter, filterValue, false)
+	data, err := a.CloudProvider.ExternalAllocations(start, end, aggregation, filter, filterValue, false)
 	if err == nil {
 		a.OutOfClusterCache.Set(key, data, cache.DefaultExpiration)
 	}
@@ -532,41 +609,41 @@ func (a *Accesses) OutOfClusterCostsWithCache(w http.ResponseWriter, r *http.Req
 	w.Write(WrapDataWithMessage(data, err, fmt.Sprintf("out of cluser cache miss: %s", key)))
 }
 
-func (p *Accesses) GetAllNodePricing(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) GetAllNodePricing(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	data, err := p.Cloud.AllNodePricing()
+	data, err := a.CloudProvider.AllNodePricing()
 	w.Write(WrapData(data, err))
 }
 
-func (p *Accesses) GetConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) GetConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	data, err := p.Cloud.GetConfig()
+	data, err := a.CloudProvider.GetConfig()
 	w.Write(WrapData(data, err))
 }
 
-func (p *Accesses) UpdateSpotInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) UpdateSpotInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	data, err := p.Cloud.UpdateConfig(r.Body, costAnalyzerCloud.SpotInfoUpdateType)
+	data, err := a.CloudProvider.UpdateConfig(r.Body, cloud.SpotInfoUpdateType)
 	if err != nil {
 		w.Write(WrapData(data, err))
 		return
 	}
 	w.Write(WrapData(data, err))
-	err = p.Cloud.DownloadPricingData()
+	err = a.CloudProvider.DownloadPricingData()
 	if err != nil {
 		klog.V(1).Infof("Error redownloading data on config update: %s", err.Error())
 	}
 	return
 }
 
-func (p *Accesses) UpdateAthenaInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) UpdateAthenaInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	data, err := p.Cloud.UpdateConfig(r.Body, costAnalyzerCloud.AthenaInfoUpdateType)
+	data, err := a.CloudProvider.UpdateConfig(r.Body, cloud.AthenaInfoUpdateType)
 	if err != nil {
 		w.Write(WrapData(data, err))
 		return
@@ -575,10 +652,10 @@ func (p *Accesses) UpdateAthenaInfoConfigs(w http.ResponseWriter, r *http.Reques
 	return
 }
 
-func (p *Accesses) UpdateBigQueryInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) UpdateBigQueryInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	data, err := p.Cloud.UpdateConfig(r.Body, costAnalyzerCloud.BigqueryUpdateType)
+	data, err := a.CloudProvider.UpdateConfig(r.Body, cloud.BigqueryUpdateType)
 	if err != nil {
 		w.Write(WrapData(data, err))
 		return
@@ -587,10 +664,10 @@ func (p *Accesses) UpdateBigQueryInfoConfigs(w http.ResponseWriter, r *http.Requ
 	return
 }
 
-func (p *Accesses) UpdateConfigByKey(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) UpdateConfigByKey(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	data, err := p.Cloud.UpdateConfig(r.Body, "")
+	data, err := a.CloudProvider.UpdateConfig(r.Body, "")
 	if err != nil {
 		w.Write(WrapData(data, err))
 		return
@@ -599,11 +676,11 @@ func (p *Accesses) UpdateConfigByKey(w http.ResponseWriter, r *http.Request, ps
 	return
 }
 
-func (p *Accesses) ManagementPlatform(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) ManagementPlatform(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	data, err := p.Cloud.GetManagementPlatform()
+	data, err := a.CloudProvider.GetManagementPlatform()
 	if err != nil {
 		w.Write(WrapData(data, err))
 		return
@@ -612,43 +689,43 @@ func (p *Accesses) ManagementPlatform(w http.ResponseWriter, r *http.Request, ps
 	return
 }
 
-func (p *Accesses) ClusterInfo(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) ClusterInfo(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	data := GetClusterInfo(p.KubeClientSet, p.Cloud)
+	data := GetClusterInfo(a.KubeClientSet, a.CloudProvider)
 
 	w.Write(WrapData(data, nil))
 }
 
-func (p *Accesses) GetClusterInfoMap(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+func (a *Accesses) GetClusterInfoMap(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	data := p.ClusterMap.AsMap()
+	data := a.ClusterMap.AsMap()
 
 	w.Write(WrapData(data, nil))
 }
 
-func (p *Accesses) GetServiceAccountStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
+func (a *Accesses) GetServiceAccountStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	w.Write(WrapData(A.Cloud.ServiceAccountStatus(), nil))
+	w.Write(WrapData(a.CloudProvider.ServiceAccountStatus(), nil))
 }
 
-func (p *Accesses) GetPricingSourceStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
+func (a *Accesses) GetPricingSourceStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	w.Write(WrapData(A.Cloud.PricingSourceStatus(), nil))
+	w.Write(WrapData(a.CloudProvider.PricingSourceStatus(), nil))
 }
 
-func (p *Accesses) GetPrometheusMetadata(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
+func (a *Accesses) GetPrometheusMetadata(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	w.Write(WrapData(prom.Validate(p.PrometheusClient)))
+	w.Write(WrapData(prom.Validate(a.PrometheusClient)))
 }
 
 // Creates a new ClusterManager instance using a boltdb storage. If that fails,
@@ -715,7 +792,7 @@ func handlePanic(p errors.Panic) bool {
 	return p.Type == errors.PanicTypeHTTP
 }
 
-func Initialize(additionalConfigWatchers ...ConfigWatchers) {
+func Initialize(additionalConfigWatchers ...ConfigWatchers) *Accesses {
 	klog.InitFlags(nil)
 	flag.Set("v", "3")
 	flag.Parse()
@@ -815,7 +892,7 @@ func Initialize(additionalConfigWatchers ...ConfigWatchers) {
 	k8sCache.Run()
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
-	cloudProvider, err := costAnalyzerCloud.NewProvider(k8sCache, cloudProviderKey)
+	cloudProvider, err := cloud.NewProvider(k8sCache, cloudProviderKey)
 	if err != nil {
 		panic(err.Error())
 	}
@@ -864,105 +941,7 @@ func Initialize(additionalConfigWatchers ...ConfigWatchers) {
 	// TODO: implement a builder -> controller for stitching new features and other dependencies.
 	clusterManager := newClusterManager()
 
-	cpuGv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "node_cpu_hourly_cost",
-		Help: "node_cpu_hourly_cost hourly cost for each cpu on this node",
-	}, []string{"instance", "node", "instance_type", "region", "provider_id"})
-
-	ramGv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "node_ram_hourly_cost",
-		Help: "node_ram_hourly_cost hourly cost for each gb of ram on this node",
-	}, []string{"instance", "node", "instance_type", "region", "provider_id"})
-
-	gpuGv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "node_gpu_hourly_cost",
-		Help: "node_gpu_hourly_cost hourly cost for each gpu on this node",
-	}, []string{"instance", "node", "instance_type", "region", "provider_id"})
-
-	totalGv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "node_total_hourly_cost",
-		Help: "node_total_hourly_cost Total node cost per hour",
-	}, []string{"instance", "node", "instance_type", "region", "provider_id"})
-
-	spotGv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "kubecost_node_is_spot",
-		Help: "kubecost_node_is_spot Cloud provider info about node preemptibility",
-	}, []string{"instance", "node", "instance_type", "region", "provider_id"})
-
-	pvGv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "pv_hourly_cost",
-		Help: "pv_hourly_cost Cost per GB per hour on a persistent disk",
-	}, []string{"volumename", "persistentvolume", "provider_id"})
-
-	RAMAllocation := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "container_memory_allocation_bytes",
-		Help: "container_memory_allocation_bytes Bytes of RAM used",
-	}, []string{"namespace", "pod", "container", "instance", "node"})
-
-	CPUAllocation := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "container_cpu_allocation",
-		Help: "container_cpu_allocation Percent of a single CPU used in a minute",
-	}, []string{"namespace", "pod", "container", "instance", "node"})
-
-	GPUAllocation := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "container_gpu_allocation",
-		Help: "container_gpu_allocation GPU used",
-	}, []string{"namespace", "pod", "container", "instance", "node"})
-	PVAllocation := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "pod_pvc_allocation",
-		Help: "pod_pvc_allocation Bytes used by a PVC attached to a pod",
-	}, []string{"namespace", "pod", "persistentvolumeclaim", "persistentvolume"})
-
-	NetworkZoneEgressRecorder := prometheus.NewGauge(prometheus.GaugeOpts{
-		Name: "kubecost_network_zone_egress_cost",
-		Help: "kubecost_network_zone_egress_cost Total cost per GB egress across zones",
-	})
-	NetworkRegionEgressRecorder := prometheus.NewGauge(prometheus.GaugeOpts{
-		Name: "kubecost_network_region_egress_cost",
-		Help: "kubecost_network_region_egress_cost Total cost per GB egress across regions",
-	})
-	NetworkInternetEgressRecorder := prometheus.NewGauge(prometheus.GaugeOpts{
-		Name: "kubecost_network_internet_egress_cost",
-		Help: "kubecost_network_internet_egress_cost Total cost per GB of internet egress.",
-	})
-	ClusterManagementCostRecorder := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "kubecost_cluster_management_cost",
-		Help: "kubecost_cluster_management_cost Hourly cost paid as a cluster management fee.",
-	}, []string{"provisioner_name"})
-	LBCostRecorder := prometheus.NewGaugeVec(prometheus.GaugeOpts{ // no differentiation between ELB and ALB right now
-		Name: "kubecost_load_balancer_cost",
-		Help: "kubecost_load_balancer_cost Hourly cost of load balancer",
-	}, []string{"ingress_ip", "namespace", "service_name"}) // assumes one ingress IP per load balancer
-
-	prometheus.MustRegister(cpuGv)
-	prometheus.MustRegister(ramGv)
-	prometheus.MustRegister(gpuGv)
-	prometheus.MustRegister(totalGv)
-	prometheus.MustRegister(pvGv)
-	prometheus.MustRegister(spotGv)
-	prometheus.MustRegister(RAMAllocation)
-	prometheus.MustRegister(CPUAllocation)
-	prometheus.MustRegister(PVAllocation)
-	prometheus.MustRegister(GPUAllocation)
-	prometheus.MustRegister(NetworkZoneEgressRecorder, NetworkRegionEgressRecorder, NetworkInternetEgressRecorder)
-	prometheus.MustRegister(ClusterManagementCostRecorder)
-	prometheus.MustRegister(LBCostRecorder)
-	prometheus.MustRegister(ServiceCollector{
-		KubeClientSet: kubeClientset,
-	})
-	prometheus.MustRegister(DeploymentCollector{
-		KubeClientSet: kubeClientset,
-	})
-	prometheus.MustRegister(StatefulsetCollector{
-		KubeClientSet: kubeClientset,
-	})
-	prometheus.MustRegister(ClusterInfoCollector{
-		KubeClientSet: kubeClientset,
-		Cloud:         cloudProvider,
-	})
-
-	// cache responses from model for a default of 5 minutes; clear expired responses every 10 minutes
-	outOfClusterCache := cache.New(time.Minute*5, time.Minute*10)
+	// Initialize metrics here
 
 	remoteEnabled := env.IsRemoteEnabled()
 	if remoteEnabled {
@@ -971,7 +950,7 @@ func Initialize(additionalConfigWatchers ...ConfigWatchers) {
 		if err != nil {
 			klog.Infof("Error saving cluster id %s", err.Error())
 		}
-		_, _, err = costAnalyzerCloud.GetOrCreateClusterMeta(info["id"], info["name"])
+		_, _, err = cloud.GetOrCreateClusterMeta(info["id"], info["name"])
 		if err != nil {
 			klog.Infof("Unable to set cluster id '%s' for cluster '%s', %s", info["id"], info["name"], err.Error())
 		}
@@ -1008,58 +987,91 @@ func Initialize(additionalConfigWatchers ...ConfigWatchers) {
 		clusterMap = clusters.NewClusterMap(promCli, 5*time.Minute)
 	}
 
-	A = Accesses{
-		PrometheusClient:              promCli,
-		ThanosClient:                  thanosClient,
-		KubeClientSet:                 kubeClientset,
-		ClusterManager:                clusterManager,
-		ClusterMap:                    clusterMap,
-		Cloud:                         cloudProvider,
-		CPUPriceRecorder:              cpuGv,
-		RAMPriceRecorder:              ramGv,
-		GPUPriceRecorder:              gpuGv,
-		NodeTotalPriceRecorder:        totalGv,
-		NodeSpotRecorder:              spotGv,
-		RAMAllocationRecorder:         RAMAllocation,
-		CPUAllocationRecorder:         CPUAllocation,
-		GPUAllocationRecorder:         GPUAllocation,
-		PVAllocationRecorder:          PVAllocation,
-		NetworkZoneEgressRecorder:     NetworkZoneEgressRecorder,
-		NetworkRegionEgressRecorder:   NetworkRegionEgressRecorder,
-		NetworkInternetEgressRecorder: NetworkInternetEgressRecorder,
-		PersistentVolumePriceRecorder: pvGv,
-		ClusterManagementCostRecorder: ClusterManagementCostRecorder,
-		LBCostRecorder:                LBCostRecorder,
-		Model:                         NewCostModel(k8sCache, clusterMap, scrapeInterval),
-		OutOfClusterCache:             outOfClusterCache,
+	// cache responses from model and aggregation for a default of 10 minutes;
+	// clear expired responses every 20 minutes
+	aggregateCache := cache.New(time.Minute*10, time.Minute*20)
+	costDataCache := cache.New(time.Minute*10, time.Minute*20)
+	clusterCostsCache := cache.New(cache.NoExpiration, cache.NoExpiration)
+	outOfClusterCache := cache.New(time.Minute*5, time.Minute*10)
+	settingsCache := cache.New(cache.NoExpiration, cache.NoExpiration)
+
+	// query durations that should be cached longer should be registered here
+	// use relatively prime numbers to minimize likelihood of synchronized
+	// attempts at cache warming
+	day := 24 * time.Hour
+	cacheExpiration := map[time.Duration]time.Duration{
+		day:      maxCacheMinutes1d * time.Minute,
+		2 * day:  maxCacheMinutes2d * time.Minute,
+		7 * day:  maxCacheMinutes7d * time.Minute,
+		30 * day: maxCacheMinutes30d * time.Minute,
 	}
 
-	err = A.Cloud.DownloadPricingData()
+	costModel := NewCostModel(k8sCache, clusterMap, scrapeInterval)
+	metricsEmitter := NewCostModelMetricsEmitter(promCli, k8sCache, cloudProvider, costModel)
+
+	a := &Accesses{
+		Router:            httprouter.New(),
+		PrometheusClient:  promCli,
+		ThanosClient:      thanosClient,
+		KubeClientSet:     kubeClientset,
+		ClusterManager:    clusterManager,
+		ClusterMap:        clusterMap,
+		CloudProvider:     cloudProvider,
+		Model:             costModel,
+		MetricsEmitter:    metricsEmitter,
+		AggregateCache:    aggregateCache,
+		CostDataCache:     costDataCache,
+		ClusterCostsCache: clusterCostsCache,
+		OutOfClusterCache: outOfClusterCache,
+		SettingsCache:     settingsCache,
+		CacheExpiration:   cacheExpiration,
+	}
+	// Use the Accesses instance, itself, as the CostModelAggregator. This is
+	// confusing and unconventional, but necessary so that we can swap it
+	// out for the ETL-adapted version elsewhere.
+	// TODO clean this up once ETL is open-sourced.
+	a.AggAPI = a
+
+	// Initialize mechanism for subscribing to settings changes
+	a.InitializeSettingsPubSub()
+
+	// Warm the aggregate cache unless explicitly set to false
+	if env.IsCacheWarmingEnabled() {
+		log.Infof("Init: AggregateCostModel cache warming enabled")
+		a.warmAggregateCostModelCache()
+	} else {
+		log.Infof("Init: AggregateCostModel cache warming disabled")
+	}
+
+	err = a.CloudProvider.DownloadPricingData()
 	if err != nil {
 		klog.V(1).Info("Failed to download pricing data: " + err.Error())
 	}
 
-	StartCostModelMetricRecording(&A)
-
-	managerEndpoints := cm.NewClusterManagerEndpoints(A.ClusterManager)
-
-	Router.GET("/costDataModel", A.CostDataModel)
-	Router.GET("/costDataModelRange", A.CostDataModelRange)
-	Router.GET("/costDataModelRangeLarge", A.CostDataModelRangeLarge)
-	Router.GET("/outOfClusterCosts", A.OutOfClusterCostsWithCache)
-	Router.GET("/allNodePricing", A.GetAllNodePricing)
-	Router.POST("/refreshPricing", A.RefreshPricingData)
-	Router.GET("/clusterCostsOverTime", A.ClusterCostsOverTime)
-	Router.GET("/clusterCosts", A.ClusterCosts)
-	Router.GET("/validatePrometheus", A.GetPrometheusMetadata)
-	Router.GET("/managementPlatform", A.ManagementPlatform)
-	Router.GET("/clusterInfo", A.ClusterInfo)
-	Router.GET("/clusterInfoMap", A.GetClusterInfoMap)
-	Router.GET("/serviceAccountStatus", A.GetServiceAccountStatus)
-	Router.GET("/pricingSourceStatus", A.GetPricingSourceStatus)
+	a.MetricsEmitter.Start()
+
+	managerEndpoints := cm.NewClusterManagerEndpoints(a.ClusterManager)
+
+	a.Router.GET("/costDataModel", a.CostDataModel)
+	a.Router.GET("/costDataModelRange", a.CostDataModelRange)
+	a.Router.GET("/aggregatedCostModel", a.AggregateCostModelHandler)
+	a.Router.GET("/outOfClusterCosts", a.OutOfClusterCostsWithCache)
+	a.Router.GET("/allNodePricing", a.GetAllNodePricing)
+	a.Router.POST("/refreshPricing", a.RefreshPricingData)
+	a.Router.GET("/clusterCostsOverTime", a.ClusterCostsOverTime)
+	a.Router.GET("/clusterCosts", a.ClusterCosts)
+	a.Router.GET("/clusterCostsFromCache", a.ClusterCostsFromCacheHandler)
+	a.Router.GET("/validatePrometheus", a.GetPrometheusMetadata)
+	a.Router.GET("/managementPlatform", a.ManagementPlatform)
+	a.Router.GET("/clusterInfo", a.ClusterInfo)
+	a.Router.GET("/clusterInfoMap", a.GetClusterInfoMap)
+	a.Router.GET("/serviceAccountStatus", a.GetServiceAccountStatus)
+	a.Router.GET("/pricingSourceStatus", a.GetPricingSourceStatus)
 
 	// cluster manager endpoints
-	Router.GET("/clusters", managerEndpoints.GetAllClusters)
-	Router.PUT("/clusters", managerEndpoints.PutCluster)
-	Router.DELETE("/clusters/:id", managerEndpoints.DeleteCluster)
+	a.Router.GET("/clusters", managerEndpoints.GetAllClusters)
+	a.Router.PUT("/clusters", managerEndpoints.PutCluster)
+	a.Router.DELETE("/clusters/:id", managerEndpoints.DeleteCluster)
+
+	return a
 }

+ 157 - 0
pkg/costmodel/settings.go

@@ -0,0 +1,157 @@
+package costmodel
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/cloud"
+	"github.com/kubecost/cost-model/pkg/log"
+	"github.com/patrickmn/go-cache"
+	"k8s.io/klog"
+)
+
+// InitializeSettingsPubSub sets up the pub/sub mechanisms and kicks off
+// routines to detect and publish changes, as well as some routines that
+// subscribe and take actions.
+func (a *Accesses) InitializeSettingsPubSub() {
+	a.settingsSubscribers = map[string][]chan string{}
+
+	// Publish settings changes
+	go func(a *Accesses) {
+		for {
+			// Publish changes to custom pricing
+			if a.customPricingHasChanged() {
+				for _, ch := range a.settingsSubscribers[CustomPricingSetting] {
+					if data, ok := a.SettingsCache.Get(CustomPricingSetting); ok {
+						if cpStr, ok := data.(string); ok {
+							ch <- cpStr
+						}
+					}
+				}
+			}
+
+			// Publish changes to discount
+			if a.discountHasChanged() {
+				for _, ch := range a.settingsSubscribers[DiscountSetting] {
+					if data, ok := a.SettingsCache.Get(DiscountSetting); ok {
+						if discStr, ok := data.(string); ok {
+							ch <- discStr
+						}
+					}
+				}
+			}
+
+			time.Sleep(500 * time.Millisecond)
+		}
+	}(a)
+
+	// Clear caches when custom pricing or discount changes
+	go func(a *Accesses) {
+		costDataCacheCh := make(chan string)
+		a.SubscribeToCustomPricingChanges(costDataCacheCh)
+		a.SubscribeToDiscountChanges(costDataCacheCh)
+		for {
+			msg := <-costDataCacheCh
+			log.Infof("Flushing cost data caches: %s", msg)
+			a.AggregateCache.Flush()
+			a.CostDataCache.Flush()
+		}
+	}(a)
+}
+
+// SubscribeToCustomPricingChanges subscribes the given channel to receive
+// custom pricing changes.
+func (a *Accesses) SubscribeToCustomPricingChanges(ch chan string) {
+	a.settingsMutex.Lock()
+	defer a.settingsMutex.Unlock()
+
+	a.settingsSubscribers[CustomPricingSetting] = append(a.settingsSubscribers[CustomPricingSetting], ch)
+}
+
+// SubscribeToDiscountChanges subscribes the given channel to receive discount
+// changes.
+func (a *Accesses) SubscribeToDiscountChanges(ch chan string) {
+	a.settingsMutex.Lock()
+	defer a.settingsMutex.Unlock()
+
+	a.settingsSubscribers[DiscountSetting] = append(a.settingsSubscribers[DiscountSetting], ch)
+}
+
+// customPricingHasChanged returns true if custom pricing settings have changed
+// since the last time this function was called.
+func (a *Accesses) customPricingHasChanged() bool {
+	customPricing, err := a.CloudProvider.GetConfig()
+	if err != nil || customPricing == nil {
+		klog.Errorf("error accessing cloud provider configuration: %s", err)
+		return false
+	}
+
+	// describe parameters by which we determine whether or not custom
+	// pricing settings have changed
+	encodeCustomPricing := func(cp *cloud.CustomPricing) string {
+		return fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s:%s:%+v", cp.CustomPricesEnabled, cp.CPU, cp.SpotCPU,
+			cp.RAM, cp.SpotRAM, cp.GPU, cp.Storage, cp.CurrencyCode, cp.SharedCosts)
+	}
+
+	// compare cached custom pricing parameters with current values
+	cpStr := encodeCustomPricing(customPricing)
+	cpStrCached := ""
+	val, found := a.SettingsCache.Get(CustomPricingSetting)
+	if !found {
+		// if no settings are found (e.g. upon first call) cache custom pricing settings but
+		// return false, as nothing has "changed" per se
+		a.SettingsCache.Set(CustomPricingSetting, cpStr, cache.NoExpiration)
+		return false
+	}
+	cpStrCached, ok := val.(string)
+	if !ok {
+		klog.Errorf("caching error: failed to cast custom pricing to string")
+	}
+	if cpStr == cpStrCached {
+		return false
+	}
+
+	// cache new custom pricing settings
+	a.SettingsCache.Set(CustomPricingSetting, cpStr, cache.DefaultExpiration)
+
+	return true
+}
+
+// discountHasChanged returns true if discount settings have changed
+// since the last time this function was called.
+func (a *Accesses) discountHasChanged() bool {
+	customPricing, err := a.CloudProvider.GetConfig()
+	if err != nil || customPricing == nil {
+		klog.Errorf("error accessing cloud provider configuration: %s", err)
+		return false
+	}
+
+	// describe parameters by which we determine whether or not discount
+	// settings have changed
+	encodeDiscount := func(cp *cloud.CustomPricing) string {
+		return fmt.Sprintf("%s:%s", cp.Discount, cp.NegotiatedDiscount)
+	}
+
+	// compare cached discount parameters with current values
+	discStr := encodeDiscount(customPricing)
+	discStrCached := ""
+	val, found := a.SettingsCache.Get(DiscountSetting)
+	if !found {
+		// if no settings are found (e.g. upon first call) cache discount settings but
+		// return false, as nothing has "changed" per se
+		a.SettingsCache.Set(DiscountSetting, discStr, cache.NoExpiration)
+		return false
+	}
+	discStrCached, ok := val.(string)
+	if !ok {
+		klog.Errorf("caching error: failed to cast discount to string")
+	}
+	if discStr == discStrCached {
+		return false
+	}
+
+	// cache new discount settings
+	a.SettingsCache.Set(DiscountSetting, discStr, cache.DefaultExpiration)
+
+	return true
+}

+ 70 - 0
pkg/env/costmodelenv.go

@@ -1,5 +1,13 @@
 package env
 
+import (
+	"regexp"
+	"strconv"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/log"
+)
+
 const (
 	AppVersionEnvVar = "APP_VERSION"
 
@@ -22,6 +30,9 @@ const (
 	ConfigPathEnvVar               = "CONFIG_PATH"
 	CloudProviderAPIKeyEnvVar      = "CLOUD_PROVIDER_API_KEY"
 
+	EmitPodAnnotationsMetricEnvVar       = "EMIT_POD_ANNOTATIONS_METRIC"
+	EmitNamespaceAnnotationsMetricEnvVar = "EMIT_NAMESPACE_ANNOTATIONS_METRIC"
+
 	ThanosEnabledEnvVar      = "THANOS_ENABLED"
 	ThanosQueryUrlEnvVar     = "THANOS_QUERY_URL"
 	ThanosOffsetEnvVar       = "THANOS_QUERY_OFFSET"
@@ -43,6 +54,11 @@ const (
 	InsecureSkipVerify = "INSECURE_SKIP_VERIFY"
 
 	KubeConfigPathEnvVar = "KUBECONFIG_PATH"
+
+	UTCOffsetEnvVar = "UTC_OFFSET"
+
+	CacheWarmingEnabledEnvVar = "CACHE_WARMING_ENABLED"
+	ETLEnabledEnvVar          = "ETL_ENABLED"
 )
 
 // GetAWSAccessKeyID returns the environment variable value for AWSAccessKeyIDEnvVar which represents
@@ -51,6 +67,18 @@ func GetAppVersion() string {
 	return Get(AppVersionEnvVar, "1.70.0")
 }
 
+// IsEmitNamespaceAnnotationsMetric returns true if cost-model is configured to emit the kube_namespace_annotations metric
+// containing the namespace annotations
+func IsEmitNamespaceAnnotationsMetric() bool {
+	return GetBool(EmitNamespaceAnnotationsMetricEnvVar, false)
+}
+
+// IsEmitPodAnnotationsMetric returns true if cost-model is configured to emit the kube_pod_annotations metric containing
+// pod annotations.
+func IsEmitPodAnnotationsMetric() bool {
+	return GetBool(EmitPodAnnotationsMetricEnvVar, false)
+}
+
 // GetAWSAccessKeyID returns the environment variable value for AWSAccessKeyIDEnvVar which represents
 // the AWS access key for authentication
 func GetAWSAccessKeyID() string {
@@ -250,3 +278,45 @@ func GetMultiClusterBearerToken() string {
 func GetKubeConfigPath() string {
 	return Get(KubeConfigPathEnvVar, "")
 }
+
+// GetUTCOffset returns the environment variable value for UTCOffset
+func GetUTCOffset() string {
+	return Get(UTCOffsetEnvVar, "")
+}
+
+// GetParsedUTCOffset returns the duration of the configured UTC offset
+func GetParsedUTCOffset() time.Duration {
+	offset := time.Duration(0)
+
+	if offsetStr := GetUTCOffset(); offsetStr != "" {
+		regex := regexp.MustCompile(`^(\+|-)(\d\d):(\d\d)$`)
+		match := regex.FindStringSubmatch(offsetStr)
+		if match == nil {
+			log.Warningf("Illegal UTC offset: %s", offsetStr)
+			return offset
+		}
+
+		sig := 1
+		if match[1] == "-" {
+			sig = -1
+		}
+
+		hrs64, _ := strconv.ParseInt(match[2], 10, 64)
+		hrs := sig * int(hrs64)
+
+		mins64, _ := strconv.ParseInt(match[3], 10, 64)
+		mins := sig * int(mins64)
+
+		offset = time.Duration(hrs)*time.Hour + time.Duration(mins)
+	}
+
+	return offset
+}
+
+func IsCacheWarmingEnabled() bool {
+	return GetBool(CacheWarmingEnabledEnvVar, true)
+}
+
+func IsETLEnabled() bool {
+	return GetBool(ETLEnabledEnvVar, true)
+}

+ 0 - 39
pkg/errors/errors.go

@@ -1,39 +0,0 @@
-package errors
-
-import "sync"
-
-// Error collection helper
-type ErrorCollector struct {
-	m      sync.Mutex
-	errors []error
-}
-
-// Reports an error to the collector. Ignores if the error is nil.
-func (ec *ErrorCollector) Report(e error) {
-	if e == nil {
-		return
-	}
-
-	ec.m.Lock()
-	defer ec.m.Unlock()
-
-	ec.errors = append(ec.errors, e)
-}
-
-// Whether or not the collector caught errors
-func (ec *ErrorCollector) IsError() bool {
-	ec.m.Lock()
-	defer ec.m.Unlock()
-
-	return len(ec.errors) > 0
-}
-
-// Errors caught by the collector
-func (ec *ErrorCollector) Errors() []error {
-	ec.m.Lock()
-	defer ec.m.Unlock()
-
-	errs := make([]error, len(ec.errors))
-	copy(errs, ec.errors)
-	return errs
-}

+ 1446 - 0
pkg/kubecost/allocation.go

@@ -0,0 +1,1446 @@
+package kubecost
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/log"
+)
+
+// IdleSuffix indicates an idle allocation property
+const IdleSuffix = "__idle__"
+
+// SharedSuffix indicates a shared allocation property
+const SharedSuffix = "__shared__"
+
+// UnallocatedSuffix indicates an unallocated allocation property
+const UnallocatedSuffix = "__unallocated__"
+
+// ShareWeighted indicates that a shared resource should be shared as a
+// proportion of the cost of the remaining allocations.
+const ShareWeighted = "__weighted__"
+
+// ShareEven indicates that a shared resource should be shared evenly across
+// all remaining allocations.
+const ShareEven = "__even__"
+
+// ShareNone indicates that a shareable resource should not be shared
+const ShareNone = "__none__"
+
+// Allocation is a unit of resource allocation and cost for a given window
+// of time and for a given kubernetes construct with its associated set of
+// properties.
+type Allocation struct {
+	Name            string     `json:"name"`
+	Properties      Properties `json:"properties,omitempty"`
+	Start           time.Time  `json:"start"`
+	End             time.Time  `json:"end"`
+	Minutes         float64    `json:"minutes"`
+	ActiveStart     time.Time  `json:"-"`
+	CPUCoreHours    float64    `json:"cpuCoreHours"`
+	CPUCost         float64    `json:"cpuCost"`
+	CPUEfficiency   float64    `json:"cpuEfficiency"`
+	GPUHours        float64    `json:"gpuHours"`
+	GPUCost         float64    `json:"gpuCost"`
+	NetworkCost     float64    `json:"networkCost"`
+	PVByteHours     float64    `json:"pvByteHours"`
+	PVCost          float64    `json:"pvCost"`
+	RAMByteHours    float64    `json:"ramByteHours"`
+	RAMCost         float64    `json:"ramCost"`
+	RAMEfficiency   float64    `json:"ramEfficiency"`
+	SharedCost      float64    `json:"sharedCost"`
+	TotalCost       float64    `json:"totalCost"`
+	TotalEfficiency float64    `json:"totalEfficiency"`
+	// Profiler        *log.Profiler `json:"-"`
+}
+
+// AllocationMatchFunc is a function that can be used to match Allocations by
+// returning true for any given Allocation if a condition is met.
+type AllocationMatchFunc func(*Allocation) bool
+
+// Add returns the result of summing the two given Allocations, which sums the
+// summary fields (e.g. costs, resources) and recomputes efficiency. Neither of
+// the two original Allocations are mutated in the process.
+func (a *Allocation) Add(that *Allocation) (*Allocation, error) {
+	if a == nil {
+		return that.Clone(), nil
+	}
+
+	if !a.Start.Equal(that.Start) || !a.End.Equal(that.End) {
+		return nil, fmt.Errorf("error adding Allocations: mismatched windows")
+	}
+
+	agg := a.Clone()
+	// agg.Profiler = a.Profiler
+	agg.add(that, false, false)
+
+	return agg, nil
+}
+
+// Clone returns a deep copy of the given Allocation
+func (a *Allocation) Clone() *Allocation {
+	if a == nil {
+		return nil
+	}
+
+	return &Allocation{
+		Name:            a.Name,
+		Properties:      a.Properties.Clone(),
+		Start:           a.Start,
+		End:             a.End,
+		Minutes:         a.Minutes,
+		ActiveStart:     a.ActiveStart,
+		CPUCoreHours:    a.CPUCoreHours,
+		CPUCost:         a.CPUCost,
+		CPUEfficiency:   a.CPUEfficiency,
+		GPUHours:        a.GPUHours,
+		GPUCost:         a.GPUCost,
+		NetworkCost:     a.NetworkCost,
+		PVByteHours:     a.PVByteHours,
+		PVCost:          a.PVCost,
+		RAMByteHours:    a.RAMByteHours,
+		RAMCost:         a.RAMCost,
+		RAMEfficiency:   a.RAMEfficiency,
+		SharedCost:      a.SharedCost,
+		TotalCost:       a.TotalCost,
+		TotalEfficiency: a.TotalEfficiency,
+	}
+}
+
+func (a *Allocation) Equal(that *Allocation) bool {
+	if a == nil || that == nil {
+		return false
+	}
+
+	if a.Name != that.Name {
+		return false
+	}
+	if !a.Start.Equal(that.Start) {
+		return false
+	}
+	if !a.End.Equal(that.End) {
+		return false
+	}
+	if a.Minutes != that.Minutes {
+		return false
+	}
+	if !a.ActiveStart.Equal(that.ActiveStart) {
+		return false
+	}
+	if a.CPUCoreHours != that.CPUCoreHours {
+		return false
+	}
+	if a.CPUCost != that.CPUCost {
+		return false
+	}
+	if a.CPUEfficiency != that.CPUEfficiency {
+		return false
+	}
+	if a.GPUHours != that.GPUHours {
+		return false
+	}
+	if a.GPUCost != that.GPUCost {
+		return false
+	}
+	if a.NetworkCost != that.NetworkCost {
+		return false
+	}
+	if a.PVByteHours != that.PVByteHours {
+		return false
+	}
+	if a.PVCost != that.PVCost {
+		return false
+	}
+	if a.RAMByteHours != that.RAMByteHours {
+		return false
+	}
+	if a.RAMCost != that.RAMCost {
+		return false
+	}
+	if a.RAMEfficiency != that.RAMEfficiency {
+		return false
+	}
+	if a.SharedCost != that.SharedCost {
+		return false
+	}
+	if a.TotalCost != that.TotalCost {
+		return false
+	}
+	if a.TotalEfficiency != that.TotalEfficiency {
+		return false
+	}
+	if !a.Properties.Equal(&that.Properties) {
+		return false
+	}
+
+	return true
+}
+
+// Resolution returns the duration of time covered by the Allocation
+func (a *Allocation) Resolution() time.Duration {
+	return a.End.Sub(a.Start)
+}
+
+// IsAggregated is true if the given Allocation has been aggregated, which we
+// define by a lack of Properties.
+func (a *Allocation) IsAggregated() bool {
+	return a == nil || a.Properties == nil
+}
+
+// IsIdle is true if the given Allocation represents idle costs.
+func (a *Allocation) IsIdle() bool {
+	return strings.Contains(a.Name, IdleSuffix)
+}
+
+// IsUnallocated is true if the given Allocation represents unallocated costs.
+func (a *Allocation) IsUnallocated() bool {
+	return strings.Contains(a.Name, UnallocatedSuffix)
+}
+
+// MatchesFilter returns true if the Allocation passes the given AllocationFilter
+func (a *Allocation) MatchesFilter(f AllocationMatchFunc) bool {
+	return f(a)
+}
+
+// MatchesAll takes a variadic list of Properties, returning true iff the
+// Allocation matches each set of Properties.
+func (a *Allocation) MatchesAll(ps ...Properties) bool {
+	// a nil Allocation doesn't match any Properties
+	if a == nil {
+		return false
+	}
+
+	for _, p := range ps {
+		if !a.Properties.Matches(p) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// MatchesOne takes a variadic list of Properties, returning true iff the
+// Allocation matches at least one of the set of Properties.
+func (a *Allocation) MatchesOne(ps ...Properties) bool {
+	// a nil Allocation doesn't match any Properties
+	if a == nil {
+		return false
+	}
+
+	for _, p := range ps {
+		if a.Properties.Matches(p) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Share works like Add, but converts the entire cost of the given Allocation
+// to SharedCost, rather than adding to the individual resource costs.
+func (a *Allocation) Share(that *Allocation) (*Allocation, error) {
+	if a == nil {
+		return that.Clone(), nil
+	}
+
+	if !a.Start.Equal(that.Start) {
+		return nil, fmt.Errorf("mismatched start time: expected %s, received %s", a.Start, that.Start)
+	}
+	if !a.End.Equal(that.End) {
+		return nil, fmt.Errorf("mismatched start time: expected %s, received %s", a.End, that.End)
+	}
+
+	agg := a.Clone()
+	agg.add(that, true, false)
+
+	return agg, nil
+}
+
+// String represents the given Allocation as a string
+func (a *Allocation) String() string {
+	return fmt.Sprintf("%s%s=%.2f", a.Name, NewWindow(&a.Start, &a.End), a.TotalCost)
+}
+
+func (a *Allocation) add(that *Allocation, isShared, isAccumulating bool) {
+	if a == nil {
+		a = that
+
+		// reset properties
+		thatCluster, _ := that.Properties.GetCluster()
+		thatNode, _ := that.Properties.GetNode()
+		a.Properties = Properties{ClusterProp: thatCluster, NodeProp: thatNode}
+
+		return
+	}
+
+	aCluster, _ := a.Properties.GetCluster()
+	thatCluster, _ := that.Properties.GetCluster()
+	aNode, _ := a.Properties.GetNode()
+	thatNode, _ := that.Properties.GetNode()
+
+	// reset properties
+	a.Properties = nil
+
+	// ensure that we carry cluster ID and/or node over if they're the same
+	// required for idle/shared cost allocation
+	if aCluster == thatCluster {
+		a.Properties = Properties{ClusterProp: aCluster}
+	}
+	if aNode == thatNode {
+		if a.Properties == nil {
+			a.Properties = Properties{NodeProp: aNode}
+		} else {
+			a.Properties.SetNode(aNode)
+		}
+	}
+
+	if that.ActiveStart.Before(a.ActiveStart) {
+		a.ActiveStart = that.ActiveStart
+	}
+
+	if isAccumulating {
+		if a.Start.After(that.Start) {
+			a.Start = that.Start
+		}
+
+		if a.End.Before(that.End) {
+			a.End = that.End
+		}
+
+		a.Minutes += that.Minutes
+	} else if that.Minutes > a.Minutes {
+		a.Minutes = that.Minutes
+	}
+
+	// isShared determines whether the given allocation should be spread evenly
+	// across resources (e.g. sharing idle allocation) or lumped into a shared
+	// cost category (e.g. sharing namespace, labels).
+	if isShared {
+		a.SharedCost += that.TotalCost
+	} else {
+		a.CPUCoreHours += that.CPUCoreHours
+		a.GPUHours += that.GPUHours
+		a.RAMByteHours += that.RAMByteHours
+		a.PVByteHours += that.PVByteHours
+
+		aggCPUCost := a.CPUCost + that.CPUCost
+		if aggCPUCost > 0 {
+			a.CPUEfficiency = (a.CPUEfficiency*a.CPUCost + that.CPUEfficiency*that.CPUCost) / aggCPUCost
+		} else {
+			a.CPUEfficiency = 0.0
+		}
+
+		aggRAMCost := a.RAMCost + that.RAMCost
+		if aggRAMCost > 0 {
+			a.RAMEfficiency = (a.RAMEfficiency*a.RAMCost + that.RAMEfficiency*that.RAMCost) / aggRAMCost
+		} else {
+			a.RAMEfficiency = 0.0
+		}
+
+		aggTotalCost := a.TotalCost + that.TotalCost
+		if aggTotalCost > 0 {
+			a.TotalEfficiency = (a.TotalEfficiency*a.TotalCost + that.TotalEfficiency*that.TotalCost) / aggTotalCost
+		} else {
+			aggTotalCost = 0.0
+		}
+
+		a.SharedCost += that.SharedCost
+		a.CPUCost += that.CPUCost
+		a.GPUCost += that.GPUCost
+		a.NetworkCost += that.NetworkCost
+		a.RAMCost += that.RAMCost
+		a.PVCost += that.PVCost
+	}
+
+	a.TotalCost += that.TotalCost
+}
+
+// AllocationSet stores a set of Allocations, each with a unique name, that share
+// a window. An AllocationSet is mutable, so treat it like a threadsafe map.
+type AllocationSet struct {
+	sync.RWMutex
+	// Profiler    *log.Profiler
+	allocations map[string]*Allocation
+	idleKeys    map[string]bool
+	Window      Window
+	Warnings    []string
+	Errors      []string
+}
+
+// NewAllocationSet instantiates a new AllocationSet and, optionally, inserts
+// the given list of Allocations
+func NewAllocationSet(start, end time.Time, allocs ...*Allocation) *AllocationSet {
+	as := &AllocationSet{
+		allocations: map[string]*Allocation{},
+		Window:      NewWindow(&start, &end),
+	}
+
+	for _, a := range allocs {
+		as.Insert(a)
+	}
+
+	return as
+}
+
+// AllocationAggregationOptions provide advanced functionality to AggregateBy, including
+// filtering results and sharing allocations. FilterFuncs are a list of match
+// functions such that, if any function fails, the allocation is ignored.
+// ShareFuncs are a list of match functions such that, if any function
+// succeeds, the allocation is marked as a shared resource. ShareIdle is a
+// simple flag for sharing idle resources.
+type AllocationAggregationOptions struct {
+	FilterFuncs       []AllocationMatchFunc
+	SplitIdle         bool
+	MergeUnallocated  bool
+	ShareFuncs        []AllocationMatchFunc
+	ShareIdle         string
+	ShareSplit        string
+	SharedHourlyCosts map[string]float64
+}
+
+// AggregateBy aggregates the Allocations in the given AllocationSet by the given
+// Property. This will only be legal if the AllocationSet is divisible by the
+// given Property; e.g. Containers can be divided by Namespace, but not vice-a-versa.
+func (as *AllocationSet) AggregateBy(properties Properties, options *AllocationAggregationOptions) error {
+	// The order of operations for aggregating allocations is as follows:
+	// 1. move shared and/or idle allocations to separate sets if options
+	//    indicate that they should be shared
+	// 2. idle coefficients
+	// 2.a) if idle allocation is to be shared, compute idle coefficients
+	//      (do not compute shared coefficients here, see step 5)
+	// 2.b) if idle allocation is NOT shared, but filters are present, compute
+	//      idle filtration coefficients for the purpose of only returning the
+	//      portion of idle allocation that would have been shared with the
+	//      unfiltered results set. (See unit tests 5.a,b,c)
+	// 3. ignore allocation if it fails any of the FilterFuncs
+	// 4. generate aggregation key and insert allocation into the output set
+	// 5. if there are shared allocations, compute sharing coefficients on
+	//    the aggregated set, then share allocation accordingly
+	// 6. if the merge idle option is enabled, merge any remaining idle
+	//    allocations into a single idle allocation
+
+	// TODO niko/etl revisit (ShareIdle: ShareEven) case, which is probably wrong
+	// (and, frankly, ill-defined; i.e. evenly across clusters? within clusters?)
+
+	if options == nil {
+		options = &AllocationAggregationOptions{}
+	}
+
+	if as.IsEmpty() {
+		return nil
+	}
+
+	// aggSet will collect the aggregated allocations
+	aggSet := &AllocationSet{
+		// Profiler: as.Profiler,
+		Window: as.Window.Clone(),
+	}
+
+	// idleSet will be shared among aggSet after initial aggregation
+	// is complete
+	idleSet := &AllocationSet{
+		// Profiler: as.Profiler,
+		Window: as.Window.Clone(),
+	}
+
+	// shareSet will be shared among aggSet after initial aggregation
+	// is complete
+	shareSet := &AllocationSet{
+		// Profiler: as.Profiler
+		Window: as.Window.Clone(),
+	}
+
+	for name, cost := range options.SharedHourlyCosts {
+		if cost > 0.0 {
+			hours := as.Resolution().Hours()
+
+			// If set ends in the future, adjust hours accordingly
+			diff := time.Now().Sub(as.End())
+			if diff < 0.0 {
+				hours += diff.Hours()
+			}
+
+			totalSharedCost := cost * hours
+
+			shareSet.Insert(&Allocation{
+				Name:       fmt.Sprintf("%s/%s", name, SharedSuffix),
+				Start:      as.Start(),
+				End:        as.End(),
+				SharedCost: totalSharedCost,
+				TotalCost:  totalSharedCost,
+			})
+		}
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	// Loop and find all of the idle and shared allocations initially. Add
+	// them to their respective sets, removing them from the set of
+	// allocations to aggregate.
+	for _, alloc := range as.allocations {
+		cluster, err := alloc.Properties.GetCluster()
+		if err != nil {
+			log.Warningf("AllocationSet.AggregateBy: missing cluster for allocation: %s", alloc.Name)
+			return err
+		}
+
+		// Idle allocation doesn't get aggregated, so it can be passed through,
+		// whether or not it is shared. If it is shared, it is put in idleSet
+		// because shareSet may be split by different rules (even/weighted).
+		if alloc.IsIdle() {
+			// Can't recursively call Delete() due to lock acquisition
+			delete(as.idleKeys, alloc.Name)
+			delete(as.allocations, alloc.Name)
+
+			if options.ShareIdle == ShareEven || options.ShareIdle == ShareWeighted {
+				idleSet.Insert(alloc)
+			} else {
+				aggSet.Insert(alloc)
+			}
+		}
+
+		// If any of the share funcs succeed, share the allocation. Do this
+		// prior to filtering so that shared namespaces, etc do not get
+		// filtered out before we have a chance to share them.
+		for _, sf := range options.ShareFuncs {
+			if sf(alloc) {
+				// Can't recursively call Delete() due to lock acquisition
+				delete(as.idleKeys, alloc.Name)
+				delete(as.allocations, alloc.Name)
+
+				alloc.Name = fmt.Sprintf("%s/%s", cluster, SharedSuffix)
+				shareSet.Insert(alloc)
+				break
+			}
+		}
+	}
+
+	if len(as.allocations) == 0 {
+		log.Warningf("ETL: AggregateBy: no allocations to aggregate")
+		emptySet := &AllocationSet{
+			Window: as.Window.Clone(),
+		}
+		as.allocations = emptySet.allocations
+		return nil
+	}
+
+	// In order to correctly apply idle and shared resource coefficients appropriately,
+	// we need to determine the coefficients for the full set of data. The ensures that
+	// the ratios are maintained through filtering.
+	// idleCoefficients are organized by [cluster][allocation][resource]=coeff
+	var idleCoefficients map[string]map[string]map[string]float64
+	// shareCoefficients are organized by [allocation][resource]=coeff (no cluster)
+	var shareCoefficients map[string]float64
+	var err error
+
+	if idleSet.Length() > 0 && options.ShareIdle != ShareNone {
+		idleCoefficients, err = computeIdleCoeffs(properties, options, as)
+		if err != nil {
+			log.Warningf("AllocationSet.AggregateBy: compute idle coeff: %s", err)
+			return err
+		}
+	}
+
+	// If we're not sharing idle and we're filtering, we need to track the
+	// amount of each idle allocation to "delete" in order to maintain parity
+	// with the idle-allocated results. That is, we want to return only the
+	// idle cost that would have been shared with the unfiltered portion of
+	// the results, not the full idle cost.
+	var idleFiltrationCoefficients map[string]map[string]map[string]float64
+	if len(options.FilterFuncs) > 0 && options.ShareIdle == ShareNone {
+		idleFiltrationCoefficients, err = computeIdleCoeffs(properties, options, as)
+		if err != nil {
+			log.Warningf("AllocationSet.AggregateBy: compute idle coeff: %s", err)
+			return err
+		}
+	}
+
+	for _, alloc := range as.allocations {
+		cluster, err := alloc.Properties.GetCluster()
+		if err != nil {
+			log.Warningf("AllocationSet.AggregateBy: missing cluster for allocation: %s", alloc.Name)
+			return err
+		}
+
+		skip := false
+
+		// If any of the filter funcs fail, immediately skip the allocation.
+		for _, ff := range options.FilterFuncs {
+			if !ff(alloc) {
+				skip = true
+				break
+			}
+		}
+		if skip {
+			// If we are tracking idle filtration coefficients, delete the
+			// entry corresponding to the filtered allocation. (Deleting the
+			// entry will result in that proportional amount being removed
+			// from the idle allocation at the end of the process.)
+			if idleFiltrationCoefficients != nil {
+				if ifcc, ok := idleFiltrationCoefficients[cluster]; ok {
+					delete(ifcc, alloc.Name)
+				}
+			}
+
+			continue
+		}
+
+		// Split idle allocations and distribute among aggregated allocations
+		// NOTE: if idle allocation is off (i.e. ShareIdle == ShareNone) then all
+		// idle allocations will be in the aggSet at this point.
+		if idleSet.Length() > 0 {
+			// Distribute idle allocations by coefficient per-cluster, per-allocation
+			for _, idleAlloc := range idleSet.allocations {
+				// Only share idle if the cluster matches; i.e. the allocation
+				// is from the same cluster as the idle costs
+				idleCluster, err := idleAlloc.Properties.GetCluster()
+				if err != nil {
+					return err
+				}
+				if idleCluster != cluster {
+					continue
+				}
+
+				// Make sure idle coefficients exist
+				if _, ok := idleCoefficients[cluster]; !ok {
+					log.Errorf("ETL: share (idle) allocation: error getting allocation coefficient [no cluster: '%s' in coefficients] for '%s'", cluster, alloc.Name)
+					continue
+				}
+				if _, ok := idleCoefficients[cluster][alloc.Name]; !ok {
+					log.Errorf("ETL: share (idle) allocation: error getting allocation coefficienct for '%s'", alloc.Name)
+					continue
+				}
+
+				alloc.CPUCoreHours += idleAlloc.CPUCoreHours * idleCoefficients[cluster][alloc.Name]["cpu"]
+				alloc.GPUHours += idleAlloc.GPUHours * idleCoefficients[cluster][alloc.Name]["gpu"]
+				alloc.RAMByteHours += idleAlloc.RAMByteHours * idleCoefficients[cluster][alloc.Name]["ram"]
+
+				idleCPUCost := idleAlloc.CPUCost * idleCoefficients[cluster][alloc.Name]["cpu"]
+				idleGPUCost := idleAlloc.GPUCost * idleCoefficients[cluster][alloc.Name]["gpu"]
+				idleRAMCost := idleAlloc.RAMCost * idleCoefficients[cluster][alloc.Name]["ram"]
+				alloc.CPUCost += idleCPUCost
+				alloc.GPUCost += idleGPUCost
+				alloc.RAMCost += idleRAMCost
+				alloc.TotalCost += idleCPUCost + idleGPUCost + idleRAMCost
+			}
+		}
+
+		key, err := alloc.generateKey(properties)
+		if err != nil {
+			return err
+		}
+
+		alloc.Name = key
+		if options.MergeUnallocated && alloc.IsUnallocated() {
+			alloc.Name = UnallocatedSuffix
+		}
+
+		aggSet.Insert(alloc)
+	}
+
+	var clusterIdleFiltrationCoeffs map[string]map[string]float64
+	if idleFiltrationCoefficients != nil {
+		clusterIdleFiltrationCoeffs = map[string]map[string]float64{}
+
+		for cluster, m := range idleFiltrationCoefficients {
+			if _, ok := clusterIdleFiltrationCoeffs[cluster]; !ok {
+				clusterIdleFiltrationCoeffs[cluster] = map[string]float64{
+					"cpu": 0.0,
+					"gpu": 0.0,
+					"ram": 0.0,
+				}
+			}
+
+			for _, n := range m {
+				for resource, val := range n {
+					clusterIdleFiltrationCoeffs[cluster][resource] += val
+				}
+			}
+		}
+	}
+
+	// If we have filters, and so have computed coefficients for scaling idle
+	// allocation costs by cluster, then use those coefficients to scale down
+	// each idle coefficient in the aggSet.
+	if len(aggSet.idleKeys) > 0 && clusterIdleFiltrationCoeffs != nil {
+		for idleKey := range aggSet.idleKeys {
+			idleAlloc := aggSet.Get(idleKey)
+
+			cluster, err := idleAlloc.Properties.GetCluster()
+			if err != nil {
+				log.Warningf("AggregateBy: idle allocation without cluster: %s", idleAlloc)
+			}
+
+			if resourceCoeffs, ok := clusterIdleFiltrationCoeffs[cluster]; ok {
+				idleAlloc.CPUCost *= resourceCoeffs["cpu"]
+				idleAlloc.CPUCoreHours *= resourceCoeffs["cpu"]
+				idleAlloc.RAMCost *= resourceCoeffs["ram"]
+				idleAlloc.RAMByteHours *= resourceCoeffs["ram"]
+				idleAlloc.TotalCost = idleAlloc.CPUCost + idleAlloc.RAMCost
+			}
+
+		}
+	}
+
+	// Split shared allocations and distribute among aggregated allocations
+	if shareSet.Length() > 0 {
+		shareCoefficients, err = computeShareCoeffs(properties, options, aggSet)
+		if err != nil {
+			log.Warningf("AllocationSet.AggregateBy: compute shared coeff: missing cluster ID: %s", err)
+			return err
+		}
+
+		for _, alloc := range aggSet.allocations {
+			if alloc.IsIdle() {
+				// Skip idle allocations (they do not receive shared allocation)
+				continue
+			}
+
+			// Distribute shared allocations by coefficient per-allocation
+			// NOTE: share coefficients do not partition by cluster, like
+			// idle coefficients do.
+			for _, sharedAlloc := range shareSet.allocations {
+				if _, ok := shareCoefficients[alloc.Name]; !ok {
+					log.Errorf("ETL: share allocation: error getting allocation coefficienct for '%s'", alloc.Name)
+					continue
+				}
+
+				alloc.SharedCost += sharedAlloc.TotalCost * shareCoefficients[alloc.Name]
+				alloc.TotalCost += sharedAlloc.TotalCost * shareCoefficients[alloc.Name]
+			}
+		}
+	}
+
+	// Combine all idle allocations into a single "__idle__" allocation
+	if !options.SplitIdle {
+		for _, idleAlloc := range aggSet.IdleAllocations() {
+			aggSet.Delete(idleAlloc.Name)
+			idleAlloc.Name = IdleSuffix
+			aggSet.Insert(idleAlloc)
+		}
+	}
+
+	as.allocations = aggSet.allocations
+
+	return nil
+}
+
+// TODO niko/etl deprecate the use of a map of resources here, we only use totals
+func computeShareCoeffs(properties Properties, options *AllocationAggregationOptions, as *AllocationSet) (map[string]float64, error) {
+	// Compute coeffs by totalling per-allocation, then dividing by the total.
+	coeffs := map[string]float64{}
+
+	// Compute totals for all allocations
+	total := 0.0
+
+	// ShareEven counts each aggregation with even weight, whereas ShareWeighted
+	// counts each aggregation proportionally to its respective costs
+	shareType := options.ShareSplit
+
+	// Record allocation values first, then normalize by totals to get percentages
+	for name, alloc := range as.allocations {
+		if alloc.IsIdle() {
+			// Skip idle allocations in coefficient calculation
+			continue
+		}
+
+		if shareType == ShareEven {
+			// Not additive - set to 1.0 for even distribution
+			coeffs[name] = 1.0
+			// Total is always additive
+			total += 1.0
+		} else {
+			// Both are additive for weighted distribution
+			coeffs[name] += alloc.TotalCost
+			total += alloc.TotalCost
+		}
+	}
+
+	// Normalize coefficients by totals
+	for a := range coeffs {
+		if coeffs[a] > 0 && total > 0 {
+			coeffs[a] /= total
+		} else {
+			log.Warningf("ETL: invalid values for shared coefficients: %d, %d", coeffs[a], total)
+			coeffs[a] = 0.0
+		}
+	}
+
+	return coeffs, nil
+}
+
+func computeIdleCoeffs(properties Properties, options *AllocationAggregationOptions, as *AllocationSet) (map[string]map[string]map[string]float64, error) {
+	types := []string{"cpu", "gpu", "ram"}
+
+	// Compute idle coefficients, then save them in AllocationAggregationOptions
+	coeffs := map[string]map[string]map[string]float64{}
+
+	// Compute totals per resource for CPU, GPU, RAM, and PV
+	totals := map[string]map[string]float64{}
+
+	// ShareEven counts each allocation with even weight, whereas ShareWeighted
+	// counts each allocation proportionally to its respective costs
+	shareType := options.ShareIdle
+
+	// Record allocation values first, then normalize by totals to get percentages
+	for _, alloc := range as.allocations {
+		if alloc.IsIdle() {
+			// Skip idle allocations in coefficient calculation
+			continue
+		}
+
+		// If any of the share funcs succeed, share the allocation. Do this
+		// prior to filtering so that shared namespaces, etc do not get
+		// filtered out before we have a chance to share them.
+		skip := false
+		for _, sf := range options.ShareFuncs {
+			if sf(alloc) {
+				skip = true
+				break
+			}
+		}
+		if skip {
+			continue
+		}
+
+		// We need to key the allocations by cluster id
+		clusterID, err := alloc.Properties.GetCluster()
+		if err != nil {
+			return nil, err
+		}
+
+		// get the name key for the allocation
+		name := alloc.Name
+
+		// Create cluster based tables if they don't exist
+		if _, ok := coeffs[clusterID]; !ok {
+			coeffs[clusterID] = map[string]map[string]float64{}
+		}
+		if _, ok := totals[clusterID]; !ok {
+			totals[clusterID] = map[string]float64{}
+		}
+
+		if _, ok := coeffs[clusterID][name]; !ok {
+			coeffs[clusterID][name] = map[string]float64{}
+		}
+
+		if shareType == ShareEven {
+			for _, r := range types {
+				// Not additive - hard set to 1.0
+				coeffs[clusterID][name][r] = 1.0
+
+				// totals are additive
+				totals[clusterID][r] += 1.0
+			}
+		} else {
+			coeffs[clusterID][name]["cpu"] += alloc.CPUCost
+			coeffs[clusterID][name]["gpu"] += alloc.GPUCost
+			coeffs[clusterID][name]["ram"] += alloc.RAMCost
+
+			totals[clusterID]["cpu"] += alloc.CPUCost
+			totals[clusterID]["gpu"] += alloc.GPUCost
+			totals[clusterID]["ram"] += alloc.RAMCost
+		}
+	}
+
+	// Normalize coefficients by totals
+	for c := range coeffs {
+		for a := range coeffs[c] {
+			for _, r := range types {
+				if coeffs[c][a][r] > 0 && totals[c][r] > 0 {
+					coeffs[c][a][r] /= totals[c][r]
+				}
+			}
+		}
+	}
+
+	return coeffs, nil
+}
+
// generateKey builds the aggregation key for this allocation by joining, with
// "/", one segment per property requested in the given Properties. Segment
// order is fixed: cluster, node, namespace, controller kind, controller, pod,
// container, service, labels. Cluster, node, namespace, pod, and container
// are required when requested (missing values return an error); controller,
// services, and labels fall back to UnallocatedSuffix when absent.
func (alloc *Allocation) generateKey(properties Properties) (string, error) {
	// Names will ultimately be joined into a single name, which uniquely
	// identifies allocations.
	names := []string{}

	if properties.HasCluster() {
		cluster, err := alloc.Properties.GetCluster()
		if err != nil {
			return "", err
		}
		names = append(names, cluster)
	}

	if properties.HasNode() {
		node, err := alloc.Properties.GetNode()
		if err != nil {
			return "", err
		}
		names = append(names, node)
	}

	if properties.HasNamespace() {
		namespace, err := alloc.Properties.GetNamespace()
		if err != nil {
			return "", err
		}
		names = append(names, namespace)
	}

	if properties.HasControllerKind() {
		controllerKind, err := alloc.Properties.GetControllerKind()
		if err != nil {
			// Indicate that allocation has no controller
			controllerKind = UnallocatedSuffix
		}

		// If the caller asked for a specific controller kind and this
		// allocation's kind differs, treat it as unallocated.
		if prop, _ := properties.GetControllerKind(); prop != "" && prop != controllerKind {
			// The allocation does not have the specified controller kind
			controllerKind = UnallocatedSuffix
		}
		names = append(names, controllerKind)
	}

	if properties.HasController() {
		// When aggregating by controller without controller kind, prefix
		// the controller name with its kind (if known) for disambiguation.
		if !properties.HasControllerKind() {
			controllerKind, err := alloc.Properties.GetControllerKind()
			if err == nil {
				names = append(names, controllerKind)
			}
		}

		controller, err := alloc.Properties.GetController()
		if err != nil {
			// Indicate that allocation has no controller
			controller = UnallocatedSuffix
		}

		names = append(names, controller)
	}

	if properties.HasPod() {
		pod, err := alloc.Properties.GetPod()
		if err != nil {
			return "", err
		}

		names = append(names, pod)
	}

	if properties.HasContainer() {
		container, err := alloc.Properties.GetContainer()
		if err != nil {
			return "", err
		}

		names = append(names, container)
	}

	if properties.HasService() {
		services, err := alloc.Properties.GetServices()
		if err != nil {
			// Indicate that allocation has no services
			names = append(names, UnallocatedSuffix)
		} else {
			// TODO niko/etl support multi-service aggregation
			if len(services) > 0 {
				// Only the first service is used until multi-service
				// aggregation is supported (see TODO above).
				for _, service := range services {
					names = append(names, service)
					break
				}
			} else {
				// Indicate that allocation has no services
				names = append(names, UnallocatedSuffix)
			}
		}
	}

	if properties.HasLabel() {
		labels, err := alloc.Properties.GetLabels() // labels that the individual allocation possesses
		if err != nil {
			// Indicate that allocation has no labels
			names = append(names, UnallocatedSuffix)
		} else {
			labelNames := []string{}

			aggLabels, err := properties.GetLabels() // potential labels to aggregate on supplied by the API caller
			if err != nil {
				// We've already checked HasLabel, so this should never occur
				return "", err
			}
			// calvin - support multi-label aggregation
			for labelName := range aggLabels {
				if val, ok := labels[labelName]; ok {
					labelNames = append(labelNames, fmt.Sprintf("%s=%s", labelName, val))
				} else if indexOf(UnallocatedSuffix, labelNames) == -1 { // if UnallocatedSuffix not already in names
					labelNames = append(labelNames, UnallocatedSuffix)
				}
			}
			// resolve arbitrary ordering. e.g., app=app0/env=env0 is the same agg as env=env0/app=app0
			if len(labelNames) > 1 {
				sort.Strings(labelNames)
			}
			unallocatedSuffixIndex := indexOf(UnallocatedSuffix, labelNames)
			// suffix should be at index 0 if it exists b/c of underscores
			if unallocatedSuffixIndex != -1 {
				// Move UnallocatedSuffix from wherever sorting placed it to
				// the end of the label segments.
				labelNames = append(labelNames[:unallocatedSuffixIndex], labelNames[unallocatedSuffixIndex+1:]...)
				labelNames = append(labelNames, UnallocatedSuffix) // append to end
			}

			names = append(names, labelNames...)
		}
	}

	return strings.Join(names, "/"), nil
}
+
// indexOf returns the index of the first element of arr equal to v under
// case-insensitive (Unicode case-folding) comparison, or -1 if none matches.
// Helper for slice membership; not sure if repeated elsewhere in our codebase.
func indexOf(v string, arr []string) int {
	for i := range arr {
		// Caseless equivalence, same as the original implementation
		if strings.EqualFold(arr[i], v) {
			return i
		}
	}

	return -1
}
+
+// Clone returns a new AllocationSet with a deep copy of the given
+// AllocationSet's allocations.
+func (as *AllocationSet) Clone() *AllocationSet {
+	if as == nil {
+		return nil
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	allocs := map[string]*Allocation{}
+	for k, v := range as.allocations {
+		allocs[k] = v.Clone()
+	}
+
+	return &AllocationSet{
+		allocations: allocs,
+		Window:      as.Window.Clone(),
+	}
+}
+
+// Delete removes the allocation with the given name from the set
+func (as *AllocationSet) Delete(name string) {
+	if as == nil {
+		return
+	}
+
+	as.Lock()
+	defer as.Unlock()
+	delete(as.idleKeys, name)
+	delete(as.allocations, name)
+}
+
// Each invokes the given function for each Allocation in the set, passing
// the allocation's key and the allocation itself.
// NOTE(review): unlike most other methods on AllocationSet, Each iterates
// without taking the read lock — presumably callers synchronize externally;
// confirm before relying on this under concurrent mutation.
func (as *AllocationSet) Each(f func(string, *Allocation)) {
	if as == nil {
		return
	}

	for k, a := range as.allocations {
		f(k, a)
	}
}
+
+// End returns the End time of the AllocationSet window
+func (as *AllocationSet) End() time.Time {
+	if as == nil {
+		log.Warningf("Allocation ETL: calling End on nil AllocationSet")
+		return time.Unix(0, 0)
+	}
+	if as.Window.End() == nil {
+		log.Warningf("Allocation ETL: AllocationSet with illegal window: End is nil; len(as.allocations)=%d", len(as.allocations))
+		return time.Unix(0, 0)
+	}
+	return *as.Window.End()
+}
+
+// Get returns the Allocation at the given key in the AllocationSet
+func (as *AllocationSet) Get(key string) *Allocation {
+	as.RLock()
+	defer as.RUnlock()
+
+	if alloc, ok := as.allocations[key]; ok {
+		return alloc
+	}
+
+	return nil
+}
+
+// IdleAllocations returns a map of the idle allocations in the AllocationSet.
+// Returns clones of the actual Allocations, so mutability is not a problem.
+func (as *AllocationSet) IdleAllocations() map[string]*Allocation {
+	idles := map[string]*Allocation{}
+
+	if as.IsEmpty() {
+		return idles
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	for key := range as.idleKeys {
+		if alloc, ok := as.allocations[key]; ok {
+			idles[key] = alloc.Clone()
+		}
+	}
+
+	return idles
+}
+
// Insert aggregates the current entry in the AllocationSet by the given Allocation,
// but only if the Allocation is valid, i.e. matches the AllocationSet's window. If
// there is no existing entry, one is created. Nil error response indicates success.
// NOTE(review): the window validation described above is not visible in the
// internal insert(); confirm it happens elsewhere or update this comment.
func (as *AllocationSet) Insert(that *Allocation) error {
	// Delegate to the internal insert without accumulation semantics.
	return as.insert(that, false)
}
+
+func (as *AllocationSet) insert(that *Allocation, accumulate bool) error {
+	if as.IsEmpty() {
+		as.Lock()
+		as.allocations = map[string]*Allocation{}
+		as.idleKeys = map[string]bool{}
+		as.Unlock()
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	// Add the given Allocation to the existing entry, if there is one;
+	// otherwise just set directly into allocations
+	if _, ok := as.allocations[that.Name]; !ok {
+		as.allocations[that.Name] = that
+	} else {
+		as.allocations[that.Name].add(that, false, accumulate)
+	}
+
+	// If the given Allocation is an idle one, record that
+	if that.IsIdle() {
+		as.idleKeys[that.Name] = true
+	}
+
+	return nil
+}
+
+// IsEmpty returns true if the AllocationSet is nil, or if it contains
+// zero allocations.
+func (as *AllocationSet) IsEmpty() bool {
+	if as == nil || len(as.allocations) == 0 {
+		return true
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+	return as.allocations == nil || len(as.allocations) == 0
+}
+
+// Length returns the number of Allocations in the set
+func (as *AllocationSet) Length() int {
+	if as == nil {
+		return 0
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+	return len(as.allocations)
+}
+
+// Map clones and returns a map of the AllocationSet's Allocations
+func (as *AllocationSet) Map() map[string]*Allocation {
+	if as.IsEmpty() {
+		return map[string]*Allocation{}
+	}
+
+	return as.Clone().allocations
+}
+
// MarshalJSON JSON-encodes the AllocationSet as its underlying map of
// allocations, holding the read lock for the duration of the marshal.
func (as *AllocationSet) MarshalJSON() ([]byte, error) {
	as.RLock()
	defer as.RUnlock()
	return json.Marshal(as.allocations)
}
+
// Resolution returns the AllocationSet's window duration.
// NOTE(review): no nil-receiver guard (unlike Start/End); a nil set panics.
func (as *AllocationSet) Resolution() time.Duration {
	return as.Window.Duration()
}
+
+func (as *AllocationSet) Set(alloc *Allocation) error {
+	if as.IsEmpty() {
+		as.Lock()
+		as.allocations = map[string]*Allocation{}
+		as.idleKeys = map[string]bool{}
+		as.Unlock()
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	as.allocations[alloc.Name] = alloc
+
+	// If the given Allocation is an idle one, record that
+	if alloc.IsIdle() {
+		as.idleKeys[alloc.Name] = true
+	}
+
+	return nil
+}
+
+// Start returns the Start time of the AllocationSet window
+func (as *AllocationSet) Start() time.Time {
+	if as == nil {
+		log.Warningf("Allocation ETL: calling Start on nil AllocationSet")
+		return time.Unix(0, 0)
+	}
+	if as.Window.Start() == nil {
+		log.Warningf("Allocation ETL: AllocationSet with illegal window: Start is nil; len(as.allocations)=%d", len(as.allocations))
+		return time.Unix(0, 0)
+	}
+	return *as.Window.Start()
+}
+
// String represents the given AllocationSet as a string of the form
// AllocationSet{length; window; totalCost}; a nil set renders as "<nil>".
func (as *AllocationSet) String() string {
	if as == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AllocationSet{length: %d; window: %s; totalCost: %.2f}",
		as.Length(), as.Window, as.TotalCost())
}
+
+// TotalCost returns the sum of all TotalCosts of the allocations contained
+func (as *AllocationSet) TotalCost() float64 {
+	if as.IsEmpty() {
+		return 0.0
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	tc := 0.0
+	for _, a := range as.allocations {
+		tc += a.TotalCost
+	}
+	return tc
+}
+
+func (as *AllocationSet) UTCOffset() time.Duration {
+	_, zone := as.Start().Zone()
+	return time.Duration(zone) * time.Second
+}
+
+func (as *AllocationSet) accumulate(that *AllocationSet) (*AllocationSet, error) {
+	if as.IsEmpty() {
+		return that, nil
+	}
+
+	if that.IsEmpty() {
+		return as, nil
+	}
+
+	if that.Start().Before(as.End()) {
+		timefmt := "2006-01-02T15:04:05"
+		err := fmt.Sprintf("that [%s, %s); that [%s, %s)\n", as.Start().Format(timefmt), as.End().Format(timefmt), that.Start().Format(timefmt), that.End().Format(timefmt))
+		return nil, fmt.Errorf("error accumulating AllocationSets: overlapping windows: %s", err)
+	}
+
+	// Set start, end to min(start), max(end)
+	start := as.Start()
+	end := as.End()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	if that.End().After(end) {
+		end = that.End()
+	}
+
+	acc := NewAllocationSet(start, end)
+
+	as.RLock()
+	defer as.RUnlock()
+
+	that.RLock()
+	defer that.RUnlock()
+
+	for _, alloc := range as.allocations {
+		// Change Start and End to match the new window. However, do not
+		// change Minutes because that will be accounted for during the
+		// insert step, if in fact there are two allocations to add.
+		alloc.Start = start
+		alloc.End = end
+
+		err := acc.insert(alloc, true)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	for _, alloc := range that.allocations {
+		// Change Start and End to match the new window. However, do not
+		// change Minutes because that will be accounted for during the
+		// insert step, if in fact there are two allocations to add.
+		alloc.Start = start
+		alloc.End = end
+
+		err := acc.insert(alloc, true)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return acc, nil
+}
+
// AllocationSetRange is a sequence of AllocationSets. The embedded RWMutex
// guards the allocations slice.
// NOTE(review): Window() reads the first and last sets as earliest/latest,
// so the slice is presumably expected to be ordered chronologically — confirm.
type AllocationSetRange struct {
	sync.RWMutex
	allocations []*AllocationSet
}
+
// NewAllocationSetRange instantiates a new range composed of the given
// AllocationSets, in the given order.
func NewAllocationSetRange(allocs ...*AllocationSet) *AllocationSetRange {
	return &AllocationSetRange{
		allocations: allocs,
	}
}
+
+// Accumulate sums each AllocationSet in the given range, returning a single cumulative
+// AllocationSet for the entire range.
+func (asr *AllocationSetRange) Accumulate() (*AllocationSet, error) {
+	var allocSet *AllocationSet
+	var err error
+
+	asr.RLock()
+	defer asr.RUnlock()
+
+	for _, as := range asr.allocations {
+		allocSet, err = allocSet.accumulate(as)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return allocSet, nil
+}
+
+// TODO niko/etl accumulate into lower-resolution chunks of the given resolution
+// func (asr *AllocationSetRange) AccumulateBy(resolution time.Duration) *AllocationSetRange
+
+func (asr *AllocationSetRange) AggregateBy(properties Properties, options *AllocationAggregationOptions) error {
+	aggRange := &AllocationSetRange{allocations: []*AllocationSet{}}
+
+	asr.Lock()
+	defer asr.Unlock()
+
+	for _, as := range asr.allocations {
+		err := as.AggregateBy(properties, options)
+		if err != nil {
+			return err
+		}
+		aggRange.allocations = append(aggRange.allocations, as)
+	}
+
+	asr.allocations = aggRange.allocations
+
+	return nil
+}
+
// Append adds the given AllocationSet to the end of the range. It performs
// no validation of the set's window against those already in the range.
func (asr *AllocationSetRange) Append(that *AllocationSet) {
	asr.Lock()
	defer asr.Unlock()
	asr.allocations = append(asr.allocations, that)
}
+
// Each invokes the given function for each AllocationSet in the range,
// passing the set's index and the set itself.
// NOTE(review): iteration does not hold the range's read lock — presumably
// callers synchronize externally; confirm before relying on this under
// concurrent mutation.
func (asr *AllocationSetRange) Each(f func(int, *AllocationSet)) {
	if asr == nil {
		return
	}

	for i, as := range asr.allocations {
		f(i, as)
	}
}
+
+func (asr *AllocationSetRange) Get(i int) (*AllocationSet, error) {
+	if i < 0 || i >= len(asr.allocations) {
+		return nil, fmt.Errorf("AllocationSetRange: index out of range: %d", i)
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+	return asr.allocations[i], nil
+}
+
+func (asr *AllocationSetRange) Length() int {
+	if asr == nil || asr.allocations == nil {
+		return 0
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+	return len(asr.allocations)
+}
+
+func (asr *AllocationSetRange) MarshalJSON() ([]byte, error) {
+	asr.RLock()
+	asr.RUnlock()
+	return json.Marshal(asr.allocations)
+}
+
+func (asr *AllocationSetRange) Slice() []*AllocationSet {
+	if asr == nil || asr.allocations == nil {
+		return nil
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+	copy := []*AllocationSet{}
+	for _, as := range asr.allocations {
+		copy = append(copy, as.Clone())
+	}
+	return copy
+}
+
// String represents the given AllocationSetRange as a string of the form
// AllocationSetRange{length}; a nil range renders as "<nil>".
func (asr *AllocationSetRange) String() string {
	if asr == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AllocationSetRange{length: %d}", asr.Length())
}
+
+func (asr *AllocationSetRange) UTCOffset() time.Duration {
+	if asr.Length() == 0 {
+		return 0
+	}
+
+	as, err := asr.Get(0)
+	if err != nil {
+		return 0
+	}
+	return as.UTCOffset()
+}
+
// Window returns the full window that the AllocationSetRange spans, from the
// start of the first AllocationSet to the end of the last one.
// NOTE(review): asr.allocations is indexed here without holding the read lock
// (unlike Get/Slice), and first/last are assumed to be earliest/latest —
// confirm the slice is ordered and not mutated concurrently.
func (asr *AllocationSetRange) Window() Window {
	if asr == nil || asr.Length() == 0 {
		return NewWindow(nil, nil)
	}

	start := asr.allocations[0].Start()
	end := asr.allocations[asr.Length()-1].End()

	return NewWindow(&start, &end)
}

+ 1153 - 0
pkg/kubecost/allocation_test.go

@@ -0,0 +1,1153 @@
+package kubecost
+
+import (
+	"fmt"
+	"math"
+	"testing"
+	"time"
+)
+
+const day = 24 * time.Hour
+
// NewUnitAllocation constructs a test Allocation whose resource quantities,
// costs, and efficiencies are all 1 (TotalCost 5). An empty name defaults to
// "cluster1/namespace1/pod1/container1"; nil props defaults to a fixed
// cluster1/node1/namespace1/deployment1/pod1/container1 property set. The
// allocation's window runs from start for the given resolution. For idle
// allocations (per name), PV and network costs are zeroed and shifted into
// CPU and RAM so TotalCost stays 5.
func NewUnitAllocation(name string, start time.Time, resolution time.Duration, props *Properties) *Allocation {
	if name == "" {
		name = "cluster1/namespace1/pod1/container1"
	}

	properties := &Properties{}
	if props == nil {
		// Default property set used by most unit tests
		properties.SetCluster("cluster1")
		properties.SetNode("node1")
		properties.SetNamespace("namespace1")
		properties.SetControllerKind("deployment")
		properties.SetController("deployment1")
		properties.SetPod("pod1")
		properties.SetContainer("container1")
	} else {
		properties = props
	}

	end := start.Add(resolution)

	alloc := &Allocation{
		Name:            name,
		Properties:      *properties,
		Start:           start,
		End:             end,
		Minutes:         1440,
		CPUCoreHours:    1,
		CPUCost:         1,
		CPUEfficiency:   1,
		GPUHours:        1,
		GPUCost:         1,
		NetworkCost:     1,
		PVByteHours:     1,
		PVCost:          1,
		RAMByteHours:    1,
		RAMCost:         1,
		RAMEfficiency:   1,
		TotalCost:       5,
		TotalEfficiency: 1,
	}

	// If idle allocation, remove non-idle costs, but maintain total cost
	// (the -2.0 from PV and network is offset by +1.0 each to CPU and RAM)
	if alloc.IsIdle() {
		alloc.PVByteHours = 0.0
		alloc.PVCost = 0.0
		alloc.NetworkCost = 0.0

		alloc.CPUCoreHours += 1.0
		alloc.CPUCost += 1.0
		alloc.RAMByteHours += 1.0
		alloc.RAMCost += 1.0
	}

	return alloc
}
+
+func TestAllocation_Add(t *testing.T) {
+	var nilAlloc *Allocation
+	zeroAlloc := &Allocation{}
+
+	// nil + nil == nil
+	nilNilSum, err := nilAlloc.Add(nilAlloc)
+	if err != nil {
+		t.Fatalf("Allocation.Add unexpected error: %s", err)
+	}
+	if nilNilSum != nil {
+		t.Fatalf("Allocation.Add failed; exp: nil; act: %s", nilNilSum)
+	}
+
+	// nil + zero == zero
+	nilZeroSum, err := nilAlloc.Add(zeroAlloc)
+	if err != nil {
+		t.Fatalf("Allocation.Add unexpected error: %s", err)
+	}
+	if nilZeroSum == nil || nilZeroSum.TotalCost != 0.0 {
+		t.Fatalf("Allocation.Add failed; exp: 0.0; act: %s", nilZeroSum)
+	}
+
+	// TODO niko/etl more
+}
+
+// TODO niko/etl
+// func TestAllocation_Clone(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocation_IsIdle(t *testing.T) {}
+
// TestAllocation_MatchesAll verifies that MatchesAll requires every given
// Properties instance to match (logical AND), that a nil allocation never
// matches, and that an empty condition list vacuously matches.
func TestAllocation_MatchesAll(t *testing.T) {
	var alloc *Allocation

	// nil Allocations never match
	if alloc.MatchesAll() {
		t.Fatalf("Allocation.MatchesAll: expected no match on nil allocation")
	}

	today := time.Now().UTC().Truncate(day)
	alloc = NewUnitAllocation("", today, day, nil)

	// Matches when no Properties are given
	if !alloc.MatchesAll() {
		t.Fatalf("Allocation.MatchesAll: expected match on no conditions")
	}

	// Matches when all Properties match
	if !alloc.MatchesAll(Properties{
		NamespaceProp: "namespace1",
	}, Properties{
		ClusterProp:        "cluster1",
		ControllerKindProp: "deployment",
	}, Properties{
		NodeProp: "node1",
	}) {
		t.Fatalf("Allocation.MatchesAll: expected match when all Properties are met")
	}

	// Doesn't match when one Property doesn't match
	if alloc.MatchesAll(Properties{
		NamespaceProp: "namespace1",
		ServiceProp:   []string{"missing"},
	}, Properties{
		ClusterProp:        "cluster1",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesAll: expected no match when one Properties is not met")
	}

	// Doesn't match when no Properties are met
	if alloc.MatchesAll(Properties{
		NamespaceProp: "namespace1",
		ServiceProp:   []string{"missing"},
	}, Properties{
		ClusterProp:        "cluster2",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesAll: expected no match when no Properties are met")
	}
}
+
// TestAllocation_MatchesOne verifies that MatchesOne requires at least one
// given Properties instance to match (logical OR), that a nil allocation
// never matches, and that an empty condition list never matches.
func TestAllocation_MatchesOne(t *testing.T) {
	var alloc *Allocation

	// nil Allocations never match
	if alloc.MatchesOne() {
		t.Fatalf("Allocation.MatchesOne: expected no match on nil allocation")
	}

	today := time.Now().UTC().Truncate(day)
	alloc = NewUnitAllocation("", today, day, nil)

	// Doesn't match when no Properties are given
	if alloc.MatchesOne() {
		t.Fatalf("Allocation.MatchesOne: expected no match on no conditions")
	}

	// Matches when all Properties match
	if !alloc.MatchesOne(Properties{
		NamespaceProp: "namespace1",
	}, Properties{
		ClusterProp:        "cluster1",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesOne: expected match when all Properties are met")
	}

	// Matches when one Property doesn't match
	if !alloc.MatchesOne(Properties{
		NamespaceProp: "namespace1",
		ServiceProp:   []string{"missing"},
	}, Properties{
		ClusterProp:        "cluster1",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesOne: expected match when one Properties is met")
	}

	// Doesn't match when no Properties are met
	if alloc.MatchesOne(Properties{
		NamespaceProp: "namespace1",
		ServiceProp:   []string{"missing"},
	}, Properties{
		ClusterProp:        "cluster2",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesOne: expected no match when no Properties are met")
	}
}
+
// TestAllocation_String is a placeholder for tests of Allocation.String.
func TestAllocation_String(t *testing.T) {
	// TODO niko/etl
}
+
// TestNewAllocationSet is a placeholder for tests of NewAllocationSet.
func TestNewAllocationSet(t *testing.T) {
	// TODO niko/etl
}
+
+// generateAllocationSet builds the fixture AllocationSet used by the
+// AggregateBy tests: two clusters (each with an idle allocation), three
+// namespaces, and a mix of deployments, daemonsets, statefulsets, labels,
+// and services. Each active allocation is a unit allocation (cost 5.00)
+// unless overridden below; the window for every allocation is
+// [start, start+day). See the topology table in
+// TestAllocationSet_AggregateBy for the resulting cost breakdown.
+func generateAllocationSet(start time.Time) *AllocationSet {
+	// Idle allocations
+	a1i := NewUnitAllocation(fmt.Sprintf("cluster1/%s", IdleSuffix), start, day, &Properties{
+		ClusterProp: "cluster1",
+		NodeProp:    "node1",
+	})
+	a1i.CPUCost = 5.0
+	a1i.RAMCost = 15.0
+	a1i.GPUCost = 0.0
+	a1i.TotalCost = 20.0
+
+	a2i := NewUnitAllocation(fmt.Sprintf("cluster2/%s", IdleSuffix), start, day, &Properties{
+		ClusterProp: "cluster2",
+	})
+	a2i.CPUCost = 5.0
+	a2i.RAMCost = 5.0
+	a2i.GPUCost = 0.0
+	a2i.TotalCost = 10.0
+
+	// Active allocations
+	a1111 := NewUnitAllocation("cluster1/namespace1/pod1/container1", start, day, &Properties{
+		ClusterProp:   "cluster1",
+		NamespaceProp: "namespace1",
+		PodProp:       "pod1",
+		ContainerProp: "container1",
+	})
+	a1111.RAMCost = 11.00
+	a1111.TotalCost = 15.00
+
+	a11abc2 := NewUnitAllocation("cluster1/namespace1/pod-abc/container2", start, day, &Properties{
+		ClusterProp:   "cluster1",
+		NamespaceProp: "namespace1",
+		PodProp:       "pod-abc",
+		ContainerProp: "container2",
+	})
+
+	a11def3 := NewUnitAllocation("cluster1/namespace1/pod-def/container3", start, day, &Properties{
+		ClusterProp:   "cluster1",
+		NamespaceProp: "namespace1",
+		PodProp:       "pod-def",
+		ContainerProp: "container3",
+	})
+
+	a12ghi4 := NewUnitAllocation("cluster1/namespace2/pod-ghi/container4", start, day, &Properties{
+		ClusterProp:   "cluster1",
+		NamespaceProp: "namespace2",
+		PodProp:       "pod-ghi",
+		ContainerProp: "container4",
+	})
+
+	a12ghi5 := NewUnitAllocation("cluster1/namespace2/pod-ghi/container5", start, day, &Properties{
+		ClusterProp:   "cluster1",
+		NamespaceProp: "namespace2",
+		PodProp:       "pod-ghi",
+		ContainerProp: "container5",
+	})
+
+	a12jkl6 := NewUnitAllocation("cluster1/namespace2/pod-jkl/container6", start, day, &Properties{
+		ClusterProp:   "cluster1",
+		NamespaceProp: "namespace2",
+		PodProp:       "pod-jkl",
+		ContainerProp: "container6",
+	})
+
+	a22mno4 := NewUnitAllocation("cluster2/namespace2/pod-mno/container4", start, day, &Properties{
+		ClusterProp:   "cluster2",
+		NamespaceProp: "namespace2",
+		PodProp:       "pod-mno",
+		ContainerProp: "container4",
+	})
+
+	a22mno5 := NewUnitAllocation("cluster2/namespace2/pod-mno/container5", start, day, &Properties{
+		ClusterProp:   "cluster2",
+		NamespaceProp: "namespace2",
+		PodProp:       "pod-mno",
+		ContainerProp: "container5",
+	})
+
+	a22pqr6 := NewUnitAllocation("cluster2/namespace2/pod-pqr/container6", start, day, &Properties{
+		ClusterProp:   "cluster2",
+		NamespaceProp: "namespace2",
+		PodProp:       "pod-pqr",
+		ContainerProp: "container6",
+	})
+
+	a23stu7 := NewUnitAllocation("cluster2/namespace3/pod-stu/container7", start, day, &Properties{
+		ClusterProp:   "cluster2",
+		NamespaceProp: "namespace3",
+		PodProp:       "pod-stu",
+		ContainerProp: "container7",
+	})
+
+	a23vwx8 := NewUnitAllocation("cluster2/namespace3/pod-vwx/container8", start, day, &Properties{
+		ClusterProp:   "cluster2",
+		NamespaceProp: "namespace3",
+		PodProp:       "pod-vwx",
+		ContainerProp: "container8",
+	})
+
+	a23vwx9 := NewUnitAllocation("cluster2/namespace3/pod-vwx/container9", start, day, &Properties{
+		ClusterProp:   "cluster2",
+		NamespaceProp: "namespace3",
+		PodProp:       "pod-vwx",
+		ContainerProp: "container9",
+	})
+
+	// Controllers
+
+	a11abc2.Properties.SetControllerKind("deployment")
+	a11abc2.Properties.SetController("deployment1")
+	a11def3.Properties.SetControllerKind("deployment")
+	a11def3.Properties.SetController("deployment1")
+
+	a12ghi4.Properties.SetControllerKind("deployment")
+	a12ghi4.Properties.SetController("deployment2")
+	a12ghi5.Properties.SetControllerKind("deployment")
+	a12ghi5.Properties.SetController("deployment2")
+	a22mno4.Properties.SetControllerKind("deployment")
+	a22mno4.Properties.SetController("deployment2")
+	a22mno5.Properties.SetControllerKind("deployment")
+	a22mno5.Properties.SetController("deployment2")
+
+	a23stu7.Properties.SetControllerKind("deployment")
+	a23stu7.Properties.SetController("deployment3")
+
+	a12jkl6.Properties.SetControllerKind("daemonset")
+	a12jkl6.Properties.SetController("daemonset1")
+	a22pqr6.Properties.SetControllerKind("daemonset")
+	a22pqr6.Properties.SetController("daemonset1")
+
+	a23vwx8.Properties.SetControllerKind("statefulset")
+	a23vwx8.Properties.SetController("statefulset1")
+	a23vwx9.Properties.SetControllerKind("statefulset")
+	a23vwx9.Properties.SetController("statefulset1")
+
+	// Labels
+
+	a1111.Properties.SetLabels(map[string]string{"app": "app1", "env": "env1"})
+	a12ghi4.Properties.SetLabels(map[string]string{"app": "app2", "env": "env2"})
+	a12ghi5.Properties.SetLabels(map[string]string{"app": "app2", "env": "env2"})
+	a22mno4.Properties.SetLabels(map[string]string{"app": "app2"})
+	a22mno5.Properties.SetLabels(map[string]string{"app": "app2"})
+
+	// Services
+
+	a12jkl6.Properties.SetServices([]string{"service1"})
+	a22pqr6.Properties.SetServices([]string{"service1"})
+
+	return NewAllocationSet(start, start.Add(day),
+		// idle
+		a1i, a2i,
+		// cluster 1, namespace1
+		a1111, a11abc2, a11def3,
+		// cluster 1, namespace 2
+		a12ghi4, a12ghi5, a12jkl6,
+		// cluster 2, namespace 2
+		a22mno4, a22mno5, a22pqr6,
+		// cluster 2, namespace 3
+		a23stu7, a23vwx8, a23vwx9,
+	)
+}
+
+// assertAllocationSetTotals fails the test if err is non-nil, or if the
+// set's length or total cost (rounded to cents) does not match expectations.
+// msg identifies the aggregation scenario in failure output.
+func assertAllocationSetTotals(t *testing.T, as *AllocationSet, msg string, err error, length int, totalCost float64) {
+	// Report failures at the caller's line, not inside this helper.
+	t.Helper()
+	if err != nil {
+		t.Fatalf("AllocationSet.AggregateBy[%s]: unexpected error: %s", msg, err)
+	}
+	if as.Length() != length {
+		t.Fatalf("AllocationSet.AggregateBy[%s]: expected set of length %d, actual %d", msg, length, as.Length())
+	}
+	// Compare at cent precision to sidestep float rounding noise.
+	if math.Round(as.TotalCost()*100) != math.Round(totalCost*100) {
+		t.Fatalf("AllocationSet.AggregateBy[%s]: expected total cost %.2f, actual %.2f", msg, totalCost, as.TotalCost())
+	}
+}
+
+// assertAllocationTotals fails the test if any allocation in the set has a
+// total cost (rounded to cents) different from its entry in exps, or if the
+// set contains an allocation with no entry in exps at all.
+func assertAllocationTotals(t *testing.T, as *AllocationSet, msg string, exps map[string]float64) {
+	// Report failures at the caller's line, not inside this helper.
+	t.Helper()
+	as.Each(func(k string, a *Allocation) {
+		if exp, ok := exps[a.Name]; ok {
+			if math.Round(a.TotalCost*100) != math.Round(exp*100) {
+				t.Fatalf("AllocationSet.AggregateBy[%s]: expected total cost %.2f, actual %.2f", msg, exp, a.TotalCost)
+			}
+		} else {
+			t.Fatalf("AllocationSet.AggregateBy[%s]: unexpected allocation: %s", msg, a.Name)
+		}
+	})
+}
+
+// assertAllocationWindow fails the test if any allocation in the set does not
+// exactly match the expected start, end, and minutes. Exact float comparison
+// of Minutes is deliberate: the fixtures use whole-day windows.
+func assertAllocationWindow(t *testing.T, as *AllocationSet, msg string, expStart, expEnd time.Time, expMinutes float64) {
+	// Report failures at the caller's line, not inside this helper.
+	t.Helper()
+	as.Each(func(k string, a *Allocation) {
+		if !a.Start.Equal(expStart) {
+			t.Fatalf("AllocationSet.AggregateBy[%s]: expected start %s, actual %s", msg, expStart, a.Start)
+		}
+		if !a.End.Equal(expEnd) {
+			t.Fatalf("AllocationSet.AggregateBy[%s]: expected end %s, actual %s", msg, expEnd, a.End)
+		}
+		if a.Minutes != expMinutes {
+			t.Fatalf("AllocationSet.AggregateBy[%s]: expected minutes %f, actual %f", msg, expMinutes, a.Minutes)
+		}
+	})
+}
+
+// printAllocationSet dumps every allocation in the set to stdout under a
+// labeled header. Debug-only helper; referenced from commented-out blocks in
+// TestAllocationSet_AggregateBy.
+func printAllocationSet(msg string, as *AllocationSet) {
+	fmt.Printf("--- %s ---\n", msg)
+	as.Each(func(k string, a *Allocation) {
+		fmt.Printf(" > %s\n", a)
+	})
+}
+
+// TestAllocationSet_AggregateBy exercises aggregation by each property,
+// multi-property aggregation, idle sharing, resource sharing, filtering,
+// and their combinations against the fixture from generateAllocationSet.
+func TestAllocationSet_AggregateBy(t *testing.T) {
+	// Test AggregateBy against the following workload topology, which is
+	// generated by generateAllocationSet:
+
+	// | Hierarchy                              | Cost |  CPU |  RAM |  GPU |   PV |  Net |
+	// +----------------------------------------+------+------+------+------+------+------+
+	//   cluster1:
+	//     idle:                                  20.00   5.00  15.00   0.00   0.00   0.00
+	//     namespace1:
+	//       pod1:
+	//         container1: [app=app1, env=env1]   15.00   1.00  11.00   1.00   1.00   1.00
+	//       pod-abc: (deployment1)
+	//         container2:                         5.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-def: (deployment1)
+	//         container3:                         5.00   1.00   1.00   1.00   1.00   1.00
+	//     namespace2:
+	//       pod-ghi: (deployment2)
+	//         container4: [app=app2, env=env2]    5.00   1.00   1.00   1.00   1.00   1.00
+	//         container5: [app=app2, env=env2]    5.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-jkl: (daemonset1)
+	//         container6: {service1}              5.00   1.00   1.00   1.00   1.00   1.00
+	// +-----------------------------------------+------+------+------+------+------+------+
+	//   cluster1 subtotal                        60.00  11.00  31.00   6.00   6.00   6.00
+	// +-----------------------------------------+------+------+------+------+------+------+
+	//   cluster2:
+	//     idle:                                  10.00   5.00   5.00   0.00   0.00   0.00
+	//     namespace2:
+	//       pod-mno: (deployment2)
+	//         container4: [app=app2]              5.00   1.00   1.00   1.00   1.00   1.00
+	//         container5: [app=app2]              5.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-pqr: (daemonset1)
+	//         container6: {service1}              5.00   1.00   1.00   1.00   1.00   1.00
+	//     namespace3:
+	//       pod-stu: (deployment3)
+	//         container7:                         5.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-vwx: (statefulset1)
+	//         container8:                         5.00   1.00   1.00   1.00   1.00   1.00
+	//         container9:                         5.00   1.00   1.00   1.00   1.00   1.00
+	// +----------------------------------------+------+------+------+------+------+------+
+	//   cluster2 subtotal                        40.00  11.00  11.00   6.00   6.00   6.00
+	// +----------------------------------------+------+------+------+------+------+------+
+	//   total                                   100.00  22.00  42.00  12.00  12.00  12.00
+	// +----------------------------------------+------+------+------+------+------+------+
+
+	// Scenarios to test:
+
+	// 1  Single-aggregation
+	// 1a AggregationProperties=(Cluster)
+	// 1b AggregationProperties=(Namespace)
+	// 1c AggregationProperties=(Pod)
+	// 1d AggregationProperties=(Container)
+	// 1e AggregationProperties=(ControllerKind)
+	// 1f AggregationProperties=(Controller)
+	// 1g AggregationProperties=(Service)
+	// 1h AggregationProperties=(Label:app)
+
+	// 2  Multi-aggregation
+	// 2a AggregationProperties=(Cluster, Namespace)
+	// 2b AggregationProperties=(Namespace, Label:app)
+	// 2c AggregationProperties=(Cluster, Namespace, Pod, Container)
+	// 2d AggregationProperties=(Label:app, Label:environment)
+
+	// 3  Share idle
+	// 3a AggregationProperties=(Namespace) ShareIdle=ShareWeighted
+	// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven (TODO niko/etl)
+
+	// 4  Share resources
+	// 4a Share namespace ShareEven
+	// 4b Share cluster ShareWeighted
+	// 4c Share label ShareEven
+	// 4d Share overhead ShareWeighted
+
+	// 5  Filters
+	// 5a Filter by cluster with separate idle
+	// 5b Filter by cluster with shared idle
+	// TODO niko/idle more filter tests
+
+	// 6  Combinations and options
+	// 6a SplitIdle
+	// 6b Share idle with filters
+	// 6c Share resources with filters
+	// 6d Share idle and share resources
+
+	// 7  Edge cases and errors
+	// 7a Empty AggregationProperties
+	// 7b Filter all
+	// 7c Share all
+	// 7d Share and filter the same allocations
+
+	// Definitions and set-up:
+
+	var as *AllocationSet
+	var err error
+
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+
+	numClusters := 2
+	numNamespaces := 3
+	numPods := 9
+	numContainers := 9
+	numControllerKinds := 3
+	numControllers := 5
+	numServices := 1
+	numLabelApps := 2
+
+	// By default, idle is reported as a single, merged allocation
+	numIdle := 1
+	// There will only ever be one __unallocated__
+	numUnallocated := 1
+	// There are two clusters, so each gets an idle entry when they are split
+	numSplitIdle := 2
+
+	activeTotalCost := 70.0
+	idleTotalCost := 30.0
+	sharedOverheadHourlyCost := 7.0
+
+	isNamespace3 := func(a *Allocation) bool {
+		ns, err := a.Properties.GetNamespace()
+		return err == nil && ns == "namespace3"
+	}
+
+	isApp1 := func(a *Allocation) bool {
+		ls, _ := a.Properties.GetLabels()
+		if app, ok := ls["app"]; ok && app == "app1" {
+			return true
+		}
+		return false
+	}
+
+	// Reuse startYesterday rather than calling time.Now() a second time, so a
+	// test run that straddles midnight cannot produce inconsistent windows.
+	start := startYesterday
+
+	// Tests:
+
+	// 1  Single-aggregation
+
+	// 1a AggregationProperties=(Cluster)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: ""}, nil)
+	assertAllocationSetTotals(t, as, "1a", err, numClusters+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1a", map[string]float64{
+		"cluster1": 40.00,
+		"cluster2": 30.00,
+		IdleSuffix: 30.00,
+	})
+	assertAllocationWindow(t, as, "1a", startYesterday, endYesterday, 1440.0)
+
+	// 1b AggregationProperties=(Namespace)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1b", err, numNamespaces+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1b", map[string]float64{
+		"namespace1": 25.00,
+		"namespace2": 30.00,
+		"namespace3": 15.00,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "1b", startYesterday, endYesterday, 1440.0)
+
+	// 1c AggregationProperties=(Pod)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{PodProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1c", err, numPods+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1c", map[string]float64{
+		"pod-jkl":  5.00,
+		"pod-stu":  5.00,
+		"pod-abc":  5.00,
+		"pod-pqr":  5.00,
+		"pod-def":  5.00,
+		"pod-vwx":  10.00,
+		"pod1":     15.00,
+		"pod-mno":  10.00,
+		"pod-ghi":  10.00,
+		IdleSuffix: 30.00,
+	})
+	assertAllocationWindow(t, as, "1c", startYesterday, endYesterday, 1440.0)
+
+	// 1d AggregationProperties=(Container)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ContainerProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1d", err, numContainers+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1d", map[string]float64{
+		"container2": 5.00,
+		"container9": 5.00,
+		"container6": 10.00,
+		"container3": 5.00,
+		"container4": 10.00,
+		"container7": 5.00,
+		"container8": 5.00,
+		"container5": 10.00,
+		"container1": 15.00,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "1d", startYesterday, endYesterday, 1440.0)
+
+	// 1e AggregationProperties=(ControllerKind)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ControllerKindProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1e", err, numControllerKinds+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1e", map[string]float64{
+		"daemonset":       10.00,
+		"deployment":      35.00,
+		"statefulset":     10.00,
+		IdleSuffix:        30.00,
+		UnallocatedSuffix: 15.00,
+	})
+	assertAllocationWindow(t, as, "1e", startYesterday, endYesterday, 1440.0)
+
+	// 1f AggregationProperties=(Controller)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ControllerProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1f", err, numControllers+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1f", map[string]float64{
+		"deployment/deployment2":   20.00,
+		"daemonset/daemonset1":     10.00,
+		"deployment/deployment3":   5.00,
+		"statefulset/statefulset1": 10.00,
+		"deployment/deployment1":   10.00,
+		IdleSuffix:                 30.00,
+		UnallocatedSuffix:          15.00,
+	})
+	assertAllocationWindow(t, as, "1f", startYesterday, endYesterday, 1440.0)
+
+	// 1g AggregationProperties=(Service)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ServiceProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1g", err, numServices+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1g", map[string]float64{
+		"service1":        10.00,
+		IdleSuffix:        30.00,
+		UnallocatedSuffix: 60.00,
+	})
+	assertAllocationWindow(t, as, "1g", startYesterday, endYesterday, 1440.0)
+
+	// 1h AggregationProperties=(Label:app)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{LabelProp: map[string]string{"app": ""}}, nil)
+	assertAllocationSetTotals(t, as, "1h", err, numLabelApps+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1h", map[string]float64{
+		"app=app1":        15.00,
+		"app=app2":        20.00,
+		IdleSuffix:        30.00,
+		UnallocatedSuffix: 35.00,
+	})
+	assertAllocationWindow(t, as, "1h", startYesterday, endYesterday, 1440.0)
+
+	// 1i AggregationProperties=(ControllerKind:deployment)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ControllerKindProp: "deployment"}, nil)
+	assertAllocationSetTotals(t, as, "1i", err, 1+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1i", map[string]float64{
+		"deployment":      35.00,
+		IdleSuffix:        30.00,
+		UnallocatedSuffix: 35.00,
+	})
+	assertAllocationWindow(t, as, "1i", startYesterday, endYesterday, 1440.0)
+
+	// 2  Multi-aggregation
+
+	// 2a AggregationProperties=(Cluster, Namespace)
+	// 2b AggregationProperties=(Namespace, Label:app)
+	// 2c AggregationProperties=(Cluster, Namespace, Pod, Container)
+
+	// 2d AggregationProperties=(Label:app, Label:environment)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{LabelProp: map[string]string{"app": "", "env": ""}}, nil)
+	// sets should be {idle, unallocated, app1/env1, app2/env2, app2/unallocated}
+	assertAllocationSetTotals(t, as, "2d", err, numIdle+numUnallocated+3, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "2d", map[string]float64{
+		"app=app1/env=env1":             15.00,
+		"app=app2/env=env2":             10.00,
+		"app=app2/" + UnallocatedSuffix: 10.00,
+		IdleSuffix:                      30.00,
+		UnallocatedSuffix:               35.00,
+	})
+
+	// 2e AggregationProperties=(Cluster, Label:app, Label:environment)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: "", LabelProp: map[string]string{"app": "", "env": ""}}, nil)
+	assertAllocationSetTotals(t, as, "2e", err, 6, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "2e", map[string]float64{
+		"cluster1/app=app2/env=env2":             10.00,
+		"__idle__":                               30.00,
+		"cluster1/app=app1/env=env1":             15.00,
+		"cluster1/" + UnallocatedSuffix:          15.00,
+		"cluster2/app=app2/" + UnallocatedSuffix: 10.00,
+		"cluster2/" + UnallocatedSuffix:          20.00,
+	})
+
+	// // TODO niko/etl
+
+	// // 3  Share idle
+
+	// 3a AggregationProperties=(Namespace) ShareIdle=ShareWeighted
+	// namespace1: 39.6875 = 25.00 + 5.00*(3.00/6.00) + 15.0*(13.0/16.0)
+	// namespace2: 40.3125 = 30.00 + 5.0*(3.0/6.0) + 15.0*(3.0/16.0) + 5.0*(3.0/6.0) + 5.0*(3.0/6.0)
+	// namespace3: 20.0000 = 15.00 + 5.0*(3.0/6.0) + 5.0*(3.0/6.0)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{ShareIdle: ShareWeighted})
+	assertAllocationSetTotals(t, as, "3a", err, numNamespaces, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "3a", map[string]float64{
+		"namespace1": 39.69,
+		"namespace2": 40.31,
+		"namespace3": 20.00,
+	})
+	assertAllocationWindow(t, as, "3a", startYesterday, endYesterday, 1440.0)
+
+	// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven
+	// namespace1: 35.0000 = 25.00 + 5.00*(1.0/2.0) + 15.0*(1.0/2.0)
+	// namespace2: 45.0000 = 30.00 + 5.0*(1.0/2.0) + 15.0*(1.0/2.0) + 5.0*(1.0/2.0) + 5.0*(1.0/2.0)
+	// namespace3: 20.0000 = 15.00 + 5.0*(1.0/2.0) + 5.0*(1.0/2.0)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{ShareIdle: ShareEven})
+	assertAllocationSetTotals(t, as, "3b", err, numNamespaces, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "3b", map[string]float64{
+		"namespace1": 35.00,
+		"namespace2": 45.00,
+		"namespace3": 20.00,
+	})
+	assertAllocationWindow(t, as, "3b", startYesterday, endYesterday, 1440.0)
+
+	// 4  Share resources
+
+	// 4a Share namespace ShareEven
+	// namespace1: 32.5000 = 25.00 + 15.00*(1.0/2.0)
+	// namespace2: 37.5000 = 30.00 + 15.00*(1.0/2.0)
+	// idle:       30.0000
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{
+		ShareFuncs: []AllocationMatchFunc{isNamespace3},
+		ShareSplit: ShareEven,
+	})
+	assertAllocationSetTotals(t, as, "4a", err, numNamespaces, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "4a", map[string]float64{
+		"namespace1": 32.50,
+		"namespace2": 37.50,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "4a", startYesterday, endYesterday, 1440.0)
+
+	// 4b Share namespace ShareWeighted
+	// namespace1: 32.5000 =
+	// namespace2: 37.5000 =
+	// idle:       30.0000
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{
+		ShareFuncs: []AllocationMatchFunc{isNamespace3},
+		ShareSplit: ShareWeighted,
+	})
+	assertAllocationSetTotals(t, as, "4b", err, numNamespaces, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "4b", map[string]float64{
+		"namespace1": 31.82,
+		"namespace2": 38.18,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "4b", startYesterday, endYesterday, 1440.0)
+
+	// 4c Share label ShareEven
+	// namespace1: 15.0000 = 25.00 - 15.00 + 15.00*(1.0/3.0)
+	// namespace2: 35.0000 = 30.00 + 15.00*(1.0/3.0)
+	// namespace3: 20.0000 = 15.00 + 15.00*(1.0/3.0)
+	// idle:       30.0000
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{
+		ShareFuncs: []AllocationMatchFunc{isApp1},
+		ShareSplit: ShareEven,
+	})
+	assertAllocationSetTotals(t, as, "4c", err, numNamespaces+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "4c", map[string]float64{
+		"namespace1": 15.00,
+		"namespace2": 35.00,
+		"namespace3": 20.00,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "4c", startYesterday, endYesterday, 1440.0)
+
+	// 4d Share overhead ShareWeighted
+	// namespace1: 37.5000 = 25.00 + (7.0*24.0)*(25.00/70.00)
+	// namespace2: 45.0000 = 30.00 + (7.0*24.0)*(30.00/70.00)
+	// namespace3: 22.5000 = 15.00 + (7.0*24.0)*(15.00/70.00)
+	// idle:       30.0000
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{
+		SharedHourlyCosts: map[string]float64{"total": sharedOverheadHourlyCost},
+		ShareSplit:        ShareWeighted,
+	})
+	assertAllocationSetTotals(t, as, "4d", err, numNamespaces+numIdle, activeTotalCost+idleTotalCost+(sharedOverheadHourlyCost*24.0))
+	assertAllocationTotals(t, as, "4d", map[string]float64{
+		"namespace1": 85.00,
+		"namespace2": 102.00,
+		"namespace3": 51.00,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "4d", startYesterday, endYesterday, 1440.0)
+
+	// 5  Filters
+
+	isCluster := func(matchCluster string) func(*Allocation) bool {
+		return func(a *Allocation) bool {
+			cluster, err := a.Properties.GetCluster()
+			return err == nil && cluster == matchCluster
+		}
+	}
+
+	isNamespace := func(matchNamespace string) func(*Allocation) bool {
+		return func(a *Allocation) bool {
+			namespace, err := a.Properties.GetNamespace()
+			return err == nil && namespace == matchNamespace
+		}
+	}
+
+	// 5a Filter by cluster with separate idle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
+		ShareIdle:   ShareNone,
+	})
+	assertAllocationSetTotals(t, as, "5a", err, 2, 60.0)
+	assertAllocationTotals(t, as, "5a", map[string]float64{
+		"cluster1": 40.00,
+		IdleSuffix: 20.00,
+	})
+	assertAllocationWindow(t, as, "5a", startYesterday, endYesterday, 1440.0)
+
+	// 5b Filter by cluster with shared idle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
+		ShareIdle:   ShareWeighted,
+	})
+	assertAllocationSetTotals(t, as, "5b", err, 1, 60.0)
+	assertAllocationTotals(t, as, "5b", map[string]float64{
+		"cluster1": 60.00,
+	})
+	assertAllocationWindow(t, as, "5b", startYesterday, endYesterday, 1440.0)
+
+	// 5c Filter by cluster, agg by namespace, with separate idle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
+		ShareIdle:   ShareNone,
+	})
+	assertAllocationSetTotals(t, as, "5c", err, 3, 60.0)
+	assertAllocationTotals(t, as, "5c", map[string]float64{
+		"namespace1": 25.00,
+		"namespace2": 15.00,
+		IdleSuffix:   20.00,
+	})
+	assertAllocationWindow(t, as, "5c", startYesterday, endYesterday, 1440.0)
+
+	// 5d Filter by namespace, agg by cluster, with separate idle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
+		ShareIdle:   ShareNone,
+	})
+	assertAllocationSetTotals(t, as, "5d", err, 3, 40.31)
+	assertAllocationTotals(t, as, "5d", map[string]float64{
+		"cluster1": 15.00,
+		"cluster2": 15.00,
+		IdleSuffix: 10.31,
+	})
+	assertAllocationWindow(t, as, "5d", startYesterday, endYesterday, 1440.0)
+
+	// 6  Combinations and options
+
+	// 6a SplitIdle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: ""}, &AllocationAggregationOptions{SplitIdle: true})
+	assertAllocationSetTotals(t, as, "6a", err, numNamespaces+numSplitIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "6a", map[string]float64{
+		"namespace1":                           25.00,
+		"namespace2":                           30.00,
+		"namespace3":                           15.00,
+		fmt.Sprintf("cluster1/%s", IdleSuffix): 20.00,
+		fmt.Sprintf("cluster2/%s", IdleSuffix): 10.00,
+	})
+	assertAllocationWindow(t, as, "6a", startYesterday, endYesterday, 1440.0)
+
+	// 6b Share idle weighted with filters
+
+	// Should match values from unfiltered aggregation
+	// as = generateAllocationSet(start)
+	// err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{ShareIdle: ShareWeighted})
+	// printAllocationSet("6b unfiltered", as)
+
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
+		ShareIdle:   ShareWeighted,
+	})
+	assertAllocationSetTotals(t, as, "6b", err, 1, 40.31)
+	assertAllocationTotals(t, as, "6b", map[string]float64{
+		"namespace2": 40.31,
+	})
+	assertAllocationWindow(t, as, "6b", startYesterday, endYesterday, 1440.0)
+
+	// 6c Share idle even with filters
+
+	// Should match values from unfiltered aggregation
+	// as = generateAllocationSet(start)
+	// err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{ShareIdle: ShareEven})
+	// printAllocationSet("6c unfiltered", as)
+
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
+		ShareIdle:   ShareEven,
+	})
+	assertAllocationSetTotals(t, as, "6c", err, 1, 45.00)
+	assertAllocationTotals(t, as, "6c", map[string]float64{
+		"namespace2": 45.00,
+	})
+	assertAllocationWindow(t, as, "6c", startYesterday, endYesterday, 1440.0)
+
+	// 6d Share resources with filters
+	// 6e Share idle and share resources
+
+	// 7  Edge cases and errors
+
+	// 7a Empty AggregationProperties
+	// 7b Filter all
+	// 7c Share all
+	// 7d Share and filter the same allocations
+}
+
+// TODO niko/etl
+//func TestAllocationSet_Clone(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Delete(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_End(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_IdleAllocations(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Insert(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_IsEmpty(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Length(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Map(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_MarshalJSON(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Resolution(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Seconds(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Set(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Start(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_TotalCost(t *testing.T) {}
+
+// TODO niko/etl
+//func TestNewAllocationSetRange(t *testing.T) {}
+
+// TestAllocationSetRange_Accumulate verifies that Accumulate collapses a
+// range of daily AllocationSets into a single set: nil/empty inputs yield an
+// empty set, nils are skipped, and matching allocations across days have
+// their resource hours and costs summed with the window expanded to cover
+// the full range.
+func TestAllocationSetRange_Accumulate(t *testing.T) {
+	ago2d := time.Now().UTC().Truncate(day).Add(-2 * day)
+	yesterday := time.Now().UTC().Truncate(day).Add(-day)
+	today := time.Now().UTC().Truncate(day)
+	tomorrow := time.Now().UTC().Truncate(day).Add(day)
+
+	// Accumulating any combination of nil and/or empty set should result in empty set
+	result, err := NewAllocationSetRange(nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating nil AllocationSetRange: %s", err)
+	}
+	if !result.IsEmpty() {
+		t.Fatalf("accumulating nil AllocationSetRange: expected empty; actual %s", result)
+	}
+
+	result, err = NewAllocationSetRange(nil, nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating nil AllocationSetRange: %s", err)
+	}
+	if !result.IsEmpty() {
+		t.Fatalf("accumulating nil AllocationSetRange: expected empty; actual %s", result)
+	}
+
+	result, err = NewAllocationSetRange(NewAllocationSet(yesterday, today)).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating empty AllocationSetRange: %s", err)
+	}
+	if !result.IsEmpty() {
+		t.Fatalf("accumulating empty AllocationSetRange: expected empty; actual %s", result)
+	}
+
+	result, err = NewAllocationSetRange(nil, NewAllocationSet(ago2d, yesterday), nil, NewAllocationSet(today, tomorrow), nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating empty AllocationSetRange: %s", err)
+	}
+	if !result.IsEmpty() {
+		t.Fatalf("accumulating empty AllocationSetRange: expected empty; actual %s", result)
+	}
+
+	todayAS := NewAllocationSet(today, tomorrow)
+	todayAS.Set(NewUnitAllocation("", today, day, nil))
+
+	yesterdayAS := NewAllocationSet(yesterday, today)
+	yesterdayAS.Set(NewUnitAllocation("", yesterday, day, nil))
+
+	// Accumulate non-nil with nil should result in copy of non-nil, regardless of order
+	result, err = NewAllocationSetRange(nil, todayAS).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating AllocationSetRange of length 1: %s", err)
+	}
+	if result == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected AllocationSet; actual %s", result)
+	}
+	if result.TotalCost() != 5.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected total cost 5.0; actual %f", result.TotalCost())
+	}
+
+	result, err = NewAllocationSetRange(todayAS, nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating AllocationSetRange of length 1: %s", err)
+	}
+	if result == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected AllocationSet; actual %s", result)
+	}
+	if result.TotalCost() != 5.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected total cost 5.0; actual %f", result.TotalCost())
+	}
+
+	result, err = NewAllocationSetRange(nil, todayAS, nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating AllocationSetRange of length 1: %s", err)
+	}
+	if result == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected AllocationSet; actual %s", result)
+	}
+	if result.TotalCost() != 5.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected total cost 5.0; actual %f", result.TotalCost())
+	}
+
+	// Accumulate two non-nil should result in sum of both with appropriate start, end
+	result, err = NewAllocationSetRange(yesterdayAS, todayAS).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating AllocationSetRange of length 2: %s", err)
+	}
+	if result == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected AllocationSet; actual %s", result)
+	}
+	if result.TotalCost() != 10.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected total cost 10.0; actual %f", result.TotalCost())
+	}
+	allocMap := result.Map()
+	if len(allocMap) != 1 {
+		t.Fatalf("accumulating AllocationSetRange: expected length 1; actual length %d", len(allocMap))
+	}
+	alloc := allocMap["cluster1/namespace1/pod1/container1"]
+	if alloc == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected allocation 'cluster1/namespace1/pod1/container1'")
+	}
+	if alloc.CPUCoreHours != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.CPUCoreHours)
+	}
+	if alloc.CPUCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.CPUCost)
+	}
+	if alloc.CPUEfficiency != 1.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 1.0; actual %f", alloc.CPUEfficiency)
+	}
+	if alloc.GPUHours != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.GPUHours)
+	}
+	if alloc.GPUCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.GPUCost)
+	}
+	if alloc.NetworkCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.NetworkCost)
+	}
+	if alloc.PVByteHours != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.PVByteHours)
+	}
+	if alloc.PVCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.PVCost)
+	}
+	if alloc.RAMByteHours != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.RAMByteHours)
+	}
+	if alloc.RAMCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.RAMCost)
+	}
+	if alloc.RAMEfficiency != 1.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 1.0; actual %f", alloc.RAMEfficiency)
+	}
+	if alloc.TotalCost != 10.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 10.0; actual %f", alloc.TotalCost)
+	}
+	if alloc.TotalEfficiency != 1.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 1.0; actual %f", alloc.TotalEfficiency)
+	}
+	if !alloc.Start.Equal(yesterday) {
+		t.Fatalf("accumulating AllocationSetRange: expected to start %s; actual %s", yesterday, alloc.Start)
+	}
+	if !alloc.End.Equal(tomorrow) {
+		t.Fatalf("accumulating AllocationSetRange: expected to end %s; actual %s", tomorrow, alloc.End)
+	}
+	if alloc.Minutes != 2880.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected %f minutes; actual %f", 2880.0, alloc.Minutes)
+	}
+}
+
+// TODO niko/etl
+// func TestAllocationSetRange_AccumulateBy(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_AggregateBy(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_Append(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_Length(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_MarshalJSON(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_Slice(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_Window(t *testing.T) {}

+ 2812 - 0
pkg/kubecost/asset.go

@@ -0,0 +1,2812 @@
+package kubecost
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"math"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/log"
+)
+
// timeFmt is the layout used for marshaling asset start/end times; note the
// numeric zone offset (-0700) rather than RFC3339's "Z07:00".
const timeFmt = "2006-01-02T15:04:05-0700"
+
// Asset defines an entity within a cluster that has a defined cost over a
// given period of time.
type Asset interface {
	// Type identifies the kind of Asset, which must always exist and should
	// be defined by the underlying type implementing the interface.
	Type() AssetType

	// Properties are a map of predefined traits, which may or may not exist,
	// but must conform to the AssetProperty schema
	Properties() *AssetProperties
	SetProperties(*AssetProperties)

	// Labels are a map of undefined string-to-string values
	Labels() AssetLabels
	SetLabels(AssetLabels)

	// Monetary values. TotalCost is expected to include Adjustment, so the
	// base cost can be recovered as TotalCost() - Adjustment().
	Adjustment() float64
	SetAdjustment(float64)
	TotalCost() float64

	// Temporal values. Start and End are the precise active period within
	// the (typically wider) Window.
	Start() time.Time
	End() time.Time
	Minutes() float64
	Window() Window
	ExpandWindow(Window)
	SetStartEnd(time.Time, time.Time)

	// Operations and comparisons. Note that Add may return an Asset of a
	// different concrete type (e.g. *Any) when the operands' types differ.
	Add(Asset) Asset
	Clone() Asset
	Equal(Asset) bool

	// Representations
	encoding.BinaryMarshaler
	encoding.BinaryUnmarshaler
	json.Marshaler
	fmt.Stringer
}
+
+// key is used to determine uniqueness of an Asset, for instance during Insert
+// to determine if two Assets should be combined. Passing nil props indicates
+// that all available props should be used. Passing empty props indicates that
+// no props should be used (e.g. to aggregate all assets). Passing one or more
+// props will key by only those props.
+func key(a Asset, props []AssetProperty) string {
+	keys := []string{}
+
+	if props == nil {
+		props = []AssetProperty{
+			AssetProviderProp,
+			AssetAccountProp,
+			AssetProjectProp,
+			AssetCategoryProp,
+			AssetClusterProp,
+			AssetTypeProp,
+			AssetServiceProp,
+			AssetProviderIDProp,
+			AssetNameProp,
+		}
+	}
+
+	for _, prop := range props {
+		switch true {
+		case prop == AssetProviderProp && a.Properties().Provider != "":
+			keys = append(keys, a.Properties().Provider)
+		case prop == AssetAccountProp && a.Properties().Account != "":
+			keys = append(keys, a.Properties().Account)
+		case prop == AssetProjectProp && a.Properties().Project != "":
+			keys = append(keys, a.Properties().Project)
+		case prop == AssetClusterProp && a.Properties().Cluster != "":
+			keys = append(keys, a.Properties().Cluster)
+		case prop == AssetCategoryProp && a.Properties().Category != "":
+			keys = append(keys, a.Properties().Category)
+		case prop == AssetTypeProp && a.Type().String() != "":
+			keys = append(keys, a.Type().String())
+		case prop == AssetServiceProp && a.Properties().Service != "":
+			keys = append(keys, a.Properties().Service)
+		case prop == AssetProviderIDProp && a.Properties().ProviderID != "":
+			keys = append(keys, a.Properties().ProviderID)
+		case prop == AssetNameProp && a.Properties().Name != "":
+			keys = append(keys, a.Properties().Name)
+		}
+	}
+
+	return strings.Join(keys, "/")
+}
+
// toString provides the shared fmt.Stringer implementation for all Asset
// types, rendering "Type{properties}window=totalCost".
func toString(a Asset) string {
	return fmt.Sprintf("%s{%s}%s=%.2f", a.Type().String(), a.Properties(), a.Window(), a.TotalCost())
}
+
// AssetLabels is a schema-free mapping of key/value pairs that can be
// attributed to an Asset as flexible, user-defined metadata.
type AssetLabels map[string]string

// Clone returns a copy of the labels, sharing no storage with the original.
func (al AssetLabels) Clone() AssetLabels {
	cloned := make(AssetLabels, len(al))

	for key, value := range al {
		cloned[key] = value
	}

	return cloned
}

// Equal returns true only if the two sets of labels are exact matches:
// same length, same keys, same values.
func (al AssetLabels) Equal(that AssetLabels) bool {
	if len(al) != len(that) {
		return false
	}

	for key, value := range al {
		other, found := that[key]
		if !found || other != value {
			return false
		}
	}

	return true
}

// Merge returns the intersection of the two label sets: only pairs present
// with identical values in both are retained.
func (al AssetLabels) Merge(that AssetLabels) AssetLabels {
	shared := AssetLabels{}

	for key, value := range that {
		if own, found := al[key]; found && own == value {
			shared[key] = value
		}
	}

	return shared
}
+
// AssetMatchFunc is a function that can be used to match Assets by
// returning true for any given Asset if a condition is met
// (e.g. a filtering predicate).
type AssetMatchFunc func(Asset) bool
+
// AssetType identifies a type of Asset
type AssetType int

const (
	// AnyAssetType describes the Any AssetType
	AnyAssetType AssetType = iota

	// CloudAssetType describes the Cloud AssetType
	CloudAssetType

	// ClusterManagementAssetType describes the ClusterManagement AssetType
	ClusterManagementAssetType

	// DiskAssetType describes the Disk AssetType
	DiskAssetType

	// LoadBalancerAssetType describes the LoadBalancer AssetType
	LoadBalancerAssetType

	// NetworkAssetType describes the Network AssetType
	NetworkAssetType

	// NodeAssetType describes the Node AssetType
	NodeAssetType

	// SharedAssetType describes the Shared AssetType
	SharedAssetType
)

// ParseAssetType attempts to parse the given string into an AssetType.
// Matching is case-insensitive and ignores surrounding whitespace; an
// unrecognized value returns AnyAssetType along with a non-nil error.
func ParseAssetType(text string) (AssetType, error) {
	switch strings.TrimSpace(strings.ToLower(text)) {
	case "cloud":
		return CloudAssetType, nil
	case "clustermanagement":
		return ClusterManagementAssetType, nil
	case "disk":
		return DiskAssetType, nil
	case "loadbalancer":
		return LoadBalancerAssetType, nil
	case "network":
		return NetworkAssetType, nil
	case "node":
		return NodeAssetType, nil
	case "shared":
		return SharedAssetType, nil
	}
	return AnyAssetType, fmt.Errorf("invalid asset type: %s", text)
}

// String converts the given AssetType to a string. Unlike the previous
// positional array lookup, this switch cannot panic if at holds an
// out-of-range value; any unrecognized value maps to the generic "Asset".
func (at AssetType) String() string {
	switch at {
	case CloudAssetType:
		return "Cloud"
	case ClusterManagementAssetType:
		return "ClusterManagement"
	case DiskAssetType:
		return "Disk"
	case LoadBalancerAssetType:
		return "LoadBalancer"
	case NetworkAssetType:
		return "Network"
	case NodeAssetType:
		return "Node"
	case SharedAssetType:
		return "Shared"
	default:
		return "Asset"
	}
}
+
// Any is the most general Asset, which is usually created as a result of
// adding two Assets of different types.
type Any struct {
	labels     AssetLabels
	properties *AssetProperties
	start      time.Time // precise start of activity within window
	end        time.Time // precise end of activity within window
	window     Window
	adjustment float64 // reconciliation delta applied on top of Cost
	Cost       float64 // base cost, exclusive of adjustment
}

// NewAsset creates a new Any-type Asset for the given period of time.
// The window is cloned; start and end are assumed to lie within it.
func NewAsset(start, end time.Time, window Window) *Any {
	return &Any{
		labels:     AssetLabels{},
		properties: &AssetProperties{},
		start:      start,
		end:        end,
		window:     window.Clone(),
	}
}
+
// Type returns the Asset's type
func (a *Any) Type() AssetType {
	return AnyAssetType
}

// Properties returns the Asset's properties
func (a *Any) Properties() *AssetProperties {
	return a.properties
}

// SetProperties sets the Asset's properties
func (a *Any) SetProperties(props *AssetProperties) {
	a.properties = props
}

// Labels returns the Asset's labels
func (a *Any) Labels() AssetLabels {
	return a.labels
}

// SetLabels sets the Asset's labels
func (a *Any) SetLabels(labels AssetLabels) {
	a.labels = labels
}

// Adjustment returns the Asset's cost adjustment
func (a *Any) Adjustment() float64 {
	return a.adjustment
}

// SetAdjustment sets the Asset's cost adjustment
func (a *Any) SetAdjustment(adj float64) {
	a.adjustment = adj
}

// TotalCost returns the Asset's total cost (base Cost plus adjustment)
func (a *Any) TotalCost() float64 {
	return a.Cost + a.adjustment
}

// Start returns the Asset's start time within the window
func (a *Any) Start() time.Time {
	return a.start
}

// End returns the Asset's end time within the window
func (a *Any) End() time.Time {
	return a.end
}

// Minutes returns the number of minutes the Asset was active within the window.
// Note: unlike Disk.Minutes and Network.Minutes, this is not clamped to the
// window or floored at zero.
func (a *Any) Minutes() float64 {
	return a.End().Sub(a.Start()).Minutes()
}

// Window returns the Asset's window
func (a *Any) Window() Window {
	return a.window
}

// ExpandWindow expands the Asset's window by the given window
func (a *Any) ExpandWindow(window Window) {
	a.window = a.window.Expand(window)
}

// SetStartEnd sets the Asset's Start and End fields. Each value is applied
// only if it lies inside the current window; otherwise a warning is logged
// and the field is left unchanged.
func (a *Any) SetStartEnd(start, end time.Time) {
	if a.Window().Contains(start) {
		a.start = start
	} else {
		log.Warningf("Any.SetStartEnd: start %s not in %s", start, a.Window())
	}

	if a.Window().Contains(end) {
		a.end = end
	} else {
		log.Warningf("Any.SetStartEnd: end %s not in %s", end, a.Window())
	}
}
+
// Add sums the Asset with the given Asset to produce a new Asset, maintaining
// as much relevant information as possible (i.e. type, properties, labels).
// Only matching properties and labels are retained; start, end, and window
// expand to cover both operands. The receiver is not mutated.
func (a *Any) Add(that Asset) Asset {
	this := a.Clone().(*Any)

	props := a.Properties().Merge(that.Properties())
	labels := a.Labels().Merge(that.Labels())

	start := a.Start()
	if that.Start().Before(start) {
		start = that.Start()
	}
	end := a.End()
	if that.End().After(end) {
		end = that.End()
	}
	window := a.Window().Expand(that.Window())

	this.start = start
	this.end = end
	this.window = window
	this.SetProperties(props)
	this.SetLabels(labels)
	this.adjustment += that.Adjustment()
	// TotalCost - Adjustment recovers the operand's base cost, so the
	// adjustment is not double-counted in Cost.
	this.Cost += (that.TotalCost() - that.Adjustment())

	return this
}

// Clone returns a cloned instance of the Asset (deep copy of labels,
// properties, and window).
func (a *Any) Clone() Asset {
	return &Any{
		labels:     a.labels.Clone(),
		properties: a.properties.Clone(),
		start:      a.start,
		end:        a.end,
		window:     a.window.Clone(),
		adjustment: a.adjustment,
		Cost:       a.Cost,
	}
}
+
+// Equal returns true if the given Asset is an exact match of the receiver
+func (a *Any) Equal(that Asset) bool {
+	t, ok := that.(*Any)
+	if !ok {
+		return false
+	}
+
+	if !a.Labels().Equal(that.Labels()) {
+		return false
+	}
+	if !a.Properties().Equal(that.Properties()) {
+		return false
+	}
+
+	if !a.start.Equal(t.start) {
+		return false
+	}
+	if !a.end.Equal(t.end) {
+		return false
+	}
+	if !a.window.Equal(t.window) {
+		return false
+	}
+
+	if a.Cost != t.Cost {
+		return false
+	}
+
+	return true
+}
+
// MarshalJSON implements json.Marshaler by hand-encoding fields in a fixed
// key order.
// NOTE(review): unlike the other Asset types, Any does not emit a "type"
// field — confirm whether consumers rely on its absence.
func (a *Any) MarshalJSON() ([]byte, error) {
	buffer := bytes.NewBufferString("{")
	jsonEncode(buffer, "properties", a.Properties(), ",")
	jsonEncode(buffer, "labels", a.Labels(), ",")
	jsonEncodeString(buffer, "window", a.Window().String(), ",")
	jsonEncodeString(buffer, "start", a.Start().Format(timeFmt), ",")
	jsonEncodeString(buffer, "end", a.End().Format(timeFmt), ",")
	jsonEncodeFloat64(buffer, "minutes", a.Minutes(), ",")
	jsonEncodeFloat64(buffer, "adjustment", a.Adjustment(), ",")
	jsonEncodeFloat64(buffer, "totalCost", a.TotalCost(), "")
	buffer.WriteString("}")
	return buffer.Bytes(), nil
}

// String implements fmt.Stringer
func (a *Any) String() string {
	return toString(a)
}
+
// Cloud describes a cloud asset
type Cloud struct {
	labels     AssetLabels
	properties *AssetProperties
	start      time.Time
	end        time.Time
	window     Window
	adjustment float64 // reconciliation delta applied on top of Cost
	Cost       float64 // base cost, exclusive of adjustment
}

// NewCloud returns a new Cloud Asset for the given category and providerID
// over the given period. The window is cloned; start and end are assumed to
// lie within it.
func NewCloud(category, providerID string, start, end time.Time, window Window) *Cloud {
	properties := &AssetProperties{
		Category:   category,
		ProviderID: providerID,
	}

	return &Cloud{
		labels:     AssetLabels{},
		properties: properties,
		start:      start,
		end:        end,
		window:     window.Clone(),
	}
}
+
// Type returns the AssetType
func (ca *Cloud) Type() AssetType {
	return CloudAssetType
}

// Properties returns the AssetProperties
func (ca *Cloud) Properties() *AssetProperties {
	return ca.properties
}

// SetProperties sets the Asset's properties
func (ca *Cloud) SetProperties(props *AssetProperties) {
	ca.properties = props
}

// Labels returns the AssetLabels
func (ca *Cloud) Labels() AssetLabels {
	return ca.labels
}

// SetLabels sets the Asset's labels
func (ca *Cloud) SetLabels(labels AssetLabels) {
	ca.labels = labels
}

// Adjustment returns the Asset's adjustment value
func (ca *Cloud) Adjustment() float64 {
	return ca.adjustment
}

// SetAdjustment sets the Asset's adjustment value
func (ca *Cloud) SetAdjustment(adj float64) {
	ca.adjustment = adj
}

// TotalCost returns the Asset's total cost (base Cost plus adjustment)
func (ca *Cloud) TotalCost() float64 {
	return ca.Cost + ca.adjustment
}

// Start returns the Asset's precise start time within the window
func (ca *Cloud) Start() time.Time {
	return ca.start
}

// End returns the Asset's precise end time within the window
func (ca *Cloud) End() time.Time {
	return ca.end
}

// Minutes returns the number of Minutes the Asset ran.
// Note: not clamped to the window, unlike Disk.Minutes/Network.Minutes.
func (ca *Cloud) Minutes() float64 {
	return ca.End().Sub(ca.Start()).Minutes()
}

// Window returns the window within which the Asset ran
func (ca *Cloud) Window() Window {
	return ca.window
}

// ExpandWindow expands the Asset's window by the given window
func (ca *Cloud) ExpandWindow(window Window) {
	ca.window = ca.window.Expand(window)
}

// SetStartEnd sets the Asset's Start and End fields. Each value is applied
// only if it lies inside the current window; otherwise a warning is logged
// and the field is left unchanged.
func (ca *Cloud) SetStartEnd(start, end time.Time) {
	if ca.Window().Contains(start) {
		ca.start = start
	} else {
		log.Warningf("Cloud.SetStartEnd: start %s not in %s", start, ca.Window())
	}

	if ca.Window().Contains(end) {
		ca.end = end
	} else {
		log.Warningf("Cloud.SetStartEnd: end %s not in %s", end, ca.Window())
	}
}
+
// Add sums the Asset with the given Asset to produce a new Asset, maintaining
// as much relevant information as possible (i.e. type, properties, labels).
// Adding two Clouds produces a Cloud; adding a Cloud to any other type
// demotes the result to the generic Any type. The receiver is not mutated.
func (ca *Cloud) Add(a Asset) Asset {
	// Cloud + Cloud = Cloud
	if that, ok := a.(*Cloud); ok {
		this := ca.Clone().(*Cloud)
		this.add(that)
		return this
	}

	props := ca.Properties().Merge(a.Properties())
	labels := ca.Labels().Merge(a.Labels())

	start := ca.Start()
	if a.Start().Before(start) {
		start = a.Start()
	}
	end := ca.End()
	if a.End().After(end) {
		end = a.End()
	}
	window := ca.Window().Expand(a.Window())

	// Cloud + !Cloud = Any
	any := NewAsset(start, end, window)
	any.SetProperties(props)
	any.SetLabels(labels)
	any.adjustment = ca.Adjustment() + a.Adjustment()
	// TotalCost - Adjustment recovers each operand's base cost.
	any.Cost = (ca.TotalCost() - ca.Adjustment()) + (a.TotalCost() - a.Adjustment())

	return any
}
+
+func (ca *Cloud) add(that *Cloud) {
+	if ca == nil {
+		ca = that
+		return
+	}
+
+	props := ca.Properties().Merge(that.Properties())
+	labels := ca.Labels().Merge(that.Labels())
+
+	start := ca.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := ca.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := ca.Window().Expand(that.Window())
+
+	ca.start = start
+	ca.end = end
+	ca.window = window
+	ca.SetProperties(props)
+	ca.SetLabels(labels)
+	ca.adjustment += that.adjustment
+	ca.Cost += that.Cost
+}
+
// Clone returns a cloned instance of the Asset (deep copy of labels,
// properties, and window).
func (ca *Cloud) Clone() Asset {
	return &Cloud{
		labels:     ca.labels.Clone(),
		properties: ca.properties.Clone(),
		start:      ca.start,
		end:        ca.end,
		window:     ca.window.Clone(),
		adjustment: ca.adjustment,
		Cost:       ca.Cost,
	}
}

// Equal returns true if the given Asset precisely equals the Asset,
// including the adjustment value.
func (ca *Cloud) Equal(a Asset) bool {
	that, ok := a.(*Cloud)
	if !ok {
		return false
	}

	if !ca.Labels().Equal(that.Labels()) {
		return false
	}
	if !ca.Properties().Equal(that.Properties()) {
		return false
	}

	if !ca.start.Equal(that.start) {
		return false
	}
	if !ca.end.Equal(that.end) {
		return false
	}
	if !ca.window.Equal(that.window) {
		return false
	}

	if ca.adjustment != that.adjustment {
		return false
	}

	if ca.Cost != that.Cost {
		return false
	}

	return true
}

// MarshalJSON implements json.Marshaler by hand-encoding fields in a fixed
// key order.
func (ca *Cloud) MarshalJSON() ([]byte, error) {
	buffer := bytes.NewBufferString("{")
	jsonEncodeString(buffer, "type", ca.Type().String(), ",")
	jsonEncode(buffer, "properties", ca.Properties(), ",")
	jsonEncode(buffer, "labels", ca.Labels(), ",")
	jsonEncodeString(buffer, "window", ca.Window().String(), ",")
	jsonEncodeString(buffer, "start", ca.Start().Format(timeFmt), ",")
	jsonEncodeString(buffer, "end", ca.End().Format(timeFmt), ",")
	jsonEncodeFloat64(buffer, "minutes", ca.Minutes(), ",")
	jsonEncodeFloat64(buffer, "adjustment", ca.Adjustment(), ",")
	jsonEncodeFloat64(buffer, "totalCost", ca.TotalCost(), "")
	buffer.WriteString("}")
	return buffer.Bytes(), nil
}

// String implements fmt.Stringer
func (ca *Cloud) String() string {
	return toString(ca)
}
+
// ClusterManagement describes a provider's cluster management fee.
// Unlike other Assets it carries no start/end fields (they are derived from
// the window) and no adjustment.
type ClusterManagement struct {
	labels     AssetLabels
	properties *AssetProperties
	window     Window
	Cost       float64
}

// NewClusterManagement creates and returns a new ClusterManagement instance
// for the given provider and cluster. The window is cloned.
func NewClusterManagement(provider, cluster string, window Window) *ClusterManagement {
	properties := &AssetProperties{
		Category: ManagementCategory,
		Provider: ParseProvider(provider),
		Cluster:  cluster,
		Service:  KubernetesService,
	}

	return &ClusterManagement{
		labels:     AssetLabels{},
		properties: properties,
		window:     window.Clone(),
	}
}
+
// Type returns the Asset's type
func (cm *ClusterManagement) Type() AssetType {
	return ClusterManagementAssetType
}

// Properties returns the Asset's properties
func (cm *ClusterManagement) Properties() *AssetProperties {
	return cm.properties
}

// SetProperties sets the Asset's properties
func (cm *ClusterManagement) SetProperties(props *AssetProperties) {
	cm.properties = props
}

// Labels returns the Asset's labels
func (cm *ClusterManagement) Labels() AssetLabels {
	return cm.labels
}

// SetLabels sets the Asset's properties
func (cm *ClusterManagement) SetLabels(props AssetLabels) {
	cm.labels = props
}

// Adjustment does not apply to ClusterManagement; it always returns 0.
func (cm *ClusterManagement) Adjustment() float64 {
	return 0.0
}

// SetAdjustment does not apply to ClusterManagement; it is intentionally
// a no-op, present only to satisfy the Asset interface.
func (cm *ClusterManagement) SetAdjustment(float64) {
	return
}

// TotalCost returns the Asset's total cost (no adjustment is applied)
func (cm *ClusterManagement) TotalCost() float64 {
	return cm.Cost
}

// Start returns the Asset's precise start time, which for ClusterManagement
// is simply the start of its window.
func (cm *ClusterManagement) Start() time.Time {
	return *cm.window.Start()
}

// End returns the Asset's precise end time, which for ClusterManagement
// is simply the end of its window.
func (cm *ClusterManagement) End() time.Time {
	return *cm.window.End()
}

// Minutes returns the number of minutes the Asset ran
func (cm *ClusterManagement) Minutes() float64 {
	return cm.Window().Minutes()
}

// Window return the Asset's window
func (cm *ClusterManagement) Window() Window {
	return cm.window
}

// ExpandWindow expands the Asset's window by the given window
func (cm *ClusterManagement) ExpandWindow(window Window) {
	cm.window = cm.window.Expand(window)
}

// SetStartEnd sets the Asset's Start and End fields; intentionally a no-op
// here, since ClusterManagement derives start/end from its window.
func (cm *ClusterManagement) SetStartEnd(start, end time.Time) {
	return
}
+
// Add sums the Asset with the given Asset to produce a new Asset, maintaining
// as much relevant information as possible (i.e. type, properties, labels).
// Adding two ClusterManagements produces a ClusterManagement; adding any
// other type demotes the result to the generic Any type. The receiver is
// not mutated.
func (cm *ClusterManagement) Add(a Asset) Asset {
	// ClusterManagement + ClusterManagement = ClusterManagement
	if that, ok := a.(*ClusterManagement); ok {
		this := cm.Clone().(*ClusterManagement)
		this.add(that)
		return this
	}

	props := cm.Properties().Merge(a.Properties())
	labels := cm.Labels().Merge(a.Labels())

	start := cm.Start()
	if a.Start().Before(start) {
		start = a.Start()
	}
	end := cm.End()
	if a.End().After(end) {
		end = a.End()
	}
	window := cm.Window().Expand(a.Window())

	// ClusterManagement + !ClusterManagement = Any
	any := NewAsset(start, end, window)
	any.SetProperties(props)
	any.SetLabels(labels)
	any.adjustment = cm.Adjustment() + a.Adjustment()
	// TotalCost - Adjustment recovers each operand's base cost.
	any.Cost = (cm.TotalCost() - cm.Adjustment()) + (a.TotalCost() - a.Adjustment())

	return any
}
+
+func (cm *ClusterManagement) add(that *ClusterManagement) {
+	if cm == nil {
+		cm = that
+		return
+	}
+
+	props := cm.Properties().Merge(that.Properties())
+	labels := cm.Labels().Merge(that.Labels())
+	window := cm.Window().Expand(that.Window())
+
+	cm.window = window
+	cm.SetProperties(props)
+	cm.SetLabels(labels)
+	cm.Cost += that.Cost
+}
+
// Clone returns a cloned instance of the Asset (deep copy of labels,
// properties, and window).
func (cm *ClusterManagement) Clone() Asset {
	return &ClusterManagement{
		labels:     cm.labels.Clone(),
		properties: cm.properties.Clone(),
		window:     cm.window.Clone(),
		Cost:       cm.Cost,
	}
}

// Equal returns true if the given Asset exactly matches the Asset
func (cm *ClusterManagement) Equal(a Asset) bool {
	that, ok := a.(*ClusterManagement)
	if !ok {
		return false
	}

	if !cm.Labels().Equal(that.Labels()) {
		return false
	}
	if !cm.Properties().Equal(that.Properties()) {
		return false
	}

	if !cm.window.Equal(that.window) {
		return false
	}

	if cm.Cost != that.Cost {
		return false
	}

	return true
}

// MarshalJSON implements json.Marshaler by hand-encoding fields in a fixed
// key order. No "adjustment" field is emitted, as ClusterManagement has none.
func (cm *ClusterManagement) MarshalJSON() ([]byte, error) {
	buffer := bytes.NewBufferString("{")
	jsonEncodeString(buffer, "type", cm.Type().String(), ",")
	jsonEncode(buffer, "properties", cm.Properties(), ",")
	jsonEncode(buffer, "labels", cm.Labels(), ",")
	jsonEncodeString(buffer, "window", cm.Window().String(), ",")
	jsonEncodeString(buffer, "start", cm.Start().Format(timeFmt), ",")
	jsonEncodeString(buffer, "end", cm.End().Format(timeFmt), ",")
	jsonEncodeFloat64(buffer, "minutes", cm.Minutes(), ",")
	jsonEncodeFloat64(buffer, "totalCost", cm.TotalCost(), "")
	buffer.WriteString("}")
	return buffer.Bytes(), nil
}

// String implements fmt.Stringer
func (cm *ClusterManagement) String() string {
	return toString(cm)
}
+
+// Disk represents an in-cluster disk Asset
+type Disk struct {
+	labels     AssetLabels
+	properties *AssetProperties
+	start      time.Time
+	end        time.Time
+	window     Window
+	adjustment float64
+	Cost       float64
+	ByteHours  float64
+	Local      float64
+	Breakdown  *Breakdown
+}
+
+// NewDisk creates and returns a new Disk Asset
+func NewDisk(name, cluster, providerID string, start, end time.Time, window Window) *Disk {
+	properties := &AssetProperties{
+		Category:   StorageCategory,
+		Name:       name,
+		Cluster:    cluster,
+		ProviderID: providerID,
+		Service:    KubernetesService,
+	}
+
+	return &Disk{
+		labels:     AssetLabels{},
+		properties: properties,
+		start:      start,
+		end:        end,
+		window:     window,
+		Breakdown:  &Breakdown{},
+	}
+}
+
// Type returns the AssetType of the Asset
func (d *Disk) Type() AssetType {
	return DiskAssetType
}

// Properties returns the Asset's properties
func (d *Disk) Properties() *AssetProperties {
	return d.properties
}

// SetProperties sets the Asset's properties
func (d *Disk) SetProperties(props *AssetProperties) {
	d.properties = props
}

// Labels returns the Asset's labels
func (d *Disk) Labels() AssetLabels {
	return d.labels
}

// SetLabels sets the Asset's labels
func (d *Disk) SetLabels(labels AssetLabels) {
	d.labels = labels
}

// Adjustment returns the Asset's cost adjustment
func (d *Disk) Adjustment() float64 {
	return d.adjustment
}

// SetAdjustment sets the Asset's cost adjustment
func (d *Disk) SetAdjustment(adj float64) {
	d.adjustment = adj
}

// TotalCost returns the Asset's total cost (base Cost plus adjustment)
func (d *Disk) TotalCost() float64 {
	return d.Cost + d.adjustment
}

// Start returns the precise start time of the Asset within the window
func (d *Disk) Start() time.Time {
	return d.start
}

// End returns the precise start time of the Asset within the window
func (d *Disk) End() time.Time {
	return d.end
}

// Minutes returns the number of minutes the Asset ran, clamped to the
// enclosing window's duration and floored at zero. A value exceeding the
// window is logged as a warning, since it indicates inconsistent ETL data.
func (d *Disk) Minutes() float64 {
	diskMins := d.end.Sub(d.start).Minutes()
	windowMins := d.window.Minutes()

	if diskMins > windowMins {
		log.Warningf("Asset ETL: Disk.Minutes exceeds window: %.2f > %.2f", diskMins, windowMins)
		diskMins = windowMins
	}

	if diskMins < 0 {
		diskMins = 0
	}

	return diskMins
}

// Window returns the window within which the Asset
func (d *Disk) Window() Window {
	return d.window
}

// ExpandWindow expands the Asset's window by the given window
func (d *Disk) ExpandWindow(window Window) {
	d.window = d.window.Expand(window)
}

// SetStartEnd sets the Asset's Start and End fields. Each value is applied
// only if it lies inside the current window; otherwise a warning is logged
// and the field is left unchanged.
func (d *Disk) SetStartEnd(start, end time.Time) {
	if d.Window().Contains(start) {
		d.start = start
	} else {
		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, d.Window())
	}

	if d.Window().Contains(end) {
		d.end = end
	} else {
		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, d.Window())
	}
}
+
// Add sums the Asset with the given Asset to produce a new Asset, maintaining
// as much relevant information as possible (i.e. type, properties, labels).
// Adding two Disks produces a Disk; adding any other type demotes the result
// to the generic Any type (losing ByteHours, Local, and Breakdown data).
// The receiver is not mutated.
func (d *Disk) Add(a Asset) Asset {
	// Disk + Disk = Disk
	if that, ok := a.(*Disk); ok {
		this := d.Clone().(*Disk)
		this.add(that)
		return this
	}

	props := d.Properties().Merge(a.Properties())
	labels := d.Labels().Merge(a.Labels())

	start := d.Start()
	if a.Start().Before(start) {
		start = a.Start()
	}
	end := d.End()
	if a.End().After(end) {
		end = a.End()
	}
	window := d.Window().Expand(a.Window())

	// Disk + !Disk = Any
	any := NewAsset(start, end, window)
	any.SetProperties(props)
	any.SetLabels(labels)
	any.adjustment = d.Adjustment() + a.Adjustment()
	// TotalCost - Adjustment recovers each operand's base cost.
	any.Cost = (d.TotalCost() - d.Adjustment()) + (a.TotalCost() - a.Adjustment())

	return any
}
+
+func (d *Disk) add(that *Disk) {
+	if d == nil {
+		d = that
+		return
+	}
+
+	props := d.Properties().Merge(that.Properties())
+	labels := d.Labels().Merge(that.Labels())
+	d.SetProperties(props)
+	d.SetLabels(labels)
+
+	start := d.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := d.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := d.Window().Expand(that.Window())
+	d.start = start
+	d.end = end
+	d.window = window
+
+	totalCost := d.Cost + that.Cost
+	if totalCost > 0.0 {
+		d.Breakdown.Idle = (d.Breakdown.Idle*d.Cost + that.Breakdown.Idle*that.Cost) / totalCost
+		d.Breakdown.Other = (d.Breakdown.Other*d.Cost + that.Breakdown.Other*that.Cost) / totalCost
+		d.Breakdown.System = (d.Breakdown.System*d.Cost + that.Breakdown.System*that.Cost) / totalCost
+		d.Breakdown.User = (d.Breakdown.User*d.Cost + that.Breakdown.User*that.Cost) / totalCost
+
+		d.Local = (d.TotalCost()*d.Local + that.TotalCost()*that.Local) / (d.TotalCost() + that.TotalCost())
+	} else {
+		d.Local = (d.Local + that.Local) / 2.0
+	}
+
+	d.adjustment += that.adjustment
+	d.Cost += that.Cost
+
+	d.ByteHours += that.ByteHours
+}
+
// Clone returns a cloned instance of the Asset (deep copy of labels,
// properties, window, and breakdown).
func (d *Disk) Clone() Asset {
	return &Disk{
		properties: d.properties.Clone(),
		labels:     d.labels.Clone(),
		start:      d.start,
		end:        d.end,
		window:     d.window.Clone(),
		adjustment: d.adjustment,
		Cost:       d.Cost,
		ByteHours:  d.ByteHours,
		Local:      d.Local,
		Breakdown:  d.Breakdown.Clone(),
	}
}
+
// Equal returns true if the two Assets match exactly, including adjustment,
// byte-hours, local fraction, and breakdown.
func (d *Disk) Equal(a Asset) bool {
	that, ok := a.(*Disk)
	if !ok {
		return false
	}

	if !d.Labels().Equal(that.Labels()) {
		return false
	}
	if !d.Properties().Equal(that.Properties()) {
		return false
	}

	if !d.Start().Equal(that.Start()) {
		return false
	}
	if !d.End().Equal(that.End()) {
		return false
	}
	if !d.window.Equal(that.window) {
		return false
	}

	if d.adjustment != that.adjustment {
		return false
	}
	if d.Cost != that.Cost {
		return false
	}

	if d.ByteHours != that.ByteHours {
		return false
	}
	if d.Local != that.Local {
		return false
	}
	if !d.Breakdown.Equal(that.Breakdown) {
		return false
	}

	return true
}
+
// MarshalJSON implements the json.Marshaler interface by hand-encoding
// fields in a fixed key order. "bytes" is derived from ByteHours via Bytes().
func (d *Disk) MarshalJSON() ([]byte, error) {
	buffer := bytes.NewBufferString("{")
	jsonEncodeString(buffer, "type", d.Type().String(), ",")
	jsonEncode(buffer, "properties", d.Properties(), ",")
	jsonEncode(buffer, "labels", d.Labels(), ",")
	jsonEncodeString(buffer, "window", d.Window().String(), ",")
	jsonEncodeString(buffer, "start", d.Start().Format(timeFmt), ",")
	jsonEncodeString(buffer, "end", d.End().Format(timeFmt), ",")
	jsonEncodeFloat64(buffer, "minutes", d.Minutes(), ",")
	jsonEncodeFloat64(buffer, "byteHours", d.ByteHours, ",")
	jsonEncodeFloat64(buffer, "bytes", d.Bytes(), ",")
	jsonEncode(buffer, "breakdown", d.Breakdown, ",")
	jsonEncodeFloat64(buffer, "adjustment", d.Adjustment(), ",")
	jsonEncodeFloat64(buffer, "totalCost", d.TotalCost(), "")
	buffer.WriteString("}")
	return buffer.Bytes(), nil
}

// String implements fmt.Stringer
func (d *Disk) String() string {
	return toString(d)
}
+
+// Bytes returns the number of bytes belonging to the disk. This could be
+// fractional because it's the number of byte*hours divided by the number of
+// hours running; e.g. the sum of a 100GiB disk running for the first 10 hours
+// and a 30GiB disk running for the last 20 hours of the same 24-hour window
+// would produce:
+//   (100*10 + 30*20) / 24 = 66.667GiB
+// However, any number of disks running for the full span of a window will
+// report the actual number of bytes of the static disk; e.g. the above
+// scenario for one entire 24-hour window:
+//   (100*24 + 30*24) / 24 = (100 + 30) = 130GiB
+func (d *Disk) Bytes() float64 {
+	// [b*hr]*([min/hr]*[1/min]) = [b*hr]/[hr] = b
+	return d.ByteHours * (60.0 / d.Minutes())
+}
+
// Breakdown describes a resource's use as a percentage of various usage types
type Breakdown struct {
	Idle   float64 `json:"idle"`
	Other  float64 `json:"other"`
	System float64 `json:"system"`
	User   float64 `json:"user"`
}

// Clone returns a cloned instance of the Breakdown; a nil receiver yields nil.
func (b *Breakdown) Clone() *Breakdown {
	if b == nil {
		return nil
	}

	return &Breakdown{
		Idle:   b.Idle,
		Other:  b.Other,
		System: b.System,
		User:   b.User,
	}
}

// Equal returns true if the two Breakdowns are exact matches. Two nil
// Breakdowns are considered equal; previously nil.Equal(nil) returned false,
// which broke reflexivity (an asset carrying a nil Breakdown could never
// equal itself).
func (b *Breakdown) Equal(that *Breakdown) bool {
	if b == nil && that == nil {
		return true
	}
	if b == nil || that == nil {
		return false
	}

	if b.Idle != that.Idle {
		return false
	}
	if b.Other != that.Other {
		return false
	}
	if b.System != that.System {
		return false
	}
	if b.User != that.User {
		return false
	}

	return true
}
+
// Network is an Asset representing a single node's network costs
type Network struct {
	properties *AssetProperties
	labels     AssetLabels
	start      time.Time
	end        time.Time
	window     Window
	adjustment float64 // reconciliation delta applied on top of Cost
	Cost       float64 // base cost, exclusive of adjustment
}

// NewNetwork creates and returns a new Network Asset for the given name,
// cluster, and providerID over the given period. The window is cloned;
// start and end are assumed to lie within it.
func NewNetwork(name, cluster, providerID string, start, end time.Time, window Window) *Network {
	properties := &AssetProperties{
		Category:   NetworkCategory,
		Name:       name,
		Cluster:    cluster,
		ProviderID: providerID,
		Service:    KubernetesService,
	}

	return &Network{
		properties: properties,
		labels:     AssetLabels{},
		start:      start,
		end:        end,
		window:     window.Clone(),
	}
}
+
+// Type returns the AssetType of the Asset
+func (n *Network) Type() AssetType {
+	return NetworkAssetType
+}
+
+// Properties returns the Asset's properties
+func (n *Network) Properties() *AssetProperties {
+	return n.properties
+}
+
+// SetProperties sets the Asset's properties
+func (n *Network) SetProperties(props *AssetProperties) {
+	n.properties = props
+}
+
+// Labels returns the Asset's labels
+func (n *Network) Labels() AssetLabels {
+	return n.labels
+}
+
+// SetLabels sets the Asset's labels
+func (n *Network) SetLabels(labels AssetLabels) {
+	n.labels = labels
+}
+
+// Adjustment returns the Asset's cost adjustment
+func (n *Network) Adjustment() float64 {
+	return n.adjustment
+}
+
+// SetAdjustment sets the Asset's cost adjustment
+func (n *Network) SetAdjustment(adj float64) {
+	n.adjustment = adj
+}
+
+// TotalCost returns the Asset's total cost (Cost plus adjustment)
+func (n *Network) TotalCost() float64 {
+	return n.Cost + n.adjustment
+}
+
+// Start returns the precise start time of the Asset within the window
+func (n *Network) Start() time.Time {
+	return n.start
+}
+
+// End returns the precise end time of the Asset within the window
+func (n *Network) End() time.Time {
+	return n.end
+}
+
+// Minutes returns the number of minutes the Asset ran within the window,
+// clamped to [0, window length]; an overrun is logged as a warning.
+func (n *Network) Minutes() float64 {
+	mins := n.end.Sub(n.start).Minutes()
+
+	if windowMins := n.window.Minutes(); mins > windowMins {
+		log.Warningf("Asset ETL: Network.Minutes exceeds window: %.2f > %.2f", mins, windowMins)
+		mins = windowMins
+	}
+	if mins < 0 {
+		mins = 0
+	}
+
+	return mins
+}
+
+// Window returns the window within which the Asset ran
+func (n *Network) Window() Window {
+	return n.window
+}
+
+// ExpandWindow expands the Asset's window by the given window, i.e. the
+// resulting window covers both the original and the given one.
+func (n *Network) ExpandWindow(window Window) {
+	n.window = n.window.Expand(window)
+}
+
+// SetStartEnd sets the Asset's Start and End fields, ignoring (and warning
+// on) any value that falls outside the Asset's window.
+func (n *Network) SetStartEnd(start, end time.Time) {
+	if n.Window().Contains(start) {
+		n.start = start
+	} else {
+		// Fixed copy-pasted "Disk.SetStartEnd" prefix in the warnings below.
+		log.Warningf("Network.SetStartEnd: start %s not in %s", start, n.Window())
+	}
+
+	if n.Window().Contains(end) {
+		n.end = end
+	} else {
+		log.Warningf("Network.SetStartEnd: end %s not in %s", end, n.Window())
+	}
+}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (n *Network) Add(a Asset) Asset {
+	// Network + Network = Network
+	if that, ok := a.(*Network); ok {
+		sum := n.Clone().(*Network)
+		sum.add(that)
+		return sum
+	}
+
+	// Network + !Network = Any, spanning the union of both assets' ranges,
+	// windows, properties, and labels, with costs and adjustments summed.
+	start, end := n.Start(), n.End()
+	if s := a.Start(); s.Before(start) {
+		start = s
+	}
+	if e := a.End(); e.After(end) {
+		end = e
+	}
+
+	any := NewAsset(start, end, n.Window().Expand(a.Window()))
+	any.SetProperties(n.Properties().Merge(a.Properties()))
+	any.SetLabels(n.Labels().Merge(a.Labels()))
+	any.adjustment = n.Adjustment() + a.Adjustment()
+	any.Cost = (n.TotalCost() - n.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+// add accumulates the given Network into the receiver: properties and labels
+// are merged, the time range and window are expanded to cover both, and
+// Cost and adjustment are summed.
+func (n *Network) add(that *Network) {
+	// A nil receiver cannot be mutated in place. The previous `n = that`
+	// only rebound the local pointer (staticcheck SA4006) and had no effect
+	// on the caller, so simply return.
+	if n == nil {
+		return
+	}
+
+	props := n.Properties().Merge(that.Properties())
+	labels := n.Labels().Merge(that.Labels())
+	n.SetProperties(props)
+	n.SetLabels(labels)
+
+	start := n.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := n.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := n.Window().Expand(that.Window())
+	n.start = start
+	n.end = end
+	n.window = window
+
+	n.Cost += that.Cost
+	n.adjustment += that.adjustment
+}
+
+// Clone returns a deep copy of the given Network; a nil receiver yields nil.
+func (n *Network) Clone() Asset {
+	if n == nil {
+		return nil
+	}
+
+	// Copy the value fields wholesale, then deep-copy the reference fields.
+	clone := *n
+	clone.properties = n.properties.Clone()
+	clone.labels = n.labels.Clone()
+	clone.window = n.window.Clone()
+	return &clone
+}
+
+// Equal returns true if the two Assets match exactly
+func (n *Network) Equal(a Asset) bool {
+	that, ok := a.(*Network)
+	if !ok {
+		return false
+	}
+
+	return n.Labels().Equal(that.Labels()) &&
+		n.Properties().Equal(that.Properties()) &&
+		n.Start().Equal(that.Start()) &&
+		n.End().Equal(that.End()) &&
+		n.window.Equal(that.window) &&
+		n.adjustment == that.adjustment &&
+		n.Cost == that.Cost
+}
+
+// MarshalJSON implements json.Marshal interface
+// Fields are written manually into the buffer to fix the key order; the
+// final "totalCost" entry takes no trailing comma.
+func (n *Network) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", n.Type().String(), ",")
+	jsonEncode(buffer, "properties", n.Properties(), ",")
+	jsonEncode(buffer, "labels", n.Labels(), ",")
+	jsonEncodeString(buffer, "window", n.Window().String(), ",")
+	jsonEncodeString(buffer, "start", n.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", n.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", n.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "adjustment", n.Adjustment(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", n.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
+// String implements fmt.Stringer by delegating to the package's toString helper.
+func (n *Network) String() string {
+	return toString(n)
+}
+
+// Node is an Asset representing a single node in a cluster
+type Node struct {
+	properties   *AssetProperties // identifying metadata (name, cluster, provider ID, ...)
+	labels       AssetLabels      // key/value labels attached to the asset
+	start        time.Time        // precise start of the asset's run within the window
+	end          time.Time        // precise end of the asset's run within the window
+	window       Window           // coverage window the asset is recorded against
+	adjustment   float64          // reconciliation adjustment; added by TotalCost
+	NodeType     string           // node type; cleared by add() when summing unlike types
+	CPUCoreHours float64          // cumulative core-hours over the run
+	RAMByteHours float64          // cumulative byte-hours over the run
+	CPUBreakdown *Breakdown       // CPU usage split (idle/other/system/user)
+	RAMBreakdown *Breakdown       // RAM usage split (idle/other/system/user)
+	CPUCost      float64          // CPU cost before discount and adjustment
+	GPUCost      float64          // GPU cost (not discounted in TotalCost)
+	RAMCost      float64          // RAM cost before discount and adjustment
+	Discount     float64          // fraction applied to (CPUCost + RAMCost) in TotalCost
+	Preemptible  float64          // cost-weighted fraction of preemptible capacity
+}
+
+// NewNode creates and returns a new Node Asset with zeroed CPU and RAM
+// breakdowns, covering a clone of the given window.
+func NewNode(name, cluster, providerID string, start, end time.Time, window Window) *Node {
+	return &Node{
+		properties: &AssetProperties{
+			Category:   ComputeCategory,
+			Name:       name,
+			Cluster:    cluster,
+			ProviderID: providerID,
+			Service:    KubernetesService,
+		},
+		labels:       AssetLabels{},
+		start:        start,
+		end:          end,
+		window:       window.Clone(),
+		CPUBreakdown: &Breakdown{},
+		RAMBreakdown: &Breakdown{},
+	}
+}
+
+// Type returns the AssetType of the Asset
+func (n *Node) Type() AssetType {
+	return NodeAssetType
+}
+
+// Properties returns the Asset's properties
+func (n *Node) Properties() *AssetProperties {
+	return n.properties
+}
+
+// SetProperties sets the Asset's properties
+func (n *Node) SetProperties(props *AssetProperties) {
+	n.properties = props
+}
+
+// Labels returns the Asset's labels
+func (n *Node) Labels() AssetLabels {
+	return n.labels
+}
+
+// SetLabels sets the Asset's labels
+func (n *Node) SetLabels(labels AssetLabels) {
+	n.labels = labels
+}
+
+// Adjustment returns the Asset's cost adjustment
+func (n *Node) Adjustment() float64 {
+	return n.adjustment
+}
+
+// SetAdjustment sets the Asset's cost adjustment
+func (n *Node) SetAdjustment(adj float64) {
+	n.adjustment = adj
+}
+
+// TotalCost returns the Asset's total cost:
+// discounted (CPU + RAM), plus undiscounted GPU, plus adjustment.
+func (n *Node) TotalCost() float64 {
+	return ((n.CPUCost + n.RAMCost) * (1.0 - n.Discount)) + n.GPUCost + n.adjustment
+}
+
+// Start returns the precise start time of the Asset within the window
+func (n *Node) Start() time.Time {
+	return n.start
+}
+
+// End returns the precise end time of the Asset within the window
+func (n *Node) End() time.Time {
+	return n.end
+}
+
+// Minutes returns the number of minutes the Asset ran within the window,
+// clamped to [0, window length]; an overrun is logged as a warning.
+func (n *Node) Minutes() float64 {
+	mins := n.end.Sub(n.start).Minutes()
+
+	if windowMins := n.window.Minutes(); mins > windowMins {
+		log.Warningf("Asset ETL: Node.Minutes exceeds window: %.2f > %.2f", mins, windowMins)
+		mins = windowMins
+	}
+	if mins < 0 {
+		mins = 0
+	}
+
+	return mins
+}
+
+// Window returns the window within which the Asset ran
+func (n *Node) Window() Window {
+	return n.window
+}
+
+// ExpandWindow expands the Asset's window by the given window, i.e. the
+// resulting window covers both the original and the given one.
+func (n *Node) ExpandWindow(window Window) {
+	n.window = n.window.Expand(window)
+}
+
+// SetStartEnd sets the Asset's Start and End fields, ignoring (and warning
+// on) any value that falls outside the Asset's window.
+func (n *Node) SetStartEnd(start, end time.Time) {
+	if n.Window().Contains(start) {
+		n.start = start
+	} else {
+		// Fixed copy-pasted "Disk.SetStartEnd" prefix in the warnings below.
+		log.Warningf("Node.SetStartEnd: start %s not in %s", start, n.Window())
+	}
+
+	if n.Window().Contains(end) {
+		n.end = end
+	} else {
+		log.Warningf("Node.SetStartEnd: end %s not in %s", end, n.Window())
+	}
+}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (n *Node) Add(a Asset) Asset {
+	// Node + Node = Node
+	if that, ok := a.(*Node); ok {
+		sum := n.Clone().(*Node)
+		sum.add(that)
+		return sum
+	}
+
+	// Node + !Node = Any, spanning the union of both assets' ranges,
+	// windows, properties, and labels, with costs and adjustments summed.
+	start, end := n.Start(), n.End()
+	if s := a.Start(); s.Before(start) {
+		start = s
+	}
+	if e := a.End(); e.After(end) {
+		end = e
+	}
+
+	any := NewAsset(start, end, n.Window().Expand(a.Window()))
+	any.SetProperties(n.Properties().Merge(a.Properties()))
+	any.SetLabels(n.Labels().Merge(a.Labels()))
+	any.adjustment = n.Adjustment() + a.Adjustment()
+	any.Cost = (n.TotalCost() - n.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+// add accumulates the given Node into the receiver: properties and labels
+// are merged, the range and window are expanded to cover both, hours and
+// costs are summed, and rate-like fields (Discount, Preemptible, and the
+// CPU/RAM breakdowns) are recombined as cost-weighted averages.
+func (n *Node) add(that *Node) {
+	// A nil receiver cannot be mutated in place. The previous `n = that`
+	// only rebound the local pointer (staticcheck SA4006) and had no effect
+	// on the caller, so simply return.
+	if n == nil {
+		return
+	}
+
+	props := n.Properties().Merge(that.Properties())
+	labels := n.Labels().Merge(that.Labels())
+	n.SetProperties(props)
+	n.SetLabels(labels)
+
+	// Mixed node types cannot be summarized by a single type; clear it.
+	if n.NodeType != that.NodeType {
+		n.NodeType = ""
+	}
+
+	start := n.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := n.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := n.Window().Expand(that.Window())
+	n.start = start
+	n.end = end
+	n.window = window
+
+	// Order of operations for node costs is:
+	//   Discount(CPU + RAM) + GPU + Adjustment
+
+	// Combining discounts, then involves weighting each discount by each
+	// respective (CPU + RAM) cost. Combining preemptible, on the other
+	// hand, is done with all three (but not Adjustment, which can change
+	// without triggering a re-computation of Preemptible).
+
+	disc := (n.CPUCost+n.RAMCost)*(1.0-n.Discount) + (that.CPUCost+that.RAMCost)*(1.0-that.Discount)
+	nonDisc := (n.CPUCost + n.RAMCost) + (that.CPUCost + that.RAMCost)
+	if nonDisc > 0 {
+		n.Discount = 1.0 - (disc / nonDisc)
+	} else {
+		// No cost to weight by: fall back to a simple average.
+		n.Discount = (n.Discount + that.Discount) / 2.0
+	}
+
+	nNoAdj := n.TotalCost() - n.Adjustment()
+	thatNoAdj := that.TotalCost() - that.Adjustment()
+	if (nNoAdj + thatNoAdj) > 0 {
+		n.Preemptible = (nNoAdj*n.Preemptible + thatNoAdj*that.Preemptible) / (nNoAdj + thatNoAdj)
+	} else {
+		n.Preemptible = (n.Preemptible + that.Preemptible) / 2.0
+	}
+
+	// Breakdowns are fractions, so recombine weighted by each side's cost.
+	totalCPUCost := n.CPUCost + that.CPUCost
+	if totalCPUCost > 0.0 {
+		n.CPUBreakdown.Idle = (n.CPUBreakdown.Idle*n.CPUCost + that.CPUBreakdown.Idle*that.CPUCost) / totalCPUCost
+		n.CPUBreakdown.Other = (n.CPUBreakdown.Other*n.CPUCost + that.CPUBreakdown.Other*that.CPUCost) / totalCPUCost
+		n.CPUBreakdown.System = (n.CPUBreakdown.System*n.CPUCost + that.CPUBreakdown.System*that.CPUCost) / totalCPUCost
+		n.CPUBreakdown.User = (n.CPUBreakdown.User*n.CPUCost + that.CPUBreakdown.User*that.CPUCost) / totalCPUCost
+	}
+
+	totalRAMCost := n.RAMCost + that.RAMCost
+	if totalRAMCost > 0.0 {
+		n.RAMBreakdown.Idle = (n.RAMBreakdown.Idle*n.RAMCost + that.RAMBreakdown.Idle*that.RAMCost) / totalRAMCost
+		n.RAMBreakdown.Other = (n.RAMBreakdown.Other*n.RAMCost + that.RAMBreakdown.Other*that.RAMCost) / totalRAMCost
+		n.RAMBreakdown.System = (n.RAMBreakdown.System*n.RAMCost + that.RAMBreakdown.System*that.RAMCost) / totalRAMCost
+		n.RAMBreakdown.User = (n.RAMBreakdown.User*n.RAMCost + that.RAMBreakdown.User*that.RAMCost) / totalRAMCost
+	}
+
+	n.CPUCoreHours += that.CPUCoreHours
+	n.RAMByteHours += that.RAMByteHours
+
+	n.CPUCost += that.CPUCost
+	n.GPUCost += that.GPUCost
+	n.RAMCost += that.RAMCost
+	n.adjustment += that.adjustment
+}
+
+// Clone returns a deep copy of the given Node; a nil receiver yields nil.
+func (n *Node) Clone() Asset {
+	if n == nil {
+		return nil
+	}
+
+	// Copy the value fields wholesale, then deep-copy the reference fields.
+	clone := *n
+	clone.properties = n.properties.Clone()
+	clone.labels = n.labels.Clone()
+	clone.window = n.window.Clone()
+	clone.CPUBreakdown = n.CPUBreakdown.Clone()
+	clone.RAMBreakdown = n.RAMBreakdown.Clone()
+	return &clone
+}
+
+// Equal returns true if the two Assets match exactly
+func (n *Node) Equal(a Asset) bool {
+	that, ok := a.(*Node)
+	if !ok {
+		return false
+	}
+
+	return n.Labels().Equal(that.Labels()) &&
+		n.Properties().Equal(that.Properties()) &&
+		n.Start().Equal(that.Start()) &&
+		n.End().Equal(that.End()) &&
+		n.window.Equal(that.window) &&
+		n.adjustment == that.adjustment &&
+		n.NodeType == that.NodeType &&
+		n.CPUCoreHours == that.CPUCoreHours &&
+		n.RAMByteHours == that.RAMByteHours &&
+		n.CPUBreakdown.Equal(that.CPUBreakdown) &&
+		n.RAMBreakdown.Equal(that.RAMBreakdown) &&
+		n.CPUCost == that.CPUCost &&
+		n.GPUCost == that.GPUCost &&
+		n.RAMCost == that.RAMCost &&
+		n.Discount == that.Discount &&
+		n.Preemptible == that.Preemptible
+}
+
+// MarshalJSON implements json.Marshal interface
+// Fields are written manually into the buffer to fix the key order; the
+// final "totalCost" entry takes no trailing comma. "cpuCores" and
+// "ramBytes" are derived values computed from the hour totals and Minutes.
+func (n *Node) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", n.Type().String(), ",")
+	jsonEncode(buffer, "properties", n.Properties(), ",")
+	jsonEncode(buffer, "labels", n.Labels(), ",")
+	jsonEncodeString(buffer, "window", n.Window().String(), ",")
+	jsonEncodeString(buffer, "start", n.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", n.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", n.Minutes(), ",")
+	jsonEncodeString(buffer, "nodeType", n.NodeType, ",")
+	jsonEncodeFloat64(buffer, "cpuCores", n.CPUCores(), ",")
+	jsonEncodeFloat64(buffer, "ramBytes", n.RAMBytes(), ",")
+	jsonEncodeFloat64(buffer, "cpuCoreHours", n.CPUCoreHours, ",")
+	jsonEncodeFloat64(buffer, "ramByteHours", n.RAMByteHours, ",")
+	jsonEncode(buffer, "cpuBreakdown", n.CPUBreakdown, ",")
+	jsonEncode(buffer, "ramBreakdown", n.RAMBreakdown, ",")
+	jsonEncodeFloat64(buffer, "preemptible", n.Preemptible, ",")
+	jsonEncodeFloat64(buffer, "discount", n.Discount, ",")
+	jsonEncodeFloat64(buffer, "cpuCost", n.CPUCost, ",")
+	jsonEncodeFloat64(buffer, "gpuCost", n.GPUCost, ",")
+	jsonEncodeFloat64(buffer, "ramCost", n.RAMCost, ",")
+	jsonEncodeFloat64(buffer, "adjustment", n.Adjustment(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", n.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
+// String implements fmt.Stringer by delegating to the package's toString helper.
+func (n *Node) String() string {
+	return toString(n)
+}
+
+// IsPreemptible returns true if the node is 100% preemptible. It's possible
+// to be "partially preemptible" by adding a preemptible node with a
+// non-preemptible node. (Preemptible is presumably a fraction in [0, 1],
+// given the cost-weighted averaging in add — confirm upstream inputs.)
+func (n *Node) IsPreemptible() bool {
+	return n.Preemptible == 1.0
+}
+
+// CPUCores returns the number of cores belonging to the node. This could be
+// fractional because it's the number of core*hours divided by the number of
+// hours running; e.g. the sum of a 4-core node running for the first 10 hours
+// and a 3-core node running for the last 20 hours of the same 24-hour window
+// would produce:
+//   (4*10 + 3*20) / 24 = 4.167 cores
+// However, any number of cores running for the full span of a window will
+// report the actual number of cores of the static node; e.g. the above
+// scenario for one entire 24-hour window:
+//   (4*24 + 3*24) / 24 = (4 + 3) = 7 cores
+// Returns 0.0 if the node has no recorded runtime, rather than the +Inf/NaN
+// that dividing by a zero Minutes() would otherwise produce.
+func (n *Node) CPUCores() float64 {
+	mins := n.Minutes()
+	if mins == 0.0 {
+		return 0.0
+	}
+	// [core*hr]*([min/hr]*[1/min]) = [core*hr]/[hr] = core
+	return n.CPUCoreHours * (60.0 / mins)
+}
+
+// RAMBytes returns the amount of RAM belonging to the node. This could be
+// fractional because it's the number of byte*hours divided by the number of
+// hours running; e.g. the sum of a 12GiB-RAM node running for the first 10 hours
+// and a 16GiB-RAM node running for the last 20 hours of the same 24-hour window
+// would produce:
+//   (12*10 + 16*20) / 24 = 18.333GiB RAM
+// However, any amount of RAM running for the full span of a window will
+// report the actual amount of RAM of the static node; e.g. the above
+// scenario for one entire 24-hour window:
+//   (12*24 + 16*24) / 24 = (12 + 16) = 28GiB RAM
+// Returns 0.0 if the node has no recorded runtime, rather than the +Inf/NaN
+// that dividing by a zero Minutes() would otherwise produce.
+func (n *Node) RAMBytes() float64 {
+	mins := n.Minutes()
+	if mins == 0.0 {
+		return 0.0
+	}
+	// [b*hr]*([min/hr]*[1/min]) = [b*hr]/[hr] = b
+	return n.RAMByteHours * (60.0 / mins)
+}
+
+// LoadBalancer is an Asset representing a single load balancer in a cluster
+// TODO: add GB of ingress processed, numForwardingRules once we start recording those to prometheus metric
+type LoadBalancer struct {
+	properties *AssetProperties // identifying metadata (name, cluster, provider ID, ...)
+	labels     AssetLabels      // key/value labels attached to the asset
+	start      time.Time        // precise start of the asset's run within the window
+	end        time.Time        // precise end of the asset's run within the window
+	window     Window           // coverage window the asset is recorded against
+	adjustment float64          // reconciliation adjustment; added to Cost by TotalCost
+	Cost       float64          // load balancer cost before adjustment
+}
+
+// NewLoadBalancer instantiates and returns a new LoadBalancer.
+// The window is cloned — matching NewNetwork and NewNode — so the new asset
+// does not alias the caller's Window.
+func NewLoadBalancer(name, cluster, providerID string, start, end time.Time, window Window) *LoadBalancer {
+	properties := &AssetProperties{
+		Category:   NetworkCategory,
+		Name:       name,
+		Cluster:    cluster,
+		ProviderID: providerID,
+		Service:    KubernetesService,
+	}
+
+	return &LoadBalancer{
+		properties: properties,
+		labels:     AssetLabels{},
+		start:      start,
+		end:        end,
+		window:     window.Clone(),
+	}
+}
+
+// Type returns the AssetType of the Asset
+func (lb *LoadBalancer) Type() AssetType {
+	return LoadBalancerAssetType
+}
+
+// Properties returns the Asset's properties
+func (lb *LoadBalancer) Properties() *AssetProperties {
+	return lb.properties
+}
+
+// SetProperties sets the Asset's properties
+func (lb *LoadBalancer) SetProperties(props *AssetProperties) {
+	lb.properties = props
+}
+
+// Labels returns the Asset's labels
+func (lb *LoadBalancer) Labels() AssetLabels {
+	return lb.labels
+}
+
+// SetLabels sets the Asset's labels
+func (lb *LoadBalancer) SetLabels(labels AssetLabels) {
+	lb.labels = labels
+}
+
+// Adjustment returns the Asset's cost adjustment
+func (lb *LoadBalancer) Adjustment() float64 {
+	return lb.adjustment
+}
+
+// SetAdjustment sets the Asset's cost adjustment
+func (lb *LoadBalancer) SetAdjustment(adj float64) {
+	lb.adjustment = adj
+}
+
+// TotalCost returns the total cost of the Asset (Cost plus adjustment)
+func (lb *LoadBalancer) TotalCost() float64 {
+	return lb.Cost + lb.adjustment
+}
+
+// Start returns the precise start point of the Asset within the window
+func (lb *LoadBalancer) Start() time.Time {
+	return lb.start
+}
+
+// End returns the precise end point of the Asset within the window
+func (lb *LoadBalancer) End() time.Time {
+	return lb.end
+}
+
+// Minutes returns the number of minutes the Asset ran within the window,
+// clamped to [0, window length] for consistency with Network.Minutes and
+// Node.Minutes (previously the raw, possibly-negative or overrunning span
+// was returned).
+func (lb *LoadBalancer) Minutes() float64 {
+	mins := lb.end.Sub(lb.start).Minutes()
+
+	windowMins := lb.window.Minutes()
+	if mins > windowMins {
+		log.Warningf("Asset ETL: LoadBalancer.Minutes exceeds window: %.2f > %.2f", mins, windowMins)
+		mins = windowMins
+	}
+	if mins < 0 {
+		mins = 0
+	}
+
+	return mins
+}
+
+// Window returns the window within which the Asset ran
+func (lb *LoadBalancer) Window() Window {
+	return lb.window
+}
+
+// ExpandWindow expands the Asset's window by the given window, i.e. the
+// resulting window covers both the original and the given one.
+func (lb *LoadBalancer) ExpandWindow(w Window) {
+	lb.window = lb.window.Expand(w)
+}
+
+// SetStartEnd sets the Asset's Start and End fields, ignoring (and warning
+// on) any value that falls outside the Asset's window.
+func (lb *LoadBalancer) SetStartEnd(start, end time.Time) {
+	if lb.Window().Contains(start) {
+		lb.start = start
+	} else {
+		// Fixed copy-pasted "Disk.SetStartEnd" prefix in the warnings below.
+		log.Warningf("LoadBalancer.SetStartEnd: start %s not in %s", start, lb.Window())
+	}
+
+	if lb.Window().Contains(end) {
+		lb.end = end
+	} else {
+		log.Warningf("LoadBalancer.SetStartEnd: end %s not in %s", end, lb.Window())
+	}
+}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (lb *LoadBalancer) Add(a Asset) Asset {
+	// LoadBalancer + LoadBalancer = LoadBalancer
+	if that, ok := a.(*LoadBalancer); ok {
+		sum := lb.Clone().(*LoadBalancer)
+		sum.add(that)
+		return sum
+	}
+
+	// LoadBalancer + !LoadBalancer = Any, spanning the union of both assets'
+	// ranges, windows, properties, and labels, with costs summed.
+	start, end := lb.Start(), lb.End()
+	if s := a.Start(); s.Before(start) {
+		start = s
+	}
+	if e := a.End(); e.After(end) {
+		end = e
+	}
+
+	any := NewAsset(start, end, lb.Window().Expand(a.Window()))
+	any.SetProperties(lb.Properties().Merge(a.Properties()))
+	any.SetLabels(lb.Labels().Merge(a.Labels()))
+	any.adjustment = lb.Adjustment() + a.Adjustment()
+	any.Cost = (lb.TotalCost() - lb.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+// add accumulates the given LoadBalancer into the receiver: properties and
+// labels are merged, the time range and window are expanded to cover both,
+// and Cost and adjustment are summed.
+func (lb *LoadBalancer) add(that *LoadBalancer) {
+	// A nil receiver cannot be mutated in place. The previous `lb = that`
+	// only rebound the local pointer (staticcheck SA4006) and had no effect
+	// on the caller, so simply return.
+	if lb == nil {
+		return
+	}
+
+	props := lb.Properties().Merge(that.Properties())
+	labels := lb.Labels().Merge(that.Labels())
+	lb.SetProperties(props)
+	lb.SetLabels(labels)
+
+	start := lb.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := lb.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := lb.Window().Expand(that.Window())
+	lb.start = start
+	lb.end = end
+	lb.window = window
+
+	lb.Cost += that.Cost
+	lb.adjustment += that.adjustment
+}
+
+// Clone returns a deep copy of the given LoadBalancer. A nil receiver
+// yields nil, matching Network.Clone, Node.Clone, and SharedAsset.Clone
+// (previously a nil receiver would panic on the field dereferences).
+func (lb *LoadBalancer) Clone() Asset {
+	if lb == nil {
+		return nil
+	}
+
+	return &LoadBalancer{
+		properties: lb.properties.Clone(),
+		labels:     lb.labels.Clone(),
+		start:      lb.start,
+		end:        lb.end,
+		window:     lb.window.Clone(),
+		adjustment: lb.adjustment,
+		Cost:       lb.Cost,
+	}
+}
+
+// Equal returns true if the two Assets match precisely
+func (lb *LoadBalancer) Equal(a Asset) bool {
+	that, ok := a.(*LoadBalancer)
+	if !ok {
+		return false
+	}
+
+	return lb.Labels().Equal(that.Labels()) &&
+		lb.Properties().Equal(that.Properties()) &&
+		lb.Start().Equal(that.Start()) &&
+		lb.End().Equal(that.End()) &&
+		lb.window.Equal(that.window) &&
+		lb.adjustment == that.adjustment &&
+		lb.Cost == that.Cost
+}
+
+// MarshalJSON implements json.Marshal
+// Fields are written manually into the buffer to fix the key order; the
+// final "totalCost" entry takes no trailing comma.
+func (lb *LoadBalancer) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", lb.Type().String(), ",")
+	jsonEncode(buffer, "properties", lb.Properties(), ",")
+	jsonEncode(buffer, "labels", lb.Labels(), ",")
+	jsonEncodeString(buffer, "window", lb.Window().String(), ",")
+	jsonEncodeString(buffer, "start", lb.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", lb.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", lb.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "adjustment", lb.Adjustment(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", lb.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
+// String implements fmt.Stringer by delegating to the package's toString helper.
+func (lb *LoadBalancer) String() string {
+	return toString(lb)
+}
+
+// SharedAsset is an Asset representing a shared cost
+// Unlike the other Asset types it has no start/end fields: its time range
+// is defined entirely by its window, and it carries no adjustment.
+type SharedAsset struct {
+	properties *AssetProperties // identifying metadata (name, category, service)
+	labels     AssetLabels      // key/value labels attached to the asset
+	window     Window           // coverage window; also defines Start/End
+	Cost       float64          // the shared cost
+}
+
+// NewSharedAsset creates and returns a new SharedAsset covering a clone of
+// the given window.
+func NewSharedAsset(name string, window Window) *SharedAsset {
+	properties := &AssetProperties{
+		Name:     name,
+		Category: SharedCategory,
+		// NOTE(review): Service is assigned OtherCategory, which by its name
+		// looks like a Category constant rather than a Service — confirm
+		// this is intentional.
+		Service: OtherCategory,
+	}
+
+	return &SharedAsset{
+		properties: properties,
+		labels:     AssetLabels{},
+		window:     window.Clone(),
+	}
+}
+
+// Type returns the AssetType of the Asset
+func (sa *SharedAsset) Type() AssetType {
+	return SharedAssetType
+}
+
+// Properties returns the Asset's properties
+func (sa *SharedAsset) Properties() *AssetProperties {
+	return sa.properties
+}
+
+// SetProperties sets the Asset's properties
+func (sa *SharedAsset) SetProperties(props *AssetProperties) {
+	sa.properties = props
+}
+
+// Labels returns the Asset's labels
+func (sa *SharedAsset) Labels() AssetLabels {
+	return sa.labels
+}
+
+// SetLabels sets the Asset's labels
+func (sa *SharedAsset) SetLabels(labels AssetLabels) {
+	sa.labels = labels
+}
+
+// Adjustment always returns 0.0; adjustments are not relevant to
+// SharedAsset, but the method is required to implement Asset.
+func (sa *SharedAsset) Adjustment() float64 {
+	return 0.0
+}
+
+// SetAdjustment is a no-op; adjustments are not relevant to SharedAsset,
+// but the method is required to implement Asset. (The redundant bare
+// `return` was removed — staticcheck S1023.)
+func (sa *SharedAsset) SetAdjustment(float64) {}
+
+// TotalCost returns the Asset's total cost (Cost alone; a SharedAsset
+// carries no adjustment).
+func (sa *SharedAsset) TotalCost() float64 {
+	return sa.Cost
+}
+
+// Start returns the start time of the Asset, taken from its window.
+// NOTE(review): dereferences window.start — assumes the window is closed
+// (non-nil start); confirm callers never construct an open window here.
+func (sa *SharedAsset) Start() time.Time {
+	return *sa.window.start
+}
+
+// End returns the end time of the Asset, taken from its window.
+// NOTE(review): dereferences window.end — assumes a non-nil end.
+func (sa *SharedAsset) End() time.Time {
+	return *sa.window.end
+}
+
+// Minutes returns the number of minutes the SharedAsset ran, i.e. the full
+// length of its window.
+func (sa *SharedAsset) Minutes() float64 {
+	return sa.window.Minutes()
+}
+
+// Window returns the window within which the SharedAsset ran
+func (sa *SharedAsset) Window() Window {
+	return sa.window
+}
+
+// ExpandWindow expands the Asset's window by the given window, i.e. the
+// resulting window covers both the original and the given one.
+func (sa *SharedAsset) ExpandWindow(w Window) {
+	sa.window = sa.window.Expand(w)
+}
+
+// SetStartEnd is a no-op: a SharedAsset's start and end are defined entirely
+// by its window, so there are no separate fields to set. Required to
+// implement Asset. (The redundant bare `return` was removed — staticcheck
+// S1023.)
+func (sa *SharedAsset) SetStartEnd(start, end time.Time) {}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (sa *SharedAsset) Add(a Asset) Asset {
+	// SharedAsset + SharedAsset = SharedAsset
+	if that, ok := a.(*SharedAsset); ok {
+		sum := sa.Clone().(*SharedAsset)
+		sum.add(that)
+		return sum
+	}
+
+	// SharedAsset + !SharedAsset = Any, spanning the union of both assets'
+	// ranges, windows, properties, and labels, with costs summed.
+	start, end := sa.Start(), sa.End()
+	if s := a.Start(); s.Before(start) {
+		start = s
+	}
+	if e := a.End(); e.After(end) {
+		end = e
+	}
+
+	any := NewAsset(start, end, sa.Window().Expand(a.Window()))
+	any.SetProperties(sa.Properties().Merge(a.Properties()))
+	any.SetLabels(sa.Labels().Merge(a.Labels()))
+	any.adjustment = sa.Adjustment() + a.Adjustment()
+	any.Cost = (sa.TotalCost() - sa.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+// add accumulates the given SharedAsset into the receiver: properties and
+// labels are merged, the window is expanded to cover both, and Cost is
+// summed.
+func (sa *SharedAsset) add(that *SharedAsset) {
+	// A nil receiver cannot be mutated in place. The previous `sa = that`
+	// only rebound the local pointer (staticcheck SA4006) and had no effect
+	// on the caller, so simply return.
+	if sa == nil {
+		return
+	}
+
+	props := sa.Properties().Merge(that.Properties())
+	labels := sa.Labels().Merge(that.Labels())
+	sa.SetProperties(props)
+	sa.SetLabels(labels)
+
+	window := sa.Window().Expand(that.Window())
+	sa.window = window
+
+	sa.Cost += that.Cost
+}
+
+// Clone returns a deep copy of the given SharedAsset; a nil receiver yields nil.
+func (sa *SharedAsset) Clone() Asset {
+	if sa == nil {
+		return nil
+	}
+
+	// Copy the value fields wholesale, then deep-copy the reference fields.
+	clone := *sa
+	clone.properties = sa.properties.Clone()
+	clone.labels = sa.labels.Clone()
+	clone.window = sa.window.Clone()
+	return &clone
+}
+
+// Equal returns true if the two Assets are exact matches
+func (sa *SharedAsset) Equal(a Asset) bool {
+	that, ok := a.(*SharedAsset)
+	if !ok {
+		return false
+	}
+
+	return sa.Labels().Equal(that.Labels()) &&
+		sa.Properties().Equal(that.Properties()) &&
+		sa.Start().Equal(that.Start()) &&
+		sa.End().Equal(that.End()) &&
+		sa.window.Equal(that.window) &&
+		sa.Cost == that.Cost
+}
+
+// MarshalJSON implements json.Marshaler
+// Fields are written manually into the buffer to fix the key order; the
+// final "totalCost" entry takes no trailing comma. (The previous version
+// encoded "properties" and "labels" twice, emitting duplicate keys in the
+// JSON object.)
+func (sa *SharedAsset) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", sa.Type().String(), ",")
+	jsonEncode(buffer, "properties", sa.Properties(), ",")
+	jsonEncode(buffer, "labels", sa.Labels(), ",")
+	jsonEncodeString(buffer, "window", sa.Window().String(), ",")
+	jsonEncodeString(buffer, "start", sa.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", sa.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", sa.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", sa.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
+// String implements fmt.Stringer by delegating to the package's toString helper.
+func (sa *SharedAsset) String() string {
+	return toString(sa)
+}
+
+// AssetSet stores a set of Assets, each with a unique name, that share
+// a window. An AssetSet is mutable, so treat it like a threadsafe map.
+type AssetSet struct {
+	sync.RWMutex
+	assets   map[string]Asset // assets keyed by their property-derived key
+	props    []AssetProperty  // properties last used to key/aggregate the set
+	Window   Window           // the window shared by every asset in the set
+	Warnings []string         // warning messages accumulated for this set
+	Errors   []string         // error messages accumulated for this set
+}
+
+// NewAssetSet instantiates a new AssetSet over the given window and,
+// optionally, inserts the given list of Assets.
+func NewAssetSet(start, end time.Time, assets ...Asset) *AssetSet {
+	set := &AssetSet{
+		assets: map[string]Asset{},
+		Window: NewWindow(&start, &end),
+	}
+
+	for _, asset := range assets {
+		set.Insert(asset)
+	}
+
+	return set
+}
+
+// AggregateBy aggregates the Assets in the AssetSet by the given list of
+// AssetProperties, such that each asset is binned by a key determined by its
+// relevant property values. Shared hourly costs from opts are materialized
+// as SharedAssets, and assets failing any opts.FilterFuncs are dropped from
+// the set before aggregation.
+func (as *AssetSet) AggregateBy(props []AssetProperty, opts *AssetAggregationOptions) error {
+	if opts == nil {
+		opts = &AssetAggregationOptions{}
+	}
+
+	// Nothing to aggregate and no shared costs to synthesize: done.
+	if as.IsEmpty() && len(opts.SharedHourlyCosts) == 0 {
+		return nil
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	// aggSet receives the assets re-keyed by props; Insert performs the
+	// actual binning/summing.
+	aggSet := NewAssetSet(as.Start(), as.End())
+	aggSet.props = props
+
+	// Compute hours of the given AssetSet, and if it ends in the future,
+	// adjust the hours accordingly (diff is negative in that case, so this
+	// shortens hours to the elapsed portion of the window).
+	hours := as.Window.Minutes() / 60.0
+	diff := time.Now().Sub(as.End())
+	if diff < 0.0 {
+		hours += diff.Hours()
+	}
+
+	// Insert a shared asset for each shared cost
+	for name, hourlyCost := range opts.SharedHourlyCosts {
+		sa := NewSharedAsset(name, as.Window.Clone())
+		sa.Cost = hourlyCost * hours
+
+		aggSet.Insert(sa)
+	}
+
+	// Delete the Assets that don't pass each filter
+	// NOTE(review): this mutates as.assets in place before aggregation.
+	for _, ff := range opts.FilterFuncs {
+		for key, asset := range as.assets {
+			if !ff(asset) {
+				delete(as.assets, key)
+			}
+		}
+	}
+
+	// Insert each asset into the new set, which will be keyed by the props
+	// on aggSet, resulting in aggregation.
+	for _, asset := range as.assets {
+		aggSet.Insert(asset)
+	}
+
+	// Assign the aggregated values back to the original set
+	as.assets = aggSet.assets
+	as.props = props
+
+	return nil
+}
+
+// Clone returns a new AssetSet containing deep copies of this AssetSet's
+// assets, window, and aggregation properties. Safe for concurrent use.
+func (as *AssetSet) Clone() *AssetSet {
+	if as == nil {
+		return nil
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	assets := make(map[string]Asset, len(as.assets))
+	for key, asset := range as.assets {
+		assets[key] = asset.Clone()
+	}
+
+	// Preserve nil-ness: a nil props stays nil, a non-nil props is copied.
+	var props []AssetProperty
+	if as.props != nil {
+		props = append([]AssetProperty{}, as.props...)
+	}
+
+	start, end := as.Start(), as.End()
+	return &AssetSet{
+		Window: NewWindow(&start, &end),
+		assets: assets,
+		props:  props,
+	}
+}
+
+// Each invokes the given function once for each (key, Asset) pair in the set.
+// A nil receiver is a no-op.
+// NOTE(review): unlike Get/FindMatch/AggregateBy, this does not take the
+// RWMutex — confirm callers hold the lock or tolerate concurrent mutation.
+func (as *AssetSet) Each(f func(string, Asset)) {
+	if as == nil {
+		return
+	}
+
+	for k, a := range as.assets {
+		f(k, a)
+	}
+}
+
+// End returns the end time of the AssetSet's window.
+// NOTE(review): dereferences Window.End() — assumes a closed window.
+func (as *AssetSet) End() time.Time {
+	return *as.Window.End()
+}
+
+// FindMatch attempts to find a match in the AssetSet for the given Asset on
+// the provided properties and labels. If a match is not found, FindMatch
+// returns nil and a Not Found error. The scan is linear over all assets.
+func (as *AssetSet) FindMatch(query Asset, props []AssetProperty) (Asset, error) {
+	as.RLock()
+	defer as.RUnlock()
+
+	// Two assets match when their property-derived keys are equal.
+	matchKey := key(query, props)
+	for _, asset := range as.assets {
+		if key(asset, props) == matchKey {
+			return asset, nil
+		}
+	}
+
+	return nil, fmt.Errorf("Asset not found to match %s on %v", query, props)
+}
+
+// ReconciliationMatch looks for an exact (Category, ProviderID) match in
+// the AssetSet; if found, it is returned with true, signaling the intent
+// to adjust it. Otherwise it falls back to a (ProviderID)-only match,
+// returned with false, signaling the intent to insert the associated
+// Cloud cost. When neither matches, an error is returned.
+func (as *AssetSet) ReconciliationMatch(query Asset) (Asset, bool, error) {
+	as.RLock()
+	defer as.RUnlock()
+
+	// Full match: (Category, ProviderID)
+	fullProps := []AssetProperty{AssetCategoryProp, AssetProviderIDProp}
+	fullKey := key(query, fullProps)
+
+	// Partial match: (ProviderID) only
+	partialProps := []AssetProperty{AssetProviderIDProp}
+	partialKey := key(query, partialProps)
+
+	var partialMatch Asset
+	for _, candidate := range as.assets {
+		if key(candidate, fullProps) == fullKey {
+			return candidate, true, nil
+		}
+		if key(candidate, partialProps) == partialKey {
+			// Remember the partial match, but keep scanning in case a
+			// full match appears later.
+			partialMatch = candidate
+		}
+	}
+
+	if partialMatch != nil {
+		return partialMatch, false, nil
+	}
+
+	return nil, false, fmt.Errorf("Asset not found to match %s", query)
+}
+
+// Get looks up the Asset stored under the given key, reporting whether
+// one was found.
+func (as *AssetSet) Get(key string) (Asset, bool) {
+	as.RLock()
+	defer as.RUnlock()
+
+	asset, ok := as.assets[key]
+	if !ok {
+		return nil, false
+	}
+	return asset, true
+}
+
+// Insert adds the given Asset to the AssetSet under the key derived from
+// the set's configured properties. If an Asset already exists under that
+// key, the two are combined with Add; otherwise the Asset is stored
+// directly. The stored Asset's window is expanded to the set's window.
+//
+// Fix: the previous implementation checked IsEmpty (which takes the read
+// lock) and initialized the map under a separate write lock before
+// re-acquiring the write lock for the insertion, leaving a gap between
+// check and act in which another goroutine could interleave. The map is
+// now lazily initialized under the same write lock that guards the write.
+func (as *AssetSet) Insert(asset Asset) error {
+	as.Lock()
+	defer as.Unlock()
+
+	if as.assets == nil {
+		as.assets = map[string]Asset{}
+	}
+
+	// Determine key into which to Insert the Asset.
+	k := key(asset, as.props)
+
+	// Add the given Asset to the existing entry, if there is one;
+	// otherwise just set directly into assets
+	if existing, ok := as.assets[k]; ok {
+		as.assets[k] = existing.Add(asset)
+	} else {
+		as.assets[k] = asset
+	}
+
+	// Expand the window, just to be safe. It's possible that the asset will
+	// be set into the map without expanding it to the AssetSet's window.
+	as.assets[k].ExpandWindow(as.Window)
+
+	return nil
+}
+
+// IsEmpty returns true if the AssetSet is nil, or if it contains
+// zero assets.
+//
+// Fix: the previous implementation read len(as.assets) before acquiring
+// the read lock, racing with concurrent writers such as Insert. The map
+// length is now only read while the lock is held (len of a nil map is 0,
+// so the separate nil-map check was redundant).
+func (as *AssetSet) IsEmpty() bool {
+	if as == nil {
+		return true
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+	return len(as.assets) == 0
+}
+
+// Length reports the number of Assets in the set; a nil set has
+// length zero.
+func (as *AssetSet) Length() int {
+	if as == nil {
+		return 0
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	return len(as.assets)
+}
+
+// Map returns a deep-copied map of the AssetSet's Assets; an empty or
+// nil set yields an empty, non-nil map.
+func (as *AssetSet) Map() map[string]Asset {
+	if as.IsEmpty() {
+		return map[string]Asset{}
+	}
+	return as.Clone().assets
+}
+
+// MarshalJSON renders the AssetSet's assets as JSON while holding the
+// read lock.
+func (as *AssetSet) MarshalJSON() ([]byte, error) {
+	as.RLock()
+	defer as.RUnlock()
+
+	return json.Marshal(as.assets)
+}
+
+// Set stores the given Asset under the key derived from the given
+// properties, overwriting any existing entry, after expanding the
+// Asset's window to the set's window.
+//
+// Fix: like Insert, the previous implementation initialized the backing
+// map under a separate lock acquisition from the one guarding the write,
+// racing between the emptiness check and the initialization. The map is
+// now initialized under the same write lock.
+func (as *AssetSet) Set(asset Asset, props []AssetProperty) {
+	as.Lock()
+	defer as.Unlock()
+
+	if as.assets == nil {
+		as.assets = map[string]Asset{}
+	}
+
+	// Expand the window to match the AssetSet, then set it
+	asset.ExpandWindow(as.Window)
+	as.assets[key(asset, props)] = asset
+}
+
+// Start returns the start time of the AssetSet's window.
+// NOTE(review): dereferences Window.Start() without a nil check — confirm
+// callers only invoke this on sets built with a closed window.
+func (as *AssetSet) Start() time.Time {
+	return *as.Window.Start()
+}
+
+// TotalCost sums the TotalCost of every Asset in the set.
+//
+// Fix: this is a read-only operation, but the previous implementation
+// took the exclusive write lock, needlessly blocking concurrent readers.
+// It now takes the read lock, consistent with the set's other readers.
+func (as *AssetSet) TotalCost() float64 {
+	as.RLock()
+	defer as.RUnlock()
+
+	tc := 0.0
+	for _, a := range as.assets {
+		tc += a.TotalCost()
+	}
+
+	return tc
+}
+
+// UTCOffset returns the zone offset of the set's start time as a
+// Duration.
+func (as *AssetSet) UTCOffset() time.Duration {
+	_, seconds := as.Start().Zone()
+	return time.Duration(seconds) * time.Second
+}
+
+// accumulate merges this AssetSet with that one into a single new
+// AssetSet spanning the union of their windows, combining assets that
+// share a key via Insert. Either side being nil returns the other
+// unchanged; both being empty returns a fresh empty set over the
+// combined window.
+func (as *AssetSet) accumulate(that *AssetSet) (*AssetSet, error) {
+	if as == nil {
+		return that, nil
+	}
+
+	if that == nil {
+		return as, nil
+	}
+
+	// In the case of an AssetSetRange with empty entries, we may end up with
+	// an incoming as without props, even though we are trying to aggregate
+	// by props. This handles that case, assigning the correct props.
+	if !propsEqual(as.props, that.props) {
+		if len(as.props) == 0 {
+			as.props = that.props
+		}
+	}
+
+	// Set start, end to min(start), max(end)
+	start := as.Start()
+	end := as.End()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	if that.End().After(end) {
+		end = that.End()
+	}
+
+	if as.IsEmpty() && that.IsEmpty() {
+		return NewAssetSet(start, end), nil
+	}
+
+	acc := NewAssetSet(start, end)
+	acc.props = as.props
+
+	// Hold read locks on both inputs while copying into acc. acc.Insert
+	// takes acc's own, distinct write lock, so this does not deadlock.
+	as.RLock()
+	defer as.RUnlock()
+
+	that.RLock()
+	defer that.RUnlock()
+
+	for _, asset := range as.assets {
+		err := acc.Insert(asset)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	for _, asset := range that.assets {
+		err := acc.Insert(asset)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return acc, nil
+}
+
+// AssetSetRange is an ordered series of AssetSets, guarded by an
+// embedded read/write mutex.
+type AssetSetRange struct {
+	sync.RWMutex
+	assets []*AssetSet
+}
+
+// NewAssetSetRange builds an AssetSetRange from the given AssetSets,
+// preserving their order.
+func NewAssetSetRange(assets ...*AssetSet) *AssetSetRange {
+	asr := &AssetSetRange{}
+	asr.assets = assets
+	return asr
+}
+
+// Accumulate folds every AssetSet in the range into a single cumulative
+// AssetSet covering the entire range.
+func (asr *AssetSetRange) Accumulate() (*AssetSet, error) {
+	asr.RLock()
+	defer asr.RUnlock()
+
+	var result *AssetSet
+	for _, as := range asr.assets {
+		var err error
+		result, err = result.accumulate(as)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return result, nil
+}
+
+// AssetAggregationOptions tunes AssetSet.AggregateBy: each entry of
+// SharedHourlyCosts inserts a SharedAsset priced at the hourly cost times
+// the window's hours, and FilterFuncs removes assets failing any
+// predicate before aggregation.
+type AssetAggregationOptions struct {
+	SharedHourlyCosts map[string]float64
+	FilterFuncs       []AssetMatchFunc
+}
+
+// AggregateBy aggregates each AssetSet in the range by the given
+// properties and options, mutating the range in place. It stops at the
+// first error, leaving earlier sets aggregated.
+//
+// Fix: the previous implementation copied the unchanged *AssetSet
+// pointers into a temporary AssetSetRange and assigned the slice back —
+// pure busywork, since each set is aggregated in place. The intermediate
+// slice is removed; observable behavior is unchanged.
+func (asr *AssetSetRange) AggregateBy(props []AssetProperty, opts *AssetAggregationOptions) error {
+	asr.Lock()
+	defer asr.Unlock()
+
+	for _, as := range asr.assets {
+		if err := as.AggregateBy(props, opts); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Append adds the given AssetSet to the end of the range.
+func (asr *AssetSetRange) Append(that *AssetSet) {
+	asr.Lock()
+	defer asr.Unlock()
+	asr.assets = append(asr.assets, that)
+}
+
+// Each calls f with the index and value of every AssetSet in the range.
+// A nil range is a no-op.
+// NOTE(review): iterates without holding the lock, like AssetSet.Each —
+// callers must not mutate the range concurrently.
+func (asr *AssetSetRange) Each(f func(int, *AssetSet)) {
+	if asr == nil {
+		return
+	}
+	for idx, set := range asr.assets {
+		f(idx, set)
+	}
+}
+
+// Get returns the AssetSet at index i, or an error when i is out of
+// range.
+//
+// Fix: the previous implementation performed the bounds check on
+// len(asr.assets) before acquiring the read lock, racing with concurrent
+// Appends. The check now happens while the lock is held.
+func (asr *AssetSetRange) Get(i int) (*AssetSet, error) {
+	asr.RLock()
+	defer asr.RUnlock()
+
+	if i < 0 || i >= len(asr.assets) {
+		return nil, fmt.Errorf("AssetSetRange: index out of range: %d", i)
+	}
+	return asr.assets[i], nil
+}
+
+// Length reports the number of AssetSets in the range; a nil range has
+// length zero.
+//
+// Fix: the previous implementation read asr.assets before acquiring the
+// read lock. The nil-receiver check needs no lock, but the slice itself
+// is now only read while the lock is held (len of a nil slice is 0, so
+// the separate nil-slice check was redundant anyway).
+func (asr *AssetSetRange) Length() int {
+	if asr == nil {
+		return 0
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+	return len(asr.assets)
+}
+
+// MarshalJSON renders the range's AssetSets as JSON while holding the
+// read lock.
+//
+// Fix: the previous implementation called asr.RUnlock() immediately
+// (the defer keyword was missing), releasing the lock before
+// json.Marshal ran and leaving the marshal unguarded against concurrent
+// writers.
+func (asr *AssetSetRange) MarshalJSON() ([]byte, error) {
+	asr.RLock()
+	defer asr.RUnlock()
+	return json.Marshal(asr.assets)
+}
+
+// UTCOffset returns the zone offset of the first AssetSet in the range,
+// or 0 when the range is empty.
+func (asr *AssetSetRange) UTCOffset() time.Duration {
+	if asr.Length() == 0 {
+		return 0
+	}
+
+	first, err := asr.Get(0)
+	if err != nil {
+		return 0
+	}
+	return first.UTCOffset()
+}
+
+// Window returns the full window that the AssetSetRange spans, from the
+// start of the first AssetSet to the end of the last one. An empty or
+// nil range yields an open window.
+//
+// Fix: the previous implementation indexed asr.assets without holding
+// the lock (racing with Append), and called Length() — which takes the
+// read lock — before reading the slice, a recursive RLock pattern that
+// can deadlock when a writer is queued in between. The slice is now
+// read under a single read lock.
+func (asr *AssetSetRange) Window() Window {
+	if asr == nil {
+		return NewWindow(nil, nil)
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+
+	if len(asr.assets) == 0 {
+		return NewWindow(nil, nil)
+	}
+
+	start := asr.assets[0].Start()
+	end := asr.assets[len(asr.assets)-1].End()
+
+	return NewWindow(&start, &end)
+}
+
+// TODO move everything below to a separate package
+
+// jsonEncodeFloat64 writes `"name":val` (followed by the given comma
+// string) into buffer, encoding NaN — which has no JSON representation —
+// as null.
+//
+// Fix: ±Inf also has no JSON representation, and %f renders it as
+// "+Inf"/"-Inf", corrupting the output; infinities are now encoded as
+// null alongside NaN.
+func jsonEncodeFloat64(buffer *bytes.Buffer, name string, val float64, comma string) {
+	var encoding string
+	if math.IsNaN(val) || math.IsInf(val, 0) {
+		encoding = fmt.Sprintf("\"%s\":null%s", name, comma)
+	} else {
+		encoding = fmt.Sprintf("\"%s\":%f%s", name, val, comma)
+	}
+
+	buffer.WriteString(encoding)
+}
+
+// jsonEncodeString writes `"name":"val"` (followed by the given comma
+// string) into buffer.
+//
+// Fix: val was interpolated raw, so a value containing a quote or
+// backslash produced invalid JSON; the value is now encoded with
+// json.Marshal, which escapes it properly. Names are internal
+// identifiers and remain interpolated as before.
+func jsonEncodeString(buffer *bytes.Buffer, name, val, comma string) {
+	encoded, err := json.Marshal(val)
+	if err != nil {
+		// Marshaling a string cannot fail; fall back to null defensively.
+		encoded = []byte("null")
+	}
+	buffer.WriteString(fmt.Sprintf("\"%s\":%s%s", name, encoded, comma))
+}
+
+// jsonEncode writes `"name":<json(obj)>` (followed by the given comma
+// string) into buffer, emitting null when obj cannot be marshaled.
+//
+// Fix: the marshal result was bound to a variable named "bytes",
+// shadowing the bytes package inside the if statement; renamed to raw.
+func jsonEncode(buffer *bytes.Buffer, name string, obj interface{}, comma string) {
+	buffer.WriteString(fmt.Sprintf("\"%s\":", name))
+	if raw, err := json.Marshal(obj); err != nil {
+		buffer.WriteString("null")
+	} else {
+		buffer.Write(raw)
+	}
+	buffer.WriteString(comma)
+}

+ 1013 - 0
pkg/kubecost/asset_test.go

@@ -0,0 +1,1013 @@
+package kubecost
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"testing"
+	"time"
+)
+
+// Three consecutive day-long windows starting 2020-01-01 UTC.
+var start1 = time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
+var start2 = start1.Add(day)
+var start3 = start2.Add(day)
+
+// Fix: start4 was derived from start2 (copy-paste), making it equal to
+// start3 and collapsing windows[2] to a zero-length window.
+var start4 = start3.Add(day)
+
+var windows = []Window{
+	NewWindow(&start1, &start2),
+	NewWindow(&start2, &start3),
+	NewWindow(&start3, &start4),
+}
+
+// delta is the tolerance used by approx for float comparisons;
+// gb is one gibibyte in bytes.
+const delta = 0.00001
+const gb = 1024 * 1024 * 1024
+
+// approx reports whether a and b lie within delta of each other.
+func approx(a, b, delta float64) bool {
+	return math.Abs(a-b) < delta
+}
+
+// TestAny_Add verifies that adding two generic assets sums cost and
+// adjustment, keeps the shared cluster, clears the differing name and
+// providerID, and leaves both operands unmodified.
+func TestAny_Add(t *testing.T) {
+	any1 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any1.SetProperties(&AssetProperties{
+		Name:       "any1",
+		Cluster:    "cluster1",
+		ProviderID: "any1",
+	})
+	any1.Cost = 9.0
+	any1.SetAdjustment(1.0)
+
+	any2 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any2.SetProperties(&AssetProperties{
+		Name:       "any2",
+		Cluster:    "cluster1",
+		ProviderID: "any2",
+	})
+	any2.Cost = 4.0
+	any2.SetAdjustment(1.0)
+
+	any3 := any1.Add(any2)
+
+	// Check that the sums and properties are correct
+	if any3.TotalCost() != 15.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 15.0, any3.TotalCost())
+	}
+	if any3.Adjustment() != 2.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 2.0, any3.Adjustment())
+	}
+	if any3.Properties().Cluster != "cluster1" {
+		t.Fatalf("Any.Add: expected %s; got %s", "cluster1", any3.Properties().Cluster)
+	}
+	if any3.Type() != AnyAssetType {
+		t.Fatalf("Any.Add: expected %s; got %s", AnyAssetType, any3.Type())
+	}
+	if any3.Properties().ProviderID != "" {
+		t.Fatalf("Any.Add: expected %s; got %s", "", any3.Properties().ProviderID)
+	}
+	if any3.Properties().Name != "" {
+		t.Fatalf("Any.Add: expected %s; got %s", "", any3.Properties().Name)
+	}
+
+	// Check that the original assets are unchanged
+	if any1.TotalCost() != 10.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 10.0, any1.TotalCost())
+	}
+	if any1.Adjustment() != 1.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 1.0, any1.Adjustment())
+	}
+	if any2.TotalCost() != 5.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 5.0, any2.TotalCost())
+	}
+	if any2.Adjustment() != 1.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 1.0, any2.Adjustment())
+	}
+}
+
+// TestAny_Clone verifies that a cloned generic asset is detached from
+// its source: later mutations of the original do not leak into the
+// clone.
+func TestAny_Clone(t *testing.T) {
+	any1 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any1.SetProperties(&AssetProperties{
+		Name:       "any1",
+		Cluster:    "cluster1",
+		ProviderID: "any1",
+	})
+	any1.Cost = 9.0
+	any1.SetAdjustment(1.0)
+
+	any2 := any1.Clone()
+
+	// Mutate the original after cloning.
+	any1.Cost = 18.0
+	any1.SetAdjustment(2.0)
+
+	// any2 should match any1, even after mutating any1
+	if any2.TotalCost() != 10.0 {
+		t.Fatalf("Any.Clone: expected %f; got %f", 10.0, any2.TotalCost())
+	}
+	if any2.Adjustment() != 1.0 {
+		t.Fatalf("Any.Clone: expected %f; got %f", 1.0, any2.Adjustment())
+	}
+}
+
+// TestAny_MarshalJSON verifies that a generic asset marshals to JSON
+// without error, including when its Cost is NaN (which json.Marshal
+// would reject for a raw float).
+func TestAny_MarshalJSON(t *testing.T) {
+	any1 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any1.SetProperties(&AssetProperties{
+		Name:       "any1",
+		Cluster:    "cluster1",
+		ProviderID: "any1",
+	})
+	any1.Cost = 9.0
+	any1.SetAdjustment(1.0)
+
+	_, err := json.Marshal(any1)
+	if err != nil {
+		t.Fatalf("Any.MarshalJSON: unexpected error: %s", err)
+	}
+
+	// NaN cost must still marshal cleanly.
+	any2 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any2.SetProperties(&AssetProperties{
+		Name:       "any2",
+		Cluster:    "cluster1",
+		ProviderID: "any2",
+	})
+	any2.Cost = math.NaN()
+	any2.SetAdjustment(1.0)
+
+	_, err = json.Marshal(any2)
+	if err != nil {
+		t.Fatalf("Any.MarshalJSON: unexpected error: %s", err)
+	}
+}
+
+// TestDisk_Add verifies Disk.Add in two modes: aggregating two disks
+// over the same window (bytes and Local fractions combine) and
+// accumulating one disk across consecutive windows (bytes do not grow).
+// It also checks that operands are left unmodified and that adding two
+// zero-byte-hours disks does not divide by zero when averaging Local.
+//
+// Fix: three failure messages printed the wrong value — two reported
+// AnyAssetType as the expected type instead of DiskAssetType, and one
+// printed diskT.Bytes() while asserting on diskAT.Bytes().
+func TestDisk_Add(t *testing.T) {
+	// 1. aggregate: add size, local
+	// 2. accumulate: don't add size, local
+
+	hours := windows[0].Duration().Hours()
+
+	// Aggregate: two disks, one window
+	disk1 := NewDisk("disk1", "cluster1", "disk1", *windows[0].start, *windows[0].end, windows[0])
+	disk1.ByteHours = 100.0 * gb * hours
+	disk1.Cost = 9.0
+	disk1.SetAdjustment(1.0)
+
+	if disk1.Bytes() != 100.0*gb {
+		t.Fatalf("Disk.Add: expected %f; got %f", 100.0*gb, disk1.Bytes())
+	}
+
+	disk2 := NewDisk("disk2", "cluster1", "disk2", *windows[0].start, *windows[0].end, windows[0])
+	disk2.ByteHours = 60.0 * gb * hours
+	disk2.Cost = 4.0
+	disk2.Local = 1.0
+	disk2.SetAdjustment(1.0)
+
+	if disk2.Bytes() != 60.0*gb {
+		t.Fatalf("Disk.Add: expected %f; got %f", 60.0*gb, disk2.Bytes())
+	}
+
+	diskT := disk1.Add(disk2).(*Disk)
+
+	// Check that the sums and properties are correct
+	if diskT.TotalCost() != 15.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 15.0, diskT.TotalCost())
+	}
+	if diskT.Adjustment() != 2.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 2.0, diskT.Adjustment())
+	}
+	if diskT.Properties().Cluster != "cluster1" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "cluster1", diskT.Properties().Cluster)
+	}
+	if diskT.Type() != DiskAssetType {
+		t.Fatalf("Disk.Add: expected %s; got %s", DiskAssetType, diskT.Type())
+	}
+	if diskT.Properties().ProviderID != "" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "", diskT.Properties().ProviderID)
+	}
+	if diskT.Properties().Name != "" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "", diskT.Properties().Name)
+	}
+	if diskT.Bytes() != 160.0*gb {
+		t.Fatalf("Disk.Add: expected %f; got %f", 160.0*gb, diskT.Bytes())
+	}
+	if !approx(diskT.Local, 0.333333, delta) {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.333333, diskT.Local)
+	}
+
+	// Check that the original assets are unchanged
+	if disk1.TotalCost() != 10.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 10.0, disk1.TotalCost())
+	}
+	if disk1.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, disk1.Adjustment())
+	}
+	if disk1.Local != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, disk1.Local)
+	}
+	if disk2.TotalCost() != 5.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 5.0, disk2.TotalCost())
+	}
+	if disk2.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, disk2.Adjustment())
+	}
+	if disk2.Local != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, disk2.Local)
+	}
+
+	// Zero byte-hours: Local must average without dividing by zero.
+	disk3 := NewDisk("disk3", "cluster1", "disk3", *windows[0].start, *windows[0].end, windows[0])
+	disk3.ByteHours = 0.0 * hours
+	disk3.Cost = 0.0
+	disk3.Local = 0.0
+	disk3.SetAdjustment(0.0)
+
+	disk4 := NewDisk("disk4", "cluster1", "disk4", *windows[0].start, *windows[0].end, windows[0])
+	disk4.ByteHours = 0.0 * hours
+	disk4.Cost = 0.0
+	disk4.Local = 1.0
+	disk4.SetAdjustment(0.0)
+
+	diskT = disk3.Add(disk4).(*Disk)
+
+	if diskT.TotalCost() != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, diskT.TotalCost())
+	}
+	if diskT.Local != 0.5 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.5, diskT.Local)
+	}
+
+	// Accumulate: one disks, two windows
+	diskA1 := NewDisk("diskA1", "cluster1", "diskA1", *windows[0].start, *windows[0].end, windows[0])
+	diskA1.ByteHours = 100 * gb * hours
+	diskA1.Cost = 9.0
+	diskA1.SetAdjustment(1.0)
+
+	diskA2 := NewDisk("diskA2", "cluster1", "diskA2", *windows[1].start, *windows[1].end, windows[1])
+	diskA2.ByteHours = 100 * gb * hours
+	diskA2.Cost = 9.0
+	diskA2.SetAdjustment(1.0)
+
+	diskAT := diskA1.Add(diskA2).(*Disk)
+
+	// Check that the sums and properties are correct
+	if diskAT.TotalCost() != 20.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 20.0, diskAT.TotalCost())
+	}
+	if diskAT.Adjustment() != 2.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 2.0, diskAT.Adjustment())
+	}
+	if diskAT.Properties().Cluster != "cluster1" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "cluster1", diskAT.Properties().Cluster)
+	}
+	if diskAT.Type() != DiskAssetType {
+		t.Fatalf("Disk.Add: expected %s; got %s", DiskAssetType, diskAT.Type())
+	}
+	if diskAT.Properties().ProviderID != "" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "", diskAT.Properties().ProviderID)
+	}
+	if diskAT.Properties().Name != "" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "", diskAT.Properties().Name)
+	}
+	if diskAT.Bytes() != 100.0*gb {
+		t.Fatalf("Disk.Add: expected %f; got %f", 100.0*gb, diskAT.Bytes())
+	}
+	if diskAT.Local != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, diskAT.Local)
+	}
+
+	// Check that the original assets are unchanged
+	if diskA1.TotalCost() != 10.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 10.0, diskA1.TotalCost())
+	}
+	if diskA1.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, diskA1.Adjustment())
+	}
+	if diskA1.Local != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, diskA1.Local)
+	}
+	if diskA2.TotalCost() != 10.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 10.0, diskA2.TotalCost())
+	}
+	if diskA2.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, diskA2.Adjustment())
+	}
+	if diskA2.Local != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, diskA2.Local)
+	}
+}
+
+// TestDisk_Clone verifies that a cloned Disk is detached from its
+// source: mutating either side after cloning does not affect the other.
+//
+// Fix: the failure messages were mislabeled "Any.Clone" and "Disk.Add";
+// they now read "Disk.Clone" so failures point at the right test.
+func TestDisk_Clone(t *testing.T) {
+	disk1 := NewDisk("disk1", "cluster1", "disk1", *windows[0].start, *windows[0].end, windows[0])
+	disk1.Local = 0.0
+	disk1.Cost = 9.0
+	disk1.SetAdjustment(1.0)
+
+	disk2 := disk1.Clone().(*Disk)
+
+	// Mutate both sides after cloning.
+	disk2.Local = 1.0
+	disk1.Cost = 18.0
+	disk1.SetAdjustment(2.0)
+
+	// disk2 should retain disk1's pre-mutation values, plus its own Local
+	if disk2.TotalCost() != 10.0 {
+		t.Fatalf("Disk.Clone: expected %f; got %f", 10.0, disk2.TotalCost())
+	}
+	if disk2.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Clone: expected %f; got %f", 1.0, disk2.Adjustment())
+	}
+	if disk2.Local != 1.0 {
+		t.Fatalf("Disk.Clone: expected %f; got %f", 1.0, disk2.Local)
+	}
+}
+
+// TestDisk_MarshalJSON verifies that a labeled, adjusted Disk marshals
+// to JSON without error.
+func TestDisk_MarshalJSON(t *testing.T) {
+	disk := NewDisk("disk", "cluster", "providerID", *windows[0].start, *windows[0].end, windows[0])
+	disk.SetLabels(AssetLabels{
+		"label": "value",
+	})
+	disk.Cost = 9.0
+	disk.SetAdjustment(1.0)
+
+	_, err := json.Marshal(disk)
+	if err != nil {
+		t.Fatalf("Disk.MarshalJSON: unexpected error: %s", err)
+	}
+}
+
+// TestNode_Add verifies Node.Add in two modes: aggregating two nodes
+// over the same window (cores and RAM combine) and accumulating one
+// node across consecutive windows (resources do not grow). It also
+// checks that operands are left unmodified and that adding two
+// zero-resource nodes does not divide by zero when averaging Discount.
+//
+// Fixes: node2's breakdowns were mistakenly assigned to node1 (copy-
+// paste), silently overwriting node1's setup; two failure messages
+// printed AnyAssetType instead of NodeAssetType; and two printed an
+// expected adjustment of 1.0 while asserting 1.6.
+func TestNode_Add(t *testing.T) {
+	// 1. aggregate: add size, local
+	// 2. accumulate: don't add size, local
+
+	hours := windows[0].Duration().Hours()
+
+	// Aggregate: two nodes, one window
+	node1 := NewNode("node1", "cluster1", "node1", *windows[0].start, *windows[0].end, windows[0])
+	node1.CPUCoreHours = 1.0 * hours
+	node1.RAMByteHours = 2.0 * gb * hours
+	node1.GPUCost = 0.0
+	node1.CPUCost = 8.0
+	node1.RAMCost = 4.0
+	node1.Discount = 0.3
+	node1.CPUBreakdown = &Breakdown{
+		Idle:   0.6,
+		System: 0.2,
+		User:   0.2,
+		Other:  0.0,
+	}
+	node1.RAMBreakdown = &Breakdown{
+		Idle:   0.6,
+		System: 0.2,
+		User:   0.2,
+		Other:  0.0,
+	}
+	node1.SetAdjustment(1.6)
+
+	node2 := NewNode("node2", "cluster1", "node2", *windows[0].start, *windows[0].end, windows[0])
+	node2.CPUCoreHours = 1.0 * hours
+	node2.RAMByteHours = 2.0 * gb * hours
+	node2.GPUCost = 0.0
+	node2.CPUCost = 3.0
+	node2.RAMCost = 1.0
+	node2.Discount = 0.0
+	node2.CPUBreakdown = &Breakdown{
+		Idle:   0.9,
+		System: 0.05,
+		User:   0.0,
+		Other:  0.05,
+	}
+	node2.RAMBreakdown = &Breakdown{
+		Idle:   0.9,
+		System: 0.05,
+		User:   0.0,
+		Other:  0.05,
+	}
+	node2.SetAdjustment(1.0)
+
+	nodeT := node1.Add(node2).(*Node)
+
+	// Check that the sums and properties are correct
+	if !approx(nodeT.TotalCost(), 15.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 15.0, nodeT.TotalCost())
+	}
+	if nodeT.Adjustment() != 2.6 {
+		t.Fatalf("Node.Add: expected %f; got %f", 2.6, nodeT.Adjustment())
+	}
+	if nodeT.Properties().Cluster != "cluster1" {
+		t.Fatalf("Node.Add: expected %s; got %s", "cluster1", nodeT.Properties().Cluster)
+	}
+	if nodeT.Type() != NodeAssetType {
+		t.Fatalf("Node.Add: expected %s; got %s", NodeAssetType, nodeT.Type())
+	}
+	if nodeT.Properties().ProviderID != "" {
+		t.Fatalf("Node.Add: expected %s; got %s", "", nodeT.Properties().ProviderID)
+	}
+	if nodeT.Properties().Name != "" {
+		t.Fatalf("Node.Add: expected %s; got %s", "", nodeT.Properties().Name)
+	}
+	if nodeT.CPUCores() != 2.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 2.0, nodeT.CPUCores())
+	}
+	if nodeT.RAMBytes() != 4.0*gb {
+		t.Fatalf("Node.Add: expected %f; got %f", 4.0*gb, nodeT.RAMBytes())
+	}
+
+	// Check that the original assets are unchanged
+	if !approx(node1.TotalCost(), 10.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 10.0, node1.TotalCost())
+	}
+	if node1.Adjustment() != 1.6 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.6, node1.Adjustment())
+	}
+	if !approx(node2.TotalCost(), 5.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 5.0, node2.TotalCost())
+	}
+	if node2.Adjustment() != 1.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.0, node2.Adjustment())
+	}
+
+	// Check that we don't divide by zero computing Local
+	node3 := NewNode("node3", "cluster1", "node3", *windows[0].start, *windows[0].end, windows[0])
+	node3.CPUCoreHours = 0 * hours
+	node3.RAMByteHours = 0 * hours
+	node3.GPUCost = 0
+	node3.CPUCost = 0.0
+	node3.RAMCost = 0.0
+	node3.Discount = 0.3
+	node3.SetAdjustment(0.0)
+
+	node4 := NewNode("node4", "cluster1", "node4", *windows[0].start, *windows[0].end, windows[0])
+	node4.CPUCoreHours = 0 * hours
+	node4.RAMByteHours = 0 * hours
+	node4.GPUCost = 0
+	node4.CPUCost = 0.0
+	node4.RAMCost = 0.0
+	node4.Discount = 0.1
+	node4.SetAdjustment(0.0)
+
+	nodeT = node3.Add(node4).(*Node)
+
+	// Check that the sums and properties are correct and without NaNs
+	if nodeT.TotalCost() != 0.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 0.0, nodeT.TotalCost())
+	}
+	if nodeT.Discount != 0.2 {
+		t.Fatalf("Node.Add: expected %f; got %f", 0.2, nodeT.Discount)
+	}
+
+	// Accumulate: one nodes, two window
+	nodeA1 := NewNode("nodeA1", "cluster1", "nodeA1", *windows[0].start, *windows[0].end, windows[0])
+	nodeA1.CPUCoreHours = 1.0 * hours
+	nodeA1.RAMByteHours = 2.0 * gb * hours
+	nodeA1.GPUCost = 0.0
+	nodeA1.CPUCost = 8.0
+	nodeA1.RAMCost = 4.0
+	nodeA1.Discount = 0.3
+	nodeA1.SetAdjustment(1.6)
+
+	nodeA2 := NewNode("nodeA2", "cluster1", "nodeA2", *windows[1].start, *windows[1].end, windows[1])
+	nodeA2.CPUCoreHours = 1.0 * hours
+	nodeA2.RAMByteHours = 2.0 * gb * hours
+	nodeA2.GPUCost = 0.0
+	nodeA2.CPUCost = 3.0
+	nodeA2.RAMCost = 1.0
+	nodeA2.Discount = 0.0
+	nodeA2.SetAdjustment(1.0)
+
+	nodeAT := nodeA1.Add(nodeA2).(*Node)
+
+	// Check that the sums and properties are correct
+	if !approx(nodeAT.TotalCost(), 15.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 15.0, nodeAT.TotalCost())
+	}
+	if nodeAT.Adjustment() != 2.6 {
+		t.Fatalf("Node.Add: expected %f; got %f", 2.6, nodeAT.Adjustment())
+	}
+	if nodeAT.Properties().Cluster != "cluster1" {
+		t.Fatalf("Node.Add: expected %s; got %s", "cluster1", nodeAT.Properties().Cluster)
+	}
+	if nodeAT.Type() != NodeAssetType {
+		t.Fatalf("Node.Add: expected %s; got %s", NodeAssetType, nodeAT.Type())
+	}
+	if nodeAT.Properties().ProviderID != "" {
+		t.Fatalf("Node.Add: expected %s; got %s", "", nodeAT.Properties().ProviderID)
+	}
+	if nodeAT.Properties().Name != "" {
+		t.Fatalf("Node.Add: expected %s; got %s", "", nodeAT.Properties().Name)
+	}
+	if nodeAT.CPUCores() != 1.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.0, nodeAT.CPUCores())
+	}
+	if nodeAT.RAMBytes() != 2.0*gb {
+		t.Fatalf("Node.Add: expected %f; got %f", 2.0*gb, nodeAT.RAMBytes())
+	}
+
+	// Check that the original assets are unchanged
+	if !approx(nodeA1.TotalCost(), 10.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 10.0, nodeA1.TotalCost())
+	}
+	if nodeA1.Adjustment() != 1.6 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.6, nodeA1.Adjustment())
+	}
+	if !approx(nodeA2.TotalCost(), 5.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 5.0, nodeA2.TotalCost())
+	}
+	if nodeA2.Adjustment() != 1.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.0, nodeA2.Adjustment())
+	}
+}
+
+// TestNode_Clone is a placeholder: Node.Clone coverage is still TODO.
+func TestNode_Clone(t *testing.T) {
+	// TODO
+}
+
+// TestNode_MarshalJSON verifies that a labeled, adjusted Node marshals
+// to JSON without error.
+func TestNode_MarshalJSON(t *testing.T) {
+	node := NewNode("node", "cluster", "providerID", *windows[0].start, *windows[0].end, windows[0])
+	node.SetLabels(AssetLabels{
+		"label": "value",
+	})
+	node.CPUCost = 9.0
+	node.RAMCost = 0.0
+	node.CPUCoreHours = 123.0
+	node.RAMByteHours = 13323.0
+	node.SetAdjustment(1.0)
+
+	_, err := json.Marshal(node)
+	if err != nil {
+		t.Fatalf("Node.MarshalJSON: unexpected error: %s", err)
+	}
+}
+
+// TestClusterManagement_Add verifies that adding two ClusterManagement
+// assets sums cost, preserves cluster and type, and leaves the operands
+// unmodified.
+func TestClusterManagement_Add(t *testing.T) {
+	cm1 := NewClusterManagement("gcp", "cluster1", windows[0])
+	cm1.Cost = 9.0
+
+	cm2 := NewClusterManagement("gcp", "cluster1", windows[0])
+	cm2.Cost = 4.0
+
+	cm3 := cm1.Add(cm2)
+
+	// Check that the sums and properties are correct
+	if cm3.TotalCost() != 13.0 {
+		t.Fatalf("ClusterManagement.Add: expected %f; got %f", 13.0, cm3.TotalCost())
+	}
+	if cm3.Properties().Cluster != "cluster1" {
+		t.Fatalf("ClusterManagement.Add: expected %s; got %s", "cluster1", cm3.Properties().Cluster)
+	}
+	if cm3.Type() != ClusterManagementAssetType {
+		t.Fatalf("ClusterManagement.Add: expected %s; got %s", ClusterManagementAssetType, cm3.Type())
+	}
+
+	// Check that the original assets are unchanged
+	if cm1.TotalCost() != 9.0 {
+		t.Fatalf("ClusterManagement.Add: expected %f; got %f", 9.0, cm1.TotalCost())
+	}
+	if cm2.TotalCost() != 4.0 {
+		t.Fatalf("ClusterManagement.Add: expected %f; got %f", 4.0, cm2.TotalCost())
+	}
+}
+
+// TestClusterManagement_Clone is a placeholder: ClusterManagement.Clone
+// coverage is still TODO.
+func TestClusterManagement_Clone(t *testing.T) {
+	// TODO
+}
+
+// TestCloudAny_Add verifies that adding two Cloud assets (of differing
+// categories) sums cost and adjustment, yields CloudAssetType, and
+// leaves the operands unmodified.
+//
+// Fix: every failure message was labeled "Any.Add" (copy-paste from
+// TestAny_Add); relabeled "CloudAny.Add" so failures point at the right
+// test.
+func TestCloudAny_Add(t *testing.T) {
+	ca1 := NewCloud(ComputeCategory, "ca1", *windows[0].start, *windows[0].end, windows[0])
+	ca1.Cost = 9.0
+	ca1.SetAdjustment(1.0)
+
+	ca2 := NewCloud(StorageCategory, "ca2", *windows[0].start, *windows[0].end, windows[0])
+	ca2.Cost = 4.0
+	ca2.SetAdjustment(1.0)
+
+	ca3 := ca1.Add(ca2)
+
+	// Check that the sums and properties are correct
+	if ca3.TotalCost() != 15.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 15.0, ca3.TotalCost())
+	}
+	if ca3.Adjustment() != 2.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 2.0, ca3.Adjustment())
+	}
+	if ca3.Type() != CloudAssetType {
+		t.Fatalf("CloudAny.Add: expected %s; got %s", CloudAssetType, ca3.Type())
+	}
+
+	// Check that the original assets are unchanged
+	if ca1.TotalCost() != 10.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 10.0, ca1.TotalCost())
+	}
+	if ca1.Adjustment() != 1.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 1.0, ca1.Adjustment())
+	}
+	if ca2.TotalCost() != 5.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 5.0, ca2.TotalCost())
+	}
+	if ca2.Adjustment() != 1.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 1.0, ca2.Adjustment())
+	}
+}
+
+// TestCloudAny_Clone is a placeholder: Cloud.Clone coverage is still
+// TODO.
+func TestCloudAny_Clone(t *testing.T) {
+	// TODO
+}
+
+// TestAssetSet_AggregateBy exercises AssetSet.AggregateBy across
+// single-property, multi-property, empty, and nil property lists, plus
+// shared hourly costs.
+//
+// Fix: scenario 3a passed the label "1a" to assertAssetSet (copy-paste
+// from scenario 1a), so its failures were misattributed; it now reports
+// as "3a".
+func TestAssetSet_AggregateBy(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+	window := NewWindow(&startYesterday, &endYesterday)
+
+	// Scenarios to test:
+
+	// 1  Single-aggregation
+	// 1a []AssetProperty=[Cluster]
+	// 1b []AssetProperty=[Type]
+	// 1c []AssetProperty=[Nil]
+	// 1d []AssetProperty=nil
+
+	// 2  Multi-aggregation
+	// 2a []AssetProperty=[Cluster,Type]
+
+	// 3  Share resources
+	// 3a Shared hourly cost > 0.0
+
+	// Definitions and set-up:
+
+	var as *AssetSet
+	var err error
+
+	// Tests:
+
+	// 1  Single-aggregation
+
+	// 1a []AssetProperty=[Cluster]
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{AssetClusterProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1a", window, map[string]float64{
+		"cluster1": 26.0,
+		"cluster2": 15.0,
+		"cluster3": 19.0,
+	}, nil)
+
+	// 1b []AssetProperty=[Type]
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{AssetTypeProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1b", window, map[string]float64{
+		"Node":              49.0,
+		"Disk":              8.0,
+		"ClusterManagement": 3.0,
+	}, nil)
+
+	// 1c []AssetProperty=[Nil]
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{}, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1c", window, map[string]float64{
+		"": 60.0,
+	}, nil)
+
+	// 1d []AssetProperty=nil
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy(nil, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1d", window, map[string]float64{
+		"Compute/cluster1/Node/Kubernetes/gcp-node1/node1":     7.00,
+		"Compute/cluster1/Node/Kubernetes/gcp-node2/node2":     5.50,
+		"Compute/cluster1/Node/Kubernetes/gcp-node3/node3":     6.50,
+		"Storage/cluster1/Disk/Kubernetes/gcp-disk1/disk1":     2.50,
+		"Storage/cluster1/Disk/Kubernetes/gcp-disk2/disk2":     1.50,
+		"GCP/Management/cluster1/ClusterManagement/Kubernetes": 3.00,
+		"Compute/cluster2/Node/Kubernetes/gcp-node4/node4":     11.00,
+		"Storage/cluster2/Disk/Kubernetes/gcp-disk3/disk3":     2.50,
+		"Storage/cluster2/Disk/Kubernetes/gcp-disk4/disk4":     1.50,
+		"GCP/Management/cluster2/ClusterManagement/Kubernetes": 0.00,
+		"Compute/cluster3/Node/Kubernetes/aws-node5/node5":     19.00,
+	}, nil)
+
+	// 2  Multi-aggregation
+
+	// 2a []AssetProperty=[Cluster,Type]
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{AssetClusterProp, AssetTypeProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "2a", window, map[string]float64{
+		"cluster1/Node":              19.0,
+		"cluster1/Disk":              4.0,
+		"cluster1/ClusterManagement": 3.0,
+		"cluster2/Node":              11.0,
+		"cluster2/Disk":              4.0,
+		"cluster2/ClusterManagement": 0.0,
+		"cluster3/Node":              19.0,
+	}, nil)
+
+	// 3  Share resources
+
+	// 3a Shared hourly cost > 0.0
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{AssetTypeProp}, &AssetAggregationOptions{
+		SharedHourlyCosts: map[string]float64{"shared1": 0.5},
+	})
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "3a", window, map[string]float64{
+		"Node":              49.0,
+		"Disk":              8.0,
+		"ClusterManagement": 3.0,
+		"Shared":            12.0,
+	}, nil)
+}
+
+// TestAssetSet_FindMatch verifies FindMatch on (Type, ProviderID):
+// success for a present node, failure for an absent providerID, and
+// failure when the providerID matches but the type does not.
+func TestAssetSet_FindMatch(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+	s, e := startYesterday, endYesterday
+	w := NewWindow(&s, &e)
+
+	var query, match Asset
+	var as *AssetSet
+	var err error
+
+	// Assert success of a simple match of Type and ProviderID
+	as = generateAssetSet(startYesterday)
+	query = NewNode("", "", "gcp-node3", s, e, w)
+	match, err = as.FindMatch(query, []AssetProperty{AssetTypeProp, AssetProviderIDProp})
+	if err != nil {
+		t.Fatalf("AssetSet.FindMatch: unexpected error: %s", err)
+	}
+
+	// Assert error of a simple non-match of Type and ProviderID
+	as = generateAssetSet(startYesterday)
+	query = NewNode("", "", "aws-node3", s, e, w)
+	match, err = as.FindMatch(query, []AssetProperty{AssetTypeProp, AssetProviderIDProp})
+	if err == nil {
+		t.Fatalf("AssetSet.FindMatch: expected error (no match); found %s", match)
+	}
+
+	// Assert error of matching ProviderID, but not Type
+	as = generateAssetSet(startYesterday)
+	query = NewCloud(ComputeCategory, "gcp-node3", s, e, w)
+	match, err = as.FindMatch(query, []AssetProperty{AssetTypeProp, AssetProviderIDProp})
+	if err == nil {
+		t.Fatalf("AssetSet.FindMatch: expected error (no match); found %s", match)
+	}
+}
+
+// TestAssetSetRange_Accumulate verifies that accumulating a three-day range of
+// generated asset sets sums total costs correctly, both without aggregation
+// and after aggregating by type or by cluster. It also covers accumulation
+// when the first AssetSet in the range is empty (a previous regression).
+//
+// Fixes: AggregateBy errors were previously ignored or immediately
+// overwritten by Accumulate's error, and two failure messages blamed
+// AggregateBy for Accumulate errors.
+func TestAssetSetRange_Accumulate(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+
+	startD2 := startYesterday
+	startD1 := startD2.Add(-day)
+	startD0 := startD1.Add(-day)
+
+	window := NewWindow(&startD0, &endYesterday)
+
+	var asr *AssetSetRange
+	var as *AssetSet
+	var err error
+
+	asr = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy(nil, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1a", window, map[string]float64{
+		"Compute/cluster1/Node/Kubernetes/gcp-node1/node1":     21.00,
+		"Compute/cluster1/Node/Kubernetes/gcp-node2/node2":     16.50,
+		"Compute/cluster1/Node/Kubernetes/gcp-node3/node3":     19.50,
+		"Storage/cluster1/Disk/Kubernetes/gcp-disk1/disk1":     7.50,
+		"Storage/cluster1/Disk/Kubernetes/gcp-disk2/disk2":     4.50,
+		"GCP/Management/cluster1/ClusterManagement/Kubernetes": 9.00,
+		"Compute/cluster2/Node/Kubernetes/gcp-node4/node4":     33.00,
+		"Storage/cluster2/Disk/Kubernetes/gcp-disk3/disk3":     7.50,
+		"Storage/cluster2/Disk/Kubernetes/gcp-disk4/disk4":     4.50,
+		"GCP/Management/cluster2/ClusterManagement/Kubernetes": 0.00,
+		"Compute/cluster3/Node/Kubernetes/aws-node5/node5":     57.00,
+	}, nil)
+
+	asr = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy([]AssetProperty{}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1b", window, map[string]float64{
+		"": 180.00,
+	}, nil)
+
+	asr = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy([]AssetProperty{AssetTypeProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1c", window, map[string]float64{
+		"Node":              147.0,
+		"Disk":              24.0,
+		"ClusterManagement": 9.0,
+	}, nil)
+
+	asr = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy([]AssetProperty{AssetClusterProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1c", window, map[string]float64{
+		"cluster1": 78.0,
+		"cluster2": 45.0,
+		"cluster3": 57.0,
+	}, nil)
+
+	// Accumulation with aggregation should work, even when the first AssetSet
+	// is empty (this was previously an issue)
+	asr = NewAssetSetRange(
+		NewAssetSet(startD0, startD1),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy([]AssetProperty{AssetTypeProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1d", window, map[string]float64{
+		"Node":              98.00,
+		"Disk":              16.00,
+		"ClusterManagement": 6.00,
+	}, nil)
+}
+
+// assertAssetSet fails the test unless the given AssetSet contains exactly
+// the expected keys with the expected total costs (compared to the nearest
+// cent) and every asset's window equals the expected window. The msg tag is
+// included in failure output to identify the failing sub-case.
+//
+// Fixes: the per-asset window-mismatch message previously printed the set's
+// window (as.Window) instead of the asset's own window (a.Window()).
+func assertAssetSet(t *testing.T, as *AssetSet, msg string, window Window, exps map[string]float64, err error) {
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy[%s]: unexpected error: %s", msg, err)
+	}
+	if as.Length() != len(exps) {
+		t.Fatalf("AssetSet.AggregateBy[%s]: expected set of length %d, actual %d", msg, len(exps), as.Length())
+	}
+	if !as.Window.Equal(window) {
+		t.Fatalf("AssetSet.AggregateBy[%s]: expected window %s, actual %s", msg, window, as.Window)
+	}
+	as.Each(func(key string, a Asset) {
+		if exp, ok := exps[key]; ok {
+			// Compare at cent precision to avoid float accumulation noise.
+			if math.Round(a.TotalCost()*100) != math.Round(exp*100) {
+				t.Fatalf("AssetSet.AggregateBy[%s]: key %s expected total cost %.2f, actual %.2f", msg, key, exp, a.TotalCost())
+			}
+			if !a.Window().Equal(window) {
+				t.Fatalf("AssetSet.AggregateBy[%s]: key %s expected window %s, actual %s", msg, key, window, a.Window())
+			}
+		} else {
+			t.Fatalf("AssetSet.AggregateBy[%s]: unexpected asset: %s", msg, key)
+		}
+	})
+}
+
+// generateAssetSet generates the following topology:
+//
+// | Asset                        | Cost |  Adj |
+// +------------------------------+------+------+
+//   cluster1:
+//     node1:                        6.00   1.00
+//     node2:                        4.00   1.50
+//     node3:                        7.00  -0.50
+//     disk1:                        2.50   0.00
+//     disk2:                        1.50   0.00
+//     clusterManagement1:           3.00   0.00
+// +------------------------------+------+------+
+//   cluster1 subtotal              24.00   2.00
+// +------------------------------+------+------+
+//   cluster2:
+//     node4:                       12.00  -1.00
+//     disk3:                        2.50   0.00
+//     disk4:                        1.50   0.00
+//     clusterManagement2:           0.00   0.00
+// +------------------------------+------+------+
+//   cluster2 subtotal              16.00  -1.00
+// +------------------------------+------+------+
+//   cluster3:
+//     node5:                       17.00   2.00
+// +------------------------------+------+------+
+//   cluster3 subtotal              17.00   2.00
+// +------------------------------+------+------+
+//   total                          57.00   3.00
+// +------------------------------+------+------+
+//
+// Node costs in the table above are consistent with
+// (CPUCost+RAMCost)*(1-Discount) + GPUCost, with Adjustment listed
+// separately — confirm against Node.TotalCost if the fixture changes.
+func generateAssetSet(start time.Time) *AssetSet {
+	end := start.Add(day)
+	window := NewWindow(&start, &end)
+
+	// hours scales per-resource usage (core-hours, byte-hours) to the window.
+	hours := window.Duration().Hours()
+
+	node1 := NewNode("node1", "cluster1", "gcp-node1", *window.Clone().start, *window.Clone().end, window.Clone())
+	node1.CPUCost = 4.0
+	node1.RAMCost = 4.0
+	node1.GPUCost = 2.0
+	node1.Discount = 0.5
+	node1.CPUCoreHours = 2.0 * hours
+	node1.RAMByteHours = 4.0 * gb * hours
+	node1.SetAdjustment(1.0)
+
+	node2 := NewNode("node2", "cluster1", "gcp-node2", *window.Clone().start, *window.Clone().end, window.Clone())
+	node2.CPUCost = 4.0
+	node2.RAMCost = 4.0
+	node2.GPUCost = 0.0
+	node2.Discount = 0.5
+	node2.CPUCoreHours = 2.0 * hours
+	node2.RAMByteHours = 4.0 * gb * hours
+	node2.SetAdjustment(1.5)
+
+	node3 := NewNode("node3", "cluster1", "gcp-node3", *window.Clone().start, *window.Clone().end, window.Clone())
+	node3.CPUCost = 4.0
+	node3.RAMCost = 4.0
+	node3.GPUCost = 3.0
+	node3.Discount = 0.5
+	node3.CPUCoreHours = 2.0 * hours
+	node3.RAMByteHours = 4.0 * gb * hours
+	node3.SetAdjustment(-0.5)
+
+	node4 := NewNode("node4", "cluster2", "gcp-node4", *window.Clone().start, *window.Clone().end, window.Clone())
+	node4.CPUCost = 10.0
+	node4.RAMCost = 6.0
+	node4.GPUCost = 0.0
+	node4.Discount = 0.25
+	node4.CPUCoreHours = 4.0 * hours
+	node4.RAMByteHours = 12.0 * gb * hours
+	node4.SetAdjustment(-1.0)
+
+	node5 := NewNode("node5", "cluster3", "aws-node5", *window.Clone().start, *window.Clone().end, window.Clone())
+	node5.CPUCost = 10.0
+	node5.RAMCost = 7.0
+	node5.GPUCost = 0.0
+	node5.Discount = 0.0
+	node5.CPUCoreHours = 8.0 * hours
+	node5.RAMByteHours = 24.0 * gb * hours
+	node5.SetAdjustment(2.0)
+
+	disk1 := NewDisk("disk1", "cluster1", "gcp-disk1", *window.Clone().start, *window.Clone().end, window.Clone())
+	disk1.Cost = 2.5
+	disk1.ByteHours = 100 * gb * hours
+
+	disk2 := NewDisk("disk2", "cluster1", "gcp-disk2", *window.Clone().start, *window.Clone().end, window.Clone())
+	disk2.Cost = 1.5
+	disk2.ByteHours = 60 * gb * hours
+
+	disk3 := NewDisk("disk3", "cluster2", "gcp-disk3", *window.Clone().start, *window.Clone().end, window.Clone())
+	disk3.Cost = 2.5
+	disk3.ByteHours = 100 * gb * hours
+
+	disk4 := NewDisk("disk4", "cluster2", "gcp-disk4", *window.Clone().start, *window.Clone().end, window.Clone())
+	disk4.Cost = 1.5
+	disk4.ByteHours = 100 * gb * hours
+
+	cm1 := NewClusterManagement("gcp", "cluster1", window.Clone())
+	cm1.Cost = 3.0
+
+	cm2 := NewClusterManagement("gcp", "cluster2", window.Clone())
+	cm2.Cost = 0.0
+
+	return NewAssetSet(
+		start, end,
+		// cluster 1
+		node1, node2, node3, disk1, disk2, cm1,
+		// cluster 2
+		node4, disk3, disk4, cm2,
+		// cluster 3
+		node5,
+	)
+}
+
+// printAssetSet dumps every asset in the set to stdout, prefixed with msg.
+// Debugging helper only; it performs no assertions.
+func printAssetSet(msg string, as *AssetSet) {
+	fmt.Printf("--- %s ---\n", msg)
+	as.Each(func(name string, asset Asset) {
+		fmt.Printf(" > %s: %s\n", name, asset)
+	})
+}

+ 343 - 0
pkg/kubecost/assetprops.go

@@ -0,0 +1,343 @@
+package kubecost
+
+import (
+	"fmt"
+	"strings"
+)
+
+// AssetProperty is a kind of property belonging to an Asset
+type AssetProperty string
+
+const (
+	// These string values are the canonical names accepted (case-insensitively)
+	// by ParseAssetProperty and used when keying aggregations.
+
+	// AssetNilProp is the zero-value of AssetProperty
+	AssetNilProp AssetProperty = ""
+
+	// AssetAccountProp describes the account of the Asset
+	AssetAccountProp AssetProperty = "account"
+
+	// AssetCategoryProp describes the category of the Asset
+	AssetCategoryProp AssetProperty = "category"
+
+	// AssetClusterProp describes the cluster of the Asset
+	AssetClusterProp AssetProperty = "cluster"
+
+	// AssetNameProp describes the name of the Asset
+	AssetNameProp AssetProperty = "name"
+
+	// AssetProjectProp describes the project of the Asset
+	AssetProjectProp AssetProperty = "project"
+
+	// AssetProviderProp describes the provider of the Asset
+	AssetProviderProp AssetProperty = "provider"
+
+	// AssetProviderIDProp describes the providerID of the Asset
+	AssetProviderIDProp AssetProperty = "providerID"
+
+	// AssetServiceProp describes the service of the Asset
+	AssetServiceProp AssetProperty = "service"
+
+	// AssetTypeProp describes the type of the Asset
+	AssetTypeProp AssetProperty = "type"
+)
+
+// ParseAssetProperty attempts to parse a string into an AssetProperty.
+// Matching is case-insensitive and ignores surrounding whitespace; an
+// unrecognized name returns AssetNilProp and an error.
+//
+// Fixes: the providerID case previously compared against "providerID",
+// which could never match the lower-cased switch operand, so providerID
+// inputs always fell through to the error.
+func ParseAssetProperty(text string) (AssetProperty, error) {
+	switch strings.TrimSpace(strings.ToLower(text)) {
+	case "account":
+		return AssetAccountProp, nil
+	case "category":
+		return AssetCategoryProp, nil
+	case "cluster":
+		return AssetClusterProp, nil
+	case "name":
+		return AssetNameProp, nil
+	case "project":
+		return AssetProjectProp, nil
+	case "provider":
+		return AssetProviderProp, nil
+	case "providerid":
+		return AssetProviderIDProp, nil
+	case "service":
+		return AssetServiceProp, nil
+	case "type":
+		return AssetTypeProp, nil
+	}
+	return AssetNilProp, fmt.Errorf("invalid asset property: %s", text)
+}
+
+// propsEqual returns true if the two property lists have the same length and
+// every element of p1 appears somewhere in p2, regardless of order.
+// NOTE(review): with duplicate entries this is not a strict multiset
+// comparison (e.g. {a,a} vs {a,b} compares unequal only because b is
+// checked) — confirm callers only pass de-duplicated property lists.
+func propsEqual(p1, p2 []AssetProperty) bool {
+	if len(p1) != len(p2) {
+		return false
+	}
+
+	for _, p := range p1 {
+		if !hasProp(p2, p) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Category options
+
+// ComputeCategory signifies the Compute Category
+const ComputeCategory = "Compute"
+
+// StorageCategory signifies the Storage Category
+const StorageCategory = "Storage"
+
+// NetworkCategory signifies the Network Category
+const NetworkCategory = "Network"
+
+// ManagementCategory signifies the Management Category
+const ManagementCategory = "Management"
+
+// SharedCategory signifies an unassigned Category
+const SharedCategory = "Shared"
+
+// OtherCategory signifies an unassigned Category
+const OtherCategory = "Other"
+
+// Provider options
+
+// AWSProvider describes the provider AWS
+const AWSProvider = "AWS"
+
+// GCPProvider describes the provider GCP
+const GCPProvider = "GCP"
+
+// AzureProvider describes the provider Azure
+const AzureProvider = "Azure"
+
+// NilProvider describes unknown provider
+const NilProvider = "-"
+
+// Service options
+
+// KubernetesService describes the Kubernetes service
+const KubernetesService = "Kubernetes"
+
+// ParseProvider attempts to parse and return a known provider, given a string.
+// Recognition is case-insensitive and tolerant of surrounding whitespace;
+// unrecognized input yields NilProvider.
+func ParseProvider(str string) string {
+	normalized := strings.ToLower(strings.TrimSpace(str))
+	switch normalized {
+	case "aws", "eks", "amazon":
+		return AWSProvider
+	case "gcp", "gke", "google":
+		return GCPProvider
+	case "azure":
+		return AzureProvider
+	}
+	return NilProvider
+}
+
+// AssetProperties describes all properties assigned to an Asset.
+// Empty fields are omitted from JSON output (omitempty), and are likewise
+// skipped by Keys and String.
+type AssetProperties struct {
+	Category   string `json:"category,omitempty"`
+	Provider   string `json:"provider,omitempty"`
+	Account    string `json:"account,omitempty"`
+	Project    string `json:"project,omitempty"`
+	Service    string `json:"service,omitempty"`
+	Cluster    string `json:"cluster,omitempty"`
+	Name       string `json:"name,omitempty"`
+	ProviderID string `json:"providerID,omitempty"`
+}
+
+// Clone returns a cloned instance of the given AssetProperties, or nil if
+// the receiver is nil. All fields are strings, so a field-wise copy is a
+// full deep copy.
+func (ap *AssetProperties) Clone() *AssetProperties {
+	if ap == nil {
+		return nil
+	}
+
+	return &AssetProperties{
+		Category:   ap.Category,
+		Provider:   ap.Provider,
+		Account:    ap.Account,
+		Project:    ap.Project,
+		Service:    ap.Service,
+		Cluster:    ap.Cluster,
+		Name:       ap.Name,
+		ProviderID: ap.ProviderID,
+	}
+}
+
+// Equal returns true only if both AssetProperties are non-nil and every
+// field matches exactly.
+func (ap *AssetProperties) Equal(that *AssetProperties) bool {
+	if ap == nil || that == nil {
+		return false
+	}
+
+	return ap.Category == that.Category &&
+		ap.Provider == that.Provider &&
+		ap.Account == that.Account &&
+		ap.Project == that.Project &&
+		ap.Service == that.Service &&
+		ap.Cluster == that.Cluster &&
+		ap.Name == that.Name &&
+		ap.ProviderID == that.ProviderID
+}
+
+// Keys returns the list of string values used to key the Asset based on the
+// list of properties provided. A nil props list selects every property;
+// empty field values are always omitted. Output order is fixed: Category,
+// Provider, Account, Project, Service, Cluster, Name, ProviderID.
+func (ap *AssetProperties) Keys(props []AssetProperty) []string {
+	keys := []string{}
+
+	if ap == nil {
+		return keys
+	}
+
+	// Walk the fields in their canonical key order.
+	ordered := []struct {
+		prop  AssetProperty
+		value string
+	}{
+		{AssetCategoryProp, ap.Category},
+		{AssetProviderProp, ap.Provider},
+		{AssetAccountProp, ap.Account},
+		{AssetProjectProp, ap.Project},
+		{AssetServiceProp, ap.Service},
+		{AssetClusterProp, ap.Cluster},
+		{AssetNameProp, ap.Name},
+		{AssetProviderIDProp, ap.ProviderID},
+	}
+
+	for _, entry := range ordered {
+		if (props == nil || hasProp(props, entry.prop)) && entry.value != "" {
+			keys = append(keys, entry.value)
+		}
+	}
+
+	return keys
+}
+
+// Merge retains only the properties shared with the given AssetProperties:
+// each field of the result keeps its value when both sides agree on it and
+// is emptied otherwise. Returns nil if either side is nil.
+func (ap *AssetProperties) Merge(that *AssetProperties) *AssetProperties {
+	if ap == nil || that == nil {
+		return nil
+	}
+
+	// shared keeps a field's value only when both sides agree on it.
+	shared := func(a, b string) string {
+		if a == b {
+			return a
+		}
+		return ""
+	}
+
+	return &AssetProperties{
+		Category:   shared(ap.Category, that.Category),
+		Provider:   shared(ap.Provider, that.Provider),
+		Account:    shared(ap.Account, that.Account),
+		Project:    shared(ap.Project, that.Project),
+		Service:    shared(ap.Service, that.Service),
+		Cluster:    shared(ap.Cluster, that.Cluster),
+		Name:       shared(ap.Name, that.Name),
+		ProviderID: shared(ap.ProviderID, that.ProviderID),
+	}
+}
+
+// String represents the properties as a comma-delimited list of
+// "Label:value" pairs, skipping empty fields; a nil receiver renders
+// as "<nil>".
+func (ap *AssetProperties) String() string {
+	if ap == nil {
+		return "<nil>"
+	}
+
+	// Fields in their canonical display order.
+	fields := []struct {
+		label string
+		value string
+	}{
+		{"Category", ap.Category},
+		{"Provider", ap.Provider},
+		{"Account", ap.Account},
+		{"Project", ap.Project},
+		{"Service", ap.Service},
+		{"Cluster", ap.Cluster},
+		{"Name", ap.Name},
+		{"ProviderID", ap.ProviderID},
+	}
+
+	strs := []string{}
+	for _, f := range fields {
+		if f.value != "" {
+			strs = append(strs, f.label+":"+f.value)
+		}
+	}
+
+	return strings.Join(strs, ",")
+}
+
+// hasProp reports whether prop appears in props.
+func hasProp(props []AssetProperty, prop AssetProperty) bool {
+	for i := range props {
+		if props[i] == prop {
+			return true
+		}
+	}
+	return false
+}

+ 24 - 0
pkg/kubecost/bingen.go

@@ -0,0 +1,24 @@
+package kubecost
+
+// @bingen:generate:Any
+// @bingen:generate:Asset
+// @bingen:generate:AssetLabels
+// @bingen:generate:AssetProperties
+// @bingen:generate:AssetProperty
+// @bingen:generate:AssetSet
+// @bingen:generate:AssetSetRange
+// @bingen:generate:Breakdown
+// @bingen:generate:Cloud
+// @bingen:generate:ClusterManagement
+// @bingen:generate:Disk
+// @bingen:generate:LoadBalancer
+// @bingen:generate:Network
+// @bingen:generate:Node
+// @bingen:generate:SharedAsset
+// @bingen:generate:Window
+
+// @bingen:generate:Allocation
+// @bingen:generate:AllocationSet
+// @bingen:generate:AllocationSetRange
+
+//go:generate bingen -package=kubecost -version=4 -buffer=github.com/kubecost/cost-model/pkg/util

+ 2840 - 0
pkg/kubecost/kubecost_codecs.go

@@ -0,0 +1,2840 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+//                             DO NOT MODIFY
+//
+//                          ┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻
+//
+//
+//            This source file was automatically generated by bingen.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+package kubecost
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	util "github.com/kubecost/cost-model/pkg/util"
+)
+
+const (
+	// GeneratorPackageName is the package the generator is targeting
+	GeneratorPackageName string = "kubecost"
+
+	// CodecVersion is the version passed into the generator
+	CodecVersion uint8 = 4
+)
+
+//--------------------------------------------------------------------------
+//  Type Map
+//--------------------------------------------------------------------------
+
+// Generated type map for resolving interface implementations to
+// concrete types
+var typeMap map[string]reflect.Type = map[string]reflect.Type{
+	"Allocation":         reflect.TypeOf((*Allocation)(nil)).Elem(),
+	"AllocationSet":      reflect.TypeOf((*AllocationSet)(nil)).Elem(),
+	"AllocationSetRange": reflect.TypeOf((*AllocationSetRange)(nil)).Elem(),
+	"Any":                reflect.TypeOf((*Any)(nil)).Elem(),
+	"AssetProperties":    reflect.TypeOf((*AssetProperties)(nil)).Elem(),
+	"AssetSet":           reflect.TypeOf((*AssetSet)(nil)).Elem(),
+	"AssetSetRange":      reflect.TypeOf((*AssetSetRange)(nil)).Elem(),
+	"Breakdown":          reflect.TypeOf((*Breakdown)(nil)).Elem(),
+	"Cloud":              reflect.TypeOf((*Cloud)(nil)).Elem(),
+	"ClusterManagement":  reflect.TypeOf((*ClusterManagement)(nil)).Elem(),
+	"Disk":               reflect.TypeOf((*Disk)(nil)).Elem(),
+	"LoadBalancer":       reflect.TypeOf((*LoadBalancer)(nil)).Elem(),
+	"Network":            reflect.TypeOf((*Network)(nil)).Elem(),
+	"Node":               reflect.TypeOf((*Node)(nil)).Elem(),
+	"SharedAsset":        reflect.TypeOf((*SharedAsset)(nil)).Elem(),
+	"Window":             reflect.TypeOf((*Window)(nil)).Elem(),
+}
+
+//--------------------------------------------------------------------------
+//  Type Helpers
+//--------------------------------------------------------------------------
+
+// typeToString determines the basic properties of the type, the qualifier, package path, and
+// type name, and returns the qualified type (e.g. "*path/to/pkg.Type").
+// NOTE: generated by bingen (see file header); regenerate instead of hand-editing.
+func typeToString(f interface{}) string {
+	qual := ""
+	t := reflect.TypeOf(f)
+	if t.Kind() == reflect.Ptr {
+		// Unwrap one level of pointer and record the "*" qualifier.
+		t = t.Elem()
+		qual = "*"
+	}
+
+	return fmt.Sprintf("%s%s.%s", qual, t.PkgPath(), t.Name())
+}
+
+// resolveType uses the name of a type and returns the package, base type name, and whether
+// or not it's a pointer.
+// NOTE: generated by bingen (see file header); regenerate instead of hand-editing.
+// NOTE(review): parts[1] below assumes the input contains a dot-qualified
+// name, as produced by typeToString; an unqualified name would panic.
+func resolveType(t string) (pkg string, name string, isPtr bool) {
+	isPtr = t[:1] == "*"
+	if isPtr {
+		t = t[1:]
+	}
+
+	// Drop any leading import path, keeping only "pkg.Name".
+	slashIndex := strings.LastIndex(t, "/")
+	if slashIndex >= 0 {
+		t = t[slashIndex+1:]
+	}
+	parts := strings.Split(t, ".")
+	// Types from the generator's own package are reported with an empty pkg.
+	if parts[0] == GeneratorPackageName {
+		parts[0] = ""
+	}
+
+	pkg = parts[0]
+	name = parts[1]
+	return
+}
+
+//--------------------------------------------------------------------------
+//  Allocation
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Allocation instance
+// into a byte array
+// NOTE: generated by bingen (see file header); regenerate instead of hand-editing.
+func (target *Allocation) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	buff.WriteString(target.Name) // write string
+	// --- [begin][write][reference](Properties) ---
+	a, errA := target.Properties.MarshalBinary()
+	if errA != nil {
+		return nil, errA
+	}
+	buff.WriteInt(len(a))
+	buff.WriteBytes(a)
+	// --- [end][write][reference](Properties) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.Start.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	c, errC := target.End.MarshalBinary()
+	if errC != nil {
+		return nil, errC
+	}
+	buff.WriteInt(len(c))
+	buff.WriteBytes(c)
+	// --- [end][write][reference](time.Time) ---
+
+	buff.WriteFloat64(target.Minutes) // write float64
+	// --- [begin][write][reference](time.Time) ---
+	d, errD := target.ActiveStart.MarshalBinary()
+	if errD != nil {
+		return nil, errD
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][reference](time.Time) ---
+
+	buff.WriteFloat64(target.CPUCoreHours)    // write float64
+	buff.WriteFloat64(target.CPUCost)         // write float64
+	buff.WriteFloat64(target.CPUEfficiency)   // write float64
+	buff.WriteFloat64(target.GPUHours)        // write float64
+	buff.WriteFloat64(target.GPUCost)         // write float64
+	buff.WriteFloat64(target.NetworkCost)     // write float64
+	buff.WriteFloat64(target.PVByteHours)     // write float64
+	buff.WriteFloat64(target.PVCost)          // write float64
+	buff.WriteFloat64(target.RAMByteHours)    // write float64
+	buff.WriteFloat64(target.RAMCost)         // write float64
+	buff.WriteFloat64(target.RAMEfficiency)   // write float64
+	buff.WriteFloat64(target.SharedCost)      // write float64
+	buff.WriteFloat64(target.TotalCost)       // write float64
+	buff.WriteFloat64(target.TotalEfficiency) // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Allocation type
+// NOTE: generated by bingen (see file header); field order must mirror
+// MarshalBinary exactly. Regenerate instead of hand-editing.
+func (target *Allocation) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Allocation. Expected %d, got %d", CodecVersion, version)
+	}
+
+	a := buff.ReadString() // read string
+	target.Name = a
+
+	// --- [begin][read][reference](Properties) ---
+	b := &Properties{}
+	c := buff.ReadInt()    // byte array length
+	d := buff.ReadBytes(c) // byte array
+	errA := b.UnmarshalBinary(d)
+	if errA != nil {
+		return errA
+	}
+	target.Properties = *b
+	// --- [end][read][reference](Properties) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	e := &time.Time{}
+	f := buff.ReadInt()    // byte array length
+	g := buff.ReadBytes(f) // byte array
+	errB := e.UnmarshalBinary(g)
+	if errB != nil {
+		return errB
+	}
+	target.Start = *e
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	h := &time.Time{}
+	l := buff.ReadInt()    // byte array length
+	m := buff.ReadBytes(l) // byte array
+	errC := h.UnmarshalBinary(m)
+	if errC != nil {
+		return errC
+	}
+	target.End = *h
+	// --- [end][read][reference](time.Time) ---
+
+	n := buff.ReadFloat64() // read float64
+	target.Minutes = n
+
+	// --- [begin][read][reference](time.Time) ---
+	o := &time.Time{}
+	p := buff.ReadInt()    // byte array length
+	q := buff.ReadBytes(p) // byte array
+	errD := o.UnmarshalBinary(q)
+	if errD != nil {
+		return errD
+	}
+	target.ActiveStart = *o
+	// --- [end][read][reference](time.Time) ---
+
+	r := buff.ReadFloat64() // read float64
+	target.CPUCoreHours = r
+
+	s := buff.ReadFloat64() // read float64
+	target.CPUCost = s
+
+	t := buff.ReadFloat64() // read float64
+	target.CPUEfficiency = t
+
+	u := buff.ReadFloat64() // read float64
+	target.GPUHours = u
+
+	w := buff.ReadFloat64() // read float64
+	target.GPUCost = w
+
+	x := buff.ReadFloat64() // read float64
+	target.NetworkCost = x
+
+	y := buff.ReadFloat64() // read float64
+	target.PVByteHours = y
+
+	z := buff.ReadFloat64() // read float64
+	target.PVCost = z
+
+	aa := buff.ReadFloat64() // read float64
+	target.RAMByteHours = aa
+
+	bb := buff.ReadFloat64() // read float64
+	target.RAMCost = bb
+
+	cc := buff.ReadFloat64() // read float64
+	target.RAMEfficiency = cc
+
+	dd := buff.ReadFloat64() // read float64
+	target.SharedCost = dd
+
+	ee := buff.ReadFloat64() // read float64
+	target.TotalCost = ee
+
+	ff := buff.ReadFloat64() // read float64
+	target.TotalEfficiency = ff
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  AllocationSet
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this AllocationSet instance
+// into a byte array
+// NOTE: generated by bingen (see file header); regenerate instead of hand-editing.
+func (target *AllocationSet) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.allocations == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]*Allocation) ---
+		buff.WriteInt(len(target.allocations)) // map length
+		for k, v := range target.allocations {
+			buff.WriteString(k) // write string
+			if v == nil {
+				buff.WriteUInt8(uint8(0)) // write nil byte
+			} else {
+				buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+				// --- [begin][write][struct](Allocation) ---
+				a, errA := v.MarshalBinary()
+				if errA != nil {
+					return nil, errA
+				}
+				buff.WriteInt(len(a))
+				buff.WriteBytes(a)
+				// --- [end][write][struct](Allocation) ---
+
+			}
+		}
+		// --- [end][write][map](map[string]*Allocation) ---
+
+	}
+	if target.idleKeys == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]bool) ---
+		buff.WriteInt(len(target.idleKeys)) // map length
+		for kk, vv := range target.idleKeys {
+			buff.WriteString(kk) // write string
+			buff.WriteBool(vv)   // write bool
+		}
+		// --- [end][write][map](map[string]bool) ---
+
+	}
+	// --- [begin][write][struct](Window) ---
+	b, errB := target.Window.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][struct](Window) ---
+
+	if target.Warnings == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]string) ---
+		buff.WriteInt(len(target.Warnings)) // array length
+		for i := 0; i < len(target.Warnings); i++ {
+			buff.WriteString(target.Warnings[i]) // write string
+		}
+		// --- [end][write][slice]([]string) ---
+
+	}
+	if target.Errors == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]string) ---
+		buff.WriteInt(len(target.Errors)) // array length
+		for j := 0; j < len(target.Errors); j++ {
+			buff.WriteString(target.Errors[j]) // write string
+		}
+		// --- [end][write][slice]([]string) ---
+
+	}
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the AllocationSet type
+// NOTE: generated by bingen (see file header); field order must mirror
+// MarshalBinary exactly. Regenerate instead of hand-editing.
+func (target *AllocationSet) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling AllocationSet. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.allocations = nil
+	} else {
+		// --- [begin][read][map](map[string]*Allocation) ---
+		a := make(map[string]*Allocation)
+		b := buff.ReadInt() // map len
+		for i := 0; i < b; i++ {
+			var k string
+			c := buff.ReadString() // read string
+			k = c
+
+			var v *Allocation
+			if buff.ReadUInt8() == uint8(0) {
+				v = nil
+			} else {
+				// --- [begin][read][struct](Allocation) ---
+				d := &Allocation{}
+				e := buff.ReadInt()    // byte array length
+				f := buff.ReadBytes(e) // byte array
+				errA := d.UnmarshalBinary(f)
+				if errA != nil {
+					return errA
+				}
+				v = d
+				// --- [end][read][struct](Allocation) ---
+
+			}
+			a[k] = v
+		}
+		target.allocations = a
+		// --- [end][read][map](map[string]*Allocation) ---
+
+	}
+	if buff.ReadUInt8() == uint8(0) {
+		target.idleKeys = nil
+	} else {
+		// --- [begin][read][map](map[string]bool) ---
+		g := make(map[string]bool)
+		h := buff.ReadInt() // map len
+		for j := 0; j < h; j++ {
+			var kk string
+			l := buff.ReadString() // read string
+			kk = l
+
+			var vv bool
+			m := buff.ReadBool() // read bool
+			vv = m
+
+			g[kk] = vv
+		}
+		target.idleKeys = g
+		// --- [end][read][map](map[string]bool) ---
+
+	}
+	// --- [begin][read][struct](Window) ---
+	n := &Window{}
+	o := buff.ReadInt()    // byte array length
+	p := buff.ReadBytes(o) // byte array
+	errB := n.UnmarshalBinary(p)
+	if errB != nil {
+		return errB
+	}
+	target.Window = *n
+	// --- [end][read][struct](Window) ---
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.Warnings = nil
+	} else {
+		// --- [begin][read][slice]([]string) ---
+		r := buff.ReadInt() // array len
+		q := make([]string, r)
+		for ii := 0; ii < r; ii++ {
+			var s string
+			t := buff.ReadString() // read string
+			s = t
+
+			q[ii] = s
+		}
+		target.Warnings = q
+		// --- [end][read][slice]([]string) ---
+
+	}
+	if buff.ReadUInt8() == uint8(0) {
+		target.Errors = nil
+	} else {
+		// --- [begin][read][slice]([]string) ---
+		w := buff.ReadInt() // array len
+		u := make([]string, w)
+		for jj := 0; jj < w; jj++ {
+			var x string
+			y := buff.ReadString() // read string
+			x = y
+
+			u[jj] = x
+		}
+		target.Errors = u
+		// --- [end][read][slice]([]string) ---
+
+	}
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  AllocationSetRange
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this AllocationSetRange instance
+// into a byte array
+func (target *AllocationSetRange) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.allocations == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]*AllocationSet) ---
+		buff.WriteInt(len(target.allocations)) // array length
+		for i := 0; i < len(target.allocations); i++ {
+			if target.allocations[i] == nil {
+				buff.WriteUInt8(uint8(0)) // write nil byte
+			} else {
+				buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+				// --- [begin][write][struct](AllocationSet) ---
+				a, errA := target.allocations[i].MarshalBinary()
+				if errA != nil {
+					return nil, errA
+				}
+				buff.WriteInt(len(a))
+				buff.WriteBytes(a)
+				// --- [end][write][struct](AllocationSet) ---
+
+			}
+		}
+		// --- [end][write][slice]([]*AllocationSet) ---
+
+	}
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the AllocationSetRange type
+func (target *AllocationSetRange) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling AllocationSetRange. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.allocations = nil
+	} else {
+		// --- [begin][read][slice]([]*AllocationSet) ---
+		b := buff.ReadInt() // array len
+		a := make([]*AllocationSet, b)
+		for i := 0; i < b; i++ {
+			var c *AllocationSet
+			if buff.ReadUInt8() == uint8(0) {
+				c = nil
+			} else {
+				// --- [begin][read][struct](AllocationSet) ---
+				d := &AllocationSet{}
+				e := buff.ReadInt()    // byte array length
+				f := buff.ReadBytes(e) // byte array
+				errA := d.UnmarshalBinary(f)
+				if errA != nil {
+					return errA
+				}
+				c = d
+				// --- [end][read][struct](AllocationSet) ---
+
+			}
+			a[i] = c
+		}
+		target.allocations = a
+		// --- [end][read][slice]([]*AllocationSet) ---
+
+	}
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Any
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Any instance
+// into a byte array
+func (target *Any) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	// --- [begin][write][alias](AssetLabels) ---
+	if map[string]string(target.labels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.labels))) // map length
+		for k, v := range map[string]string(target.labels) {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AssetLabels) ---
+
+	if target.properties == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](AssetProperties) ---
+		a, errA := target.properties.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][struct](AssetProperties) ---
+
+	}
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.start.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	c, errC := target.end.MarshalBinary()
+	if errC != nil {
+		return nil, errC
+	}
+	buff.WriteInt(len(c))
+	buff.WriteBytes(c)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][struct](Window) ---
+	d, errD := target.window.MarshalBinary()
+	if errD != nil {
+		return nil, errD
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][struct](Window) ---
+
+	buff.WriteFloat64(target.adjustment) // write float64
+	buff.WriteFloat64(target.Cost)       // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Any type
+func (target *Any) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Any. Expected %d, got %d", CodecVersion, version)
+	}
+
+	// --- [begin][read][alias](AssetLabels) ---
+	var a map[string]string
+	if buff.ReadUInt8() == uint8(0) {
+		a = nil
+	} else {
+		// --- [begin][read][map](map[string]string) ---
+		b := make(map[string]string)
+		c := buff.ReadInt() // map len
+		for i := 0; i < c; i++ {
+			var k string
+			d := buff.ReadString() // read string
+			k = d
+
+			var v string
+			e := buff.ReadString() // read string
+			v = e
+
+			b[k] = v
+		}
+		a = b
+		// --- [end][read][map](map[string]string) ---
+
+	}
+	target.labels = AssetLabels(a)
+	// --- [end][read][alias](AssetLabels) ---
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.properties = nil
+	} else {
+		// --- [begin][read][struct](AssetProperties) ---
+		f := &AssetProperties{}
+		g := buff.ReadInt()    // byte array length
+		h := buff.ReadBytes(g) // byte array
+		errA := f.UnmarshalBinary(h)
+		if errA != nil {
+			return errA
+		}
+		target.properties = f
+		// --- [end][read][struct](AssetProperties) ---
+
+	}
+	// --- [begin][read][reference](time.Time) ---
+	l := &time.Time{}
+	m := buff.ReadInt()    // byte array length
+	n := buff.ReadBytes(m) // byte array
+	errB := l.UnmarshalBinary(n)
+	if errB != nil {
+		return errB
+	}
+	target.start = *l
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	o := &time.Time{}
+	p := buff.ReadInt()    // byte array length
+	q := buff.ReadBytes(p) // byte array
+	errC := o.UnmarshalBinary(q)
+	if errC != nil {
+		return errC
+	}
+	target.end = *o
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][struct](Window) ---
+	r := &Window{}
+	s := buff.ReadInt()    // byte array length
+	t := buff.ReadBytes(s) // byte array
+	errD := r.UnmarshalBinary(t)
+	if errD != nil {
+		return errD
+	}
+	target.window = *r
+	// --- [end][read][struct](Window) ---
+
+	u := buff.ReadFloat64() // read float64
+	target.adjustment = u
+
+	w := buff.ReadFloat64() // read float64
+	target.Cost = w
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  AssetProperties
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this AssetProperties instance
+// into a byte array
+func (target *AssetProperties) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	buff.WriteString(target.Category)   // write string
+	buff.WriteString(target.Provider)   // write string
+	buff.WriteString(target.Account)    // write string
+	buff.WriteString(target.Project)    // write string
+	buff.WriteString(target.Service)    // write string
+	buff.WriteString(target.Cluster)    // write string
+	buff.WriteString(target.Name)       // write string
+	buff.WriteString(target.ProviderID) // write string
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the AssetProperties type
+func (target *AssetProperties) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling AssetProperties. Expected %d, got %d", CodecVersion, version)
+	}
+
+	a := buff.ReadString() // read string
+	target.Category = a
+
+	b := buff.ReadString() // read string
+	target.Provider = b
+
+	c := buff.ReadString() // read string
+	target.Account = c
+
+	d := buff.ReadString() // read string
+	target.Project = d
+
+	e := buff.ReadString() // read string
+	target.Service = e
+
+	f := buff.ReadString() // read string
+	target.Cluster = f
+
+	g := buff.ReadString() // read string
+	target.Name = g
+
+	h := buff.ReadString() // read string
+	target.ProviderID = h
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  AssetSet
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this AssetSet instance
+// into a byte array
+func (target *AssetSet) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.assets == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]Asset) ---
+		buff.WriteInt(len(target.assets)) // map length
+		for k, v := range target.assets {
+			buff.WriteString(k) // write string
+			if v == nil {
+				buff.WriteUInt8(uint8(0)) // write nil byte
+			} else {
+				buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+				// --- [begin][write][interface](Asset) ---
+				a := reflect.ValueOf(v).Interface()
+				b, okA := a.(encoding.BinaryMarshaler)
+				if !okA {
+					return nil, fmt.Errorf("Type: %s does not implement encoding.BinaryMarshaler", typeToString(v))
+				}
+				c, errA := b.MarshalBinary()
+				if errA != nil {
+					return nil, errA
+				}
+				buff.WriteString(typeToString(v))
+				buff.WriteInt(len(c))
+				buff.WriteBytes(c)
+				// --- [end][write][interface](Asset) ---
+
+			}
+		}
+		// --- [end][write][map](map[string]Asset) ---
+
+	}
+	if target.props == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]AssetProperty) ---
+		buff.WriteInt(len(target.props)) // array length
+		for i := 0; i < len(target.props); i++ {
+			// --- [begin][write][alias](AssetProperty) ---
+			buff.WriteString(string(target.props[i])) // write string
+			// --- [end][write][alias](AssetProperty) ---
+
+		}
+		// --- [end][write][slice]([]AssetProperty) ---
+
+	}
+	// --- [begin][write][struct](Window) ---
+	d, errB := target.Window.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][struct](Window) ---
+
+	if target.Warnings == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]string) ---
+		buff.WriteInt(len(target.Warnings)) // array length
+		for j := 0; j < len(target.Warnings); j++ {
+			buff.WriteString(target.Warnings[j]) // write string
+		}
+		// --- [end][write][slice]([]string) ---
+
+	}
+	if target.Errors == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]string) ---
+		buff.WriteInt(len(target.Errors)) // array length
+		for ii := 0; ii < len(target.Errors); ii++ {
+			buff.WriteString(target.Errors[ii]) // write string
+		}
+		// --- [end][write][slice]([]string) ---
+
+	}
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the AssetSet type
+func (target *AssetSet) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling AssetSet. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.assets = nil
+	} else {
+		// --- [begin][read][map](map[string]Asset) ---
+		a := make(map[string]Asset)
+		b := buff.ReadInt() // map len
+		for i := 0; i < b; i++ {
+			var k string
+			c := buff.ReadString() // read string
+			k = c
+
+			var v Asset
+			if buff.ReadUInt8() == uint8(0) {
+				v = nil
+			} else {
+				// --- [begin][read][interface](Asset) ---
+				d := buff.ReadString()
+				_, e, _ := resolveType(d)
+				if _, ok := typeMap[e]; !ok {
+					return fmt.Errorf("Unknown Type: %s", e)
+				}
+				f, okA := reflect.New(typeMap[e]).Interface().(interface{ UnmarshalBinary([]byte) error })
+				if !okA {
+					return fmt.Errorf("Type: %s does not implement UnmarshalBinary([]byte) error", e)
+				}
+				g := buff.ReadInt()    // byte array length
+				h := buff.ReadBytes(g) // byte array
+				errA := f.UnmarshalBinary(h)
+				if errA != nil {
+					return errA
+				}
+				v = f.(Asset)
+				// --- [end][read][interface](Asset) ---
+
+			}
+			a[k] = v
+		}
+		target.assets = a
+		// --- [end][read][map](map[string]Asset) ---
+
+	}
+	if buff.ReadUInt8() == uint8(0) {
+		target.props = nil
+	} else {
+		// --- [begin][read][slice]([]AssetProperty) ---
+		m := buff.ReadInt() // array len
+		l := make([]AssetProperty, m)
+		for j := 0; j < m; j++ {
+			// --- [begin][read][alias](AssetProperty) ---
+			var o string
+			p := buff.ReadString() // read string
+			o = p
+
+			n := AssetProperty(o)
+			// --- [end][read][alias](AssetProperty) ---
+
+			l[j] = n
+		}
+		target.props = l
+		// --- [end][read][slice]([]AssetProperty) ---
+
+	}
+	// --- [begin][read][struct](Window) ---
+	q := &Window{}
+	r := buff.ReadInt()    // byte array length
+	s := buff.ReadBytes(r) // byte array
+	errB := q.UnmarshalBinary(s)
+	if errB != nil {
+		return errB
+	}
+	target.Window = *q
+	// --- [end][read][struct](Window) ---
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.Warnings = nil
+	} else {
+		// --- [begin][read][slice]([]string) ---
+		u := buff.ReadInt() // array len
+		t := make([]string, u)
+		for ii := 0; ii < u; ii++ {
+			var w string
+			x := buff.ReadString() // read string
+			w = x
+
+			t[ii] = w
+		}
+		target.Warnings = t
+		// --- [end][read][slice]([]string) ---
+
+	}
+	if buff.ReadUInt8() == uint8(0) {
+		target.Errors = nil
+	} else {
+		// --- [begin][read][slice]([]string) ---
+		z := buff.ReadInt() // array len
+		y := make([]string, z)
+		for jj := 0; jj < z; jj++ {
+			var aa string
+			bb := buff.ReadString() // read string
+			aa = bb
+
+			y[jj] = aa
+		}
+		target.Errors = y
+		// --- [end][read][slice]([]string) ---
+
+	}
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  AssetSetRange
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this AssetSetRange instance
+// into a byte array
+func (target *AssetSetRange) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.assets == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]*AssetSet) ---
+		buff.WriteInt(len(target.assets)) // array length
+		for i := 0; i < len(target.assets); i++ {
+			if target.assets[i] == nil {
+				buff.WriteUInt8(uint8(0)) // write nil byte
+			} else {
+				buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+				// --- [begin][write][struct](AssetSet) ---
+				a, errA := target.assets[i].MarshalBinary()
+				if errA != nil {
+					return nil, errA
+				}
+				buff.WriteInt(len(a))
+				buff.WriteBytes(a)
+				// --- [end][write][struct](AssetSet) ---
+
+			}
+		}
+		// --- [end][write][slice]([]*AssetSet) ---
+
+	}
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the AssetSetRange type
+func (target *AssetSetRange) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling AssetSetRange. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.assets = nil
+	} else {
+		// --- [begin][read][slice]([]*AssetSet) ---
+		b := buff.ReadInt() // array len
+		a := make([]*AssetSet, b)
+		for i := 0; i < b; i++ {
+			var c *AssetSet
+			if buff.ReadUInt8() == uint8(0) {
+				c = nil
+			} else {
+				// --- [begin][read][struct](AssetSet) ---
+				d := &AssetSet{}
+				e := buff.ReadInt()    // byte array length
+				f := buff.ReadBytes(e) // byte array
+				errA := d.UnmarshalBinary(f)
+				if errA != nil {
+					return errA
+				}
+				c = d
+				// --- [end][read][struct](AssetSet) ---
+
+			}
+			a[i] = c
+		}
+		target.assets = a
+		// --- [end][read][slice]([]*AssetSet) ---
+
+	}
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Breakdown
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Breakdown instance
+// into a byte array
+func (target *Breakdown) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	buff.WriteFloat64(target.Idle)   // write float64
+	buff.WriteFloat64(target.Other)  // write float64
+	buff.WriteFloat64(target.System) // write float64
+	buff.WriteFloat64(target.User)   // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Breakdown type
+func (target *Breakdown) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Breakdown. Expected %d, got %d", CodecVersion, version)
+	}
+
+	a := buff.ReadFloat64() // read float64
+	target.Idle = a
+
+	b := buff.ReadFloat64() // read float64
+	target.Other = b
+
+	c := buff.ReadFloat64() // read float64
+	target.System = c
+
+	d := buff.ReadFloat64() // read float64
+	target.User = d
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Cloud
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Cloud instance
+// into a byte array
+func (target *Cloud) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	// --- [begin][write][alias](AssetLabels) ---
+	if map[string]string(target.labels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.labels))) // map length
+		for k, v := range map[string]string(target.labels) {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AssetLabels) ---
+
+	if target.properties == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](AssetProperties) ---
+		a, errA := target.properties.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][struct](AssetProperties) ---
+
+	}
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.start.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	c, errC := target.end.MarshalBinary()
+	if errC != nil {
+		return nil, errC
+	}
+	buff.WriteInt(len(c))
+	buff.WriteBytes(c)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][struct](Window) ---
+	d, errD := target.window.MarshalBinary()
+	if errD != nil {
+		return nil, errD
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][struct](Window) ---
+
+	buff.WriteFloat64(target.adjustment) // write float64
+	buff.WriteFloat64(target.Cost)       // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Cloud type
+func (target *Cloud) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Cloud. Expected %d, got %d", CodecVersion, version)
+	}
+
+	// --- [begin][read][alias](AssetLabels) ---
+	var a map[string]string
+	if buff.ReadUInt8() == uint8(0) {
+		a = nil
+	} else {
+		// --- [begin][read][map](map[string]string) ---
+		b := make(map[string]string)
+		c := buff.ReadInt() // map len
+		for i := 0; i < c; i++ {
+			var k string
+			d := buff.ReadString() // read string
+			k = d
+
+			var v string
+			e := buff.ReadString() // read string
+			v = e
+
+			b[k] = v
+		}
+		a = b
+		// --- [end][read][map](map[string]string) ---
+
+	}
+	target.labels = AssetLabels(a)
+	// --- [end][read][alias](AssetLabels) ---
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.properties = nil
+	} else {
+		// --- [begin][read][struct](AssetProperties) ---
+		f := &AssetProperties{}
+		g := buff.ReadInt()    // byte array length
+		h := buff.ReadBytes(g) // byte array
+		errA := f.UnmarshalBinary(h)
+		if errA != nil {
+			return errA
+		}
+		target.properties = f
+		// --- [end][read][struct](AssetProperties) ---
+
+	}
+	// --- [begin][read][reference](time.Time) ---
+	l := &time.Time{}
+	m := buff.ReadInt()    // byte array length
+	n := buff.ReadBytes(m) // byte array
+	errB := l.UnmarshalBinary(n)
+	if errB != nil {
+		return errB
+	}
+	target.start = *l
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	o := &time.Time{}
+	p := buff.ReadInt()    // byte array length
+	q := buff.ReadBytes(p) // byte array
+	errC := o.UnmarshalBinary(q)
+	if errC != nil {
+		return errC
+	}
+	target.end = *o
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][struct](Window) ---
+	r := &Window{}
+	s := buff.ReadInt()    // byte array length
+	t := buff.ReadBytes(s) // byte array
+	errD := r.UnmarshalBinary(t)
+	if errD != nil {
+		return errD
+	}
+	target.window = *r
+	// --- [end][read][struct](Window) ---
+
+	u := buff.ReadFloat64() // read float64
+	target.adjustment = u
+
+	w := buff.ReadFloat64() // read float64
+	target.Cost = w
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  ClusterManagement
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this ClusterManagement instance
+// into a byte array
+func (target *ClusterManagement) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	// --- [begin][write][alias](AssetLabels) ---
+	if map[string]string(target.labels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.labels))) // map length
+		for k, v := range map[string]string(target.labels) {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AssetLabels) ---
+
+	if target.properties == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](AssetProperties) ---
+		a, errA := target.properties.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][struct](AssetProperties) ---
+
+	}
+	// --- [begin][write][struct](Window) ---
+	b, errB := target.window.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][struct](Window) ---
+
+	buff.WriteFloat64(target.Cost) // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the ClusterManagement type
+//
+// The read order must mirror ClusterManagement.MarshalBinary exactly:
+// version byte, nil-flagged labels map, nil-flagged properties, length-prefixed
+// Window, then Cost. Any over/under-read panics inside the buffer and is
+// returned as an error by the deferred recover below.
+func (target *ClusterManagement) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling ClusterManagement. Expected %d, got %d", CodecVersion, version)
+	}
+
+	// --- [begin][read][alias](AssetLabels) ---
+	var a map[string]string
+	if buff.ReadUInt8() == uint8(0) {
+		a = nil
+	} else {
+		// --- [begin][read][map](map[string]string) ---
+		b := make(map[string]string)
+		c := buff.ReadInt() // map len
+		for i := 0; i < c; i++ {
+			var k string
+			d := buff.ReadString() // read string
+			k = d
+
+			var v string
+			e := buff.ReadString() // read string
+			v = e
+
+			b[k] = v
+		}
+		a = b
+		// --- [end][read][map](map[string]string) ---
+
+	}
+	target.labels = AssetLabels(a)
+	// --- [end][read][alias](AssetLabels) ---
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.properties = nil
+	} else {
+		// --- [begin][read][struct](AssetProperties) ---
+		f := &AssetProperties{}
+		g := buff.ReadInt()    // byte array length
+		h := buff.ReadBytes(g) // byte array
+		errA := f.UnmarshalBinary(h)
+		if errA != nil {
+			return errA
+		}
+		target.properties = f
+		// --- [end][read][struct](AssetProperties) ---
+
+	}
+	// --- [begin][read][struct](Window) ---
+	l := &Window{}
+	m := buff.ReadInt()    // byte array length
+	n := buff.ReadBytes(m) // byte array
+	errB := l.UnmarshalBinary(n)
+	if errB != nil {
+		return errB
+	}
+	target.window = *l
+	// --- [end][read][struct](Window) ---
+
+	o := buff.ReadFloat64() // read float64
+	target.Cost = o
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Disk
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Disk instance
+// into a byte array
+//
+// Layout: CodecVersion byte, then labels (0/1 presence byte + length-prefixed
+// map entries), properties (presence byte + length-prefixed bytes), start, end
+// and window as length-prefixed sub-encodings, the float64s adjustment, Cost,
+// ByteHours, Local, and finally the nil-flagged Breakdown. Disk.UnmarshalBinary
+// must read in exactly this order.
+func (target *Disk) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	// --- [begin][write][alias](AssetLabels) ---
+	if map[string]string(target.labels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.labels))) // map length
+		for k, v := range map[string]string(target.labels) {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AssetLabels) ---
+
+	if target.properties == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](AssetProperties) ---
+		a, errA := target.properties.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][struct](AssetProperties) ---
+
+	}
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.start.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	c, errC := target.end.MarshalBinary()
+	if errC != nil {
+		return nil, errC
+	}
+	buff.WriteInt(len(c))
+	buff.WriteBytes(c)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][struct](Window) ---
+	d, errD := target.window.MarshalBinary()
+	if errD != nil {
+		return nil, errD
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][struct](Window) ---
+
+	buff.WriteFloat64(target.adjustment) // write float64
+	buff.WriteFloat64(target.Cost)       // write float64
+	buff.WriteFloat64(target.ByteHours)  // write float64
+	buff.WriteFloat64(target.Local)      // write float64
+	if target.Breakdown == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](Breakdown) ---
+		e, errE := target.Breakdown.MarshalBinary()
+		if errE != nil {
+			return nil, errE
+		}
+		buff.WriteInt(len(e))
+		buff.WriteBytes(e)
+		// --- [end][write][struct](Breakdown) ---
+
+	}
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Disk type
+//
+// The read order must mirror Disk.MarshalBinary exactly: version, labels,
+// properties, start, end, window, adjustment, Cost, ByteHours, Local,
+// Breakdown. Buffer over/under-reads panic and are converted to errors by
+// the deferred recover.
+func (target *Disk) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Disk. Expected %d, got %d", CodecVersion, version)
+	}
+
+	// --- [begin][read][alias](AssetLabels) ---
+	var a map[string]string
+	if buff.ReadUInt8() == uint8(0) {
+		a = nil
+	} else {
+		// --- [begin][read][map](map[string]string) ---
+		b := make(map[string]string)
+		c := buff.ReadInt() // map len
+		for i := 0; i < c; i++ {
+			var k string
+			d := buff.ReadString() // read string
+			k = d
+
+			var v string
+			e := buff.ReadString() // read string
+			v = e
+
+			b[k] = v
+		}
+		a = b
+		// --- [end][read][map](map[string]string) ---
+
+	}
+	target.labels = AssetLabels(a)
+	// --- [end][read][alias](AssetLabels) ---
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.properties = nil
+	} else {
+		// --- [begin][read][struct](AssetProperties) ---
+		f := &AssetProperties{}
+		g := buff.ReadInt()    // byte array length
+		h := buff.ReadBytes(g) // byte array
+		errA := f.UnmarshalBinary(h)
+		if errA != nil {
+			return errA
+		}
+		target.properties = f
+		// --- [end][read][struct](AssetProperties) ---
+
+	}
+	// --- [begin][read][reference](time.Time) ---
+	l := &time.Time{}
+	m := buff.ReadInt()    // byte array length
+	n := buff.ReadBytes(m) // byte array
+	errB := l.UnmarshalBinary(n)
+	if errB != nil {
+		return errB
+	}
+	target.start = *l
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	o := &time.Time{}
+	p := buff.ReadInt()    // byte array length
+	q := buff.ReadBytes(p) // byte array
+	errC := o.UnmarshalBinary(q)
+	if errC != nil {
+		return errC
+	}
+	target.end = *o
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][struct](Window) ---
+	r := &Window{}
+	s := buff.ReadInt()    // byte array length
+	t := buff.ReadBytes(s) // byte array
+	errD := r.UnmarshalBinary(t)
+	if errD != nil {
+		return errD
+	}
+	target.window = *r
+	// --- [end][read][struct](Window) ---
+
+	u := buff.ReadFloat64() // read float64
+	target.adjustment = u
+
+	w := buff.ReadFloat64() // read float64
+	target.Cost = w
+
+	x := buff.ReadFloat64() // read float64
+	target.ByteHours = x
+
+	y := buff.ReadFloat64() // read float64
+	target.Local = y
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.Breakdown = nil
+	} else {
+		// --- [begin][read][struct](Breakdown) ---
+		z := &Breakdown{}
+		aa := buff.ReadInt()     // byte array length
+		bb := buff.ReadBytes(aa) // byte array
+		errE := z.UnmarshalBinary(bb)
+		if errE != nil {
+			return errE
+		}
+		target.Breakdown = z
+		// --- [end][read][struct](Breakdown) ---
+
+	}
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  LoadBalancer
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this LoadBalancer instance
+// into a byte array
+//
+// Layout: CodecVersion byte, nil-flagged properties, nil-flagged labels map,
+// start, end and window as length-prefixed sub-encodings, then adjustment and
+// Cost as float64s. NOTE: properties precede labels here, the opposite order
+// of Disk.MarshalBinary — each type's Unmarshal must match its own Marshal.
+func (target *LoadBalancer) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.properties == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](AssetProperties) ---
+		a, errA := target.properties.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][struct](AssetProperties) ---
+
+	}
+	// --- [begin][write][alias](AssetLabels) ---
+	if map[string]string(target.labels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.labels))) // map length
+		for k, v := range map[string]string(target.labels) {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AssetLabels) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.start.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	c, errC := target.end.MarshalBinary()
+	if errC != nil {
+		return nil, errC
+	}
+	buff.WriteInt(len(c))
+	buff.WriteBytes(c)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][struct](Window) ---
+	d, errD := target.window.MarshalBinary()
+	if errD != nil {
+		return nil, errD
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][struct](Window) ---
+
+	buff.WriteFloat64(target.adjustment) // write float64
+	buff.WriteFloat64(target.Cost)       // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the LoadBalancer type
+//
+// The read order must mirror LoadBalancer.MarshalBinary exactly: version,
+// properties, labels, start, end, window, adjustment, Cost. Buffer
+// over/under-reads panic and are converted to errors by the deferred recover.
+func (target *LoadBalancer) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling LoadBalancer. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.properties = nil
+	} else {
+		// --- [begin][read][struct](AssetProperties) ---
+		a := &AssetProperties{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.properties = a
+		// --- [end][read][struct](AssetProperties) ---
+
+	}
+	// --- [begin][read][alias](AssetLabels) ---
+	var d map[string]string
+	if buff.ReadUInt8() == uint8(0) {
+		d = nil
+	} else {
+		// --- [begin][read][map](map[string]string) ---
+		e := make(map[string]string)
+		f := buff.ReadInt() // map len
+		for i := 0; i < f; i++ {
+			var k string
+			g := buff.ReadString() // read string
+			k = g
+
+			var v string
+			h := buff.ReadString() // read string
+			v = h
+
+			e[k] = v
+		}
+		d = e
+		// --- [end][read][map](map[string]string) ---
+
+	}
+	target.labels = AssetLabels(d)
+	// --- [end][read][alias](AssetLabels) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	l := &time.Time{}
+	m := buff.ReadInt()    // byte array length
+	n := buff.ReadBytes(m) // byte array
+	errB := l.UnmarshalBinary(n)
+	if errB != nil {
+		return errB
+	}
+	target.start = *l
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	o := &time.Time{}
+	p := buff.ReadInt()    // byte array length
+	q := buff.ReadBytes(p) // byte array
+	errC := o.UnmarshalBinary(q)
+	if errC != nil {
+		return errC
+	}
+	target.end = *o
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][struct](Window) ---
+	r := &Window{}
+	s := buff.ReadInt()    // byte array length
+	t := buff.ReadBytes(s) // byte array
+	errD := r.UnmarshalBinary(t)
+	if errD != nil {
+		return errD
+	}
+	target.window = *r
+	// --- [end][read][struct](Window) ---
+
+	u := buff.ReadFloat64() // read float64
+	target.adjustment = u
+
+	w := buff.ReadFloat64() // read float64
+	target.Cost = w
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Network
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Network instance
+// into a byte array
+//
+// Layout (identical to LoadBalancer.MarshalBinary): CodecVersion byte,
+// nil-flagged properties, nil-flagged labels map, start, end and window as
+// length-prefixed sub-encodings, then adjustment and Cost as float64s.
+func (target *Network) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.properties == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](AssetProperties) ---
+		a, errA := target.properties.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][struct](AssetProperties) ---
+
+	}
+	// --- [begin][write][alias](AssetLabels) ---
+	if map[string]string(target.labels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.labels))) // map length
+		for k, v := range map[string]string(target.labels) {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AssetLabels) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.start.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	c, errC := target.end.MarshalBinary()
+	if errC != nil {
+		return nil, errC
+	}
+	buff.WriteInt(len(c))
+	buff.WriteBytes(c)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][struct](Window) ---
+	d, errD := target.window.MarshalBinary()
+	if errD != nil {
+		return nil, errD
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][struct](Window) ---
+
+	buff.WriteFloat64(target.adjustment) // write float64
+	buff.WriteFloat64(target.Cost)       // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Network type
+//
+// The read order must mirror Network.MarshalBinary exactly: version,
+// properties, labels, start, end, window, adjustment, Cost. Buffer
+// over/under-reads panic and are converted to errors by the deferred recover.
+func (target *Network) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Network. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.properties = nil
+	} else {
+		// --- [begin][read][struct](AssetProperties) ---
+		a := &AssetProperties{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.properties = a
+		// --- [end][read][struct](AssetProperties) ---
+
+	}
+	// --- [begin][read][alias](AssetLabels) ---
+	var d map[string]string
+	if buff.ReadUInt8() == uint8(0) {
+		d = nil
+	} else {
+		// --- [begin][read][map](map[string]string) ---
+		e := make(map[string]string)
+		f := buff.ReadInt() // map len
+		for i := 0; i < f; i++ {
+			var k string
+			g := buff.ReadString() // read string
+			k = g
+
+			var v string
+			h := buff.ReadString() // read string
+			v = h
+
+			e[k] = v
+		}
+		d = e
+		// --- [end][read][map](map[string]string) ---
+
+	}
+	target.labels = AssetLabels(d)
+	// --- [end][read][alias](AssetLabels) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	l := &time.Time{}
+	m := buff.ReadInt()    // byte array length
+	n := buff.ReadBytes(m) // byte array
+	errB := l.UnmarshalBinary(n)
+	if errB != nil {
+		return errB
+	}
+	target.start = *l
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	o := &time.Time{}
+	p := buff.ReadInt()    // byte array length
+	q := buff.ReadBytes(p) // byte array
+	errC := o.UnmarshalBinary(q)
+	if errC != nil {
+		return errC
+	}
+	target.end = *o
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][struct](Window) ---
+	r := &Window{}
+	s := buff.ReadInt()    // byte array length
+	t := buff.ReadBytes(s) // byte array
+	errD := r.UnmarshalBinary(t)
+	if errD != nil {
+		return errD
+	}
+	target.window = *r
+	// --- [end][read][struct](Window) ---
+
+	u := buff.ReadFloat64() // read float64
+	target.adjustment = u
+
+	w := buff.ReadFloat64() // read float64
+	target.Cost = w
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Node
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Node instance
+// into a byte array
+//
+// Layout: CodecVersion byte, nil-flagged properties, nil-flagged labels map,
+// start, end and window as length-prefixed sub-encodings, adjustment,
+// NodeType string, CPUCoreHours, RAMByteHours, nil-flagged CPUBreakdown and
+// RAMBreakdown, then CPUCost, GPUCost, RAMCost, Discount and Preemptible as
+// float64s. Node.UnmarshalBinary must read in exactly this order.
+func (target *Node) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.properties == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](AssetProperties) ---
+		a, errA := target.properties.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][struct](AssetProperties) ---
+
+	}
+	// --- [begin][write][alias](AssetLabels) ---
+	if map[string]string(target.labels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.labels))) // map length
+		for k, v := range map[string]string(target.labels) {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AssetLabels) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.start.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	c, errC := target.end.MarshalBinary()
+	if errC != nil {
+		return nil, errC
+	}
+	buff.WriteInt(len(c))
+	buff.WriteBytes(c)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][struct](Window) ---
+	d, errD := target.window.MarshalBinary()
+	if errD != nil {
+		return nil, errD
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][struct](Window) ---
+
+	buff.WriteFloat64(target.adjustment)   // write float64
+	buff.WriteString(target.NodeType)      // write string
+	buff.WriteFloat64(target.CPUCoreHours) // write float64
+	buff.WriteFloat64(target.RAMByteHours) // write float64
+	if target.CPUBreakdown == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](Breakdown) ---
+		e, errE := target.CPUBreakdown.MarshalBinary()
+		if errE != nil {
+			return nil, errE
+		}
+		buff.WriteInt(len(e))
+		buff.WriteBytes(e)
+		// --- [end][write][struct](Breakdown) ---
+
+	}
+	if target.RAMBreakdown == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](Breakdown) ---
+		f, errF := target.RAMBreakdown.MarshalBinary()
+		if errF != nil {
+			return nil, errF
+		}
+		buff.WriteInt(len(f))
+		buff.WriteBytes(f)
+		// --- [end][write][struct](Breakdown) ---
+
+	}
+	buff.WriteFloat64(target.CPUCost)     // write float64
+	buff.WriteFloat64(target.GPUCost)     // write float64
+	buff.WriteFloat64(target.RAMCost)     // write float64
+	buff.WriteFloat64(target.Discount)    // write float64
+	buff.WriteFloat64(target.Preemptible) // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Node type
+//
+// The read order must mirror Node.MarshalBinary exactly: version, properties,
+// labels, start, end, window, adjustment, NodeType, CPUCoreHours,
+// RAMByteHours, CPUBreakdown, RAMBreakdown, CPUCost, GPUCost, RAMCost,
+// Discount, Preemptible. Buffer over/under-reads panic and are converted to
+// errors by the deferred recover.
+func (target *Node) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Node. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.properties = nil
+	} else {
+		// --- [begin][read][struct](AssetProperties) ---
+		a := &AssetProperties{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.properties = a
+		// --- [end][read][struct](AssetProperties) ---
+
+	}
+	// --- [begin][read][alias](AssetLabels) ---
+	var d map[string]string
+	if buff.ReadUInt8() == uint8(0) {
+		d = nil
+	} else {
+		// --- [begin][read][map](map[string]string) ---
+		e := make(map[string]string)
+		f := buff.ReadInt() // map len
+		for i := 0; i < f; i++ {
+			var k string
+			g := buff.ReadString() // read string
+			k = g
+
+			var v string
+			h := buff.ReadString() // read string
+			v = h
+
+			e[k] = v
+		}
+		d = e
+		// --- [end][read][map](map[string]string) ---
+
+	}
+	target.labels = AssetLabels(d)
+	// --- [end][read][alias](AssetLabels) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	l := &time.Time{}
+	m := buff.ReadInt()    // byte array length
+	n := buff.ReadBytes(m) // byte array
+	errB := l.UnmarshalBinary(n)
+	if errB != nil {
+		return errB
+	}
+	target.start = *l
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][reference](time.Time) ---
+	o := &time.Time{}
+	p := buff.ReadInt()    // byte array length
+	q := buff.ReadBytes(p) // byte array
+	errC := o.UnmarshalBinary(q)
+	if errC != nil {
+		return errC
+	}
+	target.end = *o
+	// --- [end][read][reference](time.Time) ---
+
+	// --- [begin][read][struct](Window) ---
+	r := &Window{}
+	s := buff.ReadInt()    // byte array length
+	t := buff.ReadBytes(s) // byte array
+	errD := r.UnmarshalBinary(t)
+	if errD != nil {
+		return errD
+	}
+	target.window = *r
+	// --- [end][read][struct](Window) ---
+
+	u := buff.ReadFloat64() // read float64
+	target.adjustment = u
+
+	w := buff.ReadString() // read string
+	target.NodeType = w
+
+	x := buff.ReadFloat64() // read float64
+	target.CPUCoreHours = x
+
+	y := buff.ReadFloat64() // read float64
+	target.RAMByteHours = y
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.CPUBreakdown = nil
+	} else {
+		// --- [begin][read][struct](Breakdown) ---
+		z := &Breakdown{}
+		aa := buff.ReadInt()     // byte array length
+		bb := buff.ReadBytes(aa) // byte array
+		errE := z.UnmarshalBinary(bb)
+		if errE != nil {
+			return errE
+		}
+		target.CPUBreakdown = z
+		// --- [end][read][struct](Breakdown) ---
+
+	}
+	if buff.ReadUInt8() == uint8(0) {
+		target.RAMBreakdown = nil
+	} else {
+		// --- [begin][read][struct](Breakdown) ---
+		cc := &Breakdown{}
+		dd := buff.ReadInt()     // byte array length
+		ee := buff.ReadBytes(dd) // byte array
+		errF := cc.UnmarshalBinary(ee)
+		if errF != nil {
+			return errF
+		}
+		target.RAMBreakdown = cc
+		// --- [end][read][struct](Breakdown) ---
+
+	}
+	ff := buff.ReadFloat64() // read float64
+	target.CPUCost = ff
+
+	gg := buff.ReadFloat64() // read float64
+	target.GPUCost = gg
+
+	hh := buff.ReadFloat64() // read float64
+	target.RAMCost = hh
+
+	ll := buff.ReadFloat64() // read float64
+	target.Discount = ll
+
+	mm := buff.ReadFloat64() // read float64
+	target.Preemptible = mm
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  SharedAsset
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this SharedAsset instance
+// into a byte array
+//
+// Layout: CodecVersion byte, nil-flagged properties, nil-flagged labels map,
+// length-prefixed Window, then Cost as float64. Unlike Disk/Node, there are
+// no separate start/end times — only the Window is encoded.
+func (target *SharedAsset) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.properties == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](AssetProperties) ---
+		a, errA := target.properties.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][struct](AssetProperties) ---
+
+	}
+	// --- [begin][write][alias](AssetLabels) ---
+	if map[string]string(target.labels) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(map[string]string(target.labels))) // map length
+		for k, v := range map[string]string(target.labels) {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [end][write][alias](AssetLabels) ---
+
+	// --- [begin][write][struct](Window) ---
+	b, errB := target.window.MarshalBinary()
+	if errB != nil {
+		return nil, errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][struct](Window) ---
+
+	buff.WriteFloat64(target.Cost) // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the SharedAsset type
+//
+// The read order must mirror SharedAsset.MarshalBinary exactly: version,
+// properties, labels, window, Cost. Buffer over/under-reads panic and are
+// converted to errors by the deferred recover.
+func (target *SharedAsset) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling SharedAsset. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.properties = nil
+	} else {
+		// --- [begin][read][struct](AssetProperties) ---
+		a := &AssetProperties{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.properties = a
+		// --- [end][read][struct](AssetProperties) ---
+
+	}
+	// --- [begin][read][alias](AssetLabels) ---
+	var d map[string]string
+	if buff.ReadUInt8() == uint8(0) {
+		d = nil
+	} else {
+		// --- [begin][read][map](map[string]string) ---
+		e := make(map[string]string)
+		f := buff.ReadInt() // map len
+		for i := 0; i < f; i++ {
+			var k string
+			g := buff.ReadString() // read string
+			k = g
+
+			var v string
+			h := buff.ReadString() // read string
+			v = h
+
+			e[k] = v
+		}
+		d = e
+		// --- [end][read][map](map[string]string) ---
+
+	}
+	target.labels = AssetLabels(d)
+	// --- [end][read][alias](AssetLabels) ---
+
+	// --- [begin][read][struct](Window) ---
+	l := &Window{}
+	m := buff.ReadInt()    // byte array length
+	n := buff.ReadBytes(m) // byte array
+	errB := l.UnmarshalBinary(n)
+	if errB != nil {
+		return errB
+	}
+	target.window = *l
+	// --- [end][read][struct](Window) ---
+
+	o := buff.ReadFloat64() // read float64
+	target.Cost = o
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Window
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Window instance
+// into a byte array
+//
+// Layout: CodecVersion byte, then start and end, each as a 0/1 presence byte
+// followed (when present) by a length-prefixed time.Time binary encoding.
+// Both bounds are nil-able here, unlike the value-typed times on the asset
+// structs.
+func (target *Window) MarshalBinary() (data []byte, err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	if target.start == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][reference](time.Time) ---
+		a, errA := target.start.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][reference](time.Time) ---
+
+	}
+	if target.end == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][reference](time.Time) ---
+		b, errB := target.end.MarshalBinary()
+		if errB != nil {
+			return nil, errB
+		}
+		buff.WriteInt(len(b))
+		buff.WriteBytes(b)
+		// --- [end][write][reference](time.Time) ---
+
+	}
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Window type
+//
+// The read order must mirror Window.MarshalBinary exactly: version, then
+// nil-flagged start and end times. Buffer over/under-reads panic and are
+// converted to errors by the deferred recover.
+func (target *Window) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != CodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Window. Expected %d, got %d", CodecVersion, version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.start = nil
+	} else {
+		// --- [begin][read][reference](time.Time) ---
+		a := &time.Time{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.start = a
+		// --- [end][read][reference](time.Time) ---
+
+	}
+	if buff.ReadUInt8() == uint8(0) {
+		target.end = nil
+	} else {
+		// --- [begin][read][reference](time.Time) ---
+		d := &time.Time{}
+		e := buff.ReadInt()    // byte array length
+		f := buff.ReadBytes(e) // byte array
+		errB := d.UnmarshalBinary(f)
+		if errB != nil {
+			return errB
+		}
+		target.end = d
+		// --- [end][read][reference](time.Time) ---
+
+	}
+	return nil
+}

+ 638 - 0
pkg/kubecost/kubecost_codecs_test.go

@@ -0,0 +1,638 @@
+package kubecost
+
+import (
+	"testing"
+	"time"
+)
+
+func TestAllocation_BinaryEncoding(t *testing.T) {
+	// TODO niko/etl
+}
+
+func TestAllocationSet_BinaryEncoding(t *testing.T) {
+	// TODO niko/etl
+}
+
+func BenchmarkAllocationSetRange_BinaryEncoding(b *testing.B) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+	startD2 := startYesterday
+	startD1 := startD2.Add(-day)
+	startD0 := startD1.Add(-day)
+
+	var asr0, asr1 *AllocationSetRange
+	var bs []byte
+	var err error
+
+	asr0 = NewAllocationSetRange(
+		generateAllocationSet(startD0),
+		generateAllocationSet(startD1),
+		generateAllocationSet(startD2),
+	)
+
+	for it := 0; it < b.N; it++ {
+		bs, err = asr0.MarshalBinary()
+		if err != nil {
+			b.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+			return
+		}
+
+		asr1 = &AllocationSetRange{}
+		err = asr1.UnmarshalBinary(bs)
+		if err != nil {
+			b.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+			return
+		}
+
+		if asr0.Length() != asr1.Length() {
+			b.Fatalf("AllocationSetRange.Binary: expected %d; found %d", asr0.Length(), asr1.Length())
+		}
+		if !asr0.Window().Equal(asr1.Window()) {
+			b.Fatalf("AllocationSetRange.Binary: expected %s; found %s", asr0.Window(), asr1.Window())
+		}
+
+		asr0.Each(func(i int, as0 *AllocationSet) {
+			as1, err := asr1.Get(i)
+			if err != nil {
+				b.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+			}
+
+			if as0.Length() != as1.Length() {
+				b.Fatalf("AllocationSetRange.Binary: expected %d; found %d", as0.Length(), as1.Length())
+			}
+			if !as0.Window.Equal(as1.Window) {
+				b.Fatalf("AllocationSetRange.Binary: expected %s; found %s", as0.Window, as1.Window)
+			}
+
+			as0.Each(func(k string, a0 *Allocation) {
+				a1 := as1.Get(k)
+				if a1 == nil {
+					b.Fatalf("AllocationSetRange.Binary: missing Allocation: %s", a0)
+				}
+
+				if !a0.Equal(a1) {
+					b.Fatalf("AllocationSetRange.Binary: unequal Allocations \"%s\": expected %s; found %s", k, a0, a1)
+				}
+			})
+		})
+	}
+}
+
+func TestAllocationSetRange_BinaryEncoding(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+	startD2 := startYesterday
+	startD1 := startD2.Add(-day)
+	startD0 := startD1.Add(-day)
+
+	var asr0, asr1 *AllocationSetRange
+	var bs []byte
+	var err error
+
+	asr0 = NewAllocationSetRange(
+		generateAllocationSet(startD0),
+		generateAllocationSet(startD1),
+		generateAllocationSet(startD2),
+	)
+
+	bs, err = asr0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+		return
+	}
+
+	asr1 = &AllocationSetRange{}
+	err = asr1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+		return
+	}
+
+	if asr0.Length() != asr1.Length() {
+		t.Fatalf("AllocationSetRange.Binary: expected %d; found %d", asr0.Length(), asr1.Length())
+	}
+	if !asr0.Window().Equal(asr1.Window()) {
+		t.Fatalf("AllocationSetRange.Binary: expected %s; found %s", asr0.Window(), asr1.Window())
+	}
+
+	asr0.Each(func(i int, as0 *AllocationSet) {
+		as1, err := asr1.Get(i)
+		if err != nil {
+			t.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+		}
+
+		if as0.Length() != as1.Length() {
+			t.Fatalf("AllocationSetRange.Binary: expected %d; found %d", as0.Length(), as1.Length())
+		}
+		if !as0.Window.Equal(as1.Window) {
+			t.Fatalf("AllocationSetRange.Binary: expected %s; found %s", as0.Window, as1.Window)
+		}
+
+		as0.Each(func(k string, a0 *Allocation) {
+			a1 := as1.Get(k)
+			if a1 == nil {
+				t.Fatalf("AllocationSetRange.Binary: missing Allocation: %s", a0)
+			}
+
+			if !a0.Equal(a1) {
+				t.Fatalf("AllocationSetRange.Binary: unequal Allocations \"%s\": expected %s; found %s", k, a0, a1)
+			}
+		})
+	})
+}
+
+func TestAny_BinaryEncoding(t *testing.T) {
+	start := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
+	end := start.Add(24 * time.Hour)
+	window := NewWindow(&start, &end)
+
+	var a0, a1 *Any
+	var bs []byte
+	var err error
+
+	a0 = NewAsset(*window.start, *window.end, window)
+	a0.SetProperties(&AssetProperties{
+		Name:       "any1",
+		Cluster:    "cluster1",
+		ProviderID: "世界",
+	})
+	a0.Cost = 123.45
+	a0.SetAdjustment(1.23)
+
+	bs, err = a0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Any.Binary: unexpected error: %s", err)
+	}
+
+	a1 = &Any{}
+	err = a1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Any.Binary: unexpected error: %s", err)
+	}
+
+	if a1.Properties().Name != a0.Properties().Name {
+		t.Fatalf("Any.Binary: expected %s, found %s", a0.Properties().Name, a1.Properties().Name)
+	}
+	if a1.Properties().Cluster != a0.Properties().Cluster {
+		t.Fatalf("Any.Binary: expected %s, found %s", a0.Properties().Cluster, a1.Properties().Cluster)
+	}
+	if a1.Properties().ProviderID != a0.Properties().ProviderID {
+		t.Fatalf("Any.Binary: expected %s, found %s", a0.Properties().ProviderID, a1.Properties().ProviderID)
+	}
+	if a1.Adjustment() != a0.Adjustment() {
+		t.Fatalf("Any.Binary: expected %f, found %f", a0.Adjustment(), a1.Adjustment())
+	}
+	if a1.TotalCost() != a0.TotalCost() {
+		t.Fatalf("Any.Binary: expected %f, found %f", a0.TotalCost(), a1.TotalCost())
+	}
+	if !a1.Window().Equal(a0.Window()) {
+		t.Fatalf("Any.Binary: expected %s, found %s", a0.Window(), a1.Window())
+	}
+}
+
+func TestAsset_BinaryEncoding(t *testing.T) {
+	// TODO niko/etl
+}
+
+func TestAssetSet_BinaryEncoding(t *testing.T) {
+	// TODO niko/etl
+}
+
+func TestAssetSetRange_BinaryEncoding(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+	startD2 := startYesterday
+	startD1 := startD2.Add(-day)
+	startD0 := startD1.Add(-day)
+
+	var asr0, asr1 *AssetSetRange
+	var bs []byte
+	var err error
+
+	asr0 = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+
+	bs, err = asr0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Binary: unexpected error: %s", err)
+		return
+	}
+
+	asr1 = &AssetSetRange{}
+	err = asr1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("AssetSetRange.Binary: unexpected error: %s", err)
+		return
+	}
+
+	if asr0.Length() != asr1.Length() {
+		t.Fatalf("AssetSetRange.Binary: expected %d; found %d", asr0.Length(), asr1.Length())
+	}
+	if !asr0.Window().Equal(asr1.Window()) {
+		t.Fatalf("AssetSetRange.Binary: expected %s; found %s", asr0.Window(), asr1.Window())
+	}
+
+	asr0.Each(func(i int, as0 *AssetSet) {
+		as1, err := asr1.Get(i)
+		if err != nil {
+			t.Fatalf("AssetSetRange.Binary: unexpected error: %s", err)
+		}
+
+		if as0.Length() != as1.Length() {
+			t.Fatalf("AssetSetRange.Binary: expected %d; found %d", as0.Length(), as1.Length())
+		}
+		if !as0.Window.Equal(as1.Window) {
+			t.Fatalf("AssetSetRange.Binary: expected %s; found %s", as0.Window, as1.Window)
+		}
+
+		as0.Each(func(k string, a0 Asset) {
+			a1, ok := as1.Get(k)
+			if !ok {
+				t.Fatalf("AssetSetRange.Binary: missing Asset: %s", a0)
+			}
+
+			if !a0.Equal(a1) {
+				t.Fatalf("AssetSetRange.Binary: unequal Assets \"%s\": expected %s; found %s", k, a0, a1)
+			}
+		})
+	})
+}
+
+// TestBreakdown_BinaryEncoding round-trips a Breakdown through
+// MarshalBinary/UnmarshalBinary and verifies every field survives intact.
+func TestBreakdown_BinaryEncoding(t *testing.T) {
+	var b0, b1 *Breakdown
+	var bs []byte
+	var err error
+
+	b0 = &Breakdown{
+		Idle:   0.75,
+		Other:  0.1,
+		System: 0.0,
+		User:   0.15,
+	}
+
+	bs, err = b0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Breakdown.Binary: unexpected error: %s", err)
+	}
+
+	b1 = &Breakdown{}
+	err = b1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Breakdown.Binary: unexpected error: %s", err)
+	}
+
+	// exact float equality is intended here: encode/decode must be lossless
+	if b1.Idle != b0.Idle {
+		t.Fatalf("Breakdown.Binary: expected %f, found %f", b0.Idle, b1.Idle)
+	}
+	if b1.Other != b0.Other {
+		t.Fatalf("Breakdown.Binary: expected %f, found %f", b0.Other, b1.Other)
+	}
+	if b1.System != b0.System {
+		t.Fatalf("Breakdown.Binary: expected %f, found %f", b0.System, b1.System)
+	}
+	if b1.User != b0.User {
+		t.Fatalf("Breakdown.Binary: expected %f, found %f", b0.User, b1.User)
+	}
+}
+
+func TestCloudAny_BinaryEncoding(t *testing.T) {
+	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
+	we := ws.Add(24 * time.Hour)
+	window := NewWindow(&ws, &we)
+
+	var a0, a1 *Cloud
+	var bs []byte
+	var err error
+
+	a0 = NewCloud(ComputeCategory, "providerid1", *window.start, *window.end, window)
+	a0.Cost = 6.09
+	a0.SetAdjustment(-1.23)
+
+	bs, err = a0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("CloudAny.Binary: unexpected error: %s", err)
+	}
+
+	a1 = &Cloud{}
+	err = a1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("CloudAny.Binary: unexpected error: %s", err)
+	}
+
+	if !a0.Equal(a1) {
+		t.Fatalf("CloudAny.Binary: expected %v, found %v", a0, a1)
+	}
+}
+
+func TestClusterManagement_BinaryEncoding(t *testing.T) {
+	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
+	we := ws.Add(24 * time.Hour)
+	window := NewWindow(&ws, &we)
+
+	var a0, a1 *ClusterManagement
+	var bs []byte
+	var err error
+
+	a0 = NewClusterManagement("aws", "cluster1", window)
+	a0.Cost = 4.003
+	a0.SetAdjustment(-3.23)
+
+	bs, err = a0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("ClusterManagement.Binary: unexpected error: %s", err)
+	}
+
+	a1 = &ClusterManagement{}
+	err = a1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("ClusterManagement.Binary: unexpected error: %s", err)
+	}
+
+	if !a0.Equal(a1) {
+		t.Fatalf("ClusterManagement.Binary: expected %v, found %v", a0, a1)
+	}
+}
+
+func TestDisk_BinaryEncoding(t *testing.T) {
+	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
+	we := ws.Add(24 * time.Hour)
+	window := NewWindow(&ws, &we)
+	hours := window.Duration().Hours()
+
+	start := time.Date(2020, time.September, 16, 3, 0, 0, 0, time.UTC)
+	end := time.Date(2020, time.September, 16, 15, 12, 0, 0, time.UTC)
+
+	var a0, a1 *Disk
+	var bs []byte
+	var err error
+
+	a0 = NewDisk("any1", "cluster1", "世界", start, end, window)
+	a0.ByteHours = 100 * 1024 * 1024 * 1024 * hours
+	a0.Cost = 4.003
+	a0.Local = 0.4
+	a0.Breakdown = &Breakdown{
+		Idle:   0.9,
+		Other:  0.05,
+		System: 0.05,
+		User:   0.0,
+	}
+	a0.SetAdjustment(-3.23)
+
+	bs, err = a0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Disk.Binary: unexpected error: %s", err)
+	}
+
+	a1 = &Disk{}
+	err = a1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Disk.Binary: unexpected error: %s", err)
+	}
+
+	if !a0.Equal(a1) {
+		t.Fatalf("Disk.Binary: expected %v, found %v", a0, a1)
+	}
+}
+
+func TestNode_BinaryEncoding(t *testing.T) {
+	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
+	we := ws.Add(24 * time.Hour)
+	window := NewWindow(&ws, &we)
+	hours := window.Duration().Hours()
+
+	start := time.Date(2020, time.September, 16, 3, 0, 0, 0, time.UTC)
+	end := time.Date(2020, time.September, 16, 15, 12, 0, 0, time.UTC)
+
+	var a0, a1 *Node
+	var bs []byte
+	var err error
+
+	a0 = NewNode("any1", "cluster1", "世界", start, end, window)
+	a0.NodeType = "n2-standard"
+	a0.Preemptible = 1.0
+	a0.CPUCoreHours = 2.0 * hours
+	a0.RAMByteHours = 12.0 * gb * hours
+	a0.CPUCost = 1.50
+	a0.GPUCost = 30.44
+	a0.RAMCost = 15.0
+	a0.Discount = 0.9
+	a0.CPUBreakdown = &Breakdown{
+		Idle:   0.9,
+		Other:  0.05,
+		System: 0.05,
+		User:   0.0,
+	}
+	a0.RAMBreakdown = &Breakdown{
+		Idle:   0.4,
+		Other:  0.05,
+		System: 0.05,
+		User:   0.5,
+	}
+	a0.SetAdjustment(1.23)
+
+	bs, err = a0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Node.Binary: unexpected error: %s", err)
+	}
+
+	a1 = &Node{}
+	err = a1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Node.Binary: unexpected error: %s", err)
+	}
+
+	if !a0.Equal(a1) {
+		t.Fatalf("Node.Binary: expected %v, found %v", a0, a1)
+	}
+}
+
+func TestProperties_BinaryEncoding(t *testing.T) {
+	var p0, p1 *Properties
+	var bs []byte
+	var err error
+
+	// empty properties
+	p0 = &Properties{}
+	bs, err = p0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Properties.Binary: unexpected error: %s", err)
+	}
+
+	p1 = &Properties{}
+	err = p1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Properties.Binary: unexpected error: %s", err)
+	}
+
+	if !p0.Equal(p1) {
+		t.Fatalf("Properties.Binary: expected %s; found %s", p0, p1)
+	}
+
+	// complete properties
+	p0 = &Properties{}
+	p0.SetCluster("cluster1")
+	p0.SetContainer("container-abc-1")
+	p0.SetController("daemonset-abc")
+	p0.SetControllerKind("daemonset")
+	p0.SetNamespace("namespace1")
+	p0.SetNode("node1")
+	p0.SetPod("daemonset-abc-123")
+	p0.SetLabels(map[string]string{
+		"app":  "cost-analyzer",
+		"tier": "frontend",
+	})
+	p0.SetServices([]string{"kubecost-frontend"})
+	bs, err = p0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Properties.Binary: unexpected error: %s", err)
+	}
+
+	p1 = &Properties{}
+	err = p1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Properties.Binary: unexpected error: %s", err)
+	}
+
+	if !p0.Equal(p1) {
+		t.Fatalf("Properties.Binary: expected %s; found %s", p0, p1)
+	}
+
+	// incomplete properties
+	p0 = &Properties{}
+	p0.SetCluster("cluster1")
+	p0.SetController("daemonset-abc")
+	p0.SetControllerKind("daemonset")
+	p0.SetNamespace("namespace1")
+	p0.SetServices([]string{})
+	bs, err = p0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Properties.Binary: unexpected error: %s", err)
+	}
+
+	p1 = &Properties{}
+	err = p1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Properties.Binary: unexpected error: %s", err)
+	}
+
+	if !p0.Equal(p1) {
+		t.Fatalf("Properties.Binary: expected %s; found %s", p0, p1)
+	}
+}
+
+func TestShared_BinaryEncoding(t *testing.T) {
+	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
+	we := ws.Add(24 * time.Hour)
+	window := NewWindow(&ws, &we)
+
+	var a0, a1 *SharedAsset
+	var bs []byte
+	var err error
+
+	a0 = NewSharedAsset("any1", window)
+	a0.Cost = 4.04
+	a0.SetAdjustment(1.23)
+
+	bs, err = a0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("SharedAsset.Binary: unexpected error: %s", err)
+	}
+
+	a1 = &SharedAsset{}
+	err = a1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("SharedAsset.Binary: unexpected error: %s", err)
+	}
+
+	if !a0.Equal(a1) {
+		t.Fatalf("SharedAsset.Binary: expected %v, found %v", a0, a1)
+	}
+}
+
+func TestWindow_BinaryEncoding(t *testing.T) {
+	var w0, w1 Window
+	var bs []byte
+	var err error
+
+	// Window (nil, nil)
+	w0 = NewWindow(nil, nil)
+	bs, err = w0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Window.Binary: unexpected error: %s", err)
+	}
+
+	err = w1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Window.Binary: unexpected error: %s", err)
+	}
+
+	if w1.Start() != w0.Start() {
+		t.Fatalf("Window.Binary: expected %v; found %v", w0.Start(), w1.Start())
+	}
+	if w1.End() != w0.End() {
+		t.Fatalf("Window.Binary: expected %v; found %v", w0.End(), w1.End())
+	}
+
+	// Window (time, nil)
+	ts := time.Now()
+	w0 = NewWindow(&ts, nil)
+	bs, err = w0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Window.Binary: unexpected error: %s", err)
+	}
+
+	err = w1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Window.Binary: unexpected error: %s", err)
+	}
+
+	if !w1.Start().Equal(*w0.Start()) {
+		t.Fatalf("Window.Binary: expected %v; found %v", w0.Start(), w1.Start())
+	}
+	if w1.End() != w0.End() {
+		t.Fatalf("Window.Binary: expected %v; found %v", w0.End(), w1.End())
+	}
+
+	// Window (nil, time)
+	te := time.Now()
+	w0 = NewWindow(nil, &te)
+	bs, err = w0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Window.Binary: unexpected error: %s", err)
+	}
+
+	err = w1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Window.Binary: unexpected error: %s", err)
+	}
+
+	if w1.Start() != w0.Start() {
+		t.Fatalf("Window.Binary: expected %v; found %v", w0.Start(), w1.Start())
+	}
+	if !w1.End().Equal(*w0.End()) {
+		t.Fatalf("Window.Binary: expected %v; found %v", w0.End(), w1.End())
+	}
+
+	// Window (time, time)
+	ts, te = time.Now(), time.Now()
+	w0 = NewWindow(&ts, &te)
+	bs, err = w0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Window.Binary: unexpected error: %s", err)
+	}
+
+	err = w1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Window.Binary: unexpected error: %s", err)
+	}
+
+	if !w1.Start().Equal(*w0.Start()) {
+		t.Fatalf("Window.Binary: expected %v; found %v", w0.Start(), w1.Start())
+	}
+	if !w1.End().Equal(*w0.End()) {
+		t.Fatalf("Window.Binary: expected %v; found %v", w0.End(), w1.End())
+	}
+}

+ 744 - 0
pkg/kubecost/properties.go

@@ -0,0 +1,744 @@
+package kubecost
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	util "github.com/kubecost/cost-model/pkg/util"
+)
+
+// Property identifies one dimension by which Kubernetes objects are described,
+// e.g. cluster, namespace, or pod. Its string value doubles as the external
+// name of the dimension (see ParseProperty).
+type Property string
+
+const (
+	NilProp            Property = ""
+	ClusterProp        Property = "cluster"
+	NodeProp           Property = "node"
+	ContainerProp      Property = "container"
+	ControllerProp     Property = "controller"
+	ControllerKindProp Property = "controllerKind"
+	LabelProp          Property = "label"
+	AnnotationProp     Property = "annotation"
+	NamespaceProp      Property = "namespace"
+	PodProp            Property = "pod"
+	ServiceProp        Property = "service"
+)
+
+// availableProperties is the complete set of recognized Properties; it is the
+// search space for ParseProperty.
+var availableProperties []Property = []Property{
+	NilProp,
+	ClusterProp,
+	NodeProp,
+	ContainerProp,
+	ControllerProp,
+	ControllerKindProp,
+	LabelProp,
+	AnnotationProp,
+	NamespaceProp,
+	PodProp,
+	ServiceProp,
+}
+
+// ParseProperty converts the given string to a known Property, matching
+// case-insensitively against availableProperties. Unrecognized strings
+// parse to NilProp.
+func ParseProperty(prop string) Property {
+	for _, property := range availableProperties {
+		// EqualFold avoids allocating two lowered copies per comparison
+		if strings.EqualFold(string(property), prop) {
+			return property
+		}
+	}
+	return NilProp
+}
+
+// String returns the Property as its underlying string value.
+func (p Property) String() string {
+	return string(p)
+}
+
+// PropertyValue pairs a Property with an arbitrary value for that property.
+type PropertyValue struct {
+	Property Property
+	Value    interface{}
+}
+
+// Properties describes a set of Kubernetes objects.
+type Properties map[Property]interface{}
+
+// TODO niko/etl make sure Services deep copy works correctly
+// Clone returns a copy of the Properties map; a nil receiver clones to nil.
+// NOTE(review): this is a shallow copy — map- and slice-valued entries
+// (labels, annotations, services) still alias the original backing storage.
+func (p *Properties) Clone() Properties {
+	if p == nil {
+		return nil
+	}
+
+	clone := Properties{}
+	for k, v := range *p {
+		clone[k] = v
+	}
+	return clone
+}
+
+// Equal returns true if the two Properties contain identical entries: equal
+// scalar fields, equal label and annotation maps, and the same set of
+// services (order-insensitive). A nil receiver or argument is never equal.
+//
+// Bug fix: the element-wise comparisons for labels, annotations, and services
+// previously ran only inside the length-mismatch branch (and then returned
+// false unconditionally), so equal-length but different collections compared
+// as equal. The comparisons now run when the lengths match.
+func (p *Properties) Equal(that *Properties) bool {
+	if p == nil || that == nil {
+		return false
+	}
+
+	if p.Length() != that.Length() {
+		return false
+	}
+
+	pCluster, _ := p.GetCluster()
+	thatCluster, _ := that.GetCluster()
+	if pCluster != thatCluster {
+		return false
+	}
+
+	pNode, _ := p.GetNode()
+	thatNode, _ := that.GetNode()
+	if pNode != thatNode {
+		return false
+	}
+
+	pContainer, _ := p.GetContainer()
+	thatContainer, _ := that.GetContainer()
+	if pContainer != thatContainer {
+		return false
+	}
+
+	pController, _ := p.GetController()
+	thatController, _ := that.GetController()
+	if pController != thatController {
+		return false
+	}
+
+	pControllerKind, _ := p.GetControllerKind()
+	thatControllerKind, _ := that.GetControllerKind()
+	if pControllerKind != thatControllerKind {
+		return false
+	}
+
+	pNamespace, _ := p.GetNamespace()
+	thatNamespace, _ := that.GetNamespace()
+	if pNamespace != thatNamespace {
+		return false
+	}
+
+	pPod, _ := p.GetPod()
+	thatPod, _ := that.GetPod()
+	if pPod != thatPod {
+		return false
+	}
+
+	pLabels, _ := p.GetLabels()
+	thatLabels, _ := that.GetLabels()
+	if len(pLabels) != len(thatLabels) {
+		return false
+	}
+	for k, pv := range pLabels {
+		tv, ok := thatLabels[k]
+		if !ok || tv != pv {
+			return false
+		}
+	}
+
+	pAnnotations, _ := p.GetAnnotations()
+	thatAnnotations, _ := that.GetAnnotations()
+	if len(pAnnotations) != len(thatAnnotations) {
+		return false
+	}
+	for k, pv := range pAnnotations {
+		tv, ok := thatAnnotations[k]
+		if !ok || tv != pv {
+			return false
+		}
+	}
+
+	pServices, _ := p.GetServices()
+	thatServices, _ := that.GetServices()
+	if len(pServices) != len(thatServices) {
+		return false
+	}
+	// sort copies so that Equal does not reorder the callers' slices
+	ps := append([]string(nil), pServices...)
+	ts := append([]string(nil), thatServices...)
+	sort.Strings(ps)
+	sort.Strings(ts)
+	for i, pv := range ps {
+		if ts[i] != pv {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersection returns a new Properties containing only those scalar
+// properties (cluster, node, container, controller, controller kind,
+// namespace, pod) that are set on both p and that with equal values.
+// Services, labels, and annotations are not yet intersected (see TODO).
+func (p *Properties) Intersection(that Properties) Properties {
+	spec := &Properties{}
+
+	sCluster, sErr := p.GetCluster()
+	tCluster, tErr := that.GetCluster()
+	if sErr == nil && tErr == nil && sCluster == tCluster {
+		spec.SetCluster(sCluster)
+	}
+
+	sNode, sErr := p.GetNode()
+	tNode, tErr := that.GetNode()
+	if sErr == nil && tErr == nil && sNode == tNode {
+		spec.SetNode(sNode)
+	}
+
+	sContainer, sErr := p.GetContainer()
+	tContainer, tErr := that.GetContainer()
+	if sErr == nil && tErr == nil && sContainer == tContainer {
+		spec.SetContainer(sContainer)
+	}
+
+	sController, sErr := p.GetController()
+	tController, tErr := that.GetController()
+	if sErr == nil && tErr == nil && sController == tController {
+		spec.SetController(sController)
+	}
+
+	sControllerKind, sErr := p.GetControllerKind()
+	tControllerKind, tErr := that.GetControllerKind()
+	if sErr == nil && tErr == nil && sControllerKind == tControllerKind {
+		spec.SetControllerKind(sControllerKind)
+	}
+
+	sNamespace, sErr := p.GetNamespace()
+	tNamespace, tErr := that.GetNamespace()
+	if sErr == nil && tErr == nil && sNamespace == tNamespace {
+		spec.SetNamespace(sNamespace)
+	}
+
+	sPod, sErr := p.GetPod()
+	tPod, tErr := that.GetPod()
+	if sErr == nil && tErr == nil && sPod == tPod {
+		spec.SetPod(sPod)
+	}
+
+	// TODO niko/etl intersection of services and labels and annotations
+
+	return *spec
+}
+
+// Length returns the number of Properties.
+// A nil receiver reports zero.
+func (p *Properties) Length() int {
+	if p == nil {
+		return 0
+	}
+	return len(*p)
+}
+
+// Matches returns true if the receiver satisfies the given Properties query:
+// every property present in that must be present on p with an equal value.
+// Services match when every queried service appears among p's services;
+// labels match when every queried key/value pair appears among p's labels.
+//
+// Bug fix: the block commented "Matching on Labels" was a verbatim duplicate
+// of the Services check, so labels were never matched at all. It now compares
+// label key/value pairs.
+func (p *Properties) Matches(that Properties) bool {
+	// The only Properties that a nil Properties matches is an empty one
+	if p == nil {
+		return that.Length() == 0
+	}
+
+	// Matching on cluster, namespace, controller, controller kind, pod,
+	// and container are simple string equality comparisons. By default,
+	// we assume a match. For each Property given to match, we say that the
+	// match fails if we don't have that Property, or if we have it but the
+	// strings are not equal.
+
+	if thatCluster, thatErr := that.GetCluster(); thatErr == nil {
+		if thisCluster, thisErr := p.GetCluster(); thisErr != nil || thisCluster != thatCluster {
+			return false
+		}
+	}
+
+	if thatNode, thatErr := that.GetNode(); thatErr == nil {
+		if thisNode, thisErr := p.GetNode(); thisErr != nil || thisNode != thatNode {
+			return false
+		}
+	}
+
+	if thatNamespace, thatErr := that.GetNamespace(); thatErr == nil {
+		if thisNamespace, thisErr := p.GetNamespace(); thisErr != nil || thisNamespace != thatNamespace {
+			return false
+		}
+	}
+
+	if thatController, thatErr := that.GetController(); thatErr == nil {
+		if thisController, thisErr := p.GetController(); thisErr != nil || thisController != thatController {
+			return false
+		}
+	}
+
+	if thatControllerKind, thatErr := that.GetControllerKind(); thatErr == nil {
+		if thisControllerKind, thisErr := p.GetControllerKind(); thisErr != nil || thisControllerKind != thatControllerKind {
+			return false
+		}
+	}
+
+	if thatPod, thatErr := that.GetPod(); thatErr == nil {
+		if thisPod, thisErr := p.GetPod(); thisErr != nil || thisPod != thatPod {
+			return false
+		}
+	}
+
+	if thatContainer, thatErr := that.GetContainer(); thatErr == nil {
+		if thisContainer, thisErr := p.GetContainer(); thisErr != nil || thisContainer != thatContainer {
+			return false
+		}
+	}
+
+	// Matching on Services only occurs if a non-zero length slice of strings
+	// is given. The comparison fails if there exists a string to match that is
+	// not present in our slice of services.
+	if thatServices, thatErr := that.GetServices(); thatErr == nil && len(thatServices) > 0 {
+		thisServices, thisErr := p.GetServices()
+		if thisErr != nil {
+			return false
+		}
+
+		for _, service := range thatServices {
+			match := false
+			for _, s := range thisServices {
+				if s == service {
+					match = true
+					break
+				}
+			}
+			if !match {
+				return false
+			}
+		}
+	}
+
+	// Matching on Labels only occurs if a non-zero length map of strings is
+	// given. The comparison fails if there exists a key/value pair to match
+	// that is not present in our set of labels.
+	if thatLabels, thatErr := that.GetLabels(); thatErr == nil && len(thatLabels) > 0 {
+		thisLabels, thisErr := p.GetLabels()
+		if thisErr != nil {
+			return false
+		}
+
+		for k, v := range thatLabels {
+			if tv, ok := thisLabels[k]; !ok || tv != v {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// String renders the Properties as "{key:value; ...}" for logging and test
+// output. Map iteration order is not deterministic, so the entry order may
+// vary between calls.
+func (p *Properties) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+
+	strs := []string{}
+	for key, prop := range *p {
+		strs = append(strs, fmt.Sprintf("%s:%s", key, prop))
+	}
+	return fmt.Sprintf("{%s}", strings.Join(strs, "; "))
+}
+
+// Get returns the string value stored under the given Property, or an error
+// if the property is unset or holds a non-string value.
+func (p *Properties) Get(prop Property) (string, error) {
+	if raw, ok := (*p)[prop]; ok {
+		if result, ok := raw.(string); ok {
+			return result, nil
+		}
+		return "", fmt.Errorf("%s is not a string", prop)
+	}
+	return "", fmt.Errorf("%s not set", prop)
+}
+
+// Has returns true if the given Property is set, regardless of its value.
+func (p *Properties) Has(prop Property) bool {
+	_, ok := (*p)[prop]
+	return ok
+}
+
+// Set stores the given string value under the given Property.
+func (p *Properties) Set(prop Property, value string) {
+	(*p)[prop] = value
+}
+
+func (p *Properties) GetCluster() (string, error) {
+	if raw, ok := (*p)[ClusterProp]; ok {
+		if cluster, ok := raw.(string); ok {
+			return cluster, nil
+		}
+		return "", fmt.Errorf("ClusterProp is not a string")
+	}
+	return "", fmt.Errorf("ClusterProp not set")
+}
+
+func (p *Properties) HasCluster() bool {
+	_, ok := (*p)[ClusterProp]
+	return ok
+}
+
+func (p *Properties) SetCluster(cluster string) {
+	(*p)[ClusterProp] = cluster
+}
+
+func (p *Properties) GetNode() (string, error) {
+	if raw, ok := (*p)[NodeProp]; ok {
+		if node, ok := raw.(string); ok {
+			return node, nil
+		}
+		return "", fmt.Errorf("NodeProp is not a string")
+	}
+	return "", fmt.Errorf("NodeProp not set")
+}
+
+func (p *Properties) HasNode() bool {
+	_, ok := (*p)[NodeProp]
+	return ok
+}
+
+func (p *Properties) SetNode(node string) {
+	(*p)[NodeProp] = node
+}
+
+func (p *Properties) GetContainer() (string, error) {
+	if raw, ok := (*p)[ContainerProp]; ok {
+		if container, ok := raw.(string); ok {
+			return container, nil
+		}
+		return "", fmt.Errorf("ContainerProp is not a string")
+	}
+	return "", fmt.Errorf("ContainerProp not set")
+}
+
+func (p *Properties) HasContainer() bool {
+	_, ok := (*p)[ContainerProp]
+	return ok
+}
+
+func (p *Properties) SetContainer(container string) {
+	(*p)[ContainerProp] = container
+}
+
+func (p *Properties) GetController() (string, error) {
+	if raw, ok := (*p)[ControllerProp]; ok {
+		if controller, ok := raw.(string); ok {
+			return controller, nil
+		}
+		return "", fmt.Errorf("ControllerProp is not a string")
+	}
+	return "", fmt.Errorf("ControllerProp not set")
+}
+
+func (p *Properties) HasController() bool {
+	_, ok := (*p)[ControllerProp]
+	return ok
+}
+
+func (p *Properties) SetController(controller string) {
+	(*p)[ControllerProp] = controller
+}
+
+func (p *Properties) GetControllerKind() (string, error) {
+	if raw, ok := (*p)[ControllerKindProp]; ok {
+		if controllerKind, ok := raw.(string); ok {
+			return controllerKind, nil
+		}
+		return "", fmt.Errorf("ControllerKindProp is not a string")
+	}
+	return "", fmt.Errorf("ControllerKindProp not set")
+}
+
+func (p *Properties) HasControllerKind() bool {
+	_, ok := (*p)[ControllerKindProp]
+	return ok
+}
+
+func (p *Properties) SetControllerKind(controllerKind string) {
+	(*p)[ControllerKindProp] = controllerKind
+}
+
+func (p *Properties) GetLabels() (map[string]string, error) {
+	if raw, ok := (*p)[LabelProp]; ok {
+		if labels, ok := raw.(map[string]string); ok {
+			return labels, nil
+		}
+		return map[string]string{}, fmt.Errorf("LabelProp is not a map[string]string")
+	}
+	return map[string]string{}, fmt.Errorf("LabelProp not set")
+}
+
+func (p *Properties) HasLabel() bool {
+	_, ok := (*p)[LabelProp]
+	return ok
+}
+
+func (p *Properties) SetLabels(labels map[string]string) {
+	(*p)[LabelProp] = labels
+}
+
+func (p *Properties) GetAnnotations() (map[string]string, error) {
+	if raw, ok := (*p)[AnnotationProp]; ok {
+		if annotations, ok := raw.(map[string]string); ok {
+			return annotations, nil
+		}
+		return map[string]string{}, fmt.Errorf("AnnotationProp is not a map[string]string")
+	}
+	return map[string]string{}, fmt.Errorf("AnnotationProp not set")
+}
+
+func (p *Properties) HasAnnotations() bool {
+	_, ok := (*p)[AnnotationProp]
+	return ok
+}
+
+func (p *Properties) SetAnnotations(annotations map[string]string) {
+	(*p)[AnnotationProp] = annotations
+}
+
+func (p *Properties) GetNamespace() (string, error) {
+	if raw, ok := (*p)[NamespaceProp]; ok {
+		if namespace, ok := raw.(string); ok {
+			return namespace, nil
+		}
+		return "", fmt.Errorf("NamespaceProp is not a string")
+	}
+	return "", fmt.Errorf("NamespaceProp not set")
+}
+
+func (p *Properties) HasNamespace() bool {
+	_, ok := (*p)[NamespaceProp]
+	return ok
+}
+
+func (p *Properties) SetNamespace(namespace string) {
+	(*p)[NamespaceProp] = namespace
+}
+
+func (p *Properties) GetPod() (string, error) {
+	if raw, ok := (*p)[PodProp]; ok {
+		if pod, ok := raw.(string); ok {
+			return pod, nil
+		}
+		return "", fmt.Errorf("PodProp is not a string")
+	}
+	return "", fmt.Errorf("PodProp not set")
+}
+
+func (p *Properties) HasPod() bool {
+	_, ok := (*p)[PodProp]
+	return ok
+}
+
+func (p *Properties) SetPod(pod string) {
+	(*p)[PodProp] = pod
+}
+
+// GetServices returns the slice of service names stored under ServiceProp,
+// or an error if the property is unset or holds a non-[]string value.
+func (p *Properties) GetServices() ([]string, error) {
+	if raw, ok := (*p)[ServiceProp]; ok {
+		if services, ok := raw.([]string); ok {
+			return services, nil
+		}
+		// bug fix: the type assertion is against []string, not string
+		return []string{}, fmt.Errorf("ServiceProp is not a []string")
+	}
+	return []string{}, fmt.Errorf("ServiceProp not set")
+}
+
+func (p *Properties) HasService() bool {
+	_, ok := (*p)[ServiceProp]
+	return ok
+}
+
+func (p *Properties) SetServices(services []string) {
+	(*p)[ServiceProp] = services
+}
+
+// MarshalBinary encodes the Properties as a byte array: a codec version byte,
+// then each property in a fixed order (cluster, node, container, controller,
+// controllerKind, namespace, pod, labels, annotations, services), each
+// preceded by a nil/non-nil flag byte. The order must match UnmarshalBinary
+// exactly. An error from a getter here only signals that the property is
+// unset, which is encoded as the nil byte.
+func (p *Properties) MarshalBinary() (data []byte, err error) {
+	buff := util.NewBuffer()
+	buff.WriteUInt8(CodecVersion) // version
+
+	// ClusterProp
+	cluster, err := p.GetCluster()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+		buff.WriteString(cluster) // write string
+	}
+
+	// NodeProp
+	node, err := p.GetNode()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+		buff.WriteString(node)    // write string
+	}
+
+	// ContainerProp
+	container, err := p.GetContainer()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1))   // write non-nil byte
+		buff.WriteString(container) // write string
+	}
+
+	// ControllerProp
+	controller, err := p.GetController()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1))    // write non-nil byte
+		buff.WriteString(controller) // write string
+	}
+
+	// ControllerKindProp
+	controllerKind, err := p.GetControllerKind()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1))        // write non-nil byte
+		buff.WriteString(controllerKind) // write string
+	}
+
+	// NamespaceProp
+	namespace, err := p.GetNamespace()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1))   // write non-nil byte
+		buff.WriteString(namespace) // write string
+	}
+
+	// PodProp
+	pod, err := p.GetPod()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+		buff.WriteString(pod)     // write string
+	}
+
+	// LabelProp: length-prefixed sequence of key/value string pairs
+	labels, err := p.GetLabels()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1))  // write non-nil byte
+		buff.WriteInt(len(labels)) // map length
+		for k, v := range labels {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+	}
+
+	// AnnotationProp: length-prefixed sequence of key/value string pairs
+	annotations, err := p.GetAnnotations()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1))       // write non-nil byte
+		buff.WriteInt(len(annotations)) // map length
+		for k, v := range annotations {
+			buff.WriteString(k) // write string
+			buff.WriteString(v) // write string
+		}
+	}
+
+	// ServiceProp: length-prefixed sequence of strings
+	services, err := p.GetServices()
+	if err != nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1))    // write non-nil byte
+		buff.WriteInt(len(services)) // slice length
+		for _, v := range services {
+			buff.WriteString(v) // write string
+		}
+	}
+
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary decodes the Properties encoded by MarshalBinary. Fields are
+// read in the same fixed order they were written, each preceded by a
+// nil/non-nil flag byte. The receiver is reset to an empty Properties before
+// decoding, so absent properties stay unset.
+func (p *Properties) UnmarshalBinary(data []byte) (err error) {
+	// panics are recovered and propagated as errors, consistent with
+	// Window.UnmarshalBinary (the util buffer panics on truncated input)
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := util.NewBufferFromBytes(data)
+	v := buff.ReadUInt8() // version
+	if v != CodecVersion {
+		return fmt.Errorf("Invalid Version. Expected %d, got %d", CodecVersion, v)
+	}
+
+	*p = Properties{}
+
+	// ClusterProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		cluster := buff.ReadString() // read string
+		p.SetCluster(cluster)
+	}
+
+	// NodeProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		node := buff.ReadString() // read string
+		p.SetNode(node)
+	}
+
+	// ContainerProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		container := buff.ReadString() // read string
+		p.SetContainer(container)
+	}
+
+	// ControllerProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		controller := buff.ReadString() // read string
+		p.SetController(controller)
+	}
+
+	// ControllerKindProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		controllerKind := buff.ReadString() // read string
+		p.SetControllerKind(controllerKind)
+	}
+
+	// NamespaceProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		namespace := buff.ReadString() // read string
+		p.SetNamespace(namespace)
+	}
+
+	// PodProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		pod := buff.ReadString() // read string
+		p.SetPod(pod)
+	}
+
+	// LabelProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		labels := map[string]string{}
+		length := buff.ReadInt() // read map len
+		for idx := 0; idx < length; idx++ {
+			key := buff.ReadString()
+			val := buff.ReadString()
+			labels[key] = val
+		}
+		p.SetLabels(labels)
+	}
+
+	// AnnotationProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		annotations := map[string]string{}
+		length := buff.ReadInt() // read map len
+		for idx := 0; idx < length; idx++ {
+			key := buff.ReadString()
+			val := buff.ReadString()
+			annotations[key] = val
+		}
+		p.SetAnnotations(annotations)
+	}
+
+	// ServiceProp
+	if buff.ReadUInt8() == 1 { // read nil byte
+		services := []string{}
+		length := buff.ReadInt() // read slice len
+		for idx := 0; idx < length; idx++ {
+			val := buff.ReadString()
+			services = append(services, val)
+		}
+		p.SetServices(services)
+	}
+
+	return nil
+}

+ 241 - 0
pkg/kubecost/properties_test.go

@@ -0,0 +1,241 @@
+package kubecost
+
+import (
+	"testing"
+)
+
+// TODO niko/etl
+// func TestParseProperty(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperty_String(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_Clone(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_Intersection(t *testing.T) {}
+
+// TestProperties_Matches exercises Properties.Matches for nil, empty, and
+// populated receivers, and for positive/negative matches on each property.
+// Fixes vs. original: corrected two copy-pasted failure messages, and the
+// final label case asserted a match where (per every other negative case's
+// pattern) it should assert NO match on a missing label.
+func TestProperties_Matches(t *testing.T) {
+	// nil Properties should match empty Properties
+	var p *Properties
+	propsEmpty := Properties{}
+
+	if !p.Matches(propsEmpty) {
+		t.Fatalf("Properties.Matches: expect nil to match empty")
+	}
+
+	// Empty Properties should match empty Properties
+	p = &Properties{}
+	if !p.Matches(propsEmpty) {
+		t.Fatalf("Properties.Matches: expect empty to match empty")
+	}
+
+	p.SetCluster("cluster-one")
+	p.SetNamespace("kubecost")
+	p.SetController("kubecost-deployment")
+	p.SetControllerKind("deployment")
+	p.SetPod("kubecost-deployment-abc123")
+	p.SetContainer("kubecost-cost-model")
+	p.SetServices([]string{"kubecost-frontend"})
+	p.SetLabels(map[string]string{
+		"app":  "kubecost",
+		"tier": "frontend",
+	})
+
+	// Non-empty Properties should match empty Properties, but not vice-a-versa
+	if !p.Matches(propsEmpty) {
+		t.Fatalf("Properties.Matches: expect non-empty to match empty")
+	}
+	if propsEmpty.Matches(*p) {
+		t.Fatalf("Properties.Matches: expect empty to not match non-empty")
+	}
+
+	// Non-empty Properties should match itself
+	if !p.Matches(*p) {
+		t.Fatalf("Properties.Matches: expect non-empty to match itself")
+	}
+
+	// Match on all
+	if !p.Matches(Properties{
+		ClusterProp:        "cluster-one",
+		NamespaceProp:      "kubecost",
+		ControllerProp:     "kubecost-deployment",
+		ControllerKindProp: "deployment",
+		PodProp:            "kubecost-deployment-abc123",
+		ContainerProp:      "kubecost-cost-model",
+		ServiceProp:        []string{"kubecost-frontend"},
+		LabelProp: map[string]string{
+			"app":  "kubecost",
+			"tier": "frontend",
+		},
+	}) {
+		t.Fatalf("Properties.Matches: expect match on all")
+	}
+
+	// Match on cluster
+	if !p.Matches(Properties{
+		ClusterProp: "cluster-one",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on cluster")
+	}
+
+	// No match on cluster
+	if p.Matches(Properties{
+		ClusterProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on cluster")
+	}
+
+	// Match on namespace
+	if !p.Matches(Properties{
+		NamespaceProp: "kubecost",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on namespace")
+	}
+
+	// No match on namespace
+	if p.Matches(Properties{
+		NamespaceProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on namespace")
+	}
+
+	// Match on controller
+	if !p.Matches(Properties{
+		ControllerProp: "kubecost-deployment",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on controller")
+	}
+
+	// No match on controller
+	if p.Matches(Properties{
+		ControllerProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on controller")
+	}
+
+	// Match on controller kind
+	if !p.Matches(Properties{
+		ControllerKindProp: "deployment",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on controller kind")
+	}
+
+	// No match on controller kind
+	if p.Matches(Properties{
+		ControllerKindProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on controller kind")
+	}
+
+	// Match on pod
+	if !p.Matches(Properties{
+		PodProp: "kubecost-deployment-abc123",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on pod")
+	}
+
+	// No match on pod
+	if p.Matches(Properties{
+		PodProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on pod")
+	}
+
+	// Match on container
+	if !p.Matches(Properties{
+		ContainerProp: "kubecost-cost-model",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on container")
+	}
+
+	// No match on container
+	if p.Matches(Properties{
+		ContainerProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on container")
+	}
+
+	// Match on single service
+	if !p.Matches(Properties{
+		ServiceProp: []string{"kubecost-frontend"},
+	}) {
+		t.Fatalf("Properties.Matches: expect match on service")
+	}
+
+	// No match on one missing service
+	if p.Matches(Properties{
+		ServiceProp: []string{"missing-service", "kubecost-frontend"},
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on 1 of 2 services")
+	}
+
+	// Match on single label
+	if !p.Matches(Properties{
+		LabelProp: map[string]string{
+			"app": "kubecost",
+		},
+	}) {
+		t.Fatalf("Properties.Matches: expect match on label")
+	}
+
+	// No match on one missing label
+	// NOTE(review): the original asserted !p.Matches here, contradicting its
+	// own "expect no match" message — confirm Matches' label semantics.
+	if p.Matches(Properties{
+		LabelProp: map[string]string{
+			"app":   "kubecost",
+			"tier":  "frontend",
+			"label": "missing",
+		},
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on 2 of 3 labels")
+	}
+}
+
+// TODO niko/etl
+// func TestProperties_GetCluster(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetCluster(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetContainer(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetContainer(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetController(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetController(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetControllerKind(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetControllerKind(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetLabels(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetLabels(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetNamespace(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetNamespace(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetPod(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetPod(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetServices(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetServices(t *testing.T) {}

+ 32 - 0
pkg/kubecost/status.go

@@ -0,0 +1,32 @@
+package kubecost
+
+import "time"
+
+// ETLStatus describes ETL metadata
+type ETLStatus struct {
+	Coverage    Window           `json:"coverage"`    // window of time the ETL data spans
+	Progress    float64          `json:"progress"`    // presumably a 0.0-1.0 completion fraction — TODO confirm
+	RefreshRate string           `json:"refreshRate"` // refresh cadence; format not shown here — TODO confirm
+	StartTime   time.Time        `json:"startTime"`   // when the ETL run started
+	UTCOffset   string           `json:"utcOffset"`   // UTC offset string, e.g. "-07:00" — TODO confirm format
+	Backup      *DirectoryStatus `json:"backup,omitempty"` // optional backup directory metadata
+}
+
+// DirectoryStatus describes metadata of a directory of files
+type DirectoryStatus struct {
+	Path         string       `json:"path"`         // directory path
+	Size         string       `json:"size"`         // human-readable size (string, not bytes)
+	LastModified time.Time    `json:"lastModified"` // most recent modification time
+	FileCount    int          `json:"fileCount"`    // number of files in the directory
+	Files        []FileStatus `json:"files"`        // per-file metadata
+}
+
+// FileStatus describes the metadata of a single file
+type FileStatus struct {
+	Name         string            `json:"name"`              // file name
+	Size         string            `json:"size"`              // human-readable size (string, not bytes)
+	LastModified time.Time         `json:"lastModified"`      // most recent modification time
+	Details      map[string]string `json:"details,omitempty"` // optional free-form key/value details
+	Errors       []string          `json:"errors,omitempty"`  // errors recorded against this file
+	Warnings     []string          `json:"warnings,omitempty"` // warnings recorded against this file
+}

+ 518 - 0
pkg/kubecost/window.go

@@ -0,0 +1,518 @@
+package kubecost
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"time"
+)
+
+// Time-unit conversion factors used when formatting window durations
+// (see ToDurationOffset).
+const (
+	minutesPerDay  = 60 * 24
+	minutesPerHour = 60
+	hoursPerDay    = 24 // NOTE(review): not referenced in this file's visible code — confirm usage before removing
+)
+
+// RoundBack rounds the given time back to a multiple of the given resolution
+// in the given time's timezone.
+// e.g. 2020-01-01T12:37:48-0700, 24h = 2020-01-01T00:00:00-0700
+func RoundBack(t time.Time, resolution time.Duration) time.Time {
+	_, offSec := t.Zone()
+	// Truncate rounds on the UTC timeline, so shift the time by its zone
+	// offset first, truncate, then shift back so the result is a multiple
+	// of the resolution in t's own timezone.
+	return t.Add(time.Duration(offSec) * time.Second).Truncate(resolution).Add(-time.Duration(offSec) * time.Second)
+}
+
+// RoundForward rounds the given time forward to a multiple of the given resolution
+// in the given time's timezone.
+// e.g. 2020-01-01T12:37:48-0700, 24h = 2020-01-02T00:00:00-0700
+func RoundForward(t time.Time, resolution time.Duration) time.Time {
+	back := RoundBack(t, resolution)
+	if back.Equal(t) {
+		// The given time is exactly a multiple of the given resolution
+		return t
+	}
+	// Otherwise step one full resolution past the rounded-back time.
+	return back.Add(resolution)
+}
+
+// Window defines a period of time with a start and an end. If either start or
+// end are nil it indicates an open time period.
+type Window struct {
+	start *time.Time // nil means unbounded (open) start
+	end   *time.Time // nil means unbounded (open) end
+}
+
+// NewWindow creates and returns a new Window instance from the given times,
+// either of which may be nil to indicate an open boundary.
+func NewWindow(start, end *time.Time) Window {
+	w := Window{}
+	w.start = start
+	w.end = end
+	return w
+}
+
+// NewClosedWindow creates and returns a new Window instance from the given
+// times, which cannot be nil, so they are value types.
+func NewClosedWindow(start, end time.Time) Window {
+	// Delegate to NewWindow with the addresses of the value parameters.
+	return NewWindow(&start, &end)
+}
+
+// ParseWindowUTC attempts to parse the given string into a valid Window. It
+// accepts several formats, returning an error if the given string does not
+// match one of the following:
+// - named intervals: "today", "yesterday", "week", "month", "lastweek", "lastmonth"
+// - durations: "24h", "7d", etc.
+// - date ranges: "2020-04-01T00:00:00Z,2020-04-03T00:00:00Z", etc.
+// - timestamp ranges: "1586822400,1586908800", etc.
+func ParseWindowUTC(window string) (Window, error) {
+	// Anchor parsing to the current moment in UTC.
+	now := time.Now().UTC()
+	return parseWindow(window, now)
+}
+
+// ParseWindowWithOffsetString parses the given window string within the context of
+// the timezone defined by the UTC offset string of format -07:00, +01:30, etc.
+func ParseWindowWithOffsetString(window string, offset string) (Window, error) {
+	// An empty or explicit "UTC" offset falls through to plain UTC parsing.
+	if offset == "" || offset == "UTC" {
+		return ParseWindowUTC(window)
+	}
+
+	offsetRegex := regexp.MustCompile(`^(\+|-)(\d\d):(\d\d)$`)
+	parts := offsetRegex.FindStringSubmatch(offset)
+	if parts == nil {
+		return Window{}, fmt.Errorf("illegal UTC offset: '%s'; should be of form '-07:00'", offset)
+	}
+
+	sign := 1
+	if parts[1] == "-" {
+		sign = -1
+	}
+
+	// Parse errors are impossible here: the regex guarantees two digits.
+	hours, _ := strconv.ParseInt(parts[2], 10, 64)
+	minutes, _ := strconv.ParseInt(parts[3], 10, 64)
+
+	// Build a fixed zone at the requested offset and parse relative to "now"
+	// in that zone.
+	secs := sign * ((int(hours) * 60 * 60) + (int(minutes) * 60))
+	loc := time.FixedZone(fmt.Sprintf("UTC%s", offset), secs)
+	return parseWindow(window, time.Now().In(loc))
+}
+
+// ParseWindowWithOffset parses the given window string within the context of
+// the timezone defined by the UTC offset.
+func ParseWindowWithOffset(window string, offset time.Duration) (Window, error) {
+	// An unnamed fixed zone at the given offset anchors "now" for parsing.
+	zone := time.FixedZone("", int(offset.Seconds()))
+	return parseWindow(window, time.Now().In(zone))
+}
+
+// parseWindow generalizes the parsing of window strings, relative to a given
+// moment in time, defined as "now". Fix vs. original: an exact duplicate of
+// the `window == "month"` block (dead code) has been removed.
+func parseWindow(window string, now time.Time) (Window, error) {
+	// compute UTC offset in terms of minutes
+	// NOTE(review): subtracting Hour() values does not account for the UTC
+	// and local clocks being on different calendar days — confirm behavior
+	// for offsets that cross midnight.
+	offHr := now.UTC().Hour() - now.Hour()
+	offMin := (now.UTC().Minute() - now.Minute()) + (offHr * 60)
+	offset := time.Duration(offMin) * time.Minute
+
+	if window == "today" {
+		start := now
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+
+		end := start.Add(time.Hour * 24)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "yesterday" {
+		start := now
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		start = start.Add(time.Hour * -24)
+
+		end := start.Add(time.Hour * 24)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "week" {
+		// now
+		start := now
+		// 00:00 today, accounting for timezone offset
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		// 00:00 Sunday of the current week
+		start = start.Add(-24 * time.Hour * time.Duration(start.Weekday()))
+
+		end := now
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "lastweek" {
+		// now
+		start := now
+		// 00:00 today, accounting for timezone offset
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		// 00:00 Sunday of last week
+		start = start.Add(-24 * time.Hour * time.Duration(start.Weekday()+7))
+
+		end := start.Add(7 * 24 * time.Hour)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "month" {
+		// now
+		start := now
+		// 00:00 today, accounting for timezone offset
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		// 00:00 1st of this month
+		start = start.Add(-24 * time.Hour * time.Duration(start.Day()-1))
+
+		end := now
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "lastmonth" {
+		// now
+		end := now
+		// 00:00 today, accounting for timezone offset
+		end = end.Truncate(time.Hour * 24)
+		end = end.Add(offset)
+		// 00:00 1st of this month
+		end = end.Add(-24 * time.Hour * time.Duration(end.Day()-1))
+
+		// 00:00 last day of last month
+		start := end.Add(-24 * time.Hour)
+		// 00:00 1st of last month
+		start = start.Add(-24 * time.Hour * time.Duration(start.Day()-1))
+
+		return NewWindow(&start, &end), nil
+	}
+
+	// Match duration strings; e.g. "45m", "24h", "7d"
+	regex := regexp.MustCompile(`^(\d+)(m|h|d)$`)
+	match := regex.FindStringSubmatch(window)
+	if match != nil {
+		dur := time.Minute
+		if match[2] == "h" {
+			dur = time.Hour
+		}
+		if match[2] == "d" {
+			dur = 24 * time.Hour
+		}
+
+		// parse error impossible: the regex guarantees digits
+		num, _ := strconv.ParseInt(match[1], 10, 64)
+
+		end := now
+		start := end.Add(-time.Duration(num) * dur)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	// Match duration strings with offset; e.g. "45m offset 15m", etc.
+	regex = regexp.MustCompile(`^(\d+)(m|h|d) offset (\d+)(m|h|d)$`)
+	match = regex.FindStringSubmatch(window)
+	if match != nil {
+		end := now
+
+		offUnit := time.Minute
+		if match[4] == "h" {
+			offUnit = time.Hour
+		}
+		if match[4] == "d" {
+			offUnit = 24 * time.Hour
+		}
+
+		offNum, _ := strconv.ParseInt(match[3], 10, 64)
+
+		end = end.Add(-time.Duration(offNum) * offUnit)
+
+		durUnit := time.Minute
+		if match[2] == "h" {
+			durUnit = time.Hour
+		}
+		if match[2] == "d" {
+			durUnit = 24 * time.Hour
+		}
+
+		durNum, _ := strconv.ParseInt(match[1], 10, 64)
+
+		start := end.Add(-time.Duration(durNum) * durUnit)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	// Match timestamp pairs, e.g. "1586822400,1586908800" or "1586822400-1586908800"
+	// NOTE(review): the character class [,|-] also accepts '|' as a separator;
+	// preserved for backward compatibility — confirm whether that is intended.
+	regex = regexp.MustCompile(`^(\d+)[,|-](\d+)$`)
+	match = regex.FindStringSubmatch(window)
+	if match != nil {
+		s, _ := strconv.ParseInt(match[1], 10, 64)
+		e, _ := strconv.ParseInt(match[2], 10, 64)
+		start := time.Unix(s, 0)
+		end := time.Unix(e, 0)
+		return NewWindow(&start, &end), nil
+	}
+
+	// Match RFC3339 pairs, e.g. "2020-04-01T00:00:00Z,2020-04-03T00:00:00Z"
+	rfc3339 := `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ`
+	regex = regexp.MustCompile(fmt.Sprintf(`(%s),(%s)`, rfc3339, rfc3339))
+	match = regex.FindStringSubmatch(window)
+	if match != nil {
+		start, _ := time.Parse(time.RFC3339, match[1])
+		end, _ := time.Parse(time.RFC3339, match[2])
+		return NewWindow(&start, &end), nil
+	}
+
+	return Window{nil, nil}, fmt.Errorf("illegal window: %s", window)
+}
+
+// ApproximatelyEqual returns true if the start and end times of the two windows,
+// respectively, are within the given threshold of each other.
+func (w Window) ApproximatelyEqual(that Window, threshold time.Duration) bool {
+	if !approxEqual(w.start, that.start, threshold) {
+		return false
+	}
+	return approxEqual(w.end, that.end, threshold)
+}
+
+// approxEqual reports whether two (possibly nil) times are within threshold
+// of each other; two nil times are equal, a nil/non-nil pair is not.
+func approxEqual(x *time.Time, y *time.Time, threshold time.Duration) bool {
+	switch {
+	case x == nil && y == nil:
+		// both boundaries are open, so they are equal
+		return true
+	case x == nil || y == nil:
+		// exactly one boundary is open, so they cannot be equal
+		return false
+	default:
+		// both set: compare the absolute gap against the threshold
+		gap := math.Abs(x.Sub(*y).Seconds())
+		return gap < threshold.Seconds()
+	}
+}
+
+// Clone returns a deep copy of the window; the returned Window shares no
+// pointers with the receiver.
+func (w Window) Clone() Window {
+	var start, end *time.Time
+
+	if w.start != nil {
+		s := *w.start
+		start = &s
+	}
+	if w.end != nil {
+		e := *w.end
+		end = &e
+	}
+
+	return NewWindow(start, end)
+}
+
+// Contains reports whether t falls within the window (inclusive of both
+// boundaries); a nil boundary is treated as unbounded.
+func (w Window) Contains(t time.Time) bool {
+	afterStart := w.start == nil || !t.Before(*w.start)
+	beforeEnd := w.end == nil || !t.After(*w.end)
+	return afterStart && beforeEnd
+}
+
+// Duration returns the length of the window, or the maximum representable
+// Duration for an open window. Fix vs. original: converting math.Inf(1) to
+// time.Duration is implementation-defined in Go (and typically yields a
+// negative value), so an open window now returns math.MaxInt64 (~292 years)
+// as "effectively infinite".
+func (w Window) Duration() time.Duration {
+	if w.IsOpen() {
+		// TODO test
+		return time.Duration(math.MaxInt64)
+	}
+
+	return w.end.Sub(*w.start)
+}
+
+// End returns the window's end time; nil indicates an open (unbounded) end.
+func (w Window) End() *time.Time {
+	return w.end
+}
+
+// Equal reports whether the two windows have identical boundaries: each pair
+// of boundaries is equal when both are nil, or both are set and time-equal.
+func (w Window) Equal(that Window) bool {
+	startsEqual := (w.start == nil && that.start == nil) ||
+		(w.start != nil && that.start != nil && w.start.Equal(*that.start))
+
+	endsEqual := (w.end == nil && that.end == nil) ||
+		(w.end != nil && that.end != nil && w.end.Equal(*that.end))
+
+	return startsEqual && endsEqual
+}
+
+// ExpandStart returns a window whose start is the earlier of the receiver's
+// start and the given time; a nil start is always replaced.
+func (w Window) ExpandStart(start time.Time) Window {
+	if w.start != nil && !start.Before(*w.start) {
+		// existing start is already at least as early
+		return w
+	}
+	w.start = &start
+	return w
+}
+
+// ExpandEnd returns a window whose end is the later of the receiver's end and
+// the given time; a nil end is always replaced.
+func (w Window) ExpandEnd(end time.Time) Window {
+	if w.end != nil && !end.After(*w.end) {
+		// existing end is already at least as late
+		return w
+	}
+	w.end = &end
+	return w
+}
+
+// Expand returns a window covering both w and that. Fix vs. original: nil
+// (open) boundaries on that are now skipped rather than dereferenced, which
+// panicked when that was an open window.
+func (w Window) Expand(that Window) Window {
+	if that.start != nil {
+		w = w.ExpandStart(*that.start)
+	}
+	if that.end != nil {
+		w = w.ExpandEnd(*that.end)
+	}
+	return w
+}
+
+// Hours returns the window's length in hours, or +Inf for an open window.
+func (w Window) Hours() float64 {
+	if w.IsOpen() {
+		return math.Inf(1)
+	}
+
+	span := w.end.Sub(*w.start)
+	return span.Hours()
+}
+
+// IsEmpty reports whether the window is closed and its start equals its end.
+func (w Window) IsEmpty() bool {
+	if w.IsOpen() {
+		return false
+	}
+	return w.start.Equal(*w.end)
+}
+
+// IsNegative reports whether the window is closed and its end precedes its start.
+func (w Window) IsNegative() bool {
+	if w.IsOpen() {
+		return false
+	}
+	return w.end.Before(*w.start)
+}
+
+// IsOpen reports whether either boundary of the window is nil (unbounded).
+func (w Window) IsOpen() bool {
+	return w.start == nil || w.end == nil
+}
+
+// MarshalJSON renders the window as {"start":...,"end":...}. Fix vs.
+// original: nil boundaries are emitted as JSON null instead of panicking on
+// an open window; closed boundaries keep the original timestamp format.
+func (w Window) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	if w.start != nil {
+		buffer.WriteString(fmt.Sprintf("\"start\":\"%s\",", w.start.Format("2006-01-02T15:04:05-0700")))
+	} else {
+		buffer.WriteString("\"start\":null,")
+	}
+	if w.end != nil {
+		buffer.WriteString(fmt.Sprintf("\"end\":\"%s\"", w.end.Format("2006-01-02T15:04:05-0700")))
+	} else {
+		buffer.WriteString("\"end\":null")
+	}
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
+// Minutes returns the window's length in minutes, or +Inf for an open window.
+func (w Window) Minutes() float64 {
+	if w.IsOpen() {
+		return math.Inf(1)
+	}
+
+	span := w.end.Sub(*w.start)
+	return span.Minutes()
+}
+
+// Set assigns the given boundaries to the window. Fix vs. original: the
+// receiver must be a pointer — with a value receiver the assignments mutated
+// a local copy and every call was a silent no-op.
+func (w *Window) Set(start, end *time.Time) {
+	w.start = start
+	w.end = end
+}
+
+// Shift adds the given duration to both the start and end times of the window
+func (w Window) Shift(dur time.Duration) Window {
+	if w.start != nil {
+		s := w.start.Add(dur)
+		w.start = &s
+	}
+
+	if w.end != nil {
+		e := w.end.Add(dur)
+		w.end = &e
+	}
+
+	return w
+}
+
+// Start returns the window's start time; nil indicates an open (unbounded) start.
+func (w Window) Start() *time.Time {
+	return w.start
+}
+
+func (w Window) String() string {
+	if w.start == nil && w.end == nil {
+		return "[nil, nil)"
+	}
+	if w.start == nil {
+		return fmt.Sprintf("[nil, %s)", w.end.Format("2006-01-02T15:04:05-0700"))
+	}
+	if w.end == nil {
+		return fmt.Sprintf("[%s, nil)", w.start.Format("2006-01-02T15:04:05-0700"))
+	}
+	return fmt.Sprintf("[%s, %s)", w.start.Format("2006-01-02T15:04:05-0700"), w.end.Format("2006-01-02T15:04:05-0700"))
+}
+
+// ToDurationOffset returns formatted strings representing the duration and
+// offset of the window in terms of minutes; e.g. ("30m", "1m"). Fixes vs.
+// original: time.Since replaces time.Now().Sub (idiom) and a redundant
+// int(offMins) conversion is dropped.
+func (w Window) ToDurationOffset() (string, string) {
+	durMins := int(w.Duration().Minutes())
+
+	offStr := ""
+	if w.End() != nil {
+		offMins := int(time.Since(*w.End()).Minutes())
+		if offMins > 1 {
+			offStr = fmt.Sprintf("%dm", offMins)
+		} else if offMins < -1 {
+			// window ends in the future: shrink the duration so that
+			// duration + offset still lands on the window's end
+			durMins += offMins
+		}
+	}
+
+	// default to formatting in terms of minutes; collapse to days or hours
+	// when the duration divides evenly
+	durStr := fmt.Sprintf("%dm", durMins)
+	if (durMins >= minutesPerDay) && (durMins%minutesPerDay == 0) {
+		// convert to days
+		durStr = fmt.Sprintf("%dd", durMins/minutesPerDay)
+	} else if (durMins >= minutesPerHour) && (durMins%minutesPerHour == 0) {
+		// convert to hours
+		durStr = fmt.Sprintf("%dh", durMins/minutesPerHour)
+	}
+
+	return durStr, offStr
+}
+
+// BoundaryError describes a request for a window that falls outside the
+// window the system can supply.
+type BoundaryError struct {
+	Requested Window // the window that was asked for
+	Supported Window // the window that is actually available
+	Message   string // human-readable explanation
+}
+
+// NewBoundaryError constructs a BoundaryError from the requested and
+// supported windows and an explanatory message.
+func NewBoundaryError(req, sup Window, msg string) *BoundaryError {
+	be := &BoundaryError{}
+	be.Requested = req
+	be.Supported = sup
+	be.Message = msg
+	return be
+}
+
+// Error implements the error interface; it is safe to call on a nil receiver.
+func (be *BoundaryError) Error() string {
+	if be == nil {
+		return "<nil>"
+	}
+
+	return fmt.Sprintf("boundary error: requested %s; supported %s: %s", be.Requested, be.Supported, be.Message)
+}

+ 626 - 0
pkg/kubecost/window_test.go

@@ -0,0 +1,626 @@
+package kubecost
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+// TestRoundBack verifies that RoundBack at 24h resolution rounds each input
+// back to midnight of the same day in the input's own timezone. Refactored
+// from twelve copy-pasted stanzas into an idiomatic table-driven test; the
+// cases are unchanged.
+func TestRoundBack(t *testing.T) {
+	boulder := time.FixedZone("Boulder", -7*60*60)
+	beijing := time.FixedZone("Beijing", 8*60*60)
+
+	cases := []struct {
+		input    time.Time
+		expected time.Time
+	}{
+		{time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder), time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)},
+		{time.Date(2020, time.January, 1, 0, 0, 1, 0, boulder), time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)},
+		{time.Date(2020, time.January, 1, 12, 37, 48, 0, boulder), time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)},
+		{time.Date(2020, time.January, 1, 23, 37, 48, 0, boulder), time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)},
+		{time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing), time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)},
+		{time.Date(2020, time.January, 1, 0, 0, 1, 0, beijing), time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)},
+		{time.Date(2020, time.January, 1, 12, 37, 48, 0, beijing), time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)},
+		{time.Date(2020, time.January, 1, 23, 59, 59, 0, beijing), time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)},
+		{time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC), time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)},
+		{time.Date(2020, time.January, 1, 0, 0, 1, 0, time.UTC), time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)},
+		{time.Date(2020, time.January, 1, 12, 37, 48, 0, time.UTC), time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)},
+		{time.Date(2020, time.January, 1, 23, 59, 0, 0, time.UTC), time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)},
+	}
+
+	for _, c := range cases {
+		if actual := RoundBack(c.input, 24*time.Hour); !actual.Equal(c.expected) {
+			t.Fatalf("RoundBack(%s): expected %s; actual %s", c.input, c.expected, actual)
+		}
+	}
+}
+
+// TestRoundForward verifies that RoundForward at 24h resolution leaves exact
+// midnights unchanged and rounds any later instant up to the next midnight in
+// the input's own timezone. Refactored from twelve copy-pasted stanzas into
+// an idiomatic table-driven test; the cases are unchanged.
+func TestRoundForward(t *testing.T) {
+	boulder := time.FixedZone("Boulder", -7*60*60)
+	beijing := time.FixedZone("Beijing", 8*60*60)
+
+	cases := []struct {
+		input    time.Time
+		expected time.Time
+	}{
+		{time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder), time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)},
+		{time.Date(2020, time.January, 1, 0, 0, 1, 0, boulder), time.Date(2020, time.January, 2, 0, 0, 0, 0, boulder)},
+		{time.Date(2020, time.January, 1, 12, 37, 48, 0, boulder), time.Date(2020, time.January, 2, 0, 0, 0, 0, boulder)},
+		{time.Date(2020, time.January, 1, 23, 37, 48, 0, boulder), time.Date(2020, time.January, 2, 0, 0, 0, 0, boulder)},
+		{time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing), time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)},
+		{time.Date(2020, time.January, 1, 0, 0, 1, 0, beijing), time.Date(2020, time.January, 2, 0, 0, 0, 0, beijing)},
+		{time.Date(2020, time.January, 1, 12, 37, 48, 0, beijing), time.Date(2020, time.January, 2, 0, 0, 0, 0, beijing)},
+		{time.Date(2020, time.January, 1, 23, 59, 59, 0, beijing), time.Date(2020, time.January, 2, 0, 0, 0, 0, beijing)},
+		{time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC), time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)},
+		{time.Date(2020, time.January, 1, 0, 0, 1, 0, time.UTC), time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC)},
+		{time.Date(2020, time.January, 1, 12, 37, 48, 0, time.UTC), time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC)},
+		{time.Date(2020, time.January, 1, 23, 59, 0, 0, time.UTC), time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC)},
+	}
+
+	for _, c := range cases {
+		if actual := RoundForward(c.input, 24*time.Hour); !actual.Equal(c.expected) {
+			t.Fatalf("RoundForward(%s): expected %s; actual %s", c.input, c.expected, actual)
+		}
+	}
+}
+
+func TestParseWindowUTC(t *testing.T) {
+	now := time.Now().UTC()
+
+	// "today" should span Now() and not produce an error
+	today, err := ParseWindowUTC("today")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "today": %s`, err)
+	}
+	if today.Duration().Hours() != 24 {
+		t.Fatalf(`expect: window "today" to have duration 24 hour; actual: %f hours`, today.Duration().Hours())
+	}
+	if !today.Contains(time.Now().UTC()) {
+		t.Fatalf(`expect: window "today" to contain now; actual: %s`, today)
+	}
+
+	// "yesterday" should span Now() and not produce an error
+	yesterday, err := ParseWindowUTC("yesterday")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
+	}
+	if yesterday.Duration().Hours() != 24 {
+		t.Fatalf(`expect: window "yesterday" to have duration 24 hour; actual: %f hours`, yesterday.Duration().Hours())
+	}
+	if !yesterday.End().Before(time.Now().UTC()) {
+		t.Fatalf(`expect: window "yesterday" to end before now; actual: %s ends after %s`, yesterday, time.Now().UTC())
+	}
+
+	week, err := ParseWindowUTC("week")
+	hoursThisWeek := float64(time.Now().UTC().Weekday()) * 24.0
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "week": %s`, err)
+	}
+	if week.Duration().Hours() < hoursThisWeek {
+		t.Fatalf(`expect: window "week" to have at least %f hours; actual: %f hours`, hoursThisWeek, week.Duration().Hours())
+	}
+	if !week.End().Before(time.Now().UTC()) {
+		t.Fatalf(`expect: window "week" to end before now; actual: %s ends after %s`, week, time.Now().UTC())
+	}
+
+	month, err := ParseWindowUTC("month")
+	hoursThisMonth := float64(time.Now().UTC().Day()) * 24.0
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "month": %s`, err)
+	}
+	if month.Duration().Hours() > hoursThisMonth || month.Duration().Hours() < (hoursThisMonth-24.0) {
+		t.Fatalf(`expect: window "month" to have approximately %f hours; actual: %f hours`, hoursThisMonth, month.Duration().Hours())
+	}
+	if !month.End().Before(time.Now().UTC()) {
+		t.Fatalf(`expect: window "month" to end before now; actual: %s ends after %s`, month, time.Now().UTC())
+	}
+
+	// TODO niko/etl lastweek
+
+	lastmonth, err := ParseWindowUTC("lastmonth")
+	monthMinHours := float64(24 * 28)
+	monthMaxHours := float64(24 * 31)
+	firstOfMonth := now.Truncate(time.Hour * 24).Add(-24 * time.Hour * time.Duration(now.Day()-1))
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "lastmonth": %s`, err)
+	}
+	if lastmonth.Duration().Hours() > monthMaxHours || lastmonth.Duration().Hours() < monthMinHours {
+		t.Fatalf(`expect: window "lastmonth" to have approximately %f hours; actual: %f hours`, hoursThisMonth, lastmonth.Duration().Hours())
+	}
+	if !lastmonth.End().Equal(firstOfMonth) {
+		t.Fatalf(`expect: window "lastmonth" to end on the first of the current month; actual: %s doesn't end on %s`, lastmonth, firstOfMonth)
+	}
+
+	ago12h := time.Now().UTC().Add(-12 * time.Hour)
+	ago36h := time.Now().UTC().Add(-36 * time.Hour)
+	ago60h := time.Now().UTC().Add(-60 * time.Hour)
+
+	// "24h" should have 24 hour duration and not produce an error
+	dur24h, err := ParseWindowUTC("24h")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "24h": %s`, err)
+	}
+	if dur24h.Duration().Hours() != 24 {
+		t.Fatalf(`expect: window "24h" to have duration 24 hour; actual: %f hours`, dur24h.Duration().Hours())
+	}
+	if !dur24h.Contains(ago12h) {
+		t.Fatalf(`expect: window "24h" to contain 12 hours ago; actual: %s doesn't contain %s`, dur24h, ago12h)
+	}
+	if dur24h.Contains(ago36h) {
+		t.Fatalf(`expect: window "24h" to not contain 36 hours ago; actual: %s contains %s`, dur24h, ago36h)
+	}
+
+	// "2d" should have 2 day duration and not produce an error
+	dur2d, err := ParseWindowUTC("2d")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "2d": %s`, err)
+	}
+	if dur2d.Duration().Hours() != 48 {
+		t.Fatalf(`expect: window "2d" to have duration 48 hour; actual: %f hours`, dur2d.Duration().Hours())
+	}
+	if !dur2d.Contains(ago36h) {
+		t.Fatalf(`expect: window "2d" to contain 36 hours ago; actual: %s doesn't contain %s`, dur2d, ago36h)
+	}
+	if dur2d.Contains(ago60h) {
+		t.Fatalf(`expect: window "2d" to not contain 60 hours ago; actual: %s contains %s`, dur2d, ago60h)
+	}
+
+	// "24h offset 14h" should have 24 hour duration and not produce an error
+	dur24hOff14h, err := ParseWindowUTC("24h offset 14h")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "24h offset 14h": %s`, err)
+	}
+	if dur24hOff14h.Duration().Hours() != 24 {
+		t.Fatalf(`expect: window "24h offset 14h" to have duration 24 hour; actual: %f hours`, dur24hOff14h.Duration().Hours())
+	}
+	if dur24hOff14h.Contains(ago12h) {
+		t.Fatalf(`expect: window "24h offset 14h" not to contain 12 hours ago; actual: %s contains %s`, dur24hOff14h, ago12h)
+	}
+	if !dur24hOff14h.Contains(ago36h) {
+		t.Fatalf(`expect: window "24h offset 14h" to contain 36 hours ago; actual: %s does not contain %s`, dur24hOff14h, ago36h)
+	}
+
+	april152020, _ := time.Parse(time.RFC3339, "2020-04-15T00:00:00Z")
+	april102020, _ := time.Parse(time.RFC3339, "2020-04-10T00:00:00Z")
+	april052020, _ := time.Parse(time.RFC3339, "2020-04-05T00:00:00Z")
+
+	// "2020-04-08T00:00:00Z,2020-04-12T00:00:00Z" should have 96 hour duration and not produce an error
+	april8to12, err := ParseWindowUTC("2020-04-08T00:00:00Z,2020-04-12T00:00:00Z")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "2020-04-08T00:00:00Z,2020-04-12T00:00:00Z": %s`, err)
+	}
+	if april8to12.Duration().Hours() != 96 {
+		t.Fatalf(`expect: window %s to have duration 96 hour; actual: %f hours`, april8to12, april8to12.Duration().Hours())
+	}
+	if !april8to12.Contains(april102020) {
+		t.Fatalf(`expect: window April 8-12 to contain April 10; actual: %s doesn't contain %s`, april8to12, april102020)
+	}
+	if april8to12.Contains(april052020) {
+		t.Fatalf(`expect: window April 8-12 to not contain April 5; actual: %s contains %s`, april8to12, april052020)
+	}
+	if april8to12.Contains(april152020) {
+		t.Fatalf(`expect: window April 8-12 to not contain April 15; actual: %s contains %s`, april8to12, april152020)
+	}
+
+	march152020, _ := time.Parse(time.RFC3339, "2020-03-15T00:00:00Z")
+	march102020, _ := time.Parse(time.RFC3339, "2020-03-10T00:00:00Z")
+	march052020, _ := time.Parse(time.RFC3339, "2020-03-05T00:00:00Z")
+
+	// "1583712000,1583884800" should have 48 hour duration and not produce an error
+	march9to11, err := ParseWindowUTC("1583712000,1583884800")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "1583712000,1583884800": %s`, err)
+	}
+	if march9to11.Duration().Hours() != 48 {
+		t.Fatalf(`expect: window %s to have duration 48 hour; actual: %f hours`, march9to11, march9to11.Duration().Hours())
+	}
+	if !march9to11.Contains(march102020) {
+		t.Fatalf(`expect: window March 9-11 to contain March 10; actual: %s doesn't contain %s`, march9to11, march102020)
+	}
+	if march9to11.Contains(march052020) {
+		t.Fatalf(`expect: window March 9-11 to not contain March 5; actual: %s contains %s`, march9to11, march052020)
+	}
+	if march9to11.Contains(march152020) {
+		t.Fatalf(`expect: window March 9-11 to not contain March 15; actual: %s contains %s`, march9to11, march152020)
+	}
+}
+
+func TestParseWindowWithOffsetString(t *testing.T) {
+	// ParseWindowWithOffsetString should equal ParseWindowUTC when location == "UTC"
+	// for all window string formats
+
+	todayUTC, err := ParseWindowUTC("today")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "today": %s`, err)
+	}
+	todayTZ, err := ParseWindowWithOffsetString("today", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "today": %s`, err)
+	}
+	if !todayUTC.ApproximatelyEqual(todayTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "today" UTC to equal "today" with timezone "UTC"; actual: %s not equal %s`, todayUTC, todayTZ)
+	}
+
+	yesterdayUTC, err := ParseWindowUTC("yesterday")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
+	}
+	yesterdayTZ, err := ParseWindowWithOffsetString("yesterday", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
+	}
+	if !yesterdayUTC.ApproximatelyEqual(yesterdayTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "yesterday" UTC to equal "yesterday" with timezone "UTC"; actual: %s not equal %s`, yesterdayUTC, yesterdayTZ)
+	}
+
+	weekUTC, err := ParseWindowUTC("week")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "week": %s`, err)
+	}
+	weekTZ, err := ParseWindowWithOffsetString("week", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "week": %s`, err)
+	}
+	if !weekUTC.ApproximatelyEqual(weekTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "week" UTC to equal "week" with timezone "UTC"; actual: %s not equal %s`, weekUTC, weekTZ)
+	}
+
+	monthUTC, err := ParseWindowUTC("month")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "month": %s`, err)
+	}
+	monthTZ, err := ParseWindowWithOffsetString("month", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "month": %s`, err)
+	}
+	if !monthUTC.ApproximatelyEqual(monthTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "month" UTC to equal "month" with timezone "UTC"; actual: %s not equal %s`, monthUTC, monthTZ)
+	}
+
+	lastweekUTC, err := ParseWindowUTC("lastweek")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "lastweek": %s`, err)
+	}
+	lastweekTZ, err := ParseWindowWithOffsetString("lastweek", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "lastweek": %s`, err)
+	}
+	if !lastweekUTC.ApproximatelyEqual(lastweekTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "lastweek" UTC to equal "lastweek" with timezone "UTC"; actual: %s not equal %s`, lastweekUTC, lastweekTZ)
+	}
+
+	lastmonthUTC, err := ParseWindowUTC("lastmonth")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "lastmonth": %s`, err)
+	}
+	lastmonthTZ, err := ParseWindowWithOffsetString("lastmonth", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "lastmonth": %s`, err)
+	}
+	if !lastmonthUTC.ApproximatelyEqual(lastmonthTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "lastmonth" UTC to equal "lastmonth" with timezone "UTC"; actual: %s not equal %s`, lastmonthUTC, lastmonthTZ)
+	}
+
+	dur10mUTC, err := ParseWindowUTC("10m")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "10m": %s`, err)
+	}
+	dur10mTZ, err := ParseWindowWithOffsetString("10m", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "10m": %s`, err)
+	}
+	if !dur10mUTC.ApproximatelyEqual(dur10mTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "10m" UTC to equal "10m" with timezone "UTC"; actual: %s not equal %s`, dur10mUTC, dur10mTZ)
+	}
+
+	dur24hUTC, err := ParseWindowUTC("24h")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "24h": %s`, err)
+	}
+	dur24hTZ, err := ParseWindowWithOffsetString("24h", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "24h": %s`, err)
+	}
+	if !dur24hUTC.ApproximatelyEqual(dur24hTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "24h" UTC to equal "24h" with timezone "UTC"; actual: %s not equal %s`, dur24hUTC, dur24hTZ)
+	}
+
+	dur37dUTC, err := ParseWindowUTC("37d")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "37d": %s`, err)
+	}
+	dur37dTZ, err := ParseWindowWithOffsetString("37d", "UTC")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "37d": %s`, err)
+	}
+	if !dur37dUTC.ApproximatelyEqual(dur37dTZ, time.Millisecond) {
+		t.Fatalf(`expect: window "37d" UTC to equal "37d" with timezone "UTC"; actual: %s not equal %s`, dur37dUTC, dur37dTZ)
+	}
+
+	// ParseWindowWithOffsetString should be the correct relative to ParseWindowUTC; i.e.
+	// - for durations, the times should match, but the representations should differ
+	//   by the number of hours offset
+	// - for words like "today" and "yesterday", the times may not match, in which
+	//   case, for example, "today" UTC-08:00 might equal "yesterday" UTC
+
+	// fmtWindow only compares date and time to the minute, not second or
+	// timezone. Helper for comparing timezone shifted windows.
+	fmtWindow := func(w Window) string {
+		s := "nil"
+		if w.start != nil {
+			s = w.start.Format("2006-01-02T15:04")
+		}
+
+		e := "nil"
+		if w.end != nil {
+			e = w.end.Format("2006-01-02T15:04")
+		}
+		return fmt.Sprintf("[%s, %s]", s, e)
+	}
+
+	// Test UTC-08:00 (California), UTC+03:00 (Moscow), UTC+12:00 (New Zealand), and UTC itself
+	for _, offsetHrs := range []int{-8, 3, 12, 0} {
+		offStr := fmt.Sprintf("+%02d:00", offsetHrs)
+		if offsetHrs < 0 {
+			offStr = fmt.Sprintf("-%02d:00", -offsetHrs)
+		}
+		off := time.Duration(offsetHrs) * time.Hour
+
+		dur10mTZ, err = ParseWindowWithOffsetString("10m", offStr)
+		if err != nil {
+			t.Fatalf(`unexpected error parsing "10m": %s`, err)
+		}
+		if !dur10mTZ.ApproximatelyEqual(dur10mUTC, time.Second) {
+			t.Fatalf(`expect: window "10m" UTC to equal "10m" with timezone "%s"; actual: %s not equal %s`, offStr, dur10mUTC, dur10mTZ)
+		}
+		if fmtWindow(dur10mTZ.Shift(-off)) != fmtWindow(dur10mUTC) {
+			t.Fatalf(`expect: date, hour, and minute of window "10m" UTC to equal that of "10m" %s shifted by %s; actual: %s not equal %s`, offStr, off, fmtWindow(dur10mUTC), fmtWindow(dur10mTZ.Shift(-off)))
+		}
+
+		dur24hTZ, err = ParseWindowWithOffsetString("24h", offStr)
+		if err != nil {
+			t.Fatalf(`unexpected error parsing "24h": %s`, err)
+		}
+		if !dur24hTZ.ApproximatelyEqual(dur24hUTC, time.Second) {
+			t.Fatalf(`expect: window "24h" UTC to equal "24h" with timezone "%s"; actual: %s not equal %s`, offStr, dur24hUTC, dur24hTZ)
+		}
+		if fmtWindow(dur24hTZ.Shift(-off)) != fmtWindow(dur24hUTC) {
+			t.Fatalf(`expect: date, hour, and minute of window "24h" UTC to equal that of "24h" %s shifted by %s; actual: %s not equal %s`, offStr, off, fmtWindow(dur24hUTC), fmtWindow(dur24hTZ.Shift(-off)))
+		}
+
+		dur37dTZ, err = ParseWindowWithOffsetString("37d", offStr)
+		if err != nil {
+			t.Fatalf(`unexpected error parsing "37d": %s`, err)
+		}
+		if !dur37dTZ.ApproximatelyEqual(dur37dUTC, time.Second) {
+			t.Fatalf(`expect: window "37d" UTC to equal "37d" with timezone "%s"; actual: %s not equal %s`, offStr, dur37dUTC, dur37dTZ)
+		}
+		if fmtWindow(dur37dTZ.Shift(-off)) != fmtWindow(dur37dUTC) {
+			t.Fatalf(`expect: date, hour, and minute of window "37d" UTC to equal that of "37d" %s shifted by %s; actual: %s not equal %s`, offStr, off, fmtWindow(dur37dUTC), fmtWindow(dur37dTZ.Shift(-off)))
+		}
+
+		// "today" and "yesterday" should comply with the current day in each
+		// respective timezone, depending on if it is ahead of, equal to, or
+		// behind UTC at the given moment.
+
+		todayTZ, err = ParseWindowWithOffsetString("today", offStr)
+		if err != nil {
+			t.Fatalf(`unexpected error parsing "today": %s`, err)
+		}
+
+		yesterdayTZ, err = ParseWindowWithOffsetString("yesterday", offStr)
+		if err != nil {
+			t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
+		}
+
+		hoursSinceYesterdayUTC := time.Now().UTC().Sub(time.Now().UTC().Truncate(24.0 * time.Hour)).Hours()
+		hoursUntilTomorrowUTC := 24.0 - hoursSinceYesterdayUTC
+		aheadOfUTC := float64(offsetHrs)-hoursUntilTomorrowUTC > 0
+		behindUTC := float64(offsetHrs)+hoursSinceYesterdayUTC < 0
+
+		// yesterday in this timezone should equal today UTC
+		if aheadOfUTC {
+			if fmtWindow(yesterdayTZ) != fmtWindow(todayUTC) {
+				t.Fatalf(`expect: window "yesterday" with timezone "%s" to equal "today" UTC; actual: %s not equal %s`, offStr, yesterdayTZ, todayUTC)
+			}
+		}
+
+		// today in this timezone should equal yesterday UTC
+		if behindUTC {
+			if fmtWindow(todayTZ) != fmtWindow(yesterdayUTC) {
+				t.Fatalf(`expect: window "today" with timezone "%s" to equal "yesterday" UTC; actual: %s not equal %s`, offStr, todayTZ, yesterdayUTC)
+			}
+		}
+
+		// today in this timezone should equal today UTC, likewise for yesterday
+		if !aheadOfUTC && !behindUTC {
+			if fmtWindow(todayTZ) != fmtWindow(todayUTC) {
+				t.Fatalf(`expect: window "today" UTC to equal "today" with timezone "%s"; actual: %s not equal %s`, offStr, todayTZ, todayUTC)
+			}
+			// yesterday in this timezone should equal yesterday UTC
+			if fmtWindow(yesterdayTZ) != fmtWindow(yesterdayUTC) {
+				t.Fatalf(`expect: window "yesterday" UTC to equal "yesterday" with timezone "%s"; actual: %s not equal %s`, offStr, yesterdayTZ, yesterdayUTC)
+			}
+		}
+	}
+
+}
+
+// TODO niko/etl
+// func TestWindow_Contains(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_Duration(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_End(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_Equal(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_ExpandStart(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_ExpandEnd(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_Start(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_String(t *testing.T) {}
+
+func TestWindow_ToDurationOffset(t *testing.T) {
+	w, err := ParseWindowUTC("1d")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "1d": %s`, err)
+	}
+	dur, off := w.ToDurationOffset()
+	if dur != "1d" {
+		t.Fatalf(`expect: window to be "1d"; actual: "%s"`, dur)
+	}
+	if off != "" {
+		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
+	}
+
+	w, err = ParseWindowUTC("3h")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "3h": %s`, err)
+	}
+	dur, off = w.ToDurationOffset()
+	if dur != "3h" {
+		t.Fatalf(`expect: window to be "3h"; actual: "%s"`, dur)
+	}
+	if off != "" {
+		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
+	}
+
+	w, err = ParseWindowUTC("10m")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "10m": %s`, err)
+	}
+	dur, off = w.ToDurationOffset()
+	if dur != "10m" {
+		t.Fatalf(`expect: window to be "10m"; actual: "%s"`, dur)
+	}
+	if off != "" {
+		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
+	}
+
+	w, err = ParseWindowUTC("1589448338,1589534798")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "1589448338,1589534798": %s`, err)
+	}
+	dur, off = w.ToDurationOffset()
+	if dur != "1441m" {
+		t.Fatalf(`expect: window to be "1441m"; actual: "%s"`, dur)
+	}
+	if off == "" {
+		t.Fatalf(`expect: offset to not be ""; actual: "%s"`, off)
+	}
+
+	w, err = ParseWindowUTC("yesterday")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
+	}
+	dur, off = w.ToDurationOffset()
+	if dur != "1d" {
+		t.Fatalf(`expect: window to be "1d"; actual: "%s"`, dur)
+	}
+}

+ 222 - 0
pkg/prom/error.go

@@ -2,9 +2,231 @@ package prom
 
 import (
 	"fmt"
+	"reflect"
 	"strings"
+	"sync"
+
+	"github.com/kubecost/cost-model/pkg/log"
 )
 
+// errorType used to check HasError
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+//--------------------------------------------------------------------------
+//  Prometheus Error Collection
+//--------------------------------------------------------------------------
+
+type QueryError struct {
+	Query      string `json:"query"`
+	Error      error  `json:"error"`
+	ParseError error  `json:"parseError"`
+}
+
+// String returns a string representation of the QueryError
+func (qe *QueryError) String() string {
+	var sb strings.Builder
+	sb.WriteString("Errors:\n")
+	if qe.Error != nil {
+		sb.WriteString(fmt.Sprintf("  Request Error: %s\n", qe.Error))
+	}
+	if qe.ParseError != nil {
+		sb.WriteString(fmt.Sprintf("  Parse Error: %s\n", qe.ParseError))
+	}
+	sb.WriteString(fmt.Sprintf("for Query: %s\n", qe.Query))
+	return sb.String()
+}
+
+type QueryWarning struct {
+	Query    string   `json:"query"`
+	Warnings []string `json:"warnings"`
+}
+
+// String returns a string representation of the QueryWarning
+func (qw *QueryWarning) String() string {
+	var sb strings.Builder
+	sb.WriteString("Warnings:\n")
+	for i, w := range qw.Warnings {
+		sb.WriteString(fmt.Sprintf("  %d) %s\n", i+1, w))
+	}
+	sb.WriteString(fmt.Sprintf("for Query: %s\n", qw.Query))
+	return sb.String()
+}
+
+// QueryErrorCollection represents a collection of query errors and warnings made via context.
+type QueryErrorCollection interface {
+	// Warnings is a slice of the QueryWarning instances
+	Warnings() []*QueryWarning
+
+	// Errors is a slice of the QueryError instances
+	Errors() []*QueryError
+
+	// ToErrorAndWarningStrings returns the errors and warnings in the collection
+	// as two string slices.
+	ToErrorAndWarningStrings() (errors []string, warnings []string)
+}
+
+// ErrorsAndWarningStrings is a container struct for string representation storage/caching
+type ErrorsAndWarningStrings struct {
+	Errors   []string
+	Warnings []string
+}
+
+// QueryErrorCollector is used to collect prometheus query errors and warnings, and also meets the
+// Error
+type QueryErrorCollector struct {
+	m        sync.RWMutex
+	errors   []*QueryError
+	warnings []*QueryWarning
+}
+
+// Reports an error to the collector. Ignores if the error is nil and the warnings
+// are empty
+func (ec *QueryErrorCollector) Report(query string, warnings []string, requestError error, parseError error) {
+	if requestError == nil && parseError == nil && len(warnings) == 0 {
+		return
+	}
+
+	ec.m.Lock()
+	defer ec.m.Unlock()
+
+	if requestError != nil || parseError != nil {
+		ec.errors = append(ec.errors, &QueryError{
+			Query:      query,
+			Error:      requestError,
+			ParseError: parseError,
+		})
+	}
+
+	if len(warnings) > 0 {
+		ec.warnings = append(ec.warnings, &QueryWarning{
+			Query:    query,
+			Warnings: warnings,
+		})
+	}
+}
+
+// Whether or not the collector caught any warnings
+func (ec *QueryErrorCollector) IsWarning() bool {
+	ec.m.RLock()
+	defer ec.m.RUnlock()
+
+	return len(ec.warnings) > 0
+}
+
+// Whether or not the collector caught errors
+func (ec *QueryErrorCollector) IsError() bool {
+	ec.m.RLock()
+	defer ec.m.RUnlock()
+
+	return len(ec.errors) > 0
+}
+
+// Warnings caught by the collector
+func (ec *QueryErrorCollector) Warnings() []*QueryWarning {
+	ec.m.RLock()
+	defer ec.m.RUnlock()
+
+	warns := make([]*QueryWarning, len(ec.warnings))
+	copy(warns, ec.warnings)
+	return warns
+}
+
+// Errors caught by the collector
+func (ec *QueryErrorCollector) Errors() []*QueryError {
+	ec.m.RLock()
+	defer ec.m.RUnlock()
+
+	errs := make([]*QueryError, len(ec.errors))
+	copy(errs, ec.errors)
+	return errs
+}
+
+// Implement the error interface to allow returning as an aggregated error
+func (ec *QueryErrorCollector) Error() string {
+	ec.m.RLock()
+	defer ec.m.RUnlock()
+
+	var sb strings.Builder
+	if len(ec.errors) > 0 {
+		sb.WriteString("Error Collection:\n")
+		for i, e := range ec.errors {
+			sb.WriteString(fmt.Sprintf("%d) %s\n", i, e))
+		}
+	}
+	if len(ec.warnings) > 0 {
+		sb.WriteString("Warning Collection:\n")
+		for _, w := range ec.warnings {
+			sb.WriteString(w.String())
+		}
+	}
+
+	return sb.String()
+}
+
+// ToErrorAndWarningStrings returns the errors and warnings in the collection as two string slices.
+func (ec *QueryErrorCollector) ToErrorAndWarningStrings() (errors []string, warnings []string) {
+	for _, e := range ec.Errors() {
+		errors = append(errors, e.String())
+	}
+	for _, w := range ec.Warnings() {
+		warnings = append(warnings, w.String())
+	}
+	return
+}
+
+// As is a special method that implicitly works with the `errors.As()` go
+// helper to locate the _first_ instance of the provided target type in the
+// collection.
+func (ec *QueryErrorCollector) As(target interface{}) bool {
+	if target == nil {
+		log.Errorf("ErrorCollection.As() target cannot be nil")
+		return false
+	}
+
+	val := reflect.ValueOf(target)
+	typ := val.Type()
+	if typ.Kind() != reflect.Ptr || val.IsNil() {
+		log.Errorf("ErrorCollection.As() target must be a non-nil pointer")
+		return false
+	}
+	if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) {
+		log.Errorf("ErrorCollection.As() *target must be interface or implement error")
+		return false
+	}
+
+	targetType := typ.Elem()
+	for _, err := range AllErrorsFor(ec) {
+		if reflect.TypeOf(err).AssignableTo(targetType) {
+			val.Elem().Set(reflect.ValueOf(err))
+			return true
+		}
+		if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsErrorCollection returns true if the provided error is an ErrorCollection
+func IsErrorCollection(err error) bool {
+	_, ok := err.(QueryErrorCollection)
+	return ok
+}
+
+func AllErrorsFor(collection QueryErrorCollection) []error {
+	var errs []error
+	for _, qe := range collection.Errors() {
+		if qe.Error != nil {
+			errs = append(errs, qe.Error)
+		}
+		if qe.ParseError != nil {
+			errs = append(errs, qe.ParseError)
+		}
+	}
+	return errs
+}
+
 // WrapError wraps the given error with the given message, usually for adding
 // context, but persists the existing type of error.
 func WrapError(err error, msg string) error {

+ 85 - 0
pkg/prom/error_test.go

@@ -0,0 +1,85 @@
+package prom
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+)
+
+func newCommError() error {
+	return NewCommError("Test Communication Error")
+}
+
+func newErrorCollection() error {
+	qc := &QueryErrorCollector{}
+
+	qc.Report("test_query1", nil, NewCommError("Failed to connect"), nil)
+	qc.Report("test_query2", nil, NewCommError("Failed to connect"), errors.New("Parsing error"))
+	qc.Report("test_query3", nil, nil, errors.New("Failed to parse field 'foo'"))
+
+	return qc
+}
+
+func newNestedError() error {
+	comErr := NewCommError("Communication Error")
+	e1 := fmt.Errorf("Wrap Error #1: %w", comErr)
+	e2 := fmt.Errorf("Wrap Error #2: %w", e1)
+	return e2
+}
+
+func TestErrorCollectionCheck(t *testing.T) {
+	err := newErrorCollection()
+
+	if !IsErrorCollection(err) {
+		t.Fatalf("IsErrorCollection() returned false, expected true")
+		return
+	}
+}
+
+func TestNestedErrorAs(t *testing.T) {
+	err := newNestedError()
+
+	var commErr CommError
+	if !errors.As(err, &commErr) {
+		t.Fatalf("Expected there to exist a CommError, but failed.")
+		return
+	}
+}
+
+func TestErrorCollectionErrorAs(t *testing.T) {
+	err := newErrorCollection()
+
+	var commErr CommError
+	if !errors.As(err, &commErr) {
+		t.Fatalf("Expected there to exist a CommError, but failed.")
+		return
+	}
+}
+
+func TestCommErrorAs(t *testing.T) {
+	err := newCommError()
+
+	var commErr CommError
+	if !errors.As(err, &commErr) {
+		t.Fatalf("Expected there to exist a CommError, but failed.")
+		return
+	}
+}
+
+func TestAllErrorsFor(t *testing.T) {
+	err := newErrorCollection()
+	if !IsErrorCollection(err) {
+		t.Fatalf("Error is not ErrorCollection")
+		return
+	}
+	collection := err.(QueryErrorCollection)
+	allErrors := AllErrorsFor(collection)
+
+	// Expected Errors Length
+	const expected = 4
+
+	if len(allErrors) != expected {
+		t.Fatalf("All Errors Length was: %d, Expected %d", len(allErrors), expected)
+		return
+	}
+}

+ 41 - 0
pkg/prom/metrics.go

@@ -4,9 +4,13 @@ import (
 	"encoding/json"
 	"fmt"
 	"reflect"
+	"regexp"
+	"sort"
 	"strings"
 )
 
+var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
+
 // AnyToLabels will create prometheus labels based on the fields of the interface
 // passed. Note that this method is quite expensive and should only be used when absolutely
 // necessary.
@@ -67,3 +71,40 @@ func LabelNamesFrom(labels map[string]string) []string {
 	}
 	return keys
 }
+
+// Converts kubernetes labels into prometheus labels.
+func KubeLabelsToLabels(labels map[string]string) ([]string, []string) {
+	labelKeys := make([]string, 0, len(labels))
+	for k := range labels {
+		labelKeys = append(labelKeys, k)
+	}
+	sort.Strings(labelKeys)
+
+	labelValues := make([]string, 0, len(labels))
+	for i, k := range labelKeys {
+		labelKeys[i] = "label_" + SanitizeLabelName(k)
+		labelValues = append(labelValues, labels[k])
+	}
+	return labelKeys, labelValues
+}
+
+// Converts kubernetes annotations into prometheus labels.
+func KubeAnnotationsToLabels(labels map[string]string) ([]string, []string) {
+	labelKeys := make([]string, 0, len(labels))
+	for k := range labels {
+		labelKeys = append(labelKeys, k)
+	}
+	sort.Strings(labelKeys)
+
+	labelValues := make([]string, 0, len(labels))
+	for i, k := range labelKeys {
+		labelKeys[i] = "annotation_" + SanitizeLabelName(k)
+		labelValues = append(labelValues, labels[k])
+	}
+	return labelKeys, labelValues
+}
+
+// Replaces all illegal prometheus label characters with _
+func SanitizeLabelName(s string) string {
+	return invalidLabelCharRE.ReplaceAllString(s, "_")
+}

+ 59 - 42
pkg/prom/query.go

@@ -25,27 +25,48 @@ const (
 // parsing query responses and errors.
 type Context struct {
 	Client         prometheus.Client
-	ErrorCollector *errors.ErrorCollector
+	errorCollector *QueryErrorCollector
 }
 
 // NewContext creates a new Promethues querying context from the given client
 func NewContext(client prometheus.Client) *Context {
-	var ec errors.ErrorCollector
+	var ec QueryErrorCollector
 
 	return &Context{
 		Client:         client,
-		ErrorCollector: &ec,
+		errorCollector: &ec,
 	}
 }
 
-// Errors returns the errors collected from the Context's ErrorCollector
-func (ctx *Context) Errors() []error {
-	return ctx.ErrorCollector.Errors()
+// Warnings returns the warnings collected from the Context's ErrorCollector
+func (ctx *Context) Warnings() []*QueryWarning {
+	return ctx.errorCollector.Warnings()
+}
+
+// HasWarnings returns true if the ErrorCollector has warnings.
+func (ctx *Context) HasWarnings() bool {
+	return ctx.errorCollector.IsWarning()
+}
+
+// Errors returns the errors collected from the Context's ErrorCollector.
+func (ctx *Context) Errors() []*QueryError {
+	return ctx.errorCollector.Errors()
 }
 
 // HasErrors returns true if the ErrorCollector has errors
 func (ctx *Context) HasErrors() bool {
-	return ctx.ErrorCollector.IsError()
+	return ctx.errorCollector.IsError()
+}
+
+// ErrorCollection returns the aggregation of errors if there exists errors. Otherwise,
+// nil is returned
+func (ctx *Context) ErrorCollection() error {
+	if ctx.errorCollector.IsError() {
+		// errorCollector implements the error interface
+		return ctx.errorCollector
+	}
+
+	return nil
 }
 
 // Query returns a QueryResultsChan, then runs the given query and sends the
@@ -98,18 +119,18 @@ func (ctx *Context) ProfileQueryAll(queries ...string) []QueryResultsChan {
 	return resChs
 }
 
-func (ctx *Context) QuerySync(query string) ([]*QueryResult, error) {
-	raw, err := ctx.query(query)
+func (ctx *Context) QuerySync(query string) ([]*QueryResult, prometheus.Warnings, error) {
+	raw, warnings, err := ctx.query(query)
 	if err != nil {
-		return nil, err
+		return nil, warnings, err
 	}
 
 	results := NewQueryResults(query, raw)
 	if results.Error != nil {
-		return nil, results.Error
+		return nil, warnings, results.Error
 	}
 
-	return results.Results, nil
+	return results.Results, warnings, nil
 }
 
 // QueryURL returns the URL used to query Prometheus
@@ -123,13 +144,11 @@ func runQuery(query string, ctx *Context, resCh QueryResultsChan, profileLabel s
 	defer errors.HandlePanic()
 	startQuery := time.Now()
 
-	raw, promErr := ctx.query(query)
-	ctx.ErrorCollector.Report(promErr)
-
+	raw, warnings, requestError := ctx.query(query)
 	results := NewQueryResults(query, raw)
-	if results.Error != nil {
-		ctx.ErrorCollector.Report(results.Error)
-	}
+
+	// report all warnings, request, and parse errors (nils will be ignored)
+	ctx.errorCollector.Report(query, warnings, requestError, results.Error)
 
 	if profileLabel != "" {
 		log.Profile(startQuery, profileLabel)
@@ -138,7 +157,7 @@ func runQuery(query string, ctx *Context, resCh QueryResultsChan, profileLabel s
 	resCh <- results
 }
 
-func (ctx *Context) query(query string) (interface{}, error) {
+func (ctx *Context) query(query string) (interface{}, prometheus.Warnings, error) {
 	u := ctx.Client.URL(epQuery, nil)
 	q := u.Query()
 	q.Set("query", query)
@@ -146,7 +165,7 @@ func (ctx *Context) query(query string) (interface{}, error) {
 
 	req, err := http.NewRequest(http.MethodPost, u.String(), nil)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	resp, body, warnings, err := ctx.Client.Do(context.Background(), req)
@@ -155,19 +174,19 @@ func (ctx *Context) query(query string) (interface{}, error) {
 	}
 	if err != nil {
 		if resp == nil {
-			return nil, fmt.Errorf("query error: '%s' fetching query '%s'", err.Error(), query)
+			return nil, warnings, fmt.Errorf("query error: '%s' fetching query '%s'", err.Error(), query)
 		}
 
-		return nil, fmt.Errorf("query error %d: '%s' fetching query '%s'", resp.StatusCode, err.Error(), query)
+		return nil, warnings, fmt.Errorf("query error %d: '%s' fetching query '%s'", resp.StatusCode, err.Error(), query)
 	}
 
 	var toReturn interface{}
 	err = json.Unmarshal(body, &toReturn)
 	if err != nil {
-		return nil, fmt.Errorf("query error: '%s' fetching query '%s'", err.Error(), query)
+		return nil, warnings, fmt.Errorf("query error: '%s' fetching query '%s'", err.Error(), query)
 	}
 
-	return toReturn, nil
+	return toReturn, warnings, nil
 }
 
 func (ctx *Context) QueryRange(query string, start, end time.Time, step time.Duration) QueryResultsChan {
@@ -186,18 +205,18 @@ func (ctx *Context) ProfileQueryRange(query string, start, end time.Time, step t
 	return resCh
 }
 
-func (ctx *Context) QueryRangeSync(query string, start, end time.Time, step time.Duration) ([]*QueryResult, error) {
-	raw, err := ctx.queryRange(query, start, end, step)
+func (ctx *Context) QueryRangeSync(query string, start, end time.Time, step time.Duration) ([]*QueryResult, prometheus.Warnings, error) {
+	raw, warnings, err := ctx.queryRange(query, start, end, step)
 	if err != nil {
-		return nil, err
+		return nil, warnings, err
 	}
 
 	results := NewQueryResults(query, raw)
 	if results.Error != nil {
-		return nil, results.Error
+		return nil, warnings, results.Error
 	}
 
-	return results.Results, nil
+	return results.Results, warnings, nil
 }
 
 // QueryRangeURL returns the URL used to query_range Prometheus
@@ -211,13 +230,11 @@ func runQueryRange(query string, start, end time.Time, step time.Duration, ctx *
 	defer errors.HandlePanic()
 	startQuery := time.Now()
 
-	raw, promErr := ctx.queryRange(query, start, end, step)
-	ctx.ErrorCollector.Report(promErr)
-
+	raw, warnings, requestError := ctx.queryRange(query, start, end, step)
 	results := NewQueryResults(query, raw)
-	if results.Error != nil {
-		ctx.ErrorCollector.Report(results.Error)
-	}
+
+	// report all warnings, request, and parse errors (nils will be ignored)
+	ctx.errorCollector.Report(query, warnings, requestError, results.Error)
 
 	if profileLabel != "" {
 		log.Profile(startQuery, profileLabel)
@@ -226,7 +243,7 @@ func runQueryRange(query string, start, end time.Time, step time.Duration, ctx *
 	resCh <- results
 }
 
-func (ctx *Context) queryRange(query string, start, end time.Time, step time.Duration) (interface{}, error) {
+func (ctx *Context) queryRange(query string, start, end time.Time, step time.Duration) (interface{}, prometheus.Warnings, error) {
 	u := ctx.Client.URL(epQueryRange, nil)
 	q := u.Query()
 	q.Set("query", query)
@@ -237,7 +254,7 @@ func (ctx *Context) queryRange(query string, start, end time.Time, step time.Dur
 
 	req, err := http.NewRequest(http.MethodPost, u.String(), nil)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	resp, body, warnings, err := ctx.Client.Do(context.Background(), req)
@@ -246,24 +263,24 @@ func (ctx *Context) queryRange(query string, start, end time.Time, step time.Dur
 	}
 	if err != nil {
 		if resp == nil {
-			return nil, fmt.Errorf("Error: %s, Body: %s Query: %s", err.Error(), body, query)
+			return nil, warnings, fmt.Errorf("Error: %s, Body: %s Query: %s", err.Error(), body, query)
 		}
 
-		return nil, fmt.Errorf("%d (%s) Headers: %s Error: %s Body: %s Query: %s", resp.StatusCode, http.StatusText(resp.StatusCode), util.HeaderString(resp.Header), body, err.Error(), query)
+		return nil, warnings, fmt.Errorf("%d (%s) Headers: %s Error: %s Body: %s Query: %s", resp.StatusCode, http.StatusText(resp.StatusCode), util.HeaderString(resp.Header), body, err.Error(), query)
 	}
 
 	// Unsuccessful Status Code, log body and status
 	statusCode := resp.StatusCode
 	statusText := http.StatusText(statusCode)
 	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
-		return nil, fmt.Errorf("%d (%s) Headers: %s, Body: %s Query: %s", statusCode, statusText, util.HeaderString(resp.Header), body, query)
+		return nil, warnings, fmt.Errorf("%d (%s) Headers: %s, Body: %s Query: %s", statusCode, statusText, util.HeaderString(resp.Header), body, query)
 	}
 
 	var toReturn interface{}
 	err = json.Unmarshal(body, &toReturn)
 	if err != nil {
-		return nil, fmt.Errorf("%d (%s) Headers: %s Error: %s Body: %s Query: %s", statusCode, statusText, util.HeaderString(resp.Header), err.Error(), body, query)
+		return nil, warnings, fmt.Errorf("%d (%s) Headers: %s Error: %s Body: %s Query: %s", statusCode, statusText, util.HeaderString(resp.Header), err.Error(), body, query)
 	}
 
-	return toReturn, nil
+	return toReturn, warnings, nil
 }

+ 1 - 1
pkg/prom/validate.go

@@ -33,7 +33,7 @@ func Validate(cli prometheus.Client) (*PrometheusMetadata, error) {
 func validate(cli prometheus.Client, q string) (*PrometheusMetadata, error) {
 	ctx := NewContext(cli)
 
-	resUp, err := ctx.QuerySync(q)
+	resUp, _, err := ctx.QuerySync(q)
 	if err != nil {
 		return &PrometheusMetadata{
 			Running:            false,

+ 400 - 0
pkg/util/buffer.go

@@ -0,0 +1,400 @@
+package util
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"io"
+	"math"
+	"reflect"
+	"unsafe"
+)
+
// NonPrimitiveTypeError represents an error where the user provided a
// non-primitive data type for reading/writing.
// NOTE(review): Go convention would be an ErrXxx name with a lowercase,
// unpunctuated message; renaming would break the exported API, so it is
// only flagged here.
var NonPrimitiveTypeError error = errors.New("Type provided to read/write does not fit inside 8 bytes.")

// Buffer is a utility type which implements a very basic binary protocol for
// writing core go types. All encodings are fixed-size and little-endian
// (see read/write below).
type Buffer struct {
	b *bytes.Buffer
}

// NewBuffer creates a new Buffer instance using LittleEndian ByteOrder.
func NewBuffer() *Buffer {
	var b bytes.Buffer
	return &Buffer{
		b: &b,
	}
}

// NewBufferFromBytes creates a new Buffer instance using the provided byte slice.
// The new buffer assumes ownership of the byte slice: the caller must not use
// the slice afterwards (bytes.NewBuffer semantics).
func NewBufferFromBytes(b []byte) *Buffer {
	return &Buffer{
		b: bytes.NewBuffer(b),
	}
}

// NewBufferFrom creates a new Buffer instance using the remaining unread data from the
// provided Buffer instance. The new buffer assumes ownership of the underlying data;
// the data remains aliased with b's storage, so b should not be used afterwards.
func NewBufferFrom(b *Buffer) *Buffer {
	bb := b.Bytes()
	return &Buffer{
		b: bytes.NewBuffer(bb),
	}
}
+
// WriteBool writes a bool value to the buffer, encoded as a single byte
// (1 for true, 0 for false). Encoding errors from write are discarded.
func (b *Buffer) WriteBool(t bool) {
	write(b.b, t)
}

// WriteInt writes an int value to the buffer.
// The value is narrowed to int32 so the encoded size is a fixed 4 bytes on
// every platform; values outside the int32 range are silently truncated.
func (b *Buffer) WriteInt(i int) {
	write(b.b, int32(i))
}

// WriteInt8 writes an int8 value to the buffer (1 byte).
func (b *Buffer) WriteInt8(i int8) {
	write(b.b, i)
}

// WriteInt16 writes an int16 value to the buffer (2 bytes, little-endian).
func (b *Buffer) WriteInt16(i int16) {
	write(b.b, i)
}

// WriteInt32 writes an int32 value to the buffer (4 bytes, little-endian).
func (b *Buffer) WriteInt32(i int32) {
	write(b.b, i)
}

// WriteInt64 writes an int64 value to the buffer (8 bytes, little-endian).
func (b *Buffer) WriteInt64(i int64) {
	write(b.b, i)
}
+
+// WriteUInt writes a uint value to the buffer.
+func (b *Buffer) WriteUInt(i uint) {
+	write(b.b, i)
+}
+
// WriteUInt8 writes a uint8 value to the buffer (1 byte).
func (b *Buffer) WriteUInt8(i uint8) {
	write(b.b, i)
}

// WriteUInt16 writes a uint16 value to the buffer (2 bytes, little-endian).
func (b *Buffer) WriteUInt16(i uint16) {
	write(b.b, i)
}

// WriteUInt32 writes a uint32 value to the buffer (4 bytes, little-endian).
func (b *Buffer) WriteUInt32(i uint32) {
	write(b.b, i)
}

// WriteUInt64 writes a uint64 value to the buffer (8 bytes, little-endian).
func (b *Buffer) WriteUInt64(i uint64) {
	write(b.b, i)
}

// WriteFloat32 writes a float32 value to the buffer as its IEEE 754 bit
// pattern (4 bytes, little-endian).
func (b *Buffer) WriteFloat32(i float32) {
	write(b.b, i)
}

// WriteFloat64 writes a float64 value to the buffer as its IEEE 754 bit
// pattern (8 bytes, little-endian).
func (b *Buffer) WriteFloat64(i float64) {
	write(b.b, i)
}
+
// WriteString writes the string's length as a uint16 followed by the raw
// string bytes (no copy is made; see stringToBytes).
// NOTE(review): the uint16 length prefix wraps for strings longer than 65535
// bytes, which would corrupt the stream for ReadString — callers must keep
// strings under 64KiB. Confirm callers respect this.
func (b *Buffer) WriteString(i string) {
	s := stringToBytes(i)
	write(b.b, uint16(len(s)))
	b.b.Write(s)
}

// WriteBytes writes the contents of the byte slice to the buffer verbatim.
// No length prefix is written; the reader must already know the length
// (see ReadBytes).
func (b *Buffer) WriteBytes(bytes []byte) {
	b.b.Write(bytes)
}
+
// ReadBool reads a bool value from the buffer.
// On failure (e.g. buffer exhausted) the zero value (false) is returned and
// the underlying read error is discarded — as with all Read* wrappers below.
func (b *Buffer) ReadBool() bool {
	var i bool
	read(b.b, &i)
	return i
}

// ReadInt reads an int value from the buffer.
// The value is decoded as an int32 (matching WriteInt's 4-byte encoding) and
// widened back to int.
func (b *Buffer) ReadInt() int {
	var i int32
	read(b.b, &i)
	return int(i)
}

// ReadInt8 reads an int8 value from the buffer.
func (b *Buffer) ReadInt8() int8 {
	var i int8
	read(b.b, &i)
	return i
}

// ReadInt16 reads an int16 value from the buffer.
func (b *Buffer) ReadInt16() int16 {
	var i int16
	read(b.b, &i)
	return i
}

// ReadInt32 reads an int32 value from the buffer.
func (b *Buffer) ReadInt32() int32 {
	var i int32
	read(b.b, &i)
	return i
}

// ReadInt64 reads an int64 value from the buffer.
func (b *Buffer) ReadInt64() int64 {
	var i int64
	read(b.b, &i)
	return i
}
+
+// ReadUInt reads a uint value from the buffer.
+func (b *Buffer) ReadUInt() uint {
+	var i uint
+	read(b.b, &i)
+	return i
+}
+
// ReadUInt8 reads a uint8 value from the buffer.
func (b *Buffer) ReadUInt8() uint8 {
	var i uint8
	read(b.b, &i)
	return i
}

// ReadUInt16 reads a uint16 value from the buffer.
func (b *Buffer) ReadUInt16() uint16 {
	var i uint16
	read(b.b, &i)
	return i
}

// ReadUInt32 reads a uint32 value from the buffer.
func (b *Buffer) ReadUInt32() uint32 {
	var i uint32
	read(b.b, &i)
	return i
}

// ReadUInt64 reads a uint64 value from the buffer.
func (b *Buffer) ReadUInt64() uint64 {
	var i uint64
	read(b.b, &i)
	return i
}

// ReadFloat32 reads a float32 value from the buffer.
func (b *Buffer) ReadFloat32() float32 {
	var i float32
	read(b.b, &i)
	return i
}

// ReadFloat64 reads a float64 value from the buffer.
func (b *Buffer) ReadFloat64() float64 {
	var i float64
	read(b.b, &i)
	return i
}

// ReadString reads a uint16 value from the buffer representing the string's
// length, then consumes exactly that many bytes and returns them as a string.
// The conversion is zero-copy (see bytesToString), so the result aliases the
// buffer's internal storage and is only valid until the buffer is mutated.
func (b *Buffer) ReadString() string {
	var l uint16
	read(b.b, &l)
	return bytesToString(b.b.Next(int(l)))
}

// ReadBytes reads the specified length from the buffer and returns the byte
// slice. The slice aliases the buffer's internal storage and is only valid
// until the next read or write (bytes.Buffer.Next semantics).
func (b *Buffer) ReadBytes(length int) []byte {
	return b.b.Next(length)
}

// Bytes returns the unread portion of the underlying buffer storage without
// consuming it.
func (b *Buffer) Bytes() []byte {
	return b.b.Bytes()
}
+
// read decodes a single fixed-size primitive from r into data, which must be
// a pointer to one of the types enumerated in intDataSize (bool, sized
// ints/uints, float32/float64). Little-endian byte order is used throughout.
//
// It returns NonPrimitiveTypeError for unsupported types (notably the
// platform-sized int and uint), or the underlying read error if r does not
// hold enough bytes. Note that all Buffer.Read* wrappers discard this error
// and surface the zero value instead.
func read(r *bytes.Buffer, data interface{}) error {
	order := binary.LittleEndian

	// Stack-allocated scratch space sized for the widest supported type
	// (8 bytes); readFull is used so it does not escape to the heap.
	var b [8]byte
	if n := intDataSize(data); n != 0 {
		bs := b[:n]

		if _, err := readFull(r, bs); err != nil {
			return err
		}

		switch data := data.(type) {
		case *bool:
			*data = bs[0] != 0
		case *int8:
			*data = int8(bs[0])
		case *uint8:
			*data = bs[0]
		case *int16:
			*data = int16(order.Uint16(bs))
		case *uint16:
			*data = order.Uint16(bs)
		case *int32:
			*data = int32(order.Uint32(bs))
		case *uint32:
			*data = order.Uint32(bs)
		case *int64:
			*data = int64(order.Uint64(bs))
		case *uint64:
			*data = order.Uint64(bs)
		case *float32:
			*data = math.Float32frombits(order.Uint32(bs))
		case *float64:
			*data = math.Float64frombits(order.Uint64(bs))
		default:
			n = 0 // fast path doesn't apply (e.g. a non-pointer was passed)
		}

		if n != 0 {
			return nil
		}
	}

	return NonPrimitiveTypeError
}
+
// readFull fills buf completely from r. It mirrors io.ReadFull's contract but
// is specialized to *bytes.Buffer so the caller's stack-allocated scratch
// slice does not escape to the heap.
//
// It returns the byte count read; err is nil when buf was filled,
// io.ErrUnexpectedEOF when some (but not all) bytes arrived before EOF, and
// the underlying error otherwise.
func readFull(r *bytes.Buffer, buf []byte) (n int, err error) {
	want := len(buf)
	for n < want {
		var got int
		got, err = r.Read(buf[n:])
		n += got
		if err != nil {
			break
		}
	}
	switch {
	case n >= want:
		err = nil
	case n > 0 && err == io.EOF:
		err = io.ErrUnexpectedEOF
	}
	return
}
+
// write encodes a single fixed-size primitive into w in little-endian byte
// order. Both value and pointer forms of the supported types (bool, sized
// ints/uints, float32/float64 — see intDataSize) are accepted; anything else
// (including platform-sized int and uint) returns NonPrimitiveTypeError.
// Note that all Buffer.Write* wrappers discard the returned error.
func write(w *bytes.Buffer, data interface{}) error {
	order := binary.LittleEndian

	// Stack-allocated scratch space sized for the widest supported type.
	var b [8]byte
	if n := intDataSize(data); n != 0 {
		bs := b[:n]

		// The switch covers exactly the types for which intDataSize is
		// non-zero, so bs is always fully populated before the Write below.
		switch v := data.(type) {
		case *bool:
			if *v {
				bs[0] = 1
			} else {
				bs[0] = 0
			}
		case bool:
			if v {
				bs[0] = 1
			} else {
				bs[0] = 0
			}
		case *int8:
			bs[0] = byte(*v)
		case int8:
			bs[0] = byte(v)
		case *uint8:
			bs[0] = *v
		case uint8:
			bs[0] = v
		case *int16:
			order.PutUint16(bs, uint16(*v))
		case int16:
			order.PutUint16(bs, uint16(v))
		case *uint16:
			order.PutUint16(bs, *v)
		case uint16:
			order.PutUint16(bs, v)
		case *int32:
			order.PutUint32(bs, uint32(*v))
		case int32:
			order.PutUint32(bs, uint32(v))
		case *uint32:
			order.PutUint32(bs, *v)
		case uint32:
			order.PutUint32(bs, v)
		case *int64:
			order.PutUint64(bs, uint64(*v))
		case int64:
			order.PutUint64(bs, uint64(v))
		case *uint64:
			order.PutUint64(bs, *v)
		case uint64:
			order.PutUint64(bs, v)
		case *float32:
			order.PutUint32(bs, math.Float32bits(*v))
		case float32:
			order.PutUint32(bs, math.Float32bits(v))
		case *float64:
			order.PutUint64(bs, math.Float64bits(*v))
		case float64:
			order.PutUint64(bs, math.Float64bits(v))
		}

		_, err := w.Write(bs)
		return err
	}

	return NonPrimitiveTypeError
}
+
// intDataSize reports the encoded size in bytes of data's dynamic type,
// accepting both value and pointer forms. A return of zero means the type is
// not supported by the fast paths in read and write.
func intDataSize(data interface{}) int {
	switch data.(type) {
	case bool, *bool, int8, *int8, uint8, *uint8:
		return 1
	case int16, *int16, uint16, *uint16:
		return 2
	case int32, *int32, uint32, *uint32, float32, *float32:
		return 4
	case int64, *int64, uint64, *uint64, float64, *float64:
		return 8
	default:
		return 0
	}
}
+
// bytesToString converts b to a string without allocating or copying.
// The result aliases b's backing array, so it is only safe while that memory
// is neither mutated nor reclaimed (ReadString hands out buffer-owned bytes
// through this).
func bytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// stringToBytes converts s to a []byte without allocating or copying.
// The returned slice aliases the string's immutable backing memory and must
// never be written to; WriteString only reads from it.
// NOTE(review): this reflect.StringHeader/SliceHeader pattern predates
// unsafe.Slice/unsafe.StringData and is flagged by go vet; it also relies on
// s staying reachable while b is in use — confirm before reusing elsewhere.
func stringToBytes(s string) (b []byte) {
	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh.Data = strh.Data
	sh.Len = strh.Len
	sh.Cap = strh.Len
	return b
}

+ 22 - 0
pkg/util/http.go

@@ -1,6 +1,7 @@
 package util
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"net/url"
@@ -36,6 +37,27 @@ func NewQueryParams(values url.Values) QueryParams {
 	return mapper.NewMapper(&queryParamsMap{values})
 }
 
//--------------------------------------------------------------------------
//  HTTP Context Utilities
//--------------------------------------------------------------------------

const (
	// ContextWarning is the request-context key under which a warning
	// message is stored by SetWarning and retrieved by GetWarning.
	// NOTE(review): a plain string context key is flagged by go vet /
	// staticcheck (SA1029) because it can collide with keys set by other
	// packages; the conventional fix is an unexported key type. Left as-is
	// here because the constant is exported API.
	ContextWarning string = "Warning"
)

// GetWarning Extracts a warning message from the request context if it exists.
// ok is false when no warning (or a non-string value) is stored.
func GetWarning(r *http.Request) (warning string, ok bool) {
	warning, ok = r.Context().Value(ContextWarning).(string)
	return
}

// SetWarning Sets the warning context on the provided request and returns a new instance of the request
// with the new context. The original request is not modified.
func SetWarning(r *http.Request, warning string) *http.Request {
	ctx := context.WithValue(r.Context(), ContextWarning, warning)
	return r.WithContext(ctx)
}
+
 //--------------------------------------------------------------------------
 //  Package Funcs
 //--------------------------------------------------------------------------

+ 65 - 0
pkg/util/strings.go

@@ -0,0 +1,65 @@
+package util
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
// init seeds the global math/rand source so RandSeq output differs between
// process runs.
// NOTE(review): rand.Seed is deprecated as of Go 1.20 (the global source is
// auto-seeded); acceptable for the Go version this file targets.
func init() {
	rand.Seed(time.Now().UnixNano())
}

// alpha is the candidate rune set used by RandSeq.
var alpha = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// alphanumeric extends alpha with digits.
// NOTE(review): not referenced in this file — presumably used elsewhere in
// the package; verify before removing.
var alphanumeric = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

// Binary (IEC) byte-size units; each constant is 1024x the previous.
const (
	_ = 1 << (10 * iota)
	// KiB is bytes per Kibibyte
	KiB
	// MiB is bytes per Mebibyte
	MiB
	// GiB is bytes per Gibibyte
	GiB
	// TiB is bytes per Tebibyte
	TiB
)
+
+// RandSeq generates a pseudo-random alphabetic string of the given length
+func RandSeq(n int) string {
+	b := make([]rune, n)
+	for i := range b {
+		b[i] = alpha[rand.Intn(len(alpha))]
+	}
+	return string(b)
+}
+
+// FormatBytes takes a number of bytes and formats it as a string
+func FormatBytes(numBytes int64) string {
+	if numBytes > TiB {
+		return fmt.Sprintf("%.2fTiB", float64(numBytes)/TiB)
+	}
+	if numBytes > GiB {
+		return fmt.Sprintf("%.2fGiB", float64(numBytes)/GiB)
+	}
+	if numBytes > MiB {
+		return fmt.Sprintf("%.2fMiB", float64(numBytes)/MiB)
+	}
+	if numBytes > KiB {
+		return fmt.Sprintf("%.2fKiB", float64(numBytes)/KiB)
+	}
+	return fmt.Sprintf("%dB", numBytes)
+}
+
// FormatUTCOffset converts a duration to a UTC-offset string of the form
// "-07:00" or "+05:30". The zero duration formats as "+00:00".
func FormatUTCOffset(dur time.Duration) string {
	sign := "+"
	if dur < 0 {
		sign = "-"
	}
	hrs := int(math.Trunc(math.Abs(dur.Hours())))
	mins := int(math.Abs(dur.Minutes())) - hrs*60
	return fmt.Sprintf("%s%02d:%02d", sign, hrs, mins)
}

+ 10 - 3
pkg/util/time.go

@@ -7,10 +7,17 @@ import (
 )
 
 const (
-	MinsPerHour   = 60.0
-	HoursPerDay   = 24.0
+	// MinsPerHour expresses the amount of minutes in an hour
+	MinsPerHour = 60.0
+
+	// HoursPerDay expresses the amount of hours in a day
+	HoursPerDay = 24.0
+
+	// HoursPerMonth expresses the amount of hours in a month
 	HoursPerMonth = 730.0
-	DaysPerMonth  = 30.42
+
+	// DaysPerMonth expresses the amount of days in a month
+	DaysPerMonth = 30.42
 )
 
 // ParseDuration converts a Prometheus-style duration string into a Duration

+ 1 - 1
test/cloud_test.go

@@ -1,4 +1,4 @@
-package costmodel_test
+package test
 
 import (
 	"os"

+ 1 - 1
test/clusterinfo_test.go

@@ -1,4 +1,4 @@
-package costmodel_test
+package test
 
 import (
 	"encoding/json"

+ 1 - 1
test/keytuple_test.go

@@ -1,4 +1,4 @@
-package costmodel_test
+package test
 
 import (
 	"strings"

+ 1 - 1
test/remote_cluster_test.go

@@ -1,4 +1,4 @@
-package costmodel_test
+package test
 
 import (
 	_ "k8s.io/client-go/plugin/pkg/client/auth"

+ 1 - 1
test/util_test.go

@@ -1,4 +1,4 @@
-package costmodel_test
+package test
 
 import (
 	"net/http"