Bläddra i källkod

add rego rules and opa to recommender system

Alexander Belanger 3 år sedan
förälder
incheckning
5a983109bb

+ 35 - 0
api/types/monitor.go

@@ -0,0 +1,35 @@
+package types
+
+import "time"
+
+// MonitorTestStatus is the outcome of the most recent run of a monitor test.
+type MonitorTestStatus string
+
+const (
+	MonitorTestStatusSuccess MonitorTestStatus = "success"
+	MonitorTestStatusFailed  MonitorTestStatus = "failed"
+)
+
+// MonitorTestSeverity ranks how urgent a failing monitor test result is.
+type MonitorTestSeverity string
+
+const (
+	MonitorTestSeverityCritical MonitorTestSeverity = "critical"
+	MonitorTestSeverityHigh     MonitorTestSeverity = "high"
+	MonitorTestSeverityLow      MonitorTestSeverity = "low"
+)
+
+// MonitorTestResult is the API representation of a single monitor test
+// result, scoped to a project/cluster and keyed by category and object ID.
+type MonitorTestResult struct {
+	ProjectID uint   `json:"project_id"`
+	ClusterID uint   `json:"cluster_id"`
+	Category  string `json:"category"`
+	ObjectID  string `json:"object_id"`
+
+	// LastStatusChange is the time the result last flipped between
+	// success and failed; nil pointers indicate "never recorded".
+	LastStatusChange *time.Time `json:"last_status_change"`
+
+	LastTested    *time.Time        `json:"last_tested"`
+	LastRunResult MonitorTestStatus `json:"last_run_result"`
+
+	Title   string `json:"title"`
+	Message string `json:"message"`
+
+	Severity MonitorTestSeverity `json:"severity"`
+}

+ 15 - 9
go.mod

@@ -51,8 +51,8 @@ require (
 	golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26
 	google.golang.org/api v0.62.0
 	google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03
-	google.golang.org/grpc v1.47.0
-	google.golang.org/protobuf v1.28.0
+	google.golang.org/grpc v1.49.0
+	google.golang.org/protobuf v1.28.1
 	gorm.io/driver/sqlite v1.1.3
 	gorm.io/gorm v1.22.3
 	helm.sh/helm/v3 v3.9.0
@@ -81,7 +81,9 @@ require (
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
 	github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 // indirect
+	github.com/OneOfOne/xxhash v1.2.8 // indirect
 	github.com/PuerkitoBio/goquery v1.5.1 // indirect
+	github.com/agnivade/levenshtein v1.1.1 // indirect
 	github.com/andybalholm/cascadia v1.1.0 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.16.4 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.15.9 // indirect
@@ -110,8 +112,12 @@ require (
 	github.com/mmcdole/gofeed v1.1.3 // indirect
 	github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/open-policy-agent/opa v0.44.0 // indirect
 	github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
+	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
+	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/xanzy/go-gitlab v0.68.0 // indirect
+	github.com/yashtewari/glob-intersection v0.1.0 // indirect
 	go.uber.org/goleak v1.1.12 // indirect
 )
 
@@ -131,7 +137,7 @@ require (
 	github.com/Masterminds/sprig/v3 v3.2.2 // indirect
 	github.com/Masterminds/squirrel v1.5.3 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/Microsoft/hcsshim v0.9.3 // indirect
+	github.com/Microsoft/hcsshim v0.9.4 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/apex/log v1.9.0 // indirect
@@ -146,7 +152,7 @@ require (
 	github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
 	github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
 	github.com/containerd/cgroups v1.0.3 // indirect
-	github.com/containerd/containerd v1.6.6 // indirect
+	github.com/containerd/containerd v1.6.8 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -248,10 +254,10 @@ require (
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.12.2 // indirect
+	github.com/prometheus/client_golang v1.13.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.35.0 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/rubenv/sql-migrate v1.1.2 // indirect
@@ -261,7 +267,7 @@ require (
 	github.com/sendgrid/rest v2.6.3+incompatible // indirect
 	github.com/sergi/go-diff v1.2.0 // indirect
 	github.com/shopspring/decimal v1.3.1 // indirect
-	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/spf13/afero v1.6.0 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -278,7 +284,7 @@ require (
 	go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
 	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
-	golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b // indirect
+	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
 	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect

+ 33 - 0
go.sum

@@ -190,6 +190,8 @@ github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQN
 github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo=
 github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
+github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -197,6 +199,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
 github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=
 github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE=
@@ -213,6 +217,8 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:H
 github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
 github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
 github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
 github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
@@ -248,6 +254,7 @@ github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3st
 github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
 github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -478,6 +485,8 @@ github.com/containerd/containerd v1.6.3 h1:JfgUEIAH07xDWk6kqz0P3ArZt+KJ9YeihSC9u
 github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
 github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0=
 github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
+github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs=
+github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -604,6 +613,7 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
 github.com/digitalocean/godo v1.75.0 h1:UijUv60I095CqJqGKdjY2RTPnnIa4iFddmq+1wfyS4Y=
 github.com/digitalocean/godo v1.75.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
@@ -1611,6 +1621,8 @@ github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
 github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/open-policy-agent/opa v0.44.0 h1:sEZthsrWBqIN+ShTMJ0Hcz6a3GkYsY4FaB2S/ou2hZk=
+github.com/open-policy-agent/opa v0.44.0/go.mod h1:YpJaFIk5pq89n/k72c1lVvfvR5uopdJft2tMg1CW/yU=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1710,6 +1722,8 @@ github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVD
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
 github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
+github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1732,6 +1746,8 @@ github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuI
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE=
 github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1746,6 +1762,8 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
 github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
@@ -1757,6 +1775,8 @@ github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQ
 github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
 github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
 github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
 github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 h1:xe+mmCnDN82KhC010l3NfYlA8ZbOuzbXAzSYBa6wbMc=
 github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8/go.mod h1:WIfMkQNY+oq/mWwtsjOYHIZBuwthioY2srOmljJkTnk=
@@ -1840,6 +1860,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -1926,7 +1948,10 @@ github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=
 github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
 github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
 github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
 github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
@@ -2013,6 +2038,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
 github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
+github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg=
+github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
 github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc=
 github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
 github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
@@ -2475,6 +2502,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbuf
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8=
 golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -2807,6 +2836,8 @@ google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8=
 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
 google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -2823,6 +2854,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

+ 41 - 0
internal/models/monitor.go

@@ -0,0 +1,41 @@
+package models
+
+import (
+	"time"
+
+	"github.com/porter-dev/porter/api/types"
+	"gorm.io/gorm"
+)
+
+// MonitorTestResult is the database model for a monitor test result.
+// It mirrors types.MonitorTestResult but stores the status and severity
+// enums as plain strings for persistence.
+type MonitorTestResult struct {
+	gorm.Model
+
+	ProjectID uint
+	ClusterID uint
+	Category  string
+	ObjectID  string
+
+	LastStatusChange *time.Time
+	LastTested       *time.Time
+	LastRunResult    string
+
+	Title   string
+	Message string
+
+	Severity string
+}
+
+// ToMonitorTestResultType converts the database model to its API
+// representation, casting the stored strings back to the typed enums.
+// Note: the string values are not validated here; callers rely on the
+// writer having stored valid MonitorTestStatus/MonitorTestSeverity values.
+func (m *MonitorTestResult) ToMonitorTestResultType() *types.MonitorTestResult {
+	return &types.MonitorTestResult{
+		ProjectID:        m.ProjectID,
+		ClusterID:        m.ClusterID,
+		Category:         m.Category,
+		ObjectID:         m.ObjectID,
+		LastStatusChange: m.LastStatusChange,
+		LastTested:       m.LastTested,
+		LastRunResult:    types.MonitorTestStatus(m.LastRunResult),
+		Title:            m.Title,
+		Message:          m.Message,
+		Severity:         types.MonitorTestSeverity(m.Severity),
+	}
+}

+ 48 - 0
internal/opa/config.yaml

@@ -0,0 +1,48 @@
+# OPA recommender policy configuration, consumed by internal/opa/loader.go.
+# Each top-level key names a policy collection:
+#   kind:     which Kubernetes object type the queries run against
+#             ("helm_release" or "pod" — see KubernetesBuiltInKind)
+#   match:    criteria selecting the target object(s); helm_release matches
+#             by release name or chart_name within a namespace, pod matches
+#             by namespace + label selector
+#   policies: rego modules to evaluate, with paths relative to this file's
+#             directory and names used as the rego query path (data.<name>)
+web:
+  kind: "helm_release"
+  match:
+    chart_name: "web"
+  policies:
+  - path: "./policies/web/web_version.rego"
+    name: "web.version"
+nginx:
+  kind: "helm_release"
+  match:
+    name: nginx-ingress
+    namespace: ingress-nginx
+  policies:
+  - path: "./policies/nginx/nginx_version.rego"
+    name: "nginx.version"
+  - path: "./policies/nginx/nginx_topology_spread_constraints.rego"
+    name: "nginx.topology_spread_constraints"
+  - path: "./policies/nginx/memory_limits.rego"
+    name: "nginx.memory_limits"
+  - path: "./policies/nginx/wait_shutdown.rego"
+    name: "nginx.wait_shutdown"
+prometheus:
+  kind: "helm_release"
+  match:
+    name: prometheus
+    namespace: monitoring
+  policies:
+  - path: "./policies/prometheus/server_memory_limits.rego"
+    name: "prometheus.server_memory_limits"
+  - path: "./policies/prometheus/alertmanager_memory_limits.rego"
+    name: "prometheus.alertmanager_memory_limits"
+  - path: "./policies/prometheus/kubestatemetrics_memory_limits.rego"
+    name: "prometheus.kubestatemetrics_memory_limits"
+  - path: "./policies/prometheus/pushgateway_memory_limits.rego"
+    name: "prometheus.pushgateway_memory_limits"
+  - path: "./policies/prometheus/nodeexporter_memory_limits.rego"
+    name: "prometheus.nodeexporter_memory_limits"
+nginx_pod:
+  kind: "pod"
+  match:
+    namespace: ingress-nginx
+    labels:
+      app.kubernetes.io/component: "controller"
+      app.kubernetes.io/instance: "nginx-ingress"
+      app.kubernetes.io/name: "ingress-nginx"
+  policies:
+  - path: "./policies/pod/running.rego"
+    name: "pod.running"

+ 78 - 0
internal/opa/loader.go

@@ -0,0 +1,78 @@
+package opa
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+
+	"github.com/open-policy-agent/opa/rego"
+	"sigs.k8s.io/yaml"
+)
+
+// ConfigFile maps a policy-collection name to its configuration.
+// NOTE(review): this named type is declared but LoadPolicies below builds a
+// bare map[string]ConfigFilePolicyCollection instead — consider using it.
+type ConfigFile map[string]ConfigFilePolicyCollection
+
+// ConfigFilePolicyCollection is one entry of config.yaml: the target kind,
+// the match criteria, and the list of rego policies to evaluate.
+// NOTE(review): sigs.k8s.io/yaml converts YAML to JSON and honors json
+// struct tags, not yaml tags — these `yaml:"..."` tags are presumably
+// ignored and unmarshaling relies on encoding/json's case-insensitive
+// field matching. Consider switching to json tags; verify.
+type ConfigFilePolicyCollection struct {
+	Kind     string             `yaml:"kind"`
+	Match    MatchParameters    `yaml:"match"`
+	Policies []ConfigFilePolicy `yaml:"policies"`
+}
+
+// ConfigFilePolicy points at a rego module on disk (Path, relative to the
+// config directory) and names the rego package to query (Name).
+type ConfigFilePolicy struct {
+	Path string
+	Name string
+}
+
+// LoadPolicies reads <configFilePathDir>/config.yaml, loads every rego
+// module it references, and prepares each as an evaluable query
+// ("data.<policy name>"). It returns the full set of prepared policies or
+// the first read/parse/prepare error encountered.
+func LoadPolicies(configFilePathDir string) (*KubernetesPolicies, error) {
+	// read and parse the config file
+	fileBytes, err := ioutil.ReadFile(filepath.Join(configFilePathDir, "config.yaml"))
+
+	if err != nil {
+		return nil, err
+	}
+
+	configFile := make(map[string]ConfigFilePolicyCollection)
+
+	err = yaml.Unmarshal(fileBytes, &configFile)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// load each map entry
+	policies := make(map[string]KubernetesOPAQueryCollection)
+
+	for name, cfPolicyCollection := range configFile {
+		queries := make([]rego.PreparedEvalQuery, 0)
+
+		for _, cfPolicy := range cfPolicyCollection.Policies {
+			// policy paths in config.yaml are relative to the config dir
+			fileBytes, err := ioutil.ReadFile(filepath.Join(configFilePathDir, cfPolicy.Path))
+
+			if err != nil {
+				return nil, err
+			}
+
+			// the query path is the rego package name, e.g. data.nginx.version;
+			// the policy name doubles as the module identifier
+			query, err := rego.New(
+				rego.Query(fmt.Sprintf("data.%s", cfPolicy.Name)),
+				rego.Module(cfPolicy.Name, string(fileBytes)),
+			).PrepareForEval(context.Background())
+
+			if err != nil {
+				// Handle error.
+				return nil, err
+			}
+
+			queries = append(queries, query)
+		}
+
+		policies[name] = KubernetesOPAQueryCollection{
+			Kind:    KubernetesBuiltInKind(cfPolicyCollection.Kind),
+			Queries: queries,
+			Match:   cfPolicyCollection.Match,
+		}
+	}
+
+	return &KubernetesPolicies{
+		Policies: policies,
+	}, nil
+}

+ 221 - 0
internal/opa/opa.go

@@ -0,0 +1,221 @@
+package opa
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/open-policy-agent/opa/rego"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/helm"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/pkg/logger"
+	"helm.sh/helm/v3/pkg/release"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// KubernetesPolicies is the full set of prepared OPA query collections,
+// keyed by the collection name from config.yaml (see LoadPolicies).
+type KubernetesPolicies struct {
+	Policies map[string]KubernetesOPAQueryCollection
+}
+
+// KubernetesOPARunner evaluates loaded policies against live cluster
+// state fetched through a Kubernetes agent.
+type KubernetesOPARunner struct {
+	*KubernetesPolicies
+
+	k8sAgent *kubernetes.Agent
+}
+
+// KubernetesBuiltInKind selects which object type a query collection
+// targets; only the two kinds below are supported.
+type KubernetesBuiltInKind string
+
+const (
+	HelmRelease KubernetesBuiltInKind = "helm_release"
+	Pod         KubernetesBuiltInKind = "pod"
+)
+
+// KubernetesOPAQueryCollection bundles the prepared rego queries for one
+// config.yaml entry with the kind and match criteria used to find targets.
+type KubernetesOPAQueryCollection struct {
+	Kind    KubernetesBuiltInKind
+	Match   MatchParameters
+	Queries []rego.PreparedEvalQuery
+}
+
+// MatchParameters selects the target object(s). For helm_release kinds,
+// either Name (exact release) or ChartName (all releases of that chart)
+// is used, scoped by Namespace; for pod kinds, Labels + Namespace are used.
+type MatchParameters struct {
+	Name      string `json:"name"`
+	Namespace string `json:"namespace"`
+
+	ChartName string `json:"chart_name"`
+
+	Labels map[string]string `json:"labels"`
+}
+
+// OPARecommenderQueryResult is one policy evaluation outcome. The
+// mapstructure tags match the variable names bound in the rego modules
+// (allow, POLICY_VERSION, etc.); CategoryName and ObjectID are filled in
+// by the runner, not decoded from the query result.
+type OPARecommenderQueryResult struct {
+	Allow bool `mapstructure:"Allow"`
+
+	CategoryName string
+	ObjectID     string
+
+	PolicyVersion  string `mapstructure:"POLICY_VERSION"`
+	PolicySeverity string `mapstructure:"POLICY_SEVERITY"`
+	PolicyTitle    string `mapstructure:"POLICY_TITLE"`
+	PolicyMessage  string `mapstructure:"POLICY_MESSAGE"`
+}
+
+// NewRunner constructs a runner over the given policies and cluster agent.
+func NewRunner(policies *KubernetesPolicies, k8sAgent *kubernetes.Agent) *KubernetesOPARunner {
+	return &KubernetesOPARunner{policies, k8sAgent}
+}
+
+// GetRecommendationsByName evaluates the named policy collection and
+// returns one result per (matched object, query) pair. It errors when the
+// name is unknown or the collection's kind is unsupported.
+// NOTE(review): error strings are capitalized, which go vet/staticcheck
+// flag (ST1005) — consider lowercasing.
+func (runner *KubernetesOPARunner) GetRecommendationsByName(name string) ([]*OPARecommenderQueryResult, error) {
+	// look up to determine if the name is registered
+	queryCollection, exists := runner.Policies[name]
+
+	if !exists {
+		return nil, fmt.Errorf("No policies for %s found", name)
+	}
+
+	switch queryCollection.Kind {
+	case HelmRelease:
+		return runner.runHelmReleaseQueries(name, queryCollection)
+	case Pod:
+		return runner.runPodQueries(name, queryCollection)
+	default:
+		return nil, fmt.Errorf("Not a supported query kind")
+	}
+}
+
+// SetK8sAgent swaps the Kubernetes agent used for subsequent runs.
+func (runner *KubernetesOPARunner) SetK8sAgent(k8sAgent *kubernetes.Agent) {
+	runner.k8sAgent = k8sAgent
+}
+
+// runHelmReleaseQueries finds the helm release(s) selected by the match
+// parameters (exact name, or all releases of a chart name within the
+// namespace) and evaluates each query against the release's chart version
+// and config values.
+func (runner *KubernetesOPARunner) runHelmReleaseQueries(name string, collection KubernetesOPAQueryCollection) ([]*OPARecommenderQueryResult, error) {
+	res := make([]*OPARecommenderQueryResult, 0)
+
+	helmAgent, err := helm.GetAgentFromK8sAgent("secret", collection.Match.Namespace, logger.New(false, os.Stdout), runner.k8sAgent)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// get the matching helm release(s) based on the match
+	var helmReleases []*release.Release
+
+	if collection.Match.Name != "" {
+		// version 0 — presumably "latest revision"; confirm against helm agent
+		helmRelease, err := helmAgent.GetRelease(collection.Match.Name, 0, false)
+
+		if err != nil {
+			return nil, err
+		}
+
+		helmReleases = append(helmReleases, helmRelease)
+	} else if collection.Match.ChartName != "" {
+		// list active/transitional releases, then filter by chart name
+		prefilterReleases, err := helmAgent.ListReleases(collection.Match.Namespace, &types.ReleaseListFilter{
+			ByDate: true,
+			StatusFilter: []string{
+				"deployed",
+				"pending",
+				"pending-install",
+				"pending-upgrade",
+				"pending-rollback",
+				"failed",
+			},
+		})
+
+		if err != nil {
+			return nil, err
+		}
+
+		for _, prefilterRelease := range prefilterReleases {
+			if prefilterRelease.Chart.Name() == collection.Match.ChartName {
+				helmReleases = append(helmReleases, prefilterRelease)
+			}
+		}
+	} else {
+		return nil, fmt.Errorf("invalid match parameters")
+	}
+
+	for _, helmRelease := range helmReleases {
+		for _, query := range collection.Queries {
+			// policies receive the chart version and user-supplied values
+			results, err := query.Eval(
+				context.Background(),
+				rego.EvalInput(map[string]interface{}{
+					"version": helmRelease.Chart.Metadata.Version,
+					"values":  helmRelease.Config,
+				}),
+			)
+
+			if err != nil {
+				return nil, err
+			}
+
+			// results with != 1 entry are silently skipped
+			if len(results) == 1 {
+				queryRes := &OPARecommenderQueryResult{
+					ObjectID:     fmt.Sprintf("helm_release/%s/%s", helmRelease.Namespace, helmRelease.Name),
+					CategoryName: name,
+				}
+
+				// NOTE(review): Expressions[0] is indexed without a length
+				// check — confirm a single-result eval always has at least
+				// one expression, or guard this.
+				err = mapstructure.Decode(results[0].Expressions[0].Value, queryRes)
+
+				if err != nil {
+					return nil, err
+				}
+
+				res = append(res, queryRes)
+			}
+		}
+	}
+
+	return res, nil
+}
+
+// runPodQueries finds pods by label selector + namespace and evaluates
+// each query against the pod converted to its unstructured (map) form.
+func (runner *KubernetesOPARunner) runPodQueries(name string, collection KubernetesOPAQueryCollection) ([]*OPARecommenderQueryResult, error) {
+	res := make([]*OPARecommenderQueryResult, 0)
+
+	// build a "k1=v1,k2=v2" label selector from the match labels;
+	// map iteration order is random but selector semantics are unaffected
+	lselArr := make([]string, 0)
+
+	for k, v := range collection.Match.Labels {
+		lselArr = append(lselArr, fmt.Sprintf("%s=%s", k, v))
+	}
+
+	lsel := strings.Join(lselArr, ",")
+
+	pods, err := runner.k8sAgent.GetPodsByLabel(lsel, collection.Match.Namespace)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, pod := range pods.Items {
+		// rego input must be plain JSON-like data, so convert the typed
+		// pod object to an unstructured map first
+		unstructuredPod, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pod)
+
+		if err != nil {
+			return nil, err
+		}
+
+		for _, query := range collection.Queries {
+			results, err := query.Eval(
+				context.Background(),
+				rego.EvalInput(unstructuredPod),
+			)
+
+			if err != nil {
+				return nil, err
+			}
+
+			// same single-result convention as runHelmReleaseQueries
+			if len(results) == 1 {
+				queryRes := &OPARecommenderQueryResult{
+					ObjectID:     fmt.Sprintf("pod/%s/%s", pod.Namespace, pod.Name),
+					CategoryName: name,
+				}
+
+				err = mapstructure.Decode(results[0].Expressions[0].Value, queryRes)
+
+				if err != nil {
+					return nil, err
+				}
+
+				res = append(res, queryRes)
+			}
+		}
+	}
+
+	return res, nil
+}

+ 34 - 0
internal/opa/policies/nginx/memory_limits.rego

@@ -0,0 +1,34 @@
+package nginx.memory_limits
+
+import future.keywords.if
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# controller:
+#   resources:
+#     limits:
+#       cpu: 250m
+#       memory: 275Mi
+#     requests:
+#       cpu: 250m
+#       memory: 275Mi
+
+# POLICY_* bindings are decoded by the runner via mapstructure into
+# OPARecommenderQueryResult; keep the names in sync with those tags.
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("NGINX ingress controller should have memory limits set", [])
+
+# allow holds when the memory limit value exists (is defined and non-falsy)
+# anywhere under values.controller.resources.limits.memory.
+allow if {
+	input.values.controller.resources.limits.memory
+}
+
+# POLICY_MESSAGE: success text when allow holds; the else branch below is
+# the default message when allow is undefined.
+POLICY_MESSAGE := sprintf("Success: NGINX ingress controller has memory limits set", []) if allow
+
+else := sprintf("Failed: NGINX ingress controller does not have memory limits set", []) {
+	true
+}

+ 44 - 0
internal/opa/policies/nginx/nginx_topology_spread_constraints.rego

@@ -0,0 +1,44 @@
+package nginx.topology_spread_constraints
+
+import future.keywords.if
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of topologySpreadConstraints as a soft constraint. We look
+# for Helm values of the form:
+# 
+# controller:
+#   topologySpreadConstraints:
+#     - labelSelector:
+#         matchLabels:
+#           app.kubernetes.io/component: controller
+#           app.kubernetes.io/instance: nginx-ingress
+#           app.kubernetes.io/name: ingress-nginx
+#       maxSkew: 1
+#       topologyKey: kubernetes.io/hostname
+#       whenUnsatisfiable: DoNotSchedule
+#     - labelSelector:
+#         matchLabels:
+#           app.kubernetes.io/component: controller
+#           app.kubernetes.io/instance: nginx-ingress
+#           app.kubernetes.io/name: ingress-nginx
+#       maxSkew: 1
+#       topologyKey: topology.kubernetes.io/zone
+#       whenUnsatisfiable: ScheduleAnyway
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("NGINX ingress controller should have topology spread constraints", [])
+
+allow if {
+	count(input.values.controller.topologySpreadConstraints) >= 1
+}
+
+POLICY_MESSAGE := sprintf("Success: NGINX ingress controller has topology spread constraints", []) if allow
+
+else := sprintf("Failed: NGINX ingress controller does not have topology spread constraints set", []) {
+	true
+}

+ 23 - 0
internal/opa/policies/nginx/nginx_version.rego

@@ -0,0 +1,23 @@
+package nginx.version
+
+import future.keywords.if
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+# TODO: set the actual latest stable version
+latest_stable_version := "0.4.18"
+
+POLICY_TITLE := sprintf("The NGINX version should be at least v%s", [latest_stable_version])
+
+trimmedVersion := trim_left(input.version, "v")
+
+# semver.compare(a, b) returns -1 if a < b and 0 if they are equal; the title
+# says "at least", so equal-to-latest must also pass — compare must be <= 0
+allow if semver.compare(latest_stable_version, trimmedVersion) <= 0
+
+POLICY_MESSAGE := sprintf("Success: NGINX version is up-to-date", []) if allow
+
+else := sprintf("Failed: latest stable version is %s, but you are on %s", [latest_stable_version, trimmedVersion]) {
+	true
+}

+ 35 - 0
internal/opa/policies/nginx/wait_shutdown.rego

@@ -0,0 +1,35 @@
+package nginx.wait_shutdown
+
+import future.keywords.if
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the modification of the wait-shutdown script as a soft constraint. We look
+# for Helm values of the form:
+# 
+# controller:
+#   lifecycle:
+#     preStop:
+#       exec:
+#         command:
+#           - sh
+#           - '-c'
+#           - sleep 120 && /wait-shutdown
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("NGINX ingress controller should have a modified wait-shutdown script", [])
+
+allow if {
+	input.values.controller.lifecycle.preStop.exec.command
+
+	# a properly modified script has multiple command elements (e.g. sh -c "sleep 120 && /wait-shutdown");
+	# "> 1" also rejects an empty command array, which "!= 1" would allow
+	count(input.values.controller.lifecycle.preStop.exec.command) > 1
+}
+
+POLICY_MESSAGE := sprintf("Success: NGINX ingress controller has a properly modified wait-shutdown script set", []) if allow
+
+else := sprintf("Failed: NGINX ingress controller does not have a properly modified wait-shutdown script", []) {
+	true
+}

+ 46 - 0
internal/opa/policies/pod/running.rego

@@ -0,0 +1,46 @@
+package pod.running
+
+import future.keywords.contains
+import future.keywords.every
+import future.keywords.if
+import future.keywords.in
+
+# TODO: this file needs a lot of work to capture all pod statuses and container statuses. 
+# It currently only checks if a pod is in a "Running" status and if all containers are in
+# running status.
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Pod %s in namespace %s should be running", [input.metadata.name, input.metadata.namespace])
+
+# The pod must both be in the "Running" phase and have every container in a
+# running state. Combined into one rule: separate `allow` rules are OR'd in
+# Rego, which would let a Running-phase pod with crashed containers pass.
+allow if {
+	input.status.phase == "Running"
+
+	every containerStatus in input.status.containerStatuses {
+		containerStatus.state.running
+	}
+}
+
+POLICY_MESSAGE := sprintf("Success: pod is running", []) if allow
+
+else := sprintf("Failed: the pod is not running", []) {
+	true
+}
+
+# TODO: REWORK SO THAT FAILURE MESSAGES WILL LOOK SOMETHING LIKE:
+# POLICY_SUCCESS_MESSAGE := "The pod is running successfully"
+# failure contains msg1 if {
+#   input.status.phase != "Running"
+#   msg1 := sprintf("Pod '%s' is not running", [input.metadata.name])
+# }
+# failure contains msg2 if {
+#   some containerStatus in input.status.containerStatuses 
+#   not containerStatus.state.running
+#   msg2 := sprintf("Container '%s' in pod '%s' is not running", [containerStatus.name, input.metadata.name])
+# }

+ 34 - 0
internal/opa/policies/prometheus/alertmanager_memory_limits.rego

@@ -0,0 +1,34 @@
+package prometheus.alertmanager_memory_limits
+
+import future.keywords.if
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# alertmanager:
+#   resources:
+#     limits:
+#       cpu: 200m
+#       memory: 256Mi
+#     requests:
+#       cpu: 10m
+#       memory: 256Mi
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus alert-manager should have memory limits set", [])
+
+allow if {
+	input.values.alertmanager.resources.limits.memory
+}
+
+POLICY_MESSAGE := sprintf("Success: Prometheus alert-manager has memory limits set", []) if allow
+
+else := sprintf("Failed: Prometheus alert-manager does not have memory limits set", []) {
+	true
+}

+ 34 - 0
internal/opa/policies/prometheus/kubestatemetrics_memory_limits.rego

@@ -0,0 +1,34 @@
+package prometheus.kubestatemetrics_memory_limits
+
+import future.keywords.if
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# kube-state-metrics:
+#   resources:
+#     limits:
+#       cpu: 200m
+#       memory: 256Mi
+#     requests:
+#       cpu: 10m
+#       memory: 256Mi
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus kube-state-metrics should have memory limits set", [])
+
+allow if {
+	input.values["kube-state-metrics"].resources.limits.memory
+}
+
+POLICY_MESSAGE := sprintf("Success: Prometheus kube-state-metrics has memory limits set", []) if allow
+
+else := sprintf("Failed: Prometheus kube-state-metrics does not have memory limits set", []) {
+	true
+}

+ 34 - 0
internal/opa/policies/prometheus/nodeexporter_memory_limits.rego

@@ -0,0 +1,34 @@
+package prometheus.nodeexporter_memory_limits
+
+import future.keywords.if
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# nodeExporter:
+#   resources:
+#     limits:
+#       cpu: 200m
+#       memory: 256Mi
+#     requests:
+#       cpu: 10m
+#       memory: 256Mi
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus nodeExporter should have memory limits set", [])
+
+allow if {
+	input.values.nodeExporter.resources.limits.memory
+}
+
+POLICY_MESSAGE := sprintf("Success: Prometheus nodeExporter has memory limits set", []) if allow
+
+else := sprintf("Failed: Prometheus nodeExporter does not have memory limits set", []) {
+	true
+}

+ 34 - 0
internal/opa/policies/prometheus/pushgateway_memory_limits.rego

@@ -0,0 +1,34 @@
+package prometheus.pushgateway_memory_limits
+
+import future.keywords.if
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# pushgateway:
+#   resources:
+#     limits:
+#       cpu: 200m
+#       memory: 256Mi
+#     requests:
+#       cpu: 10m
+#       memory: 256Mi
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus pushgateway should have memory limits set", [])
+
+allow if {
+	input.values.pushgateway.resources.limits.memory
+}
+
+POLICY_MESSAGE := sprintf("Success: Prometheus pushgateway has memory limits set", []) if allow
+
+else := sprintf("Failed: Prometheus pushgateway does not have memory limits set", []) {
+	true
+}

+ 34 - 0
internal/opa/policies/prometheus/server_memory_limits.rego

@@ -0,0 +1,34 @@
+package prometheus.server_memory_limits
+
+import future.keywords.if
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# server:
+#   resources:
+#     limits:
+#       cpu: 500m
+#       memory: 400Mi
+#     requests:
+#       cpu: 100m
+#       memory: 400Mi
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus server should have memory limits set", [])
+
+allow if {
+	input.values.server.resources.limits.memory
+}
+
+POLICY_MESSAGE := sprintf("Success: Prometheus server has memory limits set", []) if allow
+
+else := sprintf("Failed: Prometheus server does not have memory limits set", []) {
+	true
+}

+ 23 - 0
internal/opa/policies/web/web_version.rego

@@ -0,0 +1,23 @@
+package web.version
+
+import future.keywords.if
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+# TODO: set the actual latest stable version
+latest_stable_version := "0.115.0"
+
+POLICY_TITLE := sprintf("The web version should be at least v%s", [latest_stable_version])
+
+trimmedVersion := trim_left(input.version, "v")
+
+# semver.compare(a, b) returns -1 if a < b and 0 if they are equal; the title
+# says "at least", so equal-to-latest must also pass — compare must be <= 0
+allow if semver.compare(latest_stable_version, trimmedVersion) <= 0
+
+POLICY_MESSAGE := sprintf("Success: web version is up-to-date", []) if allow
+
+else := sprintf("Failed: latest stable version is %s, but you are on %s", [latest_stable_version, trimmedVersion]) {
+	true
+}

+ 1 - 0
internal/repository/gorm/migrate.go

@@ -56,6 +56,7 @@ func AutoMigrate(db *gorm.DB, debug bool) error {
 		&models.StackResource{},
 		&models.StackSourceConfig{},
 		&models.StackEnvGroup{},
+		&models.MonitorTestResult{},
 		&ints.KubeIntegration{},
 		&ints.BasicIntegration{},
 		&ints.OIDCIntegration{},

+ 44 - 0
internal/repository/gorm/monitor.go

@@ -0,0 +1,44 @@
+package gorm
+
+import (
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+	"gorm.io/gorm"
+)
+
+// MonitorTestResultRepository contains methods for querying MonitorTestResult models
+type MonitorTestResultRepository struct {
+	db *gorm.DB
+}
+
+// NewMonitorTestResultRepository returns a MonitorTestResultRepository which uses
+// gorm.DB for querying the database
+func NewMonitorTestResultRepository(db *gorm.DB) repository.MonitorTestResultRepository {
+	return &MonitorTestResultRepository{db}
+}
+
+func (m *MonitorTestResultRepository) CreateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
+	if err := m.db.Create(monitor).Error; err != nil {
+		return nil, err
+	}
+
+	return monitor, nil
+}
+
+func (m *MonitorTestResultRepository) ReadMonitorTestResult(projectID, clusterID uint, objectID string) (*models.MonitorTestResult, error) {
+	res := &models.MonitorTestResult{}
+
+	// Use First (not Find) so gorm.ErrRecordNotFound is returned when no row
+	// matches — the recommender job relies on that error to decide between
+	// create and update. The column is object_id, matching
+	// models.MonitorTestResult.ObjectID (there is no operation_id column).
+	if err := m.db.Where("project_id = ? AND cluster_id = ? AND object_id = ?", projectID, clusterID, objectID).First(res).Error; err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (m *MonitorTestResultRepository) UpdateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
+	if err := m.db.Save(monitor).Error; err != nil {
+		return nil, err
+	}
+
+	return monitor, nil
+}

+ 6 - 0
internal/repository/gorm/repository.go

@@ -48,6 +48,7 @@ type GormRepository struct {
 	policy                    repository.PolicyRepository
 	tag                       repository.TagRepository
 	stack                     repository.StackRepository
+	monitor                   repository.MonitorTestResultRepository
 }
 
 func (t *GormRepository) User() repository.UserRepository {
@@ -214,6 +215,10 @@ func (t *GormRepository) Stack() repository.StackRepository {
 	return t.stack
 }
 
+func (t *GormRepository) MonitorTestResult() repository.MonitorTestResultRepository {
+	return t.monitor
+}
+
 // NewRepository returns a Repository which persists users in memory
 // and accepts a parameter that can trigger read/write errors
 func NewRepository(db *gorm.DB, key *[32]byte, storageBackend credentials.CredentialStorage) repository.Repository {
@@ -259,5 +264,6 @@ func NewRepository(db *gorm.DB, key *[32]byte, storageBackend credentials.Creden
 		policy:                    NewPolicyRepository(db),
 		tag:                       NewTagRepository(db),
 		stack:                     NewStackRepository(db),
+		monitor:                   NewMonitorTestResultRepository(db),
 	}
 }

+ 9 - 0
internal/repository/monitor.go

@@ -0,0 +1,9 @@
+package repository
+
+import "github.com/porter-dev/porter/internal/models"
+
+type MonitorTestResultRepository interface {
+	CreateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error)
+	ReadMonitorTestResult(projectID, clusterID uint, objectID string) (*models.MonitorTestResult, error)
+	UpdateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error)
+}

+ 1 - 0
internal/repository/repository.go

@@ -42,4 +42,5 @@ type Repository interface {
 	Policy() PolicyRepository
 	Tag() TagRepository
 	Stack() StackRepository
+	MonitorTestResult() MonitorTestResultRepository
 }

+ 24 - 0
internal/repository/test/monitor.go

@@ -0,0 +1,24 @@
+package test
+
+import (
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+)
+
+type MonitorTestResultRepository struct{}
+
+func NewMonitorTestResultRepository(canQuery bool) repository.MonitorTestResultRepository {
+	return &MonitorTestResultRepository{}
+}
+
+func (n *MonitorTestResultRepository) CreateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
+	panic("not implemented") // TODO: Implement
+}
+
+func (n *MonitorTestResultRepository) ReadMonitorTestResult(projectID, clusterID uint, objectID string) (*models.MonitorTestResult, error) {
+	panic("not implemented") // TODO: Implement
+}
+
+func (n *MonitorTestResultRepository) UpdateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
+	panic("not implemented") // TODO: Implement
+}

+ 6 - 0
internal/repository/test/repository.go

@@ -46,6 +46,7 @@ type TestRepository struct {
 	policy                    repository.PolicyRepository
 	tag                       repository.TagRepository
 	stack                     repository.StackRepository
+	monitor                   repository.MonitorTestResultRepository
 }
 
 func (t *TestRepository) User() repository.UserRepository {
@@ -212,6 +213,10 @@ func (t *TestRepository) Stack() repository.StackRepository {
 	return t.stack
 }
 
+func (t *TestRepository) MonitorTestResult() repository.MonitorTestResultRepository {
+	return t.monitor
+}
+
 // NewRepository returns a Repository which persists users in memory
 // and accepts a parameter that can trigger read/write errors
 func NewRepository(canQuery bool, failingMethods ...string) repository.Repository {
@@ -257,5 +262,6 @@ func NewRepository(canQuery bool, failingMethods ...string) repository.Repositor
 		policy:                    NewPolicyRepository(canQuery),
 		tag:                       NewTagRepository(),
 		stack:                     NewStackRepository(),
+		monitor:                   NewMonitorTestResultRepository(canQuery),
 	}
 }

+ 233 - 0
workers/jobs/recommender.go

@@ -0,0 +1,233 @@
+//go:build ee
+
+/*
+
+                            === NGINX Recommender Job ===
+
+This job checks an NGINX instance installed on a cluster and makes a recommendation.
+
+TODO: recommender alg details
+
+*/
+
+package jobs
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/porter-dev/porter/api/server/shared/config/env"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+
+	"github.com/porter-dev/porter/ee/integrations/vault"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/oauth"
+	"github.com/porter-dev/porter/internal/opa"
+	"github.com/porter-dev/porter/internal/repository"
+	rcreds "github.com/porter-dev/porter/internal/repository/credentials"
+	rgorm "github.com/porter-dev/porter/internal/repository/gorm"
+	"golang.org/x/oauth2"
+	"gorm.io/gorm"
+)
+
+type recommender struct {
+	enqueueTime          time.Time
+	db                   *gorm.DB
+	repo                 repository.Repository
+	doConf               *oauth2.Config
+	projectID, clusterID uint
+	collectionName       string
+	policies             *opa.KubernetesPolicies
+}
+
+// RecommenderOpts holds the options required to run this job
+type RecommenderOpts struct {
+	DBConf         *env.DBConf
+	DOClientID     string
+	DOClientSecret string
+	DOScopes       []string
+	ServerURL      string
+
+	Input map[string]interface{}
+}
+
+type recommenderInput struct {
+	ProjectID uint `form:"required" mapstructure:"project_id"`
+	ClusterID uint `form:"required" mapstructure:"cluster_id"`
+
+	CollectionName string `form:"required" mapstructure:"name"`
+}
+
+type Recommendation struct {
+	// ID         RecommendationID
+	Message   string
+	Automatic bool
+	// Severity   RecommendationSeverity
+	Warning    string
+	LastTested time.Time
+}
+
+func NewRecommender(
+	db *gorm.DB,
+	enqueueTime time.Time,
+	opts *RecommenderOpts,
+	opaPolicies *opa.KubernetesPolicies,
+) (*recommender, error) {
+	var credBackend rcreds.CredentialStorage
+
+	if opts.DBConf.VaultAPIKey != "" && opts.DBConf.VaultServerURL != "" && opts.DBConf.VaultPrefix != "" {
+		credBackend = vault.NewClient(
+			opts.DBConf.VaultServerURL,
+			opts.DBConf.VaultAPIKey,
+			opts.DBConf.VaultPrefix,
+		)
+	}
+
+	var key [32]byte
+
+	for i, b := range []byte(opts.DBConf.EncryptionKey) {
+		key[i] = b
+	}
+
+	repo := rgorm.NewRepository(db, &key, credBackend)
+
+	doConf := oauth.NewDigitalOceanClient(&oauth.Config{
+		ClientID:     opts.DOClientID,
+		ClientSecret: opts.DOClientSecret,
+		Scopes:       opts.DOScopes,
+		BaseURL:      opts.ServerURL,
+	})
+
+	// parse input
+	parsedInput := &recommenderInput{}
+	err := mapstructure.Decode(opts.Input, parsedInput)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// validate
+	validator := requestutils.NewDefaultValidator()
+
+	if requestErr := validator.Validate(parsedInput); requestErr != nil {
+		return nil, fmt.Errorf(requestErr.Error())
+	}
+
+	return &recommender{
+		enqueueTime, db, repo, doConf, parsedInput.ProjectID, parsedInput.ClusterID, parsedInput.CollectionName, opaPolicies,
+	}, nil
+}
+
+func (n *recommender) ID() string {
+	return "recommender"
+}
+
+func (n *recommender) EnqueueTime() time.Time {
+	return n.enqueueTime
+}
+
+func (n *recommender) Run() error {
+	fmt.Println(n.projectID, n.clusterID)
+
+	cluster, err := n.repo.Cluster().ReadCluster(n.projectID, n.clusterID)
+
+	if err != nil {
+		log.Printf("error reading cluster ID %d: %v. skipping cluster ...", n.clusterID, err)
+		return err
+	}
+
+	k8sAgent, err := kubernetes.GetAgentOutOfClusterConfig(&kubernetes.OutOfClusterConfig{
+		Cluster:                   cluster,
+		Repo:                      n.repo,
+		DigitalOceanOAuth:         n.doConf,
+		AllowInClusterConnections: false,
+	})
+
+	if err != nil {
+		log.Printf("error getting k8s agent for cluster ID %d: %v. skipping cluster ...", n.clusterID, err)
+		return err
+	}
+
+	runner := opa.NewRunner(n.policies, k8sAgent)
+
+	queryResults, err := runner.GetRecommendationsByName(n.collectionName)
+
+	if err != nil {
+		log.Printf("error querying opa policies for cluster ID %d: %v. skipping cluster ...", n.clusterID, err)
+		return err
+	}
+
+	for _, queryRes := range queryResults {
+		fmt.Println(queryRes.ObjectID, queryRes.Allow, queryRes.PolicyTitle, queryRes.PolicyMessage)
+
+		monitor, err := n.repo.MonitorTestResult().ReadMonitorTestResult(n.projectID, n.clusterID, queryRes.ObjectID)
+
+		if err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				monitor, err = n.repo.MonitorTestResult().CreateMonitorTestResult(n.getMonitorTestResultFromQueryResult(queryRes))
+			} else {
+				return err
+			}
+		} else {
+			monitor, err = n.repo.MonitorTestResult().UpdateMonitorTestResult(mergeMonitorTestResultFromQueryResult(monitor, queryRes))
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (n *recommender) getMonitorTestResultFromQueryResult(queryRes *opa.OPARecommenderQueryResult) *models.MonitorTestResult {
+	runResult := types.MonitorTestStatusSuccess
+
+	if !queryRes.Allow {
+		runResult = types.MonitorTestStatusFailed
+	}
+
+	currTime := time.Now()
+
+	return &models.MonitorTestResult{
+		ProjectID:        n.projectID,
+		ClusterID:        n.clusterID,
+		Category:         queryRes.CategoryName,
+		ObjectID:         queryRes.ObjectID,
+		LastStatusChange: &currTime,
+		LastTested:       &currTime,
+		LastRunResult:    string(runResult),
+		Title:            queryRes.PolicyTitle,
+		Message:          queryRes.PolicyMessage,
+		Severity:         queryRes.PolicySeverity,
+	}
+}
+
+func mergeMonitorTestResultFromQueryResult(monitor *models.MonitorTestResult, queryRes *opa.OPARecommenderQueryResult) *models.MonitorTestResult {
+	runResult := types.MonitorTestStatusSuccess
+
+	if !queryRes.Allow {
+		runResult = types.MonitorTestStatusFailed
+	}
+
+	currTime := time.Now()
+
+	// a status change means the new run result DIFFERS from the last recorded
+	// one; "==" would stamp LastStatusChange only when nothing changed
+	if isStatusChange := monitor.LastRunResult != string(runResult); isStatusChange {
+		monitor.LastStatusChange = &currTime
+	}
+
+	monitor.LastTested = &currTime
+	monitor.LastRunResult = string(runResult)
+	monitor.Title = queryRes.PolicyTitle
+	monitor.Message = queryRes.PolicyMessage
+	monitor.Severity = queryRes.PolicySeverity
+
+	return monitor
+}
+
+func (n *recommender) SetData([]byte) {}

+ 0 - 358
workers/jobs/recommender_nginx_ingress.go

@@ -1,358 +0,0 @@
-//go:build ee
-
-/*
-
-                            === NGINX Recommender Job ===
-
-This job checks an NGINX instance installed on a cluster and makes a recommendation.
-
-TODO: recommender alg details
-
-*/
-
-package jobs
-
-import (
-	"context"
-	"fmt"
-	"log"
-	"os"
-	"strings"
-	"time"
-
-	"github.com/mitchellh/mapstructure"
-	"github.com/porter-dev/porter/api/server/shared/config/env"
-	"github.com/porter-dev/porter/api/server/shared/requestutils"
-	"github.com/porter-dev/porter/pkg/logger"
-	"k8s.io/apimachinery/pkg/api/resource"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	"github.com/porter-dev/porter/ee/integrations/vault"
-	"github.com/porter-dev/porter/internal/helm"
-	"github.com/porter-dev/porter/internal/helm/grapher"
-	"github.com/porter-dev/porter/internal/kubernetes"
-	"github.com/porter-dev/porter/internal/models"
-	"github.com/porter-dev/porter/internal/oauth"
-	"github.com/porter-dev/porter/internal/repository"
-	rcreds "github.com/porter-dev/porter/internal/repository/credentials"
-	rgorm "github.com/porter-dev/porter/internal/repository/gorm"
-	"golang.org/x/oauth2"
-	"gorm.io/gorm"
-)
-
-type nginxRecommender struct {
-	enqueueTime          time.Time
-	db                   *gorm.DB
-	repo                 repository.Repository
-	doConf               *oauth2.Config
-	projectID, clusterID uint
-}
-
-// HelmRevisionsCountTrackerOpts holds the options required to run this job
-type NGINXRecommenderOpts struct {
-	DBConf         *env.DBConf
-	DOClientID     string
-	DOClientSecret string
-	DOScopes       []string
-	ServerURL      string
-
-	Input map[string]interface{}
-}
-
-type nginxRecommenderInput struct {
-	ProjectID uint `form:"required" mapstructure:"project_id"`
-	ClusterID uint `form:"required" mapstructure:"cluster_id"`
-}
-
-type RecommendationSeverity string
-
-const (
-	RecommendationSeverityUrgent RecommendationSeverity = "urgent"
-	RecommendationSeverityHigh   RecommendationSeverity = "high"
-	RecommendationSeverityLow    RecommendationSeverity = "low"
-)
-
-type RecommendationID string
-
-const (
-	RecommendationIDNginxIngressHPA                      RecommendationID = "nginx-ingress-hpa"
-	RecommendationIDNginxIngressTopologySpreadConstraint RecommendationID = "nginx-ingress-topology-spread-constraint"
-	RecommendationIDNginxIngressMemory                   RecommendationID = "nginx-ingress-memory-limit"
-	RecommendationIDNginxLifecycleHook                   RecommendationID = "nginx-ingress-lifecycle-hook"
-)
-
-type Recommendation struct {
-	ID         RecommendationID
-	Message    string
-	Automatic  bool
-	Severity   RecommendationSeverity
-	Warning    string
-	LastTested time.Time
-}
-
-func NewNGINXRecommender(
-	db *gorm.DB,
-	enqueueTime time.Time,
-	opts *NGINXRecommenderOpts,
-) (*nginxRecommender, error) {
-	var credBackend rcreds.CredentialStorage
-
-	if opts.DBConf.VaultAPIKey != "" && opts.DBConf.VaultServerURL != "" && opts.DBConf.VaultPrefix != "" {
-		credBackend = vault.NewClient(
-			opts.DBConf.VaultServerURL,
-			opts.DBConf.VaultAPIKey,
-			opts.DBConf.VaultPrefix,
-		)
-	}
-
-	var key [32]byte
-
-	for i, b := range []byte(opts.DBConf.EncryptionKey) {
-		key[i] = b
-	}
-
-	repo := rgorm.NewRepository(db, &key, credBackend)
-
-	doConf := oauth.NewDigitalOceanClient(&oauth.Config{
-		ClientID:     opts.DOClientID,
-		ClientSecret: opts.DOClientSecret,
-		Scopes:       opts.DOScopes,
-		BaseURL:      opts.ServerURL,
-	})
-
-	// parse input
-	parsedInput := &nginxRecommenderInput{}
-	err := mapstructure.Decode(opts.Input, parsedInput)
-
-	if err != nil {
-		return nil, err
-	}
-
-	// validate
-	validator := requestutils.NewDefaultValidator()
-
-	if requestErr := validator.Validate(parsedInput); requestErr != nil {
-		return nil, fmt.Errorf(requestErr.Error())
-	}
-
-	return &nginxRecommender{
-		enqueueTime, db, repo, doConf, parsedInput.ProjectID, parsedInput.ClusterID,
-	}, nil
-}
-
-func (n *nginxRecommender) ID() string {
-	return "nginx-recommender"
-}
-
-func (n *nginxRecommender) EnqueueTime() time.Time {
-	return n.enqueueTime
-}
-
-func (n *nginxRecommender) Run() error {
-	fmt.Println(n.projectID, n.clusterID)
-
-	cluster, err := n.repo.Cluster().ReadCluster(n.projectID, n.clusterID)
-
-	if err != nil {
-		log.Printf("error reading cluster ID %d: %v. skipping cluster ...", n.clusterID, err)
-		return err
-	}
-
-	k8sAgent, err := kubernetes.GetAgentOutOfClusterConfig(&kubernetes.OutOfClusterConfig{
-		Cluster:                   cluster,
-		Repo:                      n.repo,
-		DigitalOceanOAuth:         n.doConf,
-		AllowInClusterConnections: false,
-	})
-
-	if err != nil {
-		log.Printf("error getting k8s agent for cluster ID %d: %v. skipping cluster ...", n.clusterID, err)
-		return err
-	}
-
-	helmAgent, err := helm.GetAgentOutOfClusterConfig(&helm.Form{
-		Cluster:                   cluster,
-		Namespace:                 "ingress-nginx",
-		Repo:                      n.repo,
-		DigitalOceanOAuth:         n.doConf,
-		AllowInClusterConnections: false,
-	}, logger.New(true, os.Stdout))
-
-	if err != nil {
-		log.Printf("error getting helm agent for cluster ID %d: %v. skipping cluster ...", n.clusterID, err)
-		return err
-	}
-
-	// read the nginx ingress helm release
-	nginxIngressRelease, err := helmAgent.GetRelease("nginx-ingress", 0, false)
-
-	if err != nil {
-		log.Printf("could not get nginx-ingress for cluster ID %d: %v. skipping cluster ...", n.clusterID, err)
-		return err
-	}
-
-	// parse the manifests for the deployment name
-	multiArr := grapher.ImportMultiDocYAML([]byte(nginxIngressRelease.Manifest))
-
-	grapherObj := grapher.ParseObjs(multiArr, "ingress-nginx")
-
-	recs := generateRecommendations(k8sAgent, cluster, grapherObj)
-
-	for _, rec := range recs {
-		fmt.Println(rec.ID, rec.Message)
-	}
-
-	return nil
-}
-
-func generateRecommendations(k8sAgent *kubernetes.Agent, cluster *models.Cluster, grapherObj []grapher.Object) []*Recommendation {
-	res := make([]*Recommendation, 0)
-
-	if hpaRec := generateHPARecommendation(grapherObj); hpaRec != nil {
-		res = append(res, hpaRec)
-	}
-
-	if tscRec := generateTopologySpreadConstraintRecommendation(k8sAgent, grapherObj); tscRec != nil {
-		res = append(res, tscRec)
-	}
-
-	if memRec := generateMemoryLimitRecommendation(k8sAgent, grapherObj); memRec != nil {
-		res = append(res, memRec)
-	}
-
-	if lhRec := generateLifecycleHookRecommendation(k8sAgent, cluster, grapherObj); lhRec != nil {
-		res = append(res, lhRec)
-	}
-
-	return res
-}
-
-func generateHPARecommendation(grapherObj []grapher.Object) *Recommendation {
-	// check if a horizontal pod autoscaler has been enabled
-	isEnabled := false
-
-	for _, obj := range grapherObj {
-		if strings.ToLower(obj.Kind) == "horizontalpodautoscaler" {
-			isEnabled = true
-		}
-	}
-
-	// if not enabled, return recommendation
-	if !isEnabled {
-		return &Recommendation{
-			Severity:  RecommendationSeverityLow,
-			ID:        "nginx-ingress-hpa",
-			Message:   "Horizontal pod autoscaling should be enabled on the NGINX ingress controller, which allows for the proxy to scale during load.",
-			Automatic: true,
-		}
-	}
-
-	return nil
-}
-
-func generateTopologySpreadConstraintRecommendation(k8sAgent *kubernetes.Agent, grapherObj []grapher.Object) *Recommendation {
-	for _, obj := range grapherObj {
-		if strings.ToLower(obj.Kind) == "deployment" {
-			// query the live deployment
-			depl, err := k8sAgent.Clientset.AppsV1().Deployments(obj.Namespace).Get(context.Background(), obj.Name, v1.GetOptions{})
-
-			if err != nil {
-				continue
-			}
-
-			// make sure deployment is a controller type
-			if compLabel, exists := depl.Labels["app.kubernetes.io/component"]; exists && compLabel == "controller" {
-				// check if the pod has a topology spread constraint set
-				if len(depl.Spec.Template.Spec.TopologySpreadConstraints) == 0 {
-					return &Recommendation{
-						Severity:  RecommendationSeverityLow,
-						ID:        RecommendationIDNginxIngressTopologySpreadConstraint,
-						Message:   "Topology spread constraints should be enabled on the NGINX deployment, which ensures that the NGINX instances are balanced across different zones and machines.",
-						Automatic: true,
-					}
-				}
-			}
-		}
-	}
-
-	return nil
-}
-
-func generateMemoryLimitRecommendation(k8sAgent *kubernetes.Agent, grapherObj []grapher.Object) *Recommendation {
-	for _, obj := range grapherObj {
-		if strings.ToLower(obj.Kind) == "deployment" {
-			// query the live deployment
-			depl, err := k8sAgent.Clientset.AppsV1().Deployments(obj.Namespace).Get(context.Background(), obj.Name, v1.GetOptions{})
-
-			if err != nil {
-				continue
-			}
-
-			// make sure deployment is a controller type
-			if compLabel, exists := depl.Labels["app.kubernetes.io/component"]; exists && compLabel == "controller" {
-				// make sure the controller container has memory limits set
-				for _, container := range depl.Spec.Template.Spec.Containers {
-					if container.Name == "controller" {
-						if mem := container.Resources.Limits.Memory(); mem == nil || resource.NewQuantity(0, resource.BinarySI).Equal(*mem) {
-							return &Recommendation{
-								Severity:  RecommendationSeverityHigh,
-								ID:        RecommendationIDNginxIngressMemory,
-								Message:   "Memory limits should be enabled for the NGINX instance.",
-								Automatic: true,
-							}
-						}
-					}
-				}
-			}
-		}
-	}
-
-	return nil
-}
-
-func generateLifecycleHookRecommendation(k8sAgent *kubernetes.Agent, cluster *models.Cluster, grapherObj []grapher.Object) *Recommendation {
-	// only generate this recommendation for EKS clusters
-	if cluster.AWSIntegrationID == 0 {
-		return nil
-	}
-
-	rec := &Recommendation{
-		Severity:  RecommendationSeverityLow,
-		ID:        RecommendationIDNginxLifecycleHook,
-		Message:   "Lifecycle hook should be modified to sleep for 2 minutes before NGINX ingress termination, to allow for AWS load balancers to update targets.",
-		Automatic: true,
-	}
-
-	for _, obj := range grapherObj {
-		if strings.ToLower(obj.Kind) == "deployment" {
-			// query the live deployment
-			depl, err := k8sAgent.Clientset.AppsV1().Deployments(obj.Namespace).Get(context.Background(), obj.Name, v1.GetOptions{})
-
-			if err != nil {
-				continue
-			}
-
-			// make sure deployment is a controller type
-			if compLabel, exists := depl.Labels["app.kubernetes.io/component"]; exists && compLabel == "controller" {
-				// make sure the controller container has memory limits set
-				for _, container := range depl.Spec.Template.Spec.Containers {
-					if container.Name != "controller" {
-						continue
-					}
-
-					if container.Lifecycle == nil || container.Lifecycle.PreStop == nil || container.Lifecycle.PreStop.Exec == nil {
-						return rec
-					}
-
-					if len(container.Lifecycle.PreStop.Exec.Command) == 0 || container.Lifecycle.PreStop.Exec.Command[0] == "/wait-shutdown" {
-						return rec
-					}
-				}
-			}
-		}
-	}
-
-	return nil
-}
-
-func (n *nginxRecommender) SetData([]byte) {}

+ 41 - 7
workers/main.go

@@ -18,15 +18,23 @@ import (
 	"github.com/joeshaw/envdecode"
 	"github.com/porter-dev/porter/api/server/shared/config/env"
 	"github.com/porter-dev/porter/internal/adapter"
+	"github.com/porter-dev/porter/internal/opa"
+	"github.com/porter-dev/porter/internal/repository"
 	"github.com/porter-dev/porter/internal/worker"
 	"github.com/porter-dev/porter/workers/jobs"
 	"gorm.io/gorm"
+
+	"github.com/porter-dev/porter/ee/integrations/vault"
+	rcreds "github.com/porter-dev/porter/internal/repository/credentials"
+	pgorm "github.com/porter-dev/porter/internal/repository/gorm"
 )
 
 var (
-	jobQueue   chan worker.Job
-	envDecoder = EnvConf{}
-	dbConn     *gorm.DB
+	jobQueue    chan worker.Job
+	envDecoder  = EnvConf{}
+	dbConn      *gorm.DB
+	repo        repository.Repository
+	opaPolicies *opa.KubernetesPolicies
 )
 
 // EnvConf holds the environment variables for this binary
@@ -43,6 +51,8 @@ type EnvConf struct {
 	S3BucketName       string `env:"S3_BUCKET_NAME"`
 	EncryptionKey      string `env:"S3_ENCRYPTION_KEY"`
 
+	OPAConfigFileDir string `env:"OPA_CONFIG_FILE_DIR,default=./internal/opa"`
+
 	Port uint `env:"PORT,default=3000"`
 }
 
@@ -62,6 +72,30 @@ func main() {
 
 	dbConn = db
 
+	var credBackend rcreds.CredentialStorage
+
+	if envDecoder.DBConf.VaultAPIKey != "" && envDecoder.DBConf.VaultServerURL != "" && envDecoder.DBConf.VaultPrefix != "" {
+		credBackend = vault.NewClient(
+			envDecoder.DBConf.VaultServerURL,
+			envDecoder.DBConf.VaultAPIKey,
+			envDecoder.DBConf.VaultPrefix,
+		)
+	}
+
+	var key [32]byte
+
+	for i, b := range []byte(envDecoder.DBConf.EncryptionKey) {
+		key[i] = b
+	}
+
+	repo = pgorm.NewRepository(db, &key, credBackend)
+
+	opaPolicies, err = opa.LoadPolicies(envDecoder.OPAConfigFileDir)
+
+	if err != nil {
+		log.Fatalln(err)
+	}
+
 	jobQueue = make(chan worker.Job, envDecoder.MaxQueue)
 	d := worker.NewDispatcher(int(envDecoder.MaxWorkers))
 
@@ -174,18 +208,18 @@ func getJob(id string, input map[string]interface{}) worker.Job {
 		}
 
 		return newJob
-	} else if id == "nginx-recommender" {
-		newJob, err := jobs.NewNGINXRecommender(dbConn, time.Now().UTC(), &jobs.NGINXRecommenderOpts{
+	} else if id == "recommender" {
+		newJob, err := jobs.NewRecommender(dbConn, time.Now().UTC(), &jobs.RecommenderOpts{
 			DBConf:         &envDecoder.DBConf,
 			DOClientID:     envDecoder.DOClientID,
 			DOClientSecret: envDecoder.DOClientSecret,
 			DOScopes:       []string{"read", "write"},
 			ServerURL:      envDecoder.ServerURL,
 			Input:          input,
-		})
+		}, opaPolicies)
 
 		if err != nil {
-			log.Printf("error creating job with ID: nginx-recommender. Error: %v", err)
+			log.Printf("error creating job with ID: recommender. Error: %v", err)
 			return nil
 		}