
Merge pull request #2366 from porter-dev/belanger/recommender-system-2

Initial recommender system for Helm chart versions and values
abelanger5 3 years ago
Parent
Commit
287baf8fa6
38 changed files with 1821 additions and 16 deletions
  1. .air.worker.toml (+48 -0)
  2. .github/workflows/dev.yaml (+10 -1)
  3. Makefile (+3 -0)
  4. api/types/monitor.go (+35 -0)
  5. go.mod (+15 -9)
  6. go.sum (+33 -0)
  7. internal/models/monitor.go (+64 -0)
  8. internal/opa/config.yaml (+119 -0)
  9. internal/opa/loader.go (+80 -0)
  10. internal/opa/opa.go (+365 -0)
  11. internal/opa/policies/cert-manager/cainjector_memory_limits.rego (+32 -0)
  12. internal/opa/policies/cert-manager/cert_manager_version.rego (+25 -0)
  13. internal/opa/policies/cert-manager/controller_memory_limits.rego (+32 -0)
  14. internal/opa/policies/cert-manager/webhook_memory_limits.rego (+33 -0)
  15. internal/opa/policies/certificates/expiry_two_weeks.rego (+30 -0)
  16. internal/opa/policies/nginx/memory_limits.rego (+37 -0)
  17. internal/opa/policies/nginx/nginx_topology_spread_constraints.rego (+47 -0)
  18. internal/opa/policies/nginx/nginx_version.rego (+25 -0)
  19. internal/opa/policies/nginx/wait_shutdown.rego (+38 -0)
  20. internal/opa/policies/pod/running.rego (+38 -0)
  21. internal/opa/policies/prometheus/alertmanager_memory_limits.rego (+37 -0)
  22. internal/opa/policies/prometheus/kubestatemetrics_memory_limits.rego (+37 -0)
  23. internal/opa/policies/prometheus/nodeexporter_memory_limits.rego (+37 -0)
  24. internal/opa/policies/prometheus/prometheus_version.rego (+25 -0)
  25. internal/opa/policies/prometheus/pushgateway_memory_limits.rego (+37 -0)
  26. internal/opa/policies/prometheus/server_memory_limits.rego (+37 -0)
  27. internal/opa/policies/web/web_version.rego (+26 -0)
  28. internal/repository/gorm/migrate.go (+1 -0)
  29. internal/repository/gorm/monitor.go (+44 -0)
  30. internal/repository/gorm/repository.go (+6 -0)
  31. internal/repository/monitor.go (+9 -0)
  32. internal/repository/repository.go (+1 -0)
  33. internal/repository/test/monitor.go (+24 -0)
  34. internal/repository/test/repository.go (+6 -0)
  35. scripts/dev-environment/StartWorkerServer.sh (+14 -0)
  36. workers/Dockerfile (+4 -0)
  37. workers/jobs/recommender.go (+300 -0)
  38. workers/main.go (+67 -6)

+ 48 - 0
.air.worker.toml

@@ -0,0 +1,48 @@
+# Config file for [Air](https://github.com/cosmtrek/air) in TOML format
+
+# Working directory
+# . or absolute path, please note that the directories following must be under root.
+root = "."
+tmp_dir = "tmp"
+
+[build]
+# Just plain old shell command. You could use `make` as well.
+cmd = "go build -o ./tmp/workers -tags ee -ldflags=\"-X 'main.Version=dev-ee'\" ./workers"
+
+# Binary file yields from `cmd`.
+bin = "tmp/workers"
+# Customize binary.
+full_bin = "tmp/workers"
+# Watch these filename extensions.
+include_ext = ["go", "mod", "sum", "html"]
+# Ignore these filename extensions or directories.
+exclude_dir = ["tmp", "dashboard"]
+# Watch these directories if you specified.
+include_dir = []
+# Exclude files.
+exclude_file = []
+# This log file places in your tmp_dir.
+log = "air.log"
+# It's not necessary to trigger build each time file changes if it's too frequent.
+delay = 1000 # ms
+# Stop running old binary when build errors occur.
+stop_on_error = true
+# Send Interrupt signal before killing process (windows does not support this feature)
+send_interrupt = false
+# Delay after sending Interrupt signal
+kill_delay = 500 # ms
+
+[log]
+# Show log time
+time = false
+
+[color]
+# Customize each part's color. If no color found, use the raw app log.
+main = "magenta"
+watcher = "cyan"
+build = "yellow"
+runner = "green"
+
+[misc]
+# Delete tmp directory on exit
+clean_on_exit = true

+ 10 - 1
.github/workflows/dev.yaml

@@ -162,13 +162,22 @@ jobs:
           aws-access-key-id: ${{ secrets.ECR_DEV_AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.ECR_DEV_AWS_ACCESS_SECRET_KEY }}
           aws-region: us-east-2
+      - name: Set up Cloud SDK
+        uses: google-github-actions/setup-gcloud@v0
+        with:
+          project_id: ${{ secrets.GCP_PROJECT_ID }}
+          service_account_key: ${{ secrets.GCP_SA_KEY }}
+          export_default_credentials: true
+      - name: Log in to gcloud CLI
+        run: gcloud auth configure-docker
       - name: Login to ECR
         id: login-ecr
         run: |
           aws ecr get-login-password --region us-east-2 | docker login --username AWS --password-stdin 801172602658.dkr.ecr.us-east-2.amazonaws.com
       - name: Build
         run: |
-          DOCKER_BUILDKIT=1 docker build . -t 801172602658.dkr.ecr.us-east-2.amazonaws.com/worker-pool:${{ steps.vars.outputs.sha_short }} -f ./workers/Dockerfile
+          DOCKER_BUILDKIT=1 docker build . -t 801172602658.dkr.ecr.us-east-2.amazonaws.com/worker-pool:${{ steps.vars.outputs.sha_short }} -t gcr.io/porter-dev-273614/worker-pool:dev -f ./workers/Dockerfile
       - name: Push to ECR
         run: |
           docker push 801172602658.dkr.ecr.us-east-2.amazonaws.com/worker-pool:${{ steps.vars.outputs.sha_short }}
+          docker push gcr.io/porter-dev-273614/worker-pool:dev

+ 3 - 0
Makefile

@@ -21,3 +21,6 @@ build-cli-dev:
 
 start-provisioner-dev: install setup-env-files
 	bash ./scripts/dev-environment/StartProvisionerServer.sh
+
+start-worker-dev: install setup-env-files
+	bash ./scripts/dev-environment/StartWorkerServer.sh

+ 35 - 0
api/types/monitor.go

@@ -0,0 +1,35 @@
+package types
+
+import "time"
+
+type MonitorTestStatus string
+
+const (
+	MonitorTestStatusSuccess MonitorTestStatus = "success"
+	MonitorTestStatusFailed  MonitorTestStatus = "failed"
+)
+
+type MonitorTestSeverity string
+
+const (
+	MonitorTestSeverityCritical MonitorTestSeverity = "critical"
+	MonitorTestSeverityHigh     MonitorTestSeverity = "high"
+	MonitorTestSeverityLow      MonitorTestSeverity = "low"
+)
+
+type MonitorTestResult struct {
+	ProjectID uint   `json:"project_id"`
+	ClusterID uint   `json:"cluster_id"`
+	Category  string `json:"category"`
+	ObjectID  string `json:"object_id"`
+
+	LastStatusChange *time.Time `json:"last_status_change"`
+
+	LastTested    *time.Time        `json:"last_tested"`
+	LastRunResult MonitorTestStatus `json:"last_run_result"`
+
+	Title   string `json:"title"`
+	Message string `json:"message"`
+
+	Severity MonitorTestSeverity `json:"severity"`
+}
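For reference, a minimal sketch (not part of this diff) of what a serialized MonitorTestResult looks like. The field values are hypothetical; the ObjectID format follows the one produced by the OPA runner later in this PR, and the snake_case keys come from the struct tags above.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/porter-dev/porter/api/types"
)

func main() {
	now := time.Now()

	// Hypothetical values for a failed nginx memory-limits check.
	res := types.MonitorTestResult{
		ProjectID:     1,
		ClusterID:     2,
		Category:      "nginx",
		ObjectID:      "helm_release/ingress-nginx/nginx-ingress/nginx_memory_limits",
		LastTested:    &now,
		LastRunResult: types.MonitorTestStatusFailed,
		Title:         "NGINX ingress controller should have memory limits set",
		Message:       "Failed: NGINX ingress controller does not have memory limits set",
		Severity:      types.MonitorTestSeverityHigh,
	}

	out, _ := json.MarshalIndent(res, "", "  ")
	fmt.Println(string(out)) // snake_case keys per the struct tags above
}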

+ 15 - 9
go.mod

@@ -51,8 +51,8 @@ require (
 	golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26
 	google.golang.org/api v0.88.0
 	google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03
-	google.golang.org/grpc v1.47.0
-	google.golang.org/protobuf v1.28.0
+	google.golang.org/grpc v1.49.0
+	google.golang.org/protobuf v1.28.1
 	gorm.io/driver/sqlite v1.1.3
 	gorm.io/gorm v1.22.3
 	helm.sh/helm/v3 v3.9.0
@@ -84,7 +84,9 @@ require (
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
 	github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 // indirect
+	github.com/OneOfOne/xxhash v1.2.8 // indirect
 	github.com/PuerkitoBio/goquery v1.5.1 // indirect
+	github.com/agnivade/levenshtein v1.1.1 // indirect
 	github.com/andybalholm/cascadia v1.1.0 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.16.4 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.15.9 // indirect
@@ -114,8 +116,12 @@ require (
 	github.com/mmcdole/gofeed v1.1.3 // indirect
 	github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/open-policy-agent/opa v0.44.0 // indirect
 	github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
+	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
+	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/xanzy/go-gitlab v0.68.0 // indirect
+	github.com/yashtewari/glob-intersection v0.1.0 // indirect
 	go.uber.org/goleak v1.1.12 // indirect
 )
 
@@ -135,7 +141,7 @@ require (
 	github.com/Masterminds/sprig/v3 v3.2.2 // indirect
 	github.com/Masterminds/squirrel v1.5.3 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/Microsoft/hcsshim v0.9.3 // indirect
+	github.com/Microsoft/hcsshim v0.9.4 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/apex/log v1.9.0 // indirect
@@ -150,7 +156,7 @@ require (
 	github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
 	github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
 	github.com/containerd/cgroups v1.0.3 // indirect
-	github.com/containerd/containerd v1.6.6 // indirect
+	github.com/containerd/containerd v1.6.8 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -252,10 +258,10 @@ require (
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.12.2 // indirect
+	github.com/prometheus/client_golang v1.13.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.35.0 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/rubenv/sql-migrate v1.1.2 // indirect
@@ -265,7 +271,7 @@ require (
 	github.com/sendgrid/rest v2.6.3+incompatible // indirect
 	github.com/sergi/go-diff v1.2.0 // indirect
 	github.com/shopspring/decimal v1.3.1 // indirect
-	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/spf13/afero v1.6.0 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -282,7 +288,7 @@ require (
 	go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
 	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
-	golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b // indirect
+	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
 	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect

+ 33 - 0
go.sum

@@ -207,6 +207,8 @@ github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQN
 github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo=
 github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
+github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -214,6 +216,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
 github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=
 github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE=
@@ -230,6 +234,8 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:H
 github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
 github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
 github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
 github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
@@ -265,6 +271,7 @@ github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3st
 github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
 github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -495,6 +502,8 @@ github.com/containerd/containerd v1.6.3 h1:JfgUEIAH07xDWk6kqz0P3ArZt+KJ9YeihSC9u
 github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
 github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0=
 github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
+github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs=
+github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -621,6 +630,7 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
 github.com/digitalocean/godo v1.75.0 h1:UijUv60I095CqJqGKdjY2RTPnnIa4iFddmq+1wfyS4Y=
 github.com/digitalocean/godo v1.75.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
@@ -1637,6 +1647,8 @@ github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
 github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/open-policy-agent/opa v0.44.0 h1:sEZthsrWBqIN+ShTMJ0Hcz6a3GkYsY4FaB2S/ou2hZk=
+github.com/open-policy-agent/opa v0.44.0/go.mod h1:YpJaFIk5pq89n/k72c1lVvfvR5uopdJft2tMg1CW/yU=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1736,6 +1748,8 @@ github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVD
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
 github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
+github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1758,6 +1772,8 @@ github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuI
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE=
 github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1772,6 +1788,8 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
 github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
@@ -1783,6 +1801,8 @@ github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQ
 github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
 github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
 github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
 github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 h1:xe+mmCnDN82KhC010l3NfYlA8ZbOuzbXAzSYBa6wbMc=
 github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8/go.mod h1:WIfMkQNY+oq/mWwtsjOYHIZBuwthioY2srOmljJkTnk=
@@ -1866,6 +1886,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -1952,7 +1974,10 @@ github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=
 github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
 github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
 github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
 github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
@@ -2039,6 +2064,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
 github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
 github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
+github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg=
+github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
 github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc=
 github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
 github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
@@ -2515,6 +2542,8 @@ golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8=
 golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -2885,6 +2914,8 @@ google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
 google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
 google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -2901,6 +2932,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

+ 64 - 0
internal/models/monitor.go

@@ -0,0 +1,64 @@
+package models
+
+import (
+	"strings"
+	"time"
+
+	"github.com/porter-dev/porter/api/types"
+	"gorm.io/gorm"
+)
+
+type MonitorTestResult struct {
+	gorm.Model
+
+	ProjectID uint
+	ClusterID uint
+	Category  string
+	ObjectID  string
+
+	LastStatusChange  *time.Time
+	LastTested        *time.Time
+	LastRunResult     string
+	LastRunResultEnum uint
+
+	Title   string
+	Message string
+
+	Severity     string
+	SeverityEnum uint
+}
+
+func (m *MonitorTestResult) ToMonitorTestResultType() *types.MonitorTestResult {
+	return &types.MonitorTestResult{
+		ProjectID:        m.ProjectID,
+		ClusterID:        m.ClusterID,
+		Category:         m.Category,
+		ObjectID:         m.ObjectID,
+		LastStatusChange: m.LastStatusChange,
+		LastTested:       m.LastTested,
+		LastRunResult:    types.MonitorTestStatus(m.LastRunResult),
+		Title:            m.Title,
+		Message:          m.Message,
+		Severity:         types.MonitorTestSeverity(m.Severity),
+	}
+}
+
+func GetSeverityEnum(severity string) uint {
+	switch strings.ToLower(severity) {
+	case string(types.MonitorTestSeverityCritical):
+		return 2
+	case string(types.MonitorTestSeverityHigh):
+		return 1
+	default:
+		return 0
+	}
+}
+
+func GetLastRunResultEnum(lastRunResult string) uint {
+	switch strings.ToLower(lastRunResult) {
+	case string(types.MonitorTestStatusFailed):
+		return 1
+	default:
+		return 0
+	}
+}
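As a usage sketch (the field values here are hypothetical), the enum helpers keep the sortable integer columns in sync with the string fields before a record is persisted:

package main

import (
	"fmt"

	"github.com/porter-dev/porter/internal/models"
)

func main() {
	// Hypothetical record; in the worker these fields would be populated
	// from an OPA recommender query result.
	m := &models.MonitorTestResult{
		Category:      "nginx",
		ObjectID:      "helm_release/ingress-nginx/nginx-ingress/nginx_memory_limits",
		LastRunResult: "failed",
		Severity:      "high",
	}

	// Keep the sortable integer columns in sync with the string fields.
	m.SeverityEnum = models.GetSeverityEnum(m.Severity)                // "critical" -> 2, "high" -> 1, else 0
	m.LastRunResultEnum = models.GetLastRunResultEnum(m.LastRunResult) // "failed" -> 1, else 0

	// Convert to the API type for responses.
	fmt.Printf("%+v\n", m.ToMonitorTestResultType())
}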

+ 119 - 0
internal/opa/config.yaml

@@ -0,0 +1,119 @@
+web:
+  kind: "helm_release"
+  match:
+    chart_name: "web"
+  policies:
+  - path: "./policies/web/web_version.rego"
+    name: "web.version"
+nginx:
+  kind: "helm_release"
+  match:
+    name: nginx-ingress
+    namespace: ingress-nginx
+  mustExist: true
+  policies:
+  - path: "./policies/nginx/nginx_version.rego"
+    name: "nginx.version"
+  - path: "./policies/nginx/nginx_topology_spread_constraints.rego"
+    name: "nginx.topology_spread_constraints"
+  - path: "./policies/nginx/memory_limits.rego"
+    name: "nginx.memory_limits"
+  - path: "./policies/nginx/wait_shutdown.rego"
+    name: "nginx.wait_shutdown"
+cert-manager:
+  kind: "helm_release"
+  match:
+    name: cert-manager
+    namespace: cert-manager
+  mustExist: true
+  policies:
+  - path: "./policies/cert-manager/cert_manager_version.rego"
+    name: "cert_manager.version"
+  - path: "./policies/cert-manager/cainjector_memory_limits.rego"
+    name: "cert_manager.cainjector_memory_limits"
+  - path: "./policies/cert-manager/controller_memory_limits.rego"
+    name: "cert_manager.controller_memory_limits"
+  - path: "./policies/cert-manager/webhook_memory_limits.rego"
+    name: "cert_manager.webhook_memory_limits"
+prometheus:
+  kind: "helm_release"
+  match:
+    name: prometheus
+    namespace: monitoring
+  mustExist: true
+  policies:
+  - path: "./policies/prometheus/server_memory_limits.rego"
+    name: "prometheus.server_memory_limits"
+  - path: "./policies/prometheus/alertmanager_memory_limits.rego"
+    name: "prometheus.alertmanager_memory_limits"
+  - path: "./policies/prometheus/kubestatemetrics_memory_limits.rego"
+    name: "prometheus.kubestatemetrics_memory_limits"
+  - path: "./policies/prometheus/pushgateway_memory_limits.rego"
+    name: "prometheus.pushgateway_memory_limits"
+  - path: "./policies/prometheus/nodeexporter_memory_limits.rego"
+    name: "prometheus.nodeexporter_memory_limits"
+  - path: "./policies/prometheus/prometheus_version.rego"
+    name: "prometheus.version"
+nginx_pod:
+  kind: "pod"
+  match:
+    namespace: ingress-nginx
+    labels:
+      app.kubernetes.io/component: "controller"
+      app.kubernetes.io/instance: "nginx-ingress"
+      app.kubernetes.io/name: "ingress-nginx"
+  policies:
+  - path: "./policies/pod/running.rego"
+    name: "pod.running"
+prometheus_server_pod:
+  kind: "pod"
+  match:
+    namespace: monitoring
+    labels:
+      app: "prometheus"
+      component: "server"
+      release: "prometheus"
+  policies:
+  - path: "./policies/pod/running.rego"
+    name: "pod.running"
+prometheus_alertmanager_pod:
+  kind: "pod"
+  match:
+    namespace: monitoring
+    labels:
+      app: "prometheus"
+      component: "alertmanager"
+      release: "prometheus"
+  policies:
+  - path: "./policies/pod/running.rego"
+    name: "pod.running"
+porter_agent_pod:
+  kind: "pod"
+  match:
+    namespace: porter-agent-system
+    labels:
+      control-plane: "controller-manager"
+  policies:
+  - path: "./policies/pod/running.rego"
+    name: "pod.running"
+porter_agent_redis_pod:
+  kind: "pod"
+  match:
+    namespace: porter-agent-system
+    labels:
+      app.kubernetes.io/component: "master"
+      app.kubernetes.io/instance: "porter-agent"
+      app.kubernetes.io/managed-by: "Helm"
+      app.kubernetes.io/name: "redis"
+  policies:
+  - path: "./policies/pod/running.rego"
+    name: "pod.running"
+certificates:
+  kind: "crd_list"
+  match:
+    group: cert-manager.io
+    version: v1
+    resource: certificates
+  policies:
+  - path: "./policies/certificates/expiry_two_weeks.rego"
+    name: "certificates.expiry_two_weeks"

+ 80 - 0
internal/opa/loader.go

@@ -0,0 +1,80 @@
+package opa
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+
+	"github.com/open-policy-agent/opa/rego"
+	"sigs.k8s.io/yaml"
+)
+
+type ConfigFile map[string]ConfigFilePolicyCollection
+
+type ConfigFilePolicyCollection struct {
+	Kind      string             `yaml:"kind"`
+	Match     MatchParameters    `yaml:"match"`
+	MustExist bool               `yaml:"mustExist"`
+	Policies  []ConfigFilePolicy `yaml:"policies"`
+}
+
+type ConfigFilePolicy struct {
+	Path string
+	Name string
+}
+
+func LoadPolicies(configFilePathDir string) (*KubernetesPolicies, error) {
+	// read and parse the config file
+	fileBytes, err := ioutil.ReadFile(filepath.Join(configFilePathDir, "config.yaml"))
+
+	if err != nil {
+		return nil, err
+	}
+
+	configFile := make(map[string]ConfigFilePolicyCollection)
+
+	err = yaml.Unmarshal(fileBytes, &configFile)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// load each map entry
+	policies := make(map[string]KubernetesOPAQueryCollection)
+
+	for name, cfPolicyCollection := range configFile {
+		queries := make([]rego.PreparedEvalQuery, 0)
+
+		for _, cfPolicy := range cfPolicyCollection.Policies {
+			fileBytes, err := ioutil.ReadFile(filepath.Join(configFilePathDir, cfPolicy.Path))
+
+			if err != nil {
+				return nil, err
+			}
+
+			query, err := rego.New(
+				rego.Query(fmt.Sprintf("data.%s", cfPolicy.Name)),
+				rego.Module(cfPolicy.Name, string(fileBytes)),
+			).PrepareForEval(context.Background())
+
+			if err != nil {
+				// Handle error.
+				return nil, err
+			}
+
+			queries = append(queries, query)
+		}
+
+		policies[name] = KubernetesOPAQueryCollection{
+			Kind:      KubernetesBuiltInKind(cfPolicyCollection.Kind),
+			Queries:   queries,
+			Match:     cfPolicyCollection.Match,
+			MustExist: cfPolicyCollection.MustExist,
+		}
+	}
+
+	return &KubernetesPolicies{
+		Policies: policies,
+	}, nil
+}
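A sketch of how the loader and runner from this PR might be wired together in a worker. The k8sAgent and dynamicClient values are stubbed out here as nil for brevity; a real worker would build them from the target cluster's connection config.

package main

import (
	"fmt"
	"log"

	"github.com/porter-dev/porter/internal/kubernetes"
	"github.com/porter-dev/porter/internal/opa"
	"k8s.io/client-go/dynamic"
)

func main() {
	// Reads config.yaml plus each referenced .rego file, relative to the
	// given directory.
	policies, err := opa.LoadPolicies("./internal/opa")
	if err != nil {
		log.Fatal(err)
	}

	// Stubbed out for this sketch; a real worker supplies live clients.
	var k8sAgent *kubernetes.Agent
	var dynamicClient dynamic.Interface

	runner := opa.NewRunner(policies, k8sAgent, dynamicClient)

	// Passing no categories runs every collection defined in config.yaml.
	results, err := runner.GetRecommendations(nil)
	if err != nil {
		log.Fatal(err)
	}

	for _, r := range results {
		fmt.Printf("[%s] %s: allow=%v\n", r.PolicySeverity, r.PolicyTitle, r.Allow)
	}
}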

+ 365 - 0
internal/opa/opa.go

@@ -0,0 +1,365 @@
+package opa
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/open-policy-agent/opa/rego"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/helm"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/pkg/logger"
+	"helm.sh/helm/v3/pkg/release"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+)
+
+type KubernetesPolicies struct {
+	Policies map[string]KubernetesOPAQueryCollection
+}
+
+type KubernetesOPARunner struct {
+	*KubernetesPolicies
+
+	k8sAgent      *kubernetes.Agent
+	dynamicClient dynamic.Interface
+}
+
+type KubernetesBuiltInKind string
+
+const (
+	HelmRelease KubernetesBuiltInKind = "helm_release"
+	Pod         KubernetesBuiltInKind = "pod"
+	CRDList     KubernetesBuiltInKind = "crd_list"
+)
+
+type KubernetesOPAQueryCollection struct {
+	Kind      KubernetesBuiltInKind
+	Match     MatchParameters
+	MustExist bool
+	Queries   []rego.PreparedEvalQuery
+}
+
+type MatchParameters struct {
+	Name      string `json:"name"`
+	Namespace string `json:"namespace"`
+
+	ChartName string `json:"chart_name"`
+
+	Labels map[string]string `json:"labels"`
+
+	// parameters for CRDs
+	Group    string `json:"group"`
+	Version  string `json:"version"`
+	Resource string `json:"resource"`
+}
+
+type OPARecommenderQueryResult struct {
+	Allow bool
+
+	CategoryName string
+	ObjectID     string
+
+	PolicyVersion  string
+	PolicySeverity string
+	PolicyTitle    string
+	PolicyMessage  string
+}
+
+type rawQueryResult struct {
+	Allow          bool   `mapstructure:"ALLOW"`
+	PolicyID       string `mapstructure:"POLICY_ID"`
+	PolicyVersion  string `mapstructure:"POLICY_VERSION"`
+	PolicySeverity string `mapstructure:"POLICY_SEVERITY"`
+	PolicyTitle    string `mapstructure:"POLICY_TITLE"`
+	SuccessMessage string `mapstructure:"POLICY_SUCCESS_MESSAGE"`
+
+	FailureMessage []string `mapstructure:"FAILURE_MESSAGE"`
+}
+
+func NewRunner(policies *KubernetesPolicies, k8sAgent *kubernetes.Agent, dynamicClient dynamic.Interface) *KubernetesOPARunner {
+	return &KubernetesOPARunner{policies, k8sAgent, dynamicClient}
+}
+
+func (runner *KubernetesOPARunner) GetRecommendations(categories []string) ([]*OPARecommenderQueryResult, error) {
+	collectionNames := categories
+
+	if len(categories) == 0 {
+		for catName := range runner.Policies {
+			collectionNames = append(collectionNames, catName)
+		}
+	}
+
+	res := make([]*OPARecommenderQueryResult, 0)
+
+	for _, name := range collectionNames {
+		// look up to determine if the name is registered
+		queryCollection, exists := runner.Policies[name]
+
+		if !exists {
+			return nil, fmt.Errorf("No policies for %s found", name)
+		}
+
+		var currResults []*OPARecommenderQueryResult
+		var err error
+
+		switch queryCollection.Kind {
+		case HelmRelease:
+			currResults, err = runner.runHelmReleaseQueries(name, queryCollection)
+		case Pod:
+			currResults, err = runner.runPodQueries(name, queryCollection)
+		case CRDList:
+			currResults, err = runner.runCRDListQueries(name, queryCollection)
+		default:
+			fmt.Printf("%s is not a supported query kind", queryCollection.Kind)
+			continue
+		}
+
+		if err != nil {
+			fmt.Printf("%s", err.Error())
+			continue
+		}
+
+		res = append(res, currResults...)
+	}
+
+	return res, nil
+}
+
+func (runner *KubernetesOPARunner) SetK8sAgent(k8sAgent *kubernetes.Agent) {
+	runner.k8sAgent = k8sAgent
+}
+
+func (runner *KubernetesOPARunner) runHelmReleaseQueries(name string, collection KubernetesOPAQueryCollection) ([]*OPARecommenderQueryResult, error) {
+	res := make([]*OPARecommenderQueryResult, 0)
+
+	helmAgent, err := helm.GetAgentFromK8sAgent("secret", collection.Match.Namespace, logger.New(false, os.Stdout), runner.k8sAgent)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// get the matching helm release(s) based on the match
+	var helmReleases []*release.Release
+
+	if collection.Match.Name != "" {
+		helmRelease, err := helmAgent.GetRelease(collection.Match.Name, 0, false)
+
+		if err != nil {
+			if collection.MustExist && strings.Contains(err.Error(), "not found") {
+				return []*OPARecommenderQueryResult{
+					{
+						Allow:          false,
+						ObjectID:       fmt.Sprintf("helm_release/%s/%s/%s", collection.Match.Namespace, collection.Match.Name, "exists"),
+						CategoryName:   name,
+						PolicyVersion:  "v0.0.1",
+						PolicySeverity: "high",
+						PolicyTitle:    fmt.Sprintf("The helm release %s must exist", collection.Match.Name),
+						PolicyMessage:  "The helm release was not found on the cluster",
+					},
+				}, nil
+			} else {
+				return nil, err
+			}
+		} else if collection.MustExist {
+			res = append(res, &OPARecommenderQueryResult{
+				Allow:          true,
+				ObjectID:       fmt.Sprintf("helm_release/%s/%s/%s", collection.Match.Namespace, collection.Match.Name, "exists"),
+				CategoryName:   name,
+				PolicyVersion:  "v0.0.1",
+				PolicySeverity: "high",
+				PolicyTitle:    fmt.Sprintf("The helm release %s must exist", collection.Match.Name),
+				PolicyMessage:  "The helm release was found",
+			})
+		}
+
+		helmReleases = append(helmReleases, helmRelease)
+	} else if collection.Match.ChartName != "" {
+		prefilterReleases, err := helmAgent.ListReleases(collection.Match.Namespace, &types.ReleaseListFilter{
+			ByDate: true,
+			StatusFilter: []string{
+				"deployed",
+				"pending",
+				"pending-install",
+				"pending-upgrade",
+				"pending-rollback",
+				"failed",
+			},
+		})
+
+		if err != nil {
+			return nil, err
+		}
+
+		for _, prefilterRelease := range prefilterReleases {
+			if prefilterRelease.Chart.Name() == collection.Match.ChartName {
+				helmReleases = append(helmReleases, prefilterRelease)
+			}
+		}
+	} else {
+		return nil, fmt.Errorf("invalid match parameters")
+	}
+
+	for _, helmRelease := range helmReleases {
+		for _, query := range collection.Queries {
+			results, err := query.Eval(
+				context.Background(),
+				rego.EvalInput(map[string]interface{}{
+					"version": helmRelease.Chart.Metadata.Version,
+					"values":  helmRelease.Config,
+				}),
+			)
+
+			if err != nil {
+				return nil, err
+			}
+
+			if len(results) == 1 {
+				rawQueryRes := &rawQueryResult{}
+
+				err = mapstructure.Decode(results[0].Expressions[0].Value, rawQueryRes)
+
+				if err != nil {
+					return nil, err
+				}
+
+				res = append(res, rawQueryResToRecommenderQueryResult(
+					rawQueryRes,
+					fmt.Sprintf("helm_release/%s/%s/%s", helmRelease.Namespace, helmRelease.Name, rawQueryRes.PolicyID),
+					name,
+				))
+			}
+		}
+	}
+
+	return res, nil
+}
+
+func (runner *KubernetesOPARunner) runPodQueries(name string, collection KubernetesOPAQueryCollection) ([]*OPARecommenderQueryResult, error) {
+	res := make([]*OPARecommenderQueryResult, 0)
+
+	lselArr := make([]string, 0)
+
+	for k, v := range collection.Match.Labels {
+		lselArr = append(lselArr, fmt.Sprintf("%s=%s", k, v))
+	}
+
+	lsel := strings.Join(lselArr, ",")
+
+	pods, err := runner.k8sAgent.GetPodsByLabel(lsel, collection.Match.Namespace)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, pod := range pods.Items {
+		unstructuredPod, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pod)
+
+		if err != nil {
+			return nil, err
+		}
+
+		for _, query := range collection.Queries {
+			results, err := query.Eval(
+				context.Background(),
+				rego.EvalInput(unstructuredPod),
+			)
+
+			if err != nil {
+				return nil, err
+			}
+
+			if len(results) == 1 {
+				rawQueryRes := &rawQueryResult{}
+
+				err = mapstructure.Decode(results[0].Expressions[0].Value, rawQueryRes)
+
+				if err != nil {
+					return nil, err
+				}
+
+				res = append(res, rawQueryResToRecommenderQueryResult(
+					rawQueryRes,
+					fmt.Sprintf("pod/%s/%s", pod.Namespace, pod.Name),
+					name,
+				))
+			}
+		}
+	}
+
+	return res, nil
+}
+
+func (runner *KubernetesOPARunner) runCRDListQueries(name string, collection KubernetesOPAQueryCollection) ([]*OPARecommenderQueryResult, error) {
+	res := make([]*OPARecommenderQueryResult, 0)
+
+	objRes := schema.GroupVersionResource{
+		Group:    collection.Match.Group,
+		Version:  collection.Match.Version,
+		Resource: collection.Match.Resource,
+	}
+
+	crdList, err := runner.dynamicClient.Resource(objRes).Namespace(collection.Match.Namespace).List(context.Background(), v1.ListOptions{})
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, crd := range crdList.Items {
+		for _, query := range collection.Queries {
+			results, err := query.Eval(
+				context.Background(),
+				rego.EvalInput(crd.Object),
+			)
+
+			if err != nil {
+				return nil, err
+			}
+
+			if len(results) == 1 {
+				rawQueryRes := &rawQueryResult{}
+
+				err = mapstructure.Decode(results[0].Expressions[0].Value, rawQueryRes)
+
+				if err != nil {
+					return nil, err
+				}
+
+				res = append(res, rawQueryResToRecommenderQueryResult(
+					rawQueryRes,
+					fmt.Sprintf("%s/%s/%s/%s", collection.Match.Group, collection.Match.Version, collection.Match.Resource, rawQueryRes.PolicyID),
+					name,
+				))
+			}
+		}
+	}
+
+	return res, nil
+}
+
+func rawQueryResToRecommenderQueryResult(rawQueryRes *rawQueryResult, objectID, categoryName string) *OPARecommenderQueryResult {
+	queryRes := &OPARecommenderQueryResult{
+		ObjectID:     objectID,
+		CategoryName: categoryName,
+	}
+
+	message := rawQueryRes.SuccessMessage
+
+	// if failure, compose failure messages into single string
+	if !rawQueryRes.Allow {
+		message = strings.Join(rawQueryRes.FailureMessage, ". ")
+	}
+
+	queryRes.PolicyMessage = message
+	queryRes.Allow = rawQueryRes.Allow
+	queryRes.PolicySeverity = rawQueryRes.PolicySeverity
+	queryRes.PolicyTitle = rawQueryRes.PolicyTitle
+	queryRes.PolicyVersion = rawQueryRes.PolicyVersion
+
+	return queryRes
+}
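To illustrate the decoding step above, a small standalone sketch (the struct mirrors the unexported rawQueryResult, trimmed to three fields). mapstructure matches map keys case-insensitively, which is how the lowercase allow rule produced by the Rego modules decodes into the ALLOW-tagged field on success; when a policy fails, allow is undefined, the key is simply absent, and the field keeps its zero value false.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Mirrors the unexported rawQueryResult above (trimmed to three fields).
type rawQueryResult struct {
	Allow          bool     `mapstructure:"ALLOW"`
	PolicyID       string   `mapstructure:"POLICY_ID"`
	FailureMessage []string `mapstructure:"FAILURE_MESSAGE"`
}

func main() {
	// Shape of results[0].Expressions[0].Value for a failing policy: each
	// top-level rule in the Rego module becomes a key. The allow rule's
	// body failed, so "allow" is absent and Allow keeps its zero value.
	value := map[string]interface{}{
		"POLICY_ID":       "nginx_memory_limits",
		"FAILURE_MESSAGE": []interface{}{"Failed: NGINX ingress controller does not have memory limits set"},
	}

	res := &rawQueryResult{}
	if err := mapstructure.Decode(value, res); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", res) // &{Allow:false PolicyID:nginx_memory_limits ...}
}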

+ 32 - 0
internal/opa/policies/cert-manager/cainjector_memory_limits.rego

@@ -0,0 +1,32 @@
+package cert_manager.cainjector_memory_limits
+
+import future.keywords
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# resources:
+#   limits:
+#     memory: 512Mi
+#   requests:
+#     cpu: 50m
+#     memory: 512Mi
+
+POLICY_ID := "cainjector_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Cert-manager CA injector should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Cert-manager CA injector has memory limits set", [])
+
+allow if {
+	input.values.cainjector.resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: Cert-manager CA injector does not have memory limits set"
+}
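A minimal sketch of evaluating this policy directly with the same OPA Go API used elsewhere in this PR, assuming the .rego file sits at the path below. With the sample Helm values the allow rule holds.

package main

import (
	"context"
	"fmt"
	"io/ioutil"

	"github.com/open-policy-agent/opa/rego"
)

func main() {
	module, err := ioutil.ReadFile("internal/opa/policies/cert-manager/cainjector_memory_limits.rego")
	if err != nil {
		panic(err)
	}

	query, err := rego.New(
		rego.Query("data.cert_manager.cainjector_memory_limits"),
		rego.Module("cert_manager.cainjector_memory_limits", string(module)),
	).PrepareForEval(context.Background())
	if err != nil {
		panic(err)
	}

	// Helm values with the limit set, so the allow rule should hold.
	input := map[string]interface{}{
		"values": map[string]interface{}{
			"cainjector": map[string]interface{}{
				"resources": map[string]interface{}{
					"limits": map[string]interface{}{"memory": "512Mi"},
				},
			},
		},
	}

	results, err := query.Eval(context.Background(), rego.EvalInput(input))
	if err != nil {
		panic(err)
	}

	fmt.Println(results[0].Expressions[0].Value)
}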

+ 25 - 0
internal/opa/policies/cert-manager/cert_manager_version.rego

@@ -0,0 +1,25 @@
+package cert_manager.version
+
+import future.keywords
+
+POLICY_ID := "cert_manager_version"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+latest_stable_version := "1.5.5"
+
+POLICY_TITLE := sprintf("The cert-manager version should be at least v%s", [latest_stable_version])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: cert-manager version is up-to-date", [])
+
+trimmedVersion := trim_left(input.version, "v")
+
+# semver.compare returns -1 if latest_stable_version < trimmedVersion
+allow if semver.compare(latest_stable_version, trimmedVersion) <= 0
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := sprintf("Failed: latest stable version is %s, but you are on %s", [latest_stable_version, trimmedVersion])
+}

+ 32 - 0
internal/opa/policies/cert-manager/controller_memory_limits.rego

@@ -0,0 +1,32 @@
+package cert_manager.controller_memory_limits
+
+import future.keywords
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# resources:
+#   limits:
+#     memory: 512Mi
+#   requests:
+#     cpu: 50m
+#     memory: 512Mi
+
+POLICY_ID := "controller_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Cert-manager controller should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Cert-manager controller has memory limits set", [])
+
+allow if {
+	input.values.resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: Cert-manager controller does not have memory limits set"
+}

+ 33 - 0
internal/opa/policies/cert-manager/webhook_memory_limits.rego

@@ -0,0 +1,33 @@
+package cert_manager.webhook_memory_limits
+
+import future.keywords
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# webhook:
+#   resources:
+#     limits:
+#       memory: 512Mi
+#     requests:
+#       cpu: 50m
+#       memory: 512Mi
+
+POLICY_ID := "webhook_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Cert-manager webhook should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Cert-manager webhook has memory limits set", [])
+
+allow if {
+	input.values.webhook.resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: Cert-manager webhook does not have memory limits set"
+}

+ 30 - 0
internal/opa/policies/certificates/expiry_two_weeks.rego

@@ -0,0 +1,30 @@
+package certificates.expiry_two_weeks
+
+import future.keywords
+
+POLICY_ID := sprintf("certificates_expiry_two_weeks_%s_%s", [input.metadata.namespace, input.metadata.name])
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Certificate %s/%s should have longer than 2 weeks left before expiry", [input.metadata.namespace, input.metadata.name])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: certificate %s/%s has longer than 2 weeks before expiry", [input.metadata.namespace, input.metadata.name])
+
+allow if {
+	not rfc3339_expiry_within_2_weeks(input.status.notAfter)
+}
+
+FAILURE_MESSAGE contains msg if {
+	rfc3339_expiry_within_2_weeks(input.status.notAfter)
+	msg := sprintf("Certificate expires at %s, which is less than 2 weeks from now", [input.status.notAfter])
+}
+
+rfc3339_lt(a, b) if {
+	time.parse_rfc3339_ns(a) < time.parse_rfc3339_ns(b)
+}
+
+rfc3339_expiry_within_2_weeks(a) if {
+	time.add_date(time.parse_rfc3339_ns(a), 0, 0, -14) < time.now_ns()
+}
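The time arithmetic above in Go form, as a sketch: a certificate trips the policy when its notAfter timestamp, minus 14 days, is already in the past.

package main

import (
	"fmt"
	"time"
)

// expiryWithinTwoWeeks mirrors rfc3339_expiry_within_2_weeks: it reports
// true when notAfter minus 14 days is earlier than the current time.
func expiryWithinTwoWeeks(notAfter string) (bool, error) {
	t, err := time.Parse(time.RFC3339, notAfter)
	if err != nil {
		return false, err
	}
	return t.AddDate(0, 0, -14).Before(time.Now()), nil
}

func main() {
	// Hypothetical certificate status.notAfter value.
	expiring, err := expiryWithinTwoWeeks("2022-10-01T00:00:00Z")
	if err != nil {
		panic(err)
	}
	fmt.Println(expiring)
}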

+ 37 - 0
internal/opa/policies/nginx/memory_limits.rego

@@ -0,0 +1,37 @@
+package nginx.memory_limits
+
+import future.keywords
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# controller:
+#   resources:
+#     limits:
+#       cpu: 250m
+#       memory: 275Mi
+#     requests:
+#       cpu: 250m
+#       memory: 275Mi
+
+POLICY_ID := "nginx_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("NGINX ingress controller should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: NGINX ingress controller has memory limits set", [])
+
+allow if {
+	input.values.controller.resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: NGINX ingress controller does not have memory limits set"
+}

+ 47 - 0
internal/opa/policies/nginx/nginx_topology_spread_constraints.rego

@@ -0,0 +1,47 @@
+package nginx.topology_spread_constraints
+
+import future.keywords
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of topologySpreadConstraints as a soft constraint. We look
+# for Helm values of the form:
+# 
+# controller:
+#   topologySpreadConstraints:
+#     - labelSelector:
+#         matchLabels:
+#           app.kubernetes.io/component: controller
+#           app.kubernetes.io/instance: nginx-ingress
+#           app.kubernetes.io/name: ingress-nginx
+#       maxSkew: 1
+#       topologyKey: kubernetes.io/hostname
+#       whenUnsatisfiable: DoNotSchedule
+#     - labelSelector:
+#         matchLabels:
+#           app.kubernetes.io/component: controller
+#           app.kubernetes.io/instance: nginx-ingress
+#           app.kubernetes.io/name: ingress-nginx
+#       maxSkew: 1
+#       topologyKey: topology.kubernetes.io/zone
+#       whenUnsatisfiable: ScheduleAnyway
+
+POLICY_ID := "nginx_topology_spread_constraints"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("NGINX ingress controller should have topology spread constraints", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: NGINX ingress controller has topology spread constraints", [])
+
+allow if {
+	count(input.values.controller.topologySpreadConstraints) >= 1
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: NGINX ingress controller does not have topology spread constraints set"
+}

+ 25 - 0
internal/opa/policies/nginx/nginx_version.rego

@@ -0,0 +1,25 @@
+package nginx.version
+
+import future.keywords
+
+POLICY_ID := "nginx_version"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+latest_stable_version := "4.0.18"
+
+POLICY_TITLE := sprintf("The NGINX version should be at least v%s", [latest_stable_version])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: NGINX version is up-to-date", [])
+
+trimmedVersion := trim_left(input.version, "v")
+
+# semver.compare returns -1 if latest_stable_version < trimmedVersion
+allow if semver.compare(latest_stable_version, trimmedVersion) <= 0
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := sprintf("Failed: latest stable version is %s, but you are on %s", [latest_stable_version, trimmedVersion])
+}

+ 38 - 0
internal/opa/policies/nginx/wait_shutdown.rego

@@ -0,0 +1,38 @@
+package nginx.wait_shutdown
+
+import future.keywords
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the modification of the wait-shutdown script as a soft constraint. We look
+# for Helm values of the form:
+# 
+# controller:
+#   lifecycle:
+#     preStop:
+#       exec:
+#         command:
+#           - sh
+#           - '-c'
+#           - sleep 120 && /wait-shutdown
+
+POLICY_ID := "nginx_wait_shutdown"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("NGINX ingress controller should have a modified wait-shutdown script", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: NGINX ingress controller has a properly modified wait-shutdown script set", [])
+
+allow if {
+	input.values.controller.lifecycle.preStop.exec.command
+	count(input.values.controller.lifecycle.preStop.exec.command) != 1
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := sprintf("Failed: NGINX ingress controller does not have a properly modified wait-shutdown script", [])
+}

+ 38 - 0
internal/opa/policies/pod/running.rego

@@ -0,0 +1,38 @@
+package pod.running
+
+import future.keywords.contains
+import future.keywords.every
+import future.keywords.if
+import future.keywords.in
+
+# TODO: this file needs a lot of work to capture all pod statuses and container statuses. 
+# It currently only checks if a pod is in a "Running" status and if all containers are in
+# running status.
+POLICY_ID := "pod_running"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Pod %s in namespace %s should be running", [input.metadata.name, input.metadata.namespace])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: pod is running", [])
+
+allow if {
+	input.status.phase == "Running"
+
+	every containerStatus in input.status.containerStatuses {
+		containerStatus.state.running
+	}
+}
+
+FAILURE_MESSAGE contains msg1 if {
+	input.status.phase != "Running"
+	msg1 := sprintf("Pod %s does not have a Running status", [input.metadata.name])
+}
+
+FAILURE_MESSAGE contains msg2 if {
+	some containerStatus in input.status.containerStatuses
+	not containerStatus.state.running
+	msg2 := sprintf("Container %s in pod %s is not running", [containerStatus.name, input.metadata.name])
+}

+ 37 - 0
internal/opa/policies/prometheus/alertmanager_memory_limits.rego

@@ -0,0 +1,37 @@
+package prometheus.alertmanager_memory_limits
+
+import future.keywords
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# alertmanager:
+#   resources:
+#     limits:
+#       cpu: 200m
+#       memory: 256Mi
+#     requests:
+#       cpu: 10m
+#       memory: 256Mi
+
+POLICY_ID := "alertmanager_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus alert-manager should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Prometheus alert-manager has memory limits set", [])
+
+allow if {
+	input.values.alertmanager.resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: Prometheus alert-manager does not have memory limits set"
+}

+ 37 - 0
internal/opa/policies/prometheus/kubestatemetrics_memory_limits.rego

@@ -0,0 +1,37 @@
+package prometheus.kubestatemetrics_memory_limits
+
+import future.keywords
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# kube-state-metrics:
+#   resources:
+#     limits:
+#       cpu: 200m
+#       memory: 256Mi
+#     requests:
+#       cpu: 10m
+#       memory: 256Mi
+
+POLICY_ID := "kubestatemetrics_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus kube-state-metrics should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Prometheus kube-state-metrics has memory limits set", [])
+
+allow if {
+	input.values["kube-state-metrics"].resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: Prometheus kube-state-metrics does not have memory limits set"
+}

+ 37 - 0
internal/opa/policies/prometheus/nodeexporter_memory_limits.rego

@@ -0,0 +1,37 @@
+package prometheus.nodeexporter_memory_limits
+
+import future.keywords
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# nodeExporter:
+#   resources:
+#     limits:
+#       cpu: 200m
+#       memory: 256Mi
+#     requests:
+#       cpu: 10m
+#       memory: 256Mi
+
+POLICY_ID := "nodeexporter_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus nodeExporter should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Prometheus nodeExporter has memory limits set", [])
+
+allow if {
+	input.values.nodeExporter.resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: Prometheus nodeExporter does not have memory limits set"
+}

+ 25 - 0
internal/opa/policies/prometheus/prometheus_version.rego

@@ -0,0 +1,25 @@
+package prometheus.version
+
+import future.keywords
+
+POLICY_ID := "prometheus_version"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+latest_stable_version := "15.5.3"
+
+POLICY_TITLE := sprintf("The Prometheus version should be at least v%s", [latest_stable_version])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Prometheus version is up-to-date", [])
+
+trimmedVersion := trim_left(input.version, "v")
+
+# semver.compare returns -1 if latest_stable_version < trimmedVersion
+allow if semver.compare(latest_stable_version, trimmedVersion) <= 0
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := sprintf("Failed: latest stable version is %s, but you are on %s", [latest_stable_version, trimmedVersion])
+}

+ 37 - 0
internal/opa/policies/prometheus/pushgateway_memory_limits.rego

@@ -0,0 +1,37 @@
+package prometheus.pushgateway_memory_limits
+
+import future.keywords
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# pushgateway:
+#   resources:
+#     limits:
+#       cpu: 200m
+#       memory: 256Mi
+#     requests:
+#       cpu: 10m
+#       memory: 256Mi
+
+POLICY_ID := "pushgateway_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus pushgateway should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Prometheus pushgateway has memory limits set", [])
+
+allow if {
+	input.values.pushgateway.resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: Prometheus pushgateway does not have memory limits set"
+}

+ 37 - 0
internal/opa/policies/prometheus/server_memory_limits.rego

@@ -0,0 +1,37 @@
+package prometheus.server_memory_limits
+
+import future.keywords
+
+# Policy expects input structure of form:
+# values: {}
+
+# This policy tests for the existence of memory limits as a hard constraint. We look
+# for Helm values of the form:
+# 
+# server:
+#   resources:
+#     limits:
+#       cpu: 500m
+#       memory: 400Mi
+#     requests:
+#       cpu: 100m
+#       memory: 400Mi
+
+POLICY_ID := "server_memory_limits"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("Prometheus server should have memory limits set", [])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Prometheus server has memory limits set", [])
+
+allow if {
+	input.values.server.resources.limits.memory
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := "Failed: Prometheus server does not have memory limits set"
+}

+ 26 - 0
internal/opa/policies/web/web_version.rego

@@ -0,0 +1,26 @@
+package web.version
+
+import future.keywords
+
+POLICY_ID := "web_version"
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+# TODO: set the actual latest stable version
+latest_stable_version := "0.115.0"
+
+POLICY_TITLE := sprintf("The web version should be at least v%s", [latest_stable_version])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: web version is up-to-date", [])
+
+trimmedVersion := trim_left(input.version, "v")
+
+# semver.compare returns -1 if latest_stable_version < trimmedVersion
+allow if semver.compare(latest_stable_version, trimmedVersion) <= 0
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := sprintf("Failed: latest stable version is %s, but you are on %s", [latest_stable_version, trimmedVersion])
+}
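
Note on the comparison above: for an "at least version X" check, equality must also pass, which is why these version policies use semver.compare(latest_stable_version, trimmedVersion) <= 0 rather than == -1. A sketch of the same -1/0/+1 contract, using golang.org/x/mod/semver purely for illustration (an assumption: it requires the "v" prefix that the Rego policies trim off):

// A sketch of the -1/0/+1 semver comparison contract.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	latest := "v15.5.3"

	for _, current := range []string{"v15.5.2", "v15.5.3", "v15.6.0"} {
		// Compare(latest, current) <= 0 holds when current is at least the
		// latest stable version, matching the "should be at least" titles;
		// == -1 would wrongly fail a chart that is exactly up-to-date.
		ok := semver.Compare(latest, current) <= 0
		fmt.Printf("%s at least %s: %v\n", current, latest, ok)
	}
}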

+ 1 - 0
internal/repository/gorm/migrate.go

@@ -56,6 +56,7 @@ func AutoMigrate(db *gorm.DB, debug bool) error {
 		&models.StackResource{},
 		&models.StackSourceConfig{},
 		&models.StackEnvGroup{},
+		&models.MonitorTestResult{},
 		&ints.KubeIntegration{},
 		&ints.BasicIntegration{},
 		&ints.OIDCIntegration{},

+ 44 - 0
internal/repository/gorm/monitor.go

@@ -0,0 +1,44 @@
+package gorm
+
+import (
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+	"gorm.io/gorm"
+)
+
+// MonitorTestResultRepository contains methods for querying MonitorTestResult models
+type MonitorTestResultRepository struct {
+	db *gorm.DB
+}
+
+// NewMonitorTestResultRepository returns a MonitorTestResultRepository which uses
+// gorm.DB for querying the database
+func NewMonitorTestResultRepository(db *gorm.DB) repository.MonitorTestResultRepository {
+	return &MonitorTestResultRepository{db}
+}
+
+func (m *MonitorTestResultRepository) CreateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
+	if err := m.db.Create(monitor).Error; err != nil {
+		return nil, err
+	}
+
+	return monitor, nil
+}
+
+func (m *MonitorTestResultRepository) ReadMonitorTestResult(projectID, clusterID uint, objectID string) (*models.MonitorTestResult, error) {
+	res := &models.MonitorTestResult{}
+
+	if err := m.db.Where("project_id = ? AND cluster_id = ? AND object_id = ?", projectID, clusterID, objectID).First(res).Error; err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (m *MonitorTestResultRepository) UpdateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
+	if err := m.db.Save(monitor).Error; err != nil {
+		return nil, err
+	}
+
+	return monitor, nil
+}
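
A hedged usage sketch (not part of this diff) of the new repository, mirroring the read-or-create-then-update flow the recommender job uses; the upsertResult helper name is hypothetical:

// A hedged sketch; upsertResult is a hypothetical helper, not part of this PR.
package sketch

import (
	"errors"

	"github.com/porter-dev/porter/internal/models"
	"github.com/porter-dev/porter/internal/repository"
	"gorm.io/gorm"
)

// upsertResult reads an existing result by object ID, creates it when the
// record is missing, and updates the run-dependent fields otherwise.
func upsertResult(repo repository.MonitorTestResultRepository, result *models.MonitorTestResult) (*models.MonitorTestResult, error) {
	existing, err := repo.ReadMonitorTestResult(result.ProjectID, result.ClusterID, result.ObjectID)

	if errors.Is(err, gorm.ErrRecordNotFound) {
		return repo.CreateMonitorTestResult(result)
	} else if err != nil {
		return nil, err
	}

	// carry over only the fields that change between recommender runs
	existing.LastTested = result.LastTested
	existing.LastRunResult = result.LastRunResult
	existing.Message = result.Message

	return repo.UpdateMonitorTestResult(existing)
}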

+ 6 - 0
internal/repository/gorm/repository.go

@@ -48,6 +48,7 @@ type GormRepository struct {
 	policy                    repository.PolicyRepository
 	tag                       repository.TagRepository
 	stack                     repository.StackRepository
+	monitor                   repository.MonitorTestResultRepository
 }
 
 func (t *GormRepository) User() repository.UserRepository {
@@ -214,6 +215,10 @@ func (t *GormRepository) Stack() repository.StackRepository {
 	return t.stack
 }
 
+func (t *GormRepository) MonitorTestResult() repository.MonitorTestResultRepository {
+	return t.monitor
+}
+
 // NewRepository returns a Repository which persists users in memory
 // and accepts a parameter that can trigger read/write errors
 func NewRepository(db *gorm.DB, key *[32]byte, storageBackend credentials.CredentialStorage) repository.Repository {
@@ -259,5 +264,6 @@ func NewRepository(db *gorm.DB, key *[32]byte, storageBackend credentials.Creden
 		policy:                    NewPolicyRepository(db),
 		tag:                       NewTagRepository(db),
 		stack:                     NewStackRepository(db),
+		monitor:                   NewMonitorTestResultRepository(db),
 	}
 }

+ 9 - 0
internal/repository/monitor.go

@@ -0,0 +1,9 @@
+package repository
+
+import "github.com/porter-dev/porter/internal/models"
+
+type MonitorTestResultRepository interface {
+	CreateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error)
+	ReadMonitorTestResult(projectID, clusterID uint, objectID string) (*models.MonitorTestResult, error)
+	UpdateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error)
+}

+ 1 - 0
internal/repository/repository.go

@@ -42,4 +42,5 @@ type Repository interface {
 	Policy() PolicyRepository
 	Tag() TagRepository
 	Stack() StackRepository
+	MonitorTestResult() MonitorTestResultRepository
 }

+ 24 - 0
internal/repository/test/monitor.go

@@ -0,0 +1,24 @@
+package test
+
+import (
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+)
+
+type MonitorTestResultRepository struct{}
+
+func NewMonitorTestResultRepository(canQuery bool) repository.MonitorTestResultRepository {
+	return &MonitorTestResultRepository{}
+}
+
+func (n *MonitorTestResultRepository) CreateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
+	panic("not implemented") // TODO: Implement
+}
+
+func (n *MonitorTestResultRepository) ReadMonitorTestResult(projectID, clusterID uint, objectID string) (*models.MonitorTestResult, error) {
+	panic("not implemented") // TODO: Implement
+}
+
+func (n *MonitorTestResultRepository) UpdateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
+	panic("not implemented") // TODO: Implement
+}

+ 6 - 0
internal/repository/test/repository.go

@@ -46,6 +46,7 @@ type TestRepository struct {
 	policy                    repository.PolicyRepository
 	tag                       repository.TagRepository
 	stack                     repository.StackRepository
+	monitor                   repository.MonitorTestResultRepository
 }
 
 func (t *TestRepository) User() repository.UserRepository {
@@ -212,6 +213,10 @@ func (t *TestRepository) Stack() repository.StackRepository {
 	return t.stack
 }
 
+func (t *TestRepository) MonitorTestResult() repository.MonitorTestResultRepository {
+	return t.monitor
+}
+
 // NewRepository returns a Repository which persists users in memory
 // and accepts a parameter that can trigger read/write errors
 func NewRepository(canQuery bool, failingMethods ...string) repository.Repository {
@@ -257,5 +262,6 @@ func NewRepository(canQuery bool, failingMethods ...string) repository.Repositor
 		policy:                    NewPolicyRepository(canQuery),
 		tag:                       NewTagRepository(),
 		stack:                     NewStackRepository(),
+		monitor:                   NewMonitorTestResultRepository(canQuery),
 	}
 }

+ 14 - 0
scripts/dev-environment/StartWorkerServer.sh

@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# Load env variables for backend
+if [[ -e ./docker/.env ]]
+then
+  set -a # automatically export all variables
+  source ./docker/.env
+  set +a
+else
+  echo "Couldn't find any backend env variables, exiting process" >&2
+  exit 1
+fi
+
+air -c .air.worker.toml

+ 4 - 0
workers/Dockerfile

@@ -26,5 +26,9 @@ WORKDIR /app
 RUN apk update && apk add curl
 
 COPY --from=build /app/bin/worker-pool /usr/bin/
+COPY /internal/opa/config.yaml /app/opa/config.yaml
+COPY /internal/opa/policies /app/opa/policies
+
+ENV OPA_CONFIG_FILE_DIR /app/opa
 
 ENTRYPOINT [ "worker-pool" ]
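
The image bakes the OPA config and policy tree into /app/opa and points OPA_CONFIG_FILE_DIR at it. A minimal sketch of how the worker resolves this at startup, assuming opa.LoadPolicies takes the directory holding config.yaml and policies/ (as the worker's main.go below does):

// A minimal sketch of resolving the policy directory at startup.
package main

import (
	"log"
	"os"

	"github.com/porter-dev/porter/internal/opa"
)

func main() {
	dir := os.Getenv("OPA_CONFIG_FILE_DIR")

	if dir == "" {
		dir = "./internal/opa" // the default used when running outside the container
	}

	policies, err := opa.LoadPolicies(dir)

	if err != nil {
		log.Fatalln(err)
	}

	log.Printf("loaded OPA policies from %s: %T", dir, policies)
}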

+ 300 - 0
workers/jobs/recommender.go

@@ -0,0 +1,300 @@
+//go:build ee
+
+/*
+
+                            === Recommender Job ===
+
+This job checks to see if a cluster matches policies set by the OPA config file.
+
+*/
+
+package jobs
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/porter-dev/porter/api/server/shared/config/env"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+
+	"github.com/porter-dev/porter/ee/integrations/vault"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/oauth"
+	"github.com/porter-dev/porter/internal/opa"
+	"github.com/porter-dev/porter/internal/repository"
+	rcreds "github.com/porter-dev/porter/internal/repository/credentials"
+	rgorm "github.com/porter-dev/porter/internal/repository/gorm"
+	"golang.org/x/oauth2"
+	"gorm.io/gorm"
+)
+
+type recommender struct {
+	enqueueTime          time.Time
+	db                   *gorm.DB
+	repo                 repository.Repository
+	doConf               *oauth2.Config
+	clusterAndProjectIDs []clusterAndProjectID
+	categories           []string
+	policies             *opa.KubernetesPolicies
+}
+
+// RecommenderOpts holds the options required to run this job
+type RecommenderOpts struct {
+	DBConf         *env.DBConf
+	DOClientID     string
+	DOClientSecret string
+	DOScopes       []string
+	ServerURL      string
+
+	LegacyProjectIDs []uint
+
+	Input map[string]interface{}
+}
+
+type recommenderInput struct {
+	Projects  []uint `mapstructure:"projects"`
+	ClusterID uint   `mapstructure:"cluster_id"`
+
+	Priority string `mapstructure:"priority"`
+
+	Categories []string `mapstructure:"categories"`
+}
+
+type clusterAndProjectID struct {
+	clusterID uint
+	projectID uint
+}
+
+func NewRecommender(
+	db *gorm.DB,
+	enqueueTime time.Time,
+	opts *RecommenderOpts,
+	opaPolicies *opa.KubernetesPolicies,
+) (*recommender, error) {
+	var credBackend rcreds.CredentialStorage
+
+	if opts.DBConf.VaultAPIKey != "" && opts.DBConf.VaultServerURL != "" && opts.DBConf.VaultPrefix != "" {
+		credBackend = vault.NewClient(
+			opts.DBConf.VaultServerURL,
+			opts.DBConf.VaultAPIKey,
+			opts.DBConf.VaultPrefix,
+		)
+	}
+
+	var key [32]byte
+
+	for i, b := range []byte(opts.DBConf.EncryptionKey) {
+		key[i] = b
+	}
+
+	repo := rgorm.NewRepository(db, &key, credBackend)
+
+	doConf := oauth.NewDigitalOceanClient(&oauth.Config{
+		ClientID:     opts.DOClientID,
+		ClientSecret: opts.DOClientSecret,
+		Scopes:       opts.DOScopes,
+		BaseURL:      opts.ServerURL,
+	})
+
+	// parse input
+	parsedInput := &recommenderInput{}
+	err := mapstructure.Decode(opts.Input, parsedInput)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// validate
+	validator := requestutils.NewDefaultValidator()
+
+	if requestErr := validator.Validate(parsedInput); requestErr != nil {
+		return nil, errors.New(requestErr.Error())
+	}
+
+	clusterIDs, err := getClustersToParse(db, repo.Cluster(), parsedInput, opts.LegacyProjectIDs)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &recommender{
+		enqueueTime, db, repo, doConf, clusterIDs, parsedInput.Categories, opaPolicies,
+	}, nil
+}
+
+func getClustersToParse(db *gorm.DB, clusterRepo repository.ClusterRepository, input *recommenderInput, legacyProjects []uint) ([]clusterAndProjectID, error) {
+	// if the project and cluster ID is set, make sure that the project id matches the cluster's
+	// project id
+	if input.ClusterID != 0 {
+		if len(input.Projects) != 1 {
+			return nil, fmt.Errorf("if cluster ID is passed, you must pass the matching project ID")
+		}
+
+		_, err := clusterRepo.ReadCluster(input.Projects[0], input.ClusterID)
+
+		if err != nil {
+			return nil, err
+		}
+
+		return []clusterAndProjectID{{
+			clusterID: input.ClusterID,
+			projectID: input.Projects[0],
+		}}, nil
+	}
+
+	// otherwise, query for all clusters within the legacy projects, as well as
+	// clusters in projects whose usage differs from the free-tier defaults
+	clusters := make([]*models.Cluster, 0)
+
+	query := db.Where(`clusters.project_id IN (?) OR clusters.project_id IN (
+		SELECT p2.id FROM projects AS p2
+		INNER JOIN project_usages ON p2.id=project_usages.project_id
+		WHERE project_usages.resource_cpu != 10 AND project_usages.resource_memory != 20000 AND project_usages.clusters != 1 AND project_usages.users != 1
+	)`, legacyProjects)
+
+	if err := query.Find(&clusters).Error; err != nil {
+		return nil, err
+	}
+
+	res := make([]clusterAndProjectID, 0)
+
+	for _, cluster := range clusters {
+		res = append(res, clusterAndProjectID{
+			clusterID: cluster.ID,
+			projectID: cluster.ProjectID,
+		})
+	}
+
+	return res, nil
+}
+
+func (n *recommender) ID() string {
+	return "recommender"
+}
+
+func (n *recommender) EnqueueTime() time.Time {
+	return n.enqueueTime
+}
+
+func (n *recommender) Run() error {
+	for _, ids := range n.clusterAndProjectIDs {
+		log.Printf("running recommender for project %d, cluster %d", ids.projectID, ids.clusterID)
+
+		cluster, err := n.repo.Cluster().ReadCluster(ids.projectID, ids.clusterID)
+
+		if err != nil {
+			log.Printf("error reading cluster ID %d: %v. skipping cluster ...", ids.clusterID, err)
+			continue
+		}
+
+		k8sAgent, err := kubernetes.GetAgentOutOfClusterConfig(&kubernetes.OutOfClusterConfig{
+			Cluster:                   cluster,
+			Repo:                      n.repo,
+			DigitalOceanOAuth:         n.doConf,
+			AllowInClusterConnections: false,
+		})
+
+		if err != nil {
+			log.Printf("error getting k8s agent for cluster ID %d: %v. skipping cluster ...", ids.clusterID, err)
+			continue
+		}
+
+		dynamicClient, err := kubernetes.GetDynamicClientOutOfClusterConfig(&kubernetes.OutOfClusterConfig{
+			Cluster:                   cluster,
+			Repo:                      n.repo,
+			DigitalOceanOAuth:         n.doConf,
+			AllowInClusterConnections: false,
+		})
+
+		if err != nil {
+			log.Printf("error getting dynamic client for cluster ID %d: %v. skipping cluster ...", ids.clusterID, err)
+			continue
+		}
+
+		runner := opa.NewRunner(n.policies, k8sAgent, dynamicClient)
+
+		queryResults, err := runner.GetRecommendations(n.categories)
+
+		if err != nil {
+			log.Printf("error querying opa policies for cluster ID %d: %v. skipping cluster ...", ids.clusterID, err)
+			continue
+		}
+
+		for _, queryRes := range queryResults {
+			log.Printf("policy result for object %s: allow=%t title=%q message=%q", queryRes.ObjectID, queryRes.Allow, queryRes.PolicyTitle, queryRes.PolicyMessage)
+
+			monitor, err := n.repo.MonitorTestResult().ReadMonitorTestResult(ids.projectID, ids.clusterID, queryRes.ObjectID)
+
+			if err != nil {
+				if errors.Is(err, gorm.ErrRecordNotFound) {
+					monitor, err = n.repo.MonitorTestResult().CreateMonitorTestResult(n.getMonitorTestResultFromQueryResult(cluster, queryRes))
+				} else {
+					log.Printf("error reading monitor test result for object %s: %v. skipping result ...", queryRes.ObjectID, err)
+					continue
+				}
+			} else {
+				monitor, err = n.repo.MonitorTestResult().UpdateMonitorTestResult(mergeMonitorTestResultFromQueryResult(monitor, queryRes))
+			}
+
+			if err != nil {
+				log.Printf("error persisting monitor test result for object %s: %v. skipping result ...", queryRes.ObjectID, err)
+				continue
+			}
+		}
+	}
+
+	return nil
+}
+
+func (n *recommender) getMonitorTestResultFromQueryResult(cluster *models.Cluster, queryRes *opa.OPARecommenderQueryResult) *models.MonitorTestResult {
+	runResult := types.MonitorTestStatusSuccess
+
+	if !queryRes.Allow {
+		runResult = types.MonitorTestStatusFailed
+	}
+
+	currTime := time.Now()
+
+	return &models.MonitorTestResult{
+		ProjectID:         cluster.ProjectID,
+		ClusterID:         cluster.ID,
+		Category:          queryRes.CategoryName,
+		ObjectID:          queryRes.ObjectID,
+		LastStatusChange:  &currTime,
+		LastTested:        &currTime,
+		LastRunResult:     string(runResult),
+		LastRunResultEnum: models.GetLastRunResultEnum(string(runResult)),
+		Title:             queryRes.PolicyTitle,
+		Message:           queryRes.PolicyMessage,
+		Severity:          queryRes.PolicySeverity,
+		SeverityEnum:      models.GetSeverityEnum(queryRes.PolicySeverity),
+	}
+}
+
+func mergeMonitorTestResultFromQueryResult(monitor *models.MonitorTestResult, queryRes *opa.OPARecommenderQueryResult) *models.MonitorTestResult {
+	runResult := types.MonitorTestStatusSuccess
+
+	if !queryRes.Allow {
+		runResult = types.MonitorTestStatusFailed
+	}
+
+	currTime := time.Now()
+
+	if isStatusChange := monitor.LastRunResult != string(runResult); isStatusChange {
+		monitor.LastStatusChange = &currTime
+	}
+
+	monitor.LastTested = &currTime
+	monitor.LastRunResult = string(runResult)
+	monitor.Title = queryRes.PolicyTitle
+	monitor.Message = queryRes.PolicyMessage
+	monitor.Severity = queryRes.PolicySeverity
+	monitor.SeverityEnum = models.GetSeverityEnum(queryRes.PolicySeverity)
+	monitor.LastRunResultEnum = models.GetLastRunResultEnum(string(runResult))
+
+	return monitor
+}
+
+func (n *recommender) SetData([]byte) {}

+ 67 - 6
workers/main.go

@@ -4,6 +4,7 @@ package main
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"log"
 	"net/http"
@@ -17,15 +18,23 @@ import (
 	"github.com/joeshaw/envdecode"
 	"github.com/porter-dev/porter/api/server/shared/config/env"
 	"github.com/porter-dev/porter/internal/adapter"
+	"github.com/porter-dev/porter/internal/opa"
+	"github.com/porter-dev/porter/internal/repository"
 	"github.com/porter-dev/porter/internal/worker"
 	"github.com/porter-dev/porter/workers/jobs"
 	"gorm.io/gorm"
+
+	"github.com/porter-dev/porter/ee/integrations/vault"
+	rcreds "github.com/porter-dev/porter/internal/repository/credentials"
+	pgorm "github.com/porter-dev/porter/internal/repository/gorm"
 )
 
 var (
-	jobQueue   chan worker.Job
-	envDecoder = EnvConf{}
-	dbConn     *gorm.DB
+	jobQueue    chan worker.Job
+	envDecoder  = EnvConf{}
+	dbConn      *gorm.DB
+	repo        repository.Repository
+	opaPolicies *opa.KubernetesPolicies
 )
 
 // EnvConf holds the environment variables for this binary
@@ -42,6 +51,10 @@ type EnvConf struct {
 	S3BucketName       string `env:"S3_BUCKET_NAME"`
 	EncryptionKey      string `env:"S3_ENCRYPTION_KEY"`
 
+	OPAConfigFileDir string `env:"OPA_CONFIG_FILE_DIR,default=./internal/opa"`
+
+	LegacyProjectIDs []uint `env:"LEGACY_PROJECT_IDS"`
+
 	Port uint `env:"PORT,default=3000"`
 }
 
@@ -61,6 +74,30 @@ func main() {
 
 	dbConn = db
 
+	var credBackend rcreds.CredentialStorage
+
+	if envDecoder.DBConf.VaultAPIKey != "" && envDecoder.DBConf.VaultServerURL != "" && envDecoder.DBConf.VaultPrefix != "" {
+		credBackend = vault.NewClient(
+			envDecoder.DBConf.VaultServerURL,
+			envDecoder.DBConf.VaultAPIKey,
+			envDecoder.DBConf.VaultPrefix,
+		)
+	}
+
+	var key [32]byte
+
+	for i, b := range []byte(envDecoder.DBConf.EncryptionKey) {
+		key[i] = b
+	}
+
+	repo = pgorm.NewRepository(db, &key, credBackend)
+
+	opaPolicies, err = opa.LoadPolicies(envDecoder.OPAConfigFileDir)
+
+	if err != nil {
+		log.Fatalln(err)
+	}
+
 	jobQueue = make(chan worker.Job, envDecoder.MaxQueue)
 	d := worker.NewDispatcher(int(envDecoder.MaxWorkers))
 
@@ -124,14 +161,21 @@ func httpService() http.Handler {
 	r.Use(middleware.Logger)
 	r.Use(middleware.Recoverer)
 	r.Use(middleware.Heartbeat("/ping"))
-	r.Use(middleware.AllowContentType("application/json"))
+	// r.Use(middleware.AllowContentType("application/json"))
 
 	r.Mount("/debug", middleware.Profiler())
 
 	log.Println("setting up HTTP POST endpoint to enqueue jobs")
 
 	r.Post("/enqueue/{id}", func(w http.ResponseWriter, r *http.Request) {
-		job := getJob(chi.URLParam(r, "id"))
+		req := make(map[string]interface{})
+
+		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+			log.Printf("error decoding request body as json: %v", err)
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+
+		job := getJob(chi.URLParam(r, "id"), req)
 
 		if job == nil {
 			w.WriteHeader(http.StatusNotFound)
@@ -145,7 +189,7 @@ func httpService() http.Handler {
 	return r
 }
 
-func getJob(id string) worker.Job {
+func getJob(id string, input map[string]interface{}) worker.Job {
 	if id == "helm-revisions-count-tracker" {
 		newJob, err := jobs.NewHelmRevisionsCountTracker(dbConn, time.Now().UTC(), &jobs.HelmRevisionsCountTrackerOpts{
 			DBConf:             &envDecoder.DBConf,
@@ -165,6 +209,23 @@ func getJob(id string) worker.Job {
 			return nil
 		}
 
+		return newJob
+	} else if id == "recommender" {
+		newJob, err := jobs.NewRecommender(dbConn, time.Now().UTC(), &jobs.RecommenderOpts{
+			DBConf:           &envDecoder.DBConf,
+			DOClientID:       envDecoder.DOClientID,
+			DOClientSecret:   envDecoder.DOClientSecret,
+			DOScopes:         []string{"read", "write"},
+			ServerURL:        envDecoder.ServerURL,
+			Input:            input,
+			LegacyProjectIDs: envDecoder.LegacyProjectIDs,
+		}, opaPolicies)
+
+		if err != nil {
+			log.Printf("error creating job with ID: recommender. Error: %v", err)
+			return nil
+		}
+
 		return newJob
 	}
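
Since the enqueue endpoint decodes the request body into a map that mapstructure then parses into recommenderInput, the JSON keys are the mapstructure tags. A hedged sketch of triggering the recommender over HTTP (localhost:3000 is an assumption — the worker's PORT defaults to 3000 — and the category names are illustrative):

// A hedged sketch of enqueueing the recommender job over HTTP.
package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// with cluster_id set, exactly one matching project ID must be passed
	payload := map[string]interface{}{
		"projects":   []uint{1},
		"cluster_id": 2,
		"priority":   "high",
		"categories": []string{"prometheus", "nginx"},
	}

	body, err := json.Marshal(payload)

	if err != nil {
		log.Fatalln(err)
	}

	resp, err := http.Post("http://localhost:3000/enqueue/recommender", "application/json", bytes.NewReader(body))

	if err != nil {
		log.Fatalln(err)
	}
	defer resp.Body.Close()

	log.Println("enqueue status:", resp.Status)
}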