Browse Source

add sendgrid incident alerting

Alexander Belanger 3 years ago
parent
commit
ecb2b5e999

+ 51 - 3
api/server/handlers/cluster/notify_new_incident.go

@@ -10,8 +10,11 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/integrations/slack"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/notifier"
+	"github.com/porter-dev/porter/internal/notifier/sendgrid"
+	"github.com/porter-dev/porter/internal/notifier/slack"
+	"github.com/porter-dev/porter/internal/repository"
 )
 
 type NotifyNewIncidentHandler struct {
@@ -61,10 +64,36 @@ func (c *NotifyNewIncidentHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		notifConf = conf.ToNotificationConfigType()
 	}
 
-	notifier := slack.NewIncidentsNotifier(notifConf, slackInts...)
+	users, err := getUsersByProjectID(c.Repo(), cluster.ProjectID)
+
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	notifiers := make([]notifier.IncidentNotifier, 0)
+
+	if c.Config().SlackConf != nil {
+		notifiers = append(notifiers, slack.NewIncidentNotifier(notifConf, slackInts...))
+	}
+
+	if sc := c.Config().ServerConf; sc.SendgridAPIKey != "" && sc.SendgridSenderEmail != "" && sc.SendgridIncidentAlertTemplateID != "" {
+		notifiers = append(notifiers, sendgrid.NewIncidentNotifier(&sendgrid.IncidentNotifierOpts{
+			SharedOpts: &sendgrid.SharedOpts{
+				APIKey:      c.Config().ServerConf.SendgridAPIKey,
+				SenderEmail: c.Config().ServerConf.SendgridSenderEmail,
+			},
+			IncidentAlertTemplateID: sc.SendgridIncidentAlertTemplateID,
+			Users:                   users,
+		}))
+	}
+
+	multi := notifier.NewMultiIncidentNotifier(
+		notifiers...,
+	)
 
 	if !cluster.NotificationsDisabled {
-		err := notifier.NotifyNew(
+		err := multi.NotifyNew(
 			request, fmt.Sprintf(
 				"%s/cluster-dashboard/incidents/%s?namespace=%s",
 				c.Config().ServerConf.ServerURL,
@@ -79,3 +108,22 @@ func (c *NotifyNewIncidentHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		}
 	}
 }
+
+func getUsersByProjectID(repo repository.Repository, projectID uint) ([]*models.User, error) {
+	roles, err := repo.Project().ListProjectRoles(projectID)
+
+	if err != nil {
+		return nil, err
+	}
+
+	roleMap := make(map[uint]*models.Role)
+	idArr := make([]uint, 0)
+
+	for _, role := range roles {
+		roleCp := role
+		roleMap[role.UserID] = &roleCp
+		idArr = append(idArr, role.UserID)
+	}
+
+	return repo.User().ListUsersByIDs(idArr)
+}

+ 2 - 2
api/server/handlers/cluster/notify_resolved_incident.go

@@ -10,8 +10,8 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/integrations/slack"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/notifier/slack"
 )
 
 type NotifyResolvedIncidentHandler struct {
@@ -61,7 +61,7 @@ func (c *NotifyResolvedIncidentHandler) ServeHTTP(w http.ResponseWriter, r *http
 		notifConf = conf.ToNotificationConfigType()
 	}
 
-	notifier := slack.NewIncidentsNotifier(notifConf, slackInts...)
+	notifier := slack.NewIncidentNotifier(notifConf, slackInts...)
 
 	if !cluster.NotificationsDisabled {
 		err := notifier.NotifyResolved(

+ 0 - 409
api/server/handlers/kube_events/create.go

@@ -1,409 +0,0 @@
-package kube_events
-
-import (
-	"errors"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-	"time"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/helm/grapher"
-	"github.com/porter-dev/porter/internal/integrations/slack"
-	"github.com/porter-dev/porter/internal/kubernetes"
-	"github.com/porter-dev/porter/internal/models"
-	"gorm.io/gorm"
-)
-
-type CreateKubeEventHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-func NewCreateKubeEventHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *CreateKubeEventHandler {
-	return &CreateKubeEventHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-func (c *CreateKubeEventHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
-
-	request := &types.CreateKubeEventRequest{}
-
-	if ok := c.DecodeAndValidate(w, r, request); !ok {
-		return
-	}
-
-	// Look for an event matching by the name, namespace, and was last updated within the
-	// grouping threshold time. If so, we append a subevent to the existing event.
-	kubeEvent, err := c.Repo().KubeEvent().ReadEventByGroup(proj.ID, cluster.ID, &types.GroupOptions{
-		Name:          request.Name,
-		Namespace:     request.Namespace,
-		ResourceType:  request.ResourceType,
-		ThresholdTime: time.Now().Add(-15 * time.Minute),
-	})
-
-	foundMatchedEvent := kubeEvent != nil
-
-	if !foundMatchedEvent {
-		kubeEvent, err = c.Repo().KubeEvent().CreateEvent(&models.KubeEvent{
-			ProjectID:    proj.ID,
-			ClusterID:    cluster.ID,
-			ResourceType: request.ResourceType,
-			Name:         request.Name,
-			OwnerType:    request.OwnerType,
-			OwnerName:    request.OwnerName,
-			Namespace:    request.Namespace,
-		})
-
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-			return
-		}
-	}
-
-	// append the subevent to the event
-	err = c.Repo().KubeEvent().AppendSubEvent(kubeEvent, &models.KubeSubEvent{
-		EventType: request.EventType,
-		Message:   request.Message,
-		Reason:    request.Reason,
-		Timestamp: request.Timestamp,
-	})
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	w.WriteHeader(http.StatusCreated)
-
-	if strings.ToLower(string(request.EventType)) == "critical" &&
-		strings.ToLower(request.ResourceType) == "pod" &&
-		request.Message != "Unable to determine the root cause of the error" {
-		agent, err := c.GetAgent(r, cluster, request.Namespace)
-
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-			return
-		}
-
-		err = notifyPodCrashing(c.Config(), agent, proj, cluster, request)
-
-		if err != nil {
-			c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(err))
-		}
-	}
-}
-
-func mapKubeEventToMessage(event *types.CreateKubeEventRequest) string {
-	if strings.HasSuffix(event.Reason, "RunContainerError") {
-		if strings.Contains(event.Message, "exec:") {
-			return fmt.Sprintf("Application launch error: %s\n",
-				strings.Split(strings.SplitAfter(event.Message, "exec: ")[1], ": unknown")[0])
-		}
-	} else if strings.HasSuffix(event.Reason, "ImagePullBackOff") {
-		return "Deployment error: The application image could not be pulled from the registry"
-	}
-
-	return event.Message
-}
-
-func notifyPodCrashing(
-	config *config.Config,
-	agent *kubernetes.Agent,
-	project *models.Project,
-	cluster *models.Cluster,
-	event *types.CreateKubeEventRequest,
-) error {
-	// if cluster has notifications turned off, don't alert
-	if cluster.NotificationsDisabled {
-		return nil
-	}
-
-	// attempt to get a matching Porter release to get the notification configuration
-	var conf *models.NotificationConfig
-	var notifConfig *types.NotificationConfig
-	var notifyOpts *slack.NotifyOpts
-	var matchedRel *models.Release
-	var err error
-
-	if isJob := strings.ToLower(event.OwnerType) == "job"; isJob {
-		// check that the job alert is valid and get proper message
-		jobOwner, jobMsg, jobName, shouldAlert, err := getJobAlert(agent, event.Name, event.Namespace)
-
-		if err != nil {
-			return err
-		} else if !shouldAlert {
-			return nil
-		}
-
-		// look for a matching job notification config
-		jobNC, err := config.Repo.JobNotificationConfig().ReadNotificationConfig(project.ID, cluster.ID, jobName, event.Namespace)
-
-		if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
-			return err
-		}
-
-		if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
-			// if the job notification config does not exist, create it
-			jobNC = &models.JobNotificationConfig{
-				Name:             jobName,
-				Namespace:        event.Namespace,
-				ProjectID:        project.ID,
-				ClusterID:        cluster.ID,
-				LastNotifiedTime: time.Now(),
-			}
-
-			jobNC, err = config.Repo.JobNotificationConfig().CreateNotificationConfig(jobNC)
-
-			if err != nil {
-				return err
-			}
-		} else if err != nil {
-			return err
-		} else if err == nil && jobNC != nil {
-			// If the job notification config does exist, check if the job notification config states that
-			// a notification should happen. If so, notify.
-			if !jobNC.ShouldNotify() {
-				return nil
-			}
-		}
-
-		notifyOpts = &slack.NotifyOpts{
-			ProjectID:   cluster.ProjectID,
-			ClusterID:   cluster.ID,
-			ClusterName: cluster.Name,
-			Name:        jobOwner,
-			Namespace:   event.Namespace,
-			Info:        fmt.Sprintf("%s", jobMsg),
-			Timestamp:   &event.Timestamp,
-			URL: fmt.Sprintf(
-				"%s/jobs/%s/%s/%s?project_id=%d&job=%s",
-				config.ServerConf.ServerURL,
-				cluster.Name,
-				event.Namespace,
-				jobOwner,
-				cluster.ProjectID,
-				jobName,
-			),
-		}
-	} else {
-		matchedRel := getMatchedPorterRelease(config, cluster.ID, event.OwnerName, event.Namespace)
-
-		// for now, we only notify for Porter releases that have been deployed through Porter
-		if matchedRel == nil {
-			return nil
-		}
-
-		conf, err = config.Repo.NotificationConfig().ReadNotificationConfig(matchedRel.NotificationConfig)
-
-		if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
-			conf = &models.NotificationConfig{
-				Enabled: true,
-				Success: true,
-				Failure: true,
-			}
-
-			conf, err = config.Repo.NotificationConfig().CreateNotificationConfig(conf)
-
-			if err != nil {
-				return err
-			}
-
-			if err != nil {
-				return err
-			}
-
-			matchedRel.NotificationConfig = conf.ID
-			matchedRel, err = config.Repo.Release().UpdateRelease(matchedRel)
-
-			if err != nil {
-				return err
-			}
-
-			notifConfig = conf.ToNotificationConfigType()
-		} else if err != nil {
-			return err
-		} else if err == nil && conf != nil {
-			if !conf.ShouldNotify() {
-				return nil
-			}
-
-			notifConfig = conf.ToNotificationConfigType()
-		}
-
-		notifyOpts = &slack.NotifyOpts{
-			ProjectID:   cluster.ProjectID,
-			ClusterID:   cluster.ID,
-			ClusterName: cluster.Name,
-			Name:        event.OwnerName,
-			Namespace:   event.Namespace,
-			Info:        mapKubeEventToMessage(event),
-			URL: fmt.Sprintf(
-				"%s/applications/%s/%s/%s?project_id=%d",
-				config.ServerConf.ServerURL,
-				url.PathEscape(cluster.Name),
-				matchedRel.Namespace,
-				matchedRel.Name,
-				cluster.ProjectID,
-			),
-		}
-	}
-
-	slackInts, _ := config.Repo.SlackIntegration().ListSlackIntegrationsByProjectID(project.ID)
-
-	notifier := slack.NewSlackNotifier(notifConfig, slackInts...)
-	notifyOpts.Status = slack.StatusPodCrashed
-
-	err = notifier.Notify(notifyOpts)
-
-	if err != nil {
-		return err
-	}
-
-	// update the last updated time
-	if matchedRel != nil && conf != nil {
-		conf.LastNotifiedTime = time.Now()
-		conf, err = config.Repo.NotificationConfig().UpdateNotificationConfig(conf)
-	}
-
-	return err
-}
-
-// getMatchedPorterRelease attempts to find a matching Porter release from the name of a controller.
-// For example, if the controller has a suffix "-web", it is likely a Porter web application, and
-// so we query for a Porter release with a matching name. Returns nil if no match is found
-func getMatchedPorterRelease(config *config.Config, clusterID uint, ownerName, namespace string) *models.Release {
-	matchingName := ""
-
-	if strings.Contains(ownerName, "-web") {
-		matchingName = strings.Split(ownerName, "-web")[0]
-	} else if strings.Contains(ownerName, "-worker") {
-		matchingName = strings.Split(ownerName, "-worker")[0]
-	} else if strings.Contains(ownerName, "-job") {
-		matchingName = strings.Split(ownerName, "-job")[0]
-	}
-
-	rel, err := config.Repo.Release().ReadRelease(clusterID, matchingName, namespace)
-
-	if err != nil {
-		return nil
-	}
-
-	return rel
-}
-
-func getJobAlert(agent *kubernetes.Agent, name, namespace string) (
-	ownerName string,
-	msg string,
-	jobName string,
-	shouldAlert bool,
-	err error,
-) {
-	ownerName = ""
-
-	pod, err := agent.GetPodByName(name, namespace)
-
-	// if the pod is not found, we should not alert for this pod
-	if err != nil && errors.Is(err, kubernetes.IsNotFoundError) {
-		return "", "", "", false, nil
-	} else if err != nil {
-		return "", "", "", false, err
-	}
-
-	ownerJobName := ""
-
-	// get the owner name for the pod by looking at the owner reference
-	if ownerRefArr := pod.ObjectMeta.OwnerReferences; len(ownerRefArr) > 0 {
-		for _, ownerRef := range ownerRefArr {
-			if strings.ToLower(ownerRef.Kind) == "job" {
-				ownerJobName = ownerRef.Name
-			}
-		}
-	}
-
-	if ownerJobName == "" {
-		return "", "", "", false, nil
-	}
-
-	// lookup the job in the cluster
-	job, err := agent.GetJob(grapher.Object{
-		Kind:      "Job",
-		Name:      ownerJobName,
-		Namespace: namespace,
-	})
-
-	if err != nil {
-		return "", "", "", false, nil
-	}
-
-	if jobReleaseLabel, exists := job.ObjectMeta.Labels["meta.helm.sh/release-name"]; exists {
-		ownerName = jobReleaseLabel
-	}
-
-	// if we don't have an owner name, don't alert -- the link will be broken
-	if ownerName == "" {
-		return "", "", "", false, nil
-	}
-
-	// only alert for jobs that are newer than 24 hours
-	if podTime := pod.Status.StartTime; podTime != nil && podTime.After(time.Now().Add(-24*time.Hour)) {
-		// find container statuses relating to the actual job container. We don't alert on sidecar containers
-		for _, containerStatus := range pod.Status.ContainerStatuses {
-			if containerStatus.Name != "sidecar" && containerStatus.Name != "cloud-sql-proxy" {
-				state := containerStatus.State
-				if state.Terminated != nil && state.Terminated.ExitCode != 0 {
-					// before alerting, we check pod events to make sure the pod was not moved due to normal behavior such as scale down
-					events, err := agent.ListEvents(name, namespace)
-
-					if err == nil && len(events.Items) > 0 {
-						for _, event := range events.Items {
-							// if event is ScaleDown, don't alert
-							if event.Reason == "ScaleDown" && strings.Contains(event.Message, "deleting pod for node scale down") {
-								return ownerName, "", ownerJobName, false, nil
-							}
-						}
-					}
-
-					// next, if the exit code is 255, we check that the job doesn't have a different associated pod.
-					// exit code 255 can mean this pod was moved to a different node due to node eviction, scaledown,
-					// unhealthy node, etc
-					if state.Terminated.ExitCode == 255 {
-						jobPods, err := agent.GetJobPods(namespace, ownerJobName)
-
-						if err == nil && len(jobPods) > 0 {
-							for _, jobPod := range jobPods {
-								if jobPod.ObjectMeta.Name != name {
-									return ownerName, "", ownerJobName, false, nil
-								}
-							}
-						}
-					}
-
-					msg := fmt.Sprintf("Job terminated with non-zero exit code: exit code %d.", state.Terminated.ExitCode)
-
-					if state.Terminated.Message != "" {
-						msg += fmt.Sprintf(" Error: %s", state.Terminated.Message)
-					}
-
-					return ownerName, msg, ownerJobName, true, nil
-				}
-			}
-		}
-	}
-
-	return "", "", "", false, nil
-}

+ 0 - 46
api/server/handlers/kube_events/get.go

@@ -1,46 +0,0 @@
-package kube_events
-
-import (
-	"net/http"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/server/shared/requestutils"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-)
-
-type GetKubeEventHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-func NewGetKubeEventHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *GetKubeEventHandler {
-	return &GetKubeEventHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-func (c *GetKubeEventHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
-	kubeEventID, _ := requestutils.GetURLParamUint(r, types.URLParamKubeEventID)
-
-	// handle write to the database
-	kubeEvent, err := c.Repo().KubeEvent().ReadEvent(kubeEventID, proj.ID, cluster.ID)
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	c.WriteResult(w, r, kubeEvent.ToKubeEventType())
-}

+ 0 - 96
api/server/handlers/kube_events/get_log_buckets.go

@@ -1,96 +0,0 @@
-package kube_events
-
-import (
-	"fmt"
-	"net/http"
-	"strings"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/server/shared/requestutils"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/kubernetes/porter_agent"
-	"github.com/porter-dev/porter/internal/models"
-)
-
-type GetKubeEventLogBucketsHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-func NewGetKubeEventLogBucketsHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *GetKubeEventLogBucketsHandler {
-	return &GetKubeEventLogBucketsHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-func (c *GetKubeEventLogBucketsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
-	kubeEventID, _ := requestutils.GetURLParamUint(r, types.URLParamKubeEventID)
-
-	kubeEvent, err := c.Repo().KubeEvent().ReadEvent(kubeEventID, proj.ID, cluster.ID)
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	// if the kube event is not a pod type, throw a bad request error to the user
-	if strings.ToLower(kubeEvent.ResourceType) != "pod" {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-			fmt.Errorf("event resource type must be pod to get logs"),
-			http.StatusBadRequest,
-		))
-
-		return
-	}
-
-	req := &types.GetKubeEventLogsRequest{}
-
-	if ok := c.DecodeAndValidate(w, r, req); !ok {
-		return
-	}
-
-	agent, err := c.GetAgent(r, cluster, "")
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	// get agent service
-	agentSvc, err := porter_agent.GetAgentService(agent.Clientset)
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	resp, err := porter_agent.GetLogBucketsFromPorterAgent(agent.Clientset, agentSvc, &porter_agent.LogBucketPathOpts{
-		Pod:       kubeEvent.Name,
-		Namespace: kubeEvent.Namespace,
-	})
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	if resp.Error != "" {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf(resp.Error), http.StatusBadRequest))
-		return
-	}
-
-	c.WriteResult(w, r, &types.GetKubeEventLogBucketsResponse{
-		LogBuckets: resp.AvailableBuckets,
-	})
-}

+ 0 - 97
api/server/handlers/kube_events/get_logs.go

@@ -1,97 +0,0 @@
-package kube_events
-
-import (
-	"fmt"
-	"net/http"
-	"strings"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/server/shared/requestutils"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/kubernetes/porter_agent"
-	"github.com/porter-dev/porter/internal/models"
-)
-
-type GetKubeEventLogsHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-func NewGetKubeEventLogsHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *GetKubeEventLogsHandler {
-	return &GetKubeEventLogsHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-func (c *GetKubeEventLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
-	kubeEventID, _ := requestutils.GetURLParamUint(r, types.URLParamKubeEventID)
-
-	kubeEvent, err := c.Repo().KubeEvent().ReadEvent(kubeEventID, proj.ID, cluster.ID)
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	// if the kube event is not a pod type, throw a bad request error to the user
-	if strings.ToLower(kubeEvent.ResourceType) != "pod" {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-			fmt.Errorf("event resource type must be pod to get logs"),
-			http.StatusBadRequest,
-		))
-
-		return
-	}
-
-	req := &types.GetKubeEventLogsRequest{}
-
-	if ok := c.DecodeAndValidate(w, r, req); !ok {
-		return
-	}
-
-	agent, err := c.GetAgent(r, cluster, "")
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	// get agent service
-	agentSvc, err := porter_agent.GetAgentService(agent.Clientset)
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	resp, err := porter_agent.GetLogsFromPorterAgent(agent.Clientset, agentSvc, &porter_agent.LogPathOpts{
-		Timestamp: req.Timestamp,
-		Pod:       kubeEvent.Name,
-		Namespace: kubeEvent.Namespace,
-	})
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	if resp.Error != "" {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf(resp.Error), http.StatusBadRequest))
-		return
-	}
-
-	c.WriteResult(w, r, &types.GetKubeEventLogsResponse{
-		Logs: resp.Logs,
-	})
-}

+ 0 - 60
api/server/handlers/kube_events/list.go

@@ -1,60 +0,0 @@
-package kube_events
-
-import (
-	"net/http"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-)
-
-type ListKubeEventsHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-func NewListKubeEventsHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *ListKubeEventsHandler {
-	return &ListKubeEventsHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-func (c *ListKubeEventsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
-
-	request := &types.ListKubeEventRequest{}
-
-	if ok := c.DecodeAndValidate(w, r, request); !ok {
-		return
-	}
-
-	kubeEvents, count, err := c.Repo().KubeEvent().ListEventsByProjectID(proj.ID, cluster.ID, request)
-
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	resp := &types.ListKubeEventsResponse{
-		Count:      count,
-		Limit:      request.Limit,
-		Skip:       request.Skip,
-		KubeEvents: []*types.KubeEvent{},
-	}
-
-	for _, kubeEvent := range kubeEvents {
-		resp.KubeEvents = append(resp.KubeEvents, kubeEvent.ToKubeEventType())
-	}
-
-	c.WriteResult(w, r, resp)
-}

+ 8 - 7
api/server/handlers/release/upgrade.go

@@ -14,8 +14,9 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/helm"
-	"github.com/porter-dev/porter/internal/integrations/slack"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/notifier"
+	"github.com/porter-dev/porter/internal/notifier/slack"
 	"github.com/porter-dev/porter/internal/stacks"
 	"helm.sh/helm/v3/pkg/release"
 )
@@ -182,9 +183,9 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		notifConf = conf.ToNotificationConfigType()
 	}
 
-	notifier := slack.NewSlackNotifier(notifConf, slackInts...)
+	deplNotifier := slack.NewDeploymentNotifier(notifConf, slackInts...)
 
-	notifyOpts := &slack.NotifyOpts{
+	notifyOpts := &notifier.NotifyOpts{
 		ProjectID:   cluster.ProjectID,
 		ClusterID:   cluster.ID,
 		ClusterName: cluster.Name,
@@ -201,11 +202,11 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	}
 
 	if upgradeErr != nil {
-		notifyOpts.Status = slack.StatusHelmFailed
+		notifyOpts.Status = notifier.StatusHelmFailed
 		notifyOpts.Info = upgradeErr.Error()
 
 		if !cluster.NotificationsDisabled {
-			notifier.Notify(notifyOpts)
+			deplNotifier.Notify(notifyOpts)
 		}
 
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
@@ -217,11 +218,11 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	}
 
 	if helmRelease.Chart != nil && helmRelease.Chart.Metadata.Name != "job" {
-		notifyOpts.Status = slack.StatusHelmDeployed
+		notifyOpts.Status = notifier.StatusHelmDeployed
 		notifyOpts.Version = helmRelease.Version
 
 		if !cluster.NotificationsDisabled {
-			notifier.Notify(notifyOpts)
+			deplNotifier.Notify(notifyOpts)
 		}
 	}
 

+ 8 - 7
api/server/handlers/release/upgrade_webhook.go

@@ -14,7 +14,8 @@ import (
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/analytics"
 	"github.com/porter-dev/porter/internal/helm"
-	"github.com/porter-dev/porter/internal/integrations/slack"
+	"github.com/porter-dev/porter/internal/notifier"
+	"github.com/porter-dev/porter/internal/notifier/slack"
 	"gorm.io/gorm"
 )
 
@@ -155,9 +156,9 @@ func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		notifConf = conf.ToNotificationConfigType()
 	}
 
-	notifier := slack.NewSlackNotifier(notifConf, slackInts...)
+	deplNotifier := slack.NewDeploymentNotifier(notifConf, slackInts...)
 
-	notifyOpts := &slack.NotifyOpts{
+	notifyOpts := &notifier.NotifyOpts{
 		ProjectID:   release.ProjectID,
 		ClusterID:   cluster.ID,
 		ClusterName: cluster.Name,
@@ -176,11 +177,11 @@ func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	rel, err = helmAgent.UpgradeReleaseByValues(conf, c.Config().DOConf)
 
 	if err != nil {
-		notifyOpts.Status = slack.StatusHelmFailed
+		notifyOpts.Status = notifier.StatusHelmFailed
 		notifyOpts.Info = err.Error()
 
 		if !cluster.NotificationsDisabled {
-			notifier.Notify(notifyOpts)
+			deplNotifier.Notify(notifyOpts)
 		}
 
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
@@ -192,11 +193,11 @@ func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	if rel.Chart != nil && rel.Chart.Metadata.Name != "job" {
-		notifyOpts.Status = slack.StatusHelmDeployed
+		notifyOpts.Status = notifier.StatusHelmDeployed
 		notifyOpts.Version = rel.Version
 
 		if !cluster.NotificationsDisabled {
-			notifier.Notify(notifyOpts)
+			deplNotifier.Notify(notifyOpts)
 		}
 	}
 

+ 8 - 7
api/server/handlers/v1/release/upgrade.go

@@ -15,8 +15,9 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/helm"
-	"github.com/porter-dev/porter/internal/integrations/slack"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/notifier"
+	"github.com/porter-dev/porter/internal/notifier/slack"
 	"helm.sh/helm/v3/pkg/release"
 )
 
@@ -166,9 +167,9 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		notifConf = conf.ToNotificationConfigType()
 	}
 
-	notifier := slack.NewSlackNotifier(notifConf, slackInts...)
+	deplNotifier := slack.NewDeploymentNotifier(notifConf, slackInts...)
 
-	notifyOpts := &slack.NotifyOpts{
+	notifyOpts := &notifier.NotifyOpts{
 		ProjectID:   cluster.ProjectID,
 		ClusterID:   cluster.ID,
 		ClusterName: cluster.Name,
@@ -185,11 +186,11 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	}
 
 	if upgradeErr != nil {
-		notifyOpts.Status = slack.StatusHelmFailed
+		notifyOpts.Status = notifier.StatusHelmFailed
 		notifyOpts.Info = upgradeErr.Error()
 
 		if !cluster.NotificationsDisabled {
-			notifier.Notify(notifyOpts)
+			deplNotifier.Notify(notifyOpts)
 		}
 
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
@@ -201,11 +202,11 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	}
 
 	if helmRelease.Chart != nil && helmRelease.Chart.Metadata.Name != "job" {
-		notifyOpts.Status = slack.StatusHelmDeployed
+		notifyOpts.Status = notifier.StatusHelmDeployed
 		notifyOpts.Version = helmRelease.Version
 
 		if !cluster.NotificationsDisabled {
-			notifier.Notify(notifyOpts)
+			deplNotifier.Notify(notifyOpts)
 		}
 	}
 

+ 0 - 146
api/server/router/cluster.go

@@ -7,7 +7,6 @@ import (
 	"github.com/porter-dev/porter/api/server/handlers/cluster"
 	"github.com/porter-dev/porter/api/server/handlers/database"
 	"github.com/porter-dev/porter/api/server/handlers/environment"
-	"github.com/porter-dev/porter/api/server/handlers/kube_events"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/server/shared/router"
@@ -855,151 +854,6 @@ func getClusterRoutes(
 		Router:   r,
 	})
 
-	// GET /api/projects/{project_id}/clusters/{cluster_id}/kube_events -> kube_events.NewGetKubeEventHandler
-	listKubeEventsEndpoint := factory.NewAPIEndpoint(
-		&types.APIRequestMetadata{
-			Verb:   types.APIVerbGet,
-			Method: types.HTTPVerbGet,
-			Path: &types.Path{
-				Parent:       basePath,
-				RelativePath: relPath + "/kube_events",
-			},
-			Scopes: []types.PermissionScope{
-				types.UserScope,
-				types.ProjectScope,
-				types.ClusterScope,
-			},
-		},
-	)
-
-	listKubeEventsHandler := kube_events.NewListKubeEventsHandler(
-		config,
-		factory.GetDecoderValidator(),
-		factory.GetResultWriter(),
-	)
-
-	routes = append(routes, &router.Route{
-		Endpoint: listKubeEventsEndpoint,
-		Handler:  listKubeEventsHandler,
-		Router:   r,
-	})
-
-	// GET /api/projects/{project_id}/clusters/{cluster_id}/kube_events -> kube_events.NewGetKubeEventHandler
-	getKubeEventEndpoint := factory.NewAPIEndpoint(
-		&types.APIRequestMetadata{
-			Verb:   types.APIVerbGet,
-			Method: types.HTTPVerbGet,
-			Path: &types.Path{
-				Parent:       basePath,
-				RelativePath: fmt.Sprintf("%s/kube_events/{%s}", relPath, types.URLParamKubeEventID),
-			},
-			Scopes: []types.PermissionScope{
-				types.UserScope,
-				types.ProjectScope,
-				types.ClusterScope,
-			},
-		},
-	)
-
-	getKubeEventHandler := kube_events.NewGetKubeEventHandler(
-		config,
-		factory.GetDecoderValidator(),
-		factory.GetResultWriter(),
-	)
-
-	routes = append(routes, &router.Route{
-		Endpoint: getKubeEventEndpoint,
-		Handler:  getKubeEventHandler,
-		Router:   r,
-	})
-
-	// GET /api/projects/{project_id}/clusters/{cluster_id}/kube_events/{kube_event_id}/logs -> kube_events.NewGetKubeEventLogsHandler
-	getKubeEventLogsEndpoint := factory.NewAPIEndpoint(
-		&types.APIRequestMetadata{
-			Verb:   types.APIVerbGet,
-			Method: types.HTTPVerbGet,
-			Path: &types.Path{
-				Parent:       basePath,
-				RelativePath: fmt.Sprintf("%s/kube_events/{%s}/logs", relPath, types.URLParamKubeEventID),
-			},
-			Scopes: []types.PermissionScope{
-				types.UserScope,
-				types.ProjectScope,
-				types.ClusterScope,
-			},
-		},
-	)
-
-	getKubeEventLogsHandler := kube_events.NewGetKubeEventLogsHandler(
-		config,
-		factory.GetDecoderValidator(),
-		factory.GetResultWriter(),
-	)
-
-	routes = append(routes, &router.Route{
-		Endpoint: getKubeEventLogsEndpoint,
-		Handler:  getKubeEventLogsHandler,
-		Router:   r,
-	})
-
-	// GET /api/projects/{project_id}/clusters/{cluster_id}/kube_events/{kube_event_id}/log_buckets -> kube_events.NewGetKubeEventLogBucketsHandler
-	getKubeEventLogBucketsEndpoint := factory.NewAPIEndpoint(
-		&types.APIRequestMetadata{
-			Verb:   types.APIVerbGet,
-			Method: types.HTTPVerbGet,
-			Path: &types.Path{
-				Parent:       basePath,
-				RelativePath: fmt.Sprintf("%s/kube_events/{%s}/log_buckets", relPath, types.URLParamKubeEventID),
-			},
-			Scopes: []types.PermissionScope{
-				types.UserScope,
-				types.ProjectScope,
-				types.ClusterScope,
-			},
-		},
-	)
-
-	getKubeEventLogBucketsHandler := kube_events.NewGetKubeEventLogBucketsHandler(
-		config,
-		factory.GetDecoderValidator(),
-		factory.GetResultWriter(),
-	)
-
-	routes = append(routes, &router.Route{
-		Endpoint: getKubeEventLogBucketsEndpoint,
-		Handler:  getKubeEventLogBucketsHandler,
-		Router:   r,
-	})
-
-	// POST /api/projects/{project_id}/clusters/{cluster_id}/kube_events -> kube_events.NewCreateKubeEventHandler
-	createKubeEventsEndpoint := factory.NewAPIEndpoint(
-		&types.APIRequestMetadata{
-			Verb:   types.APIVerbCreate,
-			Method: types.HTTPVerbPost,
-			Path: &types.Path{
-				Parent:       basePath,
-				RelativePath: relPath + "/kube_events",
-			},
-			Scopes: []types.PermissionScope{
-				types.UserScope,
-				types.ProjectScope,
-				types.ClusterScope,
-			},
-		},
-	)
-
-	createKubeEventsHandler := kube_events.NewCreateKubeEventHandler(
-		config,
-		factory.GetDecoderValidator(),
-		factory.GetResultWriter(),
-	)
-
-	routes = append(routes, &router.Route{
-		Endpoint: createKubeEventsEndpoint,
-		Handler:  createKubeEventsHandler,
-		Router:   r,
-	})
-
 	// GET /api/projects/{project_id}/clusters/{cluster_id}/prometheus/ingresses -> cluster.NewListNGINXIngressesHandler
 	listNGINXIngressesEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{

+ 8 - 6
api/server/shared/config/env/envconfs.go

@@ -52,12 +52,14 @@ type ServerConf struct {
 	GoogleClientSecret     string `env:"GOOGLE_CLIENT_SECRET"`
 	GoogleRestrictedDomain string `env:"GOOGLE_RESTRICTED_DOMAIN"`
 
-	SendgridAPIKey                  string `env:"SENDGRID_API_KEY"`
-	SendgridPWResetTemplateID       string `env:"SENDGRID_PW_RESET_TEMPLATE_ID"`
-	SendgridPWGHTemplateID          string `env:"SENDGRID_PW_GH_TEMPLATE_ID"`
-	SendgridVerifyEmailTemplateID   string `env:"SENDGRID_VERIFY_EMAIL_TEMPLATE_ID"`
-	SendgridProjectInviteTemplateID string `env:"SENDGRID_INVITE_TEMPLATE_ID"`
-	SendgridSenderEmail             string `env:"SENDGRID_SENDER_EMAIL"`
+	SendgridAPIKey                     string `env:"SENDGRID_API_KEY"`
+	SendgridPWResetTemplateID          string `env:"SENDGRID_PW_RESET_TEMPLATE_ID"`
+	SendgridPWGHTemplateID             string `env:"SENDGRID_PW_GH_TEMPLATE_ID"`
+	SendgridVerifyEmailTemplateID      string `env:"SENDGRID_VERIFY_EMAIL_TEMPLATE_ID"`
+	SendgridProjectInviteTemplateID    string `env:"SENDGRID_INVITE_TEMPLATE_ID"`
+	SendgridIncidentAlertTemplateID    string `env:"SENDGRID_INCIDENT_ALERT_TEMPLATE_ID"`
+	SendgridIncidentResolvedTemplateID string `env:"SENDGRID_INCIDENT_RESOLVED_TEMPLATE_ID"`
+	SendgridSenderEmail                string `env:"SENDGRID_SENDER_EMAIL"`
 
 	SlackClientID     string `env:"SLACK_CLIENT_ID"`
 	SlackClientSecret string `env:"SLACK_CLIENT_SECRET"`

+ 5 - 3
api/server/shared/config/loader/loader.go

@@ -107,13 +107,15 @@ func (e *EnvConfigLoader) LoadConfig() (res *config.Config, err error) {
 	res.UserNotifier = &notifier.EmptyUserNotifier{}
 
 	if res.Metadata.Email {
-		res.UserNotifier = sendgrid.NewUserNotifier(&sendgrid.Client{
-			APIKey:                  envConf.ServerConf.SendgridAPIKey,
+		res.UserNotifier = sendgrid.NewUserNotifier(&sendgrid.UserNotifierOpts{
+			SharedOpts: &sendgrid.SharedOpts{
+				APIKey:      envConf.ServerConf.SendgridAPIKey,
+				SenderEmail: envConf.ServerConf.SendgridSenderEmail,
+			},
 			PWResetTemplateID:       envConf.ServerConf.SendgridPWResetTemplateID,
 			PWGHTemplateID:          envConf.ServerConf.SendgridPWGHTemplateID,
 			VerifyEmailTemplateID:   envConf.ServerConf.SendgridVerifyEmailTemplateID,
 			ProjectInviteTemplateID: envConf.ServerConf.SendgridProjectInviteTemplateID,
-			SenderEmail:             envConf.ServerConf.SendgridSenderEmail,
 		})
 	}
 

+ 0 - 272
internal/integrations/slack/notifier.go

@@ -1,272 +0,0 @@
-package slack
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"strings"
-	"time"
-
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models/integrations"
-)
-
-type Notifier interface {
-	Notify(opts *NotifyOpts) error
-}
-
-type DeploymentStatus string
-
-const (
-	StatusHelmDeployed DeploymentStatus = "helm_deployed"
-	StatusPodCrashed   DeploymentStatus = "pod_crashed"
-	StatusHelmFailed   DeploymentStatus = "helm_failed"
-)
-
-type NotifyOpts struct {
-	// ProjectID is the id of the Porter project that this deployment belongs to
-	ProjectID uint
-
-	// ClusterID is the id of the Porter cluster that this deployment belongs to
-	ClusterID uint
-
-	// ClusterName is the name of the cluster that this deployment was deployed in
-	ClusterName string
-
-	// Status is the current status of the deployment.
-	Status DeploymentStatus
-
-	// Info is any additional information about this status, such as an error message if
-	// the deployment failed.
-	Info string
-
-	// Name is the name of the deployment that this notification refers to.
-	Name string
-
-	// Namespace is the Kubernetes namespace of the deployment that this notification refers to.
-	Namespace string
-
-	URL string
-
-	Timestamp *time.Time
-
-	Version int
-}
-
-type SlackNotifier struct {
-	slackInts []*integrations.SlackIntegration
-	Config    *types.NotificationConfig
-}
-
-func NewSlackNotifier(conf *types.NotificationConfig, slackInts ...*integrations.SlackIntegration) Notifier {
-	return &SlackNotifier{
-		slackInts: slackInts,
-		Config:    conf,
-	}
-}
-
-type SlackPayload struct {
-	Blocks []*SlackBlock `json:"blocks"`
-}
-
-type SlackBlock struct {
-	Type string     `json:"type"`
-	Text *SlackText `json:"text,omitempty"`
-}
-
-type SlackText struct {
-	Type string `json:"type"`
-	Text string `json:"text"`
-}
-
-func (s *SlackNotifier) Notify(opts *NotifyOpts) error {
-	if s.Config != nil {
-		if !s.Config.Enabled {
-			return nil
-		}
-		if opts.Status == StatusHelmDeployed && !s.Config.Success {
-			return nil
-		}
-		if opts.Status == StatusPodCrashed && !s.Config.Failure {
-			return nil
-		}
-		if opts.Status == StatusHelmFailed && !s.Config.Failure {
-			return nil
-		}
-	}
-
-	// we create a basic payload as a fallback if the detailed payload with "info" fails, due to
-	// marshaling errors on the Slack API side.
-	blocks, basicBlocks := getSlackBlocks(opts)
-
-	slackPayload := &SlackPayload{
-		Blocks: blocks,
-	}
-
-	basicSlackPayload := &SlackPayload{
-		Blocks: basicBlocks,
-	}
-
-	basicPayload, err := json.Marshal(basicSlackPayload)
-
-	if err != nil {
-		return err
-	}
-
-	payload, err := json.Marshal(slackPayload)
-
-	if err != nil {
-		return err
-	}
-
-	basicReqBody := bytes.NewReader(basicPayload)
-	reqBody := bytes.NewReader(payload)
-	client := &http.Client{
-		Timeout: time.Second * 5,
-	}
-
-	for _, slackInt := range s.slackInts {
-		resp, err := client.Post(string(slackInt.Webhook), "application/json", reqBody)
-
-		if err != nil || resp.StatusCode != 200 {
-			client.Post(string(slackInt.Webhook), "application/json", basicReqBody)
-		}
-	}
-
-	return nil
-}
-
-func getSlackBlocks(opts *NotifyOpts) ([]*SlackBlock, []*SlackBlock) {
-	res := []*SlackBlock{}
-
-	if opts.Status == StatusHelmDeployed || opts.Status == StatusHelmFailed {
-		res = append(res, getHelmMessageBlock(opts))
-	} else if opts.Status == StatusPodCrashed {
-		res = append(res, getPodCrashedMessageBlock(opts))
-	}
-
-	res = append(
-		res,
-		getDividerBlock(),
-		getMarkdownBlock(fmt.Sprintf("*Name:* %s", "`"+opts.Name+"`")),
-		getMarkdownBlock(fmt.Sprintf("*Namespace:* %s", "`"+opts.Namespace+"`")),
-	)
-
-	if opts.Timestamp != nil {
-		res = append(res, getMarkdownBlock(fmt.Sprintf(
-			"*Timestamp:* <!date^%d^Alerted at {date_num} {time_secs}|Alerted at %s>",
-			opts.Timestamp.Unix(),
-			opts.Timestamp.Format("2006-01-02 15:04:05 UTC"),
-		)),
-		)
-	}
-
-	if opts.Status == StatusHelmDeployed || opts.Status == StatusHelmFailed {
-		res = append(res, getMarkdownBlock(fmt.Sprintf("*Version:* %d", opts.Version)))
-	}
-
-	basicRes := res
-
-	infoBlock := getInfoBlock(opts)
-
-	if infoBlock != nil {
-		res = append(res, infoBlock)
-	}
-
-	return res, basicRes
-}
-
-func getDividerBlock() *SlackBlock {
-	return &SlackBlock{
-		Type: "divider",
-	}
-}
-
-func getMarkdownBlock(md string) *SlackBlock {
-	return &SlackBlock{
-		Type: "section",
-		Text: &SlackText{
-			Type: "mrkdwn",
-			Text: md,
-		},
-	}
-}
-
-func getHelmMessageBlock(opts *NotifyOpts) *SlackBlock {
-	var md string
-
-	switch opts.Status {
-	case StatusHelmDeployed:
-		md = getHelmSuccessMessage(opts)
-	case StatusHelmFailed:
-		md = getHelmFailedMessage(opts)
-	}
-
-	return getMarkdownBlock(md)
-}
-
-func getPodCrashedMessageBlock(opts *NotifyOpts) *SlackBlock {
-	md := fmt.Sprintf(
-		":x: Your application %s crashed on Porter. <%s|View the application.>",
-		"`"+opts.Name+"`",
-		opts.URL,
-	)
-
-	return getMarkdownBlock(md)
-}
-
-func getInfoBlock(opts *NotifyOpts) *SlackBlock {
-	var md string
-
-	switch opts.Status {
-	case StatusHelmFailed:
-		md = getFailedInfoMessage(opts)
-	case StatusPodCrashed:
-		md = getFailedInfoMessage(opts)
-	default:
-		return nil
-	}
-
-	return getMarkdownBlock(md)
-}
-
-func getHelmSuccessMessage(opts *NotifyOpts) string {
-	return fmt.Sprintf(
-		":rocket: Your application %s was successfully updated on Porter! <%s|View the new release.>",
-		"`"+opts.Name+"`",
-		opts.URL,
-	)
-}
-
-func getHelmFailedMessage(opts *NotifyOpts) string {
-	return fmt.Sprintf(
-		":x: Your application %s failed to deploy on Porter. <%s|View the status here.>",
-		"`"+opts.Name+"`",
-		opts.URL,
-	)
-}
-
-func getFailedInfoMessage(opts *NotifyOpts) string {
-	info := opts.Info
-
-	// TODO: this casing is quite ugly and looks for particular types of API server
-	// errors, otherwise it truncates the error message to 200 characters. This should
-	// handle the errors more gracefully.
-	if strings.Contains(info, "Invalid value:") {
-		errArr := strings.Split(info, "Invalid value:")
-
-		// look for "unmarshalerDecoder" error
-		if strings.Contains(info, "unmarshalerDecoder") {
-			udArr := strings.Split(info, "unmarshalerDecoder:")
-
-			info = errArr[0] + udArr[1]
-		} else {
-			info = errArr[0] + "..."
-		}
-	} else if len(info) > 200 {
-		info = info[0:200] + "..."
-	}
-
-	return fmt.Sprintf("```\n%s\n```", info)
-}

+ 45 - 0
internal/notifier/deployment_notifier.go

@@ -0,0 +1,45 @@
package notifier

import "time"

// Notifier sends a deployment status notification to an external channel
// (e.g. a Slack incoming webhook). Implementations decide how NotifyOpts
// is rendered and delivered.
type Notifier interface {
	Notify(opts *NotifyOpts) error
}

// DeploymentStatus identifies the deployment lifecycle event that a
// notification reports.
type DeploymentStatus string

const (
	// StatusHelmDeployed indicates a Helm release was deployed successfully.
	StatusHelmDeployed DeploymentStatus = "helm_deployed"
	// StatusPodCrashed indicates a pod belonging to the deployment crashed.
	StatusPodCrashed DeploymentStatus = "pod_crashed"
	// StatusHelmFailed indicates a Helm install/upgrade failed.
	StatusHelmFailed DeploymentStatus = "helm_failed"
)

// NotifyOpts carries all information needed to render a deployment
// notification.
type NotifyOpts struct {
	// ProjectID is the id of the Porter project that this deployment belongs to
	ProjectID uint

	// ClusterID is the id of the Porter cluster that this deployment belongs to
	ClusterID uint

	// ClusterName is the name of the cluster that this deployment was deployed in
	ClusterName string

	// Status is the current status of the deployment.
	Status DeploymentStatus

	// Info is any additional information about this status, such as an error message if
	// the deployment failed.
	Info string

	// Name is the name of the deployment that this notification refers to.
	Name string

	// Namespace is the Kubernetes namespace of the deployment that this notification refers to.
	Namespace string

	// URL is rendered in the notification message as a "View ..." link.
	URL string

	// Timestamp, when non-nil, is shown as the alert time in the message.
	Timestamp *time.Time

	// Version is the Helm release revision; it is only rendered for the
	// StatusHelmDeployed and StatusHelmFailed statuses.
	Version int
}

+ 36 - 0
internal/notifier/incident_notifier.go

@@ -0,0 +1,36 @@
+package notifier
+
+import "github.com/porter-dev/porter/api/types"
+
+type IncidentNotifier interface {
+	NotifyNew(incident *types.Incident, url string) error
+	NotifyResolved(incident *types.Incident, url string) error
+}
+
+type MultiIncidentNotifier struct {
+	notifiers []IncidentNotifier
+}
+
+func NewMultiIncidentNotifier(notifiers ...IncidentNotifier) IncidentNotifier {
+	return &MultiIncidentNotifier{notifiers}
+}
+
+func (m *MultiIncidentNotifier) NotifyNew(incident *types.Incident, url string) error {
+	for _, n := range m.notifiers {
+		if err := n.NotifyNew(incident, url); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *MultiIncidentNotifier) NotifyResolved(incident *types.Incident, url string) error {
+	for _, n := range m.notifiers {
+		if err := n.NotifyResolved(incident, url); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}

+ 6 - 0
internal/notifier/sendgrid/client.go

@@ -0,0 +1,6 @@
package sendgrid

// SharedOpts holds the SendGrid settings shared by every notifier in this
// package: the API key used to authenticate requests and the sender
// ("from") email address for outgoing mail.
type SharedOpts struct {
	APIKey      string
	SenderEmail string
}

+ 69 - 0
internal/notifier/sendgrid/incident_notifier.go

@@ -0,0 +1,69 @@
+package sendgrid
+
+import (
+	"fmt"
+
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/notifier"
+	"github.com/sendgrid/sendgrid-go"
+	"github.com/sendgrid/sendgrid-go/helpers/mail"
+)
+
+type IncidentNotifier struct {
+	opts *IncidentNotifierOpts
+}
+
+type IncidentNotifierOpts struct {
+	*SharedOpts
+	IncidentAlertTemplateID    string
+	IncidentResolvedTemplateID string
+	Users                      []*models.User
+}
+
+func NewIncidentNotifier(opts *IncidentNotifierOpts) notifier.IncidentNotifier {
+	return &IncidentNotifier{opts}
+}
+
+func (s *IncidentNotifier) NotifyNew(incident *types.Incident, url string) error {
+	request := sendgrid.GetRequest(s.opts.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
+	request.Method = "POST"
+
+	addrs := make([]*mail.Email, 0)
+
+	for _, user := range s.opts.Users {
+		addrs = append(addrs, &mail.Email{
+			Address: user.Email,
+		})
+	}
+
+	sgMail := &mail.SGMailV3{
+		Personalizations: []*mail.Personalization{
+			{
+				To: addrs,
+				DynamicTemplateData: map[string]interface{}{
+					"incident_text": incident.Summary,
+					"app_url":       url,
+					"subject":       fmt.Sprintf("Your application %s crashed on Porter", incident.ReleaseName),
+					"preheader":     incident.Summary,
+					"created_at":    fmt.Sprintf("%v", incident.CreatedAt),
+				},
+			},
+		},
+		From: &mail.Email{
+			Address: s.opts.SenderEmail,
+			Name:    "Porter",
+		},
+		TemplateID: s.opts.IncidentAlertTemplateID,
+	}
+
+	request.Body = mail.GetRequestBody(sgMail)
+
+	_, err := sendgrid.API(request)
+
+	return err
+}
+
+func (s *IncidentNotifier) NotifyResolved(incident *types.Incident, url string) error {
+	return nil
+}

+ 17 - 18
internal/notifier/sendgrid/sendgrid.go → internal/notifier/sendgrid/user_notifier.go

@@ -7,24 +7,23 @@ import (
 )
 
 type UserNotifier struct {
-	client *Client
+	opts *UserNotifierOpts
 }
 
-type Client struct {
-	APIKey                  string
+type UserNotifierOpts struct {
+	*SharedOpts
 	PWResetTemplateID       string
 	PWGHTemplateID          string
 	VerifyEmailTemplateID   string
 	ProjectInviteTemplateID string
-	SenderEmail             string
 }
 
-func NewUserNotifier(client *Client) notifier.UserNotifier {
-	return &UserNotifier{client}
+func NewUserNotifier(opts *UserNotifierOpts) notifier.UserNotifier {
+	return &UserNotifier{opts}
 }
 
 func (s *UserNotifier) SendPasswordResetEmail(opts *notifier.SendPasswordResetEmailOpts) error {
-	request := sendgrid.GetRequest(s.client.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
+	request := sendgrid.GetRequest(s.opts.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
 	request.Method = "POST"
 
 	sgMail := &mail.SGMailV3{
@@ -42,10 +41,10 @@ func (s *UserNotifier) SendPasswordResetEmail(opts *notifier.SendPasswordResetEm
 			},
 		},
 		From: &mail.Email{
-			Address: s.client.SenderEmail,
+			Address: s.opts.SenderEmail,
 			Name:    "Porter",
 		},
-		TemplateID: s.client.PWResetTemplateID,
+		TemplateID: s.opts.PWResetTemplateID,
 	}
 
 	request.Body = mail.GetRequestBody(sgMail)
@@ -56,7 +55,7 @@ func (s *UserNotifier) SendPasswordResetEmail(opts *notifier.SendPasswordResetEm
 }
 
 func (s *UserNotifier) SendGithubRelinkEmail(opts *notifier.SendGithubRelinkEmailOpts) error {
-	request := sendgrid.GetRequest(s.client.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
+	request := sendgrid.GetRequest(s.opts.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
 	request.Method = "POST"
 
 	sgMail := &mail.SGMailV3{
@@ -74,10 +73,10 @@ func (s *UserNotifier) SendGithubRelinkEmail(opts *notifier.SendGithubRelinkEmai
 			},
 		},
 		From: &mail.Email{
-			Address: s.client.SenderEmail,
+			Address: s.opts.SenderEmail,
 			Name:    "Porter",
 		},
-		TemplateID: s.client.PWGHTemplateID,
+		TemplateID: s.opts.PWGHTemplateID,
 	}
 
 	request.Body = mail.GetRequestBody(sgMail)
@@ -88,7 +87,7 @@ func (s *UserNotifier) SendGithubRelinkEmail(opts *notifier.SendGithubRelinkEmai
 }
 
 func (s *UserNotifier) SendEmailVerification(opts *notifier.SendEmailVerificationOpts) error {
-	request := sendgrid.GetRequest(s.client.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
+	request := sendgrid.GetRequest(s.opts.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
 	request.Method = "POST"
 
 	sgMail := &mail.SGMailV3{
@@ -106,10 +105,10 @@ func (s *UserNotifier) SendEmailVerification(opts *notifier.SendEmailVerificatio
 			},
 		},
 		From: &mail.Email{
-			Address: s.client.SenderEmail,
+			Address: s.opts.SenderEmail,
 			Name:    "Porter",
 		},
-		TemplateID: s.client.VerifyEmailTemplateID,
+		TemplateID: s.opts.VerifyEmailTemplateID,
 	}
 
 	request.Body = mail.GetRequestBody(sgMail)
@@ -120,7 +119,7 @@ func (s *UserNotifier) SendEmailVerification(opts *notifier.SendEmailVerificatio
 }
 
 func (s *UserNotifier) SendProjectInviteEmail(opts *notifier.SendProjectInviteEmailOpts) error {
-	request := sendgrid.GetRequest(s.client.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
+	request := sendgrid.GetRequest(s.opts.APIKey, "/v3/mail/send", "https://api.sendgrid.com")
 	request.Method = "POST"
 
 	sgMail := &mail.SGMailV3{
@@ -139,10 +138,10 @@ func (s *UserNotifier) SendProjectInviteEmail(opts *notifier.SendProjectInviteEm
 			},
 		},
 		From: &mail.Email{
-			Address: s.client.SenderEmail,
+			Address: s.opts.SenderEmail,
 			Name:    "Porter",
 		},
-		TemplateID: s.client.ProjectInviteTemplateID,
+		TemplateID: s.opts.ProjectInviteTemplateID,
 	}
 
 	request.Body = mail.GetRequestBody(sgMail)

+ 95 - 0
internal/notifier/slack/deployment_notifier.go

@@ -0,0 +1,95 @@
+package slack
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"time"
+
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models/integrations"
+	"github.com/porter-dev/porter/internal/notifier"
+)
+
+type DeploymentNotifier struct {
+	slackInts []*integrations.SlackIntegration
+	Config    *types.NotificationConfig
+}
+
+func NewDeploymentNotifier(conf *types.NotificationConfig, slackInts ...*integrations.SlackIntegration) *DeploymentNotifier {
+	return &DeploymentNotifier{
+		slackInts: slackInts,
+		Config:    conf,
+	}
+}
+
+type SlackPayload struct {
+	Blocks []*SlackBlock `json:"blocks"`
+}
+
+type SlackBlock struct {
+	Type string     `json:"type"`
+	Text *SlackText `json:"text,omitempty"`
+}
+
+type SlackText struct {
+	Type string `json:"type"`
+	Text string `json:"text"`
+}
+
+func (s *DeploymentNotifier) Notify(opts *notifier.NotifyOpts) error {
+	if s.Config != nil {
+		if !s.Config.Enabled {
+			return nil
+		}
+		if opts.Status == notifier.StatusHelmDeployed && !s.Config.Success {
+			return nil
+		}
+		if opts.Status == notifier.StatusPodCrashed && !s.Config.Failure {
+			return nil
+		}
+		if opts.Status == notifier.StatusHelmFailed && !s.Config.Failure {
+			return nil
+		}
+	}
+
+	// we create a basic payload as a fallback if the detailed payload with "info" fails, due to
+	// marshaling errors on the Slack API side.
+	blocks, basicBlocks := getSlackBlocks(opts)
+
+	slackPayload := &SlackPayload{
+		Blocks: blocks,
+	}
+
+	basicSlackPayload := &SlackPayload{
+		Blocks: basicBlocks,
+	}
+
+	basicPayload, err := json.Marshal(basicSlackPayload)
+
+	if err != nil {
+		return err
+	}
+
+	payload, err := json.Marshal(slackPayload)
+
+	if err != nil {
+		return err
+	}
+
+	basicReqBody := bytes.NewReader(basicPayload)
+	reqBody := bytes.NewReader(payload)
+	client := &http.Client{
+		Timeout: time.Second * 5,
+	}
+
+	for _, slackInt := range s.slackInts {
+		resp, err := client.Post(string(slackInt.Webhook), "application/json", reqBody)
+
+		if err != nil || resp.StatusCode != 200 {
+			client.Post(string(slackInt.Webhook), "application/json", basicReqBody)
+		}
+	}
+
+	return nil
+}

+ 142 - 0
internal/notifier/slack/helpers.go

@@ -0,0 +1,142 @@
+package slack
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/porter-dev/porter/internal/notifier"
+)
+
+func getSlackBlocks(opts *notifier.NotifyOpts) ([]*SlackBlock, []*SlackBlock) {
+	res := []*SlackBlock{}
+
+	if opts.Status == notifier.StatusHelmDeployed || opts.Status == notifier.StatusHelmFailed {
+		res = append(res, getHelmMessageBlock(opts))
+	} else if opts.Status == notifier.StatusPodCrashed {
+		res = append(res, getPodCrashedMessageBlock(opts))
+	}
+
+	res = append(
+		res,
+		getDividerBlock(),
+		getMarkdownBlock(fmt.Sprintf("*Name:* %s", "`"+opts.Name+"`")),
+		getMarkdownBlock(fmt.Sprintf("*Namespace:* %s", "`"+opts.Namespace+"`")),
+	)
+
+	if opts.Timestamp != nil {
+		res = append(res, getMarkdownBlock(fmt.Sprintf(
+			"*Timestamp:* <!date^%d^Alerted at {date_num} {time_secs}|Alerted at %s>",
+			opts.Timestamp.Unix(),
+			opts.Timestamp.Format("2006-01-02 15:04:05 UTC"),
+		)),
+		)
+	}
+
+	if opts.Status == notifier.StatusHelmDeployed || opts.Status == notifier.StatusHelmFailed {
+		res = append(res, getMarkdownBlock(fmt.Sprintf("*Version:* %d", opts.Version)))
+	}
+
+	basicRes := res
+
+	infoBlock := getInfoBlock(opts)
+
+	if infoBlock != nil {
+		res = append(res, infoBlock)
+	}
+
+	return res, basicRes
+}
+
+func getDividerBlock() *SlackBlock {
+	return &SlackBlock{
+		Type: "divider",
+	}
+}
+
+func getMarkdownBlock(md string) *SlackBlock {
+	return &SlackBlock{
+		Type: "section",
+		Text: &SlackText{
+			Type: "mrkdwn",
+			Text: md,
+		},
+	}
+}
+
+func getHelmMessageBlock(opts *notifier.NotifyOpts) *SlackBlock {
+	var md string
+
+	switch opts.Status {
+	case notifier.StatusHelmDeployed:
+		md = getHelmSuccessMessage(opts)
+	case notifier.StatusHelmFailed:
+		md = getHelmFailedMessage(opts)
+	}
+
+	return getMarkdownBlock(md)
+}
+
+func getPodCrashedMessageBlock(opts *notifier.NotifyOpts) *SlackBlock {
+	md := fmt.Sprintf(
+		":x: Your application %s crashed on Porter. <%s|View the application.>",
+		"`"+opts.Name+"`",
+		opts.URL,
+	)
+
+	return getMarkdownBlock(md)
+}
+
+func getInfoBlock(opts *notifier.NotifyOpts) *SlackBlock {
+	var md string
+
+	switch opts.Status {
+	case notifier.StatusHelmFailed:
+		md = getFailedInfoMessage(opts)
+	case notifier.StatusPodCrashed:
+		md = getFailedInfoMessage(opts)
+	default:
+		return nil
+	}
+
+	return getMarkdownBlock(md)
+}
+
+func getHelmSuccessMessage(opts *notifier.NotifyOpts) string {
+	return fmt.Sprintf(
+		":rocket: Your application %s was successfully updated on Porter! <%s|View the new release.>",
+		"`"+opts.Name+"`",
+		opts.URL,
+	)
+}
+
+func getHelmFailedMessage(opts *notifier.NotifyOpts) string {
+	return fmt.Sprintf(
+		":x: Your application %s failed to deploy on Porter. <%s|View the status here.>",
+		"`"+opts.Name+"`",
+		opts.URL,
+	)
+}
+
+func getFailedInfoMessage(opts *notifier.NotifyOpts) string {
+	info := opts.Info
+
+	// TODO: this casing is quite ugly and looks for particular types of API server
+	// errors, otherwise it truncates the error message to 200 characters. This should
+	// handle the errors more gracefully.
+	if strings.Contains(info, "Invalid value:") {
+		errArr := strings.Split(info, "Invalid value:")
+
+		// look for "unmarshalerDecoder" error
+		if strings.Contains(info, "unmarshalerDecoder") {
+			udArr := strings.Split(info, "unmarshalerDecoder:")
+
+			info = errArr[0] + udArr[1]
+		} else {
+			info = errArr[0] + "..."
+		}
+	} else if len(info) > 200 {
+		info = info[0:200] + "..."
+	}
+
+	return fmt.Sprintf("```\n%s\n```", info)
+}

+ 5 - 5
internal/integrations/slack/incidents_notifier.go → internal/notifier/slack/incident_notifier.go

@@ -11,19 +11,19 @@ import (
 	"github.com/porter-dev/porter/internal/models/integrations"
 )
 
-type IncidentsNotifier struct {
+type IncidentNotifier struct {
 	slackInts []*integrations.SlackIntegration
 	Config    *types.NotificationConfig
 }
 
-func NewIncidentsNotifier(conf *types.NotificationConfig, slackInts ...*integrations.SlackIntegration) *IncidentsNotifier {
-	return &IncidentsNotifier{
+func NewIncidentNotifier(conf *types.NotificationConfig, slackInts ...*integrations.SlackIntegration) *IncidentNotifier {
+	return &IncidentNotifier{
 		slackInts: slackInts,
 		Config:    conf,
 	}
 }
 
-func (s *IncidentsNotifier) NotifyNew(incident *types.Incident, url string) error {
+func (s *IncidentNotifier) NotifyNew(incident *types.Incident, url string) error {
 	res := []*SlackBlock{}
 
 	topSectionMarkdwn := fmt.Sprintf(
@@ -74,7 +74,7 @@ func (s *IncidentsNotifier) NotifyNew(incident *types.Incident, url string) erro
 	return nil
 }
 
-func (s *IncidentsNotifier) NotifyResolved(incident *types.Incident, url string) error {
+func (s *IncidentNotifier) NotifyResolved(incident *types.Incident, url string) error {
 	res := []*SlackBlock{}
 
 	createdAt := incident.CreatedAt

+ 0 - 0
internal/notifier/notifier.go → internal/notifier/user_notifier.go