package scheduler_k3s

import (
	"bufio"
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
	"time"

	appjson "github.com/dokku/dokku/plugins/app-json"
	"github.com/dokku/dokku/plugins/common"
	"github.com/dokku/dokku/plugins/config"
	"github.com/dokku/dokku/plugins/cron"
	"github.com/fatih/color"
	"github.com/kballard/go-shellquote"
	"github.com/rancher/wharfie/pkg/registries"
	"github.com/ryanuber/columnize"
	orderedmap "github.com/wk8/go-ordered-map/v2"
	"gopkg.in/yaml.v3"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/utils/ptr"
)

// TriggerInstall runs the install step for the scheduler-k3s plugin
func TriggerInstall() error {
	if err := common.PropertySetup("scheduler-k3s"); err != nil {
		return fmt.Errorf("Unable to install the scheduler-k3s plugin: %s", err.Error())
	}

	return nil
}

// TriggerPostAppCloneSetup creates new scheduler-k3s files
func TriggerPostAppCloneSetup(oldAppName string, newAppName string) error {
	err := common.PropertyClone("scheduler-k3s", oldAppName, newAppName)
	if err != nil {
		return err
	}

	return nil
}

// TriggerPostAppRenameSetup renames scheduler-k3s files
func TriggerPostAppRenameSetup(oldAppName string, newAppName string) error {
	if err := common.PropertyClone("scheduler-k3s", oldAppName, newAppName); err != nil {
		return err
	}

	if err := common.PropertyDestroy("scheduler-k3s", oldAppName); err != nil {
		return err
	}

	return nil
}

// TriggerPostDelete destroys the scheduler-k3s data for a given app container
func TriggerPostDelete(appName string) error {
	dataErr := common.RemoveAppDataDirectory("scheduler-k3s", appName)
	propertyErr := common.PropertyDestroy("scheduler-k3s", appName)

	if dataErr != nil {
		return dataErr
	}

	return propertyErr
}

// TriggerPostRegistryLogin updates the `/etc/rancher/k3s/registries.yaml` to include
// auth information for the registry. Note that if the file does not exist, it won't be updated.
func TriggerPostRegistryLogin(server string, username string, password string) error {
	if !common.FileExists("/usr/local/bin/k3s") {
		return nil
	}

	registry := registries.Registry{}
	registryFile := "/etc/rancher/k3s/registries.yaml"
	yamlFile, err := os.ReadFile(registryFile)
	if err != nil {
		return fmt.Errorf("Unable to read existing registries.yaml: %w", err)
	}

	err = yaml.Unmarshal(yamlFile, &registry)
	if err != nil {
		return fmt.Errorf("Unable to unmarshal registry configuration from yaml: %w", err)
	}

	common.LogInfo1("Updating k3s configuration")
	if registry.Auths == nil {
		registry.Auths = map[string]registries.AuthConfig{}
	}

	if server == "docker.io" {
		server = "registry-1.docker.io"
	}

	registry.Auths[server] = registries.AuthConfig{
		Username: username,
		Password: password,
	}

	data, err := yaml.Marshal(&registry)
	if err != nil {
		return fmt.Errorf("Unable to marshal registry configuration to yaml: %w", err)
	}

	if err := os.WriteFile(registryFile, data, os.FileMode(0644)); err != nil {
		return fmt.Errorf("Unable to write registry configuration to file: %w", err)
	}

	// todo: auth against all nodes in cluster
	return nil
}

// TriggerSchedulerDeploy deploys an image tag for a given application
func TriggerSchedulerDeploy(scheduler string, appName string, imageTag string) error {
	if scheduler != "k3s" {
		return nil
	}
	s, err := common.PlugnTriggerOutput("ps-current-scale", []string{appName}...)
	if err != nil {
		return err
	}

	processes, err := common.ParseScaleOutput(s)
	if err != nil {
		return err
	}

	ctx := context.Background()
	namespace := common.PropertyGetDefault("scheduler-k3s", appName, "namespace", "default")
	if err := createKubernetesNamespace(ctx, namespace); err != nil {
		return fmt.Errorf("Error creating kubernetes namespace for deployment: %w", err)
	}

	image, err := common.GetDeployingAppImageName(appName, imageTag, "")
	if err != nil {
		return fmt.Errorf("Error getting deploying app image name: %w", err)
	}

	deployTimeout := common.PropertyGetDefault("scheduler-k3s", appName, "deploy-timeout", "300s")
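	// A bare integer value is treated as a number of seconds and normalized
	// into a duration string so it parses with time.ParseDuration below.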
	if _, err := strconv.Atoi(deployTimeout); err == nil {
		deployTimeout = fmt.Sprintf("%ss", deployTimeout)
	}

	deployRollback := common.PropertyGetDefault("scheduler-k3s", appName, "rollback-on-failure", "false")
	allowRollbacks, err := strconv.ParseBool(deployRollback)
	if err != nil {
		return fmt.Errorf("Error parsing rollback-on-failure value as boolean: %w", err)
	}

	imagePullSecrets := common.PropertyGetDefault("scheduler-k3s", appName, "image-pull-secrets", "")

	imageSourceType := "dockerfile"
	if common.IsImageCnbBased(image) {
		imageSourceType = "pack"
	} else if common.IsImageHerokuishBased(image, appName) {
		imageSourceType = "herokuish"
	}

	env, err := config.LoadMergedAppEnv(appName)
	if err != nil {
		return fmt.Errorf("Error loading environment for deployment: %w", err)
	}

	chartDir, err := os.MkdirTemp("", "dokku-chart-")
	if err != nil {
		return fmt.Errorf("Error creating chart directory: %w", err)
	}
	defer os.RemoveAll(chartDir)

	common.LogDebug(fmt.Sprintf("Using chart directory: %s", chartDir))
	if err := os.MkdirAll(filepath.Join(chartDir, "templates"), os.FileMode(0755)); err != nil {
		return fmt.Errorf("Error creating chart templates directory: %w", err)
	}

	deploymentId := time.Now().Unix()
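
	// Rendered resources carry literal placeholder strings that are later
	// swapped for Helm template expressions, since the typed Kubernetes
	// structs cannot hold template syntax directly.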
	replacements := orderedmap.New[string, string]()
	replacements.Set("DEPLOYMENT_ID_QUOTED", "{{.Values.deploment_id | quote}}")
	replacements.Set("DEPLOYMENT_ID", "{{.Values.deploment_id}}")

	secret := templateKubernetesSecret(Secret{
		AppName:   appName,
		Env:       env.Map(),
		Namespace: namespace,
	})
	err = writeResourceToFile(WriteResourceInput{
		Object:       &secret,
		Path:         filepath.Join(chartDir, "templates/secret.yaml"),
		Replacements: replacements,
		AppendContents: `{{- with .Values.secrets }}
data:
{{- toYaml . | nindent 2 }}
{{- end }}
`,
	})
	if err != nil {
		return fmt.Errorf("Error writing secret: %w", err)
	}

	portMaps, err := getPortMaps(appName)
	if err != nil {
		return fmt.Errorf("Error getting port mappings for deployment: %w", err)
	}

	primaryPort := int32(5000)
	for _, portMap := range portMaps {
		primaryPort = portMap.ContainerPort
		if primaryPort != 0 {
			break
		}
	}

	appJSON, err := appjson.GetAppJSON(appName)
	if err != nil {
		return fmt.Errorf("Error getting app.json for deployment: %w", err)
	}

	workingDir := common.GetWorkingDir(appName, image)
	deployments := map[string]appsv1.Deployment{}
	i := 0
	for processType := range processes {
		startCommand, err := getStartCommand(StartCommandInput{
			AppName:         appName,
			ProcessType:     processType,
			ImageSourceType: imageSourceType,
			Port:            primaryPort,
			Env:             env.Map(),
		})
		if err != nil {
			return fmt.Errorf("Error getting start command for deployment: %w", err)
		}

		i++
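
		// Use a unique sentinel replica count per process type; the literal
		// "replicas: N" line is swapped for a Helm values reference once the
		// deployment has been serialized.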
		replicaCountPlaceholder := int32(i * 1000)

		healthchecks, ok := appJSON.Healthchecks[processType]
		if !ok {
			healthchecks = []appjson.Healthcheck{}
		}

		// todo: implement deployment annotations
		// todo: implement pod annotations
		// todo: implement volumes
		deployment, err := templateKubernetesDeployment(Deployment{
			AppName:          appName,
			Command:          startCommand.Command,
			Image:            image,
			ImagePullSecrets: imagePullSecrets,
			ImageSourceType:  imageSourceType,
			Healthchecks:     healthchecks,
			Namespace:        namespace,
			PrimaryPort:      primaryPort,
			PortMaps:         portMaps,
			ProcessType:      processType,
			Replicas:         replicaCountPlaceholder,
			WorkingDir:       workingDir,
		})
		if err != nil {
			return fmt.Errorf("Error templating deployment: %w", err)
		}

		replacements.Set(fmt.Sprintf("replicas: %d", replicaCountPlaceholder), fmt.Sprintf("replicas: {{.Values.processes.%s.replicas}}", processType))
		deployments[processType] = deployment
		err = writeResourceToFile(WriteResourceInput{
			Object:       &deployment,
			Path:         filepath.Join(chartDir, fmt.Sprintf("templates/deployment-%s.yaml", deployment.Name)),
			Replacements: replacements,
		})
		if err != nil {
			return fmt.Errorf("Error writing deployment: %w", err)
		}

		replacements.Delete(fmt.Sprintf("replicas: %d", replicaCountPlaceholder))
	}

	cronEntries, err := cron.FetchCronEntries(appName)
	if err != nil {
		return fmt.Errorf("Error fetching cron entries: %w", err)
	}

	clientset, err := NewKubernetesClient()
	if err != nil {
		return fmt.Errorf("Error creating kubernetes client: %w", err)
	}

	cronJobs, err := clientset.ListCronJobs(ctx, ListCronJobsInput{
		LabelSelector: fmt.Sprintf("dokku.com/app-name=%s", appName),
		Namespace:     namespace,
	})
	if err != nil {
		return fmt.Errorf("Error listing cron jobs: %w", err)
	}

	for _, cronEntry := range cronEntries {
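		// Carry over the job suffix from any existing cron job with a
		// matching cron ID, presumably so the generated job name stays
		// stable across deploys.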
		suffix := ""
		for _, cronJob := range cronJobs {
			if cronJob.Labels["dokku.com/cron-id"] == cronEntry.ID {
				var ok bool
				suffix, ok = cronJob.Annotations["dokku.com/job-suffix"]
				if !ok {
					suffix = ""
				}
			}
		}

		words, err := shellquote.Split(cronEntry.Command)
		if err != nil {
			return fmt.Errorf("Error parsing cron command: %w", err)
		}
		cronJob, err := templateKubernetesCronJob(Job{
			AppName:          appName,
			Command:          words,
			Env:              map[string]string{},
			ID:               cronEntry.ID,
			Image:            image,
			ImagePullSecrets: imagePullSecrets,
			ImageSourceType:  imageSourceType,
			Namespace:        namespace,
			ProcessType:      "cron",
			Schedule:         cronEntry.Schedule,
			Suffix:           suffix,
			WorkingDir:       workingDir,
		})
		if err != nil {
			return fmt.Errorf("Error templating cron job: %w", err)
		}

		err = writeResourceToFile(WriteResourceInput{
			Object:       &cronJob,
			Path:         filepath.Join(chartDir, fmt.Sprintf("templates/cron-job-%s.yaml", cronEntry.ID)),
			Replacements: replacements,
		})
		if err != nil {
			return fmt.Errorf("Error writing cron job: %w", err)
		}
	}

	if deployment, ok := deployments["web"]; ok {
		service := templateKubernetesService(Service{
			AppName:   appName,
			Namespace: namespace,
			PortMaps:  portMaps,
		})

		err := writeResourceToFile(WriteResourceInput{
			Object:       &service,
			Path:         filepath.Join(chartDir, "templates/service-web.yaml"),
			Replacements: replacements,
		})
		if err != nil {
			return fmt.Errorf("Error writing service: %w", err)
		}

		err = createIngressRoutesFiles(CreateIngressRoutesInput{
			AppName:    appName,
			ChartDir:   chartDir,
			Deployment: deployment,
			Namespace:  namespace,
			PortMaps:   portMaps,
			Service:    service,
		})
		if err != nil {
			return fmt.Errorf("Error creating ingress routes: %w", err)
		}
	}
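
	// The chart version embeds the deployment timestamp, so every deploy
	// produces a new, monotonically increasing release version.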
	chart := &Chart{
		ApiVersion: "v2",
		AppVersion: "1.0.0",
		Name:       appName,
		Version:    fmt.Sprintf("0.0.%d", deploymentId),
	}

	err = writeYaml(WriteYamlInput{
		Object: chart,
		Path:   filepath.Join(chartDir, "Chart.yaml"),
	})
	if err != nil {
		return fmt.Errorf("Error writing chart: %w", err)
	}

	values := &Values{
		DeploymentID: fmt.Sprint(deploymentId),
		Secrets:      map[string]string{},
		Processes:    map[string]ValuesProcess{},
	}
	for processType, processCount := range processes {
		values.Processes[processType] = ValuesProcess{
			Replicas: int32(processCount),
		}
	}
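
	// Values destined for the Kubernetes Secret are base64-encoded here
	// because the chart template writes them straight into the Secret's
	// data field, which expects base64-encoded content.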
	for key, value := range env.Map() {
		values.Secrets[key] = base64.StdEncoding.EncodeToString([]byte(value))
	}

	err = writeYaml(WriteYamlInput{
		Object: values,
		Path:   filepath.Join(chartDir, "values.yaml"),
	})
	if err != nil {
		return fmt.Errorf("Error writing values: %w", err)
	}

	helmAgent, err := NewHelmAgent(namespace, DeployLogPrinter)
	if err != nil {
		return fmt.Errorf("Error creating helm agent: %w", err)
	}

	chartPath, err := filepath.Abs(chartDir)
	if err != nil {
		return fmt.Errorf("Error getting chart path: %w", err)
	}

	timeoutDuration, err := time.ParseDuration(deployTimeout)
	if err != nil {
		return fmt.Errorf("Error parsing deploy timeout duration: %w", err)
	}

	err = helmAgent.InstallOrUpgradeChart(ChartInput{
		ChartPath:         chartPath,
		Namespace:         namespace,
		ReleaseName:       fmt.Sprintf("dokku-%s", appName),
		RollbackOnFailure: allowRollbacks,
		Timeout:           timeoutDuration,
	})
	if err != nil {
		return err
	}

	common.LogInfo1("Running post-deploy")
	_, err = common.CallPlugnTrigger(common.PlugnTriggerInput{
		Args:          []string{appName, "", "", imageTag},
		CaptureOutput: false,
		StreamStdio:   true,
		Trigger:       "core-post-deploy",
	})
	if err != nil {
		return fmt.Errorf("Error running core-post-deploy: %w", err)
	}
	_, err = common.CallPlugnTrigger(common.PlugnTriggerInput{
		Args:          []string{appName, "", "", imageTag},
		CaptureOutput: false,
		StreamStdio:   true,
		Trigger:       "post-deploy",
	})
	if err != nil {
		return fmt.Errorf("Error running post-deploy: %w", err)
	}

	return nil
}

// TriggerSchedulerEnter enters a container for a given application
func TriggerSchedulerEnter(scheduler string, appName string, processType string, podName string, args []string) error {
	if scheduler != "k3s" {
		return nil
	}

	common.LogDebug(fmt.Sprintf("%s %s %s %s", scheduler, appName, processType, podName))
	clientset, err := NewKubernetesClient()
	if err != nil {
		return fmt.Errorf("Error creating kubernetes client: %w", err)
	}
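
	// Cancel the context on the usual termination signals so the exec
	// session is torn down cleanly when the user interrupts it.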
	ctx, cancel := context.WithCancel(context.Background())
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGQUIT,
		syscall.SIGTERM)
	go func() {
		<-signals
		cancel()
	}()

	namespace := common.PropertyGetDefault("scheduler-k3s", appName, "namespace", "default")

	labelSelector := []string{fmt.Sprintf("dokku.com/app-name=%s", appName)}
	processIndex := 1
	if processType != "" {
		parts := strings.SplitN(processType, ".", 2)
		if len(parts) == 2 {
			processType = parts[0]
			processIndex, err = strconv.Atoi(parts[1])
			if err != nil {
				return fmt.Errorf("Error parsing process index: %w", err)
			}
		}
		labelSelector = append(labelSelector, fmt.Sprintf("dokku.com/process-type=%s", processType))
	}

	pods, err := clientset.ListPods(ctx, ListPodsInput{
		Namespace:     namespace,
		LabelSelector: strings.Join(labelSelector, ","),
	})
	if err != nil {
		return fmt.Errorf("Error listing pods: %w", err)
	}

	if len(pods) == 0 {
		return fmt.Errorf("No pods found for app %s", appName)
	}

	processIndex--
	if processIndex >= len(pods) {
		return fmt.Errorf("Process index %d out of range for app %s", processIndex, appName)
	}

	var selectedPod corev1.Pod
	if podName != "" {
		for _, pod := range pods {
			if pod.Name == podName {
				selectedPod = pod
				break
			}
		}
	} else {
		selectedPod = pods[processIndex]
	}

	processType, ok := selectedPod.Labels["dokku.com/process-type"]
	if !ok {
		return fmt.Errorf("Pod %s does not have a process type label", selectedPod.Name)
	}
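
	// With no explicit command, fall back to an interactive shell, letting
	// an app-level DOKKU_APP_SHELL override the global setting.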
	command := args
	if len(args) == 0 {
		command = []string{"/bin/bash"}
		if globalShell, err := common.PlugnTriggerOutputAsString("config-get-global", []string{"DOKKU_APP_SHELL"}...); err == nil && globalShell != "" {
			command = []string{globalShell}
		}
		if appShell, err := common.PlugnTriggerOutputAsString("config-get", []string{appName, "DOKKU_APP_SHELL"}...); err == nil && appShell != "" {
			command = []string{appShell}
		}
	}

	entrypoint := ""
	if selectedPod.Annotations["dokku.com/builder-type"] == "herokuish" {
		entrypoint = "/exec"
	}

	return enterPod(ctx, EnterPodInput{
		AppName:     appName,
		Clientset:   clientset,
		Command:     command,
		Entrypoint:  entrypoint,
		ProcessType: processType,
		SelectedPod: selectedPod,
	})
}

// TriggerSchedulerLogs displays logs for a given application
func TriggerSchedulerLogs(scheduler string, appName string, processType string, tail bool, quiet bool, numLines int64) error {
	if scheduler != "k3s" {
		return nil
	}

	clientset, err := NewKubernetesClient()
	if err != nil {
		return fmt.Errorf("Error creating kubernetes client: %w", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGQUIT,
		syscall.SIGTERM)
	go func() {
		<-signals
		cancel()
	}()

	labelSelector := []string{fmt.Sprintf("dokku.com/app-name=%s", appName)}
	processIndex := 0
	if processType != "" {
		parts := strings.SplitN(processType, ".", 2)
		if len(parts) == 2 {
			processType = parts[0]
			processIndex, err = strconv.Atoi(parts[1])
			if err != nil {
				return fmt.Errorf("Error parsing process index: %w", err)
			}
		}
		labelSelector = append(labelSelector, fmt.Sprintf("dokku.com/process-type=%s", processType))
	}

	namespace := common.PropertyGetDefault("scheduler-k3s", appName, "namespace", "default")
	pods, err := clientset.ListPods(ctx, ListPodsInput{
		Namespace:     namespace,
		LabelSelector: strings.Join(labelSelector, ","),
	})
	if err != nil {
		return fmt.Errorf("Error listing pods: %w", err)
	}
	if len(pods) == 0 {
		return fmt.Errorf("No pods found for app %s", appName)
	}

	ch := make(chan bool)

	if os.Getenv("FORCE_TTY") == "1" {
		color.NoColor = false
	}

	colors := []color.Attribute{
		color.FgRed,
		color.FgYellow,
		color.FgGreen,
		color.FgCyan,
		color.FgBlue,
		color.FgMagenta,
	}
	for i := 0; i < len(pods); i++ {
		if processIndex > 0 && i != (processIndex-1) {
			continue
		}

		logOptions := v1.PodLogOptions{
			Follow: tail,
		}
		if numLines > 0 {
			logOptions.TailLines = ptr.To(numLines)
		}

		podColor := colors[i%len(colors)]
		dynoText := color.New(podColor).SprintFunc()
		podName := pods[i].Name
		podLogs, err := clientset.Client.CoreV1().Pods(namespace).GetLogs(podName, &logOptions).Stream(ctx)
		if err != nil {
			return err
		}
		buffer := bufio.NewReader(podLogs)
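		// Each pod's log stream is consumed in its own goroutine, with lines
		// prefixed by a per-pod color so interleaved output stays readable.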
		go func(ctx context.Context, buffer *bufio.Reader, prettyText func(a ...interface{}) string, ch chan bool) {
			defer func() {
				ch <- true
			}()
		readLoop:
			for {
				select {
				case <-ctx.Done(): // exit once cancel() is executed
					return
				default:
					str, readErr := buffer.ReadString('\n')
					if readErr == io.EOF {
						break readLoop
					}

					if str == "" {
						continue
					}

					if !quiet {
						str = fmt.Sprintf("%s %s", prettyText(fmt.Sprintf("app[%s]:", podName)), str)
					}

					_, err := fmt.Print(str)
					if err != nil {
						return
					}
				}
			}
		}(ctx, buffer, dynoText, ch)
	}
	<-ch

	return nil
}

// TriggerSchedulerRun runs a command in an ephemeral container
func TriggerSchedulerRun(scheduler string, appName string, envCount int, args []string) error {
	if scheduler != "k3s" {
		return nil
	}

	extraEnv := map[string]string{}
	if envCount > 0 {
		var envPairs []string
		envPairs, args = args[0:envCount], args[envCount:]
		for _, envPair := range envPairs {
			parts := strings.SplitN(envPair, "=", 2)
			if len(parts) != 2 {
				return fmt.Errorf("Invalid environment variable pair: %s", envPair)
			}
			extraEnv[parts[0]] = parts[1]
		}
	}

	imageTag, err := common.GetRunningImageTag(appName, "")
	if err != nil {
		return fmt.Errorf("Error getting running image tag: %w", err)
	}
	imageRepo := common.GetAppImageRepo(appName)
	image, err := common.GetDeployingAppImageName(appName, imageTag, imageRepo)
	if err != nil {
		return fmt.Errorf("Error getting deploying app image name: %w", err)
	}

	imageStage, err := common.DockerInspect(image, "{{ index .Config.Labels \"com.dokku.image-stage\" }}")
	if err != nil {
		return fmt.Errorf("Error getting image stage: %w", err)
	}
	if imageStage != "release" {
		common.LogWarn(fmt.Sprintf("Invalid image stage detected: expected 'release', got '%s'", imageStage))
		return fmt.Errorf("Successfully deploy your app to fix dokku run calls")
	}
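
	// Resolve DOKKU_RM_CONTAINER with the usual precedence: process
	// environment first, then app-level config, then global config,
	// defaulting to true when unset everywhere.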
	dokkuRmContainer := os.Getenv("DOKKU_RM_CONTAINER")
	if dokkuRmContainer == "" {
		appRmContainer, err := common.CallPlugnTrigger(common.PlugnTriggerInput{
			Trigger:       "config-get",
			Args:          []string{appName, "DOKKU_RM_CONTAINER"},
			CaptureOutput: true,
			StreamStdio:   false,
		})
		if err != nil {
			globalRmContainer, err := common.CallPlugnTrigger(common.PlugnTriggerInput{
				Trigger:       "config-get-global",
				Args:          []string{"DOKKU_RM_CONTAINER"},
				CaptureOutput: true,
				StreamStdio:   false,
			})
			if err == nil {
				dokkuRmContainer = globalRmContainer.Stdout
			}
		} else {
			dokkuRmContainer = appRmContainer.Stdout
		}
	}
	if dokkuRmContainer == "" {
		dokkuRmContainer = "true"
	}

	rmContainer, err := strconv.ParseBool(dokkuRmContainer)
	if err != nil {
		return fmt.Errorf("Error parsing DOKKU_RM_CONTAINER value as boolean: %w", err)
	}

	labels := map[string]string{
		"dokku.com/app-name": appName,
	}

	if os.Getenv("DOKKU_TRACE") == "1" {
		extraEnv["TRACE"] = "true"
	}

	processType := "run"
	if os.Getenv("DOKKU_CRON_ID") != "" {
		processType = "cron"
		labels["dokku.com/cron-id"] = os.Getenv("DOKKU_CRON_ID")
	}

	imageSourceType, err := common.DockerInspect(image, "{{ index .Config.Labels \"com.dokku.builder-type\" }}")
	if err != nil {
		return fmt.Errorf("Error getting image builder type: %w", err)
	}

	// todo: do something with docker args
	command := args
	if len(args) == 0 {
		command = []string{"/bin/bash"}
		if globalShell, err := common.PlugnTriggerOutputAsString("config-get-global", []string{"DOKKU_APP_SHELL"}...); err == nil && globalShell != "" {
			command = []string{globalShell}
		}
		if appShell, err := common.PlugnTriggerOutputAsString("config-get", []string{appName, "DOKKU_APP_SHELL"}...); err == nil && appShell != "" {
			command = []string{appShell}
		}
	} else if len(args) == 1 {
		resp, err := common.CallPlugnTrigger(common.PlugnTriggerInput{
			Trigger:       "procfile-get-command",
			Args:          []string{appName, args[0], "5000"},
			CaptureOutput: true,
			StreamStdio:   false,
		})
		if err == nil && resp.Stdout != "" {
			common.LogInfo1Quiet(fmt.Sprintf("Found '%s' in Procfile, running that command", args[0]))
			// assumption: the resolved Procfile command is split shell-style
			// into the container command; the original code returned early
			// here with a stale `return err` and left this step as a todo
			procfileWords, splitErr := shellquote.Split(resp.Stdout)
			if splitErr != nil {
				return fmt.Errorf("Error parsing procfile command: %w", splitErr)
			}
			command = procfileWords
		}
	}

	entrypoint := ""
	if imageSourceType == "herokuish" {
		entrypoint = "/exec"
	}

	namespace := common.PropertyGetDefault("scheduler-k3s", appName, "namespace", "default")
	helmAgent, err := NewHelmAgent(namespace, DevNullPrinter)
	if err != nil {
		return fmt.Errorf("Error creating helm agent: %w", err)
	}

	values, err := helmAgent.GetValues(fmt.Sprintf("dokku-%s", appName))
	if err != nil {
		return fmt.Errorf("Error getting helm values: %w", err)
	}

	deploymentIDValue, ok := values["deploment_id"].(string)
	if !ok {
		return fmt.Errorf("Deployment ID is not a string")
	}
	if len(deploymentIDValue) == 0 {
		return fmt.Errorf("Deployment ID is empty")
	}
	deploymentID, err := strconv.ParseInt(deploymentIDValue, 10, 64)
	if err != nil {
		return fmt.Errorf("Error parsing deployment ID: %w", err)
	}

	attachToPod := os.Getenv("DOKKU_DETACH_CONTAINER") != "1"
	imagePullSecrets := common.PropertyGetDefault("scheduler-k3s", appName, "image-pull-secrets", "")
	workingDir := common.GetWorkingDir(appName, image)
	job, err := templateKubernetesJob(Job{
		AppName:          appName,
		Command:          command,
		DeploymentID:     deploymentID,
		Entrypoint:       entrypoint,
		Env:              extraEnv,
		Image:            image,
		ImagePullSecrets: imagePullSecrets,
		ImageSourceType:  imageSourceType,
		Interactive:      attachToPod,
		Labels:           labels,
		Namespace:        namespace,
		ProcessType:      processType,
		RemoveContainer:  rmContainer,
		WorkingDir:       workingDir,
	})
	if err != nil {
		return fmt.Errorf("Error templating job: %w", err)
	}

	if os.Getenv("FORCE_TTY") == "1" {
		color.NoColor = false
	}

	clientset, err := NewKubernetesClient()
	if err != nil {
		return fmt.Errorf("Error creating kubernetes client: %w", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGQUIT,
		syscall.SIGTERM)
	go func() {
		<-signals
		if attachToPod {
			clientset.DeleteJob(ctx, DeleteJobInput{ // nolint: errcheck
				Name:      job.Name,
				Namespace: namespace,
			})
		}
		cancel()
	}()

	createdJob, err := clientset.CreateJob(ctx, CreateJobInput{
		Job:       job,
		Namespace: namespace,
	})
	if err != nil {
		return fmt.Errorf("Error creating job: %w", err)
	}

	if attachToPod {
		defer func() {
			clientset.DeleteJob(ctx, DeleteJobInput{ // nolint: errcheck
				Name:      job.Name,
				Namespace: namespace,
			})
		}()
	}
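
	// Kubernetes labels the pods it creates for a job with
	// batch.kubernetes.io/job-name; that label is used here to find the
	// pod backing this one-off run.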
	batchJobSelector := fmt.Sprintf("batch.kubernetes.io/job-name=%s", createdJob.Name)
	pods, err := waitForPodToExist(ctx, WaitForPodToExistInput{
		Clientset:  clientset,
		Namespace:  namespace,
		RetryCount: 3,
		Selector:   batchJobSelector,
	})
	if err != nil {
		return fmt.Errorf("Error waiting for pod to exist: %w", err)
	}

	for _, pod := range pods {
		common.LogQuiet(pod.Name)
	}

	if !attachToPod {
		return nil
	}

	err = waitForPodBySelectorRunning(ctx, WaitForPodBySelectorRunningInput{
		Clientset: clientset,
		Namespace: namespace,
		Selector:  batchJobSelector,
		Timeout:   300,
		Waiter:    isPodReady,
	})
	if err != nil {
		if errors.Is(err, conditions.ErrPodCompleted) {
			pods, podErr := clientset.ListPods(ctx, ListPodsInput{
				Namespace:     namespace,
				LabelSelector: batchJobSelector,
			})
			if podErr != nil {
				return fmt.Errorf("Error listing pods for completed job: %w", podErr)
			}
			selectedPod := pods[0]
			if selectedPod.Status.Phase == v1.PodFailed {
				for _, status := range selectedPod.Status.ContainerStatuses {
					if status.Name != fmt.Sprintf("%s-%s", appName, processType) {
						continue
					}
					if status.State.Terminated == nil {
						continue
					}
					return fmt.Errorf("Unable to attach as the pod has already exited with a failed exit code: %s", status.State.Terminated.Message)
				}

				return fmt.Errorf("Unable to attach as the pod has already exited with a failed exit code")
			} else if selectedPod.Status.Phase == v1.PodSucceeded {
				return errors.New("Unable to attach as the pod has already exited with a successful exit code")
			}
		}
		return fmt.Errorf("Error waiting for pod to be running: %w", err)
	}

	pods, err = clientset.ListPods(ctx, ListPodsInput{
		Namespace:     namespace,
		LabelSelector: batchJobSelector,
	})
	if err != nil {
		return fmt.Errorf("Error getting pod: %w", err)
	}
	selectedPod := pods[0]

	switch selectedPod.Status.Phase {
	case v1.PodFailed, v1.PodSucceeded:
		if selectedPod.Status.Phase == v1.PodFailed {
			for _, status := range selectedPod.Status.ContainerStatuses {
				if status.Name != fmt.Sprintf("%s-%s", appName, processType) {
					continue
				}
				if status.State.Terminated == nil {
					continue
				}
				return fmt.Errorf("Unable to attach as the pod has already exited with a failed exit code: %s", status.State.Terminated.Message)
			}

			return fmt.Errorf("Unable to attach as the pod has already exited with a failed exit code")
		}
		return errors.New("Unable to attach as the pod has already exited with a successful exit code")
	case v1.PodRunning:
		return enterPod(ctx, EnterPodInput{
			AppName:     appName,
			Clientset:   clientset,
			Command:     command,
			Entrypoint:  entrypoint,
			ProcessType: processType,
			SelectedPod: selectedPod,
		})
	default:
		return fmt.Errorf("Unable to attach as the pod is in an unknown state: %s", selectedPod.Status.Phase)
	}

	// todo: support scheduler-post-run
}

// TriggerSchedulerRunList lists one-off run pods for a given application
func TriggerSchedulerRunList(scheduler string, appName string, format string) error {
	if scheduler != "k3s" {
		return nil
	}

	clientset, err := NewKubernetesClient()
	if err != nil {
		return fmt.Errorf("Error creating kubernetes client: %w", err)
	}

	namespace := common.PropertyGetDefault("scheduler-k3s", appName, "namespace", "default")

	ctx := context.Background()
	cronJobs, err := clientset.ListCronJobs(ctx, ListCronJobsInput{
		LabelSelector: fmt.Sprintf("dokku.com/app-name=%s", appName),
		Namespace:     namespace,
	})
	if err != nil {
		return fmt.Errorf("Error getting cron jobs: %w", err)
	}

	type CronJobEntry struct {
		ID       string `json:"id"`
		AppName  string `json:"app"`
		Command  string `json:"command"`
		Schedule string `json:"schedule"`
	}

	data := []CronJobEntry{}
	lines := []string{"ID | Schedule | Command"}
	for _, cronJob := range cronJobs {
		command := ""
		for _, container := range cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers {
			if container.Name == fmt.Sprintf("%s-cron", appName) {
				command = strings.Join(container.Args, " ")
			}
		}

		cronID, ok := cronJob.Labels["dokku.com/cron-id"]
		if !ok {
			common.LogWarn(fmt.Sprintf("Cron job %s does not have a cron ID label", cronJob.Name))
			continue
		}

		lines = append(lines, fmt.Sprintf("%s | %s | %s", cronID, cronJob.Spec.Schedule, command))
		data = append(data, CronJobEntry{
			ID:       cronID,
			AppName:  appName,
			Command:  command,
			Schedule: cronJob.Spec.Schedule,
		})
	}

	if format == "stdout" {
		result := columnize.SimpleFormat(lines)
		fmt.Println(result)
	} else {
		b, err := json.Marshal(data)
		if err != nil {
			return fmt.Errorf("Error marshalling cron jobs: %w", err)
		}
		fmt.Println(string(b))
	}

	return nil
}

// TriggerSchedulerPostDelete destroys the scheduler-k3s data for a given app container
func TriggerSchedulerPostDelete(scheduler string, appName string) error {
	if scheduler != "k3s" {
		return nil
	}
	namespace := common.PropertyGetDefault("scheduler-k3s", appName, "namespace", "default")
	helmAgent, err := NewHelmAgent(namespace, DeployLogPrinter)
	if err != nil {
		return fmt.Errorf("Error creating helm agent: %w", err)
	}

	err = helmAgent.UninstallChart(fmt.Sprintf("dokku-%s", appName))
	if err != nil {
		return fmt.Errorf("Error uninstalling chart: %w", err)
	}

	return nil
}

// TriggerSchedulerStop stops an application
func TriggerSchedulerStop(scheduler string, appName string) error {
	if scheduler != "k3s" {
		return nil
	}

	clientset, err := NewKubernetesClient()
	if err != nil {
		return fmt.Errorf("Error creating kubernetes client: %w", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGQUIT,
		syscall.SIGTERM)
	go func() {
		<-signals
		cancel()
	}()

	namespace := common.PropertyGetDefault("scheduler-k3s", appName, "namespace", "default")
	deployments, err := clientset.ListDeployments(ctx, ListDeploymentsInput{
		Namespace:     namespace,
		LabelSelector: fmt.Sprintf("dokku.com/app-name=%s", appName),
	})
	if err != nil {
		return fmt.Errorf("Error listing deployments: %w", err)
	}
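
	// Stopping is implemented as scaling every process deployment down to
	// zero replicas rather than deleting the underlying resources.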
	for _, deployment := range deployments {
		processType, ok := deployment.Annotations["dokku.com/process-type"]
		if !ok {
			return fmt.Errorf("Deployment %s does not have a process type annotation", deployment.Name)
		}
		common.LogVerboseQuiet(fmt.Sprintf("Stopping %s process", processType))
		err := clientset.ScaleDeployment(ctx, ScaleDeploymentInput{
			Name:      deployment.Name,
			Namespace: namespace,
			Replicas:  0,
		})
		if err != nil {
			return fmt.Errorf("Error updating deployment scale: %w", err)
		}
	}

	return nil
}