package scheduler_k3s

import (
	"bytes"
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"text/template"
	"time"

	appjson "github.com/dokku/dokku/plugins/app-json"
	"github.com/dokku/dokku/plugins/common"
	nginxvhosts "github.com/dokku/dokku/plugins/nginx-vhosts"
	resty "github.com/go-resty/resty/v2"
	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
	"golang.org/x/sync/errgroup"
	"gopkg.in/yaml.v3"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubectl/pkg/util/term"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/utils/ptr"
	"mvdan.cc/sh/v3/shell"
)

// EnterPodInput contains all the information needed to enter a pod
type EnterPodInput struct {
	// Clientset is the kubernetes clientset
	Clientset KubernetesClient

	// Command is the command to run
	Command []string

	// Entrypoint is the entrypoint to run
	Entrypoint string

	// SelectedContainerName is the container name to enter
	SelectedContainerName string

	// SelectedPod is the pod to enter
	SelectedPod v1.Pod

	// WaitTimeout is the timeout to wait for the pod to be ready
	WaitTimeout int
}

// Node contains information about a node
type Node struct {
	// Name is the name of the node
	Name string

	// Roles is the roles of the node
	Roles []string

	// Ready is whether the node is ready
	Ready bool

	// RemoteHost is the remote host
	RemoteHost string

	// Version is the version of the node
	Version string
}

// String returns a string representation of the node
func (n Node) String() string {
	return fmt.Sprintf("%s|%s|%s|%s", n.Name, strconv.FormatBool(n.Ready), strings.Join(n.Roles, ","), n.Version)
}

// StartCommandInput contains all the information needed to get the start command
type StartCommandInput struct {
	// AppName is the name of the app
	AppName string
	// ProcessType is the process type
	ProcessType string
	// ImageSourceType is the image source type
	ImageSourceType string
	// Port is the port
	Port int32
	// Env is the environment variables
	Env map[string]string
}

// StartCommandOutput contains the start command
type StartCommandOutput struct {
	// Command is the start command
	Command []string
}
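
// WaitForNodeToExistInput contains all the information needed to wait for a node to exist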
type WaitForNodeToExistInput struct {
	Clientset  KubernetesClient
	Namespace  string
	RetryCount int
	NodeName   string
}
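
// WaitForPodBySelectorRunningInput contains all the information needed to wait for a pod matching a label selector to be running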
type WaitForPodBySelectorRunningInput struct {
	Clientset     KubernetesClient
	Namespace     string
	LabelSelector string
	PodName       string
	Timeout       int
	Waiter        func(ctx context.Context, clientset KubernetesClient, podName, namespace string) wait.ConditionWithContextFunc
}
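
// WaitForPodToExistInput contains all the information needed to wait for a pod to exist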
type WaitForPodToExistInput struct {
	Clientset     KubernetesClient
	Namespace     string
	RetryCount    int
	PodName       string
	LabelSelector string
}

// applyKedaClusterTriggerAuthentications applies keda cluster trigger authentications chart to the cluster
func applyKedaClusterTriggerAuthentications(ctx context.Context, triggerType string, metadata map[string]string) error {
	chartDir, err := os.MkdirTemp("", "keda-cluster-trigger-authentications-chart-")
	if err != nil {
		return fmt.Errorf("Error creating keda-cluster-trigger-authentications chart directory: %w", err)
	}
	defer os.RemoveAll(chartDir)

	// create the chart.yaml
	chart := &Chart{
		ApiVersion: "v2",
		AppVersion: "1.0.0",
		Icon:       "https://dokku.com/assets/dokku-logo.svg",
		Name:       fmt.Sprintf("keda-cluster-trigger-authentications-%s", triggerType),
		Version:    "0.0.1",
	}

	err = writeYaml(WriteYamlInput{
		Object: chart,
		Path:   filepath.Join(chartDir, "Chart.yaml"),
	})
	if err != nil {
		return fmt.Errorf("Error writing keda-cluster-trigger-authentications chart: %w", err)
	}

	// create the values.yaml
	values := ClusterKedaValues{
		Secrets: map[string]string{},
		Type:    triggerType,
	}

	for key, value := range metadata {
		values.Secrets[key] = base64.StdEncoding.EncodeToString([]byte(value))
	}

	if err := os.MkdirAll(filepath.Join(chartDir, "templates"), os.FileMode(0755)); err != nil {
		return fmt.Errorf("Error creating keda-cluster-trigger-authentications chart templates directory: %w", err)
	}

	err = writeYaml(WriteYamlInput{
		Object: values,
		Path:   filepath.Join(chartDir, "values.yaml"),
	})
	if err != nil {
		return fmt.Errorf("Error writing chart: %w", err)
	}

	templateFiles := []string{"keda-cluster-trigger-authentication", "keda-cluster-secret"}
	for _, template := range templateFiles {
		b, err := templates.ReadFile(fmt.Sprintf("templates/chart/%s.yaml", template))
		if err != nil {
			return fmt.Errorf("Error reading %s template: %w", template, err)
		}

		filename := filepath.Join(chartDir, "templates", fmt.Sprintf("%s.yaml", template))
		err = os.WriteFile(filename, b, os.FileMode(0644))
		if err != nil {
			return fmt.Errorf("Error writing %s template: %w", template, err)
		}

		if os.Getenv("DOKKU_TRACE") == "1" {
			common.CatFile(filename)
		}
	}

	b, err := templates.ReadFile("templates/chart/_helpers.tpl")
	if err != nil {
		return fmt.Errorf("Error reading _helpers template: %w", err)
	}

	helpersFile := filepath.Join(chartDir, "templates", "_helpers.tpl")
	err = os.WriteFile(helpersFile, b, os.FileMode(0644))
	if err != nil {
		return fmt.Errorf("Error writing _helpers template: %w", err)
	}

	if os.Getenv("DOKKU_TRACE") == "1" {
		common.CatFile(helpersFile)
	}

	// install the chart
	helmAgent, err := NewHelmAgent("keda", DeployLogPrinter)
	if err != nil {
		return fmt.Errorf("Error creating helm agent: %w", err)
	}

	chartPath, err := filepath.Abs(chartDir)
	if err != nil {
		return fmt.Errorf("Error getting chart path: %w", err)
	}

	timeoutDuration, err := time.ParseDuration("300s")
	if err != nil {
		return fmt.Errorf("Error parsing deploy timeout duration: %w", err)
	}

	err = helmAgent.InstallOrUpgradeChart(ctx, ChartInput{
		ChartPath:         chartPath,
		Namespace:         "keda",
		ReleaseName:       fmt.Sprintf("keda-cluster-trigger-authentications-%s", triggerType),
		RollbackOnFailure: true,
		Timeout:           timeoutDuration,
		Wait:              true,
	})
	if err != nil {
		return fmt.Errorf("Error installing keda-cluster-trigger-authentications-%s chart: %w", triggerType, err)
	}

	common.LogInfo1Quiet(fmt.Sprintf("Applied keda-cluster-trigger-authentications-%s chart", triggerType))

	return nil
}
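
// applyClusterIssuers applies the cluster-issuers chart to the cert-manager namespace, creating letsencrypt staging and production cluster issuers when the corresponding global letsencrypt emails are set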
func applyClusterIssuers(ctx context.Context) error {
	chartDir, err := os.MkdirTemp("", "cluster-issuer-chart-")
	if err != nil {
		return fmt.Errorf("Error creating cluster-issuer chart directory: %w", err)
	}
	defer os.RemoveAll(chartDir)

	// create the chart.yaml
	chart := &Chart{
		ApiVersion: "v2",
		AppVersion: "1.0.0",
		Icon:       "https://dokku.com/assets/dokku-logo.svg",
		Name:       "cluster-issuers",
		Version:    "0.0.1",
	}

	err = writeYaml(WriteYamlInput{
		Object: chart,
		Path:   filepath.Join(chartDir, "Chart.yaml"),
	})
	if err != nil {
		return fmt.Errorf("Error writing cluster-issuer chart: %w", err)
	}

	// create the values.yaml
	letsencryptEmailStag := getGlobalLetsencryptEmailStag()
	letsencryptEmailProd := getGlobalLetsencryptEmailProd()

	clusterIssuerValues := ClusterIssuerValues{
		ClusterIssuers: map[string]ClusterIssuer{
			"letsencrypt-stag": {
				Email:        letsencryptEmailStag,
				Enabled:      letsencryptEmailStag != "",
				IngressClass: getGlobalIngressClass(),
				Name:         "letsencrypt-stag",
				Server:       "https://acme-staging-v02.api.letsencrypt.org/directory",
			},
			"letsencrypt-prod": {
				Email:        letsencryptEmailProd,
				Enabled:      letsencryptEmailProd != "",
				IngressClass: getGlobalIngressClass(),
				Name:         "letsencrypt-prod",
				Server:       "https://acme-v02.api.letsencrypt.org/directory",
			},
		},
	}

	if err := os.MkdirAll(filepath.Join(chartDir, "templates"), os.FileMode(0755)); err != nil {
		return fmt.Errorf("Error creating cluster-issuer chart templates directory: %w", err)
	}

	err = writeYaml(WriteYamlInput{
		Object: clusterIssuerValues,
		Path:   filepath.Join(chartDir, "values.yaml"),
	})
	if err != nil {
		return fmt.Errorf("Error writing chart: %w", err)
	}

	// create the templates/cluster-issuer.yaml
	b, err := templates.ReadFile("templates/chart/cluster-issuer.yaml")
	if err != nil {
		return fmt.Errorf("Error reading cluster-issuer template: %w", err)
	}

	filename := filepath.Join(chartDir, "templates", "cluster-issuer.yaml")
	err = os.WriteFile(filename, b, os.FileMode(0644))
	if err != nil {
		return fmt.Errorf("Error writing cluster-issuer template: %w", err)
	}

	if os.Getenv("DOKKU_TRACE") == "1" {
		common.CatFile(filename)
	}

	// install the chart
	helmAgent, err := NewHelmAgent("cert-manager", DevNullPrinter)
	if err != nil {
		return fmt.Errorf("Error creating helm agent: %w", err)
	}

	chartPath, err := filepath.Abs(chartDir)
	if err != nil {
		return fmt.Errorf("Error getting chart path: %w", err)
	}

	timeoutDuration, err := time.ParseDuration("300s")
	if err != nil {
		return fmt.Errorf("Error parsing deploy timeout duration: %w", err)
	}

	err = helmAgent.InstallOrUpgradeChart(ctx, ChartInput{
		ChartPath:         chartPath,
		Namespace:         "cert-manager",
		ReleaseName:       "cluster-issuers",
		RollbackOnFailure: true,
		Timeout:           timeoutDuration,
		Wait:              true,
	})
	if err != nil {
		return fmt.Errorf("Error installing cluster-issuer chart: %w", err)
	}

	return nil
}
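
// createKubernetesNamespace creates a dokku-managed namespace with the given name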
func createKubernetesNamespace(ctx context.Context, namespaceName string) error {
	clientset, err := NewKubernetesClient()
	if err != nil {
		return err
	}

	namespace := corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: namespaceName,
			Annotations: map[string]string{
				"dokku.com/managed": "true",
			},
			Labels: map[string]string{
				"dokku.com/managed": "true",
			},
		},
	}
	_, err = clientset.CreateNamespace(ctx, CreateNamespaceInput{
		Name: namespace,
	})
	if err != nil {
		return err
	}

	return nil
}
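
// enterPod waits for the selected pod to be ready and then attaches an interactive terminal to the chosen (or default) container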
func enterPod(ctx context.Context, input EnterPodInput) error {
	coreclient, err := corev1client.NewForConfig(&input.Clientset.RestConfig)
	if err != nil {
		return fmt.Errorf("Error creating corev1 client: %w", err)
	}

	labelSelector := []string{}
	for k, v := range input.SelectedPod.Labels {
		labelSelector = append(labelSelector, fmt.Sprintf("%s=%s", k, v))
	}

	// default the wait timeout when the caller has not specified one
	if input.WaitTimeout == 0 {
		input.WaitTimeout = 5
	}

	err = waitForPodBySelectorRunning(ctx, WaitForPodBySelectorRunningInput{
		Clientset:     input.Clientset,
		Namespace:     input.SelectedPod.Namespace,
		LabelSelector: strings.Join(labelSelector, ","),
		PodName:       input.SelectedPod.Name,
		Timeout:       input.WaitTimeout,
		Waiter:        isPodReady,
	})
	if err != nil {
		return fmt.Errorf("Error waiting for pod to be ready: %w", err)
	}

	defaultContainerName, hasDefaultContainer := input.SelectedPod.Annotations["kubectl.kubernetes.io/default-container"]
	if input.SelectedContainerName == "" && hasDefaultContainer {
		input.SelectedContainerName = defaultContainerName
	}
	if input.SelectedContainerName == "" {
		return fmt.Errorf("No container specified and no default container found")
	}

	req := coreclient.RESTClient().Post().
		Resource("pods").
		Namespace(input.SelectedPod.Namespace).
		Name(input.SelectedPod.Name).
		SubResource("exec")

	req.Param("container", input.SelectedContainerName)
	req.Param("stdin", "true")
	req.Param("stdout", "true")
	req.Param("stderr", "true")
	req.Param("tty", "true")

	if input.Entrypoint != "" {
		req.Param("command", input.Entrypoint)
	}
	for _, cmd := range input.Command {
		req.Param("command", cmd)
	}

	t := term.TTY{
		In:  os.Stdin,
		Out: os.Stdout,
		Raw: true,
	}
	size := t.GetSize()
	sizeQueue := t.MonitorSize(size)

	return t.Safe(func() error {
		exec, err := remotecommand.NewSPDYExecutor(&input.Clientset.RestConfig, "POST", req.URL())
		if err != nil {
			return fmt.Errorf("Error creating executor: %w", err)
		}

		return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
			Stdin:             os.Stdin,
			Stdout:            os.Stdout,
			Stderr:            os.Stderr,
			Tty:               true,
			TerminalSizeQueue: sizeQueue,
		})
	})
}
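
// extractStartCommand resolves the start command for a process: herokuish images use /start, other image types use the DOKKU_START_CMD or DOKKU_DOCKERFILE_START_CMD config values, with the Procfile entry as a fallback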
func extractStartCommand(input StartCommandInput) string {
	command := ""
	if input.ImageSourceType == "herokuish" {
		return "/start " + input.ProcessType
	}

	resp, err := common.CallPlugnTrigger(common.PlugnTriggerInput{
		Trigger: "config-get",
		Args:    []string{input.AppName, "DOKKU_START_CMD"},
	})
	if err == nil && resp.ExitCode == 0 && len(resp.Stdout) > 0 {
		command = strings.TrimSpace(resp.Stdout)
	}

	if input.ImageSourceType == "dockerfile" {
		resp, err := common.CallPlugnTrigger(common.PlugnTriggerInput{
			Trigger: "config-get",
			Args:    []string{input.AppName, "DOKKU_DOCKERFILE_START_CMD"},
		})
		if err == nil && resp.ExitCode == 0 && len(resp.Stdout) > 0 {
			command = strings.TrimSpace(resp.Stdout)
		}
	}

	if command == "" {
		results, _ := common.CallPlugnTrigger(common.PlugnTriggerInput{
			Trigger: "procfile-get-command",
			Args:    []string{input.AppName, input.ProcessType, fmt.Sprint(input.Port)},
		})
		command = results.StdoutContents()
	}

	return command
}

// getAnnotations retrieves annotations for a given app and process type
func getAnnotations(appName string, processType string) (ProcessAnnotations, error) {
	annotations := ProcessAnnotations{}
	certificateAnnotations, err := getAnnotation(appName, processType, "certificate")
	if err != nil {
		return annotations, err
	}
	annotations.CertificateAnnotations = certificateAnnotations

	cronJobAnnotations, err := getAnnotation(appName, processType, "cronjob")
	if err != nil {
		return annotations, err
	}
	annotations.CronJobAnnotations = cronJobAnnotations

	deploymentAnnotations, err := getAnnotation(appName, processType, "deployment")
	if err != nil {
		return annotations, err
	}
	annotations.DeploymentAnnotations = deploymentAnnotations

	ingressAnnotations, err := getIngressAnnotations(appName, processType)
	if err != nil {
		return annotations, err
	}
	annotations.IngressAnnotations = ingressAnnotations

	jobAnnotations, err := getAnnotation(appName, processType, "job")
	if err != nil {
		return annotations, err
	}
	annotations.JobAnnotations = jobAnnotations

	kedaScalingObjectAnnotations, err := getAnnotation(appName, processType, "keda_scaled_object")
	if err != nil {
		return annotations, err
	}
	annotations.KedaScalingObjectAnnotations = kedaScalingObjectAnnotations

	kedaSecretAnnotations, err := getAnnotation(appName, processType, "keda_secret")
	if err != nil {
		return annotations, err
	}
	annotations.KedaSecretAnnotations = kedaSecretAnnotations

	kedaTriggerAuthenticationAnnotations, err := getAnnotation(appName, processType, "keda_trigger_authentication")
	if err != nil {
		return annotations, err
	}
	annotations.KedaTriggerAuthenticationAnnotations = kedaTriggerAuthenticationAnnotations

	podAnnotations, err := getAnnotation(appName, processType, "pod")
	if err != nil {
		return annotations, err
	}
	annotations.PodAnnotations = podAnnotations

	secretAnnotations, err := getAnnotation(appName, processType, "secret")
	if err != nil {
		return annotations, err
	}
	annotations.SecretAnnotations = secretAnnotations

	serviceAnnotations, err := getAnnotation(appName, processType, "service")
	if err != nil {
		return annotations, err
	}
	annotations.ServiceAnnotations = serviceAnnotations

	serviceAccountAnnotations, err := getAnnotation(appName, processType, "serviceaccount")
	if err != nil {
		return annotations, err
	}
	annotations.ServiceAccountAnnotations = serviceAccountAnnotations

	traefikIngressRouteAnnotations, err := getAnnotation(appName, processType, "traefik_ingressroute")
	if err != nil {
		return annotations, err
	}
	annotations.TraefikIngressRouteAnnotations = traefikIngressRouteAnnotations

	traefikMiddlewareAnnotations, err := getAnnotation(appName, processType, "traefik_middleware")
	if err != nil {
		return annotations, err
	}
	annotations.TraefikMiddlewareAnnotations = traefikMiddlewareAnnotations

	return annotations, nil
}

// GetAutoscalingInput contains all the information needed to get autoscaling config
type GetAutoscalingInput struct {
	// AppName is the name of the app
	AppName string

	// ProcessType is the process type
	ProcessType string

	// Replicas is the number of replicas
	Replicas int

	// KedaValues is the keda values
	KedaValues GlobalKedaValues
}

// getAutoscaling retrieves autoscaling config for a given app and process type
func getAutoscaling(input GetAutoscalingInput) (ProcessAutoscaling, error) {
	config, ok, err := appjson.GetAutoscalingConfig(input.AppName, input.ProcessType, input.Replicas)
	if err != nil {
		common.LogWarn(fmt.Sprintf("Error getting autoscaling config for %s: %v", input.AppName, err))
		return ProcessAutoscaling{}, err
	}

	if !ok {
		common.LogWarn(fmt.Sprintf("No autoscaling config found for %s", input.AppName))
		return ProcessAutoscaling{}, nil
	}

	replacements := map[string]string{
		"APP_NAME":        input.AppName,
		"PROCESS_TYPE":    input.ProcessType,
		"DEPLOYMENT_NAME": fmt.Sprintf("%s-%s", input.AppName, input.ProcessType),
	}

	triggers := []ProcessAutoscalingTrigger{}
	for _, trigger := range config.Triggers {
		metadata := map[string]string{}
		for key, value := range trigger.Metadata {
			tmpl, err := template.New("").Delims("[[", "]]").Parse(value)
			if err != nil {
				return ProcessAutoscaling{}, fmt.Errorf("Error parsing autoscaling trigger metadata: %w", err)
			}

			var output bytes.Buffer
			if err := tmpl.Execute(&output, replacements); err != nil {
				return ProcessAutoscaling{}, fmt.Errorf("Error executing autoscaling trigger metadata template: %w", err)
			}
			metadata[key] = output.String()
		}

		trigger := ProcessAutoscalingTrigger{
			Name:     trigger.Name,
			Type:     trigger.Type,
			Metadata: metadata,
		}

		if auth, ok := input.KedaValues.Authentications[trigger.Type]; ok {
			trigger.AuthenticationRef = &ProcessAutoscalingTriggerAuthenticationRef{
				Name: auth.Name,
				Kind: string(auth.Kind),
			}
		} else if auth, ok := input.KedaValues.GlobalAuthentications[trigger.Type]; ok {
			trigger.AuthenticationRef = &ProcessAutoscalingTriggerAuthenticationRef{
				Name: auth.Name,
				Kind: string(auth.Kind),
			}
		}

		triggers = append(triggers, trigger)
	}

	autoscaling := ProcessAutoscaling{
		CooldownPeriodSeconds:  ptr.Deref(config.CooldownPeriodSeconds, 300),
		Enabled:                len(triggers) > 0,
		MaxReplicas:            ptr.Deref(config.MaxQuantity, 0),
		MinReplicas:            ptr.Deref(config.MinQuantity, 0),
		PollingIntervalSeconds: ptr.Deref(config.PollingIntervalSeconds, 30),
		Triggers:               triggers,
		Type:                   "keda",
	}

	return autoscaling, nil
}

// getKedaValues retrieves keda values for a given app and process type
func getKedaValues(ctx context.Context, clientset KubernetesClient, appName string) (GlobalKedaValues, error) {
	properties, err := common.PropertyGetAllByPrefix("scheduler-k3s", appName, TriggerAuthPropertyPrefix)
	if err != nil {
		return GlobalKedaValues{}, fmt.Errorf("Error getting trigger-auth properties: %w", err)
	}

	auths := map[string]KedaAuthentication{}
	for key, value := range properties {
		parts := strings.SplitN(strings.TrimPrefix(key, TriggerAuthPropertyPrefix), ".", 2)
		if len(parts) != 2 {
			return GlobalKedaValues{}, fmt.Errorf("Invalid trigger-auth property format: %s", key)
		}

		authType := parts[0]
		secretKey := parts[1]
		if len(secretKey) == 0 {
			return GlobalKedaValues{}, fmt.Errorf("Invalid trigger-auth property format: %s", key)
		}

		if _, ok := auths[authType]; !ok {
			auths[authType] = KedaAuthentication{
				Name:    fmt.Sprintf("%s-%s", appName, authType),
				Type:    authType,
				Kind:    KedaAuthenticationKind_TriggerAuthentication,
				Secrets: make(map[string]string),
			}
		}

		auths[authType].Secrets[secretKey] = base64.StdEncoding.EncodeToString([]byte(value))
	}

	items, err := clientset.ListClusterTriggerAuthentications(ctx, ListClusterTriggerAuthenticationsInput{})
	if err != nil {
		if !k8serrors.IsNotFound(err) {
			return GlobalKedaValues{}, fmt.Errorf("Error listing cluster trigger authentications: %w", err)
		}

		common.LogWarn(fmt.Sprintf("Error listing cluster trigger authentications: %v", err))
		common.LogWarn("Continuing with no cluster trigger authentications")
		common.LogWarn("This may be due to the keda helm chart not being installed")
		items = []kedav1alpha1.ClusterTriggerAuthentication{}
	}

	globalAuths := map[string]KedaAuthentication{}
	for _, item := range items {
		globalAuths[item.Name] = KedaAuthentication{
			Name: item.Name,
			Kind: KedaAuthenticationKind_ClusterTriggerAuthentication,
			Type: item.Name,
		}
	}

	return GlobalKedaValues{
		Authentications:       auths,
		GlobalAuthentications: globalAuths,
	}, nil
}

// getGlobalAnnotations retrieves global annotations for a given app
func getGlobalAnnotations(appName string) (ProcessAnnotations, error) {
	return getAnnotations(appName, GlobalProcessType)
}

// getAnnotation retrieves an annotation for a given app, process type, and resource type
func getAnnotation(appName string, processType string, resourceType string) (map[string]string, error) {
	annotations := map[string]string{}
	annotationsList, err := common.PropertyListGet("scheduler-k3s", appName, fmt.Sprintf("%s.%s", processType, resourceType))
	if err != nil {
		return annotations, err
	}

	for _, annotation := range annotationsList {
		parts := strings.SplitN(annotation, ": ", 2)
		if len(parts) != 2 {
			return annotations, fmt.Errorf("Invalid annotation format: %s", annotation)
		}

		annotations[parts[0]] = parts[1]
	}

	return annotations, nil
}

func getDeployTimeout(appName string) string {
	return common.PropertyGetDefault("scheduler-k3s", appName, "deploy-timeout", "")
}

func getGlobalDeployTimeout() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "deploy-timeout", "300s")
}

func getComputedDeployTimeout(appName string) string {
	deployTimeout := getDeployTimeout(appName)
	if deployTimeout == "" {
		deployTimeout = getGlobalDeployTimeout()
	}

	return deployTimeout
}

func getImagePullSecrets(appName string) string {
	return common.PropertyGetDefault("scheduler-k3s", appName, "image-pull-secrets", "")
}

func getGlobalImagePullSecrets() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "image-pull-secrets", "")
}

func getComputedImagePullSecrets(appName string) string {
	imagePullSecrets := getImagePullSecrets(appName)
	if imagePullSecrets == "" {
		imagePullSecrets = getGlobalImagePullSecrets()
	}

	return imagePullSecrets
}

func getGlobalIngressClass() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "ingress-class", DefaultIngressClass)
}
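
// getIngressAnnotations computes nginx ingress annotations for a given app and process type, translating nginx-vhosts properties into annotations and configuration/server snippets, then merging custom ingress annotations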
func getIngressAnnotations(appName string, processType string) (map[string]string, error) {
	type annotation struct {
		annotation      string
		getter          func(appName string) string
		locationSnippet func(value string) string
		serverSnippet   func(value string) string
	}

	locationLines := []string{}
	serverLines := []string{}

	properties := map[string]annotation{
		"access-log-path": {
			getter: nginxvhosts.ComputedAccessLogPath,
			serverSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("access_log %s;", value)
			},
		},
		"bind-address-ipv4": {
			getter: nginxvhosts.ComputedBindAddressIPv4,
		},
		"bind-address-ipv6": {
			getter: nginxvhosts.ComputedBindAddressIPv6,
		},
		"client-body-timeout": {
			getter: nginxvhosts.ComputedClientBodyTimeout,
			serverSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("client_body_timeout %s;", value)
			},
		},
		"client-header-timeout": {
			getter: nginxvhosts.ComputedClientHeaderTimeout,
			serverSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("client_header_timeout %s;", value)
			},
		},
		"client-max-body-size": {
			annotation: "nginx.ingress.kubernetes.io/proxy-body-size",
			getter:     nginxvhosts.ComputedClientMaxBodySize,
		},
		"disable-custom-config": {
			getter: nginxvhosts.ComputedDisableCustomConfig,
		},
		"error-log-path": {
			getter: nginxvhosts.ComputedErrorLogPath,
			serverSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("error_log %s;", value)
			},
		},
		// todo: handle hsts properly
		"hsts-include-subdomains": {
			getter: nginxvhosts.ComputedHSTSIncludeSubdomains,
		},
		"hsts-max-age": {
			getter: nginxvhosts.ComputedHSTSMaxAge,
		},
		"hsts-preload": {
			getter: nginxvhosts.ComputedHSTSPreload,
		},
		"hsts": {
			getter: nginxvhosts.ComputedHSTS,
		},
		"keepalive-timeout": {
			getter: nginxvhosts.ComputedKeepaliveTimeout,
			serverSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("keepalive_timeout %s;", value)
			},
		},
		"lingering-timeout": {
			getter: nginxvhosts.ComputedLingeringTimeout,
			serverSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("lingering_timeout %s;", value)
			},
		},
		"nginx-conf-sigil-path": {
			getter: nginxvhosts.ComputedNginxConfSigilPath,
		},
		"proxy-buffer-size": {
			annotation: "nginx.ingress.kubernetes.io/proxy-buffer-size",
			getter:     nginxvhosts.ComputedProxyBufferSize,
		},
		"proxy-buffering": {
			annotation: "nginx.ingress.kubernetes.io/proxy-buffering",
			getter:     nginxvhosts.ComputedProxyBuffering,
		},
		"proxy-buffers": {
			annotation: "nginx.ingress.kubernetes.io/proxy-buffers-number",
			getter:     nginxvhosts.ComputedProxyBuffers,
		},
		"proxy-busy-buffers-size": {
			getter: nginxvhosts.ComputedProxyBusyBuffersSize,
			locationSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("proxy_busy_buffers_size %s;", value)
			},
		},
		"proxy-connect-timeout": {
			annotation: "nginx.ingress.kubernetes.io/proxy-connect-timeout",
			getter:     nginxvhosts.ComputedProxyConnectTimeout,
		},
		"proxy-read-timeout": {
			annotation: "nginx.ingress.kubernetes.io/proxy-read-timeout",
			getter:     nginxvhosts.ComputedProxyReadTimeout,
		},
		"proxy-send-timeout": {
			annotation: "nginx.ingress.kubernetes.io/proxy-send-timeout",
			getter:     nginxvhosts.ComputedProxySendTimeout,
		},
		"send-timeout": {
			getter: nginxvhosts.ComputedSendTimeout,
			serverSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("send_timeout %s;", value)
			},
		},
		"underscore-in-headers": {
			getter: nginxvhosts.ComputedUnderscoreInHeaders,
			serverSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("underscores_in_headers %s;", value)
			},
		},
		"x-forwarded-for-value": {
			getter: nginxvhosts.ComputedXForwardedForValue,
			locationSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("proxy_set_header X-Forwarded-For %s;", value)
			},
		},
		"x-forwarded-port-value": {
			getter: nginxvhosts.ComputedXForwardedPortValue,
			locationSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("proxy_set_header X-Forwarded-Port %s;", value)
			},
		},
		"x-forwarded-proto-value": {
			getter: nginxvhosts.ComputedXForwardedProtoValue,
			locationSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("proxy_set_header X-Forwarded-Proto %s;", value)
			},
		},
		"x-forwarded-ssl": {
			getter: nginxvhosts.ComputedXForwardedSSL,
			locationSnippet: func(value string) string {
				if value == "" {
					return ""
				}
				return fmt.Sprintf("proxy_set_header X-Forwarded-SSL %s;", value)
			},
		},
	}

	annotations := map[string]string{}
	for _, newKey := range properties {
		if newKey.locationSnippet != nil {
			locationLines = append(locationLines, newKey.locationSnippet(newKey.getter(appName)))
		} else if newKey.serverSnippet != nil {
			serverLines = append(serverLines, newKey.serverSnippet(newKey.getter(appName)))
		} else if newKey.annotation != "" {
			annotations[newKey.annotation] = newKey.getter(appName)
		}
	}

	var locationSnippet string
	for _, line := range locationLines {
		if line != "" {
			locationSnippet += line + "\n"
		}
	}
	var serverSnippet string
	for _, line := range serverLines {
		if line != "" {
			serverSnippet += line + "\n"
		}
	}

	if locationSnippet != "" {
		annotations["nginx.ingress.kubernetes.io/configuration-snippet"] = locationSnippet
	}
	if serverSnippet != "" {
		annotations["nginx.ingress.kubernetes.io/server-snippet"] = serverSnippet
	}

	customAnnotations, err := getAnnotation(appName, processType, "ingress")
	if err != nil {
		return map[string]string{}, err
	}

	for key, value := range customAnnotations {
		if _, ok := annotations[key]; ok {
			common.LogWarn(fmt.Sprintf("Nginx-based annotation %s will be overwritten by custom annotation", key))
		}

		annotations[key] = value
	}

	return annotations, nil
}

// getLabels retrieves labels for a given app and process type
func getLabels(appName string, processType string) (ProcessLabels, error) {
	labels := ProcessLabels{}
	certificateLabels, err := getLabel(appName, processType, "certificate")
	if err != nil {
		return labels, err
	}
	labels.CertificateLabels = certificateLabels

	cronJobLabels, err := getLabel(appName, processType, "cronjob")
	if err != nil {
		return labels, err
	}
	labels.CronJobLabels = cronJobLabels

	deploymentLabels, err := getLabel(appName, processType, "deployment")
	if err != nil {
		return labels, err
	}
	labels.DeploymentLabels = deploymentLabels

	ingressLabels, err := getLabel(appName, processType, "ingress")
	if err != nil {
		return labels, err
	}
	labels.IngressLabels = ingressLabels

	jobLabels, err := getLabel(appName, processType, "job")
	if err != nil {
		return labels, err
	}
	labels.JobLabels = jobLabels

	podLabels, err := getLabel(appName, processType, "pod")
	if err != nil {
		return labels, err
	}
	labels.PodLabels = podLabels

	secretLabels, err := getLabel(appName, processType, "secret")
	if err != nil {
		return labels, err
	}
	labels.SecretLabels = secretLabels

	serviceLabels, err := getLabel(appName, processType, "service")
	if err != nil {
		return labels, err
	}
	labels.ServiceLabels = serviceLabels

	serviceAccountLabels, err := getLabel(appName, processType, "serviceaccount")
	if err != nil {
		return labels, err
	}
	labels.ServiceAccountLabels = serviceAccountLabels

	traefikIngressRouteLabels, err := getLabel(appName, processType, "traefik_ingressroute")
	if err != nil {
		return labels, err
	}
	labels.TraefikIngressRouteLabels = traefikIngressRouteLabels

	traefikMiddlewareLabels, err := getLabel(appName, processType, "traefik_middleware")
	if err != nil {
		return labels, err
	}
	labels.TraefikMiddlewareLabels = traefikMiddlewareLabels

	return labels, nil
}

// getGlobalLabel retrieves global labels for a given app
func getGlobalLabel(appName string) (ProcessLabels, error) {
	return getLabels(appName, GlobalProcessType)
}

// getLabel retrieves a label for a given app, process type, and resource type
func getLabel(appName string, processType string, resourceType string) (map[string]string, error) {
	labels := map[string]string{}
	labelsList, err := common.PropertyListGet("scheduler-k3s", appName, fmt.Sprintf("labels.%s.%s", processType, resourceType))
	if err != nil {
		return labels, err
	}

	for _, label := range labelsList {
		parts := strings.SplitN(label, ": ", 2)
		if len(parts) != 2 {
			return labels, fmt.Errorf("Invalid label format: %s", label)
		}

		labels[parts[0]] = parts[1]
	}

	return labels, nil
}

func getLetsencryptServer(appName string) string {
	return common.PropertyGetDefault("scheduler-k3s", appName, "letsencrypt-server", "")
}

func getGlobalLetsencryptServer() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "letsencrypt-server", "prod")
}

func getComputedLetsencryptServer(appName string) string {
	letsencryptServer := getLetsencryptServer(appName)
	if letsencryptServer == "" {
		letsencryptServer = getGlobalLetsencryptServer()
	}

	return letsencryptServer
}

func getGlobalLetsencryptEmailProd() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "letsencrypt-email-prod", "")
}

func getGlobalLetsencryptEmailStag() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "letsencrypt-email-stag", "")
}

func getNamespace(appName string) string {
	return common.PropertyGetDefault("scheduler-k3s", appName, "namespace", "")
}

func getGlobalNamespace() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "namespace", "default")
}

func getComputedNamespace(appName string) string {
	namespace := getNamespace(appName)
	if namespace == "" {
		namespace = getGlobalNamespace()
	}

	return namespace
}

func getGlobalNetworkInterface() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "network-interface", "eth0")
}

func getRollbackOnFailure(appName string) string {
	return common.PropertyGetDefault("scheduler-k3s", appName, "rollback-on-failure", "")
}

func getGlobalRollbackOnFailure() string {
	return common.PropertyGetDefault("scheduler-k3s", "--global", "rollback-on-failure", "false")
}

func getComputedRollbackOnFailure(appName string) string {
	rollbackOnFailure := getRollbackOnFailure(appName)
	if rollbackOnFailure == "" {
		rollbackOnFailure = getGlobalRollbackOnFailure()
	}

	return rollbackOnFailure
}

func getGlobalGlobalToken() string {
	return common.PropertyGet("scheduler-k3s", "--global", "token")
}
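
// getProcessHealtchecks converts app.json healthchecks into liveness, readiness, and startup probes for a process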
func getProcessHealtchecks(healthchecks []appjson.Healthcheck, primaryPort int32) ProcessHealthchecks {
	if len(healthchecks) == 0 {
		return ProcessHealthchecks{}
	}

	livenessChecks := []ProcessHealthcheck{}
	readinessChecks := []ProcessHealthcheck{}
	startupChecks := []ProcessHealthcheck{}
	uptimeSeconds := []int32{}
	for _, healthcheck := range healthchecks {
		probe := ProcessHealthcheck{
			InitialDelaySeconds: healthcheck.InitialDelay,
			PeriodSeconds:       healthcheck.Wait,
			TimeoutSeconds:      healthcheck.Timeout,
			FailureThreshold:    healthcheck.Attempts,
			SuccessThreshold:    int32(1),
		}
		if len(healthcheck.Command) > 0 {
			probe.Exec = &ExecHealthcheck{
				Command: healthcheck.Command,
			}
		} else if healthcheck.Listening {
			probe.TCPSocket = &TCPHealthcheck{
				Port: primaryPort,
			}
			for _, header := range healthcheck.HTTPHeaders {
				if header.Name == "Host" {
					probe.TCPSocket.Host = header.Value
				}
			}
		} else if healthcheck.Path != "" {
			probe.HTTPGet = &HTTPHealthcheck{
				Path:        healthcheck.Path,
				Port:        primaryPort,
				HTTPHeaders: []HTTPHeader{},
			}

			if healthcheck.Scheme != "" {
				probe.HTTPGet.Scheme = URIScheme(strings.ToUpper(healthcheck.Scheme))
			}

			for _, header := range healthcheck.HTTPHeaders {
				probe.HTTPGet.HTTPHeaders = append(probe.HTTPGet.HTTPHeaders, HTTPHeader{
					Name:  header.Name,
					Value: header.Value,
				})
			}
		} else if healthcheck.Uptime > 0 {
			uptimeSeconds = append(uptimeSeconds, healthcheck.Uptime)
		}

		if healthcheck.Type == appjson.HealthcheckType_Liveness {
			livenessChecks = append(livenessChecks, probe)
		} else if healthcheck.Type == appjson.HealthcheckType_Readiness {
			readinessChecks = append(readinessChecks, probe)
		} else if healthcheck.Type == appjson.HealthcheckType_Startup {
			startupChecks = append(startupChecks, probe)
		}
	}
	if len(livenessChecks) > 1 {
		common.LogWarn("Multiple liveness checks are not supported, only the first one will be used")
	}
	if len(readinessChecks) > 1 {
		common.LogWarn("Multiple readiness checks are not supported, only the first one will be used")
	}
	if len(startupChecks) > 1 {
		common.LogWarn("Multiple startup checks are not supported, only the first one will be used")
	}
	if len(uptimeSeconds) > 1 {
		common.LogWarn("Multiple uptime checks are not supported, only the first one will be used")
	}

	processHealthchecks := ProcessHealthchecks{}
	if len(livenessChecks) > 0 {
		processHealthchecks.Liveness = livenessChecks[0]
	}
	if len(readinessChecks) > 0 {
		processHealthchecks.Readiness = readinessChecks[0]
	}
	if len(startupChecks) > 0 {
		processHealthchecks.Startup = startupChecks[0]
	}
	if len(uptimeSeconds) > 0 {
		processHealthchecks.MinReadySeconds = uptimeSeconds[0]
	}

	return processHealthchecks
}
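
// getProcessResources retrieves cpu, memory, and nvidia-gpu resource limits and requests for a given app and process type, falling back to default requests of 100m cpu and 128Mi memory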
func getProcessResources(appName string, processType string) (ProcessResourcesMap, error) {
|
|
|
|
|
processResources := ProcessResourcesMap{
|
2024-02-21 18:31:04 -05:00
|
|
|
Limits: ProcessResources{},
|
		Requests: ProcessResources{
			CPU:    "100m",
			Memory: "128Mi",
		},
	}

	emptyValues := map[string]bool{
		"":  true,
		"0": true,
	}

	result, err := common.CallPlugnTrigger(common.PlugnTriggerInput{
		Trigger: "resource-get-property",
		Args:    []string{appName, processType, "limit", "cpu"},
	})
	if err == nil && !emptyValues[result.StdoutContents()] {
		quantity, err := resource.ParseQuantity(result.StdoutContents())
		if err != nil {
			return ProcessResourcesMap{}, fmt.Errorf("Error parsing cpu limit: %w", err)
		}
		if quantity.MilliValue() != 0 {
			processResources.Limits.CPU = quantity.String()
		} else {
			processResources.Limits.CPU = ""
		}
	}

	response, err := common.CallPlugnTrigger(common.PlugnTriggerInput{
		Trigger: "resource-get-property",
		Args:    []string{appName, processType, "limit", "nvidia-gpu"},
	})
	nvidiaGpuLimit := response.StdoutContents()
	if err == nil && nvidiaGpuLimit != "" && nvidiaGpuLimit != "0" {
		_, err := resource.ParseQuantity(nvidiaGpuLimit)
		if err != nil {
			return ProcessResourcesMap{}, fmt.Errorf("Error parsing nvidia-gpu limit: %w", err)
		}
		processResources.Limits.NvidiaGPU = nvidiaGpuLimit
	}

	result, err = common.CallPlugnTrigger(common.PlugnTriggerInput{
		Trigger: "resource-get-property",
		Args:    []string{appName, processType, "limit", "memory"},
	})
	if err == nil && !emptyValues[result.StdoutContents()] {
		quantity, err := parseMemoryQuantity(result.StdoutContents())
		if err != nil {
			return ProcessResourcesMap{}, fmt.Errorf("Error parsing memory limit: %w", err)
		}
		if quantity != "0Mi" {
			processResources.Limits.Memory = quantity
		} else {
			processResources.Limits.Memory = ""
		}
	}

	result, err = common.CallPlugnTrigger(common.PlugnTriggerInput{
		Trigger: "resource-get-property",
		Args:    []string{appName, processType, "reserve", "cpu"},
	})
	if err == nil && !emptyValues[result.StdoutContents()] {
		quantity, err := resource.ParseQuantity(result.StdoutContents())
		if err != nil {
			return ProcessResourcesMap{}, fmt.Errorf("Error parsing cpu request: %w", err)
		}
		if quantity.MilliValue() != 0 {
			processResources.Requests.CPU = quantity.String()
		} else {
			processResources.Requests.CPU = ""
		}
	}

	result, err = common.CallPlugnTrigger(common.PlugnTriggerInput{
		Trigger: "resource-get-property",
		Args:    []string{appName, processType, "reserve", "memory"},
	})
	if err == nil && !emptyValues[result.StdoutContents()] {
		quantity, err := parseMemoryQuantity(result.StdoutContents())
		if err != nil {
			return ProcessResourcesMap{}, fmt.Errorf("Error parsing memory request: %w", err)
		}
		if quantity != "0Mi" {
			processResources.Requests.Memory = quantity
		} else {
			processResources.Requests.Memory = ""
		}
	}

	return processResources, nil
}
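
// getServerIP returns the IPv4 address bound to the configured global network interface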
func getServerIP() (string, error) {
	serverIP := ""
	networkInterface := getGlobalNetworkInterface()
	ifaces, err := net.Interfaces()
	if err != nil {
		return "", fmt.Errorf("Unable to get network interfaces: %w", err)
	}

	for _, iface := range ifaces {
		if iface.Name == networkInterface {
			addr, err := iface.Addrs()
			if err != nil {
				return "", fmt.Errorf("Unable to get network addresses for interface %s: %w", networkInterface, err)
			}
			for _, a := range addr {
				if ipnet, ok := a.(*net.IPNet); ok {
					if ipnet.IP.To4() != nil {
						serverIP = ipnet.IP.String()
					}
				}
			}
		}
	}

	if len(serverIP) == 0 {
		return "", fmt.Errorf("Unable to determine server ip address from network-interface %s", networkInterface)
	}
	return serverIP, nil
}
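
// getStartCommand resolves the start command for a process, expanding $PORT and app environment variables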
func getStartCommand(input StartCommandInput) (StartCommandOutput, error) {
	command := extractStartCommand(input)
	fields, err := shell.Fields(command, func(name string) string {
		if name == "PORT" {
			return fmt.Sprint(input.Port)
		}

		return input.Env[name]
	})
	if err != nil {
		return StartCommandOutput{}, err
	}

	return StartCommandOutput{
		Command: fields,
	}, nil
}
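
// installHelmCharts adds the configured helm repositories and installs or upgrades every chart that shouldInstall approves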
func installHelmCharts(ctx context.Context, clientset KubernetesClient, shouldInstall func(HelmChart) bool) error {
	for _, repo := range HelmRepositories {
		helmAgent, err := NewHelmAgent("default", DeployLogPrinter)
		if err != nil {
			return fmt.Errorf("Error creating helm agent: %w", err)
		}

		err = helmAgent.AddRepository(ctx, AddRepositoryInput(repo))
		if err != nil {
			return fmt.Errorf("Error adding helm repository %s: %w", repo.Name, err)
		}
	}

	for _, chart := range HelmCharts {
		if !shouldInstall(chart) {
			continue
		}

		if chart.CreateNamespace {
			namespace := corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: chart.Namespace,
					Annotations: map[string]string{
						"dokku.com/managed": "true",
					},
					Labels: map[string]string{
						"dokku.com/managed": "true",
					},
				},
			}
			_, err := clientset.CreateNamespace(ctx, CreateNamespaceInput{
				Name: namespace,
			})
			if err != nil {
				return fmt.Errorf("Error creating namespace %s: %w", chart.Namespace, err)
			}
		}

		contents, err := templates.ReadFile(fmt.Sprintf("templates/helm-config/%s.yaml", chart.ReleaseName))
		if err != nil && !errors.Is(err, os.ErrNotExist) {
			return fmt.Errorf("Error reading values file %s: %w", chart.ReleaseName, err)
		}

		var values map[string]interface{}
		if len(contents) > 0 {
			err = yaml.Unmarshal(contents, &values)
			if err != nil {
				return fmt.Errorf("Error unmarshalling values file: %w", err)
			}
		}

		helmAgent, err := NewHelmAgent(chart.Namespace, DeployLogPrinter)
		if err != nil {
			return fmt.Errorf("Error creating helm agent: %w", err)
		}

		timeoutDuration, err := time.ParseDuration("300s")
		if err != nil {
			return fmt.Errorf("Error parsing deploy timeout duration: %w", err)
		}

		err = helmAgent.InstallOrUpgradeChart(ctx, ChartInput{
			ChartPath:   chart.ChartPath,
			Namespace:   chart.Namespace,
			ReleaseName: chart.ReleaseName,
			RepoURL:     chart.RepoURL,
			Values:      values,
			Version:     chart.Version,
			Timeout:     timeoutDuration,
			Wait:        true,
		})
		if err != nil {
			return fmt.Errorf("Error installing chart %s: %w", chart.ChartPath, err)
		}
	}
	return nil
}
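
// installHelperCommands downloads the kubectx and kubens helper binaries into /usr/local/bin and then installs helm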
func installHelperCommands(ctx context.Context) error {
	urls := map[string]string{
		"kubectx": "https://github.com/ahmetb/kubectx/releases/latest/download/kubectx",
		"kubens":  "https://github.com/ahmetb/kubectx/releases/latest/download/kubens",
	}

	client := resty.New()
	for binaryName, url := range urls {
		resp, err := client.R().
			SetContext(ctx).
			Get(url)
		if err != nil {
			return fmt.Errorf("Unable to download %s: %w", binaryName, err)
		}
		if resp == nil {
			return fmt.Errorf("Missing response from %s download: %w", binaryName, err)
		}

		if resp.StatusCode() != 200 {
			return fmt.Errorf("Invalid status code for %s: %d", binaryName, resp.StatusCode())
		}

		f, err := os.Create(filepath.Join("/usr/local/bin", binaryName))
		if err != nil {
			return fmt.Errorf("Unable to create %s: %w", binaryName, err)
		}

		if err := f.Close(); err != nil {
			return fmt.Errorf("Unable to close %s file: %w", binaryName, err)
		}

		err = common.WriteStringToFile(common.WriteStringToFileInput{
			Content:   resp.String(),
			Filename:  f.Name(),
			GroupName: "root",
			Mode:      os.FileMode(0755),
			Username:  "root",
		})
		if err != nil {
			return fmt.Errorf("Unable to write %s to file: %w", binaryName, err)
		}

		fi, err := os.Stat(f.Name())
		if err != nil {
			return fmt.Errorf("Unable to get %s file size: %w", binaryName, err)
		}

		if fi.Size() == 0 {
			return fmt.Errorf("Invalid %s filesize", binaryName)
		}
	}

	return installHelm(ctx)
}
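
// installHelm downloads the official get-helm-3 installer script and runs it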
func installHelm(ctx context.Context) error {
	client := resty.New()
	resp, err := client.R().
		SetContext(ctx).
		Get("https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3")
	if err != nil {
		return fmt.Errorf("Unable to download helm installer: %w", err)
	}
	if resp == nil {
		return fmt.Errorf("Missing response from helm installer download: %w", err)
	}

	if resp.StatusCode() != 200 {
		return fmt.Errorf("Invalid status code for helm installer script: %d", resp.StatusCode())
	}

	f, err := os.CreateTemp("", "sample")
	if err != nil {
		return fmt.Errorf("Unable to create temporary file for helm installer: %w", err)
	}
	defer os.Remove(f.Name())

	if err := f.Close(); err != nil {
		return fmt.Errorf("Unable to close helm installer file: %w", err)
	}

	err = common.WriteStringToFile(common.WriteStringToFileInput{
		Content:  resp.String(),
		Filename: f.Name(),
		Mode:     os.FileMode(0755),
	})
	if err != nil {
		return fmt.Errorf("Unable to write helm installer to file: %w", err)
	}

	fi, err := os.Stat(f.Name())
	if err != nil {
		return fmt.Errorf("Unable to get helm installer file size: %w", err)
	}

	if fi.Size() == 0 {
		return fmt.Errorf("Invalid helm installer filesize")
	}

	common.LogInfo2Quiet("Running helm installer")
	installerCmd, err := common.CallExecCommand(common.ExecCommandInput{
		Command:     f.Name(),
		StreamStdio: true,
	})
	if err != nil {
		return fmt.Errorf("Unable to call helm installer command: %w", err)
	}
	if installerCmd.ExitCode != 0 {
		return fmt.Errorf("Invalid exit code from helm installer command: %d", installerCmd.ExitCode)
	}

	return nil
}

// isKubernetesAvailable returns an error if kubernetes api is not available
func isKubernetesAvailable() error {
	client, err := NewKubernetesClient()
	if err != nil {
		return fmt.Errorf("Error creating kubernetes client: %w", err)
	}

	if err := client.Ping(); err != nil {
		return fmt.Errorf("Error pinging kubernetes: %w", err)
	}

	return nil
}

// isK3sInstalled returns an error if k3s is not installed
func isK3sInstalled() error {
	if !common.FileExists("/usr/local/bin/k3s") {
		return fmt.Errorf("k3s binary is not available")
	}

	if !common.FileExists(getKubeconfigPath()) {
		return fmt.Errorf("k3s kubeconfig is not available")
	}

	return nil
}

// isK3sKubernetes returns true if the current kubernetes cluster is configured to be k3s
func isK3sKubernetes() bool {
	return getKubeconfigPath() == KubeConfigPath
}
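
// isPodReady returns a wait.ConditionWithContextFunc that reports whether the named pod is running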
func isPodReady(ctx context.Context, clientset KubernetesClient, podName, namespace string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		fmt.Printf(".")

		pod, err := clientset.GetPod(ctx, GetPodInput{
			Name:      podName,
			Namespace: namespace,
		})
		if err != nil {
			return false, err
		}

		switch pod.Status.Phase {
		case v1.PodRunning:
			return true, nil
		case v1.PodFailed, v1.PodSucceeded:
			return false, conditions.ErrPodCompleted
		}
		return false, nil
	}
}

// kubernetesNodeToNode converts a kubernetes node to a Node
func kubernetesNodeToNode(node v1.Node) Node {
	roles := []string{}
	if len(node.Labels["kubernetes.io/role"]) > 0 {
		roles = append(roles, node.Labels["kubernetes.io/role"])
	} else {
		for k, v := range node.Labels {
			if strings.HasPrefix(k, "node-role.kubernetes.io/") && v == "true" {
				roles = append(roles, strings.TrimPrefix(k, "node-role.kubernetes.io/"))
			}
		}
	}

	sort.Strings(roles)

	ready := false
	for _, condition := range node.Status.Conditions {
		if condition.Type == "Ready" {
			ready = condition.Status == "True"
			break
		}
	}

	remoteHost := ""
	if val, ok := node.Annotations["dokku.com/remote-host"]; ok {
		remoteHost = val
	}

	return Node{
		Name:       node.Name,
		Roles:      roles,
		Ready:      ready,
		RemoteHost: remoteHost,
		Version:    node.Status.NodeInfo.KubeletVersion,
	}
}

// parseMemoryQuantity parses a string into a valid memory quantity
func parseMemoryQuantity(input string) (string, error) {
	if _, err := strconv.ParseInt(input, 10, 64); err == nil {
		input = fmt.Sprintf("%sMi", input)
	}
	quantity, err := resource.ParseQuantity(input)
	if err != nil {
		return "", err
	}

	return quantity.String(), nil
}
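
// uninstallHelperCommands removes the kubectx and kubens helper binaries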
func uninstallHelperCommands(ctx context.Context) error {
	errs, _ := errgroup.WithContext(ctx)
	errs.Go(func() error {
		return os.RemoveAll("/usr/local/bin/kubectx")
	})
	errs.Go(func() error {
		return os.RemoveAll("/usr/local/bin/kubens")
	})
	return errs.Wait()
}
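
// waitForPodBySelectorRunning waits for pods matching the given selector to pass the provided readiness waiter, up to the configured timeout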
func waitForPodBySelectorRunning(ctx context.Context, input WaitForPodBySelectorRunningInput) error {
	pods, err := waitForPodToExist(ctx, WaitForPodToExistInput{
		Clientset:     input.Clientset,
		LabelSelector: input.LabelSelector,
		Namespace:     input.Namespace,
		PodName:       input.PodName,
		RetryCount:    3,
	})
	if err != nil {
		return fmt.Errorf("Error waiting for pod to exist: %w", err)
	}

	if len(pods) == 0 {
		return fmt.Errorf("no pods in %s with selector %s", input.Namespace, input.LabelSelector)
	}

	timeout := time.Duration(input.Timeout) * time.Second
	for _, pod := range pods {
		// skip pods that do not match the requested pod name rather than aborting the loop
		if input.PodName != "" && pod.Name != input.PodName {
			continue
		}

		if err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, false, input.Waiter(ctx, input.Clientset, pod.Name, pod.Namespace)); err != nil {
			print("\n")
			return fmt.Errorf("Error waiting for pod to be ready: %w", err)
		}
	}
	print("\n")
	return nil
}
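
// waitForNodeToExist polls the cluster until the named node (or any node when NodeName is empty) appears, retrying up to RetryCount times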
func waitForNodeToExist(ctx context.Context, input WaitForNodeToExistInput) ([]v1.Node, error) {
	var matchingNodes []v1.Node
	var err error
	for i := 0; i < input.RetryCount; i++ {
		// assign to the outer err so a persistent listing failure is reported once retries are exhausted
		var nodes []v1.Node
		nodes, err = input.Clientset.ListNodes(ctx, ListNodesInput{})
		if err != nil {
			time.Sleep(1 * time.Second)
			continue
		}

		if input.NodeName == "" {
			matchingNodes = nodes
			break
		}

		for _, node := range nodes {
			if node.Name == input.NodeName {
				matchingNodes = append(matchingNodes, node)
				break
			}
		}
		if len(matchingNodes) > 0 {
			break
		}
		time.Sleep(1 * time.Second)
	}
	if err != nil {
		return matchingNodes, fmt.Errorf("Error listing nodes: %w", err)
	}
	return matchingNodes, nil
}
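
// waitForPodToExist polls for pods matching the given selector until they exist, retrying up to RetryCount times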
func waitForPodToExist(ctx context.Context, input WaitForPodToExistInput) ([]v1.Pod, error) {
	var pods []v1.Pod
	var err error
	for i := 0; i < input.RetryCount; i++ {
		pods, err = input.Clientset.ListPods(ctx, ListPodsInput{
			Namespace:     input.Namespace,
			LabelSelector: input.LabelSelector,
		})
		if err != nil {
			time.Sleep(1 * time.Second)
			continue
		}

		if input.PodName == "" {
			break
		}

		// stop retrying once the named pod shows up in the listing
		found := false
		for _, pod := range pods {
			if pod.Name == input.PodName {
				found = true
				break
			}
		}
		if found {
			break
		}
		time.Sleep(1 * time.Second)
	}
	if err != nil {
		return pods, fmt.Errorf("Error listing pods: %w", err)
	}
	return pods, nil
}