mirror of
https://github.com/dokku/dokku.git
synced 2025-12-29 00:25:08 +01:00
Merge pull request #8137 from dokku/k3s-server-profiles
Add the ability to create k3s node profiles
This commit is contained in:
@@ -12,7 +12,10 @@ scheduler-k3s:cluster-list # Lists all nodes in a Dokku
|
||||
scheduler-k3s:cluster-remove [node-id] # Removes a client node from a Dokku-managed cluster
|
||||
scheduler-k3s:ensure-charts # Ensures the k3s charts are installed
|
||||
scheduler-k3s:initialize # Initializes a cluster
|
||||
scheduler-k3s:labels:set <app|--global> <property> (<value>) [--process-type PROCESS_TYPE] <--resource-type RESOURCE_TYPE>, Set or clear a label for a given app/process-type/resource-type combination
|
||||
scheduler-k3s:labels:set <app|--global> <property> (<value>) [--process-type PROCESS_TYPE] <--resource-type RESOURCE_TYPE> # Set or clear a label for a given app/process-type/resource-type combination
|
||||
scheduler-k3s:profiles:add <profile> [--role ROLE] [--insecure-allow-unknown-hosts] [--taint-scheduling] [--kubelet-args KUBELET_ARGS] # Adds a node profile to the k3s cluster
|
||||
scheduler-k3s:profiles:list [--format json|stdout] # Lists all node profiles in the k3s cluster
|
||||
scheduler-k3s:profiles:remove <profile> # Removes a node profile from the k3s cluster
|
||||
scheduler-k3s:report [<app>] [<flag>] # Displays a scheduler-k3s report for one or more apps
|
||||
scheduler-k3s:set [<app>|--global] <key> (<value>) # Set or clear a scheduler-k3s property for an app or the scheduler
|
||||
scheduler-k3s:show-kubeconfig # Displays the kubeconfig for remote usage
|
||||
@@ -83,6 +86,22 @@ Nodes that run app workloads can be added via the `scheduler-k3s:cluster-add` co
|
||||
dokku scheduler-k3s:cluster-add ssh://root@worker-1.example.com
|
||||
```
|
||||
|
||||
Per-node kubelet flags can be supplied by passing `--kubelet-args` with a comma-separated `key=value` list. This is useful for tuning scheduler capacity or enforcing cluster-wide defaults at the node level.
|
||||
|
||||
```shell
|
||||
dokku scheduler-k3s:cluster-add \
|
||||
--kubelet-args allowed-unsafe-sysctls=net.ipv6.conf.all.disable_ipv6 \
|
||||
ssh://root@worker-1.example.com
|
||||
```
|
||||
|
||||
Multiple kubelet arguments can be specified in the same call by separating them with commas. The following example disables IPv6 while also increasing the pod density on the worker.
|
||||
|
||||
```shell
|
||||
dokku scheduler-k3s:cluster-add \
|
||||
--kubelet-args allowed-unsafe-sysctls=net.ipv6.conf.all.disable_ipv6,max-pods=150 \
|
||||
ssh://root@worker-2.example.com
|
||||
```
|
||||
|
||||
If the server isn't in the `known_hosts` file, the connection will fail. This can be bypassed by setting the `--insecure-allow-unknown-hosts` flag:
|
||||
|
||||
```shell
|
||||
@@ -134,6 +153,48 @@ When attaching a worker or server node, the K3s plugin will look at the IP asso
|
||||
dokku scheduler-k3s:set --global network-interface eth1
|
||||
```
|
||||
|
||||
### Node Profiles
|
||||
|
||||
Node profiles capture repeatable `scheduler-k3s:cluster-add` options so you can join multiple nodes with identical settings. A profile name can be specified for the `scheduler-k3s:cluster-add` command via the `--profile <name>` flag. Any flags passed directly to `scheduler-k3s:cluster-add` override the stored values for that run.
|
||||
|
||||
#### Listing profiles
|
||||
|
||||
Display stored profiles to understand which roles and behaviors will be used.
|
||||
|
||||
```shell
|
||||
dokku scheduler-k3s:profiles:list
|
||||
```
|
||||
|
||||
```
|
||||
name role
|
||||
awesome-profile worker
|
||||
```
|
||||
|
||||
This command also takes an optional `--format` flag to specify a format for the output. Options include `json` and `stdout`.
|
||||
|
||||
#### Adding profiles
|
||||
|
||||
Create or update a profile that defines how new nodes should be prepared before joining the cluster.
|
||||
|
||||
```shell
|
||||
dokku scheduler-k3s:profiles:add edge-workers \
|
||||
--role worker \
|
||||
--insecure-allow-unknown-hosts \
|
||||
--kubelet-args protect-kernel-defaults=true,eviction-hard=memory.available<200Mi
|
||||
```
|
||||
|
||||
Profile names must be alphanumeric, may include internal dashes, cannot start/end with a dash, and must be ≤32 characters. Other than the `--server-ip` flag, all flags used for `scheduler-k3s:cluster-add` are valid for the `scheduler-k3s:profiles:add` command.
|
||||
|
||||
#### Removing profiles
|
||||
|
||||
Delete a profile once it’s no longer required.
|
||||
|
||||
```shell
|
||||
dokku scheduler-k3s:profiles:remove edge-workers
|
||||
```
|
||||
|
||||
Removal only deletes the stored definition; nodes that already joined the cluster keep their existing configuration.
|
||||
|
||||
### Changing deployment settings
|
||||
|
||||
The k3s plugin provides a number of settings that can be used to manage deployments on a per-app basis. The following table outlines ones not covered elsewhere:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
SUBCOMMANDS = subcommands/annotations:set subcommands/autoscaling-auth:set subcommands/autoscaling-auth:report subcommands/cluster-add subcommands/cluster-list subcommands/cluster-remove subcommands/ensure-charts subcommands/initialize subcommands/labels:set subcommands/report subcommands/set subcommands/show-kubeconfig subcommands/uninstall
|
||||
SUBCOMMANDS = subcommands/annotations:set subcommands/autoscaling-auth:set subcommands/autoscaling-auth:report subcommands/cluster-add subcommands/cluster-list subcommands/cluster-remove subcommands/ensure-charts subcommands/initialize subcommands/labels:set subcommands/profiles:add subcommands/profiles:list subcommands/profiles:remove subcommands/report subcommands/set subcommands/show-kubeconfig subcommands/uninstall
|
||||
TRIGGERS = triggers/core-post-deploy triggers/core-post-extract triggers/install triggers/post-app-clone-setup triggers/post-app-rename-setup triggers/post-create triggers/post-delete triggers/report triggers/scheduler-app-status triggers/scheduler-deploy triggers/scheduler-enter triggers/scheduler-is-deployed triggers/scheduler-logs triggers/scheduler-proxy-config triggers/scheduler-proxy-logs triggers/scheduler-post-delete triggers/scheduler-run triggers/scheduler-run-list triggers/scheduler-stop triggers/scheduler-cron-write
|
||||
BUILD = commands subcommands triggers
|
||||
PLUGIN_NAME = scheduler-k3s
|
||||
|
||||
@@ -164,10 +164,26 @@ var HelmRepositories = []HelmRepository{
|
||||
},
|
||||
}
|
||||
|
||||
// NodeProfile is a profile for a node in the k3s cluster
|
||||
type NodeProfile struct {
|
||||
// Name is the name of the node profile
|
||||
Name string `json:"name"`
|
||||
// Role is the role of the node
|
||||
Role string `json:"role"`
|
||||
// AllowUknownHosts is whether to allow unknown hosts
|
||||
AllowUknownHosts bool `json:"allow_unknown_hosts,omitempty"`
|
||||
// TaintScheduling is whether to taint the node for scheduling
|
||||
TaintScheduling bool `json:"taint_scheduling,omitempty"`
|
||||
// KubeletArgs is a list of kubelet arguments
|
||||
KubeletArgs []string `json:"kubelet_args,omitempty"`
|
||||
}
|
||||
|
||||
// ServerLabels are the labels for a server node
|
||||
var ServerLabels = map[string]string{
|
||||
"svccontroller.k3s.cattle.io/enablelb": "true",
|
||||
}
|
||||
|
||||
// WorkerLabels are the labels for a worker node
|
||||
var WorkerLabels = map[string]string{
|
||||
"node-role.kubernetes.io/worker": "worker",
|
||||
}
|
||||
|
||||
@@ -19,12 +19,15 @@ Additional commands:`
|
||||
helpContent = `
|
||||
scheduler-k3s:autoscaling-auth:set <app|--global> <trigger> [<--metadata key=value>...], Set or clear a scheduler-k3s autoscaling keda trigger authentication resource for an app
|
||||
scheduler-k3s:annotations:set <app|--global> <property> (<value>) [--process-type PROCESS_TYPE] <--resource-type RESOURCE_TYPE>, Set or clear an annotation for a given app/process-type/resource-type combination
|
||||
scheduler-k3s:cluster-add [--insecure-allow-unknown-hosts] [--server-ip SERVER_IP] [--taint-scheduling] <ssh://user@host:port>, Adds a server node to a Dokku-managed cluster
|
||||
scheduler-k3s:cluster-add [--profile PROFILE] [--role ROLE] [--insecure-allow-unknown-hosts] [--server-ip SERVER_IP] [--taint-scheduling] [--kubelet-args KUBELET_ARGS] <ssh://user@host:port>, Adds a server node to a Dokku-managed cluster
|
||||
scheduler-k3s:cluster-list [--format json|stdout], Lists all nodes in a Dokku-managed cluster
|
||||
scheduler-k3s:cluster-remove [node-id], Removes client node to a Dokku-managed cluster
|
||||
scheduler-k3s:ensure-charts, Ensures the k3s charts are installed
|
||||
scheduler-k3s:initialize [--server-ip SERVER_IP] [--taint-scheduling], Initializes a cluster
|
||||
scheduler-k3s:labels:set <app|--global> <property> (<value>) [--process-type PROCESS_TYPE] <--resource-type RESOURCE_TYPE>, Set or clear a label for a given app/process-type/resource-type combination
|
||||
scheduler-k3s:profiles:add <profile> [--role ROLE] [--insecure-allow-unknown-hosts] [--taint-scheduling] [--kubelet-args KUBELET_ARGS], Adds a node profile to the k3s cluster
|
||||
scheduler-k3s:profiles:list [--format json|stdout], Lists all node profiles in the k3s cluster
|
||||
scheduler-k3s:profiles:remove <profile>, Removes a node profile from the k3s cluster
|
||||
scheduler-k3s:report [<app>] [<flag>], Displays a scheduler-k3s report for one or more apps
|
||||
scheduler-k3s:set <app> <property> (<value>), Set or clear a scheduler-k3s property for an app
|
||||
scheduler-k3s:show-kubeconfig, Displays the kubeconfig for remote usage
|
||||
|
||||
@@ -59,11 +59,12 @@ func main() {
|
||||
allowUknownHosts := args.Bool("insecure-allow-unknown-hosts", false, "insecure-allow-unknown-hosts: allow unknown hosts")
|
||||
taintScheduling := args.Bool("taint-scheduling", false, "taint-scheduling: add a taint against scheduling app workloads")
|
||||
serverIP := args.String("server-ip", "", "server-ip: IP address of the dokku server node")
|
||||
kubeletArgs := args.StringToString("kubelet-args", map[string]string{}, "kubelet-args: a key=value map of kubelet arguments")
|
||||
kubeletArgs := args.StringSlice("kubelet-args", []string{}, "kubelet-args: repeatable key=value kubelet arguments (e.g., --kubelet-args key=value)")
|
||||
role := args.String("role", "worker", "role: [ server | worker ]")
|
||||
profileName := args.String("profile", "", "profile: name of the node profile to use")
|
||||
args.Parse(os.Args[2:])
|
||||
remoteHost := args.Arg(0)
|
||||
err = scheduler_k3s.CommandClusterAdd(*role, remoteHost, *serverIP, *allowUknownHosts, *taintScheduling, *kubeletArgs)
|
||||
err = scheduler_k3s.CommandClusterAdd(*profileName, *role, remoteHost, *serverIP, *allowUknownHosts, *taintScheduling, *kubeletArgs)
|
||||
case "cluster-list":
|
||||
args := flag.NewFlagSet("scheduler-k3s:cluster-list", flag.ExitOnError)
|
||||
format := args.String("format", "stdout", "format: [ stdout | json ]")
|
||||
@@ -103,6 +104,25 @@ func main() {
|
||||
}
|
||||
|
||||
err = scheduler_k3s.CommandLabelsSet(appName, *processType, *resourceType, property, value)
|
||||
case "profiles:add":
|
||||
args := flag.NewFlagSet("scheduler-k3s:profiles:add", flag.ExitOnError)
|
||||
role := args.String("role", "worker", "role: [ server | worker ]")
|
||||
allowUknownHosts := args.Bool("insecure-allow-unknown-hosts", false, "insecure-allow-unknown-hosts: allow unknown hosts")
|
||||
taintScheduling := args.Bool("taint-scheduling", false, "taint-scheduling: add a taint against scheduling app workloads")
|
||||
kubeletArgs := args.StringSlice("kubelet-args", []string{}, "kubelet-args: repeatable key=value kubelet arguments (e.g., --kubelet-args key=value)")
|
||||
args.Parse(os.Args[2:])
|
||||
profileName := args.Arg(0)
|
||||
err = scheduler_k3s.CommandProfilesAdd(profileName, *role, *allowUknownHosts, *taintScheduling, *kubeletArgs)
|
||||
case "profiles:list":
|
||||
args := flag.NewFlagSet("scheduler-k3s:profiles:list", flag.ExitOnError)
|
||||
format := args.String("format", "stdout", "format: [ stdout | json ]")
|
||||
args.Parse(os.Args[2:])
|
||||
err = scheduler_k3s.CommandProfilesList(*format)
|
||||
case "profiles:remove":
|
||||
args := flag.NewFlagSet("scheduler-k3s:profiles:remove", flag.ExitOnError)
|
||||
args.Parse(os.Args[2:])
|
||||
profileName := args.Arg(0)
|
||||
err = scheduler_k3s.CommandProfilesRemove(profileName)
|
||||
case "report":
|
||||
args := flag.NewFlagSet("scheduler-k3s:report", flag.ExitOnError)
|
||||
format := args.String("format", "stdout", "format: [ stdout | json ]")
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"regexp"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -400,7 +401,7 @@ func CommandInitialize(ingressClass string, serverIP string, taintScheduling boo
|
||||
}
|
||||
|
||||
// CommandClusterAdd adds a server to the k3s cluster
|
||||
func CommandClusterAdd(role string, remoteHost string, serverIP string, allowUknownHosts bool, taintScheduling bool, kubeletArgs map[string]string) error {
|
||||
func CommandClusterAdd(profileName string, role string, remoteHost string, serverIP string, allowUknownHosts bool, taintScheduling bool, kubeletArgs []string) error {
|
||||
if err := isK3sInstalled(); err != nil {
|
||||
return fmt.Errorf("k3s not installed, cannot add node to cluster: %w", err)
|
||||
}
|
||||
@@ -414,8 +415,38 @@ func CommandClusterAdd(role string, remoteHost string, serverIP string, allowUkn
|
||||
return fmt.Errorf("kubernetes api not available, cannot add node to cluster: %w", err)
|
||||
}
|
||||
|
||||
if role != "server" && role != "worker" {
|
||||
return fmt.Errorf("Invalid server-type: %s", role)
|
||||
incomingProfile := NodeProfile{}
|
||||
|
||||
if profileName != "" {
|
||||
properties := common.PropertyGetDefault("scheduler-k3s", "--global", fmt.Sprintf("node-profile-%s.json", profileName), "")
|
||||
if properties == "" {
|
||||
return fmt.Errorf("Node profile %s not found", profileName)
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(properties), &incomingProfile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to unmarshal node profile: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if role != "" {
|
||||
incomingProfile.Role = role
|
||||
}
|
||||
|
||||
if allowUknownHosts {
|
||||
incomingProfile.AllowUknownHosts = allowUknownHosts
|
||||
}
|
||||
|
||||
if taintScheduling {
|
||||
incomingProfile.TaintScheduling = taintScheduling
|
||||
}
|
||||
|
||||
if len(kubeletArgs) > 0 {
|
||||
incomingProfile.KubeletArgs = kubeletArgs
|
||||
}
|
||||
|
||||
if incomingProfile.Role != "server" && incomingProfile.Role != "worker" {
|
||||
return fmt.Errorf("Invalid role: %s", incomingProfile.Role)
|
||||
}
|
||||
|
||||
token := getGlobalGlobalToken()
|
||||
@@ -423,7 +454,7 @@ func CommandClusterAdd(role string, remoteHost string, serverIP string, allowUkn
|
||||
return fmt.Errorf("Missing k3s token")
|
||||
}
|
||||
|
||||
if taintScheduling && role == "worker" {
|
||||
if incomingProfile.TaintScheduling && incomingProfile.Role == "worker" {
|
||||
return fmt.Errorf("Taint scheduling can only be used on the server role")
|
||||
}
|
||||
|
||||
@@ -472,14 +503,14 @@ func CommandClusterAdd(role string, remoteHost string, serverIP string, allowUkn
|
||||
}
|
||||
common.LogDebug(fmt.Sprintf("k3s version: %s", k3sVersion))
|
||||
|
||||
common.LogInfo1(fmt.Sprintf("Joining %s to k3s cluster as %s", remoteHost, role))
|
||||
common.LogInfo1(fmt.Sprintf("Joining %s to k3s cluster as %s", remoteHost, incomingProfile.Role))
|
||||
common.LogInfo2Quiet("Updating apt")
|
||||
aptUpdateCmd, err := common.CallSshCommand(common.SshCommandInput{
|
||||
Command: "apt-get",
|
||||
Args: []string{
|
||||
"update",
|
||||
},
|
||||
AllowUknownHosts: allowUknownHosts,
|
||||
AllowUknownHosts: incomingProfile.AllowUknownHosts,
|
||||
RemoteHost: remoteHost,
|
||||
StreamStdio: true,
|
||||
Sudo: true,
|
||||
@@ -503,7 +534,7 @@ func CommandClusterAdd(role string, remoteHost string, serverIP string, allowUkn
|
||||
"nfs-common",
|
||||
"wireguard",
|
||||
},
|
||||
AllowUknownHosts: allowUknownHosts,
|
||||
AllowUknownHosts: incomingProfile.AllowUknownHosts,
|
||||
RemoteHost: remoteHost,
|
||||
StreamStdio: true,
|
||||
Sudo: true,
|
||||
@@ -522,7 +553,7 @@ func CommandClusterAdd(role string, remoteHost string, serverIP string, allowUkn
|
||||
"-o /tmp/k3s-installer.sh",
|
||||
"https://get.k3s.io",
|
||||
},
|
||||
AllowUknownHosts: allowUknownHosts,
|
||||
AllowUknownHosts: incomingProfile.AllowUknownHosts,
|
||||
RemoteHost: remoteHost,
|
||||
StreamStdio: true,
|
||||
})
|
||||
@@ -540,7 +571,7 @@ func CommandClusterAdd(role string, remoteHost string, serverIP string, allowUkn
|
||||
"0755",
|
||||
"/tmp/k3s-installer.sh",
|
||||
},
|
||||
AllowUknownHosts: allowUknownHosts,
|
||||
AllowUknownHosts: incomingProfile.AllowUknownHosts,
|
||||
RemoteHost: remoteHost,
|
||||
StreamStdio: true,
|
||||
})
|
||||
@@ -577,7 +608,7 @@ export INSTALL_K3S_VERSION=%s
|
||||
tmpFile.Close()
|
||||
|
||||
sftpCopyCmd, err := common.CallSftpCopy(common.SftpCopyInput{
|
||||
AllowUknownHosts: allowUknownHosts,
|
||||
AllowUknownHosts: incomingProfile.AllowUknownHosts,
|
||||
DestinationPath: "/tmp/k3s-installer-executor.sh",
|
||||
RemoteHost: remoteHost,
|
||||
SourcePath: tmpFile.Name(),
|
||||
@@ -595,7 +626,7 @@ export INSTALL_K3S_VERSION=%s
|
||||
"0755",
|
||||
"/tmp/k3s-installer-executor.sh",
|
||||
},
|
||||
AllowUknownHosts: allowUknownHosts,
|
||||
AllowUknownHosts: incomingProfile.AllowUknownHosts,
|
||||
RemoteHost: remoteHost,
|
||||
StreamStdio: true,
|
||||
})
|
||||
@@ -634,7 +665,7 @@ export INSTALL_K3S_VERSION=%s
|
||||
token,
|
||||
}
|
||||
|
||||
if role == "server" {
|
||||
if incomingProfile.Role == "server" {
|
||||
args = append([]string{"server"}, args...)
|
||||
// expose etcd metrics
|
||||
args = append(args, "--etcd-expose-metrics")
|
||||
@@ -661,19 +692,19 @@ export INSTALL_K3S_VERSION=%s
|
||||
args = append(args, "--kube-proxy-arg", "metrics-bind-address=0.0.0.0")
|
||||
}
|
||||
|
||||
if taintScheduling {
|
||||
if incomingProfile.TaintScheduling {
|
||||
args = append(args, "--node-taint", "CriticalAddonsOnly=true:NoSchedule")
|
||||
}
|
||||
|
||||
for key, value := range kubeletArgs {
|
||||
args = append(args, "--kubelet-arg", fmt.Sprintf("%s=%s", key, value))
|
||||
for _, kubeletArg := range incomingProfile.KubeletArgs {
|
||||
args = append(args, "--kubelet-arg", kubeletArg)
|
||||
}
|
||||
|
||||
common.LogInfo2Quiet(fmt.Sprintf("Adding %s k3s cluster", nodeName))
|
||||
joinCmd, err := common.CallSshCommand(common.SshCommandInput{
|
||||
Command: "/tmp/k3s-installer-executor.sh",
|
||||
Args: args,
|
||||
AllowUknownHosts: allowUknownHosts,
|
||||
AllowUknownHosts: incomingProfile.AllowUknownHosts,
|
||||
RemoteHost: remoteHost,
|
||||
StreamStdio: true,
|
||||
Sudo: true,
|
||||
@@ -699,7 +730,7 @@ export INSTALL_K3S_VERSION=%s
|
||||
}
|
||||
|
||||
labels := ServerLabels
|
||||
if role == "worker" {
|
||||
if incomingProfile.Role == "worker" {
|
||||
labels = WorkerLabels
|
||||
}
|
||||
|
||||
@@ -995,6 +1026,119 @@ func CommandLabelsSet(appName string, processType string, resourceType string, k
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommandProfilesAdd adds a node profile to the k3s cluster
|
||||
func CommandProfilesAdd(profileName string, role string, allowUknownHosts bool, taintScheduling bool, kubeletArgs []string) error {
|
||||
if role != "server" && role != "worker" {
|
||||
return fmt.Errorf("Invalid role: %s", role)
|
||||
}
|
||||
|
||||
if profileName == "" {
|
||||
return fmt.Errorf("Missing profile name")
|
||||
}
|
||||
|
||||
// profile names must only contain alphanumeric characters and dashes and cannot start with a dash
|
||||
if !regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$`).MatchString(profileName) {
|
||||
return fmt.Errorf("Invalid profile name, must only contain alphanumeric characters and dashes and cannot start with a dash: %s", profileName)
|
||||
}
|
||||
|
||||
// ensure profile names are no longer than 32 characters
|
||||
if len(profileName) > 32 {
|
||||
return fmt.Errorf("Profile name is too long, must be less than 32 characters: %s", profileName)
|
||||
}
|
||||
|
||||
profile := NodeProfile{
|
||||
Name: profileName,
|
||||
Role: role,
|
||||
AllowUknownHosts: allowUknownHosts,
|
||||
TaintScheduling: taintScheduling,
|
||||
KubeletArgs: kubeletArgs,
|
||||
}
|
||||
|
||||
data, err := json.Marshal(profile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to marshal node profile to json: %w", err)
|
||||
}
|
||||
|
||||
if err := common.PropertyWrite("scheduler-k3s", "--global", fmt.Sprintf("node-profile-%s.json", profileName), string(data)); err != nil {
|
||||
return fmt.Errorf("Unable to write node profile: %w", err)
|
||||
}
|
||||
|
||||
common.LogInfo1(fmt.Sprintf("Node profile %s added", profileName))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommandProfilesList lists the node profiles in the k3s cluster
|
||||
func CommandProfilesList(format string) error {
|
||||
if format != "stdout" && format != "json" {
|
||||
return fmt.Errorf("Invalid format: %s", format)
|
||||
}
|
||||
|
||||
properties, err := common.PropertyGetAllByPrefix("scheduler-k3s", "--global", "node-profile-")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to get node profiles: %w", err)
|
||||
}
|
||||
|
||||
output := []NodeProfile{}
|
||||
for property, data := range properties {
|
||||
if !strings.HasSuffix(property, ".json") {
|
||||
continue
|
||||
}
|
||||
|
||||
var profile NodeProfile
|
||||
err := json.Unmarshal([]byte(data), &profile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to unmarshal node profile: %w", err)
|
||||
}
|
||||
|
||||
output = append(output, profile)
|
||||
}
|
||||
|
||||
if format == "stdout" {
|
||||
lines := []string{"name|role"}
|
||||
for _, profile := range output {
|
||||
lines = append(lines, fmt.Sprintf("%s|%s", profile.Name, profile.Role))
|
||||
}
|
||||
|
||||
columnized := columnize.SimpleFormat(lines)
|
||||
fmt.Println(columnized)
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := json.Marshal(output)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to marshal json: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println(string(b))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommandProfilesRemove removes a node profile from the k3s cluster
|
||||
func CommandProfilesRemove(profileName string) error {
|
||||
if profileName == "" {
|
||||
return fmt.Errorf("Missing profile name")
|
||||
}
|
||||
|
||||
// profile names must only contain alphanumeric characters and dashes and cannot start with a dash
|
||||
if !regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$`).MatchString(profileName) {
|
||||
return fmt.Errorf("Invalid profile name, must only contain alphanumeric characters and dashes and cannot start with a dash: %s", profileName)
|
||||
}
|
||||
|
||||
// ensure profile names are no longer than 32 characters
|
||||
if len(profileName) > 32 {
|
||||
return fmt.Errorf("Profile name is too long, must be less than 32 characters: %s", profileName)
|
||||
}
|
||||
|
||||
if err := common.PropertyDelete("scheduler-k3s", "--global", fmt.Sprintf("node-profile-%s.json", profileName)); err != nil {
|
||||
return fmt.Errorf("Unable to delete node profile: %w", err)
|
||||
}
|
||||
|
||||
common.LogInfo1(fmt.Sprintf("Node profile %s removed", profileName))
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommandReport displays a scheduler-k3s report for one or more apps
|
||||
func CommandReport(appName string, format string, infoFlag string) error {
|
||||
if len(appName) == 0 {
|
||||
|
||||
Reference in New Issue
Block a user