mirror of https://github.com/dokku/dokku.git
feat: implement :cluster-list and :cluster-remove
These - and the existing :cluster-add command - do not require root.
@@ -89,14 +89,14 @@ fi
 if [[ $(id -un) != "dokku" ]]; then
   unset TMP TMPDIR TEMP TEMPDIR
-  if [[ ! $1 =~ plugin:* ]] && [[ $1 != "ssh-keys:add" ]] && [[ $1 != "ssh-keys:remove" ]] && [[ $1 != "scheduler-k3s:initialize" ]] && [[ $1 != "scheduler-k3s:uninstall" ]] && [[ $1 != "scheduler-k3s:cluster-add" ]]; then
+  if [[ ! $1 =~ plugin:* ]] && [[ $1 != "ssh-keys:add" ]] && [[ $1 != "ssh-keys:remove" ]] && [[ $1 != "scheduler-k3s:initialize" ]] && [[ $1 != "scheduler-k3s:uninstall" ]]; then
     export SSH_USER=$(id -un)
     sudo -u dokku -E -H "$0" "$@"
     exit $?
   fi
 fi
 
-if [[ $1 =~ ^plugin:.* && $1 != "plugin:help" && $1 != "plugin:list" ]] || [[ $1 == "ssh-keys:add" ]] || [[ $1 == "ssh-keys:remove" ]] || [[ $1 == "scheduler-k3s:initialize" ]] || [[ $1 == "scheduler-k3s:uninstall" ]] || [[ $1 == "scheduler-k3s:cluster-add" ]]; then
+if [[ $1 =~ ^plugin:.* && $1 != "plugin:help" && $1 != "plugin:list" ]] || [[ $1 == "ssh-keys:add" ]] || [[ $1 == "ssh-keys:remove" ]] || [[ $1 == "scheduler-k3s:initialize" ]] || [[ $1 == "scheduler-k3s:uninstall" ]]; then
   if [[ $(id -un) != "root" ]]; then
     dokku_log_fail "This command must be run as root"
   else
@@ -1,4 +1,4 @@
-SUBCOMMANDS = subcommands/initialize subcommands/cluster-add subcommands/report subcommands/set subcommands/show-kubeconfig subcommands/uninstall
+SUBCOMMANDS = subcommands/initialize subcommands/cluster-add subcommands/cluster-list subcommands/cluster-remove subcommands/report subcommands/set subcommands/show-kubeconfig subcommands/uninstall
 TRIGGERS = triggers/install triggers/post-app-clone-setup triggers/post-app-rename-setup triggers/post-delete triggers/post-registry-login triggers/report triggers/scheduler-deploy triggers/scheduler-enter triggers/scheduler-logs triggers/scheduler-post-delete triggers/scheduler-run triggers/scheduler-run-list triggers/scheduler-stop
 BUILD = commands subcommands triggers
 PLUGIN_NAME = scheduler-k3s
@@ -5,6 +5,8 @@ import (
     "encoding/json"
     "fmt"
     "os"
+    "sort"
+    "strconv"
     "strings"
     "time"
@@ -20,13 +22,48 @@ import (
     "mvdan.cc/sh/v3/shell"
 )
 
 // EnterPodInput contains all the information needed to enter a pod
 type EnterPodInput struct {
-    Clientset KubernetesClient
-    Command []string
-    Entrypoint string
+    // Clientset is the kubernetes clientset
+    Clientset KubernetesClient
+
+    // Command is the command to run
+    Command []string
+
+    // Entrypoint is the entrypoint to run
+    Entrypoint string
+
+    // SelectedContainerName is the container name to enter
     SelectedContainerName string
-    SelectedPod v1.Pod
-    WaitTimeout int
+
+    // SelectedPod is the pod to enter
+    SelectedPod v1.Pod
+
+    // WaitTimeout is the timeout to wait for the pod to be ready
+    WaitTimeout int
 }
 
+// Node contains information about a node
+type Node struct {
+    // Name is the name of the node
+    Name string
+
+    // Roles is the roles of the node
+    Roles []string
+
+    // Ready is whether the node is ready
+    Ready bool
+
+    // RemoteHost is the remote host
+    RemoteHost string
+
+    // Version is the version of the node
+    Version string
+}
+
+// String returns a string representation of the node
+func (n Node) String() string {
+    return fmt.Sprintf("%s|%s|%s|%s", n.Name, strconv.FormatBool(n.Ready), strings.Join(n.Roles, ","), n.Version)
+}
+
 // StartCommandInput contains all the information needed to get the start command
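The pipe-delimited String() output above is the row format consumed by the columnize-based table in CommandClusterList further down in this commit. A minimal standalone sketch of what one row looks like; the node name, roles, and version values here are fabricated for illustration:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // Node mirrors the struct added above, trimmed to the fields String() reads.
    type Node struct {
        Name    string
        Roles   []string
        Ready   bool
        Version string
    }

    // String matches the commit's row format: name|ready|roles|version.
    func (n Node) String() string {
        return fmt.Sprintf("%s|%s|%s|%s", n.Name, strconv.FormatBool(n.Ready), strings.Join(n.Roles, ","), n.Version)
    }

    func main() {
        n := Node{Name: "node-1", Roles: []string{"control-plane", "master"}, Ready: true, Version: "v1.28.5+k3s1"}
        fmt.Println(n) // node-1|true|control-plane,master|v1.28.5+k3s1
    }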
@@ -323,6 +360,43 @@ func isPodReady(ctx context.Context, clientset KubernetesClient, podName, namesp
     }
 }
 
+// kubernetesNodeToNode converts a kubernetes node to a Node
+func kubernetesNodeToNode(node v1.Node) Node {
+    roles := []string{}
+    if len(node.Labels["kubernetes.io/role"]) > 0 {
+        roles = append(roles, node.Labels["kubernetes.io/role"])
+    } else {
+        for k, v := range node.Labels {
+            if strings.HasPrefix(k, "node-role.kubernetes.io/") && v == "true" {
+                roles = append(roles, strings.TrimPrefix(k, "node-role.kubernetes.io/"))
+            }
+        }
+    }
+
+    sort.Strings(roles)
+
+    ready := false
+    for _, condition := range node.Status.Conditions {
+        if condition.Type == "Ready" {
+            ready = condition.Status == "True"
+            break
+        }
+    }
+
+    remoteHost := ""
+    if val, ok := node.Annotations["dokku.com/remote-host"]; ok {
+        remoteHost = val
+    }
+
+    return Node{
+        Name:       node.Name,
+        Roles:      roles,
+        Ready:      ready,
+        RemoteHost: remoteHost,
+        Version:    node.Status.NodeInfo.KubeletVersion,
+    }
+}
+
 func waitForPodBySelectorRunning(ctx context.Context, input WaitForPodBySelectorRunningInput) error {
     pods, err := waitForPodToExist(ctx, WaitForPodToExistInput{
         Clientset: input.Clientset,
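kubernetesNodeToNode prefers the legacy kubernetes.io/role label and otherwise collects every node-role.kubernetes.io/<role>=true label, sorting the result so output is stable across runs. A self-contained sketch of just that label logic; the label map is made up for illustration and the helper name is not from the commit:

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    // rolesFromLabels mirrors the role-derivation branch of kubernetesNodeToNode.
    func rolesFromLabels(labels map[string]string) []string {
        roles := []string{}
        if len(labels["kubernetes.io/role"]) > 0 {
            roles = append(roles, labels["kubernetes.io/role"])
        } else {
            for k, v := range labels {
                if strings.HasPrefix(k, "node-role.kubernetes.io/") && v == "true" {
                    roles = append(roles, strings.TrimPrefix(k, "node-role.kubernetes.io/"))
                }
            }
        }
        sort.Strings(roles)
        return roles
    }

    func main() {
        labels := map[string]string{
            "node-role.kubernetes.io/control-plane": "true",
            "node-role.kubernetes.io/master":        "true",
            "node-role.kubernetes.io/etcd":          "true",
        }
        fmt.Println(rolesFromLabels(labels)) // [control-plane etcd master]
    }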
@@ -163,6 +163,17 @@ func (k KubernetesClient) DeleteJob(ctx context.Context, input DeleteJobInput) e
     })
 }
 
+// DeleteNodeInput contains all the information needed to delete a Kubernetes node
+type DeleteNodeInput struct {
+    // Name is the Kubernetes node name
+    Name string
+}
+
+// DeleteNode deletes a Kubernetes node
+func (k KubernetesClient) DeleteNode(ctx context.Context, input DeleteNodeInput) error {
+    return k.Client.CoreV1().Nodes().Delete(ctx, input.Name, metav1.DeleteOptions{})
+}
+
 // DeleteSecretInput contains all the information needed to delete a Kubernetes secret
 type DeleteSecretInput struct {
     // Name is the Kubernetes secret name
@@ -177,6 +188,30 @@ func (k KubernetesClient) DeleteSecret(ctx context.Context, input DeleteSecretIn
     return k.Client.CoreV1().Secrets(input.Namespace).Delete(ctx, input.Name, metav1.DeleteOptions{})
 }
 
+// GetNodeInput contains all the information needed to get a Kubernetes node
+type GetNodeInput struct {
+    // Name is the Kubernetes node name
+    Name string
+}
+
+// GetNode gets a Kubernetes node
+func (k KubernetesClient) GetNode(ctx context.Context, input GetNodeInput) (Node, error) {
+    if input.Name == "" {
+        return Node{}, errors.New("node name is required")
+    }
+
+    node, err := k.Client.CoreV1().Nodes().Get(ctx, input.Name, metav1.GetOptions{})
+    if err != nil {
+        return Node{}, err
+    }
+
+    if node == nil {
+        return Node{}, errors.New("node is nil")
+    }
+
+    return kubernetesNodeToNode(*node), err
+}
+
 // GetJobInput contains all the information needed to get a Kubernetes job
 type GetPodInput struct {
     // Name is the Kubernetes pod name
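GetNode and DeleteNode are thin wrappers over client-go's CoreV1 node API. For reference, the equivalent direct client-go calls look like the sketch below; the kubeconfig path (k3s's default location) and the node name are assumptions for illustration, not part of the commit:

    package main

    import (
        "context"
        "fmt"
        "log"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Load a kubeconfig; /etc/rancher/k3s/k3s.yaml is k3s's default path (assumed here).
        config, err := clientcmd.BuildConfigFromFlags("", "/etc/rancher/k3s/k3s.yaml")
        if err != nil {
            log.Fatal(err)
        }
        client, err := kubernetes.NewForConfig(config)
        if err != nil {
            log.Fatal(err)
        }

        ctx := context.Background()
        node, err := client.CoreV1().Nodes().Get(ctx, "node-1", metav1.GetOptions{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(node.Status.NodeInfo.KubeletVersion)

        // Deleting the Node object only removes it from the API server; the commit
        // additionally runs k3s-uninstall.sh on the host before doing this.
        if err := client.CoreV1().Nodes().Delete(ctx, "node-1", metav1.DeleteOptions{}); err != nil {
            log.Fatal(err)
        }
    }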
@@ -31,6 +31,16 @@ func main() {
         args.Parse(os.Args[2:])
         remoteHost := args.Arg(0)
         err = scheduler_k3s.CommandClusterAdd(*role, remoteHost, *allowUknownHosts, *taintScheduling)
+    case "cluster-list":
+        args := flag.NewFlagSet("scheduler-k3s:cluster-list", flag.ExitOnError)
+        format := args.String("format", "stdout", "format: [ stdout | json ]")
+        args.Parse(os.Args[2:])
+        err = scheduler_k3s.CommandClusterList(*format)
+    case "cluster-remove":
+        args := flag.NewFlagSet("scheduler-k3s:cluster-remove", flag.ExitOnError)
+        args.Parse(os.Args[2:])
+        nodeName := args.Arg(0)
+        err = scheduler_k3s.CommandClusterRemove(nodeName)
     case "report":
         args := flag.NewFlagSet("scheduler-k3s:report", flag.ExitOnError)
         format := args.String("format", "stdout", "format: [ stdout | json ]")
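Each subcommand follows the same pattern: a dedicated flag.NewFlagSet, flags registered, Parse over the remaining arguments, then positional values read with Arg(). A minimal standalone sketch of that dispatch pattern; the program structure and messages are illustrative, not from the commit:

    package main

    import (
        "flag"
        "fmt"
        "os"
    )

    func main() {
        if len(os.Args) < 2 {
            fmt.Fprintln(os.Stderr, "usage: prog <subcommand>")
            os.Exit(1)
        }

        switch os.Args[1] {
        case "cluster-list":
            args := flag.NewFlagSet("cluster-list", flag.ExitOnError)
            format := args.String("format", "stdout", "format: [ stdout | json ]")
            args.Parse(os.Args[2:])
            fmt.Println("listing in format:", *format)
        case "cluster-remove":
            args := flag.NewFlagSet("cluster-remove", flag.ExitOnError)
            args.Parse(os.Args[2:])
            // Positional arguments follow any flags; Arg(0) is the node name.
            fmt.Println("removing node:", args.Arg(0))
        default:
            fmt.Fprintln(os.Stderr, "unknown subcommand")
            os.Exit(1)
        }
    }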
@@ -3,6 +3,7 @@ package scheduler_k3s
 import (
     "context"
     "crypto/rand"
+    "encoding/json"
     "fmt"
     "net"
     "net/url"
@@ -11,6 +12,7 @@ import (
 
     "github.com/dokku/dokku/plugins/common"
     resty "github.com/go-resty/resty/v2"
+    "github.com/ryanuber/columnize"
 )
 
 // CommandInitialize initializes a k3s cluster on the local server
@@ -525,6 +527,106 @@ func CommandClusterAdd(role string, remoteHost string, allowUknownHosts bool, ta
     return nil
 }
 
+// CommandClusterList lists the nodes in the k3s cluster
+func CommandClusterList(format string) error {
+    if format != "stdout" && format != "json" {
+        return fmt.Errorf("Invalid format: %s", format)
+    }
+    if err := isK3sInstalled(); err != nil {
+        return fmt.Errorf("k3s not installed, cannot list cluster nodes")
+    }
+
+    clientset, err := NewKubernetesClient()
+    if err != nil {
+        return fmt.Errorf("Unable to create kubernetes client: %w", err)
+    }
+
+    nodes, err := clientset.ListNodes(context.Background(), ListNodesInput{})
+    if err != nil {
+        return fmt.Errorf("Unable to list nodes: %w", err)
+    }
+
+    output := []Node{}
+    for _, node := range nodes {
+        output = append(output, kubernetesNodeToNode(node))
+    }
+
+    if format == "stdout" {
+        lines := []string{"name|ready|roles|version"}
+        for _, node := range output {
+            lines = append(lines, node.String())
+        }
+
+        columnized := columnize.SimpleFormat(lines)
+        fmt.Println(columnized)
+        return nil
+    }
+
+    b, err := json.Marshal(output)
+    if err != nil {
+        return fmt.Errorf("Unable to marshal json: %w", err)
+    }
+
+    fmt.Println(string(b))
+    return nil
+}
+
+// CommandClusterRemove removes a node from the k3s cluster
+func CommandClusterRemove(nodeName string) error {
+    if err := isK3sInstalled(); err != nil {
+        return fmt.Errorf("k3s not installed, cannot remove node")
+    }
+
+    common.LogInfo1Quiet(fmt.Sprintf("Removing %s from k3s cluster", nodeName))
+    clientset, err := NewKubernetesClient()
+    if err != nil {
+        return fmt.Errorf("Unable to create kubernetes client: %w", err)
+    }
+
+    common.LogVerboseQuiet("Getting node remote connection information")
+    ctx := context.Background()
+    node, err := clientset.GetNode(ctx, GetNodeInput{
+        Name: nodeName,
+    })
+    if err != nil {
+        return fmt.Errorf("Unable to get node: %w", err)
+    }
+
+    common.LogVerboseQuiet("Checking if node is a remote node managed by Dokku")
+    if node.RemoteHost == "" {
+        return fmt.Errorf("Node %s is not a remote node managed by Dokku", nodeName)
+    }
+
+    common.LogVerboseQuiet("Uninstalling k3s on remote host")
+    removeCmd, err := common.CallSshCommand(common.SshCommandInput{
+        Command:          "/usr/local/bin/k3s-uninstall.sh",
+        Args:             []string{},
+        AllowUknownHosts: true,
+        RemoteHost:       node.RemoteHost,
+        StreamStdio:      true,
+        Sudo:             true,
+    })
+    if err != nil {
+        return fmt.Errorf("Unable to call k3s uninstall command over ssh: %w", err)
+    }
+
+    if removeCmd.ExitCode != 0 {
+        return fmt.Errorf("Invalid exit code from k3s uninstall command over ssh: %d", removeCmd.ExitCode)
+    }
+
+    common.LogVerboseQuiet("Deleting node from k3s cluster")
+    err = clientset.DeleteNode(ctx, DeleteNodeInput{
+        Name: nodeName,
+    })
+    if err != nil {
+        return fmt.Errorf("Unable to delete node: %w", err)
+    }
+
+    common.LogVerboseQuiet("Done")
+
+    return nil
+}
+
 // CommandReport displays a scheduler-k3s report for one or more apps
 func CommandReport(appName string, format string, infoFlag string) error {
     if len(appName) == 0 {
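CommandClusterList's stdout mode relies on columnize.SimpleFormat, which aligns pipe-delimited rows into columns; json mode simply marshals the []Node slice. A small sketch of the stdout path, with fabricated node rows:

    package main

    import (
        "fmt"

        "github.com/ryanuber/columnize"
    )

    func main() {
        // The header matches the one used by CommandClusterList; the node rows are fabricated.
        lines := []string{
            "name|ready|roles|version",
            "node-1|true|master|v1.28.5+k3s1",
            "node-2|true|worker|v1.28.5+k3s1",
        }
        fmt.Println(columnize.SimpleFormat(lines))
        // Output is a left-aligned table:
        // name    ready  roles   version
        // node-1  true   master  v1.28.5+k3s1
        // node-2  true   worker  v1.28.5+k3s1
    }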