- Add .gitignore: exclude compiled binaries, build artifacts, and Helm values files containing real secrets (authentik, prometheus)
- Add all Kubernetes deployment manifests (deployment/)
- Add services source code: ha-sync, device-inventory, games-console, paperclip, parts-inventory
- Add Ansible orchestration: playbooks, roles, inventory, cloud-init
- Add hardware specs, execution plans, scripts, HOMELAB.md
- Add skills/homelab/SKILL.md + skills/install.sh to preserve Copilot skill
- Remove previously-tracked inventory-cli binary from git index

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
381 lines
11 KiB
Go
381 lines
11 KiB
Go
package k8s
|
|
|
|
import (
	"context"
	"fmt"
	"io"
	"sort"
	"strings"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)
|
|
|
|
// Labels applied to every resource this service creates, so that game
// servers can be identified and filtered in the cluster.
const (
	// typeLabelKey stores the game type (e.g. "minecraft") on a deployment.
	typeLabelKey = "games.homelab/type"
	// managedByLabelKey/managedByLabelVal mark deployments and services
	// created by the games-console service.
	managedByLabelKey = "games.homelab/managed-by"
	managedByLabelVal = "games-console"
)
|
|
|
|
// Server represents a game server deployment as exposed by the API: the
// Deployment's state with NodePort and pod host-IP information merged in.
type Server struct {
	// Name is the deployment (and service) name.
	Name string `json:"name"`
	// Type is the game type (label value or image-derived; see serverType).
	Type string `json:"type"`
	// Image is the first container's image.
	Image string `json:"image"`
	// Replicas is the desired replica count (0 means stopped).
	Replicas int32 `json:"replicas"`
	// Ready is the number of ready replicas reported by the deployment.
	Ready int32 `json:"ready"`
	// Status is derived from Replicas/Ready:
	// "stopped", "starting", "running", or "degraded".
	Status string `json:"status"`
	// NodePort is the service's externally reachable port, if any.
	NodePort int32 `json:"nodePort,omitempty"`
	// NodeIP is the host IP of a scheduled pod, if any.
	NodeIP string `json:"nodeIP,omitempty"`
	// Port is the container port the game listens on.
	Port int32 `json:"port"`
	// EnvVars holds the first container's environment (name → value).
	EnvVars map[string]string `json:"envVars"`
	// CreatedAt is the deployment's creation timestamp.
	CreatedAt time.Time `json:"createdAt"`
}
|
|
|
|
// CreateServerRequest holds parameters for creating a new game server.
type CreateServerRequest struct {
	// Name becomes the deployment and service name.
	Name string `json:"name"`
	// Type is stored in the games.homelab/type label.
	Type string `json:"type"`
	// Image is the container image to run.
	Image string `json:"image"`
	// Port is the TCP port the game container listens on.
	Port int32 `json:"port"`
	// NodePort, when > 0, requests this specific NodePort for the service;
	// otherwise Kubernetes allocates one.
	NodePort int32 `json:"nodePort,omitempty"`
	// EnvVars are environment variables set on the game container.
	EnvVars map[string]string `json:"envVars"`
}
|
|
|
|
// UpdateServerRequest holds parameters for updating a game server.
// Zero values mean "leave unchanged".
type UpdateServerRequest struct {
	// Image, when non-empty, replaces the container image.
	Image string `json:"image,omitempty"`
	// EnvVars, when non-nil, replaces the container environment entirely.
	EnvVars map[string]string `json:"envVars,omitempty"`
	// NodePort, when > 0, patches the service's NodePort.
	NodePort int32 `json:"nodePort,omitempty"`
}
|
|
|
|
// Client wraps the Kubernetes client for game server operations.
// All operations are scoped to a single namespace.
type Client struct {
	// cs is the Kubernetes API client.
	cs kubernetes.Interface
	// namespace scopes every deployment/service/pod operation.
	namespace string
}
|
|
|
|
// NewClient creates a new K8s client using in-cluster config.
|
|
func NewClient(namespace string) (*Client, error) {
|
|
cfg, err := rest.InClusterConfig()
|
|
if err != nil {
|
|
return nil, fmt.Errorf("in-cluster config: %w", err)
|
|
}
|
|
cs, err := kubernetes.NewForConfig(cfg)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("kubernetes client: %w", err)
|
|
}
|
|
return &Client{cs: cs, namespace: namespace}, nil
|
|
}
|
|
|
|
// ListServers returns all game server deployments in the namespace.
|
|
func (c *Client) ListServers(ctx context.Context) ([]Server, error) {
|
|
deps, err := c.cs.AppsV1().Deployments(c.namespace).List(ctx, metav1.ListOptions{})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
svcs, err := c.cs.CoreV1().Services(c.namespace).List(ctx, metav1.ListOptions{})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
pods, err := c.cs.CoreV1().Pods(c.namespace).List(ctx, metav1.ListOptions{})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
svcMap := buildServiceMap(svcs.Items)
|
|
nodeIPMap := buildNodeIPMap(pods.Items)
|
|
|
|
servers := make([]Server, 0, len(deps.Items))
|
|
for _, d := range deps.Items {
|
|
s := deploymentToServer(d, svcMap[d.Name])
|
|
s.NodeIP = nodeIPMap[d.Name]
|
|
servers = append(servers, s)
|
|
}
|
|
return servers, nil
|
|
}
|
|
|
|
// GetServer returns a single game server by name.
|
|
func (c *Client) GetServer(ctx context.Context, name string) (*Server, error) {
|
|
d, err := c.cs.AppsV1().Deployments(c.namespace).Get(ctx, name, metav1.GetOptions{})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
svc, _ := c.cs.CoreV1().Services(c.namespace).Get(ctx, name, metav1.GetOptions{})
|
|
pods, _ := c.cs.CoreV1().Pods(c.namespace).List(ctx, metav1.ListOptions{
|
|
LabelSelector: "app=" + name,
|
|
})
|
|
s := deploymentToServer(*d, svc)
|
|
if pods != nil {
|
|
s.NodeIP = buildNodeIPMap(pods.Items)[name]
|
|
}
|
|
return &s, nil
|
|
}
|
|
|
|
// CreateServer creates a deployment + NodePort service for a new game server.
|
|
func (c *Client) CreateServer(ctx context.Context, req CreateServerRequest) (*Server, error) {
|
|
replicas := int32(1)
|
|
envVars := envMapToList(req.EnvVars)
|
|
|
|
dep := &appsv1.Deployment{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: req.Name,
|
|
Namespace: c.namespace,
|
|
Labels: map[string]string{
|
|
"app": req.Name,
|
|
typeLabelKey: req.Type,
|
|
managedByLabelKey: managedByLabelVal,
|
|
},
|
|
},
|
|
Spec: appsv1.DeploymentSpec{
|
|
Replicas: &replicas,
|
|
Selector: &metav1.LabelSelector{
|
|
MatchLabels: map[string]string{"app": req.Name},
|
|
},
|
|
Template: corev1.PodTemplateSpec{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Labels: map[string]string{"app": req.Name},
|
|
},
|
|
Spec: corev1.PodSpec{
|
|
Containers: []corev1.Container{
|
|
{
|
|
Name: "server",
|
|
Image: req.Image,
|
|
Ports: []corev1.ContainerPort{
|
|
{ContainerPort: req.Port, Protocol: corev1.ProtocolTCP},
|
|
},
|
|
Env: envVars,
|
|
Resources: corev1.ResourceRequirements{
|
|
Requests: corev1.ResourceList{
|
|
corev1.ResourceCPU: resource.MustParse("500m"),
|
|
corev1.ResourceMemory: resource.MustParse("1Gi"),
|
|
},
|
|
Limits: corev1.ResourceList{
|
|
corev1.ResourceCPU: resource.MustParse("2000m"),
|
|
corev1.ResourceMemory: resource.MustParse("2Gi"),
|
|
},
|
|
},
|
|
},
|
|
},
|
|
NodeSelector: map[string]string{
|
|
"topology.homelab/server": "dell",
|
|
},
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
created, err := c.cs.AppsV1().Deployments(c.namespace).Create(ctx, dep, metav1.CreateOptions{})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
svcSpec := corev1.ServiceSpec{
|
|
Selector: map[string]string{"app": req.Name},
|
|
Ports: []corev1.ServicePort{
|
|
{Port: req.Port, Protocol: corev1.ProtocolTCP},
|
|
},
|
|
}
|
|
if req.NodePort > 0 {
|
|
svcSpec.Type = corev1.ServiceTypeNodePort
|
|
svcSpec.Ports[0].NodePort = req.NodePort
|
|
} else {
|
|
svcSpec.Type = corev1.ServiceTypeNodePort
|
|
}
|
|
|
|
svc := &corev1.Service{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: req.Name,
|
|
Namespace: c.namespace,
|
|
Labels: map[string]string{
|
|
"app": req.Name,
|
|
managedByLabelKey: managedByLabelVal,
|
|
},
|
|
},
|
|
Spec: svcSpec,
|
|
}
|
|
createdSvc, _ := c.cs.CoreV1().Services(c.namespace).Create(ctx, svc, metav1.CreateOptions{})
|
|
|
|
s := deploymentToServer(*created, createdSvc)
|
|
return &s, nil
|
|
}
|
|
|
|
// UpdateServer patches a deployment's image and/or env vars.
|
|
func (c *Client) UpdateServer(ctx context.Context, name string, req UpdateServerRequest) (*Server, error) {
|
|
d, err := c.cs.AppsV1().Deployments(c.namespace).Get(ctx, name, metav1.GetOptions{})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
if req.Image != "" {
|
|
d.Spec.Template.Spec.Containers[0].Image = req.Image
|
|
}
|
|
if req.EnvVars != nil {
|
|
d.Spec.Template.Spec.Containers[0].Env = envMapToList(req.EnvVars)
|
|
}
|
|
|
|
updated, err := c.cs.AppsV1().Deployments(c.namespace).Update(ctx, d, metav1.UpdateOptions{})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
// Patch service NodePort if requested
|
|
svc, _ := c.cs.CoreV1().Services(c.namespace).Get(ctx, name, metav1.GetOptions{})
|
|
if req.NodePort > 0 && svc != nil {
|
|
patch := fmt.Sprintf(`{"spec":{"ports":[{"port":%d,"nodePort":%d}]}}`, svc.Spec.Ports[0].Port, req.NodePort)
|
|
svc, _ = c.cs.CoreV1().Services(c.namespace).Patch(
|
|
ctx, name, types.MergePatchType, []byte(patch), metav1.PatchOptions{},
|
|
)
|
|
}
|
|
s := deploymentToServer(*updated, svc)
|
|
return &s, nil
|
|
}
|
|
|
|
// DeleteServer deletes a deployment and its service.
|
|
func (c *Client) DeleteServer(ctx context.Context, name string) error {
|
|
if err := c.cs.AppsV1().Deployments(c.namespace).Delete(ctx, name, metav1.DeleteOptions{}); err != nil {
|
|
return err
|
|
}
|
|
// Best-effort service deletion
|
|
_ = c.cs.CoreV1().Services(c.namespace).Delete(ctx, name, metav1.DeleteOptions{})
|
|
return nil
|
|
}
|
|
|
|
// ScaleServer sets the replica count (0 = stop, 1 = start).
|
|
func (c *Client) ScaleServer(ctx context.Context, name string, replicas int32) error {
|
|
patch := fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas)
|
|
_, err := c.cs.AppsV1().Deployments(c.namespace).Patch(
|
|
ctx, name, types.MergePatchType, []byte(patch), metav1.PatchOptions{},
|
|
)
|
|
return err
|
|
}
|
|
|
|
// RestartServer triggers a rollout restart by patching the pod template annotation.
|
|
func (c *Client) RestartServer(ctx context.Context, name string) error {
|
|
patch := fmt.Sprintf(
|
|
`{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"%s"}}}}}`,
|
|
time.Now().Format(time.RFC3339),
|
|
)
|
|
_, err := c.cs.AppsV1().Deployments(c.namespace).Patch(
|
|
ctx, name, types.MergePatchType, []byte(patch), metav1.PatchOptions{},
|
|
)
|
|
return err
|
|
}
|
|
|
|
// GetLogs returns the last n lines of logs from the first running pod of a deployment.
|
|
func (c *Client) GetLogs(ctx context.Context, name string, lines int64) (string, error) {
|
|
pods, err := c.cs.CoreV1().Pods(c.namespace).List(ctx, metav1.ListOptions{
|
|
LabelSelector: "app=" + name,
|
|
})
|
|
if err != nil || len(pods.Items) == 0 {
|
|
return "", fmt.Errorf("no pods found for %s", name)
|
|
}
|
|
pod := pods.Items[0]
|
|
req := c.cs.CoreV1().Pods(c.namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
|
|
TailLines: &lines,
|
|
})
|
|
stream, err := req.Stream(ctx)
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
defer stream.Close()
|
|
buf, err := io.ReadAll(stream)
|
|
return string(buf), err
|
|
}
|
|
|
|
// --- helpers ---
|
|
|
|
func deploymentToServer(d appsv1.Deployment, svc *corev1.Service) Server {
|
|
s := Server{
|
|
Name: d.Name,
|
|
Type: serverType(d),
|
|
EnvVars: make(map[string]string),
|
|
CreatedAt: d.CreationTimestamp.Time,
|
|
}
|
|
if d.Spec.Replicas != nil {
|
|
s.Replicas = *d.Spec.Replicas
|
|
}
|
|
s.Ready = d.Status.ReadyReplicas
|
|
|
|
switch {
|
|
case s.Replicas == 0:
|
|
s.Status = "stopped"
|
|
case s.Ready == 0:
|
|
s.Status = "starting"
|
|
case s.Ready == s.Replicas:
|
|
s.Status = "running"
|
|
default:
|
|
s.Status = "degraded"
|
|
}
|
|
|
|
if len(d.Spec.Template.Spec.Containers) > 0 {
|
|
c := d.Spec.Template.Spec.Containers[0]
|
|
s.Image = c.Image
|
|
if len(c.Ports) > 0 {
|
|
s.Port = c.Ports[0].ContainerPort
|
|
}
|
|
for _, e := range c.Env {
|
|
s.EnvVars[e.Name] = e.Value
|
|
}
|
|
}
|
|
|
|
if svc != nil && len(svc.Spec.Ports) > 0 {
|
|
s.NodePort = svc.Spec.Ports[0].NodePort
|
|
}
|
|
|
|
return s
|
|
}
|
|
|
|
func serverType(d appsv1.Deployment) string {
|
|
if t, ok := d.Labels[typeLabelKey]; ok && t != "" {
|
|
return t
|
|
}
|
|
// Fall back to image-name detection
|
|
if len(d.Spec.Template.Spec.Containers) > 0 {
|
|
img := strings.ToLower(d.Spec.Template.Spec.Containers[0].Image)
|
|
switch {
|
|
case strings.Contains(img, "minecraft"):
|
|
return "minecraft"
|
|
case strings.Contains(img, "factorio"):
|
|
return "factorio"
|
|
case strings.Contains(img, "openttd"):
|
|
return "openttd"
|
|
}
|
|
}
|
|
return "unknown"
|
|
}
|
|
|
|
func buildServiceMap(svcs []corev1.Service) map[string]*corev1.Service {
|
|
m := make(map[string]*corev1.Service, len(svcs))
|
|
for i := range svcs {
|
|
m[svcs[i].Name] = &svcs[i]
|
|
}
|
|
return m
|
|
}
|
|
|
|
// buildNodeIPMap returns a map of deployment name → hostIP of a running pod.
|
|
func buildNodeIPMap(pods []corev1.Pod) map[string]string {
|
|
m := make(map[string]string)
|
|
for _, p := range pods {
|
|
app := p.Labels["app"]
|
|
if app == "" {
|
|
continue
|
|
}
|
|
if p.Status.HostIP != "" {
|
|
m[app] = p.Status.HostIP
|
|
}
|
|
}
|
|
return m
|
|
}
|
|
|
|
func envMapToList(m map[string]string) []corev1.EnvVar {
|
|
vars := make([]corev1.EnvVar, 0, len(m))
|
|
for k, v := range m {
|
|
vars = append(vars, corev1.EnvVar{Name: k, Value: v})
|
|
}
|
|
return vars
|
|
}
|