feat(kubernetes): wip

Tine 2024-12-14 20:15:07 +01:00
parent bea62d62de
commit 311b82d212
Signed by: mentos1386
SSH key fingerprint: SHA256:MNtTsLbihYaWF8j1fkOHfkKNlnN1JQfxEU/rBU8nCGw
6 changed files with 98 additions and 57 deletions

View file

@@ -86,4 +86,4 @@ echo "PasswordAuthentication no" >> /etc/ssh/sshd_config
 
 ### 6. Done
 
-Your node should now be vissible at https://proxmox.tjo.cloud.
+Your node should now be visible at https://proxmox.tjo.cloud.

View file

@@ -29,38 +29,54 @@ module "cluster" {
   }
   nodes = {
-    pink = {
+    nevaroo-cp = {
+      id = 6001
       type = "controlplane"
       host = "nevaroo"
       storage = "local-nvme-lvm"
       cores = 4
       memory = 4096
-      pod_cidr = {
-        ipv4 = "10.0.56.0/20"
-        ipv6 = "fd74:6a6f:0:3800::/52"
-      }
     }
-    blue = {
+    #mustafar-cp = {
+    #  id = 6000
+    #  type = "controlplane"
+    #  host = "mustafar"
+    #  storage = "local"
+    #  cores = 2
+    #  memory = 4096
+    #}
+    #jakku-cp = {
+    #  id = 6000
+    #  type = "controlplane"
+    #  host = "jakku"
+    #  storage = "local-nvme"
+    #  cores = 2
+    #  memory = 4096
+    #}
+    #batuu-cp = {
+    #  id = 6000
+    #  type = "controlplane"
+    #  host = "batuu"
+    #  storage = "local-nvme"
+    #  cores = 2
+    #  memory = 4096
+    #}
+    nevaro-w1 = {
+      id = 6002
       type = "worker"
       host = "nevaroo"
       storage = "local-nvme-lvm"
       cores = 8
       memory = 24576
-      pod_cidr = {
-        ipv4 = "10.0.52.0/20"
-        ipv6 = "fd74:6a6f:0:3400::/52"
-      }
     }
-    cyan = {
+    mustafar-1 = {
+      id = 6000
       type = "worker"
       host = "mustafar"
       storage = "local"
       cores = 2
       memory = 4096
-      pod_cidr = {
-        ipv4 = "10.0.68.0/20"
-        ipv6 = "fd74:6a6f:0:4000::/52"
-      }
     }
   }
 }
@@ -69,7 +85,7 @@ resource "local_file" "kubeconfig" {
   content = templatefile("${path.module}/kubeconfig.tftpl", {
     cluster : {
       name : module.cluster.name,
-      endpoint : module.cluster.api.public.endpoint,
+      endpoint : module.cluster.api.internal.endpoint,
       ca : module.cluster.api.ca,
     }
     oidc : {

View file

@@ -12,7 +12,15 @@ data "helm_template" "cilium" {
   values = [<<-EOF
     ipam:
      mode: "kubernetes"
+    nodeIPAM:
+    operator:
+      priorityClassName: "system-cluster-critical"
+    routingMode: "native"
+    autoDirectNodeRoutes: true
+    directRoutingSkipUnreachable: true
+    bgpControlPlane:
       enabled: true
     bpf:
@@ -21,12 +29,15 @@ data "helm_template" "cilium" {
     enableIPv4Masquerade: true
     ipv4:
       enabled: true
+    ipv4NativeRoutingCIDR: "10.0.0.0/16"
     enableIPv6Masquerade: true
     ipv6:
-      enabled: true
-    kubeProxyReplacement: true
+      enabled: false
+    ipv6NativeRoutingCIDR: "fd74:6a6f:0::/48"
+    kubeProxyReplacement: "true"
     securityContext:
       capabilities:
         ciliumAgent:
@@ -55,14 +66,9 @@ data "helm_template" "cilium" {
     hubble:
       ui:
-        enabled: true
+        enabled: false
       relay:
-        enabled: true
-      tls:
-        auto:
-          enabled: true
-          method: "cronJob"
-          schedule: "0 0 1 */4 *"
+        enabled: false
     gatewayAPI:
       enabled: false
     envoy:
@@ -92,8 +98,18 @@ data "helm_template" "proxmox-csi" {
       region: "${var.proxmox.name}"
     storageClass:
-      - name: proxmox-main
-        storage: main
+      - name: proxmox-local-nvme
+        storage: local-nvme
+        reclaimPolicy: Delete
+        fstype: ext4
+        cache: none
+      - name: proxmox-local
+        storage: local
+        reclaimPolicy: Delete
+        fstype: ext4
+        cache: none
+      - name: proxmox-local-nvme-lvm
+        storage: local-nvme-lvm
         reclaimPolicy: Delete
         fstype: ext4
         cache: none
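The three storage classes now mirror the Proxmox storages the nodes above boot from (local, local-nvme, local-nvme-lvm). As a quick smoke test for the provisioner, a small PVC bound to one of the new classes should be enough; this sketch uses the hashicorp/kubernetes provider, which is not part of this commit, and the claim name is made up:

resource "kubernetes_persistent_volume_claim_v1" "csi_smoke_test" {
  metadata {
    name      = "csi-smoke-test" # hypothetical name
    namespace = "default"
  }
  spec {
    access_modes       = ["ReadWriteOnce"]
    storage_class_name = "proxmox-local-nvme"
    resources {
      requests = {
        storage = "1Gi"
      }
    }
  }
}

If proxmox-csi is healthy, the claim binds and a matching disk appears on the local-nvme storage of the node's Proxmox host.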
@@ -126,10 +142,23 @@ data "helm_template" "proxmox-ccm" {
   kube_version = var.talos.kubernetes
   values = [<<-EOF
-    nodeSelector:
-      node-role.kubernetes.io/control-plane: ""
+    # Deploy CCM only on control-plane nodes
+    affinity:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+                - key: node-role.kubernetes.io/control-plane
+                  operator: Exists
+    tolerations:
+      - key: node-role.kubernetes.io/control-plane
+        effect: NoSchedule
+      - key: node.cloudprovider.kubernetes.io/uninitialized
+        effect: NoSchedule
     enabledControllers:
       - cloud-node-lifecycle
     config:
       clusters:
         - url: ${var.proxmox.url}
@@ -150,4 +179,11 @@ data "helm_template" "talos-ccm" {
   namespace = "kube-system"
   kube_version = var.talos.kubernetes
+  values = [<<-EOF
+    enabledControllers:
+      - cloud-node
+      - node-csr-approval
+  EOF
+  ]
 }

View file

@@ -5,16 +5,21 @@ locals {
   cluster_public_endpoint = "https://${local.public_domain}:${var.cluster.api.public.port}"
   podSubnets = [
-    "10.0.240.0/22",
-    "fd74:6a6f:0:f000::/54",
+    "10.0.240.0/21",
+    "fd74:6a6f:0:f000::/53",
   ]
   serviceSubnets = [
-    "10.0.244.0/22",
-    "fd74:6a6f:0:f400::/54",
+    "10.0.248.0/22",
+    "fd74:6a6f:0:f800::/108",
   ]
   talos_controlplane_config = {
     machine = {
+      kubelet = {
+        extraArgs = {
+          rotate-server-certificates = true
+        }
+      }
       features = {
         rbac = true
         apidCheckExtKeyUsage = true
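The subnet changes are easier to read with the allocator math spelled out: kube-controller-manager slices the pod CIDR into fixed per-node blocks (/24 for IPv4 and /64 for IPv6 by default), and kube-apiserver rejects a service range wider than 2^20 addresses, which is what pushes the IPv6 service subnet from /54 down to /108. A sanity-check sketch, assuming those defaults:

locals {
  # Per-node pod CIDR capacity under the default node masks:
  ipv4_pod_nodes = pow(2, 24 - 21) # 10.0.240.0/21         -> 8 node blocks
  ipv6_pod_nodes = pow(2, 64 - 53) # fd74:6a6f:0:f000::/53 -> 2048 node blocks

  # Service CIDRs are capped at 2^20 addresses by kube-apiserver,
  # so /108 (128 - 108 = 20 bits) is the widest usable IPv6 range:
  ipv6_service_ips = pow(2, 128 - 108) # 1048576
}

The kubelet's rotate-server-certificates flag added above pairs with the node-csr-approval controller enabled for talos-ccm later in this commit; without an approver, the kubelet's serving-certificate CSRs would sit pending.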
@@ -90,9 +95,6 @@ locals {
   talos_worker_config = {
     cluster = {
-      externalCloudProvider = {
-        enabled = true
-      }
       network = {
         cni = {
           name = "none"
@@ -129,10 +131,6 @@ locals {
           "k8s.tjo.cloud/host" = node.host
           "k8s.tjo.cloud/proxmox" = var.proxmox.name
         }
-        nodeAnnotations = {
-          "network.cilium.io/ipv4-pod-cidr" : node.pod_cidr.ipv4
-          "network.cilium.io/ipv6-pod-cidr" : node.pod_cidr.ipv6
-        }
       }
     }),
   ]
@@ -172,15 +170,13 @@ resource "talos_machine_configuration_apply" "controlplane" {
   node = each.value.name
   endpoint = each.value.ipv4
-  apply_mode = "reboot"
-  config_patches = concat(
+  config_patches = sensitive(concat(
     [
       yamlencode(local.talos_worker_config),
       yamlencode(local.talos_controlplane_config)
     ],
     local.talos_node_config[each.key]
-  )
+  ))
 }
 resource "talos_machine_configuration_apply" "worker" {
@@ -192,14 +188,12 @@ resource "talos_machine_configuration_apply" "worker" {
   node = each.value.name
   endpoint = each.value.ipv4
-  apply_mode = "reboot"
-  config_patches = concat(
+  config_patches = sensitive(concat(
     [
       yamlencode(local.talos_worker_config)
     ],
     local.talos_node_config[each.key]
-  )
+  ))
 }
 resource "talos_machine_bootstrap" "this" {

View file

@@ -1,8 +1,7 @@
 locals {
   nodes_with_names = {
     for k, v in var.nodes : k => merge(v, {
-      id = 6000 + index(keys(var.nodes), k)
-      name = replace("${k}.${v.type}.${var.cluster.name}", ".", "-")
+      name = replace("${k}.${var.cluster.name}", ".", "-")
     })
   }
   hashes = {
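Dropping the derived id here is what makes the explicit id in the nodes map above necessary: index(keys(var.nodes), k) is order-dependent, so adding or removing a node key renumbered every later node and forced Proxmox to recreate those VMs. A terraform console style illustration with hypothetical keys:

locals {
  # keys() sorts lexically, so inserting "cyan" shifts "pink" by one:
  before = 6000 + index(keys({ blue = {}, pink = {} }), "pink")            # 6001
  after  = 6000 + index(keys({ blue = {}, cyan = {}, pink = {} }), "pink") # 6002
}

The node name also no longer embeds the type, matching the new keys (nevaroo-cp, nevaro-w1) that carry the role themselves; note that in this wip state the commented-out control planes and mustafar-1 all use id = 6000, so re-enabling any of them as-is would collide.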

View file

@ -1,5 +1,6 @@
variable "nodes" { variable "nodes" {
type = map(object({ type = map(object({
id = number
type = string type = string
host = string host = string
@ -8,11 +9,6 @@ variable "nodes" {
storage = string storage = string
boot_size = optional(number, 32) boot_size = optional(number, 32)
pod_cidr = object({
ipv4 = string
ipv6 = string
})
})) }))
} }