feat(kubernetes): wip

Tine committed on 2024-12-14 20:15:07 +01:00
parent bea62d62de
commit 311b82d212
Signed by: mentos1386
SSH key fingerprint: SHA256:MNtTsLbihYaWF8j1fkOHfkKNlnN1JQfxEU/rBU8nCGw
6 changed files with 98 additions and 57 deletions

File 1 of 6

@@ -86,4 +86,4 @@ echo "PasswordAuthentication no" >> /etc/ssh/sshd_config
 ### 6. Done
-Your node should now be vissible at https://proxmox.tjo.cloud.
+Your node should now be visible at https://proxmox.tjo.cloud.

File 2 of 6

@@ -29,38 +29,54 @@ module "cluster" {
   }
   nodes = {
-    pink = {
+    nevaroo-cp = {
+      id      = 6001
       type    = "controlplane"
       host    = "nevaroo"
       storage = "local-nvme-lvm"
       cores   = 4
       memory  = 4096
-      pod_cidr = {
-        ipv4 = "10.0.56.0/20"
-        ipv6 = "fd74:6a6f:0:3800::/52"
-      }
     }
-    blue = {
+    #mustafar-cp = {
+    #  id      = 6000
+    #  type    = "controlplane"
+    #  host    = "mustafar"
+    #  storage = "local"
+    #  cores   = 2
+    #  memory  = 4096
+    #}
+    #jakku-cp = {
+    #  id      = 6000
+    #  type    = "controlplane"
+    #  host    = "jakku"
+    #  storage = "local-nvme"
+    #  cores   = 2
+    #  memory  = 4096
+    #}
+    #batuu-cp = {
+    #  id      = 6000
+    #  type    = "controlplane"
+    #  host    = "batuu"
+    #  storage = "local-nvme"
+    #  cores   = 2
+    #  memory  = 4096
+    #}
+    nevaro-w1 = {
+      id      = 6002
       type    = "worker"
       host    = "nevaroo"
       storage = "local-nvme-lvm"
       cores   = 8
       memory  = 24576
-      pod_cidr = {
-        ipv4 = "10.0.52.0/20"
-        ipv6 = "fd74:6a6f:0:3400::/52"
-      }
     }
-    cyan = {
+    mustafar-1 = {
+      id      = 6000
       type    = "worker"
       host    = "mustafar"
       storage = "local"
       cores   = 2
       memory  = 4096
-      pod_cidr = {
-        ipv4 = "10.0.68.0/20"
-        ipv6 = "fd74:6a6f:0:4000::/52"
-      }
     }
   }
 }
@@ -69,7 +85,7 @@ resource "local_file" "kubeconfig" {
   content = templatefile("${path.module}/kubeconfig.tftpl", {
     cluster : {
       name : module.cluster.name,
-      endpoint : module.cluster.api.public.endpoint,
+      endpoint : module.cluster.api.internal.endpoint,
       ca : module.cluster.api.ca,
     }
     oidc : {

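For orientation, a hypothetical sketch of the fragment of kubeconfig.tftpl (the template itself is not part of this diff) that would consume the cluster object above; field names are assumed from the templatefile call, and the change simply swaps which endpoint lands in `server`:

# Hypothetical sketch of kubeconfig.tftpl, not shown in this commit.
apiVersion: v1
kind: Config
clusters:
  - name: ${cluster.name}
    cluster:
      server: ${cluster.endpoint}
      certificate-authority-data: ${cluster.ca}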
File 3 of 6

@@ -12,7 +12,15 @@ data "helm_template" "cilium" {
   values = [<<-EOF
     ipam:
       mode: "kubernetes"
+    nodeIPAM:
+    operator:
+      priorityClassName: "system-cluster-critical"
+    routingMode: "native"
+    autoDirectNodeRoutes: true
+    directRoutingSkipUnreachable: true
     bgpControlPlane:
       enabled: true
     bpf:
@@ -21,12 +29,15 @@ data "helm_template" "cilium" {
     enableIPv4Masquerade: true
     ipv4:
       enabled: true
+    ipv4NativeRoutingCIDR: "10.0.0.0/16"
     enableIPv6Masquerade: true
     ipv6:
-      enabled: true
+      enabled: false
+    ipv6NativeRoutingCIDR: "fd74:6a6f:0::/48"
-    kubeProxyReplacement: true
+    kubeProxyReplacement: "true"
     securityContext:
       capabilities:
         ciliumAgent:
@@ -55,14 +66,9 @@ data "helm_template" "cilium" {
     hubble:
       ui:
-        enabled: true
+        enabled: false
       relay:
-        enabled: true
-      tls:
-        auto:
-          enabled: true
-          method: "cronJob"
-          schedule: "0 0 1 */4 *"
+        enabled: false
     gatewayAPI:
       enabled: false
     envoy:
@@ -92,8 +98,18 @@ data "helm_template" "proxmox-csi" {
       region: "${var.proxmox.name}"
       storageClass:
-        - name: proxmox-main
-          storage: main
+        - name: proxmox-local-nvme
+          storage: local-nvme
           reclaimPolicy: Delete
           fstype: ext4
           cache: none
+        - name: proxmox-local
+          storage: local
+          reclaimPolicy: Delete
+          fstype: ext4
+          cache: none
+        - name: proxmox-local-nvme-lvm
+          storage: local-nvme-lvm
+          reclaimPolicy: Delete
+          fstype: ext4
+          cache: none
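As a usage illustration (hypothetical claim name and size, not part of the commit), a workload would request a volume from one of the classes above like so:

# Hypothetical PVC consuming one of the storage classes defined above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data
spec:
  storageClassName: proxmox-local-nvme-lvm
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi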
@@ -126,10 +142,23 @@ data "helm_template" "proxmox-ccm" {
   kube_version = var.talos.kubernetes
   values = [<<-EOF
-    nodeSelector:
-      node-role.kubernetes.io/control-plane: ""
+    # Deploy CCM only on control-plane nodes
+    affinity:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+                - key: node-role.kubernetes.io/control-plane
+                  operator: Exists
+    tolerations:
+      - key: node-role.kubernetes.io/control-plane
+        effect: NoSchedule
+      - key: node.cloudprovider.kubernetes.io/uninitialized
+        effect: NoSchedule
+    enabledControllers:
+      - cloud-node-lifecycle
     config:
       clusters:
         - url: ${var.proxmox.url}
@@ -150,4 +179,11 @@ data "helm_template" "talos-ccm" {
   namespace    = "kube-system"
   kube_version = var.talos.kubernetes
+  values = [<<-EOF
+    enabledControllers:
+      - cloud-node
+      - node-csr-approval
+  EOF
+  ]
 }

File 4 of 6

@@ -5,16 +5,21 @@ locals {
   cluster_public_endpoint = "https://${local.public_domain}:${var.cluster.api.public.port}"
   podSubnets = [
-    "10.0.240.0/22",
-    "fd74:6a6f:0:f000::/54",
+    "10.0.240.0/21",
+    "fd74:6a6f:0:f000::/53",
   ]
   serviceSubnets = [
-    "10.0.244.0/22",
-    "fd74:6a6f:0:f400::/54",
+    "10.0.248.0/22",
+    "fd74:6a6f:0:f800::/108",
   ]
   talos_controlplane_config = {
     machine = {
+      kubelet = {
+        extraArgs = {
+          rotate-server-certificates = true
+        }
+      }
       features = {
         rbac                 = true
         apidCheckExtKeyUsage = true
@@ -90,9 +95,6 @@ locals {
   talos_worker_config = {
     cluster = {
-      externalCloudProvider = {
-        enabled = true
-      }
       network = {
         cni = {
           name = "none"
@@ -129,10 +131,6 @@ locals {
           "k8s.tjo.cloud/host"    = node.host
           "k8s.tjo.cloud/proxmox" = var.proxmox.name
         }
-        nodeAnnotations = {
-          "network.cilium.io/ipv4-pod-cidr" : node.pod_cidr.ipv4
-          "network.cilium.io/ipv6-pod-cidr" : node.pod_cidr.ipv6
-        }
       }
     }),
   ]
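For illustration (a hypothetical Pod spec fragment, not from the repo), the node labels applied above let workloads pin themselves to a specific Proxmox host:

# Hypothetical Pod/Deployment fragment selecting a host via the labels above.
nodeSelector:
  k8s.tjo.cloud/host: nevaroo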
@@ -172,15 +170,13 @@ resource "talos_machine_configuration_apply" "controlplane" {
   node       = each.value.name
   endpoint   = each.value.ipv4
   apply_mode = "reboot"
-  config_patches = concat(
+  config_patches = sensitive(concat(
     [
       yamlencode(local.talos_worker_config),
      yamlencode(local.talos_controlplane_config)
     ],
     local.talos_node_config[each.key]
-  )
+  ))
 }

 resource "talos_machine_configuration_apply" "worker" {
@@ -192,14 +188,12 @@ resource "talos_machine_configuration_apply" "worker" {
   node       = each.value.name
   endpoint   = each.value.ipv4
   apply_mode = "reboot"
-  config_patches = concat(
+  config_patches = sensitive(concat(
     [
       yamlencode(local.talos_worker_config)
     ],
     local.talos_node_config[each.key]
-  )
+  ))
 }

 resource "talos_machine_bootstrap" "this" {

File 5 of 6

@@ -1,8 +1,7 @@
 locals {
   nodes_with_names = {
     for k, v in var.nodes : k => merge(v, {
-      id   = 6000 + index(keys(var.nodes), k)
-      name = replace("${k}.${v.type}.${var.cluster.name}", ".", "-")
+      name = replace("${k}.${var.cluster.name}", ".", "-")
     })
   }
   hashes = {
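To make the rename concrete (the cluster name below is assumed purely for illustration): node keys such as nevaroo-cp now carry the role themselves, so the ${v.type} segment became redundant.

# Illustration with an assumed cluster name, not taken from this diff.
locals {
  example_old = replace("pink.controlplane.tjo.cloud", ".", "-")
  # => "pink-controlplane-tjo-cloud"
  example_new = replace("nevaroo-cp.tjo.cloud", ".", "-")
  # => "nevaroo-cp-tjo-cloud"
}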

File 6 of 6

@@ -1,5 +1,6 @@
 variable "nodes" {
   type = map(object({
+    id   = number
     type = string
     host = string
@@ -8,11 +9,6 @@ variable "nodes" {
     storage   = string
     boot_size = optional(number, 32)
-    pod_cidr = object({
-      ipv4 = string
-      ipv6 = string
-    })
   }))
 }
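A hedged usage sketch of the updated schema (hypothetical node key and values): each node now supplies an explicit VM id, and pod CIDRs are no longer declared per node.

# Hypothetical caller entry matching the updated variable schema.
nodes = {
  example-cp = {
    id      = 6100 # explicit Proxmox VM id (new required field)
    type    = "controlplane"
    host    = "nevaroo"
    storage = "local-nvme-lvm"
    cores   = 2
    memory  = 4096
    # boot_size defaults to 32
    # pod_cidr removed: pod CIDRs are no longer configured per node
  }
}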