chore: work done, not yet working

Tine 2024-07-19 22:48:07 +02:00
parent fc8423fe12
commit 1ba29ddc04
Signed by: mentos1386
SSH key fingerprint: SHA256:MNtTsLbihYaWF8j1fkOHfkKNlnN1JQfxEU/rBU8nCGw
11 changed files with 245 additions and 18468 deletions

.gitignore vendored

@@ -11,8 +11,8 @@ crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
-# password, private keys, and other secrets. These should not be part of version
-# control as they are data points which are potentially sensitive and subject
+# password, private keys, and other secrets. These should not be part of version
+# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
@@ -37,3 +37,4 @@ terraform.rc
# ENV
.env
+admin.*config


@@ -12,10 +12,6 @@ resource "helm_release" "dashboard" {
}
resource "kubernetes_manifest" "dashoard-http-route" {
-depends_on = [
-kubernetes_manifest.gateway,
-]
manifest = {
apiVersion = "gateway.networking.k8s.io/v1"
kind = "HTTPRoute"
@@ -26,7 +22,7 @@ resource "kubernetes_manifest" "dashoard-http-route" {
spec = {
parentRefs = [
{
name : "gateway"
name : kubernetes_manifest.gateway.object.metadata.name
}
]
hostnames = [
@@ -120,7 +116,7 @@ resource "kubernetes_manifest" "dashboard-oidc" {
targetRef = {
group : "gateway.networking.k8s.io"
kind : "HTTPRoute"
name : "dashboard"
name : kubernetes_manifest.dashoard-http-route.object.metadata.name
}
oidc = {
provider = {
@@ -128,7 +124,7 @@
}
clientID : var.oidc_client_id
clientSecret : {
name : "dashboard-oidc"
name : kubernetes_secret.dashboard-oidc.metadata[0].name
}
scopes : ["openid", "email", "profile"]
forwardAccessToken : true
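
The change running through this file swaps hard-coded name strings for attribute references, so Terraform derives the ordering from the resource graph (which is what makes the dropped depends_on above redundant) and a rename surfaces at plan time instead of as a broken route. A minimal sketch of the pattern, assuming a configured kubernetes provider and using hypothetical resource and object names:

```hcl
# Hypothetical sketch: the route reads the gateway's name through the
# resource graph instead of repeating the literal "gateway".
resource "kubernetes_manifest" "gateway" {
  manifest = {
    apiVersion = "gateway.networking.k8s.io/v1"
    kind       = "Gateway"
    metadata   = { name = "gateway", namespace = "default" }
    spec = {
      gatewayClassName = "envoy"
      listeners        = [{ name = "http", port = 80, protocol = "HTTP" }]
    }
  }
}

resource "kubernetes_manifest" "route" {
  manifest = {
    apiVersion = "gateway.networking.k8s.io/v1"
    kind       = "HTTPRoute"
    metadata   = { name = "route", namespace = "default" }
    spec = {
      # Implicit dependency: Terraform creates the Gateway first.
      parentRefs = [{ name = kubernetes_manifest.gateway.object.metadata.name }]
    }
  }
}
```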


@@ -9,6 +9,8 @@ resource "kubernetes_secret" "digitalocean-token" {
}
resource "helm_release" "cert-manager" {
+depends_on = [helm_release.envoy]
name = "cert-manager"
chart = "cert-manager"
repository = "https://charts.jetstack.io"
@@ -27,6 +29,8 @@ resource "helm_release" "cert-manager" {
}
resource "kubernetes_manifest" "tjo-cloud-issuer" {
+depends_on = [helm_release.cert-manager]
manifest = {
apiVersion = "cert-manager.io/v1"
kind = "Issuer"
@@ -90,7 +94,9 @@ resource "helm_release" "envoy" {
]
}
resource "kubernetes_manifest" "gateway-class" {
resource "kubernetes_manifest" "gateway_class" {
depends_on = [helm_release.envoy]
manifest = {
apiVersion = "gateway.networking.k8s.io/v1"
kind = "GatewayClass"
@@ -115,7 +121,7 @@ resource "kubernetes_manifest" "gateway" {
}
}
spec = {
gatewayClassName = "envoy"
gatewayClassName = kubernetes_manifest.gateway_class.object.metadata.name
listeners = [
{
name : "http"

k8s.tjo.cloud/kubeconfig Executable file

@@ -0,0 +1,26 @@
apiVersion: v1
kind: Config
clusters:
- name: tjo-cloud
cluster:
server: https://api.k8s.tjo.cloud:6443
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpVENDQVMrZ0F3SUJBZ0lRQ2o1ekNPSDg2ajN2bXVtcmVNYU9QekFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTBNRGN4T1RJd016VTFPRm9YRFRNME1EY3hOekl3TXpVMQpPRm93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCSElQUkIzNFdHOHNVNTZrWlFYVDV0VnpjQXlKRXpwd1VpRmpjUkJDQlJXY3JjZ1RiZ3hsaSs4RDNOVEsKV1ZLNUh6bkZnM25kV2VVQzl3S0l4SCs4bHQyallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVVjKzFYNDU2QTZLSWZJb3FNL3ZiU0dEU0VFQzB3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUpNWkhHZ2UKSUdacHdmbFpQRnl2ZlVrUi9xTDhsWjhXQVJpRFYwWXdoQWNDQWlBQStaNzJNelQ5c2RTS3RUSmRRVmR2WGhZKwpWTkVWb2JicDQ5WjFPeUNTL1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
contexts:
- name: oidc@tjo-cloud
context:
cluster: tjo-cloud
namespace: default
user: oidc
current-context: oidc@tjo-cloud
users:
- name: oidc
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: kubectl
args:
- oidc-login
- get-token
- --oidc-issuer-url=https://id.tjo.space/application/o/k8stjocloud/
- --oidc-client-id=HAI6rW0EWtgmSPGKAJ3XXzubQTUut2GMeTRS2spg
- --oidc-extra-scope=profile
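
The exec stanza presumably requires the kubelogin plugin (invoked as kubectl oidc-login) to be installed on any machine that uses this kubeconfig; kubectl runs it through the client.authentication.k8s.io exec protocol to obtain and cache the OIDC token.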


@@ -22,7 +22,7 @@ module "cluster" {
proxmox = {
name = "tjo-cloud"
url = "https://proxmox.tjo.cloud/api2/json"
-iso_storage_id = "proxmox-backup-tjo-cloud"
+common_storage = "proxmox-backup-tjo-cloud"
}
tailscale_authkey = var.tailscale_authkey
@@ -30,52 +30,52 @@
allow_scheduling_on_control_planes = true
nodes = {
pink = {
-public = true
-type = "controlplane"
-host = "hetzner"
-boot_pool = "hetzner-main-data"
-cores = 4
-memory = 4096
+public = true
+type = "controlplane"
+host = "hetzner"
+storage = "local-zfs"
+cores = 4
+memory = 4096
}
purple = {
-public = true
-type = "controlplane"
-host = "hetzner"
-boot_pool = "hetzner-main-data"
-cores = 4
-memory = 4096
+public = true
+type = "controlplane"
+host = "hetzner"
+storage = "local-zfs"
+cores = 4
+memory = 4096
}
violet = {
-public = true
-type = "controlplane"
-host = "hetzner"
-boot_pool = "hetzner-main-data"
-cores = 4
-memory = 4096
+public = true
+type = "controlplane"
+host = "hetzner"
+storage = "local-zfs"
+cores = 4
+memory = 4096
}
blue = {
-public = false
-type = "worker"
-host = "hetzner"
-boot_pool = "hetzner-main-data"
-cores = 4
-memory = 16384
+public = false
+type = "worker"
+host = "hetzner"
+storage = "local-zfs"
+cores = 4
+memory = 16384
}
cyan = {
-public = false
-type = "worker"
-host = "hetzner"
-boot_pool = "hetzner-main-data"
-cores = 4
-memory = 16384
+public = false
+type = "worker"
+host = "hetzner"
+storage = "local-zfs"
+cores = 4
+memory = 16384
}
green = {
-public = false
-type = "worker"
-host = "hetzner"
-boot_pool = "hetzner-main-data"
-cores = 4
-memory = 16384
+public = false
+type = "worker"
+host = "hetzner"
+storage = "local-zfs"
+cores = 4
+memory = 16384
}
}
}


@@ -20,10 +20,6 @@ terraform {
source = "hashicorp/random"
version = "3.6.2"
}
-macaddress = {
-source = "ivoronin/macaddress"
-version = "0.3.2"
-}
helm = {
source = "hashicorp/helm"
version = "2.14.0"

File diff suppressed because it is too large


@@ -21,9 +21,8 @@ locals {
talos_controlplane_config = {
cluster : {
etcd : {
-#advertisedSubnets : [
-# local.tailscaleSubnet
-#]
+advertisedSubnets : local.tailscaleSubnets
+listenSubnets : local.tailscaleSubnets
}
allowSchedulingOnControlPlanes : var.allow_scheduling_on_control_planes,
apiServer : {
@@ -67,11 +66,8 @@
contents : data.helm_template.ccm.manifest
}
]
-externalCloudProvider : {
-enabled : true
-}
extraManifests : [
#"https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/v0.8.5/deploy/standalone-install.yaml",
"https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/v0.8.5/deploy/standalone-install.yaml",
"https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.7.1/components.yaml",
]
}
@@ -79,6 +75,9 @@
talos_worker_config = {
cluster : {
+#externalCloudProvider : {
+# enabled : true
+#}
controlPlane : {
endpoint : local.cluster_endpoint
localAPIServerPort : var.cluster.api.port
@@ -101,6 +100,7 @@
}
extraArgs : {
rotate-server-certificates : true
+#cloud-provider : "external"
}
}
install = {
@@ -109,6 +109,33 @@
}
}
}
+talos_node_config = {
+for k, node in local.nodes_with_address : k => [
+yamlencode({
+machine = {
+network = {
+hostname = node.name
+}
+nodeLabels = {
+"k8s.tjo.cloud/public" = node.public ? "true" : "false"
+"k8s.tjo.cloud/host" = node.host
+"k8s.tjo.cloud/proxmox" = var.proxmox.name
+}
+}
+}),
+yamlencode(
+{
+apiVersion : "v1alpha1"
+kind : "ExtensionServiceConfig"
+name : "tailscale"
+environment : [
+"TS_AUTHKEY=${var.tailscale_authkey}",
+"TS_HOSTNAME=${node.name}",
+]
+})
+]
+}
}
resource "digitalocean_record" "controlplane-A" {
@@ -131,7 +158,9 @@ resource "digitalocean_record" "controlplane-AAAA" {
ttl = 30
}
resource "talos_machine_secrets" "this" {}
resource "talos_machine_secrets" "this" {
talos_version = var.talos.version
}
data "talos_machine_configuration" "controlplane" {
cluster_name = var.cluster.name
@@ -262,14 +291,30 @@ data "helm_template" "csi" {
- url: ${var.proxmox.url}
insecure: ${var.proxmox.insecure}
token_id: "${proxmox_virtual_environment_user_token.csi.id}"
token_secret: "${proxmox_virtual_environment_user_token.csi.value}"
token_secret: "${split("=", proxmox_virtual_environment_user_token.csi.value)[1]}"
region: "${var.proxmox.name}"
storageClass:
-- name: default
+- name: proxmox
storage: local-storage
reclaimPolicy: Delete
-fstype: xfs
+fstype: ext4
ssd: true
cache: none
+replicaCount: 1
+nodeSelector:
+node-role.kubernetes.io/control-plane: ""
+node.cloudprovider.kubernetes.io/platform: nocloud
+tolerations:
+- key: node-role.kubernetes.io/control-plane
+effect: NoSchedule
+node:
+nodeSelector:
+node.cloudprovider.kubernetes.io/platform: nocloud
+tolerations:
+- operator: Exists
EOF
]
}
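
The split("=", ...)[1] above relies on the token's value attribute being the full user@realm!name=secret string, while the CSI config wants the bare secret; the .id attribute already carries the user@realm!name part. A standalone illustration with a hypothetical token value:

```hcl
# Hypothetical value; a real secret would be a UUID issued by Proxmox.
locals {
  example_token_value = "kubernetes-csi@pve!terraform=8e6a3d1c-1111-2222-3333-444455556666"

  # Everything before "=" is the token id, everything after is the secret.
  example_token_id = split("=", local.example_token_value)[0]
  example_secret   = split("=", local.example_token_value)[1]
}

output "example_secret" {
  value = local.example_secret # => 8e6a3d1c-1111-2222-3333-444455556666
}
```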
@@ -285,13 +330,22 @@ data "helm_template" "ccm" {
kube_version = var.talos.kubernetes
values = [<<-EOF
+affinity:
+nodeAffinity:
+requiredDuringSchedulingIgnoredDuringExecution:
+nodeSelectorTerms:
+- matchExpressions:
+- key: node-role.kubernetes.io/control-plane
+operator: Exists
+enabledControllers:
+- cloud-node-lifecycle
config:
clusters:
- url: ${var.proxmox.url}
insecure: ${var.proxmox.insecure}
token_id: "${proxmox_virtual_environment_user_token.ccm.id}"
token_secret: "${proxmox_virtual_environment_user_token.ccm.value}"
region: "${var.proxmox.name}"
token_id: ${proxmox_virtual_environment_user_token.ccm.id}
token_secret: ${split("=", proxmox_virtual_environment_user_token.ccm.value)[1]}
region: ${var.proxmox.name}
EOF
]
}
@@ -307,31 +361,13 @@ resource "talos_machine_configuration_apply" "controlplane" {
apply_mode = "reboot"
-config_patches = [
-yamlencode(merge(local.talos_worker_config, local.talos_controlplane_config, {
-machine = {
-network = {
-hostname = each.value.name
-}
-nodeLabels = {
-"k8s.tjo.cloud/public" = each.value.public ? "true" : "false"
-"k8s.tjo.cloud/host" = each.value.host
-"k8s.tjo.cloud/proxmox" = var.proxmox.name
-}
-}
-})),
-yamlencode(
-{
-apiVersion : "v1alpha1"
-kind : "ExtensionServiceConfig"
-name : "tailscale"
-environment : [
-"TS_AUTHKEY=${var.tailscale_authkey}",
-"TS_HOSTNAME=${each.value.name}",
-]
-})
-]
+config_patches = concat(
+[
+yamlencode(local.talos_worker_config),
+yamlencode(local.talos_controlplane_config)
+],
+local.talos_node_config[each.key]
+)
}
resource "talos_machine_configuration_apply" "worker" {
@@ -345,30 +381,12 @@ resource "talos_machine_configuration_apply" "worker" {
apply_mode = "reboot"
-config_patches = [
-yamlencode(merge(local.talos_worker_config, {
-machine = {
-network = {
-hostname = each.value.name
-}
-nodeLabels = {
-"k8s.tjo.cloud/public" = each.value.public ? "true" : "false"
-"k8s.tjo.cloud/host" = each.value.host
-"k8s.tjo.cloud/proxmox" = var.proxmox.name
-}
-}
-})),
-yamlencode(
-{
-apiVersion : "v1alpha1"
-kind : "ExtensionServiceConfig"
-name : "tailscale"
-environment : [
-"TS_AUTHKEY=${var.tailscale_authkey}",
-"TS_HOSTNAME=${each.value.name}",
-]
-})
-]
+config_patches = concat(
+[
+yamlencode(local.talos_worker_config)
+],
+local.talos_node_config[each.key]
+)
}
resource "talos_machine_bootstrap" "this" {
@@ -390,3 +408,23 @@ data "talos_cluster_kubeconfig" "this" {
client_configuration = talos_machine_secrets.this.client_configuration
node = local.first_controlplane_node.ipv4
}
resource "local_file" "kubeconfig" {
content = data.talos_cluster_kubeconfig.this.kubeconfig_raw
filename = "${path.root}/admin.kubeconfig"
}
data "talos_client_configuration" "this" {
count = length(values({ for k, v in local.nodes_with_address : k => v if v.type == "controlplane" })) > 0 ? 1 : 0
cluster_name = var.cluster.name
client_configuration = talos_machine_secrets.this.client_configuration
endpoints = values({ for k, v in local.nodes_with_address : k => v if v.type == "controlplane" })[*].ipv4
}
resource "local_file" "talosconfig" {
count = length(values({ for k, v in local.nodes : k => v if v.type == "controlplane" })) > 0 ? 1 : 0
content = nonsensitive(data.talos_client_configuration.this[0].talos_config)
filename = "${path.root}/admin.talosconfig"
}
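
These two local_file resources drop admin.kubeconfig and admin.talosconfig next to the root module, which is exactly what the new admin.*config entry added to .gitignore at the top of this commit keeps out of version control.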


@@ -1,6 +1,18 @@
locals {
-nodes = { for k, v in var.nodes : k => merge(v, { name = replace("${k}.${v.type}.${var.cluster.domain}", ".", "-") }) }
+nodes_with_names = {
+for k, v in var.nodes : k => merge(v, {
+id = 1000 + index(keys(var.nodes), k)
+name = replace("${k}.${v.type}.${var.cluster.domain}", ".", "-")
+})
+}
+hashes = {
+for k, v in local.nodes_with_names : k => sha1("${v.name}:${var.cluster.name}")
+}
+nodes = {
+for k, v in local.nodes_with_names : k => merge(v, {
+mac_address = "AA:BB:CC:DD:${format("%v:%v", substr(local.hashes[k], 0, 2), substr(local.hashes[k], 2, 2))}"
+})
+}
first_controlplane_node = values({ for k, v in local.nodes_with_address : k => v if v.type == "controlplane" })[0]
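
With the provider-managed macaddress resources dropped elsewhere in this commit, each node's MAC address is now a pure function of its name and the cluster name. A standalone sketch of the same derivation, with hypothetical inputs:

```hcl
# Hypothetical inputs reproducing the derivation above: sha1 of
# "<node-name>:<cluster-name>" supplies the last two MAC octets,
# so the address is stable across runs without storing any state.
locals {
  example_name    = "pink-controlplane-k8s-tjo-cloud"
  example_cluster = "tjo-cloud"
  example_hash    = sha1("${local.example_name}:${local.example_cluster}")
  example_mac     = "AA:BB:CC:DD:${substr(local.example_hash, 0, 2)}:${substr(local.example_hash, 2, 2)}"
}

output "example_mac" {
  value = local.example_mac
}
```

The AA:BB:CC:DD prefix keeps the address in the locally administered unicast range, and sha1 returns lowercase hex, so both substrings are valid octets.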
@@ -21,49 +33,44 @@ locals {
nodes_with_address = {
for k, v in local.nodes :
k => merge(v, {
-ipv4 = try(local.ipv4_addresses[k]["eth0"][0], null)
-ipv6 = try(local.ipv6_addresses[k]["eth0"][0], null)
+ipv4 = local.ipv4_addresses[k]["eth0"][0]
+ipv6 = local.ipv6_addresses[k]["eth0"][0]
})
}
}
resource "proxmox_virtual_environment_download_file" "talos" {
content_type = "iso"
-datastore_id = var.proxmox.iso_storage_id
+datastore_id = var.proxmox.common_storage
node_name = values(var.nodes)[0].host
file_name = "talos-${var.talos.version}-amd64.iso"
file_name = "talos-${var.talos.schematic_id}-${var.talos.version}-amd64.iso"
url = "https://factory.talos.dev/image/${var.talos.schematic_id}/${var.talos.version}/nocloud-amd64.iso"
}
resource "proxmox_virtual_environment_file" "controlplane" {
for_each = { for k, v in local.nodes_with_address : k => v if v.type == "controlplane" }
resource "proxmox_virtual_environment_file" "metadata" {
for_each = local.nodes
node_name = each.value.host
content_type = "snippets"
-datastore_id = each.value.boot_pool
+datastore_id = var.proxmox.common_storage
source_raw {
data = <<-EOF
-hostname: ${each.value.name}
-instance-id: 1000
-instance-type: ${each.value.cpu}VCPU-${floor(each.value.memory / 1024)}GB
-provider-id: "proxmox://${var.proxmox.name}/1000"
-region: ${var.proxmox.name}
+id: ${each.value.id}
+providerID: proxmox://${var.proxmox.name}/${each.value.id}
+type: ${each.value.cores}VCPU-${floor(each.value.memory / 1024)}GB
+zone: ${each.value.host}
+region: ${var.proxmox.name}
EOF
file_name = "${each.value.name}.metadata.yaml"
file_name = "cluster-${var.cluster.name}-${each.value.name}.metadata.yaml"
}
}
resource "macaddress" "private" {
for_each = local.nodes
}
resource "macaddress" "public" {
for_each = local.nodes
}
resource "proxmox_virtual_environment_vm" "nodes" {
for_each = local.nodes
+vm_id = each.value.id
name = each.value.name
node_name = each.value.host
@@ -75,8 +82,11 @@ resource "proxmox_virtual_environment_vm" "nodes" {
)
stop_on_destroy = true
timeout_start_vm = 60
timeout_stop_vm = 60
+timeout_shutdown_vm = 60
+timeout_reboot = 60
+timeout_create = 120
cpu {
cores = each.value.cores
@@ -97,27 +107,28 @@ resource "proxmox_virtual_environment_vm" "nodes" {
network_device {
bridge = each.value.public ? "vmpublic0" : "vmprivate0"
-mac_address = macaddress.private[each.key].address
+mac_address = each.value.mac_address
}
-disk {
-file_format = "raw"
-interface = "scsi0"
-datastore_id = each.value.boot_pool
-file_id = proxmox_virtual_environment_download_file.talos.id
-backup = false
+cdrom {
+enabled = true
+file_id = proxmox_virtual_environment_download_file.talos.id
+}
+scsi_hardware = "virtio-scsi-single"
disk {
file_format = "raw"
interface = "virtio0"
-datastore_id = each.value.boot_pool
+datastore_id = each.value.storage
size = each.value.boot_size
+backup = true
+cache = "none"
+iothread = true
}
initialization {
-meta_data_file_id = proxmox_virtual_environment_file.controlplane[each.key].id
+datastore_id = each.value.storage
+meta_data_file_id = proxmox_virtual_environment_file.metadata[each.key].id
}
}
@@ -136,12 +147,24 @@ resource "proxmox_virtual_environment_user" "csi" {
comment = "Managed by Terraform"
user_id = "kubernetes-csi@pve"
enabled = true
+acl {
+path = "/"
+propagate = true
+role_id = proxmox_virtual_environment_role.csi.role_id
+}
}
resource "proxmox_virtual_environment_user_token" "csi" {
comment = "Managed by Terraform"
token_name = "terraform"
user_id = proxmox_virtual_environment_user.csi.user_id
}
resource "proxmox_virtual_environment_acl" "csi" {
token_id = proxmox_virtual_environment_user_token.csi.id
role_id = proxmox_virtual_environment_role.csi.role_id
path = "/"
propagate = true
}
resource "proxmox_virtual_environment_role" "ccm" {
role_id = "kubernetes-ccm"
@@ -154,9 +177,21 @@ resource "proxmox_virtual_environment_user" "ccm" {
comment = "Managed by Terraform"
user_id = "kubernetes-ccm@pve"
enabled = true
+acl {
+path = "/"
+propagate = true
+role_id = proxmox_virtual_environment_role.ccm.role_id
+}
}
resource "proxmox_virtual_environment_user_token" "ccm" {
comment = "Managed by Terraform"
token_name = "terraform"
user_id = proxmox_virtual_environment_user.ccm.user_id
}
resource "proxmox_virtual_environment_acl" "ccm" {
token_id = proxmox_virtual_environment_user_token.ccm.id
role_id = proxmox_virtual_environment_role.ccm.role_id
path = "/"
propagate = true
}
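
Granting the role to the token id itself, and not only inline on the user, matters because Proxmox treats token permissions separately: with privilege separation enabled, a token's effective permissions are the intersection of the user's ACLs and the token's own ACLs, so a role granted only to the user never reaches the token.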


@@ -7,7 +7,7 @@ variable "nodes" {
cores = optional(number, 4)
memory = optional(number, 4096)
-boot_pool = string
+storage = string
boot_size = optional(number, 32)
}))
}
@@ -60,7 +60,7 @@ variable "proxmox" {
name = string
url = string
insecure = optional(bool, false)
-iso_storage_id = string
+common_storage = string
})
sensitive = true
}


@@ -20,10 +20,6 @@ terraform {
source = "hashicorp/random"
version = "3.6.2"
}
-macaddress = {
-source = "ivoronin/macaddress"
-version = "0.3.2"
-}
helm = {
source = "hashicorp/helm"
version = "2.14.0"