refactor: use different proxmox provider, figure out nocloud bootstrap

This commit is contained in:
Tine 2024-07-17 21:43:11 +02:00
parent 2fb90bf621
commit fc8423fe12
Signed by: mentos1386
SSH key fingerprint: SHA256:MNtTsLbihYaWF8j1fkOHfkKNlnN1JQfxEU/rBU8nCGw
9 changed files with 387 additions and 308 deletions

View file

@ -1,6 +1,29 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/bpg/proxmox" {
version = "0.61.1"
constraints = "0.61.1"
hashes = [
"h1:6kz2Rdjc8+TVq2aUxEQXLOwbb9OdhJJei0L1fC4K2R4=",
"zh:27d8b589a2dc1e0a5b0f8ab299b9f3704a2f0b69799d1d4d8845c68056986d1f",
"zh:46dfa6b33ddd7007a2144f38090457604eb56a59a303b37bb0ad1be5c84ddaca",
"zh:47a1b14a759393c5ecc76f2feb950677c418c910b8c677fde0dd3e4675c41579",
"zh:582e49d109d1c2b1f3b1268a7cbc43548f3c6d96a87c92a5428767097a5e383e",
"zh:5e98ad6afae5969a4c3ffb14c0484936550c66c8313d7686551c29b633ff32f2",
"zh:7b9e24b76f947ab8f1e571cf61beefc983b7d2aa1b85df35c4f015728fe37a38",
"zh:8255ca210f279a0f7b8ca2762df26d2ea1a01704298c5e3d5cf601bd39a743f0",
"zh:85d7655fdc95dedced9cf8105a0beeb0d7bc8f668c55f62019a7215a76d60300",
"zh:8aeea5a1d001b06baaf923b754e1a14d06c75eb8c8b87a7f65a3c8205fc8b079",
"zh:a9cfab6c06f613658c5fdd83742cd22c0eb7563778924b1407965ef8c36c1ce0",
"zh:ceaab67801d49a92eb5858b1ddae6df2569462e5ffbe31f9dbd79dcb684ea142",
"zh:dc25b506d5c55d1d78a335d3ebd03213c99b4b2a5859812349a955c2f746ff7e",
"zh:e04b477fd77a0d37a0bdb76a7cf69184dad9e7fbba9b4f3a378a8901b82b75e5",
"zh:f1e6838d9141557f73340df9b21fce5a82b41cc16ae36f063a920ccc36bc0758",
"zh:f26e0763dbe6a6b2195c94b44696f2110f7f55433dc142839be16b9697fa5597",
]
}
provider "registry.opentofu.org/digitalocean/digitalocean" {
version = "2.39.2"
constraints = "~> 2.0"
@ -136,25 +159,3 @@ provider "registry.opentofu.org/siderolabs/talos" {
"zh:f3a279723ff31a095d7bfff21857abfcc9a2cfdeeea8521d179630ae6565d581",
]
}
provider "registry.opentofu.org/telmate/proxmox" {
version = "3.0.1-rc3"
constraints = "3.0.1-rc3"
hashes = [
"h1:x7TfUaW+RpBtGov4DBuSJ5YPYBozapWuLyyZs0qjsKY=",
"zh:3699c41289c6fbe0f33b6c54360d43dcfba429de5fbf49506df9276d03aea915",
"zh:486c9ddda427d3fecdc6dfa189fce85c4a2aa1f490b024d636c0ac6a4dd3c692",
"zh:6091e141a0b8dcb1632c31e0f9555117bb023176c5d083f0e03441bbcf673a4e",
"zh:63d312c2c2994ed39dcb47b4d43c89990bd5fff20dbda63cddfb11c9202270f4",
"zh:6e69c70a85cfa720f543090ee3ce7d2eb2902df19657121b8b7ae64d44875d9f",
"zh:897b9f6075262fc9533f87d470217b14ae82614c6818a26b578a6d41c403d4eb",
"zh:91c24bd374fb8ee0c9e4e1c213d157139c047be78b0cafac3c4c9724db8083b0",
"zh:a224b58759314dc045fdbfc88b63b036b8ca6f75ad32606e94b553f150077c13",
"zh:a56e940c71b45e222c69a2a45388b58ed319836b922f84f62bded5b063662f4a",
"zh:b2e0a83aa535cd3493fbc7485d05d1a823c48bf487e313703f01a17edc631908",
"zh:ba0ad4fea8ba3b01c67fb164ed92fa927ac70d2d898378d192a01e818fcf6bee",
"zh:c49ebe13e7011d35d72e8e6a720df83f21c106444ef4383c5d6c0015aee55db6",
"zh:c53e2775040e103aedcce06b9acb79ca5fccdb4c578a4b6e32489c89e9c652dc",
"zh:c9002cc470ccfd8cd298d5655cf76af84b1d8a200207973d9ad80235818e89e3",
]
}

View file

@ -1,26 +0,0 @@
apiVersion: v1
kind: Config
clusters:
- name: tjo-cloud
cluster:
server: https://api.k8s.tjo.cloud:6443
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpakNDQVMrZ0F3SUJBZ0lRVHV4SURwTXNNUXNYWitLb2k1RkdsakFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTBNRGN4TmpFNE1qWXhPVm9YRFRNME1EY3hOREU0TWpZeApPVm93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCQ01zTVdqWjU1alRzaWd6aVcrT1BUSm1uS1VLQ2VTbktnTmptOFNRVE0ydm1tQklaM1I1UDlKcTY4ZmEKUU9zZ0RXSE96aFF6MjRNRUw1aFhpUzRjU3B1allUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVVEVG41Sy8raXROdEgyczM0MTR3TEFjV0V0Wm93Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQU56bVV6OVEKZVhVVVJzei9KWlRlb29ibzF0Y1BHN2p3cGFlSDQ0VFVwVEE4QWlFQTFZcTdhUWdkb2lVTFNac2IrT3JLeDJyWQpGRTdSVkpxR0lyM1U4c3daY3k4PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
contexts:
- name: oidc@tjo-cloud
context:
cluster: tjo-cloud
namespace: default
user: oidc
current-context: oidc@tjo-cloud
users:
- name: oidc
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: kubectl
args:
- oidc-login
- get-token
- --oidc-issuer-url=https://id.tjo.space/application/o/k8stjocloud/
- --oidc-client-id=HAI6rW0EWtgmSPGKAJ3XXzubQTUut2GMeTRS2spg
- --oidc-extra-scope=profile

View file

@ -5,13 +5,11 @@ module "cluster" {
helm.template = helm.template
}
versions = {
talos = "v1.7.5"
talos = {
version = "v1.7.5"
kubernetes = "v1.30.0"
}
iso = "proxmox-backup-tjo-cloud:iso/talos-v1.7.5-tailscale-metal-amd64.iso"
cluster = {
name = "tjo-cloud"
domain = "k8s.tjo.cloud"
@ -22,16 +20,9 @@ module "cluster" {
}
proxmox = {
name = "tjo-cloud"
url = "https://proxmox.tjo.cloud/api2/json"
ccm = {
username = var.proxmox_ccm_username
token = var.proxmox_ccm_token
}
csi = {
username = var.proxmox_csi_username
token = var.proxmox_csi_token
}
name = "tjo-cloud"
url = "https://proxmox.tjo.cloud/api2/json"
iso_storage_id = "proxmox-backup-tjo-cloud"
}
tailscale_authkey = var.tailscale_authkey

View file

@ -1,8 +1,8 @@
terraform {
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "3.0.1-rc3"
source = "bpg/proxmox"
version = "0.61.1"
}
talos = {
source = "siderolabs/talos"
@ -37,8 +37,13 @@ terraform {
provider "proxmox" {
# FIXME: Traefik/NGINX breaks this! 500 ERROR
pm_api_url = "https://178.63.49.225:8006/api2/json"
pm_tls_insecure = true
endpoint = "https://178.63.49.225:8006/api2/json"
insecure = true
api_token = var.proxmox_token
ssh {
agent = true
username = "root"
}
}
provider "digitalocean" {

View file

@ -22,18 +22,7 @@ variable "digitalocean_token" {
sensitive = true
}
variable "proxmox_csi_username" {
type = string
}
variable "proxmox_csi_token" {
type = string
sensitive = true
}
variable "proxmox_ccm_username" {
type = string
}
variable "proxmox_ccm_token" {
variable "proxmox_token" {
type = string
sensitive = true
}

View file

@ -2,12 +2,6 @@ locals {
cluster_api_domain = "${var.cluster.api.subdomain}.${var.cluster.domain}"
cluster_endpoint = "https://${local.cluster_api_domain}:${var.cluster.api.port}"
nodes = { for k, v in var.nodes : k => merge(v, { name = replace("${k}.${v.type}.${var.cluster.domain}", ".", "-") }) }
nodes_with_address = { for k, v in local.nodes : k => merge(v, { ipv4 = try(proxmox_vm_qemu.this[k].default_ipv4_address, false), ipv6 = try(proxmox_vm_qemu.this[k].default_ipv6_address, false) }) }
first_controlplane_node = values({ for k, v in local.nodes_with_address : k => v if v.type == "controlplane" })[0]
podSubnets = [
"10.200.0.0/16",
#"fd9b:5314:fc70::/48",
@ -24,85 +18,116 @@ locals {
"fd7a:115c:a1e0::/96"
]
proxmox-cloud-controller-manager = {
clusters : [{
region : var.proxmox.name,
url : var.proxmox.url,
insecure : var.proxmox.insecure,
token_id : var.proxmox.ccm.username,
token_secret : var.proxmox.ccm.token,
}]
}
}
resource "macaddress" "private" {
for_each = local.nodes
}
resource "macaddress" "public" {
for_each = local.nodes
}
resource "proxmox_vm_qemu" "this" {
for_each = local.nodes
name = each.value.name
target_node = each.value.host
tags = join(";", concat(
["kubernetes", "terraform"],
each.value.public ? ["public"] : ["private"],
[each.value.type]
))
cores = each.value.cores
memory = each.value.memory
scsihw = "virtio-scsi-pci"
qemu_os = "l26"
agent = 1
network {
model = "virtio"
bridge = each.value.public ? "vmpublic0" : "vmprivate0"
macaddr = macaddress.private[each.key].address
}
disks {
scsi {
scsi0 {
cdrom {
iso = var.iso
talos_controlplane_config = {
cluster : {
etcd : {
#advertisedSubnets : [
# local.tailscaleSubnet
#]
}
allowSchedulingOnControlPlanes : var.allow_scheduling_on_control_planes,
apiServer : {
extraArgs : {
"oidc-issuer-url" : "https://id.tjo.space/application/o/k8stjocloud/",
"oidc-client-id" : "HAI6rW0EWtgmSPGKAJ3XXzubQTUut2GMeTRS2spg",
"oidc-username-claim" : "sub",
"oidc-username-prefix" : "oidc:",
"oidc-groups-claim" : "groups",
"oidc-groups-prefix" : "oidc:groups:",
}
}
}
virtio {
virtio0 {
disk {
size = each.value.boot_size
storage = each.value.boot_pool
inlineManifests : [
{
name : "oidc-groups"
contents : <<-EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: id-tjo-space:admins
subjects:
- kind: Group
name: oidc:groups:k8s.tjo.cloud admin
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
EOF
},
{
name : "cilium"
contents : data.helm_template.cilium.manifest
},
{
name : "promxmox-csi-plugin"
contents : data.helm_template.csi.manifest
},
{
name : "proxmox-cloud-controller-manager"
contents : data.helm_template.ccm.manifest
}
]
externalCloudProvider : {
enabled : true
}
extraManifests : [
#"https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/v0.8.5/deploy/standalone-install.yaml",
"https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.7.1/components.yaml",
]
}
}
talos_worker_config = {
cluster : {
controlPlane : {
endpoint : local.cluster_endpoint
localAPIServerPort : var.cluster.api.port
}
network : {
cni : {
name : "none"
}
podSubnets : local.podSubnets
serviceSubnets : local.serviceSubnets
}
proxy : {
disabled : true
}
}
machine = {
kubelet = {
nodeIP : {
validSubnets : local.tailscaleSubnets
}
extraArgs : {
rotate-server-certificates : true
}
}
install = {
image = "factory.talos.dev/installer/${var.talos.schematic_id}:${var.talos.version}"
disk = "/dev/vda"
}
}
}
}
resource "digitalocean_record" "controlplane-A" {
for_each = { for k, v in proxmox_vm_qemu.this : k => v if try(var.nodes[k].public && var.nodes[k].type == "controlplane", false) }
for_each = { for k, node in local.nodes_with_address : k => node if node.type == "controlplane" }
domain = var.cluster.domain
type = "A"
name = var.cluster.api.subdomain
value = each.value.default_ipv4_address
value = each.value.ipv4
ttl = 30
}
resource "digitalocean_record" "controlplane-AAAA" {
for_each = { for k, v in proxmox_vm_qemu.this : k => v if try(var.nodes[k].public && var.nodes[k].type == "controlplane", false) }
for_each = { for k, node in local.nodes_with_address : k => node if node.type == "controlplane" }
domain = var.cluster.domain
type = "AAAA"
name = var.cluster.api.subdomain
value = each.value.default_ipv6_address
value = each.value.ipv6
ttl = 30
}
@ -114,8 +139,8 @@ data "talos_machine_configuration" "controlplane" {
cluster_endpoint = local.cluster_endpoint
machine_secrets = talos_machine_secrets.this.machine_secrets
talos_version = var.versions.talos
kubernetes_version = var.versions.kubernetes
talos_version = var.talos.version
kubernetes_version = var.talos.kubernetes
depends_on = [
digitalocean_record.controlplane-A,
@ -129,8 +154,8 @@ data "talos_machine_configuration" "worker" {
cluster_endpoint = local.cluster_endpoint
machine_secrets = talos_machine_secrets.this.machine_secrets
talos_version = var.versions.talos
kubernetes_version = var.versions.kubernetes
talos_version = var.talos.version
kubernetes_version = var.talos.kubernetes
depends_on = [
digitalocean_record.controlplane-A,
@ -147,7 +172,7 @@ data "helm_template" "cilium" {
version = "1.15.6"
namespace = "kube-system"
kube_version = var.versions.kubernetes
kube_version = var.talos.kubernetes
api_versions = [
"gateway.networking.k8s.io/v1/GatewayClass",
]
@ -220,6 +245,57 @@ data "helm_template" "cilium" {
})]
}
data "helm_template" "csi" {
provider = helm.template
name = "proxmox-csi-plugin"
chart = "proxmox-csi-plugin"
repository = "oci://ghcr.io/sergelogvinov/charts"
version = "0.2.5"
namespace = "kube-system"
kube_version = var.talos.kubernetes
values = [<<-EOF
config:
clusters:
- url: ${var.proxmox.url}
insecure: ${var.proxmox.insecure}
token_id: "${proxmox_virtual_environment_user_token.csi.id}"
token_secret: "${proxmox_virtual_environment_user_token.csi.value}"
region: "${var.proxmox.name}"
storageClass:
- name: default
storage: local-storage
reclaimPolicy: Delete
fstype: xfs
EOF
]
}
data "helm_template" "ccm" {
provider = helm.template
name = "proxmox-cloud-controller-manager"
chart = "proxmox-cloud-controller-manager"
repository = "oci://ghcr.io/sergelogvinov/charts"
version = "0.2.3"
namespace = "kube-system"
kube_version = var.talos.kubernetes
values = [<<-EOF
config:
clusters:
- url: ${var.proxmox.url}
insecure: ${var.proxmox.insecure}
token_id: "${proxmox_virtual_environment_user_token.ccm.id}"
token_secret: "${proxmox_virtual_environment_user_token.ccm.value}"
region: "${var.proxmox.name}"
EOF
]
}
resource "talos_machine_configuration_apply" "controlplane" {
for_each = { for k, v in local.nodes_with_address : k => v if v.type == "controlplane" }
@ -232,117 +308,28 @@ resource "talos_machine_configuration_apply" "controlplane" {
apply_mode = "reboot"
config_patches = [
yamlencode({
cluster : {
controlPlane : {
endpoint : local.cluster_endpoint
localAPIServerPort : var.cluster.api.port
}
etcd : {
#advertisedSubnets : [
# local.tailscaleSubnet
#]
}
network : {
cni : {
name : "none"
}
podSubnets : local.podSubnets
serviceSubnets : local.serviceSubnets
}
proxy : {
disabled : true
}
allowSchedulingOnControlPlanes : var.allow_scheduling_on_control_planes,
apiServer : {
extraArgs : {
"oidc-issuer-url" : "https://id.tjo.space/application/o/k8stjocloud/",
"oidc-client-id" : "HAI6rW0EWtgmSPGKAJ3XXzubQTUut2GMeTRS2spg",
"oidc-username-claim" : "sub",
"oidc-username-prefix" : "oidc:",
"oidc-groups-claim" : "groups",
"oidc-groups-prefix" : "oidc:groups:",
}
}
inlineManifests : [
{
name : "oidc-groups"
contents : <<-EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: id-tjo-space:admins
subjects:
- kind: Group
name: oidc:groups:k8s.tjo.cloud admin
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
EOF
},
{
name : "cilium"
contents : data.helm_template.cilium.manifest
},
{
name : "proxmox-cloud-controller-manager"
contents : <<-EOF
apiVersion: v1
kind: Secret
metadata:
name: proxmox-cloud-controller-manager
namespace: kube-system
data:
config.yaml: ${base64encode(yamlencode(local.proxmox-cloud-controller-manager))}
EOF
}
]
externalCloudProvider : {
enabled : true
manifests : [
"https://raw.githubusercontent.com/sergelogvinov/proxmox-csi-plugin/v0.7.0/docs/deploy/proxmox-csi-plugin-talos.yml",
"https://raw.githubusercontent.com/sergelogvinov/proxmox-cloud-controller-manager/v0.4.2/docs/deploy/cloud-controller-manager-talos.yml"
]
}
extraManifests : [
#"https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/v0.8.5/deploy/standalone-install.yaml",
"https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.7.1/components.yaml",
]
}
yamlencode(merge(local.talos_worker_config, local.talos_controlplane_config, {
machine = {
kubelet = {
nodeIP : {
validSubnets : local.tailscaleSubnets
}
extraArgs : {
rotate-server-certificates : true
cloud-provider : "external"
}
}
network = {
hostname = each.value.name
}
install = {
image = "factory.talos.dev/installer/7d4c31cbd96db9f90c874990697c523482b2bae27fb4631d5583dcd9c281b1ff:${var.versions.talos}"
disk = "/dev/vda"
}
nodeLabels = {
"k8s.tjo.cloud/public" = each.value.public ? "true" : "false"
"k8s.tjo.cloud/host" = each.value.host
"k8s.tjo.cloud/proxmox" = var.proxmox.name
}
}
}),
yamlencode({
apiVersion : "v1alpha1"
kind : "ExtensionServiceConfig"
name : "tailscale"
environment : [
"TS_AUTHKEY=${var.tailscale_authkey}",
"TS_HOSTNAME=${replace(each.value.name, ".", "-")}",
]
})),
yamlencode(
{
apiVersion : "v1alpha1"
kind : "ExtensionServiceConfig"
name : "tailscale"
environment : [
"TS_AUTHKEY=${var.tailscale_authkey}",
"TS_HOSTNAME=${each.value.name}",
]
})
]
}
@ -359,55 +346,27 @@ resource "talos_machine_configuration_apply" "worker" {
apply_mode = "reboot"
config_patches = [
yamlencode({
cluster : {
controlPlane : {
endpoint : local.cluster_endpoint
localAPIServerPort : var.cluster.api.port
}
network : {
cni : {
name : "none"
}
podSubnets : local.podSubnets
serviceSubnets : local.serviceSubnets
}
proxy : {
disabled : true
}
}
yamlencode(merge(local.talos_worker_config, {
machine = {
kubelet = {
nodeIP : {
validSubnets : local.tailscaleSubnets
}
extraArgs : {
rotate-server-certificates : true
cloud-provider : "external"
}
}
network = {
hostname = each.value.name
}
install = {
image = "factory.talos.dev/installer/7d4c31cbd96db9f90c874990697c523482b2bae27fb4631d5583dcd9c281b1ff:${var.versions.talos}"
disk = "/dev/vda"
}
nodeLabels = {
"k8s.tjo.cloud/public" = each.value.public ? "true" : "false"
"k8s.tjo.cloud/host" = each.value.host
"k8s.tjo.cloud/proxmox" = var.proxmox.name
}
}
}),
yamlencode({
apiVersion : "v1alpha1"
kind : "ExtensionServiceConfig"
name : "tailscale"
environment : [
"TS_AUTHKEY=${var.tailscale_authkey}",
"TS_HOSTNAME=${replace(each.value.name, ".", "-")}",
]
})),
yamlencode(
{
apiVersion : "v1alpha1"
kind : "ExtensionServiceConfig"
name : "tailscale"
environment : [
"TS_AUTHKEY=${var.tailscale_authkey}",
"TS_HOSTNAME=${each.value.name}",
]
})
]
}

162
modules/cluster/proxmox.tf Normal file
View file

@ -0,0 +1,162 @@
locals {
  # Node map keyed by short name; adds a DNS-safe "name" (dots -> dashes)
  # of the form <key>-<type>-<cluster-domain-with-dashes>.
  nodes = { for k, v in var.nodes : k => merge(v, { name = replace("${k}.${v.type}.${var.cluster.domain}", ".", "-") }) }

  # First control-plane node. values() orders by sorted key, so this is
  # deterministic, but "first" depends on lexical key order.
  first_controlplane_node = values({ for k, v in local.nodes_with_address : k => v if v.type == "controlplane" })[0]

  # Per-node map of interface name -> list of IPv4 addresses, as reported
  # by the QEMU guest agent on the created VMs (depends on the VM resource).
  ipv4_addresses = {
    for key, node in local.nodes : key => {
      for k, v in proxmox_virtual_environment_vm.nodes[key].ipv4_addresses :
      proxmox_virtual_environment_vm.nodes[key].network_interface_names[k] => v
    }
  }

  # Same mapping as above, for IPv6.
  ipv6_addresses = {
    for key, node in local.nodes : key => {
      for k, v in proxmox_virtual_environment_vm.nodes[key].ipv6_addresses :
      proxmox_virtual_environment_vm.nodes[key].network_interface_names[k] => v
    }
  }

  # Nodes enriched with the first address seen on eth0; null until the
  # guest agent has reported addresses for that VM.
  nodes_with_address = {
    for k, v in local.nodes :
    k => merge(v, {
      ipv4 = try(local.ipv4_addresses[k]["eth0"][0], null)
      ipv6 = try(local.ipv6_addresses[k]["eth0"][0], null)
    })
  }
}
# Download the Talos nocloud image (extensions baked in via the factory
# schematic id) onto a Proxmox datastore so the VMs below can boot it.
resource "proxmox_virtual_environment_download_file" "talos" {
  content_type = "iso"
  datastore_id = var.proxmox.iso_storage_id
  # NOTE(review): the download is performed through the first node's host
  # only -- assumes iso_storage_id is shared storage visible to all hosts
  # that run VMs referencing this file; verify.
  node_name = values(var.nodes)[0].host
  file_name = "talos-${var.talos.version}-amd64.iso"
  url       = "https://factory.talos.dev/image/${var.talos.schematic_id}/${var.talos.version}/nocloud-amd64.iso"
}
# Per-node cloud-init (nocloud) metadata snippet, consumed by the VM
# "initialization" block and read inside the guest to set hostname and
# expose region/zone topology to the Proxmox CCM.
#
# FIX: iterate over local.nodes instead of local.nodes_with_address --
# nodes_with_address is derived from proxmox_virtual_environment_vm.nodes,
# which itself references this resource, which made a dependency cycle.
# FIX: drop the control-plane filter -- every VM's initialization block
# looks up proxmox_virtual_environment_file.controlplane[each.key], so
# worker keys must exist too. (Resource name kept to avoid breaking that
# reference; consider renaming to "metadata" in a follow-up.)
# FIX: each.value.cpu is not in the node schema; the attribute is "cores"
# (see the nodes variable and the VM resource).
resource "proxmox_virtual_environment_file" "controlplane" {
  for_each = local.nodes

  node_name    = each.value.host
  content_type = "snippets"
  datastore_id = each.value.boot_pool

  source_raw {
    # FIXME(review): instance-id and the provider-id VM id are hard-coded
    # to 1000 for every node -- these should presumably be unique per VM;
    # confirm what the CCM expects before changing the format.
    data      = <<-EOF
      hostname: ${each.value.name}
      instance-id: 1000
      instance-type: ${each.value.cores}VCPU-${floor(each.value.memory / 1024)}GB
      provider-id: "proxmox://${var.proxmox.name}/1000"
      region: ${var.proxmox.name}
      zone: ${each.value.host}
      EOF
    file_name = "${each.value.name}.metadata.yaml"
  }
}
# Stable randomly-generated MAC addresses, one per node, so NICs keep
# their identity (and DHCP leases) across VM re-creation.
resource "macaddress" "private" {
  for_each = local.nodes
}

# NOTE(review): "public" is not referenced anywhere in this file (the
# network_device below always uses macaddress.private) -- verify it is
# used elsewhere before removing.
resource "macaddress" "public" {
  for_each = local.nodes
}
# One Talos VM per node: boots the downloaded nocloud ISO on scsi0,
# installs onto the persistent virtio0 disk, and receives its topology
# metadata via the cloud-init snippet resource above.
resource "proxmox_virtual_environment_vm" "nodes" {
  for_each = local.nodes

  name        = each.value.name
  node_name   = each.value.host
  description = "Node ${each.value.name} for cluster ${var.cluster.name}."

  tags = concat(
    ["kubernetes", "terraform"],
    each.value.public ? ["public"] : ["private"],
    [each.value.type]
  )

  stop_on_destroy     = true
  timeout_stop_vm     = 60
  timeout_shutdown_vm = 60

  cpu {
    cores = each.value.cores
    # "host" passes the physical CPU model through; best performance, but
    # limits live migration between heterogeneous hosts.
    type = "host"
  }

  memory {
    dedicated = each.value.memory
  }

  operating_system {
    type = "l26"
  }

  # The guest agent must be running inside Talos (schematic includes
  # qemu-guest-agent); the ipv4/ipv6 locals read agent-reported addresses.
  agent {
    enabled = true
    timeout = "1m"
  }

  network_device {
    bridge      = each.value.public ? "vmpublic0" : "vmprivate0"
    mac_address = macaddress.private[each.key].address
  }

  # Installer ISO; excluded from backups.
  disk {
    file_format  = "raw"
    interface    = "scsi0"
    datastore_id = each.value.boot_pool
    file_id      = proxmox_virtual_environment_download_file.talos.id
    backup       = false
  }

  # Persistent system disk.
  disk {
    file_format  = "raw"
    interface    = "virtio0"
    datastore_id = each.value.boot_pool
    size         = each.value.boot_size
    backup       = true
  }

  initialization {
    # NOTE(review): this lookup runs for EVERY node, but as reviewed the
    # file resource is filtered to control-plane nodes (and iterates a
    # local derived from this VM resource) -- verify a snippet key exists
    # for each node, workers included, and that no dependency cycle forms.
    meta_data_file_id = proxmox_virtual_environment_file.controlplane[each.key].id
  }
}
# Least-privilege role for the Proxmox CSI plugin (audit VMs, attach
# disks, allocate datastore space).
resource "proxmox_virtual_environment_role" "csi" {
  role_id = "kubernetes-csi"
  privileges = [
    "VM.Audit",
    "VM.Config.Disk",
    "Datastore.Allocate",
    "Datastore.AllocateSpace",
    "Datastore.Audit",
  ]
}

# Dedicated service user for the CSI plugin.
# FIX: the role above was created but never granted to anything, so the
# API token below carried no privileges; bind it at the root path.
resource "proxmox_virtual_environment_user" "csi" {
  comment = "Managed by Terraform"
  user_id = "kubernetes-csi@pve"
  enabled = true

  acl {
    path      = "/"
    propagate = true
    role_id   = proxmox_virtual_environment_role.csi.role_id
  }
}

# API token consumed by the proxmox-csi-plugin Helm values.
# NOTE(review): Proxmox API tokens default to privilege separation, in
# which case the token needs its own ACL entry rather than inheriting the
# user's -- verify against the provider/Proxmox docs.
resource "proxmox_virtual_environment_user_token" "csi" {
  comment    = "Managed by Terraform"
  token_name = "terraform"
  user_id    = proxmox_virtual_environment_user.csi.user_id
}
# Least-privilege role for the Proxmox cloud-controller-manager (it only
# needs to audit VMs to resolve node metadata).
resource "proxmox_virtual_environment_role" "ccm" {
  role_id = "kubernetes-ccm"
  privileges = [
    "VM.Audit",
  ]
}

# Dedicated service user for the CCM.
# FIX: the role above was created but never granted to anything, so the
# API token below carried no privileges; bind it at the root path.
resource "proxmox_virtual_environment_user" "ccm" {
  comment = "Managed by Terraform"
  user_id = "kubernetes-ccm@pve"
  enabled = true

  acl {
    path      = "/"
    propagate = true
    role_id   = proxmox_virtual_environment_role.ccm.role_id
  }
}

# API token consumed by the proxmox-cloud-controller-manager Helm values.
# NOTE(review): as with the CSI token, confirm privilege separation does
# not leave this token without the bound role's privileges.
resource "proxmox_virtual_environment_user_token" "ccm" {
  comment    = "Managed by Terraform"
  token_name = "terraform"
  user_id    = proxmox_virtual_environment_user.ccm.user_id
}

View file

@ -5,24 +5,29 @@ variable "nodes" {
host = string
cores = optional(number, 4)
memory = optional(string, 4096)
memory = optional(number, 4096)
boot_pool = string
boot_size = optional(string, "32G")
boot_size = optional(number, 32)
}))
}
variable "versions" {
variable "talos" {
type = object({
talos = optional(string, "v1.7.5")
version = optional(string, "v1.7.5")
kubernetes = optional(string, "v1.30.0")
# Default is:
# customization:
# systemExtensions:
# officialExtensions:
# - siderolabs/kata-containers
# - siderolabs/qemu-guest-agent
# - siderolabs/tailscale
schematic_id = optional(string, "a3f29a65dfd32b73c76f14eef96ef7588cf08c7d737d24fae9b8216d1ffa5c3d")
})
}
variable "iso" {
type = string
description = "Downloaded from factory.talos.dev, select quemu agent and tailscale extensions."
}
variable "allow_scheduling_on_control_planes" {
default = false
@ -52,17 +57,10 @@ variable "tailscale_authkey" {
variable "proxmox" {
type = object({
name = string
url = string
insecure = optional(bool, false)
csi = object({
username = string
token = string
})
ccm = object({
username = string
token = string
})
name = string
url = string
insecure = optional(bool, false)
iso_storage_id = string
})
sensitive = true
}

View file

@ -1,8 +1,8 @@
terraform {
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "3.0.1-rc3"
source = "bpg/proxmox"
version = "0.61.1"
}
talos = {
source = "siderolabs/talos"