diff --git a/terraform/.terraform.lock.hcl b/clusters/pink/.terraform.lock.hcl
similarity index 100%
rename from terraform/.terraform.lock.hcl
rename to clusters/pink/.terraform.lock.hcl
diff --git a/clusters/pink/kubeconfig b/clusters/pink/kubeconfig
new file mode 100755
index 0000000..c7d6cca
--- /dev/null
+++ b/clusters/pink/kubeconfig
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: pink
+  cluster:
+    server: https://api.pink.k8s.tjo.cloud:6443
+    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpakNDQVRDZ0F3SUJBZ0lSQVBPU2NEaVZVUGNmRkI1OXl3cE1rQmt3Q2dZSUtvWkl6ajBFQXdJd0ZURVQKTUJFR0ExVUVDaE1LYTNWaVpYSnVaWFJsY3pBZUZ3MHlOREEzTVRReE1ERXhORGxhRncwek5EQTNNVEl4TURFeApORGxhTUJVeEV6QVJCZ05WQkFvVENtdDFZbVZ5Ym1WMFpYTXdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CCkJ3TkNBQVFpa1Y0cVVyVFY5WW1VUzRQZGp5UWhnYkgxaHNNWTlVK0RVVFdWU1pqaGFsRkZuWjBLYTFkMUtGTkoKSFRSa3psdW9va3kxK3JZRkhLejM4SFpqczVIaG8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXdIUVlEVlIwbApCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPCkJCWUVGS1VSWnVWTUlHcHhqRFNKdC80OVBTSDZUSzBmTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDSUFKZXVSdEMKYkR3VGhLSDBYV0hid3YveDV6clJhU2ZhRi9sSnVqcGY3bTMwQWlFQWxyTkx3L056dXFRQmorcnozbXVBdW5ZSAp5NzNNVm9zakRQOTJ2RWo5clBZPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+contexts:
+- name: oidc@pink
+  context:
+    cluster: pink
+    namespace: default
+    user: oidc
+current-context: oidc@pink
+users:
+- name: oidc
+  user:
+    exec:
+      apiVersion: client.authentication.k8s.io/v1beta1
+      command: kubectl
+      args:
+        - oidc-login
+        - get-token
+        - --oidc-issuer-url=https://id.tjo.space/application/o/k8stjocloud/
+        - --oidc-client-id=HAI6rW0EWtgmSPGKAJ3XXzubQTUut2GMeTRS2spg
+        - --oidc-extra-scope=profile
diff --git a/clusters/pink/kubernetes.tf b/clusters/pink/kubernetes.tf
new file mode 100644
index 0000000..cfe9df3
--- /dev/null
+++ b/clusters/pink/kubernetes.tf
@@ -0,0 +1,160 @@
+module "cluster" {
+  source = "../../modules/cluster"
+
+  providers = {
+    helm.template = helm.template
+  }
+
+  nodes = {
+    one = {
+      public    = true
+      type      = "controlplane"
+      host      = "hetzner"
+      boot_pool = "hetzner-main-data"
+    },
+    two = {
+      public    = true
+      type      = "controlplane"
+      host      = "hetzner"
+      boot_pool = "hetzner-main-data"
+    },
+    three = {
+      public    = true
+      type      = "controlplane"
+      host      = "hetzner"
+      boot_pool = "hetzner-main-data"
+    },
+    four = {
+      public    = false
+      type      = "worker"
+      host      = "hetzner"
+      boot_pool = "hetzner-main-data"
+    }
+    five = {
+      public    = false
+      type      = "worker"
+      host      = "hetzner"
+      boot_pool = "hetzner-main-data"
+    }
+  }
+
+  versions = {
+    talos      = "v1.7.5"
+    kubernetes = "v1.30.0"
+  }
+
+  iso = "proxmox-backup-tjo-cloud:iso/talos-v1.7.5-tailscale-metal-amd64.iso"
+
+  cluster = {
+    name   = "pink"
+    domain = "k8s.tjo.cloud"
+    oidc = {
+      client_id  = var.oidc_client_id
+      issuer_url = var.oidc_issuer_url
+    }
+  }
+
+  tailscale_authkey = var.tailscale_authkey
+}
+
+resource "local_file" "kubeconfig" {
+  content  = module.cluster.kubeconfig
+  filename = "${path.module}/kubeconfig"
+}
+
+resource "kubernetes_manifest" "hetzner-nodes-as-loadbalancers" {
+  manifest = {
+    apiVersion = "cilium.io/v2alpha1"
+    kind       = "CiliumLoadBalancerIPPool"
+    metadata = {
+      name = "hetzner-nodes"
+    }
+    spec = {
+      blocks = concat(
+        [for k, node in module.cluster.nodes : { start : node.address_ipv4 } if node.public],
+        # [for k, node in module.cluster.nodes : { start : node.address_ipv6 } if node.public],
+      )
+    }
+  }
+}
+
+# TODO: Certmanager, externaldns...
+resource "helm_release" "cert-manager" {
+  name       = "cert-manager"
+  chart      = "cert-manager"
+  repository = "https://charts.jetstack.io"
+  version    = "v1.15.1"
+  namespace  = "kube-system"
+
+  set {
+    name  = "crds.enabled"
+    value = true
+  }
+}
+
+resource "kubernetes_manifest" "gateway" {
+  manifest = {
+    apiVersion = "gateway.networking.k8s.io/v1"
+    kind       = "Gateway"
+    metadata = {
+      name      = "gateway"
+      namespace = "kube-system"
+    }
+    spec = {
+      gatewayClassName = "cilium"
+      listeners = [
+        { name : "http", protocol : "HTTP", port : 80 },
+        { name : "https", protocol : "HTTPS", port : 443 },
+      ]
+    }
+  }
+}
+
+resource "helm_release" "dashboard" {
+  name       = "kubernetes-dashboard"
+  repository = "https://kubernetes.github.io/dashboard"
+  chart      = "kubernetes-dashboard"
+  version    = "7.5.0"
+  namespace  = "kube-system"
+}
+
+resource "kubernetes_manifest" "dashboard-http-route" {
+  depends_on = [
+    kubernetes_manifest.gateway,
+  ]
+
+  manifest = {
+    apiVersion = "gateway.networking.k8s.io/v1"
+    kind       = "HTTPRoute"
+    metadata = {
+      name      = "dashboard"
+      namespace = "kube-system"
+    }
+    spec = {
+      parentRefs = [
+        { name : "gateway" }
+      ]
+      hostnames = [
+        "dashboard.${module.cluster.name}.${module.cluster.domain}"
+      ]
+      rules = [
+        {
+          matches = [
+            {
+              path : {
+                value : "/"
+                type : "PathPrefix"
+              }
+            }
+          ]
+          backendRefs = [
+            {
+              name : "kubernetes-dashboard-kong-proxy"
+              port : 443
+            }
+          ]
+        }
+      ]
+    }
+  }
+}
diff --git a/terraform/providers.tf b/clusters/pink/providers.tf
similarity index 83%
rename from terraform/providers.tf
rename to clusters/pink/providers.tf
index 38f03a2..d9d32b6 100644
--- a/terraform/providers.tf
+++ b/clusters/pink/providers.tf
@@ -50,8 +50,8 @@ provider "helm" {
 
 provider "helm" {
   kubernetes {
-    host                   = local.cluster_endpoint
-    cluster_ca_certificate = base64decode(data.talos_cluster_kubeconfig.this.kubernetes_client_configuration.ca_certificate)
+    host                   = module.cluster.api.endpoint
+    cluster_ca_certificate = base64decode(module.cluster.api.ca)
     exec {
       api_version = "client.authentication.k8s.io/v1beta1"
       command     = "kubectl"
@@ -70,8 +70,8 @@ provider "helm" {
 }
 
 provider "kubernetes" {
-  host                   = local.cluster_endpoint
-  cluster_ca_certificate = base64decode(data.talos_cluster_kubeconfig.this.kubernetes_client_configuration.ca_certificate)
+  host                   = module.cluster.api.endpoint
+  cluster_ca_certificate = base64decode(module.cluster.api.ca)
   exec {
     api_version = "client.authentication.k8s.io/v1beta1"
     command     = "kubectl"
diff --git a/clusters/pink/variables.tf b/clusters/pink/variables.tf
new file mode 100644
index 0000000..9f71d9e
--- /dev/null
+++ b/clusters/pink/variables.tf
@@ -0,0 +1,21 @@
+variable "tailscale_authkey" {
+  type      = string
+  sensitive = true
+}
+
+variable "oidc_username" {
+  type = string
+}
+
+variable "oidc_password" {
+  type      = string
+  sensitive = true
+}
+
+variable "oidc_client_id" {
+  type = string
+}
+
+variable "oidc_issuer_url" {
+  type = string
+}
diff --git a/terraform/manifests/gateway-api-crds.yaml b/modules/cluster/gateway-api-crds.yaml
similarity index 100%
rename from terraform/manifests/gateway-api-crds.yaml
rename to modules/cluster/gateway-api-crds.yaml
diff --git a/terraform/templates/kubeconfig.tftpl b/modules/cluster/kubeconfig.tftpl
similarity index 100%
rename from terraform/templates/kubeconfig.tftpl
rename to modules/cluster/kubeconfig.tftpl
diff --git a/terraform/kubernetes.tf b/modules/cluster/main.tf
similarity index 60%
rename from terraform/kubernetes.tf
rename to modules/cluster/main.tf
index 8d73775..6668f40 100644
--- a/terraform/kubernetes.tf
+++ b/modules/cluster/main.tf
@@ -1,18 +1,14 @@
 locals {
-  # Downloaded from factory.talos.dev
-  # https://factory.talos.dev/?arch=amd64&board=undefined&cmdline-set=true&extensions=-&extensions=siderolabs%2Fqemu-guest-agent&extensions=siderolabs%2Ftailscale&platform=metal&secureboot=undefined&target=metal&version=1.7.0
-  iso = "proxmox-backup-tjo-cloud:iso/talos-${var.talos_version}-tailscale-metal-amd64.iso"
+  cluster_domain     = "${var.cluster.name}.${var.cluster.domain}"
+  cluster_api_domain = "${var.cluster.api.subdomain}.${local.cluster_domain}"
 
-  boot_pool = "hetzner-main-data"
+  cluster_endpoint = "https://${local.cluster_api_domain}:${var.cluster.api.port}"
 
-  cluster_domain   = "api.${var.cluster_name}.${var.domain}"
-  cluster_port     = 6443
-  cluster_endpoint = "https://${local.cluster_domain}:${local.cluster_port}"
+  nodes = { for k, v in var.nodes : k => merge(v, { name = "${k}.node.${local.cluster_domain}" }) }
 
-  nodes                     = { for k, v in var.nodes : k => merge(v, { name = "${k}.node.${var.cluster_name}.${var.domain}" }) }
-  nodes_with_address        = { for k, v in local.nodes : k => merge(v, { address_ipv4 = proxmox_vm_qemu.this[k].default_ipv4_address, address_ipv6 = proxmox_vm_qemu.this[k].default_ipv6_address }) }
-  first_controlplane_node   = values({ for k, v in local.nodes_with_address : k => v if v.type == "controlplane" })[0]
-  nodes_public_controlplane = { for k, v in proxmox_vm_qemu.this : k => v if var.nodes[k].public && var.nodes[k].type == "controlplane" }
+  nodes_with_address = { for k, v in local.nodes : k => merge(v, { address_ipv4 = proxmox_vm_qemu.this[k].default_ipv4_address, address_ipv6 = proxmox_vm_qemu.this[k].default_ipv6_address }) }
+
+  first_controlplane_node = values({ for k, v in local.nodes_with_address : k => v if v.type == "controlplane" })[0]
 
   podSubnets = [
     "10.200.0.0/16",
@@ -29,8 +25,6 @@ locals {
     "100.64.0.0/10",
     "fd7a:115c:a1e0::/96"
   ]
-
-  apply_mode = "reboot"
 }
 
 resource "macaddress" "this" {
@@ -47,8 +41,8 @@ resource "proxmox_vm_qemu" "this" {
     each.value.public ? ["public"] : ["private"],
   ))
 
-  cores  = 4
-  memory = 4096
+  cores  = each.value.cores
+  memory = each.value.memory
 
   scsihw  = "virtio-scsi-pci"
   qemu_os = "l26"
@@ -65,15 +59,15 @@ resource "proxmox_vm_qemu" "this" {
     scsi {
       scsi0 {
        cdrom {
-          iso = local.iso
+          iso = var.iso
        }
      }
    }
    virtio {
      virtio0 {
        disk {
-          size    = "32G"
-          storage = local.boot_pool
+          size    = each.value.boot_size
+          storage = each.value.boot_pool
        }
      }
    }
@@ -83,9 +77,9 @@
 resource "digitalocean_record" "controlplane-A" {
   for_each = { for k, v in proxmox_vm_qemu.this : k => v if var.nodes[k].public && var.nodes[k].type == "controlplane" }
 
-  domain = var.domain
+  domain = var.cluster.domain
   type   = "A"
-  name   = "api.${var.cluster_name}"
+  name   = "${var.cluster.api.subdomain}.${var.cluster.name}"
   value  = each.value.default_ipv4_address
   ttl    = 30
 }
@@ -93,9 +87,9 @@
 resource "digitalocean_record" "controlplane-AAAA" {
   for_each = { for k, v in proxmox_vm_qemu.this : k => v if var.nodes[k].public && var.nodes[k].type == "controlplane" }
 
-  domain = var.domain
+  domain = var.cluster.domain
   type   = "AAAA"
-  name   = "api.${var.cluster_name}"
+  name   = "${var.cluster.api.subdomain}.${var.cluster.name}"
   value  = each.value.default_ipv6_address
   ttl    = 30
 }
@@ -103,13 +97,13 @@
 resource "talos_machine_secrets" "this" {}
 
 data "talos_machine_configuration" "controlplane" {
-  cluster_name     = var.cluster_name
+  cluster_name     = var.cluster.name
   machine_type     = "controlplane"
   cluster_endpoint = local.cluster_endpoint
   machine_secrets  = talos_machine_secrets.this.machine_secrets
 
-  talos_version      = var.talos_version
-  kubernetes_version = var.kubernetes_version
+  talos_version      = var.versions.talos
+  kubernetes_version = var.versions.kubernetes
 
   depends_on = [
     digitalocean_record.controlplane-A,
@@ -118,13 +112,13 @@
 data "talos_machine_configuration" "worker" {
-  cluster_name     = var.cluster_name
+  cluster_name     = var.cluster.name
   machine_type     = "worker"
   cluster_endpoint = local.cluster_endpoint
   machine_secrets  = talos_machine_secrets.this.machine_secrets
 
-  talos_version      = var.talos_version
-  kubernetes_version = var.kubernetes_version
+  talos_version      = var.versions.talos
+  kubernetes_version = var.versions.kubernetes
 
   depends_on = [
     digitalocean_record.controlplane-A,
@@ -153,7 +147,7 @@ data "helm_template" "cilium" {
   version    = "1.15.6"
   namespace  = "kube-system"
 
-  kube_version = var.kubernetes_version
+  kube_version = var.versions.kubernetes
   api_versions = [
     "gateway.networking.k8s.io/v1/GatewayClass",
   ]
@@ -194,8 +188,8 @@ data "helm_template" "cilium" {
         },
         hostRoot : "/sys/fs/cgroup"
       },
-      k8sServiceHost : local.cluster_domain
-      k8sServicePort : local.cluster_port
+      k8sServiceHost : local.cluster_api_domain
+      k8sServicePort : var.cluster.api.port
       ipv4 : {
        enabled : true
      },
@@ -222,11 +216,6 @@ data "helm_template" "cilium" {
        enabled : true
        hostNetwork : {
          enabled : true
-          nodes : {
-            matchLabels : {
-              "k8s.tjo.cloud/gateway" : "true"
-            }
-          }
        }
      }
      envoy : {
@@ -254,14 +243,14 @@ resource "talos_machine_configuration_apply" "controlplane" {
   node     = each.value.name
   endpoint = each.value.address_ipv4
 
-  apply_mode = local.apply_mode
+  apply_mode = "reboot"
 
   config_patches = [
     yamlencode({
       cluster : {
         controlPlane : {
           endpoint : local.cluster_endpoint
-          localAPIServerPort : local.cluster_port
+          localAPIServerPort : var.cluster.api.port
         }
         etcd : {
           #advertisedSubnets : [
@@ -309,7 +298,7 @@ resource "talos_machine_configuration_apply" "controlplane" {
             },
             {
               name : "gateway-api-crds"
-              contents : file("${path.module}/manifests/gateway-api-crds.yaml")
+              contents : file("${path.module}/gateway-api-crds.yaml")
             },
             {
               name : "cilium"
@@ -327,11 +316,12 @@ resource "talos_machine_configuration_apply" "controlplane" {
           hostname = each.value.name
         }
         install = {
-          image = "factory.talos.dev/installer/7d4c31cbd96db9f90c874990697c523482b2bae27fb4631d5583dcd9c281b1ff:${var.talos_version}"
+          image = "factory.talos.dev/installer/7d4c31cbd96db9f90c874990697c523482b2bae27fb4631d5583dcd9c281b1ff:${var.versions.talos}"
           disk  = data.talos_machine_disks.boot[each.key].disks[0].name
         }
         nodeLabels = {
-          "k8s.tjo.cloud/gateway" = "true"
+          "k8s.tjo.cloud/public" = each.value.public ? "true" : "false"
+          "k8s.tjo.cloud/host"   = each.value.host
         }
       }
     }),
@@ -355,14 +345,14 @@ resource "talos_machine_configuration_apply" "worker" {
   node     = each.value.name
   endpoint = each.value.address_ipv4
 
-  apply_mode = local.apply_mode
+  apply_mode = "reboot"
 
   config_patches = [
     yamlencode({
       cluster : {
         controlPlane : {
           endpoint : local.cluster_endpoint
-          localAPIServerPort : local.cluster_port
+          localAPIServerPort : var.cluster.api.port
         }
         network : {
           cni : {
@@ -374,17 +364,6 @@ resource "talos_machine_configuration_apply" "worker" {
         proxy : {
           disabled : true
         }
-        allowSchedulingOnControlPlanes : true
-        apiServer : {
-          extraArgs : {
-            "oidc-issuer-url" : var.oidc_issuer_url
-            "oidc-client-id" : var.oidc_client_id
-            "oidc-username-claim" : "sub"
-            "oidc-username-prefix" : "oidc:"
-            "oidc-groups-claim" : "groups"
-            "oidc-groups-prefix" : "oidc:groups:"
-          }
-        }
       }
       machine = {
         kubelet = {
@@ -396,9 +375,13 @@ resource "talos_machine_configuration_apply" "worker" {
           hostname = each.value.name
         }
         install = {
-          image = "factory.talos.dev/installer/7d4c31cbd96db9f90c874990697c523482b2bae27fb4631d5583dcd9c281b1ff:${var.talos_version}"
+          image = "factory.talos.dev/installer/7d4c31cbd96db9f90c874990697c523482b2bae27fb4631d5583dcd9c281b1ff:${var.versions.talos}"
           disk  = data.talos_machine_disks.boot[each.key].disks[0].name
         }
+        nodeLabels = {
+          "k8s.tjo.cloud/public" = each.value.public ? "true" : "false"
+          "k8s.tjo.cloud/host"   = each.value.host
+        }
       }
     }),
     yamlencode({
@@ -429,135 +412,5 @@ data "talos_cluster_kubeconfig" "this" {
   ]
 
   client_configuration = talos_machine_secrets.this.client_configuration
-  node                 = values(local.nodes_public_controlplane)[0].default_ipv4_address
-}
-
-resource "local_file" "kubeconfig" {
-  content = templatefile("${path.module}/templates/kubeconfig.tftpl", {
-    cluster : {
-      name : var.cluster_name,
-      endpoint : local.cluster_endpoint,
-      ca : data.talos_cluster_kubeconfig.this.kubernetes_client_configuration.ca_certificate,
-    }
-    oidc : {
-      issuer : var.oidc_issuer_url,
-      id : var.oidc_client_id,
-    }
-  })
-  filename = "${path.module}/kubeconfig"
-}
-
-resource "kubernetes_manifest" "hetzner-nodes-as-loadbalancers" {
-  depends_on = [
-    talos_machine_bootstrap.this
-  ]
-
-  manifest = {
-    apiVersion = "cilium.io/v2alpha1"
-    kind       = "CiliumLoadBalancerIPPool"
-    metadata = {
-      name = "hetzner-nodes"
-    }
-    spec = {
-      blocks = concat(
-        [for k, v in proxmox_vm_qemu.this : { start : v.default_ipv4_address } if var.nodes[k].public],
-        #[for k, v in proxmox_vm_qemu.this : { start : v.default_ipv6_address } if var.nodes[k].public],
-      )
-    }
-  }
-}
-
-# TODO: Certmanager, externaldns...
-resource "helm_release" "cert-manager" {
-  depends_on = [
-    talos_machine_bootstrap.this
-  ]
-
-  name       = "cert-manager"
-  chart      = "cert-manager"
-  repository = "https://charts.jetstack.io"
-  version    = "v1.15.1"
-  namespace  = "kube-system"
-
-  set {
-    name  = "crds.enabled"
-    value = true
-  }
-}
-
-resource "kubernetes_manifest" "gateway" {
-  depends_on = [
-    talos_machine_bootstrap.this
-  ]
-
-  manifest = {
-    apiVersion = "gateway.networking.k8s.io/v1"
-    kind       = "Gateway"
-    metadata = {
-      name      = "gateway"
-      namespace = "kube-system"
-    }
-    spec = {
-      gatewayClassName = "cilium"
-      listeners = [
-        { name : "http", protocol : "HTTP", port : 80 },
-        { name : "https", protocol : "HTTPS", port : 443 },
-      ]
-    }
-  }
-}
-
-resource "helm_release" "dashboard" {
-  depends_on = [
-    talos_machine_bootstrap.this
-  ]
-
-  name       = "kubernetes-dashboard"
-  repository = "https://kubernetes.github.io/dashboard"
-  chart      = "kubernetes-dashboard"
-  version    = "7.5.0"
-  namespace  = "kube-system"
-}
-
-resource "kubernetes_manifest" "dashoard-http-route" {
-  depends_on = [
-    talos_machine_bootstrap.this,
-    kubernetes_manifest.gateway,
-  ]
-
-  manifest = {
-    apiVersion = "gateway.networking.k8s.io/v1"
-    kind       = "HTTPRoute"
-    metadata = {
-      name      = "dashboard"
-      namespace = "kube-system"
-    }
-    spec = {
-      parentRefs = [
-        { name : "gateway" }
-      ]
-      hostnames = [
-        "dashboard.${var.cluster_name}.${var.domain}"
-      ]
-      rules = [
-        {
-          matches = [
-            {
-              path : {
-                value : "/"
-                type : "PathPrefix"
-              }
-            }
-          ]
-          backendRefs = [
-            {
-              name : "kubernetes-dashboard-kong-proxy"
-              port : 443
-            }
-          ]
-        }
-      ]
-
-    }
-  }
+  node = local.first_controlplane_node.address_ipv4
 }
diff --git a/modules/cluster/outputs.tf b/modules/cluster/outputs.tf
new file mode 100644
index 0000000..228ffbe
--- /dev/null
+++ b/modules/cluster/outputs.tf
@@ -0,0 +1,32 @@
+output "kubeconfig" {
+  value = templatefile("${path.module}/kubeconfig.tftpl", {
+    cluster : {
+      name : var.cluster.name,
+      endpoint : local.cluster_endpoint,
+      ca : data.talos_cluster_kubeconfig.this.kubernetes_client_configuration.ca_certificate,
+    }
+    oidc : {
+      issuer : var.cluster.oidc.issuer_url,
+      id : var.cluster.oidc.client_id,
+    }
+  })
+}
+
+output "name" {
+  value = var.cluster.name
+}
+
+output "domain" {
+  value = var.cluster.domain
+}
+
+output "api" {
+  value = merge(var.cluster.api, {
+    endpoint : local.cluster_endpoint,
+    ca : data.talos_cluster_kubeconfig.this.kubernetes_client_configuration.ca_certificate,
+  })
+}
+
+output "nodes" {
+  value = local.nodes_with_address
+}
diff --git a/modules/cluster/variables.tf b/modules/cluster/variables.tf
new file mode 100644
index 0000000..80bd562
--- /dev/null
+++ b/modules/cluster/variables.tf
@@ -0,0 +1,45 @@
+variable "nodes" {
+  type = map(object({
+    public = bool
+    type   = string
+    host   = string
+
+    cores  = optional(number, 4)
+    memory = optional(string, 4096)
+
+    boot_pool = string
+    boot_size = optional(string, "32G")
+  }))
+}
+
+variable "versions" {
+  type = object({
+    talos      = optional(string, "v1.7.5")
+    kubernetes = optional(string, "v1.30.0")
+  })
+}
+
+variable "iso" {
+  type        = string
+  description = "Downloaded from factory.talos.dev; select the qemu-guest-agent and tailscale extensions."
+}
+
+variable "cluster" {
+  type = object({
+    name   = string
+    domain = string
+    api = optional(object({
+      subdomain = optional(string, "api")
+      port      = optional(number, 6443)
+    }), {})
+    oidc = object({
+      client_id  = string
+      issuer_url = string
+    })
+  })
+}
+
+variable "tailscale_authkey" {
+  type      = string
+  sensitive = true
+}
diff --git a/modules/cluster/versions.tf b/modules/cluster/versions.tf
new file mode 100644
index 0000000..70f2b9e
--- /dev/null
+++ b/modules/cluster/versions.tf
@@ -0,0 +1,39 @@
+terraform {
+  required_providers {
+    proxmox = {
+      source  = "Telmate/proxmox"
+      version = "3.0.1-rc3"
+    }
+    talos = {
+      source  = "siderolabs/talos"
+      version = "0.5.0"
+    }
+    local = {
+      source  = "hashicorp/local"
+      version = "1.4.0"
+    }
+    digitalocean = {
+      source  = "digitalocean/digitalocean"
+      version = "~> 2.0"
+    }
+    random = {
+      source  = "hashicorp/random"
+      version = "3.6.2"
+    }
+    macaddress = {
+      source  = "ivoronin/macaddress"
+      version = "0.3.2"
+    }
+    helm = {
+      source  = "hashicorp/helm"
+      version = "2.14.0"
+      configuration_aliases = [
+        helm.template
+      ]
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "2.31.0"
+    }
+  }
+}
diff --git a/terraform/.gitignore b/terraform/.gitignore
deleted file mode 100644
index 482313f..0000000
--- a/terraform/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-kubeconfig
-!templates/kubeconfig
-talosconfig
diff --git a/terraform/variables.tf b/terraform/variables.tf
deleted file mode 100644
index 509d48b..0000000
--- a/terraform/variables.tf
+++ /dev/null
@@ -1,47 +0,0 @@
-variable "nodes" {
-  type = map(object({
-    public = bool
-    type   = string
-    host   = string
-  }))
-}
-
-variable "talos_version" {
-  type    = string
-  default = "v1.7.5"
-}
-
-variable "kubernetes_version" {
-  type    = string
-  default = "v1.30.0"
-}
-
-variable "cluster_name" {
-  type = string
-}
-
-variable "domain" {
-  type = string
-}
-
-variable "tailscale_authkey" {
-  type      = string
-  sensitive = true
-}
-
-variable "oidc_username" {
-  type = string
-}
-
-variable "oidc_password" {
-  type      = string
-  sensitive = true
-}
-
-variable "oidc_client_id" {
-  type = string
-}
-
-variable "oidc_issuer_url" {
-  type = string
-}
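
For reference, the relocated root module in clusters/pink still takes its secrets and OIDC settings as input variables. A minimal sketch of the expected values is shown below, assuming they are supplied through a local terraform.tfvars file rather than TF_VAR_* environment variables; the issuer URL and client ID are the ones already present in the committed kubeconfig, while the remaining values are placeholders, not real credentials.

# clusters/pink/terraform.tfvars -- example only, placeholder secrets, do not commit real values
tailscale_authkey = "tskey-auth-xxxxxxxxxxxx"   # placeholder
oidc_username     = "terraform"                 # placeholder
oidc_password     = "change-me"                 # placeholder
oidc_client_id    = "HAI6rW0EWtgmSPGKAJ3XXzubQTUut2GMeTRS2spg"
oidc_issuer_url   = "https://id.tjo.space/application/o/k8stjocloud/"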