feat: working bgp

Tine 2024-12-15 18:35:26 +01:00
parent 311b82d212
commit bca4a4aa6e
Signed by: mentos1386
SSH key fingerprint: SHA256:MNtTsLbihYaWF8j1fkOHfkKNlnN1JQfxEU/rBU8nCGw
6 changed files with 196 additions and 79 deletions


@@ -20,6 +20,14 @@ module "cluster" {
       client_id  = var.oidc_client_id
       issuer_url = var.oidc_issuer_url
     }
+    pod_cidr = {
+      ipv4 = "10.0.240.0/21"
+      ipv6 = "fd74:6a6f:0:f000::/53"
+    }
+    service_cidr = {
+      ipv4 = "10.0.248.0/22"
+      ipv6 = "fd74:6a6f:0:f800::/108"
+    }
   }
   proxmox = {
@@ -28,8 +36,17 @@ module "cluster" {
     common_storage = "synology.storage.tjo.cloud"
   }
+  hosts = {
+    nevaroo = {
+      asn = 65003
+    }
+    mustafar = {
+      asn = 65004
+    }
+  }
   nodes = {
-    nevaroo-cp = {
+    nevaroo-1 = {
       id      = 6001
       type    = "controlplane"
       host    = "nevaroo"
@@ -37,39 +54,6 @@ module "cluster" {
       cores   = 4
       memory  = 4096
     }
-    #mustafar-cp = {
-    #  id      = 6000
-    #  type    = "controlplane"
-    #  host    = "mustafar"
-    #  storage = "local"
-    #  cores   = 2
-    #  memory  = 4096
-    #}
-    #jakku-cp = {
-    #  id      = 6000
-    #  type    = "controlplane"
-    #  host    = "jakku"
-    #  storage = "local-nvme"
-    #  cores   = 2
-    #  memory  = 4096
-    #}
-    #batuu-cp = {
-    #  id      = 6000
-    #  type    = "controlplane"
-    #  host    = "batuu"
-    #  storage = "local-nvme"
-    #  cores   = 2
-    #  memory  = 4096
-    #}
-    nevaro-w1 = {
-      id      = 6002
-      type    = "worker"
-      host    = "nevaroo"
-      storage = "local-nvme-lvm"
-      cores   = 8
-      memory  = 24576
-    }
     mustafar-1 = {
       id   = 6000
       type = "worker"


@@ -1,11 +1,11 @@
-# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.2/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.4
-    operator.prometheus.io/version: 0.78.2
+    controller-gen.kubebuilder.io/version: v0.16.5
+    operator.prometheus.io/version: 0.79.0
   name: podmonitors.monitoring.coreos.com
 spec:
   group: monitoring.coreos.com
@@ -760,10 +760,16 @@ spec:
                 type: string
               port:
                 description: |-
-                  Name of the Pod port which this endpoint refers to.
-                  It takes precedence over `targetPort`.
+                  The `Pod` port name which exposes the endpoint.
+                  It takes precedence over the `portNumber` and `targetPort` fields.
                 type: string
+              portNumber:
+                description: The `Pod` port number which exposes the endpoint.
+                format: int32
+                maximum: 65535
+                minimum: 1
+                type: integer
               proxyUrl:
                 description: |-
                   `proxyURL` configures the HTTP Proxy URL (e.g.
@@ -892,7 +898,7 @@ spec:
                   Name or number of the target port of the `Pod` object behind the Service, the
                   port must be specified with container port property.
-                  Deprecated: use 'port' instead.
+                  Deprecated: use 'port' or 'portNumber' instead.
                 x-kubernetes-int-or-string: true
               tlsConfig:
                 description: TLS configuration to use when scraping the target.
@@ -1088,6 +1094,18 @@ spec:
                 Whether to scrape a classic histogram that is also exposed as a native histogram.
                 It requires Prometheus >= v2.45.0.
               type: boolean
+            scrapeFallbackProtocol:
+              description: |-
+                The protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type.
+                It requires Prometheus >= v3.0.0.
+              enum:
+                - PrometheusProto
+                - OpenMetricsText0.0.1
+                - OpenMetricsText1.0.0
+                - PrometheusText0.0.4
+                - PrometheusText1.0.0
+              type: string
             scrapeProtocols:
              description: |-
                `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the
@@ -1104,11 +1122,13 @@ spec:
                * `OpenMetricsText1.0.0`
                * `PrometheusProto`
                * `PrometheusText0.0.4`
+               * `PrometheusText1.0.0`
              enum:
                - PrometheusProto
                - OpenMetricsText0.0.1
                - OpenMetricsText1.0.0
                - PrometheusText0.0.4
+               - PrometheusText1.0.0
              type: string
             type: array
             x-kubernetes-list-type: set
@@ -1159,6 +1179,18 @@ spec:
                 type: object
               type: object
               x-kubernetes-map-type: atomic
+            selectorMechanism:
+              description: |-
+                Mechanism used to select the endpoints to scrape.
+                By default, the selection process relies on relabel configurations to filter the discovered targets.
+                Alternatively, you can opt in for role selectors, which may offer better efficiency in large clusters.
+                Which strategy is best for your use case needs to be carefully evaluated.
+                It requires Prometheus >= v2.17.0.
+              enum:
+                - RelabelConfig
+                - RoleSelector
+              type: string
             targetLimit:
               description: |-
                 `targetLimit` defines a limit on the number of scraped targets that will
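For reference, here is a minimal PodMonitor sketch exercising the two fields this CRD bump introduces (`portNumber` on the endpoint, `scrapeFallbackProtocol` on the spec). The resource name, label selector, and port number are hypothetical, not taken from this repository:

apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: example-app                           # hypothetical name
spec:
  selector:
    matchLabels:
      app: example-app                        # hypothetical label
  scrapeFallbackProtocol: PrometheusText0.0.4 # used when a scrape returns an invalid Content-Type (needs Prometheus >= v3.0.0)
  podMetricsEndpoints:
    - portNumber: 9090                        # numeric alternative to the named `port` field; `port` takes precedence if both are set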


@@ -1,11 +1,11 @@
-# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.78.2/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.79.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
 ---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.16.4
-    operator.prometheus.io/version: 0.78.2
+    controller-gen.kubebuilder.io/version: v0.16.5
+    operator.prometheus.io/version: 0.79.0
   name: servicemonitors.monitoring.coreos.com
 spec:
   group: monitoring.coreos.com
@@ -1108,6 +1108,18 @@ spec:
                 Whether to scrape a classic histogram that is also exposed as a native histogram.
                 It requires Prometheus >= v2.45.0.
               type: boolean
+            scrapeFallbackProtocol:
+              description: |-
+                The protocol to use if a scrape returns blank, unparseable, or otherwise invalid Content-Type.
+                It requires Prometheus >= v3.0.0.
+              enum:
+                - PrometheusProto
+                - OpenMetricsText0.0.1
+                - OpenMetricsText1.0.0
+                - PrometheusText0.0.4
+                - PrometheusText1.0.0
+              type: string
             scrapeProtocols:
              description: |-
                `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the
@@ -1124,11 +1136,13 @@ spec:
                * `OpenMetricsText1.0.0`
                * `PrometheusProto`
                * `PrometheusText0.0.4`
+               * `PrometheusText1.0.0`
              enum:
                - PrometheusProto
                - OpenMetricsText0.0.1
                - OpenMetricsText1.0.0
                - PrometheusText0.0.4
+               - PrometheusText1.0.0
              type: string
             type: array
             x-kubernetes-list-type: set
@@ -1179,6 +1193,18 @@ spec:
                 type: object
               type: object
               x-kubernetes-map-type: atomic
+            selectorMechanism:
+              description: |-
+                Mechanism used to select the endpoints to scrape.
+                By default, the selection process relies on relabel configurations to filter the discovered targets.
+                Alternatively, you can opt in for role selectors, which may offer better efficiency in large clusters.
+                Which strategy is best for your use case needs to be carefully evaluated.
+                It requires Prometheus >= v2.17.0.
+              enum:
+                - RelabelConfig
+                - RoleSelector
+              type: string
             targetLabels:
               description: |-
                 `targetLabels` defines the labels which are transferred from the
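Similarly, a minimal ServiceMonitor sketch opting into the new `selectorMechanism` field; the resource name, label selector, and port name are placeholders:

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-service               # hypothetical name
spec:
  selectorMechanism: RoleSelector     # opt in to role selectors instead of relabel-based filtering (Prometheus >= v2.17.0)
  selector:
    matchLabels:
      app: example-service            # hypothetical label
  endpoints:
    - port: metrics                   # hypothetical port name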


@@ -29,12 +29,12 @@ data "helm_template" "cilium" {
     enableIPv4Masquerade: true
     ipv4:
       enabled: true
-    ipv4NativeRoutingCIDR: "10.0.0.0/16"
+    ipv4NativeRoutingCIDR: "${var.cluster.pod_cidr.ipv4}"
     enableIPv6Masquerade: true
     ipv6:
       enabled: false
-    ipv6NativeRoutingCIDR: "fd74:6a6f:0::/48"
+    ipv6NativeRoutingCIDR: "${var.cluster.pod_cidr.ipv6}"
     kubeProxyReplacement: true


@@ -4,15 +4,6 @@ locals {
   cluster_internal_endpoint = "https://${local.internal_domain}:${var.cluster.api.internal.port}"
   cluster_public_endpoint   = "https://${local.public_domain}:${var.cluster.api.public.port}"
-  podSubnets = [
-    "10.0.240.0/21",
-    "fd74:6a6f:0:f000::/53",
-  ]
-  serviceSubnets = [
-    "10.0.248.0/22",
-    "fd74:6a6f:0:f800::/108",
-  ]
   talos_controlplane_config = {
     machine = {
       kubelet = {
@@ -35,7 +26,7 @@ locals {
       }
     }
     cluster = {
-      allowSchedulingOnControlPlanes = var.allow_scheduling_on_control_planes,
+      allowSchedulingOnControlPlanes = true,
       apiServer = {
         certSANs = [
           local.public_domain,
@@ -51,7 +42,7 @@ locals {
           "oidc-groups-prefix" = "oidc:groups:",
         }
       }
-      inlineManifests = [
+      inlineManifests = concat([
         {
           name     = "proxmox-cloud-controller-manager"
           contents = data.helm_template.proxmox-ccm.manifest
@@ -72,6 +63,47 @@ locals {
           name     = "cilium"
           contents = data.helm_template.cilium.manifest
         },
+        {
+          name     = "cilium-bgp-advertisement"
+          contents = <<-EOF
+            apiVersion: cilium.io/v2alpha1
+            kind: CiliumBGPAdvertisement
+            metadata:
+              name: pods-and-services
+              labels:
+                k8s.tjo.cloud/default: "true"
+            spec:
+              advertisements:
+                - advertisementType: "PodCIDR"
+                - advertisementType: "Service"
+                  service:
+                    addresses:
+                      - ClusterIP
+                      - ExternalIP
+                      - LoadBalancerIP
+          EOF
+        },
+        {
+          name     = "cilium-bgp-peer-config"
+          contents = <<-EOF
+            apiVersion: cilium.io/v2alpha1
+            kind: CiliumBGPPeerConfig
+            metadata:
+              name: default
+            spec:
+              families:
+                - afi: ipv4
+                  safi: unicast
+                  advertisements:
+                    matchLabels:
+                      k8s.tjo.cloud/default: "true"
+                - afi: ipv6
+                  safi: unicast
+                  advertisements:
+                    matchLabels:
+                      k8s.tjo.cloud/default: "true"
+          EOF
+        },
         {
           name     = "oidc-admins"
           contents = <<-EOF
@@ -89,7 +121,36 @@ locals {
              apiGroup: rbac.authorization.k8s.io
          EOF
         },
+        ],
+        [for name, attributes in var.hosts : {
+          name     = "cilium-bgp-node-config-override-${name}"
+          contents = <<-EOF
+            apiVersion: cilium.io/v2alpha1
+            kind: CiliumBGPClusterConfig
+            metadata:
+              name: ${name}
+            spec:
+              gracefulRestart:
+                enabled: true
+                restartTimeSeconds: 15
+              nodeSelector:
+                matchLabels:
+                  k8s.tjo.cloud/bgp: "true"
+                  k8s.tjo.cloud/host: ${name}
+                  k8s.tjo.cloud/proxmox: ${var.proxmox.name}
+              bgpInstances:
+                - name: "${name}"
+                  localASN: ${attributes.asn}
+                  peers:
+                    - name: "local-router-vip"
+                      peerASN: ${attributes.asn}
+                      peerAddress: "10.0.0.1"
+                      peerConfigRef:
+                        name: "default"
+          EOF
+        }
         ]
+      )
     }
   }
@@ -99,8 +160,14 @@ locals {
       cni = {
         name = "none"
       }
-      podSubnets     = local.podSubnets
-      serviceSubnets = local.serviceSubnets
+      podSubnets = [
+        var.cluster.pod_cidr.ipv4,
+        var.cluster.pod_cidr.ipv6
+      ]
+      serviceSubnets = [
+        var.cluster.service_cidr.ipv4,
+        var.cluster.service_cidr.ipv6
+      ]
     }
     proxy = {
       disabled = true
@@ -128,6 +195,7 @@ locals {
         hostname = node.name
       }
       nodeLabels = {
+        "k8s.tjo.cloud/bgp"     = "true"
         "k8s.tjo.cloud/host"    = node.host
         "k8s.tjo.cloud/proxmox" = var.proxmox.name
       }
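To make the for-expression concrete: it renders one CiliumBGPClusterConfig per entry in the `hosts` map, so with this commit's values the manifest for `nevaroo` (ASN 65003) expands to roughly the following. The `k8s.tjo.cloud/proxmox` value is supplied by `var.proxmox.name`, which this diff does not show, so it is left as a placeholder:

apiVersion: cilium.io/v2alpha1
kind: CiliumBGPClusterConfig
metadata:
  name: nevaroo
spec:
  gracefulRestart:
    enabled: true
    restartTimeSeconds: 15
  nodeSelector:
    matchLabels:
      k8s.tjo.cloud/bgp: "true"
      k8s.tjo.cloud/host: nevaroo
      k8s.tjo.cloud/proxmox: <var.proxmox.name>   # placeholder; actual value not shown in this diff
  bgpInstances:
    - name: "nevaroo"
      localASN: 65003
      peers:
        - name: "local-router-vip"
          peerASN: 65003          # same ASN on both ends, i.e. an iBGP session with the router VIP at 10.0.0.1
          peerAddress: "10.0.0.1"
          peerConfigRef:
            name: "default"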


@@ -12,6 +12,12 @@ variable "nodes" {
   }))
 }
 
+variable "hosts" {
+  type = map(object({
+    asn = number
+  }))
+}
+
 variable "talos" {
   type = object({
     version = optional(string, "v1.8.3")
@@ -28,13 +34,6 @@ variable "talos" {
   })
 }
 
-variable "allow_scheduling_on_control_planes" {
-  default     = false
-  type        = bool
-  description = "Allow scheduling on control plane nodes"
-}
-
 variable "cluster" {
   type = object({
     name = string
@@ -54,6 +53,14 @@ variable "cluster" {
       client_id  = string
       issuer_url = string
     })
+    pod_cidr = object({
+      ipv4 = string
+      ipv6 = string
+    })
+    service_cidr = object({
+      ipv4 = string
+      ipv6 = string
+    })
   })
 }