# khuedoan-homelab/infra/cluster.tf
# ECDSA P-256 key pair used for SSH access to every cluster node.
# The public half is injected via cloud-init; the private half is handed
# to Ansible and RKE.
resource "tls_private_key" "ssh" {
algorithm = "ECDSA"
ecdsa_curve = "P256"
}
# Write the generated SSH private key to disk so the Ansible provisioners
# can reference it by path. Uses sensitive_content (not content) so the key
# material is redacted from plan/apply output, consistent with the
# kube_config_yaml resource in this file.
resource "local_file" "ssh_private_key" {
  sensitive_content = tls_private_key.ssh.private_key_pem
  filename          = "${path.module}/private.pem"
  file_permission   = "0600" # owner read/write only — required by ssh
}
# Shared LXD profile applied to every Kubernetes node (masters and workers):
# resource limits, cloud-init bootstrap (SSH key, Docker, open-iscsi),
# a macvlan NIC and a root disk.
resource "lxd_profile" "kubenode" {
  name = "kubenode"

  config = {
    "limits.cpu"         = 2
    "limits.memory"      = "4GiB"
    "limits.memory.swap" = false

    # The settings below are only needed when running Kubernetes nodes as
    # LXD *containers* rather than virtual machines; kept for reference.
    # "security.nesting" = true
    # "security.privileged" = true
    # "linux.kernel_modules" = "ip_tables,ip6_tables,nf_nat,overlay,br_netfilter"
    # "raw.lxc" = <<-EOT
    #   lxc.apparmor.profile=unconfined
    #   lxc.cap.drop=
    #   lxc.cgroup.devices.allow=a
    #   lxc.mount.auto=proc:rw sys:rw cgroup:rw
    # EOT

    # cloud-init: authorize the generated SSH key, then install Docker
    # (from Docker's apt repo) and open-iscsi on first boot.
    "user.user-data" = <<-EOT
      #cloud-config
      ssh_authorized_keys:
      - ${tls_private_key.ssh.public_key_openssh}
      disable_root: false
      runcmd:
      - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
      - add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
      - apt-get update -y
      - apt-get install -y docker-ce docker-ce-cli containerd.io open-iscsi
      - mkdir -p /etc/systemd/system/docker.service.d/
      - printf "[Service]\nMountFlags=shared" > /etc/systemd/system/docker.service.d/mount_flags.conf
      - mount --make-rshared /
      - systemctl enable --now docker
      - systemctl enable --now open-iscsi
    EOT
  }

  # Reference devices for container-mode nodes; not needed for VMs.
  # # echo "262144" > /sys/module/nf_conntrack/parameters/hashsize
  # device {
  #   type = "disk"
  #   name = "hashsize"
  #   properties = {
  #     source = "/sys/module/nf_conntrack/parameters/hashsize"
  #     path   = "/sys/module/nf_conntrack/parameters/hashsize"
  #   }
  # }
  # device {
  #   type = "unix-char"
  #   name = "kmsg"
  #   properties = {
  #     source = "/dev/kmsg"
  #     path   = "/dev/kmsg"
  #   }
  # }

  # macvlan NIC bridged onto the physical interface eno1, so each node
  # gets its own address on the LAN.
  device {
    name = "eth0"
    type = "nic"
    properties = {
      nictype = "macvlan"
      parent  = "eno1"
    }
  }

  # Root disk carved from the default storage pool.
  device {
    type = "disk"
    name = "root"
    properties = {
      pool = "default"
      path = "/"
      size = "32GiB"
    }
  }
}
# TODO (optimize) DRY master and worker definition
# Control-plane nodes (also run etcd) — see the rke_cluster resource below.
resource "lxd_container" "masters" {
  count = 3

  name  = "master-${count.index}"
  image = "ubuntu:20.04"
  # TODO (bug) Use containers instead of virtual machines for Kubernetes nodes https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
  type  = "virtual-machine"

  ephemeral = false
  profiles  = [lxd_profile.kubenode.name]

  config = {
    # TODO (bug) Should be possible to put it in the profile instead lxd_profile.kubenode.config, and make it a variable
    # https://github.com/terraform-lxd/terraform-provider-lxd/blob/master/lxd/resource_lxd_container.go#L473
    "user.access_interface" = "enp5s0"
  }

  # Block until the node answers SSH and Docker's socket exists, so RKE
  # doesn't try to provision a half-booted machine.
  provisioner "local-exec" {
    command = "ansible all -u root --private-key ${local_file.ssh_private_key.filename} -i ${self.ip_address}, -m 'wait_for_connection' -a '' && ansible all -u root -i ${self.ip_address}, -m 'wait_for' -a 'path=/var/run/docker.sock'"
    environment = {
      ANSIBLE_HOST_KEY_CHECKING = "False"
    }
  }
}
# Worker nodes — identical to masters except for the name and RKE role.
resource "lxd_container" "workers" {
  count = 3

  name  = "worker-${count.index}"
  image = "ubuntu:20.04"
  type  = "virtual-machine"

  ephemeral = false
  profiles  = [lxd_profile.kubenode.name]

  config = {
    "user.access_interface" = "enp5s0"
  }

  # Block until the node answers SSH and Docker's socket exists, so RKE
  # doesn't try to provision a half-booted machine.
  provisioner "local-exec" {
    command = "ansible all -u root --private-key ${local_file.ssh_private_key.filename} -i ${self.ip_address}, -m 'wait_for_connection' -a '' && ansible all -u root -i ${self.ip_address}, -m 'wait_for' -a 'path=/var/run/docker.sock'"
    environment = {
      ANSIBLE_HOST_KEY_CHECKING = "False"
    }
  }
}
# RKE Kubernetes cluster spanning the LXD machines above: masters carry
# controlplane + etcd, workers carry the worker role.
resource "rke_cluster" "cluster" {
  dynamic "nodes" {
    for_each = lxd_container.masters

    content {
      address = nodes.value.ip_address
      user    = "root"
      role = [
        "controlplane",
        "etcd"
      ]
      ssh_key = tls_private_key.ssh.private_key_pem
    }
  }

  dynamic "nodes" {
    for_each = lxd_container.workers

    content {
      address = nodes.value.ip_address
      user    = "root"
      role = [
        "worker"
      ]
      ssh_key = tls_private_key.ssh.private_key_pem
    }
  }

  # No bundled ingress controller; one is deployed separately.
  ingress {
    provider = "none"
  }

  ignore_docker_version = true
}
# Persist the kubeconfig generated by RKE so kubectl and later
# provisioning steps can reach the cluster. sensitive_content keeps the
# credentials out of plan/apply output.
resource "local_file" "kube_config_yaml" {
  filename          = "${path.root}/kube_config.yaml"
  sensitive_content = rke_cluster.cluster.kube_config_yaml
  file_permission   = "0600" # kubeconfig contains cluster credentials
}