Remove cluster (Terraform RKE) layer in favor of k3s

This commit is contained in:
Khue Doan 2021-09-01 16:51:23 +07:00
parent b8d3dde827
commit a8c4572b14
7 changed files with 9 additions and 134 deletions

View File

@ -1,16 +1,12 @@
.POSIX:
default: apply
default: metal bootstrap
.PHONY: metal
metal:
make -C metal
.PHONY: cluster
cluster:
make -C cluster
.PHONY: apps
.PHONY: bootstrap
apps:
make -C apps
@ -22,10 +18,6 @@ tools:
docs:
make -C docs
.PHONY: apply
apply: metal cluster apps
lint:
# TODO (feature) Add lint checks for everything
make -C metal lint
make -C cluster lint

View File

@ -25,12 +25,12 @@ A single `make` command will automatically:
- Build the `./metal` layer:
- Create an ephemeral, stateless PXE server
- Install Linux on all servers in parallel
- Build the `./cluster` layer:
- Create a Kubernetes [cluster](./cluster/cluster.tf) using RKE
- Install some [Helm charts for bootstrap](./cluster/bootstrap.tf)
- Build the `./apps` layer:
- Kustomize creates Argo [applications](./apps/resources)
- ArgoCD installs those applications
- Build a Kubernetes cluster (based on k3s)
- Build the `./bootstrap` layer:
- Install ArgoCD
- Kustomize creates the [root Argo application](./bootstrap/root-app) that will install other Argo applications
From now on, the root app will install the remaining layers (`./system`, `./applications`) and the `./bootstrap` layer will manage itself.
Please visit the [Provisioning flow document](https://khuedoan.github.io/homelab/deployment/provisioning_flow.html) to learn more.

View File

@ -1,7 +1,7 @@
.POSIX:
.EXPORT_ALL_VARIABLES:
KUBECONFIG = ../cluster/kubeconfig.yaml
KUBECONFIG = ../metal/kubeconfig.yaml
default: argocd root-app

View File

@ -1,21 +0,0 @@
.POSIX:
.EXPORT_ALL_VARIABLES:
TF_IN_AUTOMATION = true
default: init apply
init:
terraform init -input=false -backend-config=backend.tfvars
plan:
terraform plan -input=false -out=/tmp/tfplan
apply: plan
terraform apply -input=false /tmp/tfplan
destroy:
terraform destroy
lint:
terraform fmt -recursive -diff -check

View File

@ -1,9 +0,0 @@
# Kubernetes cluster
## `./cluster.tf`
Using Rancher Kubernetes Engine, with some config specific to CoreOS
## `./bootstrap.tf`
Install some essential Helm charts (network, storage, etc.)

View File

@ -1,75 +0,0 @@
provider "rke" {
debug = true
}
locals {
hosts = yamldecode(file("../metal/hosts.yml"))
user = local.hosts.metal.vars.ansible_user
ssh_key_path = local.hosts.metal.vars.ansible_ssh_private_key_file
}
resource "rke_cluster" "cluster" {
dynamic "nodes" {
for_each = [
local.hosts.metal.hosts.metal0.ansible_host,
local.hosts.metal.hosts.metal1.ansible_host,
local.hosts.metal.hosts.metal2.ansible_host
]
content {
address = nodes.value
user = local.user
role = [
"controlplane",
"etcd",
"worker"
]
ssh_key_path = local.ssh_key_path
}
}
dynamic "nodes" {
for_each = [
local.hosts.metal.hosts.metal3.ansible_host
]
content {
address = nodes.value
user = local.user
role = [
"worker"
]
ssh_key_path = local.ssh_key_path
}
}
ingress {
provider = "none"
}
# For CoreOS
network {
plugin = "canal"
options = {
canal_flex_volume_plugin_dir = "/opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds"
flannel_backend_type = "vxlan"
canal_flannel_backend_port = "8472"
canal_flannel_backend_type = "vxlan"
canal_flannel_backend_vni = "1"
}
}
services {
kube_controller {
extra_args = {
flex-volume-plugin-dir = "/opt/kubernetes/kubelet-plugins/volume/exec/"
}
}
}
}
resource "local_file" "kube_config_yaml" {
filename = "${path.root}/kubeconfig.yaml"
sensitive_content = rke_cluster.cluster.kube_config_yaml
file_permission = "0600"
}

View File

@ -1,12 +0,0 @@
terraform {
backend "etcdv3" {
lock = true
}
required_providers {
rke = {
source = "rancher/rke"
version = "1.2.3"
}
}
}