Move Cluster API to infra and remove RKE and Terraform

Khue Doan 2021-07-21 00:19:39 +07:00
parent 175321f3cf
commit d09a52eb7b
No known key found for this signature in database
GPG Key ID: 4C1A90A461B3C27B
12 changed files with 0 additions and 160 deletions

@@ -1,21 +0,0 @@
.POSIX:
.EXPORT_ALL_VARIABLES:

TF_IN_AUTOMATION = true

default: init apply

init:
	terraform init -input=false -backend-config=backend.tfvars

plan:
	terraform plan -input=false -out=/tmp/tfplan

apply: plan
	terraform apply -input=false /tmp/tfplan

destroy:
	terraform destroy

lint:
	terraform fmt -recursive -diff -check
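
The `init` target reads backend settings from a `backend.tfvars` file that is not part of this commit. Against the `etcdv3` backend declared in the Terraform settings further down, it might have looked roughly like the following sketch; the endpoint address and state prefix are placeholders, not values from the repository:

# Hypothetical backend.tfvars sketch; the real file was never committed.
endpoints = ["https://10.0.0.1:2379"] # placeholder etcd endpoint
prefix    = "homelab-terraform/"      # placeholder state key prefix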

@@ -1,9 +0,0 @@
# Private cloud infrastructure

## Kubernetes cluster `./cluster.tf`

Using Rancher Kubernetes Engine, with some config specific to CoreOS

## Bootstrap `./bootstrap.tf`

Install some essential Helm charts (network, storage,...)

@@ -1,34 +0,0 @@
provider "helm" {
  kubernetes {
    host                   = rke_cluster.cluster.api_server_url
    client_certificate     = rke_cluster.cluster.client_cert
    client_key             = rke_cluster.cluster.client_key
    cluster_ca_certificate = rke_cluster.cluster.ca_crt
  }
}

resource "helm_release" "argocd" {
  name             = "argocd"
  repository       = "https://argoproj.github.io/argo-helm"
  chart            = "argo-cd"
  version          = "3.6.8"
  namespace        = "argocd"
  create_namespace = true
  wait             = true
  timeout          = 600

  values = [
    file("${path.module}/values/argocd.yaml")
  ]
}

resource "helm_release" "longhorn" {
  name             = "longhorn"
  repository       = "https://charts.longhorn.io"
  chart            = "longhorn"
  version          = "1.1.1"
  namespace        = "longhorn"
  create_namespace = true
  wait             = true
  timeout          = 600
}

@@ -1,75 +0,0 @@
provider "rke" {
  debug = true
}

locals {
  hosts        = yamldecode(file("../metal/hosts.yml"))
  user         = local.hosts.metal.vars.ansible_user
  ssh_key_path = local.hosts.metal.vars.ansible_ssh_private_key_file
}

resource "rke_cluster" "cluster" {
  dynamic "nodes" {
    for_each = [
      local.hosts.metal.hosts.metal0.ansible_host,
      local.hosts.metal.hosts.metal1.ansible_host,
      local.hosts.metal.hosts.metal2.ansible_host
    ]

    content {
      address = nodes.value
      user    = local.user
      role = [
        "controlplane",
        "etcd",
        "worker"
      ]
      ssh_key_path = local.ssh_key_path
    }
  }

  dynamic "nodes" {
    for_each = [
      local.hosts.metal.hosts.metal3.ansible_host
    ]

    content {
      address = nodes.value
      user    = local.user
      role = [
        "worker"
      ]
      ssh_key_path = local.ssh_key_path
    }
  }

  ingress {
    provider = "none"
  }

  # For CoreOS
  network {
    plugin = "canal"
    options = {
      canal_flex_volume_plugin_dir = "/opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds"
      flannel_backend_type         = "vxlan"
      canal_flannel_backend_port   = "8472"
      canal_flannel_backend_type   = "vxlan"
      canal_flannel_backend_vni    = "1"
    }
  }

  services {
    kube_controller {
      extra_args = {
        flex-volume-plugin-dir = "/opt/kubernetes/kubelet-plugins/volume/exec/"
      }
    }
  }
}

resource "local_file" "kube_config_yaml" {
  filename          = "${path.root}/kube_config.yaml"
  sensitive_content = rke_cluster.cluster.kube_config_yaml
  file_permission   = "0600"
}

@@ -1,16 +0,0 @@
terraform {
  backend "etcdv3" {
    lock = true
  }

  required_providers {
    rke = {
      source  = "rancher/rke"
      version = "1.2.2"
    }

    helm = {
      source  = "hashicorp/helm"
      version = "2.2.0"
    }
  }
}

@@ -1,5 +0,0 @@
server:
  ingress:
    enabled: true
    hosts:
      - argocd.khuedoan.com