1
0
Fork 0

attempted k8s resources as tf files, not worth the trouble

This commit is contained in:
michael 2024-04-20 02:10:01 +12:00
parent 43dbb951fe
commit 5b83607fe0
7 changed files with 151 additions and 14 deletions

View File

@ -19,18 +19,18 @@ resource "local_sensitive_file" "kubeconfig" {
file_permission = "0600"
}
# 10 GB high-performance block volume attached to the first node of the
# VKE cluster's first node pool.
# NOTE(review): assumes the cluster lives in "ewr" — confirm the region
# matches vultr_kubernetes.k8s, since block storage must be co-located.
resource "vultr_block_storage" "ssd0" {
  label      = "cluster00-ssd0"
  size_gb    = 10
  region     = "ewr"
  block_type = "high_perf"
  # Referencing the cluster's node ID here already creates an implicit
  # dependency on vultr_kubernetes.k8s, so the previous explicit
  # depends_on entry was redundant and has been removed.
  attached_to_instance = vultr_kubernetes.k8s.node_pools[0].nodes[0].id
  live                 = true
}
# resource "vultr_block_storage" "ssd0" {
# label = "cluster00-ssd0"
# size_gb = 10
# region = "ewr"
# block_type = "high_perf"
# attached_to_instance = vultr_kubernetes.k8s.node_pools[0].nodes[0].id
# live = true
# depends_on = [
# vultr_kubernetes.k8s
# ]
# }
# Surface the volume's mount ID after apply.
# NOTE(review): presumably used to locate the attached device on the
# node when defining a PV — verify against how this output is consumed.
output "ssd0_mount" {
value = vultr_block_storage.ssd0.mount_id
}
# output "ssd0_mount" {
# value = vultr_block_storage.ssd0.mount_id
# }

View File

@ -0,0 +1,10 @@
# Environment settings for the Keyoxide web app. Consumed by the
# keyoxide Deployment via env_from, so DOMAIN becomes an environment
# variable inside the container.
resource "kubernetes_config_map" "keyoxide-env" {
metadata {
name = "keyoxide-env"
namespace = "default"
}
data = {
DOMAIN = "key.balsillie.net"
}
}

View File

@ -0,0 +1,53 @@
# Single-replica Deployment running the Keyoxide web frontend.
# Environment comes from the keyoxide-env ConfigMap via env_from.
resource "kubernetes_deployment" "keyoxide" {
  metadata {
    name      = "keyoxide"
    namespace = "default"
  }

  spec {
    replicas = 1

    selector {
      match_labels = {
        app = "keyoxide"
      }
    }

    template {
      metadata {
        labels = {
          app = "keyoxide"
        }
      }

      spec {
        container {
          name = "keyoxide"
          # NOTE(review): image is untagged (implicit :latest) and pulled
          # with Always — consider pinning a version for reproducibility.
          image             = "codeberg.org/keyoxide/keyoxide-web"
          image_pull_policy = "Always"

          resources {
            requests = {
              cpu    = "100m"
              memory = "50Mi"
            }
            limits = {
              cpu    = "500m"
              memory = "128Mi"
            }
          }

          env_from {
            config_map_ref {
              # Reference the ConfigMap resource instead of repeating the
              # literal name. The rendered value is identical, and the
              # reference gives Terraform an implicit ordering dependency,
              # making the previous explicit depends_on redundant.
              name = kubernetes_config_map.keyoxide-env.metadata[0].name
            }
          }
        }
      }
    }
  }
}

View File

@ -9,6 +9,10 @@ terraform {
source = "vultr/vultr"
version = ">= 2.19.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.29.0"
}
}
backend "local" {
path = "/home/michael/Nextcloud/Backups/tfstate/vultr.tfstate"
@ -29,3 +33,11 @@ provider "vultr" {
rate_limit = 100
retry_limit = 3
}
# Kubernetes provider authenticated via a static kubeconfig on disk.
provider "kubernetes" {
# Alternative: authenticate directly from the vultr_kubernetes resource
# attributes instead of a kubeconfig file:
#   host                   = vultr_kubernetes.k8s.endpoint
#   client_certificate     = vultr_kubernetes.k8s.client_certificate
#   client_key             = vultr_kubernetes.k8s.client_key
#   cluster_ca_certificate = vultr_kubernetes.k8s.cluster_ca_certificate
config_path = pathexpand("~/.kube/vultr")
}

15
terraform/vultr/pvcs.tf Normal file
View File

@ -0,0 +1,15 @@
# 10Gi RWO claim against Vultr's block-storage CSI class.
# NOTE(review): size matches the 10 GB vultr_block_storage volume
# elsewhere in this config — confirm whether this claim is meant to
# bind to that volume or to dynamically provision a new one.
resource "kubernetes_persistent_volume_claim" "ssd" {
metadata {
name = "ssd"
namespace = "default"
}
spec {
access_modes = ["ReadWriteOnce"]
resources {
requests = {
storage = "10Gi"
}
}
storage_class_name = "vultr-block-storage"
}
}

View File

@ -0,0 +1,29 @@
# Service fronting the keyoxide Deployment (selector app=keyoxide),
# exposing port 3000/TCP.
resource "kubernetes_service" "keyoxide" {
metadata {
name = "keyoxide"
namespace = "default"
labels = {
svc = "keyoxide"
}
}
spec {
selector = {
app = "keyoxide"
}
ip_family_policy = "SingleStack"
ip_families = ["IPv4"]
type = "ClusterIP"
# clusterIP "None" makes this a headless service (no virtual IP;
# DNS resolves straight to pod IPs).
# NOTE(review): headless + the commented traffic-policy line below
# suggest in-progress experimentation — confirm intent before relying
# on this Service shape.
cluster_ip = "None"
# external_traffic_policy = "Local"
port {
name = "http"
port = 3000
target_port = 3000
protocol = "TCP"
}
}
}

18
todo/vultr.todo Normal file
View File

@ -0,0 +1,18 @@
Revert to using Ansible for k8s manifest installs
- Employ Ansible as a provider in TF?
Install nginx-ingress-controller
Edit the nginx-ingress-controller service
- Change the service type to ClusterIP
- Change external and internal traffic policy
- Add external IPs
Install the operator lifecycle manager
- Scrape current version from GH releases
- Download the OLM install script
- Run with current version
Install operators:
- Cert manager
- CNPG
- Keycloak
Create cluster cert issuers