
feat: setup caddy via nomad(tf)

Karan Sharma, 8 months ago · commit f6e2aed142 (pull/2/head)
Changed files:

1. TODO.md (11 changes)
2. hydra/nomad/caddy/Caddyfile.tpl (3 changes)
3. hydra/nomad/pihole/pihole.nomad (47 changes)
4. hydra/terraform/.terraform.lock.hcl (34 changes)
5. hydra/terraform/Makefile (5 changes)
6. hydra/terraform/env.sample (2 changes)
7. hydra/terraform/main.tf (8 changes)
8. hydra/terraform/modules/caddy/conf/Caddyfile.tpl (13 changes)
9. hydra/terraform/modules/caddy/conf/caddy.nomad.tpl (30 changes)
10. hydra/terraform/modules/caddy/data.tf (6 changes)
11. hydra/terraform/modules/caddy/job.tf (8 changes)
12. hydra/terraform/modules/caddy/variables.tf (4 changes)
13. hydra/terraform/modules/cloudflare/records.tf (24 changes)
14. hydra/terraform/modules/cloudflare/variables.tf (2 changes)
15. hydra/terraform/modules/digitalocean/firewalls.tf (8 changes)
16. hydra/terraform/modules/digitalocean/output.tf (4 changes)
17. hydra/terraform/modules/digitalocean/project.tf (8 changes)
18. hydra/terraform/providers.tf (20 changes)
19. hydra/terraform/variables.tf (8 changes)
20. hydra/terraform/versions.tf (4 changes)
21. setup.md (18 changes)

TODO.md (11 changes)

@@ -2,11 +2,12 @@
- [x] Setup Tailscale in Floyd/Gilmour.
- [x] Configure Nomad and Consul to bind on Tailscale Address.
- [ ] Deploy Caddy as a Nomad Job which
  - [ ] binds to 80/443 on host
  - [ ] mounts the cert directory
  - [ ] template file (caddy.tpl)
- [ ] Configure `nomad.mrkaran.dev` and `consul.mrkaran.dev`.
- [x] Deploy Caddy as a Nomad Job which
  - [x] binds to 80/443 on host
  - [x] mounts the cert directory
  - [x] template file (caddy.tpl)
- [x] Configure `nomad.mrkaran.dev` and `consul.mrkaran.dev`.
- [x] Create Terraform module for running workloads as Nomad jobs.

## Hands on by deploying more workloads

hydra/nomad/caddy/Caddyfile.tpl (3 changes)

@@ -1,3 +0,0 @@
nomad.mrkaran.dev {
    reverse_proxy 100.119.138.27:4646
}

hydra/nomad/pihole/pihole.nomad (47 changes)

@@ -1,47 +0,0 @@
job "pihole" {
datacenters = ["hydra"]
type = "service"
group "pihole" {
count = 1
network {
# Ensure that port 53 on host is free to use.
# Stop `systemd-resolvd` on the host if required.
port "dns" {
to = 53
}
# Assign a random port for UI and forward to port 80 in container.
port "http" {
to = "80"
}
}
service {
name = "pihole-web"
tags = ["pihole", "web"]
port = "http"
# check {
# name = "alive"
# type = "tcp"
# interval = "10s"
# timeout = "2s"
# }
}
restart {
attempts = 2
interval = "2m"
delay = "15s"
mode = "fail"
}
task "app" {
driver = "docker"
config {
image = "pihole/pihole:latest"
ports = ["http","dns"]
}
resources {
cpu = 250
memory = 256
}
}
}
}

hydra/terraform/.terraform.lock.hcl (34 changes)

@@ -57,6 +57,40 @@ provider "registry.terraform.io/hashicorp/http" {
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "1.4.13"
hashes = [
"h1:Qca2Y2GlKPN+YRjLS4m95HSHyJIqokwrl2AjpYhnQt0=",
"zh:2406f842b2c5c70fceb5715b4dfc3ffe47630072816ecfb047b7f11169cd04fb",
"zh:318575fecf8c77ea9d9c99198ec2df9d8c35ad5a3d53674d92dc6bdce5598d4d",
"zh:3379c8466e0ba8f865ac2f0d909879e088e02559f527b0a45f6d9790fc7a13b5",
"zh:6209427c15d6bb1ff327613d8e261758f0c1abf5d8045b2fe985d6546333b4bc",
"zh:8c159fe5a9c2e12f831ac3e847ec9007e42d116ba4b8adc53c93998446d0e36d",
"zh:90bc5ea082ff0400b698df4f9d70ad82d8f85d21653b341c229a477aba196bf5",
"zh:a0c9c7fe2a0f024365a0e94894d074f61ab5f0db89092eeb538ba9b12ff0b9b9",
"zh:b35293b9fbacca3a3ef772658d977ddc7061c94e4b460623b184293e8fc8ebb4",
"zh:c5fbd8c0639a9b421f92f29268707ac6b16ae008b477256f4aac89d7f14c2f1d",
"zh:d4a8cfcb867fc24ab400340a07c06a62e913317d2d20961c0b6a4f4578af6cb5",
]
}
provider "registry.terraform.io/hashicorp/template" {
version = "2.2.0"
hashes = [
"h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=",
"zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
"zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
"zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
"zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
"zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
"zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
"zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
"zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
"zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
"zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
]
}
provider "registry.terraform.io/kreuzwerker/docker" {
version = "2.11.0"
hashes = [

hydra/terraform/Makefile (5 changes)

@@ -7,9 +7,8 @@ init:
- terraform init
apply:
# that's how we roll here ;)
- terraform apply -auto-approve
lint:
- terraform validate && terraform fmt
- terraform validate && terraform fmt -recursive

hydra/terraform/env.sample (2 changes)

@@ -1,3 +1,3 @@
DIGITALOCEAN_TOKEN=
CLOUDFLARE_API_TOKEN=
TF_VAR_xyz=
TF_VAR_cloudflare_caddy_api_token=

hydra/terraform/main.tf (8 changes)

@@ -12,3 +12,11 @@ module "cloudflare" {
    cloudflare = cloudflare
  }
}

module "caddy" {
  source               = "./modules/caddy"
  cloudflare_api_token = var.cloudflare_caddy_api_token
  providers = {
    nomad = nomad
  }
}

hydra/terraform/modules/caddy/conf/Caddyfile.tpl (13 changes)

@@ -0,0 +1,13 @@
nomad.mrkaran.dev {
    reverse_proxy 100.119.138.27:4646
    tls {
        dns cloudflare "${cloudflare_api_token}"
    }
}

consul.mrkaran.dev {
    reverse_proxy 100.119.138.27:8500
    tls {
        dns cloudflare "${cloudflare_api_token}"
    }
}

hydra/nomad/caddy/caddy.nomad → hydra/terraform/modules/caddy/conf/caddy.nomad.tpl (30 changes)

@@ -33,26 +33,30 @@ job "caddy" {
driver = "docker"
config {
image = "mrkaran/caddy:latest"
mounts = [
{
type = "bind"
source = "configs"
target = "/etc/caddy" # Bind mount the template from `NOMAD_TASK_DIR`.
}
]
# Bind the config file to container.
mount {
type = "bind"
source = "configs"
target = "/etc/caddy" # Bind mount the template from `NOMAD_TASK_DIR`.
}
# Bind the data directory to preserve certs.
mount {
type = "bind"
target = "/data"
source = "/data/caddy"
readonly = false
}
ports = ["http", "https"]
}
resources {
cpu = 100
memory = 100
}
artifact {
source = "https://raw.githubusercontent.com/mr-karan/hydra/nomad/hydra/nomad/caddy/Caddyfile.tpl"
destination = "configs" # Save to a local path inside `NOMAD_TASK_DIR`.
}
template {
source = "configs/Caddyfile.tpl" # Downloaded from Artifact.
destination = "configs/Caddyfile" # Rendered template.
data = <<EOF
${caddyfile}
EOF
destination = "configs/Caddyfile" # Rendered template.
change_mode = "signal"
change_signal = "SIGINT"
}

hydra/terraform/modules/caddy/data.tf (6 changes)

@@ -0,0 +1,6 @@
data "template_file" "caddyfile" {
template = file("${path.module}/conf/Caddyfile.tpl")
vars = {
cloudflare_api_token = var.cloudflare_api_token
}
}

hydra/terraform/modules/caddy/job.tf (8 changes)

@@ -0,0 +1,8 @@
resource "nomad_job" "app" {
jobspec = templatefile("${path.module}/conf/caddy.nomad.tpl", {
caddyfile = data.template_file.caddyfile.rendered
})
hcl2 {
enabled = true
}
}

hydra/terraform/modules/caddy/variables.tf (4 changes)

@@ -0,0 +1,4 @@
variable "cloudflare_api_token" {
type = string
description = "Cloudflare API token to edit DNS Zones and Records."
}

hydra/terraform/modules/cloudflare/records.tf (24 changes)

@@ -22,6 +22,30 @@ resource "cloudflare_record" "gilmour" {
}

resource "cloudflare_record" "nomad" {
  zone_id = cloudflare_zone.mrkaran_dev.id
  name    = "nomad"
  type    = "A"
  ttl     = "1"
  proxied = "false"
  value   = var.ips["floyd_tailscale"]
}

resource "cloudflare_record" "consul" {
  zone_id = cloudflare_zone.mrkaran_dev.id
  name    = "consul"
  type    = "A"
  ttl     = "1"
  proxied = "false"
  value   = var.ips["floyd_tailscale"]
}

resource "cloudflare_record" "website" {
  zone_id = cloudflare_zone.mrkaran_dev.id

hydra/terraform/modules/cloudflare/variables.tf (2 changes)

@@ -1,3 +1,3 @@
variable "ips" {
type = map
type = map(any)
}

hydra/terraform/modules/digitalocean/firewalls.tf (8 changes)

@@ -9,7 +9,7 @@ data "http" "cloudflare_ip6_addrs" {
resource "digitalocean_firewall" "web" {
name = "allow-http-https-cloudflare"
droplet_ids = [digitalocean_droplet.floyd.id,digitalocean_droplet.gilmour.id]
droplet_ids = [digitalocean_droplet.floyd.id, digitalocean_droplet.gilmour.id]
inbound_rule {
protocol = "tcp"
port_range = "80"
@@ -44,7 +44,7 @@ resource "digitalocean_firewall" "icmp" {
resource "digitalocean_firewall" "vpn" {
name = "allow-all-tailscale-inbound"
droplet_ids = [digitalocean_droplet.floyd.id,digitalocean_droplet.gilmour.id]
droplet_ids = [digitalocean_droplet.floyd.id, digitalocean_droplet.gilmour.id]
inbound_rule {
protocol = "tcp"
@@ -63,7 +63,7 @@ resource "digitalocean_firewall" "vpn" {
resource "digitalocean_firewall" "ssh" {
name = "ssh-inbound"
droplet_ids = [digitalocean_droplet.floyd.id,digitalocean_droplet.gilmour.id]
droplet_ids = [digitalocean_droplet.floyd.id, digitalocean_droplet.gilmour.id]
inbound_rule {
protocol = "tcp"
@@ -76,7 +76,7 @@ resource "digitalocean_firewall" "ssh" {
resource "digitalocean_firewall" "outbound-all" {
name = "allow-all-outbound"
droplet_ids = [digitalocean_droplet.floyd.id,digitalocean_droplet.gilmour.id]
droplet_ids = [digitalocean_droplet.floyd.id, digitalocean_droplet.gilmour.id]
outbound_rule {
protocol = "tcp"

hydra/terraform/modules/digitalocean/output.tf (4 changes)

@@ -1,9 +1,9 @@
output "floating_floyd" {
value = digitalocean_floating_ip.floyd.ip_address
value = digitalocean_floating_ip.floyd.ip_address
description = "Floating IP of Floyd Node"
}
output "floating_gilmour" {
value = digitalocean_floating_ip.gilmour.ip_address
value = digitalocean_floating_ip.gilmour.ip_address
description = "Floating IP of Gilmour Node"
}

hydra/terraform/modules/digitalocean/project.tf (8 changes)

@@ -5,8 +5,8 @@ resource "digitalocean_project" "hydra" {
purpose = "Web Application"
environment = "Production"
# Tag the droplet IDs and Floating IPs of `floyd` and `gilmour`.
resources = [digitalocean_droplet.floyd.urn,
digitalocean_floating_ip.floyd.urn,
digitalocean_droplet.gilmour.urn,
digitalocean_floating_ip.gilmour.urn]
resources = [digitalocean_droplet.floyd.urn,
digitalocean_floating_ip.floyd.urn,
digitalocean_droplet.gilmour.urn,
digitalocean_floating_ip.gilmour.urn]
}

hydra/terraform/providers.tf (20 changes)

@@ -1,18 +1,3 @@
// provider "docker" {
// host = "ssh://floyd:22"
// alias = "floyd"
// }
// provider "docker" {
// host = "ssh://parvaaz:22"
// alias = "parvaaz"
// }
// provider "docker" {
// host = "ssh://gilmour:22"
// alias = "gilmour"
// }
provider "digitalocean" {
# You need to set this in your .bashrc
# export DIGITALOCEAN_TOKEN="Your API TOKEN"
@@ -23,3 +8,8 @@ provider "cloudflare" {
# You need to set this in your .bashrc
# export CLOUDFLARE_API_TOKEN="Your API TOKEN"
}
# Configure the Nomad provider.
provider "nomad" {
  address = "http://100.119.138.27:4646"
}

hydra/terraform/variables.tf (8 changes)

@@ -1,7 +1,13 @@
locals {
# Map of IPs of all the nodes.
ips = {
floyd_floating = module.servers.floating_floyd
floyd_floating = module.servers.floating_floyd
gilmour_floating = module.servers.floating_gilmour
floyd_tailscale = "100.119.138.27"
}
}
variable "cloudflare_caddy_api_token" {
type = string
description = "API key to edit DNS zones in Cloudflare used by Caddy"
}

hydra/terraform/versions.tf (4 changes)

@@ -1,14 +1,14 @@
terraform {
required_providers {
digitalocean = {
source = "digitalocean/digitalocean"
source = "digitalocean/digitalocean"
version = "2.5.1"
}
docker = {
source = "kreuzwerker/docker"
}
cloudflare = {
source = "cloudflare/cloudflare"
}

setup.md (18 changes)

@@ -150,4 +150,20 @@ Visit http://100.119.138.27:8500/ui/ to access Consul UI.
```
rsync -avz --progress ./*.nomad floyd:/home/karan/jobs
```
## Vault

Currently, `TF_VAR_*` environment variables are loaded from the host before running `tf apply`. Terraform then templates out the Nomad `jobspec` and submits the job to the server (sketched below). This is okay in this context because:

- The Nomad API server listens only on the Tailscale IP, which means only trusted, authenticated agents have access to the API. This is very important because Nomad shows the plain-text version of the `jobspec` in the UI and CLI, so all the secret keys could be exposed if a malicious actor had access to the API server (even read-only access).
- The env keys are mostly just one-time API tokens or DB passwords. They don't need to be "watched" and reloaded often, so running an entire Vault server just for passing these keys seems like unnecessary extra complexity.
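
For reference, this is roughly how the token flows today, condensed from the files in this commit (`env.sample`, `variables.tf`, and the `caddy` module); only the inline comments are new:

```
# Exported on the host (see env.sample); Terraform picks it up automatically:
#   export TF_VAR_cloudflare_caddy_api_token="<token>"

variable "cloudflare_caddy_api_token" {
  type        = string
  description = "API key to edit DNS zones in Cloudflare used by Caddy"
}

# modules/caddy: the token is interpolated into Caddyfile.tpl and the rendered
# jobspec is submitted to Nomad, so it ends up visible via `nomad job inspect`
# and the UI.
resource "nomad_job" "app" {
  jobspec = templatefile("${path.module}/conf/caddy.nomad.tpl", {
    caddyfile = data.template_file.caddyfile.rendered
  })
}
```
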
**However**, just to _experiment_ with things and make the setup a bit more secure, we can consider running a single-node Vault server:

- [ ] Setup Vault to store secrets
- [ ] Vault init/unseal steps.
- [ ] Add Policies and Role in Vault for a namespace
- [ ] Configure Nomad to use Vault
- [ ] Add an API token in Vault
- [ ] Pass CF token to Caddyfile and retrieve it from Vault with Consul Template (see the sketch below).
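
A rough sketch of what that last item could look like, assuming a hypothetical KV v2 secret at `kv/data/caddy` with a `cloudflare_api_token` field and a hypothetical `caddy` Vault policy (neither exists in this setup yet). The template stanza is rendered by Consul Template inside Nomad, so the token never has to be baked into the submitted jobspec:

```
task "app" {
  driver = "docker"

  # Hypothetical policy; it would have to be created in Vault and allowed
  # for Nomad's token role first.
  vault {
    policies = ["caddy"]
  }

  template {
    data = <<EOF
nomad.mrkaran.dev {
    reverse_proxy 100.119.138.27:4646
    tls {
        dns cloudflare "{{ with secret "kv/data/caddy" }}{{ .Data.data.cloudflare_api_token }}{{ end }}"
    }
}
EOF
    destination   = "configs/Caddyfile"
    change_mode   = "signal"
    change_signal = "SIGINT"
  }
}
```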
