Compare commits

...

2 Commits

  1. docs/SETUP.md (24 changes)
  2. terraform/main.tf (9 changes)
  3. terraform/modules/caddy/conf/Caddyfile-internal (9 changes)
  4. terraform/modules/cloudflare/records.tf (12 changes)
  5. terraform/modules/doggo/conf/doggo.nomad (7 changes)
  6. terraform/modules/monitoring/conf/monitoring.nomad (118 changes)
  7. terraform/modules/monitoring/conf/prometheus.yml (18 changes)
  8. terraform/modules/monitoring/data.tf (3 changes)
  9. terraform/modules/monitoring/job.tf (8 changes)

docs/SETUP.md (24 changes)

@@ -146,6 +146,30 @@ connect {
}
```
### Systemd Unit
```
[Unit]
Description="HashiCorp Consul - A service mesh solution"
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/consul.d/consul.hcl
[Service]
User=consul
Group=consul
ExecStart=/usr/bin/consul agent -config-dir=/etc/consul.d/
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGTERM
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
```
## Vault
**WIP**
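
For reference, the Consul unit shown above is usually dropped into systemd and started along these lines; the file path is an assumption, adjust to your layout:

```
# Assumes the unit was saved as /etc/systemd/system/consul.service
sudo systemctl daemon-reload
sudo systemctl enable --now consul
systemctl status consul
```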

terraform/main.tf (9 changes)

@@ -30,7 +30,7 @@ module "pihole" {
module "doggo" {
  source    = "./modules/doggo"
  providers = {
    nomad = nomad
  }
}
@@ -75,3 +75,10 @@ module "gitea" {
    nomad = nomad
  }
}

module "monitoring" {
  source    = "./modules/monitoring"
  providers = {
    nomad = nomad
  }
}
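
Since this adds a new local module, Terraform has to install it before a plan will work; a minimal sketch, assuming it is run from the terraform/ root:

```
terraform init       # picks up the new ./modules/monitoring module
terraform validate
```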

terraform/modules/caddy/conf/Caddyfile-internal (9 changes)

@@ -29,3 +29,12 @@ joplin.mrkaran.dev {
        dns cloudflare "${cloudflare_api_token}"
    }
}

grafana.mrkaran.dev {
    {{ range service "grafana-web" }}
    reverse_proxy {{ .Address }}:{{ .Port }}
    {{ end }}
    tls {
        dns cloudflare "${cloudflare_api_token}"
    }
}
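
The new vhost is filled in from the `grafana-web` service in Consul, so Caddy only gets an upstream once the Nomad job further down has registered it. A quick way to confirm the service exists, assuming a local Consul agent with DNS on the default port 8600:

```
consul catalog services | grep grafana-web
dig @127.0.0.1 -p 8600 +short grafana-web.service.consul SRV
```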

terraform/modules/cloudflare/records.tf (12 changes)

@@ -105,6 +105,18 @@ resource "cloudflare_record" "koadings" {
}

resource "cloudflare_record" "grafana" {
  zone_id = cloudflare_zone.mrkaran_dev.id
  name    = "grafana"
  type    = "A"
  ttl     = "1"
  proxied = "false"
  value   = var.ips["floyd_tailscale"]
}

resource "cloudflare_record" "git" {
  zone_id = cloudflare_zone.mrkaran_dev.id
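
The record is unproxied and points at a Tailscale address, so it resolves publicly but is only reachable from inside the tailnet. A quick check once the change is applied:

```
dig +short grafana.mrkaran.dev
# Expect the floyd_tailscale value, i.e. a 100.x.y.z (CGNAT range) address.
```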

terraform/modules/doggo/conf/doggo.nomad (7 changes)

@@ -31,6 +31,13 @@ job "doggo" {
        image = "ghcr.io/mr-karan/doggo-api:v0.3.7"
        ports = ["http"]

        logging {
          type = "json-file"
          config {
            labels = "com.hashicorp.nomad.alloc_id,com.hashicorp.nomad.job_id,com.hashicorp.nomad.job_name,com.hashicorp.nomad.namespace,com.hashicorp.nomad.node_id,com.hashicorp.nomad.node_name,com.hashicorp.nomad.task_group_name,com.hashicorp.nomad.task_name"
          }
        }
      }

      resources {
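
A small verification sketch for the logging change: the labels end up in the container's log configuration, which can be read back with docker inspect (the container ID is a placeholder):

```
docker inspect --format '{{ json .HostConfig.LogConfig }}' <doggo-container-id>
```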

terraform/modules/monitoring/conf/monitoring.nomad (118 changes)

@@ -0,0 +1,118 @@
job "monitoring" {
  datacenters = ["hydra"]
  type        = "service"

  group "apps" {
    count = 1

    network {
      mode = "bridge"

      port "grafana-http" {
        to           = 3000
        host_network = "tailscale"
      }

      port "prometheus-http" {
        to           = 9090
        host_network = "tailscale"
      }
    }

    restart {
      attempts = 3
      interval = "5m"
      delay    = "30s"
      mode     = "fail"
    }

    task "grafana" {
      driver = "docker"

      # https://www.nomadproject.io/docs/job-specification/task#user
      # https://grafana.com/docs/grafana/latest/installation/docker/#migrate-to-v51-or-later
      user = "root"

      service {
        name = "grafana-web"
        tags = ["grafana", "web"]
        port = "grafana-http"
      }

      config {
        image = "grafana/grafana:latest-ubuntu"
        ports = ["grafana-http"]

        # Bind the data directory to preserve data.
        mount {
          type     = "bind"
          target   = "/var/lib/grafana"
          source   = "/data/grafana/"
          readonly = false
        }
      }

      resources {
        cpu    = 200
        memory = 300
      }
    }

    task "prometheus" {
      driver = "docker"
      user   = "root"

      service {
        name = "prometheus-web"
        tags = ["prometheus", "web"]
        port = "prometheus-http"
      }

      config {
        image = "prom/prometheus:latest"

        args = [
          "--config.file=/etc/prometheus/prometheus.yml",
          "--storage.tsdb.path=/prometheus",
          "--web.console.libraries=/etc/prometheus/console_libraries",
          "--web.console.templates=/etc/prometheus/consoles",
          "--storage.tsdb.retention.time=30d",
          "--web.enable-lifecycle"
        ]

        ports = ["prometheus-http"]

        # Bind the data directory to preserve data.
        mount {
          type     = "bind"
          target   = "/prometheus"
          source   = "/data/prometheus/data"
          readonly = false
        }

        # Bind the config file to container.
        mount {
          type     = "bind"
          source   = "local/prometheus.yml"
          target   = "/etc/prometheus/prometheus.yml"
          readonly = true
        }
      }

      template {
        data = <<EOF
${prometheus_config}
EOF
        destination = "local/prometheus.yml" # Rendered template.
        change_mode = "restart"
      }

      resources {
        memory = 400
      }
    }
  }
}
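
One operational note: both tasks bind-mount host paths, and the Docker driver refuses to start a task when a bind source is missing, so these directories are expected to exist on the client beforehand. A minimal prep sketch with the paths taken from the jobspec above (ownership is not an issue here since both tasks run as root):

```
sudo mkdir -p /data/grafana /data/prometheus/data
```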

terraform/modules/monitoring/conf/prometheus.yml (18 changes)

@@ -0,0 +1,18 @@
global:
  scrape_interval: 30s
  external_labels:
    hydra_component: "floyd"

scrape_configs:
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]

  - job_name: "node"
    static_configs:
      - targets: ["nodeexporter:9100"]

  - job_name: "ispmonitor"
    scrape_interval: 60s
    static_configs:
      - targets: ["100.94.241.54:9283"] # RPi telegraf Agent

terraform/modules/monitoring/data.tf (3 changes)

@@ -0,0 +1,3 @@
data "template_file" "prometheus-config" {
  template = file("${path.module}/conf/prometheus.yml")
}

terraform/modules/monitoring/job.tf (8 changes)

@@ -0,0 +1,8 @@
resource "nomad_job" "prometheus" {
  jobspec = templatefile("${path.module}/conf/monitoring.nomad", {
    prometheus_config = data.template_file.prometheus-config.rendered
  })

  hcl2 {
    enabled = true
  }
}
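
With the module wired into main.tf, the rollout can be checked end to end; a sketch assuming the Terraform and Nomad CLIs are already pointed at the cluster:

```
terraform plan -target=module.monitoring
terraform apply -target=module.monitoring

# Confirm the job landed and both tasks are running.
nomad job status monitoring
nomad alloc status <alloc-id>   # <alloc-id> comes from the job status output
```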