Compare commits: 7d88137084...main (3 commits)

Commits: eb6b3108e0, 868fdce461, 5d436bb632
README.md (42 changed lines)
@@ -2,15 +2,15 @@
 **A *forever-work-in-progress* self-hosted server setup**
 
-Based on a multi-node k3s cluster running on VMs and bare metal hardware.
+Runs on a multi-node k3s cluster deployed across VMs and bare-metal hosts.
 
-The overall application configs are stored in a NFS share inside of a SSD that was purposed specifically for this. For that I'm using `nfs-subdir-external-provisioner` as a dynamic storage provisioner with specified paths on each PVC. Some other data is stored on a NAS server with a NFS share as well.
+Application configuration is stored on an NFS share located on a dedicated SSD. This uses `nfs-subdir-external-provisioner` as a dynamic storage provisioner with PVC-specific paths. Additional data is stored on a NAS exported via NFS.
 
-The cluster is running on `k3s` with `nginx` as the ingress controller. For load balancing I'm using `MetalLB` in layer 2 mode. I'm also using `cert-manager` for local CA and certificates (as Vaultwarden requires it).
+The cluster runs `k3s` with `nginx` as the ingress controller. `MetalLB` is used in layer 2 mode for load balancing. `cert-manager` provides a local CA and issues certificates (required by Vaultwarden).
 
-For more information on setup, check out [SETUP.md](SETUP.md).
+For setup details, see [SETUP.md](SETUP.md).
 
-Also, the repository name is a reference to my local TLD which is `.haven` :)
+The repository name references my local TLD, `.haven` ;)
 
 ## Namespaces
 
 - default
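As an aside on the storage paragraph above, here is a minimal sketch of how per-PVC paths work with `nfs-subdir-external-provisioner`. The StorageClass name, provisioner string, and annotation value below are assumptions based on that project's documented defaults, not taken from this repository:

```yaml
# Hypothetical StorageClass: pathPattern reads an annotation off each PVC,
# so every claim maps to a fixed subdirectory on the SSD-backed NFS export.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client
provisioner: cluster.local/nfs-subdir-external-provisioner
parameters:
  pathPattern: "${.PVC.annotations.nfs.io/storage-path}"
  onDelete: retain
---
# Hypothetical PVC pinned to the "app-config" subdirectory.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: app-config
  annotations:
    nfs.io/storage-path: "app-config"
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```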
@@ -27,26 +27,36 @@ Also, the repository name is a reference to my local TLD which is `.haven` :)
   - AdGuardHome-2 (2nd instance)
   - AdGuard-Sync
 - infra
-  - Haven Notify (my own internal service)
+  - [Haven Notify](https://git.ivanch.me/ivanch/server-scripts/src/branch/main/haven-notify)
   - Beszel
-  - Beszel Agent (running as DaemonSet)
+  - Beszel Agent (running as a DaemonSet)
-  - Code Config (vscode for internal config editing)
+  - Code Config (VS Code for internal config editing)
   - WireGuard Easy
 - dev
   - Gitea Runner (x64)
   - Gitea Runner (arm64)
+- monitoring
+  - Grafana
+  - Prometheus
+  - Node Exporter
+  - Kube State Metrics
+  - Loki
+  - Alloy
 
 #### Miscellaneous namespaces
 
-- lab (A playground/sandbox namespace)
+- lab (a playground/sandbox namespace)
-  - nfs-pod (for testing and accessing NFS mounts through NFS)
+  - nfs-pod (for testing and accessing NFS mounts)
 - metallb-system
   - MetalLB components
 - cert-manager
-  - Cert-Manager components
+  - cert-manager components
 
-## Todo:
+## Todo
-- Move archivebox data to its own PVC on NAS
+- Move ArchiveBox data to its own PVC on the NAS
-- Move uptimekuma to `infra` namespace
+- Move Uptime Kuma to the infra namespace
-- Add links to each application docs
+- Add links to each application's documentation
 - Add links to server scripts
+- Move Alloy to the monitoring namespace
+- Install Loki, Grafana, and Prometheus via Helm charts
+- Configure Loki and Prometheus to use PVCs
SETUP.md (2 changed lines)
@@ -50,7 +50,7 @@ kubectl apply -f metallb-system/address-pool.yaml
 
 ## Install cert-manager
 ```bash
-kubectl create namespace cert-manager
+kubectl create ns cert-manager
 kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml
 ```
 
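The hunk header above references `metallb-system/address-pool.yaml`. For context, here is a minimal sketch of what a layer-2 MetalLB pool typically looks like; the resource names and address range are hypothetical, not taken from this repository:

```yaml
# Hypothetical layer-2 pool: MetalLB answers ARP/NDP for these addresses
# and assigns them to Services of type LoadBalancer.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.1.240-192.168.1.250
---
# The L2Advertisement activates layer-2 announcement for the pool.
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default-l2
  namespace: metallb-system
spec:
  ipAddressPools:
    - default-pool
```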
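After installing cert-manager, the local CA mentioned in the README is typically wired up with a CA issuer. A sketch, assuming the CA key pair has already been stored as a Secret (both names below are hypothetical):

```yaml
# Hypothetical CA issuer: cert-manager signs certificates with the key
# pair in the referenced Secret (which must live in the cert-manager
# namespace when using a ClusterIssuer).
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: local-ca-issuer
spec:
  ca:
    secretName: local-ca-key-pair
```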
@@ -67,7 +67,7 @@ spec:
     - port: 7575
       targetPort: homarr-port
 ---
-# 3) PersistentVolumeClaim (for /config)
+# 3) PersistentVolumeClaim
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
@@ -83,7 +83,7 @@ spec:
     requests:
       storage: 1Gi
 ---
-# 4) Ingress (Traefik)
+# 4) Ingress
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
@@ -44,7 +44,7 @@ spec:
     - port: 80
       targetPort: 80
 ---
-# 3) PersistentVolumeClaim (local storage via k3s local-path)
+# 3) PersistentVolumeClaim
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
@@ -60,7 +60,7 @@ spec:
     requests:
       storage: 1Gi
 ---
-# 4) Ingress (Traefik)
+# 4) Ingress
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
@@ -49,7 +49,7 @@ spec:
     - port: 8080
       targetPort: searxng-port
 ---
-# 3) PersistentVolumeClaim (for /config)
+# 3) PersistentVolumeClaim
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
@@ -65,7 +65,7 @@ spec:
     requests:
       storage: 1Gi
 ---
-# 4) Ingress (Traefik)
+# 4) Ingress
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
@@ -63,7 +63,7 @@ spec:
     - port: 3001
       targetPort: uptimekuma-port
 ---
-# 3) PersistentVolumeClaim (for /config)
+# 3) PersistentVolumeClaim
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
@@ -79,7 +79,7 @@ spec:
     requests:
       storage: 1Gi
 ---
-# 4) Ingress (Traefik)
+# 4) Ingress
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
@@ -75,7 +75,7 @@ spec:
     requests:
       storage: 1Gi
 ---
-# 4) Ingress (Traefik)
+# 4) Ingress
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
@@ -102,7 +102,7 @@ spec:
       port:
         number: 80
 ---
-# 4) Ingress (Traefik)
+# 4) Ingress
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
@@ -4,18 +4,3 @@ kubectl create secret generic adguardhome-password \
   --from-literal=password='your_adguardhome_password' \
   --from-literal=username='your_adguardhome_username' -n dns
 ```
 
-## Add AdGuardHome to CoreDNS configmap fallback:
-1. Edit the CoreDNS configmap:
-```bash
-kubectl edit configmap coredns -n kube-system
-```
-2. Replace the `forward` line with the following:
-```
-forward . <ADGUARDHOME_IP> <ADGUARDHOME_IP_2>
-```
-This will use AdGuardHome as the primary DNS server and a secondary one as a fallback, instead of using the default Kubernetes CoreDNS server.
-
-You may also use `/etc/resolv.conf` to forward to the node's own DNS resolver, but it depends on whether it's well configured or not. *Since it's Linux, we never know.*
-
-Ideally, since DNS is required for fetching the container image, you would have AdGuardHome as first and then a public DNS server as second (fallback).
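A note on the CoreDNS section removed above: the `forward` plugin's default policy is `random`, so strict primary/fallback ordering actually needs `policy sequential`. A sketch of the edited ConfigMap fragment, with the `<ADGUARDHOME_IP>` placeholders kept as in the original and the rest of the k3s Corefile omitted:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        # ... errors, health, kubernetes plugins omitted ...
        forward . <ADGUARDHOME_IP> <ADGUARDHOME_IP_2> {
            policy sequential   # try upstreams in listed order
        }
        cache 30
    }
```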
@@ -15,9 +15,18 @@ spec:
       labels:
         app: beszel
     spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: kubernetes.io/arch
+                    operator: In
+                    values:
+                      - amd64
       containers:
         - name: beszel
-          image: ghcr.io/henrygd/beszel/beszel:14.1
+          image: ghcr.io/henrygd/beszel/beszel:0.14.1
           imagePullPolicy: Always
           ports:
             - containerPort: 8090
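The nodeAffinity block added above pins the Beszel hub to amd64 nodes. For a single equality match like this, a plain `nodeSelector` would be an equivalent, more compact alternative; nodeAffinity mainly pays off when richer operators (`In` over several values, `NotIn`, `Exists`) are needed:

```yaml
# Equivalent pinning with nodeSelector (sketch of the pod spec fragment):
spec:
  nodeSelector:
    kubernetes.io/arch: amd64
```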
@@ -44,7 +53,7 @@ spec:
     - port: 80
       targetPort: beszel-port
 ---
-# 3) PersistentVolumeClaim (for /config)
+# 3) PersistentVolumeClaim
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
@@ -60,7 +69,7 @@ spec:
     requests:
       storage: 1Gi
 ---
-# 4) Ingress (Traefik)
+# 4) Ingress
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
@@ -66,7 +66,7 @@ spec:
     - port: 8443
       targetPort: code-port
 ---
-# 3) PersistentVolumeClaim (for /config)
+# 3) PersistentVolumeClaim
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
@@ -82,7 +82,7 @@ spec:
     requests:
       storage: 5Gi
 ---
-# 4) Ingress (Traefik)
+# 4) Ingress
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
monitoring/loki.yaml (new file, 101 lines)
@@ -0,0 +1,101 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: loki
+  namespace: monitoring
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: loki
+  template:
+    metadata:
+      labels:
+        app: loki
+    spec:
+      containers:
+        - name: loki
+          image: grafana/loki:3
+          args: ["-config.file=/etc/loki/config/config.yaml"]
+          ports:
+            - containerPort: 3100
+          volumeMounts:
+            - name: config
+              mountPath: /etc/loki/config
+            - name: loki-storage
+              mountPath: /tmp/loki
+      volumes:
+        - name: config
+          configMap:
+            name: loki-config
+        - name: loki-storage
+          emptyDir:
+            medium: Memory
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: loki-config
+  namespace: monitoring
+data:
+  config.yaml: |
+    auth_enabled: true
+    server:
+      http_listen_port: 3100
+
+    common:
+      ring:
+        instance_addr: 127.0.0.1
+        kvstore:
+          store: inmemory
+      replication_factor: 1
+      path_prefix: /tmp/loki
+    querier:
+      multi_tenant_queries_enabled: true
+
+    schema_config:
+      configs:
+        - from: "2024-01-01"
+          store: tsdb
+          object_store: filesystem
+          schema: v13
+          index:
+            prefix: index_
+            period: 24h
+
+    storage_config:
+      tsdb_shipper:
+        active_index_directory: /tmp/loki/index
+        cache_location: /tmp/loki/cache
+      filesystem:
+        directory: /tmp/loki/chunks
+
+    limits_config:
+      allow_structured_metadata: true
+      retention_period: 0
+
+    ingester:
+      lifecycler:
+        ring:
+          kvstore:
+            store: inmemory
+          replication_factor: 1
+      chunk_idle_period: 1m
+      max_chunk_age: 5m
+      chunk_target_size: 1536000
+
+    compactor:
+      retention_enabled: false
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: loki
+  namespace: monitoring
+spec:
+  ports:
+    - port: 3100
+      targetPort: 3100
+      name: http
+  selector:
+    app: loki
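Loki's chunks and index above live in a memory-backed `emptyDir`, so all logs vanish whenever the pod restarts; the "Configure Loki and Prometheus to use PVCs" todo item presumably addresses this. A sketch of the swap, reusing the hypothetical `nfs-client` StorageClass name assumed earlier:

```yaml
# Hypothetical PVC to persist /tmp/loki across restarts.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: loki-storage
  namespace: monitoring
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
```

In the Deployment, the `loki-storage` volume would then reference `persistentVolumeClaim: { claimName: loki-storage }` instead of the `emptyDir`.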
@@ -22,7 +22,7 @@ spec:
         args:
           - "--config.file=/etc/prometheus/prometheus.yml"
           - "--storage.tsdb.path=/prometheus"
-          - "--storage.tsdb.retention.time=3d"
+          - "--storage.tsdb.retention.time=1d"
           - "--web.enable-lifecycle"
         ports:
           - containerPort: 9090