Compare commits

..

6 Commits

Author SHA1 Message Date
a612fcc775 adding resource limts 2026-04-10 13:33:57 -03:00
d7a0e9fbea adding openwebui 2026-04-10 13:33:24 -03:00
2182754eee adding readme 2026-03-16 19:26:33 -03:00
29e3470e39 adding docker-ingress namespace 2026-03-16 19:26:08 -03:00
f979f96fcc adding alloy 2026-01-03 12:55:33 -03:00
fbe1cdd282 updating beszel 2026-01-03 12:55:28 -03:00
14 changed files with 412 additions and 3 deletions

4
.gitignore vendored
View File

@@ -3,7 +3,11 @@ default/*tt*
certs/ca* certs/ca*
lab/* lab/*
sandbox/*
!lab/nfs-pod.yaml !lab/nfs-pod.yaml
rbac rbac
cronjobs cronjobs
*.crt
*.key

View File

@@ -22,6 +22,7 @@ The repository name references my local TLD, `.haven` ;)
- Searxng - Searxng
- Uptimekuma - Uptimekuma
- Vaultwarden - Vaultwarden
- OpenWebUI
- dns - dns
- AdGuardHome - AdGuardHome
- AdGuardHome-2 (2nd instance) - AdGuardHome-2 (2nd instance)
@@ -51,6 +52,8 @@ The repository name references my local TLD, `.haven` ;)
- MetalLB components - MetalLB components
- cert-manager - cert-manager
- cert-manager components - cert-manager components
- docker-ingress
- nginx ingress controller components for Docker-based services
## Todo ## Todo
- Move ArchiveBox data to its own PVC on the NAS - Move ArchiveBox data to its own PVC on the NAS

3
alloy/README.md Normal file
View File

@@ -0,0 +1,3 @@
## Install Alloy via Helm
```bash
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
helm install alloy grafana/alloy --namespace alloy -f values.yaml
```

107
alloy/values.yaml Normal file
View File

@@ -0,0 +1,107 @@
# Values for the grafana/alloy Helm chart (install steps: alloy/README.md).
# Tails pod logs on each node and ships them to Loki under two tenants:
# "main" for everything on the node, "chacal" for the chacal namespace.
alloy:
  clustering:
    enabled: false # Single node deployment
  configMap:
    create: true
    content: |-
      // Discover every pod scheduled on the node this agent runs on.
      discovery.kubernetes "all_pods" {
        role = "pod"
        selectors {
          role = "pod"
          field = "spec.nodeName=" + coalesce(env("HOSTNAME"), constants.hostname)
        }
      }
      // Map Kubernetes metadata onto namespace/pod/container/app labels.
      discovery.relabel "all_pods" {
        targets = discovery.kubernetes.all_pods.targets
        rule {
          source_labels = ["__meta_kubernetes_namespace"]
          target_label = "namespace"
        }
        rule {
          source_labels = ["__meta_kubernetes_pod_name"]
          target_label = "pod"
        }
        rule {
          source_labels = ["__meta_kubernetes_pod_container_name"]
          target_label = "container"
        }
        rule {
          source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"]
          target_label = "app"
        }
      }
      // All node-local pod logs -> "main" tenant.
      loki.source.kubernetes "all_logs" {
        targets    = discovery.relabel.all_pods.output
        forward_to = [loki.write.main.receiver]
      }
      // NOTE(review): chacal pods are also matched by all_pods above, so
      // their logs are shipped twice (once per tenant) — confirm intended.
      discovery.kubernetes "shared_pods" {
        role = "pod"
        selectors {
          role = "pod"
          field = "metadata.namespace=chacal"
        }
      }
      discovery.relabel "shared_pods" {
        targets = discovery.kubernetes.shared_pods.targets
        rule {
          source_labels = ["__meta_kubernetes_namespace"]
          target_label = "namespace"
        }
        rule {
          source_labels = ["__meta_kubernetes_pod_name"]
          target_label = "pod"
        }
        rule {
          source_labels = ["__meta_kubernetes_pod_container_name"]
          target_label = "container"
        }
        rule {
          source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"]
          target_label = "app"
        }
      }
      // chacal namespace logs -> "chacal" tenant.
      loki.source.kubernetes "shared_logs" {
        targets    = discovery.relabel.shared_pods.output
        forward_to = [loki.write.shared.receiver]
      }
      loki.write "main" {
        endpoint {
          url = "http://loki.monitoring.svc.cluster.local:3100/loki/api/v1/push"
          headers = {
            "X-Scope-OrgID" = "main",
          }
        }
      }
      loki.write "shared" {
        endpoint {
          url = "http://loki.monitoring.svc.cluster.local:3100/loki/api/v1/push"
          headers = {
            "X-Scope-OrgID" = "chacal",
          }
        }
      }
  mounts:
    varlog: true # Mount host /var/log for pod logs
  # NOTE(review): the grafana/alloy chart reads container resources from
  # alloy.resources; a top-level resources: key is ignored by the chart,
  # so these limits only take effect nested here — confirm against the
  # chart's values schema.
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 200m
      memory: 256Mi
controller:
  type: daemonset # Run on every node

90
default/openwebui.yaml Normal file
View File

@@ -0,0 +1,90 @@
# OpenWebUI: single-replica Deployment, ClusterIP Service, NFS-backed PVC
# for /app/backend/data, and an Ingress at http://openwebui.haven.
# 1) Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openwebui
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: openwebui
  template:
    metadata:
      labels:
        app: openwebui
    spec:
      containers:
        - name: openwebui
          image: ghcr.io/open-webui/open-webui:main-slim
          imagePullPolicy: Always # mutable "main-slim" tag; re-pull each start
          ports:
            - containerPort: 8080
          resources:
            requests:
              cpu: "250m"
              memory: "512Mi"
            limits:
              cpu: "1000m"
              memory: "1Gi"
          volumeMounts:
            - name: openwebui-data
              mountPath: /app/backend/data # app state, chats, config
      volumes:
        - name: openwebui-data
          persistentVolumeClaim:
            claimName: openwebui-data
---
# 2) Service
apiVersion: v1
kind: Service
metadata:
  name: openwebui
  namespace: default
spec:
  type: ClusterIP
  selector:
    app: openwebui
  ports:
    - port: 8080
      targetPort: 8080
---
# 3) PersistentVolumeClaim
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: openwebui-data
  namespace: default
  annotations:
    # Subdirectory the nfs-subdir provisioner creates on the share.
    nfs.io/storage-path: "openwebui-data"
spec:
  storageClassName: "nfs-client"
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
    # NOTE(review): PVC resources.limits is accepted by the API but not
    # enforced by provisioners — confirm whether this is intentional.
    limits:
      storage: 10Gi
---
# 4) Ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: openwebui
  namespace: default
  annotations:
    # Allow large uploads (documents, model files) through nginx.
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
  ingressClassName: nginx
  rules:
    - host: openwebui.haven
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: openwebui
                port:
                  number: 8080

View File

@@ -15,6 +15,7 @@ spec:
labels: labels:
app: searxng app: searxng
spec: spec:
enableServiceLinks: false
containers: containers:
- name: searxng - name: searxng
image: searxng/searxng:latest image: searxng/searxng:latest
@@ -27,6 +28,13 @@ spec:
ports: ports:
- containerPort: 8080 - containerPort: 8080
name: searxng-port name: searxng-port
resources:
requests:
cpu: "100m"
memory: "256Mi"
limits:
cpu: "500m"
memory: "512Mi"
volumeMounts: volumeMounts:
- name: searxng-config - name: searxng-config
mountPath: /etc/searxng mountPath: /etc/searxng

View File

@@ -81,6 +81,13 @@ spec:
- name: gitea-runner - name: gitea-runner
image: gitea/act_runner:latest image: gitea/act_runner:latest
imagePullPolicy: Always imagePullPolicy: Always
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "4000m"
memory: "4Gi"
volumeMounts: volumeMounts:
- name: config-volume - name: config-volume
mountPath: /etc/gitea-runner/config.yaml mountPath: /etc/gitea-runner/config.yaml
@@ -141,6 +148,13 @@ spec:
- name: gitea-runner - name: gitea-runner
image: gitea/act_runner:latest image: gitea/act_runner:latest
imagePullPolicy: Always imagePullPolicy: Always
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "4000m"
memory: "4Gi"
volumeMounts: volumeMounts:
- name: config-volume - name: config-volume
mountPath: /etc/gitea-runner/config.yaml mountPath: /etc/gitea-runner/config.yaml

4
docker-ingress/README.md Normal file
View File

@@ -0,0 +1,4 @@
## Create namespace
```bash
kubectl create namespace docker-ingress
```

View File

@@ -0,0 +1,44 @@
# Exposes the Docker-hosted changedetection instance through the cluster
# ingress: Service (no selector) -> manually managed Endpoints -> Ingress.
# docker-node: iris.haven
# port: 4100
# Service — intentionally has no selector; traffic is routed to the
# external addresses listed in the matching Endpoints object below.
apiVersion: v1
kind: Service
metadata:
  name: changedetection-service
  namespace: docker-ingress
spec:
  ports:
    - port: 80
      targetPort: 4100
---
# Endpoints — must share the Service's name to be picked up.
# NOTE(review): the Endpoints API is deprecated in favor of EndpointSlice
# (discovery.k8s.io/v1) — consider migrating.
apiVersion: v1
kind: Endpoints
metadata:
  name: changedetection-service
  namespace: docker-ingress
subsets:
  - addresses:
      - ip: 192.168.15.101
    ports:
      - port: 4100
---
# Ingress — http://change.haven -> changedetection-service:80
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: changedetection-ingress
  namespace: docker-ingress
spec:
  rules:
    - host: change.haven
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: changedetection-service
                port:
                  number: 80

View File

@@ -0,0 +1,44 @@
# Exposes the Docker-hosted Dockge instance through the cluster ingress:
# Service (no selector) -> manually managed Endpoints -> Ingress.
# docker-node: iris.haven
# port: 5001
# Service — intentionally has no selector; traffic is routed to the
# external addresses listed in the matching Endpoints object below.
apiVersion: v1
kind: Service
metadata:
  name: dockge-service
  namespace: docker-ingress
spec:
  ports:
    - port: 80
      targetPort: 5001
---
# Endpoints — must share the Service's name to be picked up.
# NOTE(review): the Endpoints API is deprecated in favor of EndpointSlice
# (discovery.k8s.io/v1) — consider migrating.
apiVersion: v1
kind: Endpoints
metadata:
  name: dockge-service
  namespace: docker-ingress
subsets:
  - addresses:
      - ip: 192.168.15.101
    ports:
      - port: 5001
---
# Ingress — http://dockge.haven -> dockge-service:80
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: dockge-ingress
  namespace: docker-ingress
spec:
  rules:
    - host: dockge.haven
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: dockge-service
                port:
                  number: 80

View File

@@ -0,0 +1,44 @@
# Exposes the Docker-hosted Paperless instance through the cluster ingress:
# Service (no selector) -> manually managed Endpoints -> Ingress.
# docker-node: iris.haven
# port: 4200
# Service — intentionally has no selector; traffic is routed to the
# external addresses listed in the matching Endpoints object below.
apiVersion: v1
kind: Service
metadata:
  name: paperless-service
  namespace: docker-ingress
spec:
  ports:
    - port: 80
      targetPort: 4200
---
# Endpoints — must share the Service's name to be picked up.
# NOTE(review): the Endpoints API is deprecated in favor of EndpointSlice
# (discovery.k8s.io/v1) — consider migrating.
apiVersion: v1
kind: Endpoints
metadata:
  name: paperless-service
  namespace: docker-ingress
subsets:
  - addresses:
      - ip: 192.168.15.101
    ports:
      - port: 4200
---
# Ingress — http://paperless.haven -> paperless-service:80
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: paperless-ingress
  namespace: docker-ingress
spec:
  rules:
    - host: paperless.haven
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: paperless-service
                port:
                  number: 80

View File

@@ -0,0 +1,44 @@
# Exposes the Docker-hosted Transmission instance through the cluster
# ingress: Service (no selector) -> manually managed Endpoints -> Ingress.
# NOTE(review): the endpoint IP below is 192.168.15.60, while the sibling
# docker-ingress files pair iris.haven with 192.168.15.101 — confirm which
# host actually runs Transmission and update this header accordingly.
# docker-node: iris.haven
# port: 3210
# Service — intentionally has no selector; traffic is routed to the
# external addresses listed in the matching Endpoints object below.
apiVersion: v1
kind: Service
metadata:
  name: transmission-service
  namespace: docker-ingress
spec:
  ports:
    - port: 80
      targetPort: 3210
---
# Endpoints — must share the Service's name to be picked up.
# NOTE(review): the Endpoints API is deprecated in favor of EndpointSlice
# (discovery.k8s.io/v1) — consider migrating.
apiVersion: v1
kind: Endpoints
metadata:
  name: transmission-service
  namespace: docker-ingress
subsets:
  - addresses:
      - ip: 192.168.15.60
    ports:
      - port: 3210
---
# Ingress — http://transmission.haven -> transmission-service:80
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: transmission-ingress
  namespace: docker-ingress
spec:
  rules:
    - host: transmission.haven
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: transmission-service
                port:
                  number: 80

View File

@@ -22,7 +22,7 @@ spec:
secretKeyRef: secretKeyRef:
name: beszel-key name: beszel-key
key: SECRET-KEY key: SECRET-KEY
image: henrygd/beszel-agent:0.14.1 image: henrygd/beszel-agent:0.17.0
imagePullPolicy: Always imagePullPolicy: Always
name: beszel-agent name: beszel-agent
ports: ports:

View File

@@ -26,7 +26,7 @@ spec:
- amd64 - amd64
containers: containers:
- name: beszel - name: beszel
image: ghcr.io/henrygd/beszel/beszel:0.14.1 image: ghcr.io/henrygd/beszel/beszel:0.17.0
imagePullPolicy: Always imagePullPolicy: Always
ports: ports:
- containerPort: 8090 - containerPort: 8090