feat: add kubernetes manifests for base infrastructure

master
arcbjorn 3 days ago
parent aecbd3a907
commit a23073fd78

@ -0,0 +1,107 @@
# Dozzle — real-time container log viewer. Runs as a DaemonSet so one
# instance exists per node (control-plane included, via the toleration).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: dozzle
  namespace: base-infrastructure
spec:
  selector:
    matchLabels:
      app: dozzle
  template:
    metadata:
      labels:
        app: dozzle
    spec:
      serviceAccountName: dozzle
      containers:
        - name: dozzle
          image: amir20/dozzle:latest
          ports:
            - containerPort: 8080
          env:
            # Read logs via the Kubernetes API using the ServiceAccount/RBAC
            # defined below. NOTE(review): without this Dozzle defaults to the
            # Docker socket, which is not mounted in this pod — confirm the
            # image version supports k8s mode.
            - name: DOZZLE_MODE
              value: "k8s"
            - name: DOZZLE_LEVEL
              value: "info"
            - name: DOZZLE_TAILSIZE
              value: "300"
          volumeMounts:
            - name: varlog
              mountPath: /var/log
              readOnly: true
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
          resources:
            requests:
              memory: "128Mi"
              cpu: "50m"
            limits:
              memory: "256Mi"
              cpu: "200m"
          livenessProbe:
            httpGet:
              path: /
              port: 8080
            initialDelaySeconds: 30
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /
              port: 8080
            initialDelaySeconds: 10
            periodSeconds: 10
      tolerations:
        # Also schedule onto control-plane nodes.
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
      volumes:
        - name: varlog
          hostPath:
            path: /var/log
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dozzle
  namespace: base-infrastructure
---
# Read-only access to pods and their logs, cluster-wide.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: dozzle
rules:
  - apiGroups: [""]
    resources: ["pods", "pods/log"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dozzle
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: dozzle
subjects:
  - kind: ServiceAccount
    name: dozzle
    namespace: base-infrastructure
---
apiVersion: v1
kind: Service
metadata:
  name: dozzle
  namespace: base-infrastructure
spec:
  selector:
    app: dozzle
  ports:
    - port: 8080
      targetPort: 8080
  type: ClusterIP

@ -0,0 +1,94 @@
# Filestash — web file manager, exposed via the "filebrowser" Service
# (the Ingress routes server.arcbjorn.com to that Service name).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: filestash
  namespace: base-infrastructure
spec:
  replicas: 1
  # Recreate: both PVCs are ReadWriteOnce, so a rolling update would
  # deadlock — the new pod cannot mount the volumes while the old pod
  # still holds them.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: filestash
  template:
    metadata:
      labels:
        app: filestash
    spec:
      containers:
        - name: filestash
          image: machines/filestash:latest
          ports:
            - containerPort: 8334
          volumeMounts:
            - name: filestash-data
              mountPath: /app/data/state
            - name: filestash-config
              mountPath: /app/data/config
          env:
            - name: APPLICATION_URL
              value: "https://server.arcbjorn.com"
          livenessProbe:
            httpGet:
              path: /api/session
              port: 8334
            initialDelaySeconds: 60
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /api/session
              port: 8334
            initialDelaySeconds: 30
            periodSeconds: 10
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
      volumes:
        - name: filestash-data
          persistentVolumeClaim:
            claimName: filestash-data
        - name: filestash-config
          persistentVolumeClaim:
            claimName: filestash-config
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: filestash-data
  namespace: base-infrastructure
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: filestash-config
  namespace: base-infrastructure
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
# Service name intentionally "filebrowser" — referenced by the Ingress.
apiVersion: v1
kind: Service
metadata:
  name: filebrowser
  namespace: base-infrastructure
spec:
  selector:
    app: filestash
  ports:
    - port: 8080
      targetPort: 8334
  type: ClusterIP

@ -0,0 +1,127 @@
# Gitea — self-hosted git service, backed by the shared PostgreSQL
# StatefulSet (credentials come from the app-secrets Secret).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitea
  namespace: base-infrastructure
spec:
  replicas: 1
  # Recreate: gitea-data is ReadWriteOnce, so a rolling update would
  # deadlock on volume attachment.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: gitea
  template:
    metadata:
      labels:
        app: gitea
    spec:
      containers:
        - name: gitea
          image: gitea/gitea:latest
          ports:
            - containerPort: 3000
              name: http
              protocol: TCP
            - containerPort: 22
              name: ssh
              protocol: TCP
          env:
            - name: USER_UID
              value: "1000"
            - name: USER_GID
              value: "1000"
            # GITEA__section__KEY env vars override app.ini settings.
            - name: GITEA__database__DB_TYPE
              value: "postgres"
            - name: GITEA__database__HOST
              value: "postgresql:5432"
            - name: GITEA__database__NAME
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: GIT_DB
            - name: GITEA__database__USER
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: GIT_DB_USER
            - name: GITEA__database__PASSWD
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: GIT_DB_USER_PASSWORD
            - name: GITEA__server__DOMAIN
              value: "git.arcbjorn.com"
            - name: GITEA__server__SSH_DOMAIN
              value: "git.arcbjorn.com"
            - name: GITEA__server__ROOT_URL
              value: "https://git.arcbjorn.com/"
          volumeMounts:
            - name: gitea-data
              mountPath: /data
            - name: timezone
              mountPath: /etc/timezone
              readOnly: true
            - name: localtime
              mountPath: /etc/localtime
              readOnly: true
          livenessProbe:
            httpGet:
              path: /api/healthz
              port: http
            # Long initial delay: first boot runs migrations/setup.
            initialDelaySeconds: 200
            timeoutSeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              path: /api/healthz
              port: http
            initialDelaySeconds: 30
            timeoutSeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
      volumes:
        - name: gitea-data
          persistentVolumeClaim:
            claimName: gitea-data
        # Share the node's timezone configuration with the container.
        - name: timezone
          hostPath:
            path: /etc/timezone
            type: File
        - name: localtime
          hostPath:
            path: /etc/localtime
            type: File
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea-data
  namespace: base-infrastructure
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
---
apiVersion: v1
kind: Service
metadata:
  name: gitea
  namespace: base-infrastructure
spec:
  selector:
    app: gitea
  ports:
    - port: 3000
      targetPort: http
      name: http
      protocol: TCP
    - port: 22
      targetPort: ssh
      name: ssh
      protocol: TCP
  type: ClusterIP

@ -0,0 +1,92 @@
# Single Ingress routing every public hostname to its in-cluster Service.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: base-infrastructure-ingress
  namespace: base-infrastructure
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"  # If using cert-manager
spec:
  # ingressClassName replaces the deprecated kubernetes.io/ingress.class
  # annotation (deprecated since Kubernetes 1.18).
  ingressClassName: nginx
  tls:
    - hosts:
        - git.arcbjorn.com
        - analytics.arcbjorn.com
        - uptime.arcbjorn.com
        - server.arcbjorn.com
        - logs.arcbjorn.com
        - memos.arcbjorn.com
      secretName: arcbjorn-tls
  rules:
    # Git Repository Hosting
    - host: git.arcbjorn.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: gitea
                port:
                  number: 3000
    # Analytics Dashboard
    - host: analytics.arcbjorn.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: umami
                port:
                  number: 3000
    # Uptime Monitoring
    - host: uptime.arcbjorn.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: uptime-kuma
                port:
                  number: 3001
    # File Browser (Filestash)
    - host: server.arcbjorn.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: filebrowser
                port:
                  number: 8080
    # Container Logs Viewer
    - host: logs.arcbjorn.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: dozzle
                port:
                  number: 8080
    # Notes and Memos
    - host: memos.arcbjorn.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: memos
                port:
                  number: 5230

@ -0,0 +1,88 @@
# Memos — self-hosted note-taking service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: memos
  namespace: base-infrastructure
spec:
  replicas: 1
  # Recreate: memos-data is ReadWriteOnce, so a rolling update would
  # deadlock on volume attachment.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: memos
  template:
    metadata:
      labels:
        app: memos
    spec:
      containers:
        - name: memos
          image: neosmemo/memos:stable
          ports:
            - containerPort: 5230
              name: http
              protocol: TCP
          env:
            - name: MEMOS_MODE
              value: "prod"
            - name: MEMOS_PORT
              value: "5230"
          volumeMounts:
            - name: memos-data
              mountPath: /var/opt/memos
          # NOTE(review): newer memos releases expose /healthz instead of
          # /api/v1/ping — verify against the deployed image version.
          livenessProbe:
            httpGet:
              path: /api/v1/ping
              port: http
            initialDelaySeconds: 30
            timeoutSeconds: 5
            periodSeconds: 30
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /api/v1/ping
              port: http
            initialDelaySeconds: 10
            timeoutSeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          resources:
            requests:
              memory: "512Mi"
              cpu: "100m"
            limits:
              memory: "1Gi"
              cpu: "500m"
      volumes:
        - name: memos-data
          persistentVolumeClaim:
            claimName: memos-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: memos-data
  namespace: base-infrastructure
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
  name: memos
  namespace: base-infrastructure
spec:
  selector:
    app: memos
  ports:
    - port: 5230
      targetPort: http
      name: http
      protocol: TCP
  type: ClusterIP

@ -0,0 +1,14 @@
# Non-sensitive shared configuration. Sensitive values live in the
# app-secrets Secret instead.
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
  namespace: base-infrastructure
data:
  # PostgreSQL Configuration
  POSTGRES_PORT: "5432"
  # Umami Configuration
  UMAMI_PORT: "3000"
  # Add other non-sensitive config values as needed
  # These correspond to your stack.env variables

@ -0,0 +1,6 @@
# Namespace holding all base infrastructure workloads.
apiVersion: v1
kind: Namespace
metadata:
  name: base-infrastructure
  labels:
    name: base-infrastructure

@ -0,0 +1,27 @@
# SECURITY NOTE(review): only placeholder values belong in version control.
# Replace via a secret manager / sealed-secrets before production use —
# base64 is encoding, not encryption.
apiVersion: v1
kind: Secret
metadata:
  name: app-secrets
  namespace: base-infrastructure
type: Opaque
data:
  # PostgreSQL Secrets (base64 encoded)
  # Replace these with your actual base64-encoded values from stack.env
  POSTGRES_USER: cG9zdGdyZXM=  # postgres
  POSTGRES_PASSWORD: Y2hhbmdlbWU=  # changeme
  POSTGRES_DB: cG9zdGdyZXM=  # postgres
  POSTGRES_MULTIPLE_DATABASES: ""  # your multiple DB config
  # Umami Database Secrets (uses shared PostgreSQL)
  UMAMI_DATABASE_URL: cG9zdGdyZXM6Ly9wb3N0Z3JlczpjaGFuZ2VtZUBwb3N0Z3Jlc3FsOjU0MzIvdW1hbWk=  # postgres://postgres:changeme@postgresql:5432/umami
  # Gitea Database Secrets
  GIT_DB: Z2l0ZWE=  # gitea
  GIT_DB_USER: Z2l0ZWE=  # gitea
  GIT_DB_USER_PASSWORD: Y2hhbmdlbWU=  # changeme
---
# Instructions for updating secrets:
# 1. Base64 encode your actual values: echo -n "your-value" | base64
# 2. Replace the placeholder values above
# 3. Apply: kubectl apply -f secrets.yaml

@ -0,0 +1,24 @@
#!/bin/bash
# Postgres init script (docker-entrypoint-initdb.d): creates one role and
# database per entry in POSTGRES_MULTIPLE_DATABASES, formatted as
# "db1,owner1:db2,owner2" — entries separated by ':', fields by ','.
set -e
set -u

# Create the owner role (idempotently) and its database from one
# "database,owner" entry passed as $1.
create_user_and_database() {
  local database owner
  database=$(echo "$1" | awk -F',' '{print $1}')
  owner=$(echo "$1" | awk -F',' '{print $2}')
  echo "  Creating user '$owner' and database '$database'"
  # CREATE ROLE has no IF NOT EXISTS clause, so guard it with a DO block.
  # NOTE(review): the original set the role's password to the database
  # name; that is preserved here — confirm it matches the consumer's
  # credentials (e.g. GIT_DB_USER_PASSWORD in app-secrets).
  psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<EOSQL
DO \$\$
BEGIN
  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '$owner') THEN
    CREATE ROLE $owner LOGIN;
  END IF;
END
\$\$;
ALTER ROLE $owner WITH PASSWORD '$database';
CREATE DATABASE $database OWNER $owner;
GRANT ALL PRIVILEGES ON DATABASE $database TO $owner;
EOSQL
}

if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then
  echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES"
  for db in $(echo "$POSTGRES_MULTIPLE_DATABASES" | tr ':' ' '); do
    create_user_and_database "$db"
  done
  echo "Multiple databases created"
fi

@ -0,0 +1,28 @@
# Regular ClusterIP Service — stable DNS name clients connect to.
apiVersion: v1
kind: Service
metadata:
  name: postgresql
  namespace: base-infrastructure
spec:
  selector:
    app: postgresql
  ports:
    - port: 5432
      targetPort: 5432
      name: postgresql
  type: ClusterIP
---
# Headless Service — required by the StatefulSet (serviceName) so each
# pod gets a stable per-pod DNS identity.
apiVersion: v1
kind: Service
metadata:
  name: postgresql-headless
  namespace: base-infrastructure
spec:
  selector:
    app: postgresql
  ports:
    - port: 5432
      targetPort: 5432
      name: postgresql
  clusterIP: None

@ -0,0 +1,76 @@
# Shared PostgreSQL instance (single replica) with per-pod persistent
# storage via volumeClaimTemplates.
# NOTE(review): `postgres:latest` on a StatefulSet risks an accidental
# major-version upgrade against an existing data directory — pin a major
# version (e.g. postgres:16) before production use.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgresql
  namespace: base-infrastructure
spec:
  serviceName: postgresql-headless
  replicas: 1
  selector:
    matchLabels:
      app: postgresql
  template:
    metadata:
      labels:
        app: postgresql
    spec:
      containers:
        - name: postgresql
          image: postgres:latest
          ports:
            - containerPort: 5432
              name: postgresql
          env:
            # Keep the data directory in a subfolder of the mount: many
            # dynamically-provisioned volumes place a lost+found directory
            # at the mount root, which makes initdb refuse to initialize.
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: POSTGRES_USER
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: POSTGRES_PASSWORD
            - name: POSTGRES_DB
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: POSTGRES_DB
            - name: POSTGRES_MULTIPLE_DATABASES
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: POSTGRES_MULTIPLE_DATABASES
          volumeMounts:
            - name: postgresql-data
              mountPath: /var/lib/postgresql/data
            - name: init-scripts
              mountPath: /docker-entrypoint-initdb.d
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: init-scripts
          configMap:
            name: postgresql-init-scripts
  volumeClaimTemplates:
    - metadata:
        name: postgresql-data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 20Gi

@ -0,0 +1,69 @@
# Umami — privacy-friendly web analytics, backed by the shared PostgreSQL.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: umami
  namespace: base-infrastructure
spec:
  replicas: 1
  selector:
    matchLabels:
      app: umami
  template:
    metadata:
      labels:
        app: umami
    spec:
      # Block the app container until PostgreSQL accepts connections,
      # so Umami's startup migrations don't fail on a cold cluster.
      initContainers:
        - name: wait-for-db
          image: postgres:12-alpine
          command: ['sh', '-c']
          args:
            - |
              until pg_isready -h postgresql -p 5432 -U postgres; do
                echo "Waiting for PostgreSQL database..."
                sleep 2
              done
      containers:
        - name: umami
          image: docker.umami.is/umami-software/umami:postgresql-latest
          ports:
            - containerPort: 3000
          env:
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: app-secrets
                  key: UMAMI_DATABASE_URL
          livenessProbe:
            httpGet:
              path: /api/heartbeat
              port: 3000
            initialDelaySeconds: 60
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /api/heartbeat
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
---
apiVersion: v1
kind: Service
metadata:
  name: umami
  namespace: base-infrastructure
spec:
  selector:
    app: umami
  ports:
    - port: 3000
      targetPort: 3000
  type: ClusterIP

@ -0,0 +1,73 @@
# Uptime Kuma — self-hosted uptime monitoring.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: uptime-kuma
  namespace: base-infrastructure
spec:
  replicas: 1
  # Recreate: uptime-kuma-data is ReadWriteOnce, so a rolling update
  # would deadlock on volume attachment.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: uptime-kuma
  template:
    metadata:
      labels:
        app: uptime-kuma
    spec:
      containers:
        - name: uptime-kuma
          image: louislam/uptime-kuma:latest
          ports:
            - containerPort: 3001
          volumeMounts:
            - name: uptime-kuma-data
              mountPath: /app/data
          livenessProbe:
            httpGet:
              path: /
              port: 3001
            initialDelaySeconds: 60
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /
              port: 3001
            initialDelaySeconds: 30
            periodSeconds: 10
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
      volumes:
        - name: uptime-kuma-data
          persistentVolumeClaim:
            claimName: uptime-kuma-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: uptime-kuma-data
  namespace: base-infrastructure
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
apiVersion: v1
kind: Service
metadata:
  name: uptime-kuma
  namespace: base-infrastructure
spec:
  selector:
    app: uptime-kuma
  ports:
    - port: 3001
      targetPort: 3001
  type: ClusterIP
Loading…
Cancel
Save