chore: add k8s migration docs & scripts

master
arcbjorn 1 day ago
parent 1b6522e3f2
commit 8544769f77

@@ -0,0 +1,508 @@
#!/bin/bash
# Docker Compose to Kubernetes Migration Script
# This script provides executable commands for the migration process
# Run one phase at a time via the commands below; do not execute all phases at once
set -e # Exit on any error
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging functions
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Check if running in correct directory
check_directory() {
if [[ ! -f "CLAUDE.md" ]] || [[ ! -d "k8s" ]]; then
log_error "Must run from base_infrastructure directory"
log_info "Current directory: $(pwd)"
log_info "Expected files: CLAUDE.md, k8s/ directory"
exit 1
fi
}
# Check SSH environment and requirements
check_ssh_environment() {
log_info "Checking SSH environment..."
# Check if we're in SSH session
if [[ -n "$SSH_CLIENT" ]] || [[ -n "$SSH_TTY" ]]; then
log_info "✅ Running over SSH connection"
# Check if kubectl is available
if ! command -v kubectl &> /dev/null; then
log_error "kubectl not found. Install with:"
echo " curl -LO \"https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl\""
echo " sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl"
exit 1
fi
# Check if docker/docker-compose is available for backups
if ! command -v docker &> /dev/null; then
log_warning "Docker not found - backup phase may not work"
fi
# Check multipass VM access if that's where K8s is running
if command -v multipass &> /dev/null; then
if multipass list | grep -q "k8s-master.*Running"; then
log_info "✅ Multipass k8s-master VM detected and running"
# Check if we can access kubectl through the VM
if ! kubectl cluster-info &> /dev/null; then
log_warning "kubectl not configured. You may need to:"
echo " 1. Copy kubeconfig from VM: multipass exec k8s-master -- sudo cat /etc/kubernetes/admin.conf > ~/.kube/config"
echo " 2. Update server IP in kubeconfig to VM IP"
echo " 3. Or run migration script inside VM: multipass shell k8s-master"
fi
else
log_error "k8s-master VM not found or not running"
echo "Start with: multipass start k8s-master"
exit 1
fi
fi
else
log_info "Running locally (not over SSH)"
fi
}
# Phase 0: Pre-Migration Backup
phase0_backup() {
log_info "=== PHASE 0: Pre-Migration Backup ==="
# Create backup directory
BACKUP_DIR="$HOME/k8s-migration-backup/$(date +%Y%m%d-%H%M%S)"
mkdir -p "$BACKUP_DIR"
cd "$BACKUP_DIR"
log_success "Created backup directory: $BACKUP_DIR"
# Backup Docker volumes
log_info "Backing up Docker volumes..."
# PostgreSQL (CRITICAL)
if [[ -d "/root/containers/postgresql/data" ]]; then
sudo tar -czf postgresql-data-backup.tar.gz -C /root/containers/postgresql/data . 2>/dev/null || {
log_warning "PostgreSQL backup failed - directory may not exist or permission issue"
}
fi
# Gitea (CRITICAL)
if [[ -d "/root/containers/gitea/data" ]]; then
sudo tar -czf gitea-data-backup.tar.gz -C /root/containers/gitea/data . 2>/dev/null || {
log_warning "Gitea backup failed - directory may not exist or permission issue"
}
fi
# Other services
for service in pgadmin filebrowser uptime-kuma memos; do
if [[ -d "/root/containers/$service" ]]; then
sudo tar -czf "${service}-backup.tar.gz" -C "/root/containers/$service" . 2>/dev/null || {
log_warning "$service backup failed - directory may not exist"
}
fi
done
# List created backups
log_info "Created backups:"
ls -lah *.tar.gz 2>/dev/null || log_warning "No backup files found"
log_success "Phase 0 completed. Backup directory: $BACKUP_DIR"
echo "BACKUP_DIR=$BACKUP_DIR" > ~/.k8s-migration-env
}
# Phase 1: Infrastructure Setup
phase1_infrastructure() {
log_info "=== PHASE 1: Infrastructure Setup ==="
# Check kubectl connectivity
kubectl cluster-info >/dev/null 2>&1 || {
log_error "kubectl not configured or cluster not accessible"
exit 1
}
# Apply namespace
log_info "Creating namespace..."
kubectl apply -f k8s/namespace/namespace.yaml
# Apply configmap
log_info "Creating configmap..."
kubectl apply -f k8s/namespace/configmap.yaml
# Check if secrets need to be updated
if grep -q "changeme" k8s/namespace/secrets.yaml; then
log_warning "⚠️ Secrets file contains placeholder values!"
log_warning "Edit k8s/namespace/secrets.yaml with real base64-encoded values before continuing"
echo "Example commands to encode secrets:"
echo " echo -n 'your_postgres_password' | base64"
echo " echo -n 'your_gitea_password' | base64"
echo ""
read -p "Press Enter after updating secrets file to continue..."
fi
# Apply secrets
log_info "Creating secrets..."
kubectl apply -f k8s/namespace/secrets.yaml
# Verify resources
kubectl get namespaces | grep base-infrastructure >/dev/null && log_success "Namespace created"
kubectl get secrets -n base-infrastructure | grep app-secrets >/dev/null && log_success "Secrets created"
kubectl get configmap -n base-infrastructure >/dev/null && log_success "ConfigMap created"
log_success "Phase 1 completed"
}
# Phase 2: PostgreSQL Deployment
phase2_postgresql() {
log_info "=== PHASE 2: PostgreSQL Deployment ==="
# Deploy PostgreSQL
log_info "Deploying PostgreSQL..."
kubectl apply -f k8s/postgresql/postgresql-statefulset.yaml
kubectl apply -f k8s/postgresql/postgresql-service.yaml
# Wait for PostgreSQL to be ready
log_info "Waiting for PostgreSQL to be ready (up to 5 minutes)..."
if kubectl wait --for=condition=ready pod -l app=postgresql -n base-infrastructure --timeout=300s; then
log_success "PostgreSQL is ready"
else
log_error "PostgreSQL failed to start within timeout"
kubectl logs -n base-infrastructure -l app=postgresql --tail=20
return 1
fi
# Test PostgreSQL connection
log_info "Testing PostgreSQL connection..."
if kubectl exec -n base-infrastructure postgresql-0 -- pg_isready -U postgres >/dev/null 2>&1; then
log_success "PostgreSQL connection test passed"
else
log_error "PostgreSQL connection test failed"
return 1
fi
log_success "Phase 2 completed"
}
# Phase 3: Data Migration
phase3_data_migration() {
log_info "=== PHASE 3: Data Migration ==="
# Load backup directory
if [[ -f ~/.k8s-migration-env ]]; then
source ~/.k8s-migration-env
else
log_error "Backup directory not found. Run phase0 first."
return 1
fi
if [[ ! -d "$BACKUP_DIR" ]]; then
log_error "Backup directory $BACKUP_DIR does not exist"
return 1
fi
cd "$BACKUP_DIR"
# Migrate PostgreSQL data (if backup exists)
if [[ -f "postgresql-data-backup.tar.gz" ]]; then
log_info "Migrating PostgreSQL data..."
kubectl cp postgresql-data-backup.tar.gz base-infrastructure/postgresql-0:/tmp/ || {
log_error "Failed to copy PostgreSQL backup to pod"
return 1
}
# Note: This will replace existing data in the pod
log_warning "This will replace any existing PostgreSQL data in the pod"
read -p "Continue with PostgreSQL data migration? (y/N): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
kubectl exec -n base-infrastructure postgresql-0 -- bash -c "
cd /var/lib/postgresql/data &&
rm -rf * &&
tar -xzf /tmp/postgresql-data-backup.tar.gz &&
chown -R postgres:postgres .
" || {
log_error "PostgreSQL data migration failed"
return 1
}
# Restart PostgreSQL to pick up data
kubectl delete pod postgresql-0 -n base-infrastructure
kubectl wait --for=condition=ready pod -l app=postgresql -n base-infrastructure --timeout=300s
log_success "PostgreSQL data migrated and restarted"
fi
else
log_warning "No PostgreSQL backup found, skipping data migration"
fi
log_success "Phase 3 completed"
}
# Phase 4: Services Deployment
phase4_services() {
log_info "=== PHASE 4: Services Deployment ==="
# Deploy services that depend on PostgreSQL
log_info "Deploying Gitea..."
kubectl apply -f k8s/gitea/gitea-deployment.yaml
log_info "Deploying Umami..."
kubectl apply -f k8s/umami/umami-deployment.yaml
# Deploy other services
log_info "Deploying other services..."
kubectl apply -f k8s/memos/memos-deployment.yaml
kubectl apply -f k8s/uptime-kuma/uptime-kuma-deployment.yaml
kubectl apply -f k8s/dozzle/dozzle-deployment.yaml
kubectl apply -f k8s/filestash/filestash-deployment.yaml
# Wait for services to be ready
services=("gitea" "umami" "memos" "uptime-kuma" "dozzle")
for service in "${services[@]}"; do
log_info "Waiting for $service to be ready..."
if kubectl wait --for=condition=ready pod -l app=$service -n base-infrastructure --timeout=300s 2>/dev/null; then
log_success "$service is ready"
else
log_warning "$service failed to start within timeout (may not exist or different label)"
kubectl get pods -n base-infrastructure -l app=$service
fi
done
log_success "Phase 4 completed"
}
# Phase 5: Ingress & Networking
phase5_networking() {
log_info "=== PHASE 5: Ingress & Networking ==="
# Check if ingress controller is running
if ! kubectl get pods -n ingress-nginx | grep -q "Running"; then
log_warning "Ingress controller not found or not running"
log_info "Install ingress controller with:"
echo "kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.2/deploy/static/provider/baremetal/deploy.yaml"
read -p "Press Enter after installing ingress controller..."
fi
# Apply ingress rules
log_info "Applying ingress rules..."
kubectl apply -f k8s/ingress/ingress.yaml
# Show ingress status
kubectl get ingress -n base-infrastructure
log_success "Phase 5 completed"
}
# Phase 6: Verification
phase6_verification() {
log_info "=== PHASE 6: Verification ==="
# Check all pods
log_info "Checking pod status..."
kubectl get pods -n base-infrastructure
# Check services
log_info "Checking services..."
kubectl get services -n base-infrastructure
# Check ingress
log_info "Checking ingress..."
kubectl get ingress -n base-infrastructure
# Test database connectivity
log_info "Testing database connectivity..."
if kubectl exec -n base-infrastructure postgresql-0 -- psql -U postgres -c '\l' >/dev/null 2>&1; then
log_success "Database connectivity test passed"
else
log_error "Database connectivity test failed"
fi
# Count running pods
running_pods=$(kubectl get pods -n base-infrastructure --no-headers | grep "Running" | wc -l)
total_pods=$(kubectl get pods -n base-infrastructure --no-headers | wc -l)
log_info "Pod status: $running_pods/$total_pods running"
if [[ $running_pods -eq $total_pods ]] && [[ $total_pods -gt 0 ]]; then
log_success "All pods are running!"
else
log_warning "Not all pods are running. Check logs:"
kubectl get pods -n base-infrastructure | grep -v "Running"
fi
log_success "Phase 6 completed"
}
# Phase 7: Docker Cleanup (DANGEROUS)
phase7_cleanup() {
log_warning "=== PHASE 7: Docker Cleanup (DANGEROUS) ==="
log_warning "This will stop and remove Docker Compose services"
log_warning "Only proceed if Kubernetes migration is fully verified!"
read -p "Are you absolutely sure you want to proceed with Docker cleanup? (type 'yes'): " confirm
if [[ $confirm != "yes" ]]; then
log_info "Docker cleanup cancelled"
return 0
fi
# Stop Docker Compose services
services_dirs=("caddy" "postgresql" "gitea" "umami" "memos" "uptime-kuma" "dozzle" "filebrowser")
for service_dir in "${services_dirs[@]}"; do
if [[ -d "$service_dir" ]] && [[ -f "$service_dir/docker-compose.yaml" || -f "$service_dir/docker-compose.yml" ]]; then
log_info "Stopping $service_dir..."
(cd "$service_dir" && docker-compose down 2>/dev/null) || log_warning "Failed to stop $service_dir"
fi
done
# Clean up Docker resources
log_info "Cleaning up Docker containers..."
docker container prune -f 2>/dev/null || log_warning "Container cleanup failed"
log_info "Cleaning up Docker images..."
docker image prune -f 2>/dev/null || log_warning "Image cleanup failed"
log_info "Cleaning up Docker networks..."
docker network prune -f 2>/dev/null || log_warning "Network cleanup failed"
log_warning "Docker volumes NOT cleaned up for safety"
log_info "To clean volumes manually: docker volume prune -f"
log_success "Phase 7 completed"
}
# Status check function
status_check() {
log_info "=== Kubernetes Cluster Status ==="
echo "Cluster Info:"
kubectl cluster-info
echo -e "\nNamespace Resources:"
kubectl get all -n base-infrastructure
echo -e "\nPod Details:"
kubectl get pods -n base-infrastructure -o wide
echo -e "\nIngress Status:"
kubectl get ingress -n base-infrastructure
echo -e "\nPersistent Volumes:"
kubectl get pv,pvc -n base-infrastructure
}
# Port forward for testing
port_forward_test() {
log_info "=== Port Forward Test ==="
log_info "Starting port forwards for testing..."
# Kill any existing port forwards
pkill -f "kubectl port-forward" 2>/dev/null || true
# Start port forwards in background
kubectl port-forward -n base-infrastructure svc/gitea 3000:3000 &
kubectl port-forward -n base-infrastructure svc/umami 3001:3000 &
kubectl port-forward -n base-infrastructure svc/postgresql 5432:5432 &
log_success "Port forwards started:"
echo " Gitea: http://localhost:3000"
echo " Umami: http://localhost:3001"
echo " PostgreSQL: localhost:5432"
echo ""
echo "Press Ctrl+C to stop port forwards"
# Wait for interrupt
trap 'pkill -f "kubectl port-forward"; log_info "Port forwards stopped"' INT
wait
}
# Help function
show_help() {
echo "Docker Compose to Kubernetes Migration Script"
echo ""
echo "Usage: $0 <command>"
echo ""
echo "Commands:"
echo " phase0 - Create backups of Docker volumes"
echo " phase1 - Set up Kubernetes infrastructure (namespace, secrets)"
echo " phase2 - Deploy PostgreSQL"
echo " phase3 - Migrate data from Docker to Kubernetes"
echo " phase4 - Deploy application services"
echo " phase5 - Set up ingress and networking"
echo " phase6 - Verify deployment"
echo " phase7 - Clean up Docker resources (DANGEROUS)"
echo " status - Show cluster status"
echo " test - Start port forwards for testing"
echo " help - Show this help"
echo ""
echo "Run phases in order. Each phase should complete successfully before proceeding."
echo ""
echo "Example workflow:"
echo " $0 phase0 # Backup"
echo " $0 phase1 # Infrastructure"
echo " $0 phase2 # PostgreSQL"
echo " $0 phase3 # Data migration"
echo " $0 phase4 # Services"
echo " $0 phase5 # Networking"
echo " $0 phase6 # Verification"
echo " $0 test # Test access"
echo " $0 phase7 # Cleanup (only when satisfied)"
}
# Main execution
main() {
check_directory
check_ssh_environment
case "${1:-help}" in
"phase0"|"backup")
phase0_backup
;;
"phase1"|"infrastructure")
phase1_infrastructure
;;
"phase2"|"postgresql")
phase2_postgresql
;;
"phase3"|"data")
phase3_data_migration
;;
"phase4"|"services")
phase4_services
;;
"phase5"|"networking")
phase5_networking
;;
"phase6"|"verify")
phase6_verification
;;
"phase7"|"cleanup")
phase7_cleanup
;;
"status")
status_check
;;
"test"|"port-forward")
port_forward_test
;;
"help"|"--help"|"-h")
show_help
;;
*)
log_error "Unknown command: $1"
show_help
exit 1
;;
esac
}
# Only run main if script is executed directly
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi

@@ -0,0 +1,460 @@
# Docker Compose to Kubernetes Migration Guide
This guide provides a complete step-by-step migration process from Docker Compose to Kubernetes.
## Prerequisites
- ✅ Kubernetes cluster running (confirmed in k8s-migration.md)
- ✅ kubectl configured and working
- ✅ Access to current Docker Compose services
- ✅ Portainer secrets available
## Phase 0: Pre-Migration Backup & Preparation
### 1. Create Backup Directory
```bash
# Capture the timestamp once so mkdir and cd refer to the same directory
BACKUP_DIR=~/k8s-migration-backup/$(date +%Y%m%d-%H%M%S)
mkdir -p "$BACKUP_DIR"
cd "$BACKUP_DIR"
```
### 2. Backup All Docker Volumes
```bash
# PostgreSQL data (CRITICAL)
sudo tar -czf postgresql-data-backup.tar.gz -C /root/containers/postgresql/data .
# Gitea data (CRITICAL)
sudo tar -czf gitea-data-backup.tar.gz -C /root/containers/gitea/data .
# Other service data
sudo tar -czf pgadmin-backup.tar.gz -C /root/containers/pgadmin .
sudo tar -czf filebrowser-backup.tar.gz -C /root/containers/filebrowser .
sudo tar -czf uptime-kuma-backup.tar.gz -C /root/containers/uptime-kuma .
sudo tar -czf memos-backup.tar.gz -C /root/containers/memos .
# Verify backups
ls -lah *.tar.gz
```
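It is worth confirming the archives are actually readable before anything is torn down; a corrupt backup is far easier to fix now than during a restore:
```bash
# Verify each archive is a readable gzip/tar stream
for f in *.tar.gz; do
  tar -tzf "$f" > /dev/null && echo "OK: $f" || echo "CORRUPT: $f"
done
# Record checksums so the files can be re-verified after any copy
# (use `shasum -a 256` instead on macOS)
sha256sum *.tar.gz > backup-checksums.txt
```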
### 3. Export Docker Compose Environment
```bash
# Extract current environment variables from Portainer or stack.env
# Save them to a temporary file for reference
cat > docker-secrets.env << 'EOF'
# Copy your actual values from Portainer:
POSTGRES_USER=postgres
POSTGRES_PASSWORD=your_actual_password
POSTGRES_DB=postgres
POSTGRES_MULTIPLE_DATABASES=gitea,umami,memos
# Gitea database
GIT_DB=gitea
GIT_DB_USER=gitea
GIT_DB_USER_PASSWORD=your_actual_password
# pgAdmin
PGADMIN_DEFAULT_EMAIL=admin@example.com
PGADMIN_DEFAULT_PASSWORD=your_actual_password
# Add any other secrets from Portainer
EOF
```
### 4. Create Kubernetes Secrets
```bash
# Base64 encode your secrets (replace with actual values)
echo -n "your_postgres_password" | base64
echo -n "your_gitea_password" | base64
echo -n "admin@example.com" | base64
# Edit the secrets file with real values
nano k8s/namespace/secrets.yaml
```
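If you already exported the values to `docker-secrets.env` above, you can generate the manifest instead of hand-encoding each value. A sketch, assuming the Secret is named `app-secrets` (the name Phase 1 of the migration script checks for) and that your deployments reference keys by these env-var names:
```bash
# Generates a Secret manifest from the env file; base64 encoding is done for you
kubectl create secret generic app-secrets \
  --namespace=base-infrastructure \
  --from-env-file=docker-secrets.env \
  --dry-run=client -o yaml > k8s/namespace/secrets.yaml
```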
## Phase 1: Infrastructure Setup
### 1. Create Namespace and Core Resources
```bash
# Apply namespace
kubectl apply -f k8s/namespace/namespace.yaml
# Verify namespace
kubectl get namespaces
# Apply configmap (for PostgreSQL init scripts)
kubectl apply -f k8s/namespace/configmap.yaml
# Apply secrets (after updating with real values)
kubectl apply -f k8s/namespace/secrets.yaml
# Verify secrets (don't show values)
kubectl get secrets -n base-infrastructure
```
### 2. Start PostgreSQL (Foundation Service)
```bash
# Apply PostgreSQL StatefulSet and Service
kubectl apply -f k8s/postgresql/postgresql-statefulset.yaml
kubectl apply -f k8s/postgresql/postgresql-service.yaml
# Wait for PostgreSQL to be ready (this is CRITICAL)
kubectl wait --for=condition=ready pod -l app=postgresql -n base-infrastructure --timeout=300s
# Verify PostgreSQL is running
kubectl get pods -n base-infrastructure -l app=postgresql
kubectl logs -n base-infrastructure -l app=postgresql
```
### 3. Migrate PostgreSQL Data
```bash
# Copy PostgreSQL backup to the running pod
kubectl cp postgresql-data-backup.tar.gz base-infrastructure/postgresql-0:/tmp/
# Extract data in the pod (if migrating existing data)
kubectl exec -it postgresql-0 -n base-infrastructure -- bash -c "
cd /var/lib/postgresql/data
tar -xzf /tmp/postgresql-data-backup.tar.gz
chown -R postgres:postgres .
"
# Test PostgreSQL connection
kubectl exec -it postgresql-0 -n base-infrastructure -- psql -U postgres -c '\l'
```
## Phase 2: Core Services Deployment
### 1. Deploy Gitea
```bash
# Apply Gitea deployment
kubectl apply -f k8s/gitea/gitea-deployment.yaml
# Wait for Gitea to be ready
kubectl wait --for=condition=ready pod -l app=gitea -n base-infrastructure --timeout=300s
# Check Gitea logs
kubectl logs -n base-infrastructure -l app=gitea --tail=50
```
### 2. Migrate Gitea Data
```bash
# Find Gitea pod name
GITEA_POD=$(kubectl get pods -n base-infrastructure -l app=gitea -o jsonpath='{.items[0].metadata.name}')
# Copy Gitea backup
kubectl cp gitea-data-backup.tar.gz base-infrastructure/$GITEA_POD:/tmp/
# Extract Gitea data
kubectl exec -it $GITEA_POD -n base-infrastructure -- bash -c "
cd /data
tar -xzf /tmp/gitea-data-backup.tar.gz
chown -R 1000:1000 .
"
# Restart Gitea to pick up data
kubectl rollout restart deployment/gitea -n base-infrastructure
kubectl wait --for=condition=ready pod -l app=gitea -n base-infrastructure --timeout=300s
```
### 3. Deploy Other Services
```bash
# Deploy all other services
kubectl apply -f k8s/umami/umami-deployment.yaml
kubectl apply -f k8s/memos/memos-deployment.yaml
kubectl apply -f k8s/uptime-kuma/uptime-kuma-deployment.yaml
kubectl apply -f k8s/dozzle/dozzle-deployment.yaml
# Note: filestash-deployment.yaml is likely the replacement for filebrowser
kubectl apply -f k8s/filestash/filestash-deployment.yaml
# Wait for all services to be ready
kubectl wait --for=condition=ready pod -l app=umami -n base-infrastructure --timeout=300s
kubectl wait --for=condition=ready pod -l app=memos -n base-infrastructure --timeout=300s
kubectl wait --for=condition=ready pod -l app=uptime-kuma -n base-infrastructure --timeout=300s
kubectl wait --for=condition=ready pod -l app=dozzle -n base-infrastructure --timeout=300s
```
### 4. Migrate Service Data
```bash
# Migrate other service data as needed
# Example for memos:
MEMOS_POD=$(kubectl get pods -n base-infrastructure -l app=memos -o jsonpath='{.items[0].metadata.name}')
kubectl cp memos-backup.tar.gz base-infrastructure/$MEMOS_POD:/tmp/
kubectl exec -it $MEMOS_POD -n base-infrastructure -- bash -c "cd /var/opt/memos && tar -xzf /tmp/memos-backup.tar.gz"
# Repeat for other services as needed
```
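The per-service steps above generalize into a small loop. A hedged sketch; the uptime-kuma data path is an assumption, so verify each path against the volumeMounts in the corresponding deployment before running:
```bash
# service:in-pod-data-path pairs (paths are assumptions; confirm them first)
for entry in "memos:/var/opt/memos" "uptime-kuma:/app/data"; do
  svc=${entry%%:*}
  path=${entry#*:}
  pod=$(kubectl get pods -n base-infrastructure -l app=$svc -o jsonpath='{.items[0].metadata.name}')
  kubectl cp "${svc}-backup.tar.gz" "base-infrastructure/${pod}:/tmp/"
  kubectl exec -n base-infrastructure "$pod" -- sh -c "cd $path && tar -xzf /tmp/${svc}-backup.tar.gz"
done
```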
## Phase 3: Networking & Ingress
### 1. Deploy Ingress
```bash
# Apply ingress rules
kubectl apply -f k8s/ingress/ingress.yaml
# Check ingress status
kubectl get ingress -n base-infrastructure
kubectl describe ingress -n base-infrastructure
```
### 2. Verify Service Access
```bash
# Check all pods are running
kubectl get pods -n base-infrastructure
# Check all services
kubectl get services -n base-infrastructure
# Test internal connectivity
kubectl exec -it postgresql-0 -n base-infrastructure -- nslookup gitea
```
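Beyond DNS resolution, HTTP reachability can be tested from inside the cluster with a throwaway pod (curlimages/curl is a small public curl image):
```bash
# Expect a 200/302 status code from the Gitea service
kubectl run curl-test --rm -it --restart=Never -n base-infrastructure \
  --image=curlimages/curl -- curl -sS -o /dev/null -w '%{http_code}\n' http://gitea:3000
```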
## Phase 4: k8s-webui Setup (CI/CD)
### 1. Configure Gitea Container Registry
```bash
# Access Gitea admin panel through port-forward first
kubectl port-forward -n base-infrastructure svc/gitea 3000:3000
# In browser: http://localhost:3000
# Go to Site Administration > Configuration
# Enable "Enable Container Registry" if not already enabled
```
### 2. Create Gitea CI/CD Pipeline
Create in your k8s-webui repositories:
```yaml
# .gitea/workflows/build-and-deploy.yaml
name: Build and Deploy k8s-webui
on:
push:
branches: [main]
jobs:
build-backend:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Build and push backend
run: |
docker build -t git.arcbjorn.com/archellir/k8s-webui-backend:latest ./backend
docker push git.arcbjorn.com/archellir/k8s-webui-backend:latest
build-frontend:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Build and push frontend
run: |
docker build -t git.arcbjorn.com/archellir/k8s-webui-frontend:latest ./frontend
docker push git.arcbjorn.com/archellir/k8s-webui-frontend:latest
deploy:
needs: [build-backend, build-frontend]
runs-on: ubuntu-latest
steps:
- name: Deploy to k8s
run: |
kubectl rollout restart deployment/k8s-webui-backend -n base-infrastructure
kubectl rollout restart deployment/k8s-webui-frontend -n base-infrastructure
```
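Note that both build jobs push to the Gitea registry at git.arcbjorn.com, which requires a `docker login` first. A minimal sketch, assuming a CI secret (here called `REGISTRY_TOKEN`) that holds a Gitea access token with package write permissions:
```bash
# Run before `docker push` in each build job.
# REGISTRY_TOKEN is an assumed secret name; create a Gitea access token
# and expose it to the workflow as a secret.
echo "$REGISTRY_TOKEN" | docker login git.arcbjorn.com -u archellir --password-stdin
```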
### 3. Deploy k8s-webui
```bash
# After CI/CD is set up and images are built
kubectl apply -f k8s/k8s-webui/k8s-webui-deployment.yaml
# Wait for deployment
kubectl wait --for=condition=ready pod -l app=k8s-webui-backend -n base-infrastructure --timeout=300s
kubectl wait --for=condition=ready pod -l app=k8s-webui-frontend -n base-infrastructure --timeout=300s
```
## Phase 5: Verification & Testing
### 1. Service Health Check
```bash
# Check all pods are running
kubectl get pods -n base-infrastructure
# Check service endpoints
kubectl get endpoints -n base-infrastructure
# Test database connections
kubectl exec -it postgresql-0 -n base-infrastructure -- psql -U postgres -c '\l'
# Check logs for errors
kubectl logs -n base-infrastructure -l app=gitea --tail=20
kubectl logs -n base-infrastructure -l app=umami --tail=20
```
### 2. External Access Test
```bash
# Port forward to test services
kubectl port-forward -n base-infrastructure svc/gitea 3000:3000 &
kubectl port-forward -n base-infrastructure svc/umami 3001:3000 &
kubectl port-forward -n base-infrastructure svc/k8s-webui-frontend 3002:3000 &
# Test in browser:
# http://localhost:3000 - Gitea
# http://localhost:3001 - Umami
# http://localhost:3002 - k8s-webui
# Kill port forwards
pkill -f "kubectl port-forward"
```
### 3. Data Integrity Check
```bash
# Verify PostgreSQL databases exist
kubectl exec -it postgresql-0 -n base-infrastructure -- psql -U postgres -c '\l'
# Check Gitea repositories are accessible
# Check Umami analytics data
# Verify other service data
```
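The commented checks above can be made concrete against the databases themselves. A sketch, assuming the database names from `POSTGRES_MULTIPLE_DATABASES` and Gitea's standard schema (repositories live in a `repository` table):
```bash
# Count migrated Gitea repositories
kubectl exec -it postgresql-0 -n base-infrastructure -- \
  psql -U postgres -d gitea -c 'SELECT count(*) FROM repository;'
# List Umami tables to confirm the schema survived
kubectl exec -it postgresql-0 -n base-infrastructure -- \
  psql -U postgres -d umami -c '\dt'
```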
## Phase 6: DNS & Production Access
### 1. Update DNS Records
Point your domains to the Kubernetes ingress IP:
```bash
# Get ingress IP
kubectl get ingress -n base-infrastructure -o wide
# Update DNS A records:
# git.arcbjorn.com -> <ingress-ip>
# analytics.arcbjorn.com -> <ingress-ip>
# uptime.arcbjorn.com -> <ingress-ip>
# etc.
```
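Once the records are updated, confirm propagation before relying on them:
```bash
# Each should print the ingress IP from the command above
dig +short git.arcbjorn.com
dig +short analytics.arcbjorn.com
```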
### 2. SSL Certificate Check
```bash
# Check if cert-manager is handling SSL
kubectl get certificates -n base-infrastructure
kubectl describe ingress -n base-infrastructure
```
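cert-manager status aside, the certificate actually being served can be inspected directly (run once DNS points at the ingress):
```bash
# Show subject, issuer, and validity window of the live certificate
echo | openssl s_client -connect git.arcbjorn.com:443 -servername git.arcbjorn.com 2>/dev/null \
  | openssl x509 -noout -subject -issuer -dates
```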
## Phase 7: Docker Cleanup (ONLY AFTER VERIFICATION)
### 1. Stop Docker Compose Services
```bash
# Navigate to each service directory and stop
cd caddy && docker-compose down
cd ../postgresql && docker-compose down
cd ../gitea && docker-compose down
cd ../umami && docker-compose down
cd ../memos && docker-compose down
cd ../uptime-kuma && docker-compose down
cd ../dozzle && docker-compose down
cd ../filebrowser && docker-compose down
```
### 2. Clean Up Docker Resources
```bash
# Remove containers
docker container prune -f
# Remove dangling images
docker image prune -f
# Remove unused volumes (BE CAREFUL - only after data is confirmed migrated)
docker volume prune -f
# Remove unused networks
docker network prune -f
# Clean up everything (DANGEROUS - only when confident)
# docker system prune -a --volumes -f
```
### 3. Archive Old Configuration
```bash
# Move docker-compose files to archive
mkdir -p ~/docker-compose-archive
cp -r caddy/ postgresql/ gitea/ umami/ memos/ uptime-kuma/ dozzle/ filebrowser/ ~/docker-compose-archive/
```
## Troubleshooting
### Common Issues & Solutions
1. **Pod stuck in Pending**
```bash
kubectl describe pod <pod-name> -n base-infrastructure
# Check: PVC binding, resource constraints, node capacity
```
2. **Database connection errors**
```bash
kubectl logs -n base-infrastructure -l app=postgresql
kubectl exec -it postgresql-0 -n base-infrastructure -- pg_isready
```
3. **Service not accessible**
```bash
kubectl get endpoints -n base-infrastructure
kubectl port-forward -n base-infrastructure svc/<service-name> 8080:3000
```
4. **Ingress not working**
```bash
kubectl get ingress -n base-infrastructure
kubectl logs -n ingress-nginx -l app.kubernetes.io/component=controller
```
### Rollback Procedure
If migration fails:
```bash
# 1. Scale down K8s deployments
kubectl scale deployment --all --replicas=0 -n base-infrastructure
# 2. Restore Docker Compose services
cd postgresql && docker-compose up -d
cd ../gitea && docker-compose up -d
# ... etc
# 3. Restore data if needed (use the correct timestamped backup directory;
#    a bare * glob will fail if more than one backup exists)
sudo tar -xzf ~/k8s-migration-backup/<timestamp>/postgresql-data-backup.tar.gz -C /root/containers/postgresql/data/
```
## Post-Migration Checklist
- [ ] All services running in Kubernetes
- [ ] Data integrity verified
- [ ] External access working via ingress
- [ ] SSL certificates working
- [ ] DNS records updated
- [ ] Monitoring/logging working
- [ ] Backup strategy updated for K8s
- [ ] Docker resources cleaned up
- [ ] Documentation updated
## Migration Success Verification
Run these final checks:
```bash
# All pods running (--no-headers keeps the header line from tripping grep -v)
kubectl get pods -n base-infrastructure --no-headers | grep -v Running && echo "❌ Some pods not running" || echo "✅ All pods running"
# All services have endpoints
kubectl get endpoints -n base-infrastructure
# Database accessible
kubectl exec -it postgresql-0 -n base-infrastructure -- psql -U postgres -c 'SELECT version();'
# Ingress configured
kubectl get ingress -n base-infrastructure
echo "🎉 Migration completed successfully!"
```
---
**⚠️ Important Notes:**
- Always backup before starting migration
- Test each phase before proceeding
- Keep Docker Compose running until K8s is fully verified
- Monitor logs during migration for issues
- Have rollback plan ready
**Migration Time Estimate:** 2-4 hours depending on data size and complexity

@@ -0,0 +1,234 @@
# SSH Migration Setup Guide
This guide covers setting up the migration process when connecting to your server via SSH.
## Architecture Overview
Based on your k8s-migration.md, you have:
- **Host Server**: macOS with Docker Compose services
- **K8s Cluster**: Running in Multipass VM (Ubuntu 22.04)
- **Access Method**: SSH connection to host server
## Setup Options
### Option 1: Run Migration from SSH Session (Recommended)
This approach runs the migration script directly on your server via SSH.
#### Prerequisites on Server:
```bash
# 1. Install kubectl (if not already installed)
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
# 2. Configure kubectl to access your K8s cluster
# Copy kubeconfig from the Multipass VM (~/.kube must exist first)
mkdir -p ~/.kube
multipass exec k8s-master -- sudo cat /etc/kubernetes/admin.conf > ~/.kube/config
# 3. Update the kubeconfig server IP to the VM IP (find the VM IP first)
# (on macOS, BSD sed needs an empty suffix argument: sed -i '' "s/...")
VM_IP=$(multipass info k8s-master | grep IPv4 | awk '{print $2}')
sed -i "s/127.0.0.1:6443/$VM_IP:6443/g" ~/.kube/config
# 4. Test kubectl access
kubectl cluster-info
kubectl get nodes
```
#### SSH Connection Setup:
```bash
# From your local machine, connect with port forwarding for testing
ssh -L 6443:localhost:6443 -L 3000:localhost:3000 -L 3001:localhost:3001 user@your-server
# Or without port forwarding for basic access
ssh user@your-server
```
### Option 2: Run Migration Inside Multipass VM
Run the migration directly inside the K8s VM where kubectl is already configured.
```bash
# SSH to your server first
ssh user@your-server
# Enter the Multipass VM
multipass shell k8s-master
# Inside VM: Clone/copy your infrastructure repo
git clone <your-repo> /home/ubuntu/base_infrastructure
# OR copy files from host:
# multipass transfer /path/to/base_infrastructure k8s-master:/home/ubuntu/
# Inside VM: Run migration
cd /home/ubuntu/base_infrastructure
chmod +x migration-commands.sh
./migration-commands.sh help
```
## Key Considerations for SSH Migration
### 1. Volume Access
The Docker volumes are on your **host server** at `/root/containers/`, but kubectl accesses the **Multipass VM**. The migration script handles this by:
- Creating backups on the host server (where Docker volumes exist)
- Copying backups to K8s pods (which run in the VM)
- Extracting data inside the pods
### 2. Network Access
- **Docker Compose services**: Run on host server
- **Kubernetes services**: Run in Multipass VM
- **Port forwarding**: May be needed for testing services
### 3. File Transfer
When running from SSH, files need to move:
```
Host Server (/root/containers/) → Backup Files → K8s Pods (in VM)
```
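For Option 2 (kubectl only available inside the VM), the backup makes both hops explicitly; a sketch using `multipass transfer`:
```bash
# On the host server: copy a backup into the VM
multipass transfer ~/k8s-migration-backup/<timestamp>/postgresql-data-backup.tar.gz k8s-master:/home/ubuntu/
# Inside the VM (multipass shell k8s-master): copy it on into the pod
kubectl cp /home/ubuntu/postgresql-data-backup.tar.gz base-infrastructure/postgresql-0:/tmp/
```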
## Pre-Migration SSH Checklist
Run these commands to verify your SSH setup:
```bash
# 1. SSH to your server
ssh user@your-server
# 2. Check if you're in SSH session
echo "SSH_CLIENT: $SSH_CLIENT"
echo "SSH_TTY: $SSH_TTY"
# 3. Check Multipass VM status
multipass list
# 4. Check kubectl access
kubectl cluster-info
kubectl get nodes
# 5. Check Docker access (for backups)
docker ps
# 6. Check base infrastructure directory
ls -la base_infrastructure/
cd base_infrastructure && ls -la k8s/
# 7. Run migration script environment check
./migration-commands.sh help
```
## Modified Migration Workflow for SSH
### Phase 0: Backup (Host Server)
```bash
# This runs on host server where Docker volumes exist
./migration-commands.sh phase0
```
### Phase 1-6: K8s Operations (Multipass VM via kubectl)
```bash
# These run via kubectl commands that connect to Multipass VM
./migration-commands.sh phase1 # Setup K8s infrastructure
./migration-commands.sh phase2 # Deploy PostgreSQL in VM
./migration-commands.sh phase3 # Copy backup files to VM pods
./migration-commands.sh phase4 # Deploy services in VM
./migration-commands.sh phase5 # Setup ingress in VM
./migration-commands.sh phase6 # Verify services in VM
```
### Phase 7: Cleanup (Host Server)
```bash
# This stops Docker Compose services on host server
./migration-commands.sh phase7
```
## Testing Access via SSH
### Port Forward for Testing
```bash
# From your SSH session, forward ports for testing
kubectl port-forward -n base-infrastructure svc/gitea 3000:3000 &
kubectl port-forward -n base-infrastructure svc/umami 3001:3000 &
# Then from another SSH terminal or with SSH tunnel:
curl http://localhost:3000 # Test Gitea
curl http://localhost:3001 # Test Umami
```
### SSH Tunnel for Browser Access
```bash
# From your local machine, create SSH tunnel
ssh -L 3000:VM_IP:30000 -L 3001:VM_IP:30001 user@your-server
# Where VM_IP is your Multipass VM IP
# And 30000, 30001 are NodePort services (if configured)
```
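If the services are currently ClusterIP only, one way to get the NodePorts the tunnel assumes is to patch them. A hedged example for Gitea (30000 matches the tunnel above; assumes the service exposes port 3000):
```bash
# Strategic merge patch: existing port fields (e.g. targetPort) are preserved
kubectl patch svc gitea -n base-infrastructure \
  -p '{"spec":{"type":"NodePort","ports":[{"port":3000,"nodePort":30000}]}}'
```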
## Troubleshooting SSH Issues
### kubectl Connection Issues
```bash
# Check if kubectl can reach K8s API server
kubectl cluster-info
# If connection fails, verify:
# 1. Multipass VM is running
multipass list
# 2. VM IP is correct in kubeconfig
grep server ~/.kube/config
# 3. Port 6443 is reachable (nc is more commonly installed than telnet)
nc -vz <VM_IP> 6443
```
### Docker Volume Access Issues
```bash
# Check if Docker volumes exist and are accessible
sudo ls -la /root/containers/
sudo ls -la /root/containers/postgresql/data/
# Check Docker daemon is running
docker ps
```
### File Transfer Issues
```bash
# Check if kubectl cp works
kubectl get pods -n base-infrastructure
kubectl cp --help
# Test file transfer to pod
echo "test" > /tmp/test.txt
kubectl cp /tmp/test.txt base-infrastructure/postgresql-0:/tmp/
kubectl exec -n base-infrastructure postgresql-0 -- ls -la /tmp/
```
## Migration Success Verification over SSH
After migration completes, verify everything works:
```bash
# 1. All K8s pods running
kubectl get pods -n base-infrastructure
# 2. Services accessible via port-forward
kubectl port-forward -n base-infrastructure svc/gitea 3000:3000 &
curl http://localhost:3000
# 3. Database connectivity
kubectl exec -n base-infrastructure postgresql-0 -- psql -U postgres -c '\l'
# 4. Ingress working (if external IP configured)
kubectl get ingress -n base-infrastructure
```
## Summary
The migration **will work over SSH** with the updated script. The key points:
- **Script detects the SSH environment** and provides appropriate guidance
- **Backup phase** runs on the host server, where the Docker volumes exist
- **K8s operations** run via kubectl connecting to the Multipass VM
- **Data transfer** is handled via `kubectl cp`, which moves backups into pods
- **Testing** is possible via port-forward and SSH tunnels
Choose **Option 1** (SSH to the host server) for the easiest setup, or **Option 2** (inside the Multipass VM) if you prefer direct access to the K8s cluster.