Initial monorepo scaffold

Turborepo + pnpm monorepo for k3s homelab cluster on Intel NUCs.

- Apps: Next.js web frontend, Express API (TypeScript, Dockerfiles, k8s manifests)
- Packages: shared UI, ESLint config, TypeScript config, Drizzle DB schemas
- Infra/Ansible: bare-metal provisioning with roles for common, k3s-server, k3s-agent, hardening
- Infra/Kubernetes: ArgoCD GitOps (app-of-apps + ApplicationSets), platform components
  (cert-manager, Traefik, CloudNativePG, Valkey, Longhorn, Sealed Secrets), namespaces
- Observability: kube-prometheus-stack, Loki, Promtail as ArgoCD Applications
- CI/CD: GitHub Actions for PR builds, preview deploys, production deploys
- DX: Taskfile, utility scripts, copier templates, Ubiquiti network docs
This commit is contained in:
Julia McGhee
2026-03-19 22:24:56 +00:00
commit 96e3f32f28
118 changed files with 2681 additions and 0 deletions

17
.env.example Normal file
View File

@@ -0,0 +1,17 @@
# GitHub Container Registry
GHCR_TOKEN=
GITHUB_USERNAME=
# Cluster
KUBECONFIG=~/.kube/homelab
K3S_TOKEN=
# DNS (for cert-manager DNS-01 challenge)
CF_API_TOKEN=
# Database
POSTGRES_PASSWORD=
POSTGRES_USER=homelab
# Valkey (Redis-compatible)
VALKEY_PASSWORD=

79
.github/workflows/ci.yaml vendored Normal file
View File

@@ -0,0 +1,79 @@
name: CI

on:
  pull_request:
    branches: [main]
  push:
    branches: [main]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

# GITHUB_TOKEN scopes: the GHCR docker push below requires packages: write.
permissions:
  contents: read
  packages: write

jobs:
  # Detect which apps changed so the build matrix only covers affected images.
  changes:
    runs-on: ubuntu-latest
    outputs:
      # JSON array of matched filter names, e.g. ["web","api"].
      apps: ${{ steps.filter.outputs.changes }}
    steps:
      - uses: actions/checkout@v4
      - uses: dorny/paths-filter@v3
        id: filter
        with:
          filters: |
            web:
              - 'apps/web/**'
              - 'packages/**'
            api:
              - 'apps/api/**'
              - 'packages/**'

  lint-and-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: pnpm
      - run: pnpm install --frozen-lockfile
      - run: pnpm turbo lint test

  build:
    needs: [changes, lint-and-test]
    runs-on: ubuntu-latest
    if: needs.changes.outputs.apps != '[]'
    strategy:
      matrix:
        app: ${{ fromJson(needs.changes.outputs.apps) }}
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: pnpm
      - run: pnpm install --frozen-lockfile
      - run: pnpm turbo build --filter=@homelab/${{ matrix.app }}
      - name: Build Docker image
        # github.event.number only exists on pull_request events; on push it is
        # empty and would produce a broken ":pr-" tag, so add that tag
        # conditionally.
        run: |
          TAGS="-t ghcr.io/${{ github.repository_owner }}/homelab-${{ matrix.app }}:${{ github.sha }}"
          if [ "${{ github.event_name }}" = "pull_request" ]; then
            TAGS="$TAGS -t ghcr.io/${{ github.repository_owner }}/homelab-${{ matrix.app }}:pr-${{ github.event.number }}"
          fi
          docker build $TAGS apps/${{ matrix.app }}
      - name: Push to GHCR
        # This workflow only runs on push and pull_request, so no event guard
        # is needed (the previous condition was always true).
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
          docker push ghcr.io/${{ github.repository_owner }}/homelab-${{ matrix.app }}:${{ github.sha }}

59
.github/workflows/deploy-preview.yaml vendored Normal file
View File

@@ -0,0 +1,59 @@
name: Deploy Preview

on:
  pull_request:
    types: [opened, synchronize, reopened]

# GITHUB_TOKEN scopes: push images to GHCR, push the image-tag bump commit
# back to the PR branch, and comment the preview URL on the PR.
permissions:
  contents: write
  packages: write
  pull-requests: write

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          # Check out the PR head branch (not the detached merge ref) so the
          # kustomize edits below can be committed back to the branch ArgoCD
          # syncs from; fetch full history so origin/main exists for the
          # turbo `...[origin/main]` change filter.
          ref: ${{ github.head_ref }}
          fetch-depth: 0
      - uses: pnpm/action-setup@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: pnpm
      - run: pnpm install --frozen-lockfile
      - name: Determine changed apps
        id: changes
        run: |
          APPS=$(pnpm turbo build --filter='...[origin/main]' --dry-run=json | jq -r '[.packages[] | select(startswith("@homelab/")) | sub("@homelab/";"") ] | join(",")')
          echo "apps=$APPS" >> "$GITHUB_OUTPUT"
      - name: Build and push images
        if: steps.changes.outputs.apps != ''
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
          IFS=',' read -ra APPS <<< "${{ steps.changes.outputs.apps }}"
          for app in "${APPS[@]}"; do
            docker build \
              -t ghcr.io/${{ github.repository_owner }}/homelab-${app}:${{ github.sha }} \
              apps/${app}
            docker push ghcr.io/${{ github.repository_owner }}/homelab-${app}:${{ github.sha }}
          done
      - name: Update image tags in preview overlay
        if: steps.changes.outputs.apps != ''
        run: |
          IFS=',' read -ra APPS <<< "${{ steps.changes.outputs.apps }}"
          for app in "${APPS[@]}"; do
            cd apps/${app}/k8s/overlays/preview
            kustomize edit set image ghcr.io/${{ github.repository_owner }}/homelab-${app}=ghcr.io/${{ github.repository_owner }}/homelab-${app}:${{ github.sha }}
            cd -
          done
      - name: Commit image tag updates
        # Without this commit the kustomize edits die with the runner and
        # ArgoCD never sees the new image tags.
        if: steps.changes.outputs.apps != ''
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add apps/*/k8s/overlays/preview/
          # [skip ci] prevents the bot commit from re-triggering this workflow.
          git diff --staged --quiet || git commit -m "deploy: preview images ${{ github.sha }} [skip ci]"
          git push
      - name: Comment preview URL
        uses: actions/github-script@v7
        with:
          script: |
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: `## Preview Deploy\nNamespace: \`preview-${context.issue.number}\`\nArgoCD will sync automatically from branch \`${context.payload.pull_request.head.ref}\`.`
            })

View File

@@ -0,0 +1,59 @@
name: Deploy Production

on:
  push:
    branches: [main]

# Serialize production deploys: two rapid pushes to main must not interleave
# their image builds and git pushes.
concurrency:
  group: deploy-production
  cancel-in-progress: false

# GITHUB_TOKEN scopes: push images to GHCR and push the image-tag bump
# commit back to main.
permissions:
  contents: write
  packages: write

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          # HEAD~1 must exist for the turbo `...[HEAD~1]` change filter; the
          # default shallow clone fetches only a single commit.
          fetch-depth: 2
      - uses: pnpm/action-setup@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: pnpm
      - run: pnpm install --frozen-lockfile
      - name: Determine changed apps
        id: changes
        run: |
          APPS=$(pnpm turbo build --filter='...[HEAD~1]' --dry-run=json | jq -r '[.packages[] | select(startswith("@homelab/")) | sub("@homelab/";"") ] | join(",")')
          echo "apps=$APPS" >> "$GITHUB_OUTPUT"
      - name: Build and push images
        if: steps.changes.outputs.apps != ''
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
          IFS=',' read -ra APPS <<< "${{ steps.changes.outputs.apps }}"
          for app in "${APPS[@]}"; do
            docker build \
              -t ghcr.io/${{ github.repository_owner }}/homelab-${app}:${{ github.sha }} \
              -t ghcr.io/${{ github.repository_owner }}/homelab-${app}:latest \
              apps/${app}
            docker push ghcr.io/${{ github.repository_owner }}/homelab-${app}:${{ github.sha }}
            docker push ghcr.io/${{ github.repository_owner }}/homelab-${app}:latest
          done
      - name: Update image tags in production overlay
        if: steps.changes.outputs.apps != ''
        run: |
          IFS=',' read -ra APPS <<< "${{ steps.changes.outputs.apps }}"
          for app in "${APPS[@]}"; do
            cd apps/${app}/k8s/overlays/production
            kustomize edit set image ghcr.io/${{ github.repository_owner }}/homelab-${app}=ghcr.io/${{ github.repository_owner }}/homelab-${app}:${{ github.sha }}
            cd -
          done
      - name: Commit image tag updates
        if: steps.changes.outputs.apps != ''
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add apps/*/k8s/overlays/production/
          # [skip ci] is required: the bot pushes to main, which would
          # otherwise re-trigger this same workflow in an endless loop.
          git diff --staged --quiet || git commit -m "deploy: update production images to ${{ github.sha }} [skip ci]"
          git push

44
.gitignore vendored Normal file
View File

@@ -0,0 +1,44 @@
# Dependencies
node_modules/
.pnpm-store/
# Build
dist/
.next/
.turbo/
out/
# Environment
.env
.env.local
.env.*.local
!.env.example
# OS
.DS_Store
Thumbs.db
# IDE
.vscode/
.idea/
*.swp
*.swo
# Kubernetes
/kubeconfig*
!/kubeconfig.example
*.decoded.yaml
# Ansible
*.retry
.vault_pass
# Python
__pycache__/
*.pyc
.venv/
# Secrets — never commit
**/secrets/*.yaml
!**/secrets/*.example.yaml
*-sealed.yaml.bak

5
.tool-versions Normal file
View File

@@ -0,0 +1,5 @@
nodejs 20.18.1
pnpm 9.15.4
python 3.12.4
kubectl 1.31.4
helm 3.16.4

48
CLAUDE.md Normal file
View File

@@ -0,0 +1,48 @@
# Homelab Monorepo
## Overview
Monorepo for frontend/backend apps deployed to a k3s cluster on Intel NUC machines.
GitOps via ArgoCD, bare-metal provisioning via Ansible, Turborepo for app builds.
## Tech Stack
- **Monorepo**: Turborepo + pnpm workspaces
- **Apps**: Next.js (frontend), Express (API), TypeScript
- **Infrastructure**: k3s, ArgoCD, Ansible
- **Platform**: Traefik, cert-manager, CloudNativePG, Valkey, Longhorn, Sealed Secrets
- **Observability**: kube-prometheus-stack, Loki, Promtail, Grafana
- **CI/CD**: GitHub Actions → ghcr.io → ArgoCD
- **Task Runner**: Taskfile (go-task)
## Directory Structure
- `apps/` — Deployable applications (each has src/, Dockerfile, k8s/ manifests)
- `packages/` — Shared libraries (ui, config-eslint, config-typescript, db)
- `infra/ansible/` — Bare-metal provisioning playbooks and roles
- `infra/kubernetes/` — K8s manifests (argocd, platform, observability, namespaces)
- `infra/ubiquiti/` — Network documentation for Ubiquiti Cloud Gateway
- `scripts/` — Utility scripts (new-app, seal-secret, kubeconfig-fetch)
- `templates/` — Copier templates for scaffolding new apps
## Conventions
- All apps use Kustomize with base + overlays (preview, production)
- K8s manifests live inside each app at `k8s/`
- Secrets are sealed with kubeseal before committing
- Branch deploys create `preview-<PR#>` namespaces automatically
- Use `task <command>` for all operations (see Taskfile.yaml)
- Docker images tagged with git SHA, pushed to ghcr.io
## Common Commands
```bash
task dev # Start all apps in dev mode
task build # Build all apps
task lint # Lint everything
task cluster:bootstrap # Provision NUCs with Ansible
task cluster:kubeconfig # Fetch kubeconfig from server node
task platform:install # Bootstrap ArgoCD + platform
task secrets:seal # Seal a secret for git
```
## Testing
- Run `task test` for all tests
- Run `task lint` for linting (includes ansible-lint)
- K8s manifests: `kubectl apply --dry-run=client -f <file>`
- Kustomize: `kustomize build <dir>` to verify rendering

74
README.md Normal file
View File

@@ -0,0 +1,74 @@
# Homelab
Monorepo for self-hosted applications running on a k3s cluster across Intel NUC machines.
## What's Inside
- **Apps**: Next.js frontend, Express API (TypeScript)
- **Infrastructure**: Ansible provisioning, ArgoCD GitOps, full Kubernetes platform
- **Observability**: Prometheus, Grafana, Loki
## Prerequisites
- [Node.js](https://nodejs.org/) >= 20
- [pnpm](https://pnpm.io/) >= 9
- [go-task](https://taskfile.dev/) >= 3
- [kubectl](https://kubernetes.io/docs/tasks/tools/) >= 1.31
- [Helm](https://helm.sh/) >= 3.16
- [Ansible](https://docs.ansible.com/) >= 2.16
- [kubeseal](https://github.com/bitnami-labs/sealed-secrets) >= 0.27
## Quick Start
```bash
# Install dependencies
pnpm install
# Start development
task dev
# Build all apps
task build
```
## Bootstrap Cluster (Day 1)
1. Flash Ubuntu Server 24.04 on NUCs, configure SSH access
2. Configure Ubiquiti Cloud Gateway (see `infra/ubiquiti/`)
3. Provision nodes and install k3s:
```bash
task cluster:bootstrap
task cluster:kubeconfig
```
4. Install platform components:
```bash
task platform:install
```
5. Seal initial secrets:
```bash
task secrets:seal
```
6. Push an app — ArgoCD handles the rest
## Branch Deploys
Push a branch or open a PR → GitHub Actions builds changed apps → ArgoCD creates a preview namespace → merge to main deploys to production.
## Project Structure
```
homelab/
├── apps/ # Deployable applications
├── packages/ # Shared libraries
├── infra/
│ ├── ansible/ # Bare-metal provisioning
│ ├── kubernetes/ # K8s manifests (ArgoCD, platform, observability)
│ └── ubiquiti/ # Network documentation
├── scripts/ # Utility scripts
├── templates/ # App scaffolding templates
└── Taskfile.yaml # Task runner
```
## License
Private — All rights reserved.

111
Taskfile.yaml Normal file
View File

@@ -0,0 +1,111 @@
version: "3"

vars:
  ANSIBLE_DIR: infra/ansible
  K8S_DIR: infra/kubernetes

tasks:
  # --- Development ---
  dev:
    desc: Start all apps in dev mode
    cmds:
      - pnpm turbo dev

  build:
    desc: Build all apps
    cmds:
      - pnpm turbo build

  lint:
    desc: Lint everything
    cmds:
      - pnpm turbo lint
      - task: lint:ansible
      - task: lint:k8s

  test:
    desc: Run all tests
    cmds:
      - pnpm turbo test

  # --- Cluster operations ---
  cluster:bootstrap:
    desc: Provision all nodes and install k3s
    dir: "{{.ANSIBLE_DIR}}"
    cmds:
      - ansible-playbook playbooks/site.yaml

  cluster:bootstrap-check:
    desc: Dry-run cluster bootstrap
    dir: "{{.ANSIBLE_DIR}}"
    cmds:
      - ansible-playbook playbooks/site.yaml --check --diff

  cluster:kubeconfig:
    desc: Fetch kubeconfig from server node
    cmds:
      - ./scripts/kubeconfig-fetch.sh

  cluster:upgrade:
    desc: Rolling upgrade of k3s
    dir: "{{.ANSIBLE_DIR}}"
    cmds:
      - ansible-playbook playbooks/k3s-upgrade.yaml

  cluster:reset:
    desc: DESTRUCTIVE - Reset k3s cluster
    dir: "{{.ANSIBLE_DIR}}"
    cmds:
      - ansible-playbook playbooks/reset.yaml

  # --- Platform ---
  platform:install:
    desc: Bootstrap ArgoCD and platform components
    cmds:
      - kubectl apply -k {{.K8S_DIR}}/argocd/
      - echo "ArgoCD installed. It will sync remaining platform components."

  platform:status:
    desc: Check ArgoCD app sync status
    cmds:
      - kubectl get applications -n argocd

  # --- Secrets ---
  secrets:seal:
    desc: Seal a Kubernetes secret
    cmds:
      - ./scripts/seal-secret.sh {{.CLI_ARGS}}

  # --- Utilities ---
  port-forward:grafana:
    desc: Port-forward Grafana
    cmds:
      - kubectl port-forward -n observability svc/kube-prometheus-stack-grafana 3001:80

  port-forward:argocd:
    desc: Port-forward ArgoCD UI
    cmds:
      - kubectl port-forward -n argocd svc/argocd-server 8080:443

  port-forward:pg:
    desc: Port-forward PostgreSQL
    cmds:
      - kubectl port-forward -n platform svc/homelab-pg-rw 5432:5432

  # --- Linting ---
  # Note: ignore_error was removed from both lint tasks — with it, `task lint`
  # always succeeded regardless of findings, which defeats linting.
  lint:ansible:
    desc: Lint Ansible playbooks
    dir: "{{.ANSIBLE_DIR}}"
    cmds:
      - ansible-lint playbooks/ roles/

  lint:k8s:
    desc: Validate K8s manifests
    cmds:
      # Fail on kustomize render errors (real problems). The previous
      # `kubectl apply --dry-run=client ... || true` step was dropped: it
      # swallowed every failure, and the dry-run needs API-server access to
      # resolve CRDs (e.g. ArgoCD Applications) that may be absent locally.
      - |
        status=0
        for dir in $(find {{.K8S_DIR}} -name 'kustomization.yaml' -exec dirname {} \;); do
          echo "Validating $dir..."
          if ! kustomize build "$dir" > /dev/null; then
            echo "FAILED: $dir"
            status=1
          fi
        done
        exit $status

25
apps/api/Dockerfile Normal file
View File

@@ -0,0 +1,25 @@
# Multi-stage build for the Express API: deps -> builder -> slim runtime.
FROM node:20-alpine AS base

# --- deps: install node_modules from the lockfile only ---
FROM base AS deps
WORKDIR /app
# NOTE(review): the `pnpm-lock.yaml*` glob makes the lockfile optional, but in
# a pnpm workspace the lockfile normally lives at the repo root, not in
# apps/api — if it is missing here, `--frozen-lockfile` fails. Confirm the
# build context (CI builds with context `apps/<app>`).
COPY package.json pnpm-lock.yaml* ./
RUN corepack enable pnpm && pnpm install --frozen-lockfile

# --- builder: compile TypeScript to dist/ ---
FROM base AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .
RUN corepack enable pnpm && pnpm build

# --- runner: production image, non-root user, compiled output only ---
FROM base AS runner
WORKDIR /app
ENV NODE_ENV=production
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 appuser
COPY --from=builder /app/dist ./dist
# Full node_modules (including dev deps) is copied; consider a pruned
# production install to shrink the image.
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package.json ./
USER appuser
EXPOSE 4000
ENV PORT=4000
CMD ["node", "dist/index.js"]

View File

@@ -0,0 +1,50 @@
# API Deployment — one container on :4000 with /health readiness/liveness
# probes. (Scraped listing: original indentation was flattened.)
apiVersion: apps/v1
kind: Deployment
metadata:
name: api
labels:
app: api
spec:
# Base replica count; overlays override it (preview: 1, production: 2).
replicas: 1
selector:
matchLabels:
app: api
template:
metadata:
labels:
app: api
spec:
containers:
- name: api
# "OWNER" and ":latest" are placeholders — CI pins the real image via
# `kustomize edit set image` in the overlays.
image: ghcr.io/OWNER/homelab-api:latest
ports:
- containerPort: 4000
env:
# Both values come from the (sealed) api-secrets Secret.
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: api-secrets
key: database-url
- name: VALKEY_URL
valueFrom:
secretKeyRef:
name: api-secrets
key: valkey-url
resources:
requests:
memory: 128Mi
cpu: 100m
# NOTE(review): memory-only limit (no cpu limit) — confirm intentional.
limits:
memory: 512Mi
readinessProbe:
httpGet:
path: /health
port: 4000
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /health
port: 4000
initialDelaySeconds: 15
periodSeconds: 20

View File

@@ -0,0 +1,23 @@
# Traefik Ingress for the API with cert-manager TLS.
# (Scraped listing: original indentation was flattened.)
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: api
annotations:
# NOTE(review): Let's Encrypt cannot issue certificates for non-public
# hosts like *.local — confirm the issuer setup (the repo's .env.example
# mentions a Cloudflare DNS-01 token, which implies a real domain) or
# expect issuance to fail for this host.
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
ingressClassName: traefik
rules:
- host: api.homelab.local
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: api
# Routes to the Service's port 80, which targets container port 4000.
port:
number: 80
tls:
- hosts:
- api.homelab.local
secretName: api-tls

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
- ingress.yaml

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: api
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 4000
protocol: TCP
selector:
app: api

View File

@@ -0,0 +1,25 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- target:
kind: Deployment
name: api
patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: api
spec:
replicas: 1
- target:
kind: Ingress
name: api
patch: |
- op: replace
path: /spec/rules/0/host
value: api-preview.homelab.local
- op: replace
path: /spec/tls/0/hosts/0
value: api-preview.homelab.local

View File

@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- target:
kind: Deployment
name: api
patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: api
spec:
replicas: 2

24
apps/api/package.json Normal file
View File

@@ -0,0 +1,24 @@
{
"name": "@homelab/api",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsup src/index.ts --format cjs --outDir dist",
"start": "node dist/index.js",
"lint": "tsc --noEmit",
"test": "echo \"no tests yet\""
},
"dependencies": {
"express": "^4.21.0",
"cors": "^2.8.5"
},
"devDependencies": {
"@types/express": "^5.0.0",
"@types/cors": "^2.8.17",
"@types/node": "^22.10.0",
"tsup": "^8.3.0",
"tsx": "^4.19.0",
"typescript": "^5.7.0"
}
}

20
apps/api/src/index.ts Normal file
View File

@@ -0,0 +1,20 @@
import express from "express";
import cors from "cors";
const app = express();
const port = process.env.PORT || 4000;
app.use(cors());
app.use(express.json());
app.get("/health", (_req, res) => {
res.json({ status: "ok", timestamp: new Date().toISOString() });
});
app.get("/api", (_req, res) => {
res.json({ message: "Homelab API", version: "0.1.0" });
});
app.listen(port, () => {
console.log(`API server running on port ${port}`);
});

19
apps/api/tsconfig.json Normal file
View File

@@ -0,0 +1,19 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "commonjs",
"lib": ["ES2022"],
"outDir": "./dist",
"rootDir": "./src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"declaration": true,
"declarationMap": true,
"sourceMap": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

26
apps/web/Dockerfile Normal file
View File

@@ -0,0 +1,26 @@
# Multi-stage build for the Next.js frontend using "standalone" output
# (see next.config.js: output: "standalone"): deps -> builder -> runtime.
FROM node:20-alpine AS base

# --- deps: install node_modules from the lockfile only ---
FROM base AS deps
# libc6-compat: glibc shim commonly needed by Node native deps on Alpine.
RUN apk add --no-cache libc6-compat
WORKDIR /app
# NOTE(review): in a pnpm workspace the lockfile lives at the repo root, not
# in apps/web — if it is missing here, `--frozen-lockfile` fails. Confirm the
# build context (CI builds with context `apps/<app>`).
COPY package.json pnpm-lock.yaml* ./
RUN corepack enable pnpm && pnpm install --frozen-lockfile

# --- builder: next build (produces .next/standalone + .next/static) ---
FROM base AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .
RUN corepack enable pnpm && pnpm build

# --- runner: minimal standalone server as non-root user ---
FROM base AS runner
WORKDIR /app
ENV NODE_ENV=production
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
# NOTE(review): this COPY fails if the app has no public/ directory —
# confirm apps/web ships one (none is visible in this commit).
COPY --from=builder /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
USER nextjs
EXPOSE 3000
ENV PORT=3000
# server.js is emitted by Next's standalone output.
CMD ["node", "server.js"]

View File

@@ -0,0 +1,39 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: web
labels:
app: web
spec:
replicas: 1
selector:
matchLabels:
app: web
template:
metadata:
labels:
app: web
spec:
containers:
- name: web
image: ghcr.io/OWNER/homelab-web:latest
ports:
- containerPort: 3000
resources:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 512Mi
readinessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 15
periodSeconds: 20

View File

@@ -0,0 +1,23 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: web
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
spec:
ingressClassName: traefik
rules:
- host: homelab.local
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: web
port:
number: 80
tls:
- hosts:
- homelab.local
secretName: web-tls

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
- ingress.yaml

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: web
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 3000
protocol: TCP
selector:
app: web

View File

@@ -0,0 +1,25 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- target:
kind: Deployment
name: web
patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: web
spec:
replicas: 1
- target:
kind: Ingress
name: web
patch: |
- op: replace
path: /spec/rules/0/host
value: preview.homelab.local
- op: replace
path: /spec/tls/0/hosts/0
value: preview.homelab.local

View File

@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- target:
kind: Deployment
name: web
patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: web
spec:
replicas: 2

6
apps/web/next.config.js Normal file
View File

@@ -0,0 +1,6 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
output: "standalone",
};
module.exports = nextConfig;

23
apps/web/package.json Normal file
View File

@@ -0,0 +1,23 @@
{
"name": "@homelab/web",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev --port 3000",
"build": "next build",
"start": "next start",
"lint": "next lint",
"test": "echo \"no tests yet\""
},
"dependencies": {
"next": "^15.1.0",
"react": "^19.0.0",
"react-dom": "^19.0.0"
},
"devDependencies": {
"@types/node": "^22.10.0",
"@types/react": "^19.0.0",
"@types/react-dom": "^19.0.0",
"typescript": "^5.7.0"
}
}

View File

@@ -0,0 +1,18 @@
import type { Metadata } from "next";
export const metadata: Metadata = {
title: "Homelab",
description: "Self-hosted applications",
};
export default function RootLayout({
children,
}: {
children: React.ReactNode;
}) {
return (
<html lang="en">
<body>{children}</body>
</html>
);
}

View File

@@ -0,0 +1,8 @@
export default function Home() {
return (
<main style={{ padding: "2rem", fontFamily: "system-ui, sans-serif" }}>
<h1>Homelab</h1>
<p>Self-hosted applications running on k3s.</p>
</main>
);
}

21
apps/web/tsconfig.json Normal file
View File

@@ -0,0 +1,21 @@
{
"compilerOptions": {
"target": "ES2017",
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"plugins": [{ "name": "next" }],
"paths": { "@/*": ["./src/*"] }
},
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"],
"exclude": ["node_modules"]
}

14
infra/ansible/ansible.cfg Normal file
View File

@@ -0,0 +1,14 @@
# Ansible client configuration for the homelab inventory.
[defaults]
inventory = inventory/hosts.yaml
roles_path = roles
remote_user = julia
private_key_file = ~/.ssh/homelab
# NOTE(review): disabling host key checking removes SSH MITM protection;
# consider pre-populating known_hosts for the NUCs instead.
host_key_checking = False
retry_files_enabled = False
# YAML-formatted task output, with per-task timing from profile_tasks.
stdout_callback = yaml
callbacks_enabled = profile_tasks

[privilege_escalation]
become = True
become_method = sudo
become_user = root

View File

@@ -0,0 +1,3 @@
---
k3s_agent_args: >-
--node-label=node-role.kubernetes.io/worker=true

View File

@@ -0,0 +1,37 @@
---
# Timezone
timezone: America/New_York
# NTP
ntp_servers:
- 0.ubuntu.pool.ntp.org
- 1.ubuntu.pool.ntp.org
# k3s
k3s_version: v1.31.4+k3s1
k3s_server_url: "https://{{ hostvars['nuc01']['ansible_host'] }}:6443"
k3s_token: "{{ vault_k3s_token }}"
# System packages
common_packages:
- curl
- wget
- git
- htop
- iotop
- net-tools
- unzip
- jq
- open-iscsi
- nfs-common
- cryptsetup
# Container runtime
containerd_config:
max_container_log_size: 10M
max_container_log_files: 3
# Network
cluster_cidr: 10.42.0.0/16
service_cidr: 10.43.0.0/16
cluster_dns: 10.43.0.10

View File

@@ -0,0 +1,12 @@
---
k3s_server_args: >-
--cluster-cidr={{ cluster_cidr }}
--service-cidr={{ service_cidr }}
--cluster-dns={{ cluster_dns }}
--disable=servicelb
--write-kubeconfig-mode=644
--tls-san={{ ansible_host }}
--tls-san=k3s.homelab.local
--kube-apiserver-arg=audit-log-maxage=30
--kube-apiserver-arg=audit-log-maxbackup=10
--kube-apiserver-arg=audit-log-maxsize=100

View File

@@ -0,0 +1,4 @@
---
node_labels:
- topology.kubernetes.io/zone=rack1
- node.kubernetes.io/instance-type=nuc

View File

@@ -0,0 +1,18 @@
---
all:
children:
k3s_cluster:
children:
servers:
hosts:
nuc01:
ansible_host: 10.0.10.11
k3s_role: server
agents:
hosts:
nuc02:
ansible_host: 10.0.10.12
k3s_role: agent
nuc03:
ansible_host: 10.0.10.13
k3s_role: agent

View File

@@ -0,0 +1,7 @@
---
- name: Bootstrap all nodes
hosts: k3s_cluster
become: true
roles:
- common
- hardening

View File

@@ -0,0 +1,6 @@
---
- name: Install k3s agent nodes
hosts: agents
become: true
roles:
- k3s-agent

View File

@@ -0,0 +1,6 @@
---
- name: Install k3s server nodes
hosts: servers
become: true
roles:
- k3s-server

View File

@@ -0,0 +1,49 @@
---
# Rolling, one-node-at-a-time k3s upgrade: cordon + drain the node, re-run
# the k3s installer pinned to the target version, wait for Ready, uncordon.
# kubectl commands are delegated to the first server node, which has cluster
# API access.
- name: Upgrade k3s cluster
  hosts: k3s_cluster
  become: true
  serial: 1  # one node at a time keeps workloads available during upgrade
  vars:
    k3s_upgrade_version: "{{ k3s_version }}"
  tasks:
    - name: Cordon node
      ansible.builtin.command:
        cmd: k3s kubectl cordon {{ inventory_hostname }}
      delegate_to: "{{ groups['servers'][0] }}"
      changed_when: true

    - name: Drain node
      ansible.builtin.command:
        cmd: >-
          k3s kubectl drain {{ inventory_hostname }}
          --ignore-daemonsets
          --delete-emptydir-data
          --timeout=120s
      delegate_to: "{{ groups['servers'][0] }}"
      changed_when: true

    - name: Download k3s installer
      # The install roles fetch the installer only on first install, so
      # /tmp/k3s-install.sh is usually absent (or stale) by upgrade time.
      # Always re-download before running it.
      ansible.builtin.get_url:
        url: https://get.k3s.io
        dest: /tmp/k3s-install.sh
        mode: "0755"
        force: true

    - name: Upgrade k3s
      ansible.builtin.command:
        cmd: /tmp/k3s-install.sh
      environment:
        INSTALL_K3S_VERSION: "{{ k3s_upgrade_version }}"
        K3S_TOKEN: "{{ k3s_token }}"
        INSTALL_K3S_EXEC: "{{ 'server ' + k3s_server_args if k3s_role == 'server' else 'agent ' + k3s_agent_args }}"
        # Servers must not set a join URL; agents join via the server URL.
        K3S_URL: "{{ '' if k3s_role == 'server' else k3s_server_url }}"
      changed_when: true

    - name: Wait for node to be ready
      ansible.builtin.command:
        cmd: k3s kubectl get node {{ inventory_hostname }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
      delegate_to: "{{ groups['servers'][0] }}"
      register: node_ready
      retries: 30
      delay: 10
      until: node_ready.stdout == "True"
      changed_when: false

    - name: Uncordon node
      ansible.builtin.command:
        cmd: k3s kubectl uncordon {{ inventory_hostname }}
      delegate_to: "{{ groups['servers'][0] }}"
      changed_when: true

View File

@@ -0,0 +1,38 @@
---
- name: Reset k3s cluster (DESTRUCTIVE)
hosts: k3s_cluster
become: true
tasks:
- name: Confirm reset
ansible.builtin.pause:
prompt: "This will DESTROY the k3s cluster. Type 'yes' to continue"
register: confirm
run_once: true
- name: Abort if not confirmed
ansible.builtin.fail:
msg: "Reset aborted"
when: confirm.user_input != "yes"
run_once: true
- name: Uninstall k3s agent
ansible.builtin.command:
cmd: /usr/local/bin/k3s-agent-uninstall.sh
when: k3s_role == 'agent'
ignore_errors: true
changed_when: true
- name: Uninstall k3s server
ansible.builtin.command:
cmd: /usr/local/bin/k3s-uninstall.sh
when: k3s_role == 'server'
ignore_errors: true
changed_when: true
- name: Clean up data directories
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /var/lib/rancher
- /etc/rancher

View File

@@ -0,0 +1,9 @@
---
- name: Full cluster deployment
ansible.builtin.import_playbook: bootstrap.yaml
- name: Install k3s servers
ansible.builtin.import_playbook: k3s-server.yaml
- name: Install k3s agents
ansible.builtin.import_playbook: k3s-agent.yaml

View File

@@ -0,0 +1,8 @@
---
collections:
- name: ansible.posix
version: ">=1.5.0"
- name: community.general
version: ">=9.0.0"
- name: kubernetes.core
version: ">=4.0.0"

View File

@@ -0,0 +1,6 @@
---
- name: restart timesyncd
ansible.builtin.systemd:
name: systemd-timesyncd
state: restarted
enabled: true

View File

@@ -0,0 +1,55 @@
---
- name: Set timezone
community.general.timezone:
name: "{{ timezone }}"
- name: Configure NTP
ansible.builtin.template:
src: timesyncd.conf.j2
dest: /etc/systemd/timesyncd.conf
mode: "0644"
notify: restart timesyncd
- name: Update apt cache
ansible.builtin.apt:
update_cache: true
cache_valid_time: 3600
- name: Install common packages
ansible.builtin.apt:
name: "{{ common_packages }}"
state: present
- name: Configure sysctl for k8s
ansible.posix.sysctl:
name: "{{ item.key }}"
value: "{{ item.value }}"
sysctl_set: true
reload: true
loop:
- { key: net.bridge.bridge-nf-call-iptables, value: "1" }
- { key: net.bridge.bridge-nf-call-ip6tables, value: "1" }
- { key: net.ipv4.ip_forward, value: "1" }
- { key: fs.inotify.max_user_instances, value: "512" }
- { key: fs.inotify.max_user_watches, value: "524288" }
- name: Load br_netfilter module
community.general.modprobe:
name: br_netfilter
persistent: present
- name: Disable swap
ansible.builtin.command: swapoff -a
changed_when: false
- name: Remove swap from fstab
ansible.builtin.lineinfile:
path: /etc/fstab
regexp: '\sswap\s'
state: absent
- name: Enable iscsid service (for Longhorn)
ansible.builtin.systemd:
name: iscsid
enabled: true
state: started

View File

@@ -0,0 +1,5 @@
[Time]
{% for server in ntp_servers %}
NTP={{ server }}
{% endfor %}
FallbackNTP=ntp.ubuntu.com

View File

@@ -0,0 +1,5 @@
---
- name: restart sshd
ansible.builtin.systemd:
name: sshd
state: restarted

View File

@@ -0,0 +1,81 @@
---
# Baseline host hardening: SSH lockdown, UFW firewall, unattended upgrades.
# (Scraped listing: original indentation was flattened.)
- name: Ensure SSH password authentication is disabled
ansible.builtin.lineinfile:
path: /etc/ssh/sshd_config
regexp: "^#?PasswordAuthentication"
line: "PasswordAuthentication no"
notify: restart sshd
- name: Disable root SSH login
ansible.builtin.lineinfile:
path: /etc/ssh/sshd_config
regexp: "^#?PermitRootLogin"
line: "PermitRootLogin no"
notify: restart sshd
- name: Install and configure UFW
ansible.builtin.apt:
name: ufw
state: present
# Default posture: deny inbound, allow outbound; then punch specific holes.
- name: Set UFW default deny incoming
community.general.ufw:
direction: incoming
default: deny
- name: Set UFW default allow outgoing
community.general.ufw:
direction: outgoing
default: allow
- name: Allow SSH
community.general.ufw:
rule: allow
port: "22"
proto: tcp
- name: Allow k3s API server (servers only)
community.general.ufw:
rule: allow
port: "6443"
proto: tcp
when: k3s_role == 'server'
# NOTE(review): 8472/udp (flannel VXLAN) and 10250/tcp (kubelet) are opened
# to any source; consider restricting both to the node subnet
# (e.g. 10.0.10.0/24 per the inventory) with `from_ip`.
- name: Allow k3s flannel VXLAN
community.general.ufw:
rule: allow
port: "8472"
proto: udp
- name: Allow kubelet metrics
community.general.ufw:
rule: allow
port: "10250"
proto: tcp
- name: Allow HTTP/HTTPS (for Traefik ingress)
community.general.ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- "80"
- "443"
# Enabled last, after all allow rules are in place.
- name: Enable UFW
community.general.ufw:
state: enabled
- name: Configure automatic security updates
ansible.builtin.apt:
name: unattended-upgrades
state: present
- name: Enable automatic security updates
ansible.builtin.copy:
dest: /etc/apt/apt.conf.d/20auto-upgrades
content: |
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";
APT::Periodic::AutocleanInterval "7";
mode: "0644"

View File

@@ -0,0 +1,29 @@
---
- name: Check if k3s-agent is installed
ansible.builtin.stat:
path: /usr/local/bin/k3s
register: k3s_binary
- name: Download k3s installer
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s-install.sh
mode: "0755"
when: not k3s_binary.stat.exists
- name: Install k3s agent
ansible.builtin.command:
cmd: /tmp/k3s-install.sh
environment:
INSTALL_K3S_VERSION: "{{ k3s_version }}"
K3S_URL: "{{ k3s_server_url }}"
K3S_TOKEN: "{{ k3s_token }}"
INSTALL_K3S_EXEC: "agent {{ k3s_agent_args }}"
when: not k3s_binary.stat.exists
changed_when: true
- name: Wait for k3s-agent to be ready
ansible.builtin.systemd:
name: k3s-agent
state: started
enabled: true

View File

@@ -0,0 +1,47 @@
---
# Install k3s in server mode (idempotent: skipped when the binary exists),
# then fetch the kubeconfig to the repo root and point it at the server IP.
# (Scraped listing: original indentation was flattened.)
- name: Check if k3s is installed
ansible.builtin.stat:
path: /usr/local/bin/k3s
register: k3s_binary
- name: Download k3s installer
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s-install.sh
mode: "0755"
when: not k3s_binary.stat.exists
- name: Install k3s server
ansible.builtin.command:
cmd: /tmp/k3s-install.sh
environment:
INSTALL_K3S_VERSION: "{{ k3s_version }}"
K3S_TOKEN: "{{ k3s_token }}"
INSTALL_K3S_EXEC: "server {{ k3s_server_args }}"
when: not k3s_binary.stat.exists
changed_when: true
# Poll until the API server answers (up to 30 x 10s).
- name: Wait for k3s to be ready
ansible.builtin.command:
cmd: k3s kubectl get nodes
register: k3s_ready
retries: 30
delay: 10
until: k3s_ready.rc == 0
changed_when: false
# Copied to <repo>/kubeconfig on the controller; first server only.
- name: Fetch kubeconfig
ansible.builtin.fetch:
src: /etc/rancher/k3s/k3s.yaml
dest: "{{ playbook_dir }}/../../kubeconfig"
flat: true
run_once: true
# Rewrites the default loopback endpoint to the server's reachable IP.
# NOTE(review): the regexp's dots/slashes are unescaped regex metacharacters;
# it matches the literal line in practice, but an escaped pattern is safer.
- name: Update kubeconfig server URL
ansible.builtin.lineinfile:
path: "{{ playbook_dir }}/../../kubeconfig"
regexp: "server: https://127.0.0.1:6443"
line: "  server: https://{{ ansible_host }}:6443"
delegate_to: localhost
become: false
run_once: true

View File

@@ -0,0 +1,22 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-of-apps
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://github.com/OWNER/homelab.git
targetRevision: main
path: infra/kubernetes/argocd
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@@ -0,0 +1,30 @@
# Generates one production Application per app directory that ships a
# k8s/overlays/production kustomize overlay.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: apps-production
  namespace: argocd
spec:
  goTemplate: true
  # Fail template rendering on missing keys instead of emitting "<no value>".
  goTemplateOptions: ["missingkey=error"]
  generators:
    - git:
        repoURL: https://github.com/OWNER/homelab.git
        revision: main
        directories:
          - path: apps/*/k8s/overlays/production
  template:
    metadata:
      # Second path segment is the app name: apps/web/... -> "web-production".
      name: "{{ index .path.segments 1 }}-production"
    spec:
      project: default
      source:
        repoURL: https://github.com/OWNER/homelab.git
        targetRevision: main
        path: "{{ .path.path }}"
      destination:
        server: https://kubernetes.default.svc
        # All production apps share the "apps" namespace.
        namespace: apps
      syncPolicy:
        automated:
          prune: true
          selfHeal: true

View File

@@ -0,0 +1,32 @@
# Generates one Application per platform component directory
# (cert-manager, traefik, cloudnative-pg, valkey, longhorn, sealed-secrets).
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: platform
  namespace: argocd
spec:
  goTemplate: true
  goTemplateOptions: ["missingkey=error"]
  generators:
    - git:
        repoURL: https://github.com/OWNER/homelab.git
        revision: main
        directories:
          - path: infra/kubernetes/platform/*
  template:
    metadata:
      name: "platform-{{ .path.basename }}"
    spec:
      project: default
      source:
        repoURL: https://github.com/OWNER/homelab.git
        targetRevision: main
        path: "{{ .path.path }}"
      destination:
        server: https://kubernetes.default.svc
        # Default namespace for resources without one; most component
        # manifests pin their own explicit namespace.
        namespace: platform
      syncPolicy:
        automated:
          prune: true
          selfHeal: true
        syncOptions:
          - CreateNamespace=true

View File

@@ -0,0 +1,34 @@
# Generates one preview Application per (app, open PR) pair.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: apps-preview
  namespace: argocd
spec:
  goTemplate: true
  goTemplateOptions: ["missingkey=error"]
  # An Application's source.path must be a concrete directory — globs are
  # not expanded there. The matrix generator resolves the glob through the
  # git directories generator and pairs each match with every open PR, so
  # the template can use the concrete "{{ .path.path }}".
  generators:
    - matrix:
        generators:
          - git:
              repoURL: https://github.com/OWNER/homelab.git
              revision: main
              directories:
                - path: apps/*/k8s/overlays/preview
          - pullRequest:
              github:
                owner: OWNER
                repo: homelab
              requeueAfterSeconds: 60
  template:
    metadata:
      # e.g. preview-web-42: app name (second path segment) + PR number.
      name: "preview-{{ index .path.segments 1 }}-{{ .number }}"
    spec:
      project: default
      source:
        repoURL: https://github.com/OWNER/homelab.git
        targetRevision: "{{ .branch }}"
        path: "{{ .path.path }}"
        kustomize:
          nameSuffix: "-pr{{ .number }}"
      destination:
        server: https://kubernetes.default.svc
        namespace: "preview-{{ .number }}"
      syncPolicy:
        automated:
          prune: true
          selfHeal: true
        syncOptions:
          - CreateNamespace=true

View File

@@ -0,0 +1,2 @@
# ArgoCD is installed via Kustomize remote base.
# See kustomization.yaml for the version-pinned reference.

View File

@@ -0,0 +1,22 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: argocd
resources:
  - namespace.yaml
  # Version-pinned upstream ArgoCD install manifest (remote base).
  - https://raw.githubusercontent.com/argoproj/argo-cd/v2.13.3/manifests/install.yaml
  - app-of-apps.yaml
  - appsets/platform.yaml
  - appsets/apps.yaml
  - appsets/previews.yaml
patches:
  # Strategic-merge patch onto the upstream argocd-cm ConfigMap: sets the
  # external URL and switches to annotation-based resource tracking.
  - target:
      kind: ConfigMap
      name: argocd-cm
    patch: |
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: argocd-cm
      data:
        url: https://argocd.homelab.local
        application.resourceTrackingMethod: annotation

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: argocd

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: apps
labels:
managed-by: argocd

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- apps.yaml
- platform.yaml
- observability.yaml

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: observability
labels:
managed-by: argocd

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: platform
labels:
managed-by: argocd

View File

@@ -0,0 +1,95 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kube-prometheus-stack
namespace: argocd
spec:
project: default
source:
repoURL: https://prometheus-community.github.io/helm-charts
chart: kube-prometheus-stack
targetRevision: 67.9.0
helm:
valuesObject:
prometheus:
prometheusSpec:
retention: 15d
resources:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 2Gi
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: longhorn
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi
serviceMonitorSelectorNilUsesHelmValues: false
podMonitorSelectorNilUsesHelmValues: false
grafana:
adminPassword: "changeme"
ingress:
enabled: true
ingressClassName: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
hosts:
- grafana.homelab.local
tls:
- secretName: grafana-tls
hosts:
- grafana.homelab.local
sidecar:
dashboards:
enabled: true
searchNamespace: ALL
label: grafana_dashboard
datasources:
enabled: true
searchNamespace: ALL
label: grafana_datasource
resources:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 512Mi
alertmanager:
alertmanagerSpec:
resources:
requests:
memory: 64Mi
cpu: 50m
limits:
memory: 256Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: longhorn
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 5Gi
nodeExporter:
enabled: true
kubeStateMetrics:
enabled: true
destination:
server: https://kubernetes.default.svc
namespace: observability
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true

View File

@@ -0,0 +1,69 @@
# Grafana dashboard provisioned via the kube-prometheus-stack sidecar, which
# watches all namespaces for ConfigMaps carrying the grafana_dashboard label
# and loads the embedded JSON automatically.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-overview-dashboard
  namespace: observability
  labels:
    grafana_dashboard: "1"
data:
  cluster-overview.json: |
    {
      "annotations": { "list": [] },
      "editable": true,
      "fiscalYearStartMonth": 0,
      "graphTooltip": 1,
      "id": null,
      "links": [],
      "panels": [
        {
          "title": "CPU Usage by Node",
          "type": "timeseries",
          "gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 },
          "targets": [
            {
              "expr": "100 - (avg by(instance) (rate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100)",
              "legendFormat": "{{ instance }}"
            }
          ]
        },
        {
          "title": "Memory Usage by Node",
          "type": "timeseries",
          "gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 },
          "targets": [
            {
              "expr": "(1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)) * 100",
              "legendFormat": "{{ instance }}"
            }
          ]
        },
        {
          "title": "Disk Usage by Node",
          "type": "timeseries",
          "gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 },
          "targets": [
            {
              "expr": "(1 - (node_filesystem_avail_bytes{mountpoint=\"/\"} / node_filesystem_size_bytes{mountpoint=\"/\"})) * 100",
              "legendFormat": "{{ instance }}"
            }
          ]
        },
        {
          "title": "Pod Count by Namespace",
          "type": "bargauge",
          "gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 },
          "targets": [
            {
              "expr": "count by(namespace) (kube_pod_info)",
              "legendFormat": "{{ namespace }}"
            }
          ]
        }
      ],
      "schemaVersion": 39,
      "tags": ["homelab", "cluster"],
      "templating": { "list": [] },
      "time": { "from": "now-6h", "to": "now" },
      "title": "Cluster Overview",
      "uid": "cluster-overview"
    }

View File

@@ -0,0 +1,17 @@
# Grafana datasource provisioned via the kube-prometheus-stack sidecar
# (matched on the grafana_datasource label).
apiVersion: v1
kind: ConfigMap
metadata:
  name: loki-datasource
  namespace: observability
  labels:
    grafana_datasource: "1"
data:
  loki-datasource.yaml: |
    apiVersion: 1
    datasources:
      - name: Loki
        type: loki
        access: proxy
        url: http://loki.observability.svc:3100
        jsonData:
          maxLines: 1000

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- application.yaml
- grafana-datasources.yaml
- dashboards/cluster-overview.yaml

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- kube-prometheus-stack/
- loki/
- promtail/

View File

@@ -0,0 +1,71 @@
# Loki deployed as an ArgoCD Application in single-binary mode — sized for a
# small homelab; all scalable-mode components are explicitly disabled.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: loki
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://grafana.github.io/helm-charts
    chart: loki
    targetRevision: 6.24.0
    helm:
      valuesObject:
        deploymentMode: SingleBinary
        loki:
          # Single-tenant: no X-Scope-OrgID header required from clients.
          auth_enabled: false
          commonConfig:
            replication_factor: 1
          storage:
            type: filesystem
          schemaConfig:
            configs:
              - from: "2024-01-01"
                store: tsdb
                object_store: filesystem
                schema: v13
                index:
                  prefix: loki_index_
                  period: 24h
          limits_config:
            # Keep logs for 7 days.
            retention_period: 168h
            max_query_series: 500
            max_query_parallelism: 2
        singleBinary:
          replicas: 1
          resources:
            requests:
              memory: 256Mi
              cpu: 100m
            limits:
              memory: 1Gi
          persistence:
            enabled: true
            storageClass: longhorn
            size: 10Gi
        # Everything below belongs to the scaled deployment modes and is
        # turned off for SingleBinary.
        gateway:
          enabled: false
        backend:
          replicas: 0
        read:
          replicas: 0
        write:
          replicas: 0
        chunksCache:
          enabled: false
        resultsCache:
          enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: observability
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- application.yaml

View File

@@ -0,0 +1,38 @@
# Promtail DaemonSet shipping node container logs to the in-cluster Loki.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: promtail
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://grafana.github.io/helm-charts
    chart: promtail
    targetRevision: 6.16.6
    helm:
      valuesObject:
        config:
          clients:
            - url: http://loki.observability.svc:3100/loki/api/v1/push
          snippets:
            pipelineStages:
              # Parse the CRI log format used by containerd-based k3s.
              - cri: {}
              # Join continuation lines (e.g. stack traces) onto the
              # preceding line that starts with a YYYY-MM-DD timestamp.
              - multiline:
                  firstline: '^\d{4}-\d{2}-\d{2}'
                  max_wait_time: 3s
        resources:
          requests:
            memory: 64Mi
            cpu: 50m
          limits:
            memory: 256Mi
        # Run on every node, including tainted ones, so no logs are missed.
        tolerations:
          - effect: NoSchedule
            operator: Exists
  destination:
    server: https://kubernetes.default.svc
    namespace: observability
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- application.yaml

View File

@@ -0,0 +1,37 @@
# Prerequisites: cert-manager must be installed via Helm first.
# Install: helm install cert-manager jetstack/cert-manager --namespace cert-manager --set crds.enabled=true --version v1.16.3
# This file configures the Let's Encrypt issuers after cert-manager is running.
#
# NOTE(review): Let's Encrypt only issues for publicly registered domains —
# "homelab.local" cannot be validated and "admin@homelab.local" is not a
# deliverable address. Substitute a real Cloudflare-hosted domain and a real
# contact email before these issuers can work.
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # Staging endpoint: generous rate limits, untrusted chain — for testing.
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    email: admin@homelab.local
    privateKeySecretRef:
      name: letsencrypt-staging-key
    solvers:
      # DNS-01 supports wildcard certs and hosts unreachable from the internet.
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-production
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@homelab.local
    privateKeySecretRef:
      name: letsencrypt-production-key
    solvers:
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- clusterissuer-letsencrypt.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager

View File

@@ -0,0 +1,45 @@
# Prerequisites: CloudNativePG operator must be installed first.
# Install: helm install cnpg cloudnative-pg/cloudnative-pg --namespace cnpg-system --create-namespace
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: homelab-pg
  namespace: platform
spec:
  # One primary plus one streaming replica.
  instances: 2
  # The operator may restart/switch the primary without manual approval.
  primaryUpdateStrategy: unsupervised
  storage:
    storageClass: longhorn
    size: 10Gi
  postgresql:
    parameters:
      max_connections: "100"
      shared_buffers: 256MB
      effective_cache_size: 512MB
      work_mem: 4MB
  bootstrap:
    initdb:
      database: homelab
      owner: homelab
      # Expects a basic-auth Secret carrying the owner's credentials.
      secret:
        name: homelab-pg-credentials
  backup:
    # NOTE(review): this targets a MinIO endpoint that is not part of this
    # scaffold — deploy MinIO (or point at real S3) before relying on
    # backups.
    barmanObjectStore:
      destinationPath: s3://homelab-pg-backups/
      endpointURL: http://minio.platform.svc:9000
      s3Credentials:
        accessKeyId:
          name: pg-backup-s3-credentials
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: pg-backup-s3-credentials
          key: SECRET_ACCESS_KEY
      retentionPolicy: "30d"
  monitoring:
    # Expose metrics to Prometheus via a PodMonitor.
    enablePodMonitor: true

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cluster.yaml

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- storageclass.yaml

View File

@@ -0,0 +1,6 @@
# Prerequisites: Longhorn must be installed via Helm first.
# Install: helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.7.2
apiVersion: v1
kind: Namespace
metadata:
name: longhorn-system

View File

@@ -0,0 +1,14 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: longhorn
  annotations:
    # Make Longhorn the cluster default so PVCs without a storageClassName
    # land here.
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: driver.longhorn.io
allowVolumeExpansion: true
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
  # Two replicas tolerate one node failure while using less disk than
  # three-way replication.
  numberOfReplicas: "2"
  # Minutes before a replica on an unreachable node is considered gone (48h).
  staleReplicaTimeout: "2880"
  # Prefer placing a replica on the node running the workload.
  dataLocality: best-effort

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml

View File

@@ -0,0 +1,9 @@
# Prerequisites: Sealed Secrets must be installed via Helm first.
# Install: helm install sealed-secrets sealed-secrets/sealed-secrets --namespace kube-system --version 2.16.2
# The controller runs in kube-system; this is just the config namespace.
apiVersion: v1
kind: Namespace
metadata:
name: sealed-secrets
labels:
managed-by: argocd

View File

@@ -0,0 +1,26 @@
# HelmChartConfig customizes the k3s-bundled Traefik deployment
#
# NOTE(review): the "letsencrypt" certresolver referenced in
# additionalArguments is never defined here (no certificatesResolvers
# block), and TLS elsewhere in this repo comes from cert-manager-issued
# secrets — confirm whether that argument should be removed.
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: traefik
  namespace: kube-system
spec:
  # Rendered as Helm values for the bundled Traefik chart: redirect HTTP to
  # HTTPS, enable TLS on websecure, allow cross-namespace CRD references,
  # and expose access logs plus Prometheus metrics.
  valuesContent: |-
    ports:
      web:
        redirectTo:
          port: websecure
      websecure:
        tls:
          enabled: true
    providers:
      kubernetesCRD:
        allowCrossNamespace: true
    logs:
      access:
        enabled: true
    metrics:
      prometheus:
        entryPoint: metrics
    additionalArguments:
      - "--entrypoints.websecure.http.tls.certresolver=letsencrypt"

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- helmchartconfig.yaml
- middleware-default-headers.yaml

View File

@@ -0,0 +1,16 @@
# Reusable Traefik middleware applying standard security headers; reference
# it from routes as platform-default-headers@kubernetescrd.
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: default-headers
  namespace: platform
spec:
  headers:
    browserXssFilter: true
    contentTypeNosniff: true
    frameDeny: true
    # HSTS: one year, subdomains included, preload-list eligible.
    stsIncludeSubdomains: true
    stsPreload: true
    stsSeconds: 31536000
    customFrameOptionsValue: SAMEORIGIN
    customRequestHeaders:
      X-Forwarded-Proto: https

View File

@@ -0,0 +1,71 @@
# Single-replica Valkey (Redis-compatible cache) backed by a Longhorn volume.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: valkey
  namespace: platform
  labels:
    app: valkey
spec:
  replicas: 1
  # Recreate avoids a rolling-update deadlock: the PVC below is
  # ReadWriteOnce, so a replacement pod cannot attach the volume while the
  # old pod still holds it. The default RollingUpdate strategy would leave
  # the new pod stuck in ContainerCreating forever.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: valkey
  template:
    metadata:
      labels:
        app: valkey
    spec:
      containers:
        - name: valkey
          image: valkey/valkey:8.0-alpine
          ports:
            - containerPort: 6379
          # $(VALKEY_PASSWORD) is expanded by Kubernetes from the env var
          # below, so the secret value never appears in the manifest.
          args:
            - "--requirepass"
            - "$(VALKEY_PASSWORD)"
            - "--maxmemory"
            - "256mb"
            - "--maxmemory-policy"
            - "allkeys-lru"
          env:
            - name: VALKEY_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: valkey-credentials
                  key: password
          resources:
            requests:
              memory: 128Mi
              cpu: 100m
            limits:
              memory: 512Mi
          readinessProbe:
            tcpSocket:
              port: 6379
            initialDelaySeconds: 5
            periodSeconds: 10
          livenessProbe:
            tcpSocket:
              port: 6379
            initialDelaySeconds: 15
            periodSeconds: 20
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: valkey-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: valkey-data
  namespace: platform
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 2Gi

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml

View File

@@ -0,0 +1,15 @@
# ClusterIP service exposing Valkey inside the cluster at
# valkey.platform.svc:6379.
apiVersion: v1
kind: Service
metadata:
  name: valkey
  namespace: platform
  labels:
    app: valkey
spec:
  type: ClusterIP
  ports:
    - port: 6379
      targetPort: 6379
      protocol: TCP
  selector:
    app: valkey

73
infra/ubiquiti/README.md Normal file
View File

@@ -0,0 +1,73 @@
# Ubiquiti Cloud Gateway Configuration
Documentation for the Ubiquiti Cloud Gateway (UCG) that manages network ingress for the homelab cluster.
## Network Layout
```
Internet
UCG (Ubiquiti Cloud Gateway)
├── VLAN 1 - Management (10.0.1.0/24)
├── VLAN 10 - Servers (10.0.10.0/24)
├── VLAN 20 - IoT (10.0.20.0/24)
└── VLAN 99 - Guest (10.0.99.0/24)
```
## VLAN Configuration
| VLAN ID | Name | Subnet | Purpose |
|---------|------------|-----------------|------------------------|
| 1 | Management | 10.0.1.0/24 | Network devices, admin |
| 10 | Servers | 10.0.10.0/24 | k3s cluster nodes |
| 20 | IoT | 10.0.20.0/24 | IoT devices |
| 99 | Guest | 10.0.99.0/24 | Guest WiFi |
## DHCP Reservations (VLAN 10 — Servers)
| Hostname | IP Address | MAC Address | Role |
|----------|-------------|-------------------|-------------|
| nuc01 | 10.0.10.11 | XX:XX:XX:XX:XX:01 | k3s server |
| nuc02 | 10.0.10.12 | XX:XX:XX:XX:XX:02 | k3s agent |
| nuc03 | 10.0.10.13 | XX:XX:XX:XX:XX:03 | k3s agent |
## Port Forwarding Rules
| Name | External Port | Internal IP | Internal Port | Protocol |
|------------|---------------|--------------|---------------|----------|
| HTTP | 80 | 10.0.10.11 | 80 | TCP |
| HTTPS | 443 | 10.0.10.11 | 443 | TCP |
| k3s API | 6443 | 10.0.10.11 | 6443 | TCP |
> **Note**: HTTP/HTTPS traffic routes to nuc01 where Traefik runs as the ingress controller.
> k3s API port is only forwarded if external kubectl access is needed.
## Firewall Rules
### Inter-VLAN Rules
- **Servers → Internet**: Allow all outbound
- **Servers → Management**: Allow (for UCG API access)
- **IoT → Servers**: Deny (isolate IoT from cluster)
- **Guest → Any**: Allow Internet only, block all local
### Inbound Rules
- Allow established/related connections
- Allow HTTP (80) and HTTPS (443) to VLAN 10
- Drop all other inbound
## DNS Configuration
- **Internal DNS**: Use UCG as DNS server for VLAN 10
- **External DNS**: Cloudflare (1.1.1.1, 1.0.0.1)
- **Local DNS entries**: Add `*.homelab.local` → 10.0.10.11 for internal access
## Setup Steps
1. **Create VLANs** in UniFi Network → Settings → Networks
2. **Assign ports** on the switch to VLAN 10 for NUC connections
3. **Create DHCP reservations** for each NUC (Settings → Networks → VLAN 10)
4. **Add port forwarding rules** (Settings → Firewall & Security → Port Forwarding)
5. **Configure firewall rules** (Settings → Firewall & Security → Firewall Rules)
6. **Set local DNS** entries for *.homelab.local

View File

@@ -0,0 +1,37 @@
# Network Diagram
```
┌──────────────┐
│ Internet │
└──────┬───────┘
┌──────┴───────┐
│ UCG │
│ 10.0.1.1 │
└──────┬───────┘
┌────────────┼────────────┐
│ │ │
┌──────┴───┐ ┌─────┴────┐ ┌────┴──────┐
│ VLAN 10 │ │ VLAN 20 │ │ VLAN 99 │
│ Servers │ │ IoT │ │ Guest │
└──────┬───┘ └──────────┘ └───────────┘
┌────────────┼────────────┐
│ │ │
┌────┴────┐ ┌────┴────┐ ┌────┴────┐
│ nuc01 │ │ nuc02 │ │ nuc03 │
│ .10.11 │ │ .10.12 │ │ .10.13 │
│ server │ │ agent │ │ agent │
└─────────┘ └─────────┘ └─────────┘
Services on k3s cluster:
┌─────────────────────────────────────┐
│ Traefik (Ingress) ← :80/:443 │
│ ArgoCD ← :8080 │
│ Grafana ← :3001 │
│ PostgreSQL (CNPG) ← :5432 │
│ Valkey ← :6379 │
│ Longhorn UI ← :8000 │
└─────────────────────────────────────┘
```

19
package.json Normal file
View File

@@ -0,0 +1,19 @@
{
"name": "homelab",
"private": true,
"scripts": {
"build": "turbo build",
"dev": "turbo dev",
"lint": "turbo lint",
"test": "turbo test",
"format": "prettier --write \"**/*.{ts,tsx,js,jsx,json,md,yaml,yml}\""
},
"devDependencies": {
"prettier": "^3.2.5",
"turbo": "^2.4.0"
},
"packageManager": "pnpm@9.15.4",
"engines": {
"node": ">=20"
}
}

View File

@@ -0,0 +1,6 @@
// Shared ESLint configuration for Next.js apps in the monorepo.
// "prettier" must stay last in `extends` so it can disable any formatting
// rules that would conflict with Prettier.
module.exports = {
  extends: ["next/core-web-vitals", "prettier"],
  rules: {
    // Surface stray console calls without failing the lint run.
    "no-console": "warn",
  },
};

View File

@@ -0,0 +1,10 @@
{
"name": "@homelab/config-eslint",
"version": "0.1.0",
"private": true,
"main": "./index.js",
"dependencies": {
"eslint-config-next": "^15.1.0",
"eslint-config-prettier": "^9.1.0"
}
}

View File

@@ -0,0 +1,13 @@
{
"compilerOptions": {
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"isolatedModules": true,
"declaration": true,
"declarationMap": true
},
"exclude": ["node_modules"]
}

View File

@@ -0,0 +1,14 @@
{
"extends": "./base.json",
"compilerOptions": {
"target": "ES2017",
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"noEmit": true,
"module": "esnext",
"moduleResolution": "bundler",
"jsx": "preserve",
"incremental": true,
"plugins": [{ "name": "next" }]
}
}

View File

@@ -0,0 +1,10 @@
{
"extends": "./base.json",
"compilerOptions": {
"target": "ES2022",
"module": "commonjs",
"lib": ["ES2022"],
"outDir": "./dist",
"sourceMap": true
}
}

View File

@@ -0,0 +1,6 @@
{
"name": "@homelab/config-typescript",
"version": "0.1.0",
"private": true,
"files": ["base.json", "nextjs.json", "node.json"]
}

View File

@@ -0,0 +1,10 @@
import { defineConfig } from "drizzle-kit";
// Drizzle Kit configuration: generates SQL migrations from the TypeScript
// schema into ./drizzle; migrate/studio connect via DATABASE_URL.
export default defineConfig({
  schema: "./src/schema.ts",
  out: "./drizzle",
  dialect: "postgresql",
  dbCredentials: {
    // Non-null assertion: drizzle-kit commands require DATABASE_URL to be
    // set in the environment; they fail at runtime if it is missing.
    url: process.env.DATABASE_URL!,
  },
});

22
packages/db/package.json Normal file
View File

@@ -0,0 +1,22 @@
{
"name": "@homelab/db",
"version": "0.1.0",
"private": true,
"main": "./src/index.ts",
"types": "./src/index.ts",
"scripts": {
"lint": "tsc --noEmit",
"build": "tsc",
"db:generate": "drizzle-kit generate",
"db:migrate": "drizzle-kit migrate",
"db:studio": "drizzle-kit studio"
},
"dependencies": {
"drizzle-orm": "^0.36.0",
"postgres": "^3.4.0"
},
"devDependencies": {
"drizzle-kit": "^0.28.0",
"typescript": "^5.7.0"
}
}

View File

@@ -0,0 +1,8 @@
import { drizzle } from "drizzle-orm/postgres-js";
import postgres from "postgres";
import * as schema from "./schema";

// Fail fast with a clear message instead of letting a non-null assertion
// pass `undefined` into postgres() and surface later as an opaque
// connection error.
const connectionString = process.env.DATABASE_URL;
if (!connectionString) {
  throw new Error("DATABASE_URL is not set; @homelab/db requires it");
}

const client = postgres(connectionString);

// Shared Drizzle instance with the full schema attached for typed queries.
export const db = drizzle(client, { schema });

2
packages/db/src/index.ts Normal file
View File

@@ -0,0 +1,2 @@
export * from "./schema";
export * from "./client";

Some files were not shown because too many files have changed in this diff Show More