Big rewrite for clean app-of-apps setup

Marco van Zijl 2025-11-08 13:38:36 +01:00
parent aabead174c
commit b972f5073f
29 changed files with 638 additions and 26739 deletions

apps/traefik/Chart.yaml Normal file

@@ -0,0 +1,7 @@
apiVersion: v2
name: traefik
version: 1.0.0
dependencies:
  - name: traefik
    version: 37.2.0
    repository: https://traefik.github.io/charts


@@ -0,0 +1,30 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: traefik
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "1"
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://git.mvzijl.nl/marco/veda.git
    targetRevision: applicationset-rewrite
    path: apps/traefik
    helm:
      releaseName: traefik
      valueFiles:
        - values.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: traefik
  syncPolicy:
    automated:
      prune: false
      selfHeal: false
    syncOptions:
      - CreateNamespace=true
      - PruneLast=true
      - PrunePropagationPolicy=foreground

apps/traefik/values.yaml Normal file

@@ -0,0 +1,87 @@
traefik:
  # Service configuration
  service:
    type: LoadBalancer
    annotations:
      io.cilium/lb-ipam-ips: "192.168.0.1" # Your Cilium L2 IP

  # Ports configuration
  ports:
    web:
      port: 80
      exposedPort: 80
      protocol: TCP
    websecure:
      port: 443
      exposedPort: 443
      protocol: TCP
      tls:
        enabled: true
    metrics:
      port: 9100
      expose: false
      protocol: TCP

  # Enable dashboard
  ingressRoute:
    dashboard:
      enabled: true
      matchRule: Host(`traefik.noxxos.nl`)
      entryPoints:
        - websecure

  # Global arguments
  globalArguments:
    - "--global.checknewversion=false"
    - "--global.sendanonymoususage=false"

  # Additional arguments
  additionalArguments:
    - "--api.dashboard=true"
    - "--log.level=INFO"
    - "--accesslog=true"
    - "--entrypoints.web.http.redirections.entrypoint.to=websecure"
    - "--entrypoints.web.http.redirections.entrypoint.scheme=https"

  # Providers
  providers:
    kubernetesCRD:
      enabled: true
      allowCrossNamespace: true
    kubernetesIngress:
      enabled: true
      publishedService:
        enabled: true

  # Resource limits
  resources:
    requests:
      cpu: "100m"
      memory: "128Mi"
    limits:
      cpu: "500m"
      memory: "512Mi"

  # Replicas
  deployment:
    replicas: 2

  # Metrics (Prometheus)
  metrics:
    prometheus:
      enabled: true
      addEntryPointsLabels: true
      addServicesLabels: true

  # Security
  securityContext:
    capabilities:
      drop: [ALL]
      add: [NET_BIND_SERVICE]
    readOnlyRootFilesystem: true
    runAsGroup: 65532
    runAsNonRoot: true
    runAsUser: 65532
  podSecurityContext:
    fsGroup: 65532
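
One way to route a workload through the websecure entrypoint configured above is Traefik's IngressRoute CRD. A minimal sketch — the `whoami` app, namespace and hostname are placeholders, not part of this commit:

```yaml
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: whoami            # hypothetical app
  namespace: whoami       # hypothetical namespace
spec:
  entryPoints:
    - websecure           # TLS-enabled entrypoint from the values above
  routes:
    - match: Host(`whoami.noxxos.nl`)   # placeholder hostname
      kind: Rule
      services:
        - name: whoami    # hypothetical Service
          port: 80
```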


@@ -1,35 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: apps
  namespace: argocd
spec:
  goTemplate: true
  goTemplateOptions: ["missingkey=error"]
  generators:
    - git:
        repoURL: https://git.mvzijl.nl/marco/veda.git
        revision: applicationset-rewrite
        directories:
          - path: apps/*
  template:
    metadata:
      name: '{{.path.basename}}'
      finalizers:
        - resources-finalizer.argocd.argoproj.io
    spec:
      project: test
      source:
        repoURL: https://git.mvzijl.nl/marco/veda.git
        targetRevision: applicationset-rewrite
        path: '{{.path.path}}'
      destination:
        server: https://kubernetes.default.svc
        namespace: '{{.path.basename}}'
      syncPolicy:
        automated:
          prune: false
          selfHeal: false
        syncOptions:
          - CreateNamespace=true
          - ApplyOutOfSyncOnly=true


@@ -0,0 +1,57 @@
#!/usr/bin/env bash
set -euo pipefail

echo "Starting Cilium installation..."

# Add Cilium Helm repository
echo "Adding Cilium Helm repository..."
helm repo add cilium https://helm.cilium.io/
helm repo update

# Install Cilium
echo "Installing Cilium..."
helm upgrade --install \
  cilium \
  cilium/cilium \
  --version 1.18.3 \
  --namespace kube-system \
  --create-namespace \
  --values "$(dirname "$0")/values.yaml" \
  --wait

# Wait for Cilium to be ready
echo "Waiting for Cilium DaemonSet to be ready..."
kubectl rollout status daemonset/cilium -n kube-system --timeout=300s

# Wait for Hubble components if enabled
echo "Waiting for Hubble components..."
kubectl rollout status deployment/hubble-relay -n kube-system --timeout=300s
kubectl rollout status deployment/hubble-ui -n kube-system --timeout=300s

# Apply post-install configurations if any exist
if [ -d "$(dirname "$0")/post-install" ]; then
  echo "Applying post-install configurations..."
  kubectl apply --recursive -f "$(dirname "$0")/post-install/"
fi

echo "Checking Cilium status..."
if command -v cilium &> /dev/null; then
  cilium status
else
  echo "Cilium CLI not found. To install:"
  echo "brew install cilium-cli"
fi

echo
echo "Installation complete!"
echo
echo "To access Hubble UI:"
echo "1. Run port-forward:"
echo " kubectl port-forward -n kube-system svc/hubble-ui 12000:80"
echo "2. Visit: http://localhost:12000"
echo
echo "To verify installation:"
echo "1. Check pod status: kubectl get pods -n kube-system -l k8s-app=cilium"
echo "2. Check Hubble UI: kubectl get deployment -n kube-system hubble-ui"
echo "3. Install Cilium CLI: brew install cilium-cli"


@@ -15,7 +15,7 @@ metadata:
   namespace: kube-system
 spec:
   blocks:
-    - cidr: "192.168.0.1/32"
+    - cidr: "192.168.0.2/32"
   serviceSelector:
     matchLabels:
       io.kubernetes.service.namespace: "traefik"


@@ -0,0 +1,75 @@
# Cilium Component
## Overview
Cilium is our CNI (Container Network Interface) solution that provides networking, security, and observability for Kubernetes using eBPF.
## Configuration
The following configurations are available:
- Version: 1.18.3
- IPAM Mode: kubernetes
- Hubble UI: Enabled
- L2 Announcements: Enabled
- kube-proxy Replacement: Enabled
## Features
- **Hubble UI**: Web interface for network observability
- **L2 Announcements**: For LoadBalancer service type support
- **Enhanced Security**: Using eBPF for network policy enforcement
- **Kube-proxy Replacement**: Native handling of service load-balancing
## Post-Install
After installation:
1. Cilium core components will be installed
2. Hubble UI and Relay will be deployed
3. LoadBalancer IP pools will be configured
4. Initial access to Hubble UI is available through port-forward:
```bash
kubectl port-forward -n kube-system svc/hubble-ui 12000:80
```
Then visit: `http://localhost:12000`
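
With L2 announcements, Cilium only advertises LoadBalancer IPs for services matched by a CiliumL2AnnouncementPolicy, alongside the IP pool configured in post-install. A minimal sketch of such a policy — the name and interface pattern are placeholders and should match your environment:

```yaml
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: l2-default                # placeholder name
spec:
  serviceSelector:
    matchLabels:
      io.kubernetes.service.namespace: "traefik"
  loadBalancerIPs: true           # announce IPs allocated from the LB IPAM pool
  interfaces:
    - ^eth[0-9]+                  # adjust to the node NIC naming
```
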
## Dependencies
- Kubernetes cluster
- Helm v3+
- Linux kernel >= 4.9.17
## Troubleshooting
1. Check if Cilium pods are running:
```bash
kubectl get pods -n kube-system -l k8s-app=cilium
```
2. Check Cilium status (requires Cilium CLI):
```bash
cilium status
```
3. Check Hubble UI deployment:
```bash
kubectl get deployment -n kube-system hubble-ui
```
4. View Cilium logs:
```bash
kubectl logs -n kube-system -l k8s-app=cilium
```
To install Cilium CLI:
```bash
brew install cilium-cli
```


@@ -0,0 +1,45 @@
ipam:
  mode: kubernetes

hubble:
  relay:
    enabled: true
  ui:
    enabled: true
    ingress:
      enabled: true
      className: traefik
      hosts:
        - hubble.noxxos.nl

l2announcements:
  enabled: true

kubeProxyReplacement: true

securityContext:
  capabilities:
    ciliumAgent:
      - CHOWN
      - KILL
      - NET_ADMIN
      - NET_RAW
      - IPC_LOCK
      - SYS_ADMIN
      - SYS_RESOURCE
      - DAC_OVERRIDE
      - FOWNER
      - SETGID
      - SETUID
    cleanCiliumState:
      - NET_ADMIN
      - SYS_ADMIN
      - SYS_RESOURCE

cgroup:
  autoMount:
    enabled: false
  hostRoot: /sys/fs/cgroup

k8sServiceHost: localhost
k8sServicePort: 7445
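# Note: localhost:7445 is the default Talos KubePrism endpoint (assumption: KubePrism is enabled on the nodes, as is the Talos default).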


@@ -0,0 +1,51 @@
#!/usr/bin/env bash
set -euo pipefail

echo "Starting ArgoCD installation..."

# Add Argo CD Helm repository
echo "Adding Argo CD Helm repository..."
helm repo add argo https://argoproj.github.io/argo-helm
helm repo update

# Install Argo CD
echo "Installing Argo CD..."
helm upgrade --install \
  argocd \
  argo/argo-cd \
  --namespace argocd \
  --create-namespace \
  --version 9.1.0 \
  --values "$(dirname "$0")/values.yaml" \
  --wait

# Wait for the Argo CD server to be ready
echo "Waiting for Argo CD server to be ready..."
kubectl wait --for=condition=available --timeout=300s deployment/argocd-server -n argocd

# Apply post-install configurations if they exist
if [ -n "$(find "$(dirname "$0")/post-install" -type f \( -name '*.yaml' -o -name '*.yml' -o -name '*.json' \) 2>/dev/null)" ]; then
  echo "Applying post-install configurations..."
  kubectl apply --recursive -f "$(dirname "$0")/post-install/"
fi

# Get the initial admin password
echo
echo "Initial admin password:"
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
echo
echo
echo "ArgoCD installation complete!"
echo
echo "Temporary access (until ingress is ready):"
echo "1. Run: kubectl port-forward svc/argocd-server -n argocd 8080:443"
echo "2. Open: https://localhost:8080"
echo
echo "Credentials:"
echo " Username: admin"
echo " Password: (shown above)"
echo
echo "Once Traefik ingress is running, access ArgoCD at:"
echo " https://argocd.noxxos.nl"


@@ -0,0 +1,33 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: root
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://git.mvzijl.nl/marco/veda.git
    targetRevision: applicationset-rewrite
    path: apps
    directory:
      recurse: true
      include: '{*/application.yaml,*/application.yml}' # Only Application manifests
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  syncPolicy:
    automated:
      prune: false
      selfHeal: false
    syncOptions:
      - CreateNamespace=true
      - PruneLast=true
      - PrunePropagationPolicy=foreground
    retry:
      limit: 5
      backoff:
        duration: 5s
        factor: 2
        maxDuration: 3m
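
With this root Application in place, adding a component means creating a directory under `apps/` with its own `application.yaml` (plus a wrapper `Chart.yaml` and `values.yaml`, as with traefik). A minimal sketch for a hypothetical `apps/whoami` app, mirroring the traefik manifest earlier in this commit — the name, namespace and sync wave are placeholders:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: whoami                          # hypothetical app
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "2"   # assumed: after traefik (wave 1)
spec:
  project: default
  source:
    repoURL: https://git.mvzijl.nl/marco/veda.git
    targetRevision: applicationset-rewrite
    path: apps/whoami
  destination:
    server: https://kubernetes.default.svc
    namespace: whoami
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
```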


@@ -0,0 +1,49 @@
# ArgoCD Component
## Overview
ArgoCD is our GitOps continuous delivery tool for Kubernetes.
## Configuration
The following configurations are available:
- Domain: argocd.noxxos.nl
- Ingress: Enabled with Traefik
- Version: 9.1.0
## Post-Install
After installation:
1. The admin password will be displayed
2. Initial access is available through port-forward:
```bash
kubectl port-forward svc/argocd-server -n argocd 8080:443
```
Then visit: `https://localhost:8080`
3. Once Traefik is running, access through ingress will be available
## Dependencies
- Kubernetes cluster
- Helm v3+
- Traefik (for ingress)
## Troubleshooting
If you can't access ArgoCD:
1. Check if the pods are running:
```bash
kubectl get pods -n argocd
```
2. Check the ingress status:
```bash
kubectl get ingress -n argocd
```


@@ -0,0 +1,9 @@
global:
  domain: argocd.noxxos.nl

server:
  ingress:
    enabled: true
    ingressClassName: traefik
    annotations:
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
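
If Traefik terminates TLS on websecure and forwards plain HTTP to argocd-server, the chart is often also told to disable the server's own TLS. A possible addition to these values — not something this commit sets, and only needed if the server keeps redirecting to HTTPS behind the proxy:

```yaml
configs:
  params:
    server.insecure: true   # assumption: TLS is terminated at the Traefik entrypoint
```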


@@ -0,0 +1,62 @@
#!/usr/bin/env bash
set -euo pipefail

# Get the directory where the script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
COMPONENTS_DIR="$(cd "${SCRIPT_DIR}/../components" && pwd)"

# Function to check prerequisites
check_prerequisites() {
  echo "Checking prerequisites..."
  command -v kubectl >/dev/null 2>&1 || { echo "kubectl is required but not installed"; exit 1; }
  command -v helm >/dev/null 2>&1 || { echo "helm is required but not installed"; exit 1; }
  # Check if we can connect to the cluster
  kubectl cluster-info >/dev/null 2>&1 || { echo "Cannot connect to Kubernetes cluster"; exit 1; }
}

# Function to install a component
install_component() {
  local component_dir=$1
  local component_name=$(basename "${component_dir}")

  echo
  echo "================================================================"
  echo "Installing component: ${component_name}"
  echo "================================================================"

  if [[ -f "${component_dir}/install.sh" ]]; then
    bash "${component_dir}/install.sh"
  else
    echo "No install.sh found for ${component_name}, skipping..."
  fi
}

# Main installation process
main() {
  echo "Starting platform installation..."
  echo

  # Check prerequisites
  check_prerequisites

  # Get all component directories in order
  components=($(find "${COMPONENTS_DIR}" -maxdepth 1 -mindepth 1 -type d | sort))

  # Install each component
  for component in "${components[@]}"; do
    install_component "${component}"
  done

  echo
  echo "================================================================"
  echo "Platform installation complete!"
  echo "================================================================"
  echo
  echo "To validate the installation, run:"
  echo " ./validate.sh"
}

# Run main function
main "$@"


@@ -0,0 +1,116 @@
#!/usr/bin/env bash
set -euo pipefail

# Get the directory where the script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
COMPONENTS_DIR="$(cd "${SCRIPT_DIR}/../components" && pwd)"

# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Function to check if a deployment is ready
check_deployment() {
  local namespace=$1
  local deployment=$2
  local description=$3

  echo -n "Checking ${description}... "
  if kubectl get deployment -n "${namespace}" "${deployment}" >/dev/null 2>&1; then
    if kubectl wait --for=condition=available --timeout=5s deployment/"${deployment}" -n "${namespace}" >/dev/null 2>&1; then
      echo -e "${GREEN}OK${NC}"
      return 0
    else
      echo -e "${RED}Not Ready${NC}"
      return 1
    fi
  else
    echo -e "${RED}Not Found${NC}"
    return 1
  fi
}

# Function to check post-install manifests
check_post_install() {
  local component_dir=$1
  local description=$2

  echo -n "Checking ${description} post-install configurations... "
  if [ -n "$(find "${component_dir}/post-install" -type f \( -name '*.yaml' -o -name '*.yml' -o -name '*.json' \) 2>/dev/null)" ]; then
    if kubectl diff -f "${component_dir}/post-install/" >/dev/null 2>&1; then
      echo -e "${GREEN}OK${NC}"
      return 0
    else
      echo -e "${RED}Out of sync${NC}"
      return 1
    fi
  else
    echo -e "${GREEN}No post-install configs${NC}"
    return 0
  fi
}

# Function to check if a daemon set is ready
check_daemonset() {
  local namespace=$1
  local daemonset=$2
  local description=$3

  echo -n "Checking ${description}... "
  if kubectl get daemonset -n "${namespace}" "${daemonset}" >/dev/null 2>&1; then
    if kubectl rollout status daemonset/"${daemonset}" -n "${namespace}" --timeout=5s >/dev/null 2>&1; then
      echo -e "${GREEN}OK${NC}"
      return 0
    else
      echo -e "${RED}Not Ready${NC}"
      return 1
    fi
  else
    echo -e "${RED}Not Found${NC}"
    return 1
  fi
}

# Main validation process
main() {
  local errors=0
  echo "Validating platform components..."
  echo

  # errors=$((errors + 1)) is used rather than ((errors++)): the latter returns a
  # non-zero status when errors is 0 and would abort the script under set -e.

  # Validate Cilium
  echo "Checking Cilium components:"
  check_daemonset kube-system cilium "Cilium CNI" || errors=$((errors + 1))
  check_deployment kube-system hubble-relay "Hubble Relay" || errors=$((errors + 1))
  check_deployment kube-system hubble-ui "Hubble UI" || errors=$((errors + 1))
  check_post_install "${COMPONENTS_DIR}/01-cilium" "Cilium" || errors=$((errors + 1))
  echo

  # Validate ArgoCD
  echo "Checking ArgoCD components:"
  check_deployment argocd argocd-server "ArgoCD Server" || errors=$((errors + 1))
  check_deployment argocd argocd-repo-server "ArgoCD Repo Server" || errors=$((errors + 1))
  check_deployment argocd argocd-applicationset-controller "ArgoCD ApplicationSet Controller" || errors=$((errors + 1))
  check_post_install "${COMPONENTS_DIR}/02-argocd" "ArgoCD" || errors=$((errors + 1))
  echo

  # Summary
  echo "================================================================"
  if [ "${errors}" -eq 0 ]; then
    echo -e "${GREEN}All components are running correctly!${NC}"
    exit 0
  else
    echo -e "${RED}Found ${errors} component(s) with issues${NC}"
    echo "Check the component logs for more details:"
    echo " kubectl logs -n <namespace> deployment/<deployment-name>"
    exit 1
  fi
}

# Run main function
main "$@"


@@ -82,11 +82,10 @@ talosctl gen config \
--output-types controlplane \
--with-secrets secrets.yaml \
--config-patch @nodes/master1.yaml \
--config-patch @patches/argocd.yaml \
--config-patch @patches/cilium.yaml \
--config-patch @patches/network.yaml \
--config-patch @patches/scheduling.yaml \
--config-patch @patches/discovery.yaml \
--config-patch @patches/disk.yaml \
--config-patch @patches/diskselector.yaml \
--config-patch @patches/vip.yaml \
--config-patch @patches/metrics.yaml \
--config-patch @patches/hostpath.yaml \
@@ -102,8 +101,7 @@ talosctl gen config \
--output-types worker \
--with-secrets secrets.yaml \
--config-patch @nodes/worker1.yaml \
--config-patch @patches/argocd.yaml \
--config-patch @patches/cilium.yaml \
--config-patch @patches/network.yaml \
--config-patch @patches/scheduling.yaml \
--config-patch @patches/discovery.yaml \
--config-patch @patches/diskselector.yaml \
@@ -141,12 +139,24 @@ Finally, retrieve the kubeconfig, it will merge with `~/.kube/config`, if it exists
talosctl -n 192.168.0.10 kubeconfig
```
-Check nodes:
+Check nodes, note the NotReady status, since the Cilium CNI is not running yet:
```bash
kubectl get nodes
```
Install the Gateway API:
```bash
kubectl apply --server-side -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
```
Install Cilium:
```bash
bash scripts/cilium.sh
```
## TODO
- Remove secrets from config

File diff suppressed because it is too large


@@ -1,19 +0,0 @@
#!/usr/bin/env bash

helm repo add cilium https://helm.cilium.io/
helm install \
  cilium \
  cilium/cilium \
  --version 1.18.3 \
  --namespace kube-system \
  --set ipam.mode=kubernetes \
  --set hubble.relay.enabled=true \
  --set hubble.ui.enabled=true \
  --set l2announcements.enabled=true \
  --set kubeProxyReplacement=true \
  --set securityContext.capabilities.ciliumAgent="{CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}" \
  --set securityContext.capabilities.cleanCiliumState="{NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}" \
  --set cgroup.autoMount.enabled=false \
  --set cgroup.hostRoot=/sys/fs/cgroup \
  --set k8sServiceHost=localhost \
  --set k8sServicePort=7445