From 822e42dbb8afb40bdfa9ef363484d7cb3a9490d4 Mon Sep 17 00:00:00 2001 From: Horacio Duran Date: Fri, 9 Jan 2026 16:07:44 +0100 Subject: [PATCH] Add template to deploy Forgejo. This template allows deploying Forgejo on either Scaleway or Hetzner (untested) without much knowledge about them. It DOES require knowledge about Terragrunt and Ansible. A wizard of sorts is provided, but it will not guarantee success without some knowledge about the underlying technology. --- CONTRIBUTING.md | 107 ++ Makefile | 242 +++ README.md | 167 ++- ansible/ansible.cfg | 28 + .../inventory/production/hosts.yml.example | 61 + ansible/playbooks/backup.yml | 203 +++ ansible/playbooks/deploy.yml | 122 ++ ansible/playbooks/restore.yml | 234 +++ ansible/playbooks/update.yml | 133 ++ ansible/playbooks/vars/main.yml | 64 + ansible/playbooks/vars/secrets.yml.example | 40 + ansible/roles/forgejo/defaults/main.yml | 145 ++ ansible/roles/forgejo/handlers/main.yml | 81 + ansible/roles/forgejo/tasks/backup.yml | 32 + ansible/roles/forgejo/tasks/caddy.yml | 71 + ansible/roles/forgejo/tasks/docker.yml | 100 ++ ansible/roles/forgejo/tasks/forgejo.yml | 138 ++ ansible/roles/forgejo/tasks/main.yml | 94 ++ ansible/roles/forgejo/tasks/monitoring.yml | 66 + ansible/roles/forgejo/tasks/postgres.yml | 163 ++ ansible/roles/forgejo/tasks/prepare.yml | 194 +++ ansible/roles/forgejo/tasks/redis.yml | 40 + ansible/roles/forgejo/tasks/ssl.yml | 29 + ansible/roles/forgejo/tasks/tailscale.yml | 76 + ansible/roles/forgejo/tasks/ufw.yml | 142 ++ ansible/roles/forgejo/tasks/volume.yml | 60 + .../tasks/{restore.yml,monitoring.yml} | 0 ansible/roles/forgejo/templates/Caddyfile.j2 | 63 + ansible/roles/forgejo/templates/app.ini.j2 | 219 +++ .../docker-compose.monitoring.yml.j2 | 27 + .../forgejo/templates/docker-compose.yml.j2 | 76 + .../forgejo/templates/forgejo.service.j2 | 19 + .../forgejo/templates/forgejo_backup.sh.j2 | 33 + .../forgejo/templates/postgres_backup.sh.j2 | 24 + .../roles/forgejo/templates/prometheus.yml.j2 | 42 + .../roles/forgejo/templates/ufw-forgejo.j2 | 14 + docs/CONFIGURATION.md | 569 +++++++ docs/OPERATIONS.md | 574 +++++++ docs/QUICKSTART.md | 190 +++ setup-wizard.sh | 1327 +++++++++++++++++ .../hetzner/compute/terraform.tfvars.example | 12 + terraform/hetzner/compute/terragrunt.hcl | 304 ++++ terraform/hetzner/root.hcl | 63 + terraform/hetzner/storage/.gitkeep | 0 .../scaleway/compute/terraform.tfvars.example | 12 + terraform/scaleway/compute/terragrunt.hcl | 224 +++ terraform/scaleway/root.hcl | 70 + terraform/scaleway/storage/terragrunt.hcl | 154 ++ 48 files changed, 6846 insertions(+), 2 deletions(-) create mode 100644 CONTRIBUTING.md create mode 100644 Makefile create mode 100644 ansible/ansible.cfg create mode 100644 ansible/inventory/production/hosts.yml.example create mode 100644 ansible/playbooks/backup.yml create mode 100644 ansible/playbooks/deploy.yml create mode 100644 ansible/playbooks/restore.yml create mode 100644 ansible/playbooks/update.yml create mode 100644 ansible/playbooks/vars/main.yml create mode 100644 ansible/playbooks/vars/secrets.yml.example create mode 100644 ansible/roles/forgejo/defaults/main.yml create mode 100644 ansible/roles/forgejo/handlers/main.yml create mode 100644 ansible/roles/forgejo/tasks/backup.yml create mode 100644 ansible/roles/forgejo/tasks/caddy.yml create mode 100644 ansible/roles/forgejo/tasks/docker.yml create mode 100644 ansible/roles/forgejo/tasks/forgejo.yml create mode 100644 ansible/roles/forgejo/tasks/main.yml create mode 100644 
ansible/roles/forgejo/tasks/monitoring.yml create mode 100644 ansible/roles/forgejo/tasks/postgres.yml create mode 100644 ansible/roles/forgejo/tasks/prepare.yml create mode 100644 ansible/roles/forgejo/tasks/redis.yml create mode 100644 ansible/roles/forgejo/tasks/ssl.yml create mode 100644 ansible/roles/forgejo/tasks/tailscale.yml create mode 100644 ansible/roles/forgejo/tasks/ufw.yml create mode 100644 ansible/roles/forgejo/tasks/volume.yml create mode 100644 ansible/roles/forgejo/tasks/{restore.yml,monitoring.yml} create mode 100644 ansible/roles/forgejo/templates/Caddyfile.j2 create mode 100644 ansible/roles/forgejo/templates/app.ini.j2 create mode 100644 ansible/roles/forgejo/templates/docker-compose.monitoring.yml.j2 create mode 100644 ansible/roles/forgejo/templates/docker-compose.yml.j2 create mode 100644 ansible/roles/forgejo/templates/forgejo.service.j2 create mode 100644 ansible/roles/forgejo/templates/forgejo_backup.sh.j2 create mode 100644 ansible/roles/forgejo/templates/postgres_backup.sh.j2 create mode 100644 ansible/roles/forgejo/templates/prometheus.yml.j2 create mode 100644 ansible/roles/forgejo/templates/ufw-forgejo.j2 create mode 100644 docs/CONFIGURATION.md create mode 100644 docs/OPERATIONS.md create mode 100644 docs/QUICKSTART.md create mode 100755 setup-wizard.sh create mode 100644 terraform/hetzner/compute/terraform.tfvars.example create mode 100644 terraform/hetzner/compute/terragrunt.hcl create mode 100644 terraform/hetzner/root.hcl create mode 100644 terraform/hetzner/storage/.gitkeep create mode 100644 terraform/scaleway/compute/terraform.tfvars.example create mode 100644 terraform/scaleway/compute/terragrunt.hcl create mode 100644 terraform/scaleway/root.hcl create mode 100644 terraform/scaleway/storage/terragrunt.hcl diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..90f7800 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,107 @@ +# Contributing + +Thank you for your interest in contributing! This repository is hosted on a private Forgejo instance without public registration, but we welcome contributions from anyone. + +**Authoritative repository:** https://git.dumontix.eu/dumontix/forgejo-autohebergement +**Codeberg mirror:** https://codeberg.org/dumontix/forgejo-autohebergement + +Choose the method that matches your experience level: + +--- + +## Simple Method (Recommended for Most Contributors) + +If you're new to Git or prefer a straightforward process: + +### Fork on Codeberg and Open a Pull Request + +1. Go to the [Codeberg mirror](https://codeberg.org/dumontix/forgejo-autohebergement) +2. Click **Fork** to create a copy in your Codeberg account +3. Clone your fork and make your changes (see the example below) +4. Push your changes to your fork +5. Open a Pull Request against the [Codeberg mirror](https://codeberg.org/dumontix/forgejo-autohebergement); after some review, I'll make sure to merge it into the authoritative repo. + +### Submit via Issue + +1. Perform steps 1-4 of the previous section. +2. Open an issue at the [authoritative repository](https://git.dumontix.eu/dumontix/forgejo-autohebergement/issues) +3. Title it: **"Contribution: [brief description]"** +4. Include: + - A link to your Codeberg fork/branch + - A description of what you changed and why + +We'll review your changes and merge them manually. You'll be credited as the author in the commit. 
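+
+For reference, the Git side of steps 3-4 above might look like this (the fork URL and branch name are placeholders):
+
+```bash
+# Clone your Codeberg fork (placeholder URL) and work on a topic branch
+git clone https://codeberg.org/YOUR_USER/forgejo-autohebergement.git
+cd forgejo-autohebergement
+git checkout -b my-change
+
+# Commit your work and push the branch back to your fork
+git add -A
+git commit -m "Describe your change"
+git push -u origin my-change
+```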
+ +**Example issue:** + +``` +Title: Contribution: Fix typo in README + +Link: https://codeberg.org/youruser/forgejo-autohebergement/src/branch/fix-typo + +Description: +Fixed a typo in the installation instructions. "recieve" -> "receive" +``` + +--- + +## Expert Method: Email Patches + +For experienced Git users who prefer the traditional kernel-style workflow: + +### Generate Patches + +```bash +# Clone the repository +git clone https://git.dumontix.eu/dumontix/forgejo-autohebergement.git +cd forgejo-autohebergement +git checkout -b my-feature + +# Make commits with good messages +git commit -s -m "component: brief description + +Longer explanation of what and why." + +# Generate patch files +git format-patch origin/main --stdout > my-feature.patch +``` + +### Submit Patches + +Open an issue at the [authoritative repository](https://git.dumontix.eu/dumontix/forgejo-autohebergement/issues) with the subject **"[PATCH] brief description"** and attach your patch file. + +or + +Send an email to `hduran` (at) `dumontix.fr` with the same subject as the issue and the patch attached. + +**Patch guidelines:** +- One logical change per patch +- Clear commit message explaining *what* and *why* +- Include `Signed-off-by` line (`git commit -s`) +- Test your changes before submitting + +--- + +## Contribution Guidelines + +Regardless of method: + +- **Search existing issues** before submitting to avoid duplicates +- **Keep changes focused** - one feature/fix per contribution +- **Follow existing code style** - match the patterns you see +- **Test your changes** - make sure nothing breaks + +--- + +## What Happens Next? + +1. We'll review your contribution +2. We may ask questions or request changes via the issue +3. Once approved, we'll merge your changes with proper attribution +4. The issue will be closed with a link to the merged commit + +--- + +## Questions? + +Open an issue and we'll be happy to help. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..3acc506 --- /dev/null +++ b/Makefile @@ -0,0 +1,242 @@ +# Forgejo Self-Hosting Automation Makefile +# Provides convenient commands for deployment, updates, backups, and maintenance + +.PHONY: help install deploy update backup restore ssh terraform-init terraform-plan terraform-apply terraform-destroy ansible-ping ansible-setup check-deps + +# Default target +.DEFAULT_GOAL := help + +# Configuration +TERRAFORM_DIR ?= terraform +ANSIBLE_DIR ?= ansible +PROVIDER ?= scaleway +ENVIRONMENT ?= production +INVENTORY ?= $(ANSIBLE_DIR)/inventory/$(ENVIRONMENT)/hosts.yml +PLAYBOOK_DIR ?= $(ANSIBLE_DIR)/playbooks +VAULT_PASSWORD_FILE ?= $(ANSIBLE_DIR)/.vault_password +SSH_KEY ?= ~/.ssh/id_ed25519 +ANSIBLE_USER ?= root + +# Point Ansible to our config file +export ANSIBLE_CONFIG := $(ANSIBLE_DIR)/ansible.cfg + +# Colors for output +GREEN := \033[0;32m +YELLOW := \033[0;33m +RED := \033[0;31m +NC := \033[0m # No Color + +# Help target +help: ## Show this help message + @echo "$(GREEN)Forgejo Self-Hosting - Available Commands$(NC)" + @echo "" + @echo "$(YELLOW)First time? 
Run:$(NC) ./setup-wizard.sh" + @echo "" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " $(YELLOW)%-20s$(NC) %s\n", $$1, $$2}' + @echo "" + @echo "$(GREEN)Examples:$(NC)" + @echo " ./setup-wizard.sh # Interactive first-time setup" + @echo " make check-deps # Check all dependencies" + @echo " make terraform-apply # Create infrastructure" + @echo " make deploy # Deploy Forgejo" + @echo " make update # Update Forgejo" + @echo " make backup # Create backup" + @echo " make PROVIDER=hetzner deploy # Deploy on Hetzner" + +## Setup Wizard +wizard: ## Run interactive setup wizard (recommended for first-time setup) + @./setup-wizard.sh + +## Dependency Checks +check-deps: ## Check if all required tools are installed + @echo "$(GREEN)Checking dependencies...$(NC)" + @command -v terraform >/dev/null 2>&1 || { echo "$(RED)Error: terraform not installed$(NC)"; exit 1; } + @command -v terragrunt >/dev/null 2>&1 || { echo "$(RED)Error: terragrunt not installed$(NC)"; exit 1; } + @command -v ansible >/dev/null 2>&1 || { echo "$(RED)Error: ansible not installed$(NC)"; exit 1; } + @command -v ansible-playbook >/dev/null 2>&1 || { echo "$(RED)Error: ansible-playbook not installed$(NC)"; exit 1; } + @command -v ssh-agent >/dev/null 2>&1 || { echo "$(RED)Error: ssh-agent not found$(NC)"; exit 1; } + @test -f $(SSH_KEY) || { echo "$(RED)Error: SSH key not found at $(SSH_KEY)$(NC)"; exit 1; } + @echo "$(GREEN)✓ All dependencies installed$(NC)" + +## SSH Management +ssh-agent-start: ## Start SSH agent and add key + @echo "$(GREEN)Starting SSH agent...$(NC)" + @eval $$(ssh-agent -s) && ssh-add $(SSH_KEY) + +ssh-agent-check: ## Check if SSH agent has keys loaded + @ssh-add -l >/dev/null 2>&1 || { echo "$(YELLOW)No SSH keys loaded. 
Run 'make ssh-agent-start'$(NC)"; exit 1; } + @echo "$(GREEN)✓ SSH agent has keys loaded$(NC)" + +ssh: ssh-agent-check ## SSH into the Forgejo server + @echo "$(GREEN)Connecting to Forgejo server...$(NC)" + @ssh -i $(SSH_KEY) $(ANSIBLE_USER)@$$(cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt output -raw server_ipv4 2>/dev/null || terragrunt output -raw server_ip) + +## Terraform Commands +terraform-init: check-deps ## Initialize Terraform/Terragrunt + @echo "$(GREEN)Initializing Terraform for $(PROVIDER)...$(NC)" + @cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt init + @cd $(TERRAFORM_DIR)/$(PROVIDER)/storage && terragrunt init || true + +terraform-plan: terraform-init ## Plan infrastructure changes + @echo "$(GREEN)Planning infrastructure for $(PROVIDER)...$(NC)" + @cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt plan + @cd $(TERRAFORM_DIR)/$(PROVIDER)/storage && terragrunt plan || true + +terraform-apply: terraform-init ## Apply infrastructure changes + @echo "$(GREEN)Applying infrastructure for $(PROVIDER)...$(NC)" + @cd $(TERRAFORM_DIR)/$(PROVIDER)/storage && terragrunt apply -auto-approve || true + @cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt apply -auto-approve + @echo "$(GREEN)✓ Infrastructure created$(NC)" + @echo "$(YELLOW)Waiting 30 seconds for server initialization...$(NC)" + @sleep 30 + @$(MAKE) terraform-output + +terraform-output: ## Display Terraform outputs + @echo "$(GREEN)Infrastructure outputs:$(NC)" + @cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt output + +terraform-destroy: ## Destroy infrastructure + @echo "$(RED)WARNING: This will destroy all infrastructure!$(NC)" + @read -p "Type 'yes' to confirm: " confirm && [ "$$confirm" = "yes" ] || exit 1 + @cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt destroy -auto-approve + @cd $(TERRAFORM_DIR)/$(PROVIDER)/storage && terragrunt destroy -auto-approve || true + @echo "$(GREEN)Infrastructure destroyed$(NC)" + +## Ansible Commands +ansible-ping: ssh-agent-check ## Test Ansible connection + @echo "$(GREEN)Testing Ansible connection...$(NC)" + @ansible -i $(INVENTORY) forgejo -m ping + +ansible-setup: ssh-agent-check ## Run Ansible setup to gather facts + @echo "$(GREEN)Gathering system facts...$(NC)" + @ansible -i $(INVENTORY) forgejo -m setup + +ansible-vault-create: ## Create new Ansible vault file + @echo "$(GREEN)Creating Ansible vault...$(NC)" + @ansible-vault create $(ANSIBLE_DIR)/playbooks/vars/secrets.yml + +ansible-vault-edit: ## Edit Ansible vault file + @echo "$(GREEN)Editing Ansible vault...$(NC)" + @ansible-vault edit $(ANSIBLE_DIR)/playbooks/vars/secrets.yml + +ansible-vault-encrypt: ## Encrypt an existing file + @echo "$(GREEN)Encrypting file...$(NC)" + @read -p "File to encrypt: " file && ansible-vault encrypt $$file + +## Deployment Commands +deploy: check-deps ssh-agent-check ## Deploy Forgejo (full deployment) + @echo "$(GREEN)Deploying Forgejo...$(NC)" + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/deploy.yml --ask-vault-pass + @echo "$(GREEN)✓ Deployment complete$(NC)" + +deploy-quick: ssh-agent-check ## Quick deploy without dependency checks + @echo "$(GREEN)Quick deploying Forgejo...$(NC)" + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/deploy.yml --ask-vault-pass + +deploy-tags: ssh-agent-check ## Deploy specific tags (make deploy-tags TAGS=nginx,ssl) + @echo "$(GREEN)Deploying with tags: $(TAGS)$(NC)" + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/deploy.yml --ask-vault-pass --tags "$(TAGS)" + +deploy-check: ssh-agent-check ## Dry-run deployment 
+ @echo "$(GREEN)Checking deployment...$(NC)" + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/deploy.yml --ask-vault-pass --check + +## Update Commands +update: ssh-agent-check ## Update Forgejo to latest version + @echo "$(GREEN)Updating Forgejo...$(NC)" + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/update.yml --ask-vault-pass + @echo "$(GREEN)✓ Update complete$(NC)" + +update-no-backup: ssh-agent-check ## Update without creating backup + @echo "$(YELLOW)Updating without backup...$(NC)" + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/update.yml --ask-vault-pass --extra-vars "skip_backup=true" + +## Backup Commands +backup: ssh-agent-check ## Create backup of Forgejo + @echo "$(GREEN)Creating backup...$(NC)" + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/backup.yml --ask-vault-pass + @echo "$(GREEN)✓ Backup complete$(NC)" + +backup-to-s3: ssh-agent-check ## Create backup and upload to S3 + @echo "$(GREEN)Creating backup and uploading to S3...$(NC)" + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/backup.yml --ask-vault-pass --extra-vars "upload_to_s3=true" + +## Restore Commands +restore: ssh-agent-check ## Restore Forgejo from backup + @echo "$(RED)WARNING: This will restore from backup$(NC)" + @read -p "Backup timestamp (e.g., 20240115T120000): " timestamp && \ + ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/restore.yml --ask-vault-pass --extra-vars "backup_timestamp=$$timestamp" + +restore-force: ssh-agent-check ## Force restore without confirmation + @read -p "Backup timestamp: " timestamp && \ + ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/restore.yml --ask-vault-pass --extra-vars "backup_timestamp=$$timestamp force_restore=true" + +restore-from-s3: ssh-agent-check ## Restore from S3 backup + @read -p "Backup timestamp: " timestamp && \ + ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/restore.yml --ask-vault-pass --extra-vars "backup_timestamp=$$timestamp backup_source=s3" + +## Maintenance Commands +logs: ssh-agent-check ## View Forgejo logs + @echo "$(GREEN)Fetching Forgejo logs...$(NC)" + @ssh -i $(SSH_KEY) $(ANSIBLE_USER)@$$(cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt output -raw server_ipv4 2>/dev/null || terragrunt output -raw server_ip) "docker logs forgejo --tail 100 -f" + +logs-caddy: ssh-agent-check ## View Caddy logs + @echo "$(GREEN)Fetching Caddy logs...$(NC)" + @ssh -i $(SSH_KEY) $(ANSIBLE_USER)@$$(cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt output -raw server_ipv4 2>/dev/null || terragrunt output -raw server_ip) "tail -f /var/log/caddy/forgejo_access.log" + +restart: ssh-agent-check ## Restart Forgejo service + @echo "$(GREEN)Restarting Forgejo...$(NC)" + @ssh -i $(SSH_KEY) $(ANSIBLE_USER)@$$(cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt output -raw server_ipv4 2>/dev/null || terragrunt output -raw server_ip) "cd /opt/forgejo && docker compose restart" + @echo "$(GREEN)✓ Forgejo restarted$(NC)" + +status: ssh-agent-check ## Check Forgejo status + @echo "$(GREEN)Checking Forgejo status...$(NC)" + @ssh -i $(SSH_KEY) $(ANSIBLE_USER)@$$(cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt output -raw server_ipv4 2>/dev/null || terragrunt output -raw server_ip) "cd /opt/forgejo && docker compose ps" + +health: ssh-agent-check ## Check Forgejo health + @echo "$(GREEN)Checking Forgejo health...$(NC)" + @ssh -i $(SSH_KEY) $(ANSIBLE_USER)@$$(cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt output -raw server_ipv4 2>/dev/null || terragrunt output -raw server_ip) "curl -s http://localhost:3000/api/healthz" + +## Full 
Stack Commands +install: terraform-apply deploy ## Full installation (infrastructure + deployment) + @echo "$(GREEN)✓ Full installation complete!$(NC)" + +rebuild: terraform-destroy install ## Rebuild everything from scratch + @echo "$(GREEN)✓ Rebuild complete!$(NC)" + +## Utility Commands +clean: ## Clean local temporary files + @echo "$(GREEN)Cleaning temporary files...$(NC)" + @find . -type f -name "*.retry" -delete + @find . -type d -name ".terraform" -exec rm -rf {} + 2>/dev/null || true + @find . -type f -name "terraform.tfstate*" -delete 2>/dev/null || true + @echo "$(GREEN)✓ Cleaned$(NC)" + +validate: check-deps ## Validate all configurations + @echo "$(GREEN)Validating configurations...$(NC)" + @cd $(TERRAFORM_DIR)/$(PROVIDER)/compute && terragrunt validate + @ansible-playbook -i $(INVENTORY) $(PLAYBOOK_DIR)/deploy.yml --syntax-check + @echo "$(GREEN)✓ All configurations valid$(NC)" + +docs: ## Generate documentation + @echo "$(GREEN)Generating documentation...$(NC)" + @terraform-docs markdown $(TERRAFORM_DIR)/$(PROVIDER)/compute > $(TERRAFORM_DIR)/$(PROVIDER)/compute/README.md || true + @echo "$(GREEN)✓ Documentation generated$(NC)" + +## Information Commands +info: ## Display project information + @echo "$(GREEN)Forgejo Self-Hosting Project$(NC)" + @echo "" + @echo "Provider: $(PROVIDER)" + @echo "Environment: $(ENVIRONMENT)" + @echo "Ansible User: $(ANSIBLE_USER)" + @echo "SSH Key: $(SSH_KEY)" + @echo "Inventory: $(INVENTORY)" + @echo "" + +version: ## Display tool versions + @echo "$(GREEN)Tool Versions:$(NC)" + @echo "Terraform: $$(terraform version | head -1)" + @echo "Terragrunt: $$(terragrunt --version)" + @echo "Ansible: $$(ansible --version | head -1)" + @echo "Make: $$(make --version | head -1)" diff --git a/README.md b/README.md index 8b0ef3b..ece630f 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,166 @@ -# forgejo-autohebergement +# Forgejo Self-Hosting -Modele pour demarrer un auto hebergement de forgejo dans un cloud europeen \ No newline at end of file +Deploy your own **Forgejo** Git server on **Scaleway** or **Hetzner** with automated infrastructure provisioning and secure configuration. + +## Features + +- **One-command deployment** via interactive setup wizard +- **Infrastructure as Code** using Terraform/Terragrunt +- **Automated configuration** via Ansible +- **Secure by default**: Tailscale VPN + UFW firewall +- **Automatic HTTPS** via Caddy and Let's Encrypt +- **PostgreSQL database** with optimized settings +- **Redis caching** for improved performance +- **Automated backups** with configurable retention +- **Docker-based** Forgejo deployment + +## Supported Providers + +- **Scaleway** (France) - European cloud provider +- **Hetzner** (Germany) - European cloud provider + +## Prerequisites + +- macOS or Linux +- [Terraform](https://terraform.io) >= 1.5.0 +- [Terragrunt](https://terragrunt.gruntwork.io) +- [Ansible](https://ansible.com) >= 2.14 +- SSH key pair +- Cloud provider account (Scaleway or Hetzner) +- Domain name with DNS access + +## Quick Start + +```bash +# Clone the repository +git clone https://git.dumontix.eu/dumontix/forgejo-autohebergement.git +cd forgejo-autohebergement + +# Run the interactive setup wizard +./setup-wizard.sh +``` + +The wizard will guide you through: + +1. Checking dependencies +2. Selecting your SSH key +3. Choosing cloud provider (Scaleway/Hetzner) +4. Configuring secrets (Ansible Vault) +5. Setting up cloud credentials +6. Configuring your domain +7. Creating infrastructure +8. Deploying Forgejo +9. 
Setting up Tailscale VPN +10. Enabling UFW firewall + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Internet │ +└─────────────────────────────────────────────────────────────┘ + │ + ┌─────────┴─────────┐ + │ Caddy (HTTPS) │ ← Let's Encrypt + │ Port 80, 443 │ + └─────────┬─────────┘ + │ + ┌─────────┴─────────┐ + │ Forgejo │ ← Docker Container + │ Port 3000 │ + └─────────┬─────────┘ + │ + ┌─────────────────┼─────────────────┐ + │ │ │ + ┌────────┴────────┐ ┌──────┴──────┐ ┌────────┴──────┐ + │ PostgreSQL │ │ Redis │ │ Tailscale │ + │ (Database) │ │ (Cache) │ │ (VPN/SSH) │ + └─────────────────┘ └─────────────┘ └───────────────┘ +``` + +## Security + +- **SSH access**: Only via Tailscale VPN (port 22 blocked publicly) +- **Git SSH**: Only via Tailscale (port 2222 blocked publicly) +- **Web access**: HTTPS only (HTTP redirects to HTTPS) +- **Firewall**: UFW with strict rules +- **2FA**: Enabled by default for user accounts + +## Configuration + +After deployment, you can customize your Forgejo instance: + +```bash +# Edit configuration +make ansible-vault-edit + +# Re-deploy with changes +make deploy +``` + +See `docs/CONFIGURATION.md` for all available options. + +## Operations + +```bash +# Check status +make status + +# View logs +make logs + +# Create backup +make backup + +# Update Forgejo +make update + +# SSH to server (via Tailscale) +ssh root@ +``` + +See `docs/OPERATIONS.md` for detailed operations guide. + +## File Structure + +``` +forgejo-selfhosting/ +├── ansible/ +│ ├── inventory/production/ # Server inventory +│ ├── playbooks/ # Deployment playbooks +│ └── roles/forgejo/ # Forgejo role +├── terraform/ +│ ├── scaleway/ # Scaleway infrastructure +│ └── hetzner/ # Hetzner infrastructure +├── docs/ # Documentation +├── setup-wizard.sh # Interactive setup +└── Makefile # Convenience commands +``` + +## Troubleshooting + +### Cannot SSH after UFW enabled +SSH is only accessible via Tailscale after UFW is enabled. Use: +```bash +ssh root@ +``` + +### Forgejo not starting +Check logs: +```bash +docker logs forgejo +``` + +### Database connection issues +Verify PostgreSQL is running and accessible: +```bash +systemctl status postgresql +``` + +## License + +Apache 2.0 see LICENSE for details + +## Contributing + +Contributions are welcome! Please read the contributing guidelines before submitting PRs. 
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000..0785b01 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,28 @@ +[defaults] +# Roles path relative to this config file +roles_path = ./roles + +# Inventory path +inventory = ./inventory/production/hosts.yml + +# Don't create retry files +retry_files_enabled = False + +# Show task duration +callback_whitelist = timer + +# Reduce verbosity +deprecation_warnings = False + +# SSH settings +host_key_checking = False +timeout = 30 + +[privilege_escalation] +become = True +become_method = sudo +become_user = root + +[ssh_connection] +pipelining = True +ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no diff --git a/ansible/inventory/production/hosts.yml.example b/ansible/inventory/production/hosts.yml.example new file mode 100644 index 0000000..bf5c8e2 --- /dev/null +++ b/ansible/inventory/production/hosts.yml.example @@ -0,0 +1,61 @@ +--- +# Ansible Inventory for Forgejo Production +# Copy this file to hosts.yml and update with your values: +# cp hosts.yml.example hosts.yml + +all: + children: + forgejo: + hosts: + forgejo-prod: + # UPDATE: Your server IP (from terraform output or cloud console) + ansible_host: YOUR_SERVER_IP + ansible_user: root + ansible_port: 22 + ansible_python_interpreter: /usr/bin/python3 + + # ============================================================= + # DOMAIN CONFIGURATION (REQUIRED!) + # ============================================================= + # UPDATE: Your domain name pointing to the server IP + forgejo_domain: git.example.com + + forgejo_version: "9.0.2" + + # Database configuration + forgejo_db_type: postgres + forgejo_db_name: forgejo + forgejo_db_user: forgejo + + # Enable features + forgejo_enable_letsencrypt: true + forgejo_enable_backups: true + forgejo_enable_2fa: true + forgejo_use_redis: true + + # Security settings + forgejo_disable_registration: true # Disable public registration + forgejo_require_signin_view: false # Require login to view repos + + # Security: Tailscale VPN + UFW firewall + forgejo_enable_tailscale: true + forgejo_enable_ufw: true + + # Email configuration (optional) + forgejo_enable_email: false + # forgejo_email_host: smtp.example.com + # forgejo_email_port: 587 + # forgejo_email_user: noreply@example.com + + # S3 configuration (optional) + forgejo_enable_s3: false + # forgejo_s3_endpoint: https://s3.example.com + # forgejo_s3_bucket: forgejo-lfs + # forgejo_s3_region: us-east-1 + + # Backup configuration + forgejo_backup_retention_days: 30 + forgejo_backup_to_s3: false + + vars: + ansible_ssh_common_args: '-o StrictHostKeyChecking=no' diff --git a/ansible/playbooks/backup.yml b/ansible/playbooks/backup.yml new file mode 100644 index 0000000..dd39397 --- /dev/null +++ b/ansible/playbooks/backup.yml @@ -0,0 +1,203 @@ +--- +# Backup Forgejo data and database +# Creates timestamped backups and optionally uploads to S3 + +- name: Backup Forgejo + hosts: forgejo + become: yes + gather_facts: yes + + vars_files: + - vars/main.yml + - vars/secrets.yml + + vars: + backup_timestamp: "{{ ansible_date_time.iso8601_basic_short }}" + backup_filename: "forgejo-backup-{{ backup_timestamp }}.tar.gz" + upload_to_s3: "{{ forgejo_backup_to_s3 | default(false) }}" + + pre_tasks: + - name: Display backup information + ansible.builtin.debug: + msg: | + Creating backup: {{ backup_filename }} + Upload to S3: {{ upload_to_s3 }} + Backup path: {{ forgejo_backup_path }} + + tasks: + - name: Ensure backup directory exists + 
ansible.builtin.file: + path: "{{ forgejo_backup_path }}" + state: directory + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0750' + + - name: Create PostgreSQL backup + community.postgresql.postgresql_db: + name: "{{ forgejo_db_name }}" + state: dump + target: "{{ forgejo_backup_path }}/database-{{ backup_timestamp }}.sql" + become_user: postgres + when: forgejo_db_type == 'postgres' + + - name: Compress database backup + community.general.archive: + path: "{{ forgejo_backup_path }}/database-{{ backup_timestamp }}.sql" + dest: "{{ forgejo_backup_path }}/database-{{ backup_timestamp }}.sql.gz" + format: gz + remove: yes + when: forgejo_db_type == 'postgres' + + - name: Create Git repositories backup + community.general.archive: + path: "{{ forgejo_data_path }}/git" + dest: "{{ forgejo_backup_path }}/repositories-{{ backup_timestamp }}.tar.gz" + format: gz + + - name: Backup configuration files + community.general.archive: + path: + - "{{ forgejo_config_path }}" + - "{{ forgejo_base_path }}/docker-compose.yml" + dest: "{{ forgejo_backup_path }}/config-{{ backup_timestamp }}.tar.gz" + format: gz + + - name: Backup attachments and LFS + community.general.archive: + path: + - "{{ forgejo_data_path }}/attachments" + - "{{ forgejo_data_path }}/lfs" + - "{{ forgejo_data_path }}/avatars" + dest: "{{ forgejo_backup_path }}/data-{{ backup_timestamp }}.tar.gz" + format: gz + + - name: Create backup manifest + ansible.builtin.copy: + dest: "{{ forgejo_backup_path }}/manifest-{{ backup_timestamp }}.json" + content: | + { + "timestamp": "{{ ansible_date_time.iso8601 }}", + "version": "{{ forgejo_version }}", + "hostname": "{{ ansible_hostname }}", + "database": "{{ forgejo_db_type }}", + "files": { + "database": "database-{{ backup_timestamp }}.sql.gz", + "repositories": "repositories-{{ backup_timestamp }}.tar.gz", + "config": "config-{{ backup_timestamp }}.tar.gz", + "data": "data-{{ backup_timestamp }}.tar.gz" + }, + "sizes": {} + } + mode: '0644' + + - name: Get backup file sizes + ansible.builtin.stat: + path: "{{ forgejo_backup_path }}/{{ item }}" + register: backup_files + loop: + - "database-{{ backup_timestamp }}.sql.gz" + - "repositories-{{ backup_timestamp }}.tar.gz" + - "config-{{ backup_timestamp }}.tar.gz" + - "data-{{ backup_timestamp }}.tar.gz" + + - name: Display backup sizes + ansible.builtin.debug: + msg: "{{ item.item }}: {{ (item.stat.size / 1024 / 1024) | round(2) }} MB" + loop: "{{ backup_files.results }}" + when: item.stat.exists + + - name: Upload to S3 + when: upload_to_s3 and forgejo_enable_s3 + block: + - name: Install AWS CLI + ansible.builtin.pip: + name: awscli + state: present + + - name: Upload database backup to S3 + ansible.builtin.command: + cmd: > + aws s3 cp {{ forgejo_backup_path }}/database-{{ backup_timestamp }}.sql.gz + s3://{{ forgejo_backup_s3_bucket }}/backups/database-{{ backup_timestamp }}.sql.gz + --endpoint-url {{ forgejo_s3_endpoint }} + environment: + AWS_ACCESS_KEY_ID: "{{ forgejo_s3_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ forgejo_s3_secret_key }}" + no_log: yes + + - name: Upload repositories backup to S3 + ansible.builtin.command: + cmd: > + aws s3 cp {{ forgejo_backup_path }}/repositories-{{ backup_timestamp }}.tar.gz + s3://{{ forgejo_backup_s3_bucket }}/backups/repositories-{{ backup_timestamp }}.tar.gz + --endpoint-url {{ forgejo_s3_endpoint }} + environment: + AWS_ACCESS_KEY_ID: "{{ forgejo_s3_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ forgejo_s3_secret_key }}" + no_log: yes + + - name: Upload config backup to S3 + 
ansible.builtin.command: + cmd: > + aws s3 cp {{ forgejo_backup_path }}/config-{{ backup_timestamp }}.tar.gz + s3://{{ forgejo_backup_s3_bucket }}/backups/config-{{ backup_timestamp }}.tar.gz + --endpoint-url {{ forgejo_s3_endpoint }} + environment: + AWS_ACCESS_KEY_ID: "{{ forgejo_s3_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ forgejo_s3_secret_key }}" + no_log: yes + + - name: Upload manifest to S3 + ansible.builtin.command: + cmd: > + aws s3 cp {{ forgejo_backup_path }}/manifest-{{ backup_timestamp }}.json + s3://{{ forgejo_backup_s3_bucket }}/backups/manifest-{{ backup_timestamp }}.json + --endpoint-url {{ forgejo_s3_endpoint }} + environment: + AWS_ACCESS_KEY_ID: "{{ forgejo_s3_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ forgejo_s3_secret_key }}" + no_log: yes + + - name: Clean up old backups + ansible.builtin.shell: | + find {{ forgejo_backup_path }} -name "*.tar.gz" -o -name "*.sql.gz" -o -name "*.json" | \ + grep -E "[0-9]{8}T[0-9]{6}" | \ + sort -r | \ + tail -n +{{ (forgejo_backup_retention_days | int * 4) + 1 }} | \ + xargs -r rm -f + args: + executable: /bin/bash + when: forgejo_backup_retention_days is defined + + post_tasks: + - name: Calculate total backup size + ansible.builtin.shell: | + du -sh {{ forgejo_backup_path }} | cut -f1 + register: total_backup_size + changed_when: false + + - name: Display completion message + ansible.builtin.debug: + msg: | + ======================================== + Backup Complete! + ======================================== + + Timestamp: {{ backup_timestamp }} + Location: {{ forgejo_backup_path }} + Total size: {{ total_backup_size.stdout }} + + Files created: + - database-{{ backup_timestamp }}.sql.gz + - repositories-{{ backup_timestamp }}.tar.gz + - config-{{ backup_timestamp }}.tar.gz + - data-{{ backup_timestamp }}.tar.gz + - manifest-{{ backup_timestamp }}.json + + {% if upload_to_s3 %} + Uploaded to S3: {{ forgejo_backup_s3_bucket }}/backups/ + {% endif %} + + Retention: {{ forgejo_backup_retention_days }} days + ======================================== diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml new file mode 100644 index 0000000..92cfcc9 --- /dev/null +++ b/ansible/playbooks/deploy.yml @@ -0,0 +1,122 @@ +--- +# Deploy Forgejo Git Forge +# This playbook deploys a complete Forgejo instance with PostgreSQL, Redis, Nginx, and SSL + +- name: Deploy Forgejo + hosts: forgejo + become: yes + gather_facts: yes + + vars_files: + - vars/main.yml + - vars/secrets.yml # Ansible Vault encrypted + + pre_tasks: + - name: Verify Ansible version + ansible.builtin.assert: + that: + - ansible_version.full is version('2.14', '>=') + fail_msg: "This playbook requires Ansible 2.14 or higher" + success_msg: "Ansible version is compatible" + + - name: Gather system facts + ansible.builtin.setup: + + - name: Check system requirements + ansible.builtin.assert: + that: + - ansible_memtotal_mb >= 3500 + - ansible_processor_vcpus >= 2 + fail_msg: "System does not meet minimum requirements (4GB RAM, 2 vCPUs)" + success_msg: "System meets requirements" + + - name: Display deployment information + ansible.builtin.debug: + msg: | + Deploying Forgejo {{ forgejo_version }} + Domain: {{ forgejo_domain }} + Database: {{ forgejo_db_type }} + HTTPS: {{ forgejo_enable_letsencrypt }} + S3: {{ forgejo_enable_s3 }} + + roles: + - role: forgejo + tags: ['forgejo'] + + post_tasks: + - name: Display completion message + ansible.builtin.debug: + msg: | + ======================================== + Forgejo Deployment Complete! 
+ ======================================== + + Access your Forgejo instance at: + {{ forgejo_protocol }}://{{ forgejo_domain }} + + SSH clone URL: + git@{{ forgejo_domain }}:{{ forgejo_ssh_port }} + + Admin credentials (if first install): + Username: {{ forgejo_admin_username }} + Password: (set in vault) + + Next steps: + 1. Visit the web interface and complete setup + 2. Configure OAuth/LDAP if needed + 3. Set up CI/CD with Forgejo Actions + 4. Configure webhooks for integrations + + Backup location: {{ forgejo_backup_path }} + Logs: {{ forgejo_data_path }}/gitea/log + + ======================================== + + - name: Verify Forgejo is running + ansible.builtin.uri: + url: "http://localhost:{{ forgejo_http_port }}" + status_code: 200 + register: health_check + until: health_check.status == 200 + retries: 5 + delay: 3 + + - name: Create deployment summary file + ansible.builtin.copy: + dest: "{{ forgejo_base_path }}/DEPLOYMENT_INFO.txt" + content: | + Forgejo Deployment Information + ============================== + + Deployment Date: {{ ansible_date_time.iso8601 }} + Forgejo Version: {{ forgejo_version }} + Ansible User: {{ ansible_user }} + + Server Details: + - Hostname: {{ ansible_hostname }} + - IP Address: {{ ansible_default_ipv4.address }} + - OS: {{ ansible_distribution }} {{ ansible_distribution_version }} + - RAM: {{ ansible_memtotal_mb }} MB + - CPUs: {{ ansible_processor_vcpus }} + + Configuration: + - Domain: {{ forgejo_domain }} + - HTTP Port: {{ forgejo_http_port }} + - SSH Port: {{ forgejo_ssh_port }} + - Database: {{ forgejo_db_type }} + - Redis: {{ forgejo_use_redis }} + - LFS: {{ forgejo_enable_lfs }} + + Paths: + - Base: {{ forgejo_base_path }} + - Data: {{ forgejo_data_path }} + - Config: {{ forgejo_config_path }} + - Backups: {{ forgejo_backup_path }} + + Maintenance Commands: + - Restart: docker compose -f {{ forgejo_base_path }}/docker-compose.yml restart + - Logs: docker logs forgejo + - Backup: /usr/local/bin/forgejo_backup.sh + - Update: docker compose -f {{ forgejo_base_path }}/docker-compose.yml pull && docker compose up -d + mode: '0644' + become: yes diff --git a/ansible/playbooks/restore.yml b/ansible/playbooks/restore.yml new file mode 100644 index 0000000..32ef8c7 --- /dev/null +++ b/ansible/playbooks/restore.yml @@ -0,0 +1,234 @@ +--- +# Restore Forgejo from backup +# Restores database, repositories, configuration, and data + +- name: Restore Forgejo from Backup + hosts: forgejo + become: yes + gather_facts: yes + + vars_files: + - vars/main.yml + - vars/secrets.yml + + vars: + # Must be provided via --extra-vars + backup_timestamp: "" + backup_source: "local" # local or s3 + force_restore: false + + pre_tasks: + - name: Validate backup timestamp + ansible.builtin.fail: + msg: "Please provide backup_timestamp via --extra-vars 'backup_timestamp=20240115T120000'" + when: backup_timestamp == "" + + - name: Display restore information + ansible.builtin.debug: + msg: | + ======================================== + WARNING: This will restore Forgejo data + ======================================== + + Backup timestamp: {{ backup_timestamp }} + Source: {{ backup_source }} + + This operation will: + 1. Stop Forgejo service + 2. Restore database + 3. Restore repositories + 4. Restore configuration + 5. Restart services + + Current data will be backed up first. 
+ + - name: Confirm restore operation + ansible.builtin.pause: + prompt: "Type 'yes' to continue with restore" + register: restore_confirm + when: not force_restore + + - name: Validate confirmation + ansible.builtin.fail: + msg: "Restore cancelled by user" + when: not force_restore and restore_confirm.user_input != 'yes' + + tasks: + - name: Create pre-restore backup + # backup.yml in this directory is a playbook, not a task file, so run the + # backup script installed by the role instead (same approach as update.yml) + ansible.builtin.command: + cmd: /usr/local/bin/forgejo_backup.sh + + - name: Download backup from S3 if needed + when: backup_source == 's3' + block: + - name: Create temporary download directory + ansible.builtin.file: + path: "{{ forgejo_backup_path }}/restore-temp" + state: directory + mode: '0750' + + - name: Download backups from S3 + ansible.builtin.command: + cmd: > + aws s3 cp s3://{{ forgejo_backup_s3_bucket }}/backups/{{ item }}-{{ backup_timestamp }}.tar.gz + {{ forgejo_backup_path }}/{{ item }}-{{ backup_timestamp }}.tar.gz + --endpoint-url {{ forgejo_s3_endpoint }} + environment: + AWS_ACCESS_KEY_ID: "{{ forgejo_s3_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ forgejo_s3_secret_key }}" + loop: + - database + - repositories + - config + - data + no_log: yes + + - name: Verify backup files exist + ansible.builtin.stat: + path: "{{ forgejo_backup_path }}/{{ item }}-{{ backup_timestamp }}.tar.gz" + register: backup_files + loop: + - repositories + - config + - data + failed_when: not backup_files.results | map(attribute='stat.exists') | list | min + + - name: Verify database backup exists + ansible.builtin.stat: + path: "{{ forgejo_backup_path }}/database-{{ backup_timestamp }}.sql.gz" + register: db_backup + failed_when: not db_backup.stat.exists + + - name: Stop Forgejo service + community.docker.docker_compose_v2: + project_src: "{{ forgejo_base_path }}" + state: stopped + + - name: Restore PostgreSQL database + when: forgejo_db_type == 'postgres' + block: + - name: Drop existing database + community.postgresql.postgresql_db: + name: "{{ forgejo_db_name }}" + state: absent + become_user: postgres + + - name: Recreate database + community.postgresql.postgresql_db: + name: "{{ forgejo_db_name }}" + encoding: UTF8 + lc_collate: en_US.UTF-8 + lc_ctype: en_US.UTF-8 + template: template0 + state: present + become_user: postgres + + - name: Decompress database backup + # Write the decompressed dump next to the archive so the restore task below can use it + ansible.builtin.shell: + cmd: gunzip -c {{ forgejo_backup_path }}/database-{{ backup_timestamp }}.sql.gz > {{ forgejo_backup_path }}/database-{{ backup_timestamp }}.sql + + - name: Restore database + community.postgresql.postgresql_db: + name: "{{ forgejo_db_name }}" + state: restore + target: "{{ forgejo_backup_path }}/database-{{ backup_timestamp }}.sql" + become_user: postgres + + - name: Clear existing repositories + ansible.builtin.file: + path: "{{ forgejo_data_path }}/git" + state: absent + + - name: Restore repositories + ansible.builtin.unarchive: + src: "{{ forgejo_backup_path }}/repositories-{{ backup_timestamp }}.tar.gz" + dest: "{{ forgejo_data_path }}" + remote_src: yes + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + + - name: Restore configuration + ansible.builtin.unarchive: + src: "{{ forgejo_backup_path }}/config-{{ backup_timestamp }}.tar.gz" + dest: "{{ forgejo_base_path }}" + remote_src: yes + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + + - name: Restore data files + ansible.builtin.unarchive: + src: "{{ forgejo_backup_path }}/data-{{ backup_timestamp }}.tar.gz" + dest: "{{ forgejo_data_path }}" + remote_src: yes + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + + 
- name: Set correct permissions + ansible.builtin.file: + path: "{{ item }}" + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + recurse: yes + loop: + - "{{ forgejo_data_path }}/git" + - "{{ forgejo_data_path }}/attachments" + - "{{ forgejo_data_path }}/lfs" + - "{{ forgejo_config_path }}" + + - name: Start Forgejo service + community.docker.docker_compose_v2: + project_src: "{{ forgejo_base_path }}" + state: present + + - name: Wait for Forgejo to be ready + ansible.builtin.uri: + url: "http://localhost:{{ forgejo_http_port }}" + status_code: 200 + register: health_check + until: health_check.status == 200 + retries: 30 + delay: 2 + + - name: Run integrity checks + ansible.builtin.command: + cmd: docker exec forgejo forgejo doctor check --all + register: integrity_check + failed_when: false + + - name: Display integrity check results + ansible.builtin.debug: + msg: "{{ integrity_check.stdout_lines }}" + + post_tasks: + - name: Verify Forgejo health + ansible.builtin.uri: + url: "http://localhost:{{ forgejo_http_port }}/api/healthz" + status_code: 200 + register: health + + - name: Clean up temporary files + ansible.builtin.file: + path: "{{ forgejo_backup_path }}/restore-temp" + state: absent + when: backup_source == 's3' + + - name: Display completion message + ansible.builtin.debug: + msg: | + ======================================== + Restore Complete! + ======================================== + + Restored from backup: {{ backup_timestamp }} + + Forgejo is now running with restored data. + + Please verify: + 1. Login works correctly + 2. Repositories are accessible + 3. All data is present + + Original data was backed up before restore. + ======================================== diff --git a/ansible/playbooks/update.yml b/ansible/playbooks/update.yml new file mode 100644 index 0000000..6439776 --- /dev/null +++ b/ansible/playbooks/update.yml @@ -0,0 +1,133 @@ +--- +# Update Forgejo to latest version +# This playbook safely updates Forgejo with automatic backup + +- name: Update Forgejo + hosts: forgejo + become: yes + gather_facts: yes + + vars_files: + - vars/main.yml + - vars/secrets.yml + + vars: + backup_before_update: true + skip_backup: false # Override with --extra-vars "skip_backup=true" + + pre_tasks: + - name: Display update information + ansible.builtin.debug: + msg: | + Updating Forgejo from {{ forgejo_version }} + Backup will be created: {{ backup_before_update and not skip_backup }} + + - name: Check current Forgejo version + ansible.builtin.command: + cmd: docker exec forgejo forgejo --version + register: current_version + changed_when: false + failed_when: false + + - name: Display current version + ansible.builtin.debug: + msg: "Current version: {{ current_version.stdout if current_version.rc == 0 else 'Unable to determine' }}" + + tasks: + - name: Create pre-update backup + when: backup_before_update and not skip_backup + block: + - name: Run backup script + ansible.builtin.command: + cmd: /usr/local/bin/forgejo_backup.sh + register: backup_result + + - name: Display backup result + ansible.builtin.debug: + msg: "Backup completed: {{ backup_result.stdout_lines[-1] if backup_result.stdout_lines else 'No output' }}" + + - name: Stop Forgejo service + community.docker.docker_compose_v2: + project_src: "{{ forgejo_base_path }}" + state: stopped + + - name: Pull latest Forgejo image + community.docker.docker_image: + name: "{{ forgejo_docker_image }}:{{ forgejo_version }}" + source: pull + force_source: yes + + - name: Update Docker Compose file if needed + 
ansible.builtin.template: + src: ../roles/forgejo/templates/docker-compose.yml.j2 + dest: "{{ forgejo_base_path }}/docker-compose.yml" + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0640' + + - name: Start Forgejo service + community.docker.docker_compose_v2: + project_src: "{{ forgejo_base_path }}" + state: present + pull: always + + - name: Wait for Forgejo to be ready + ansible.builtin.uri: + url: "http://localhost:{{ forgejo_http_port }}" + status_code: 200 + register: health_check + until: health_check.status == 200 + retries: 30 + delay: 2 + + - name: Check updated version + ansible.builtin.command: + cmd: docker exec forgejo forgejo --version + register: updated_version + changed_when: false + + - name: Display updated version + ansible.builtin.debug: + msg: "Updated version: {{ updated_version.stdout }}" + + - name: Run database migrations + ansible.builtin.command: + cmd: docker exec forgejo forgejo migrate + register: migrate_result + changed_when: "'No migration needed' not in migrate_result.stdout" + + - name: Display migration result + ansible.builtin.debug: + msg: "{{ migrate_result.stdout_lines }}" + + post_tasks: + - name: Verify Forgejo health + ansible.builtin.uri: + url: "http://localhost:{{ forgejo_http_port }}/api/healthz" + status_code: 200 + return_content: yes + register: health + + - name: Display health status + ansible.builtin.debug: + msg: "Forgejo health check: {{ health.content }}" + + - name: Update deployment info + ansible.builtin.lineinfile: + path: "{{ forgejo_base_path }}/DEPLOYMENT_INFO.txt" + regexp: '^Last Update:' + line: "Last Update: {{ ansible_date_time.iso8601 }} - {{ forgejo_version }}" + insertafter: '^Deployment Date:' + + - name: Display completion message + ansible.builtin.debug: + msg: | + ======================================== + Forgejo Update Complete! + ======================================== + + Previous version: {{ current_version.stdout if current_version.rc == 0 else 'Unknown' }} + Current version: {{ updated_version.stdout }} + + The service is running and healthy. + ======================================== diff --git a/ansible/playbooks/vars/main.yml b/ansible/playbooks/vars/main.yml new file mode 100644 index 0000000..20137a0 --- /dev/null +++ b/ansible/playbooks/vars/main.yml @@ -0,0 +1,64 @@ +--- +# Main variables for Forgejo deployment +# NOTE: Domain-specific settings should be in inventory/production/hosts.yml +# Variables here are lower-priority defaults only. 
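+#
+# For example (hypothetical values), the inventory file
+# ansible/inventory/production/hosts.yml can set:
+#   forgejo_domain: git.example.org
+#   forgejo_version: "9.0.2"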
+ +# Forgejo version (can be overridden in inventory) +# forgejo_version: "9.0.2" + +# Protocol for public URLs (https recommended) +forgejo_protocol: https + +# System configuration +forgejo_user: git +forgejo_group: git +forgejo_uid: 1100 +forgejo_gid: 1100 + +# Installation paths +forgejo_base_path: /opt/forgejo +forgejo_data_path: "{{ forgejo_base_path }}/data" +forgejo_config_path: "{{ forgejo_base_path }}/config" + +# Network ports +forgejo_http_port: 3000 +forgejo_ssh_port: 2222 + +# Database configuration +forgejo_db_type: postgres +forgejo_db_host: localhost +forgejo_db_port: 5432 +forgejo_db_name: forgejo +forgejo_db_user: forgejo + +# Redis configuration +forgejo_use_redis: true +redis_host: localhost +redis_port: 6379 + +# SSL/TLS configuration +forgejo_enable_letsencrypt: true +letsencrypt_email: "admin@{{ forgejo_domain }}" + +# Features (can be overridden in inventory) +forgejo_enable_lfs: true +forgejo_enable_2fa: true +# forgejo_disable_registration - set in inventory +# forgejo_require_signin_view - set in inventory + +# Backup configuration +forgejo_enable_backups: true +forgejo_backup_schedule: "0 2 * * *" +forgejo_backup_retention_days: 30 + +# Monitoring +forgejo_enable_prometheus: false + +# Email configuration +forgejo_enable_email: false + +# S3 configuration +forgejo_enable_s3: false + +# Security +forgejo_log_level: Info diff --git a/ansible/playbooks/vars/secrets.yml.example b/ansible/playbooks/vars/secrets.yml.example new file mode 100644 index 0000000..2eb0d7c --- /dev/null +++ b/ansible/playbooks/vars/secrets.yml.example @@ -0,0 +1,40 @@ +--- +# Ansible Vault Encrypted Secrets +# +# IMPORTANT: Do NOT commit secrets.yml to git, even if encrypted! +# The .gitignore is configured to exclude it, but always verify. +# +# To set up: +# 1. cp secrets.yml.example secrets.yml +# 2. Edit secrets.yml with your actual values +# 3. ansible-vault encrypt secrets.yml +# 4. 
Verify: git status should NOT show secrets.yml +# +# To edit encrypted secrets: ansible-vault edit secrets.yml + +# Database passwords +vault_forgejo_db_password: "CHANGE_ME_STRONG_PASSWORD_HERE" + +# Admin account +vault_forgejo_admin_password: "CHANGE_ME_ADMIN_PASSWORD_HERE" + +# Secret keys (generate with: openssl rand -base64 32) +vault_forgejo_secret_key: "CHANGE_ME_SECRET_KEY_64_CHARS_MINIMUM_XXXXXXXXXXXXXXXXX" +vault_forgejo_internal_token: "CHANGE_ME_INTERNAL_TOKEN_XXXXXXXXXXXXXXXXXXXXXXXXX" +vault_forgejo_jwt_secret: "CHANGE_ME_JWT_SECRET_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + +# Metrics token (if prometheus enabled) +vault_forgejo_metrics_token: "CHANGE_ME_METRICS_TOKEN_XXXXXXXXX" + +# Email password (if email enabled) +vault_email_password: "" + +# S3 credentials (if S3 enabled) +vault_s3_access_key: "" +vault_s3_secret_key: "" + +# Notes: +# - Generate strong passwords: openssl rand -base64 32 +# - Never commit unencrypted secrets to version control +# - Keep a secure backup of your vault password +# - Rotate secrets regularly diff --git a/ansible/roles/forgejo/defaults/main.yml b/ansible/roles/forgejo/defaults/main.yml new file mode 100644 index 0000000..ca49582 --- /dev/null +++ b/ansible/roles/forgejo/defaults/main.yml @@ -0,0 +1,145 @@ +--- +# Default variables for Forgejo role + +# Forgejo version +forgejo_version: "9.0.2" +forgejo_docker_image: "codeberg.org/forgejo/forgejo" + +# System user and group +forgejo_user: git +forgejo_group: git +forgejo_uid: 1100 +forgejo_gid: 1100 + +# Installation paths +forgejo_base_path: /opt/forgejo +forgejo_data_path: "{{ forgejo_base_path }}/data" +forgejo_config_path: "{{ forgejo_base_path }}/config" +forgejo_custom_path: "{{ forgejo_base_path }}/custom" +forgejo_backup_path: "{{ forgejo_base_path }}/backups" + +# External volume (if using cloud provider block storage) +forgejo_use_external_volume: false +forgejo_volume_device: /dev/sdb +forgejo_volume_mount: /mnt/forgejo-data + +# Network configuration +# NOTE: Set your actual domain in ansible/inventory/production/hosts.yml +# The value here is just a fallback default. 
+forgejo_domain: git.example.com +forgejo_http_port: 3000 +forgejo_ssh_port: 2222 +forgejo_protocol: https + +# Database configuration +forgejo_db_type: postgres +forgejo_db_host: localhost +forgejo_db_port: 5432 +forgejo_db_name: forgejo +forgejo_db_user: forgejo +forgejo_db_password: "{{ vault_forgejo_db_password | default('changeme') }}" + +# PostgreSQL settings +postgres_version: "16" +postgres_data_dir: "{{ forgejo_data_path }}/postgres" +postgres_max_connections: 100 +postgres_shared_buffers: "256MB" +postgres_effective_cache_size: "1GB" + +# Redis configuration (optional, for caching) +forgejo_use_redis: true +redis_host: localhost +redis_port: 6379 + +# Admin user (created on first setup) +# NOTE: "admin" is a reserved name in Forgejo, use something else +forgejo_admin_username: forgejo_admin +forgejo_admin_password: "{{ vault_forgejo_admin_password | default('changeme') }}" +forgejo_admin_email: "admin@{{ forgejo_domain }}" + +# HTTPS/SSL configuration +forgejo_enable_letsencrypt: true +letsencrypt_email: "admin@{{ forgejo_domain }}" +certbot_create_if_missing: true +certbot_auto_renew: true + +# Object storage (S3-compatible) +forgejo_enable_s3: false +forgejo_s3_endpoint: "" +forgejo_s3_bucket: "" +forgejo_s3_region: "" +forgejo_s3_access_key: "{{ vault_s3_access_key | default('') }}" +forgejo_s3_secret_key: "{{ vault_s3_secret_key | default('') }}" + +# Backup configuration +forgejo_enable_backups: true +forgejo_backup_schedule: "0 2 * * *" # Daily at 2 AM +forgejo_backup_retention_days: 30 +forgejo_backup_to_s3: false +forgejo_backup_s3_bucket: "" + +# Security settings +forgejo_disable_registration: false +forgejo_require_signin_view: false +forgejo_enable_2fa: true + +# Tailscale VPN configuration +# Recommended: Enable for secure SSH access +forgejo_enable_tailscale: true +tailscale_interface: tailscale0 + +# UFW Firewall configuration +# When enabled with Tailscale, SSH is only accessible via Tailscale +forgejo_enable_ufw: true +ufw_reset_on_configure: false # Set to true to reset all rules before configuring + +# Email configuration (optional) +forgejo_enable_email: false +forgejo_email_host: "" +forgejo_email_port: 587 +forgejo_email_user: "" +forgejo_email_password: "{{ vault_email_password | default('') }}" +forgejo_email_from: "noreply@{{ forgejo_domain }}" + +# Git configuration +forgejo_disable_http_git: false +forgejo_enable_lfs: true +forgejo_lfs_max_file_size: 100 # MB + +# Performance tuning +forgejo_log_level: Info +forgejo_disable_gravatar: false + +# Docker Compose configuration +docker_compose_version: "2.24.0" +docker_install_compose: true + +# Firewall configuration +firewall_allowed_tcp_ports: + - "22" + - "80" + - "443" + - "{{ forgejo_ssh_port }}" + +# System packages to install +system_packages: + - curl + - wget + - git + - htop + - vim + - tmux + - unzip + - jq + - python3-pip + - python3-docker + - python3-psycopg2 + - acl + +# Monitoring (optional) +forgejo_enable_prometheus: false +prometheus_port: 9090 + +# Restore from backup +forgejo_restore_from_backup: false +forgejo_restore_backup_file: "" diff --git a/ansible/roles/forgejo/handlers/main.yml b/ansible/roles/forgejo/handlers/main.yml new file mode 100644 index 0000000..faf0f8f --- /dev/null +++ b/ansible/roles/forgejo/handlers/main.yml @@ -0,0 +1,81 @@ +--- +# Handlers for Forgejo role + +- name: Restart Docker + ansible.builtin.systemd: + name: docker + state: restarted + daemon_reload: yes + become: yes + +- name: Restart Forgejo + community.docker.docker_compose_v2: + project_src: "{{ 
forgejo_base_path }}" + state: restarted + become: yes + +- name: Reload Forgejo + community.docker.docker_compose_v2: + project_src: "{{ forgejo_base_path }}" + reloaded: yes + become: yes + +- name: Restart PostgreSQL + ansible.builtin.systemd: + name: postgresql + state: restarted + become: yes + when: forgejo_db_type == 'postgres' + +- name: Restart Redis + ansible.builtin.systemd: + name: redis-server + state: restarted + become: yes + when: forgejo_use_redis | bool + +- name: Reload Caddy + ansible.builtin.systemd: + name: caddy + state: reloaded + become: yes + +- name: Restart Caddy + ansible.builtin.systemd: + name: caddy + state: restarted + become: yes + +- name: Reload Systemd + ansible.builtin.systemd: + daemon_reload: yes + become: yes + +- name: Restart UFW + ansible.builtin.systemd: + name: ufw + state: restarted + become: yes + +- name: Restart sshd + ansible.builtin.systemd: + name: sshd + state: restarted + become: yes + +- name: Renew SSL Certificate + ansible.builtin.debug: + msg: "Caddy handles certificate renewal automatically - no manual action needed" + when: forgejo_enable_letsencrypt | bool + +- name: Restart Prometheus + community.docker.docker_compose_v2: + project_src: "{{ forgejo_base_path }}" + files: + - docker-compose.yml + - docker-compose.monitoring.yml + services: + - prometheus + state: restarted + become: yes + when: forgejo_enable_prometheus | bool diff --git a/ansible/roles/forgejo/tasks/backup.yml b/ansible/roles/forgejo/tasks/backup.yml new file mode 100644 index 0000000..432cfff --- /dev/null +++ b/ansible/roles/forgejo/tasks/backup.yml @@ -0,0 +1,32 @@ +--- +# Backup configuration tasks + +- name: Create backup script + ansible.builtin.template: + src: forgejo_backup.sh.j2 + dest: /usr/local/bin/forgejo_backup.sh + mode: '0755' + become: yes + +- name: Set up backup cron job + ansible.builtin.cron: + name: "Forgejo daily backup" + minute: "{{ forgejo_backup_schedule.split()[0] }}" + hour: "{{ forgejo_backup_schedule.split()[1] }}" + job: "/usr/local/bin/forgejo_backup.sh >> /var/log/forgejo-backup.log 2>&1" + become: yes + +- name: Create log rotation for backup logs + ansible.builtin.copy: + dest: /etc/logrotate.d/forgejo-backup + content: | + /var/log/forgejo-backup.log { + daily + rotate 7 + compress + delaycompress + missingok + notifempty + } + mode: '0644' + become: yes diff --git a/ansible/roles/forgejo/tasks/caddy.yml b/ansible/roles/forgejo/tasks/caddy.yml new file mode 100644 index 0000000..0293c2d --- /dev/null +++ b/ansible/roles/forgejo/tasks/caddy.yml @@ -0,0 +1,71 @@ +--- +# Caddy web server setup tasks +# Caddy handles HTTPS certificates automatically via Let's Encrypt + +- name: Install dependencies for Caddy + ansible.builtin.apt: + name: + - debian-keyring + - debian-archive-keyring + - apt-transport-https + - curl + state: present + update_cache: yes + become: yes + +- name: Add Caddy GPG key + ansible.builtin.shell: | + curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg + args: + creates: /usr/share/keyrings/caddy-stable-archive-keyring.gpg + become: yes + +- name: Add Caddy repository + ansible.builtin.shell: | + curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list + args: + creates: /etc/apt/sources.list.d/caddy-stable.list + become: yes + +- name: Install Caddy + ansible.builtin.apt: + name: caddy + state: present + update_cache: yes + become: yes + +- name: Create 
Caddy configuration directory + ansible.builtin.file: + path: /etc/caddy + state: directory + owner: root + group: root + mode: '0755' + become: yes + +- name: Create Caddy log directory + ansible.builtin.file: + path: /var/log/caddy + state: directory + owner: caddy + group: caddy + mode: '0755' + become: yes + +- name: Create Caddyfile for Forgejo + ansible.builtin.template: + src: Caddyfile.j2 + dest: /etc/caddy/Caddyfile + owner: root + group: root + mode: '0644' + validate: 'caddy validate --adapter caddyfile --config %s' + become: yes + notify: Reload Caddy + +- name: Ensure Caddy is started and enabled + ansible.builtin.systemd: + name: caddy + state: started + enabled: yes + become: yes diff --git a/ansible/roles/forgejo/tasks/docker.yml b/ansible/roles/forgejo/tasks/docker.yml new file mode 100644 index 0000000..002c270 --- /dev/null +++ b/ansible/roles/forgejo/tasks/docker.yml @@ -0,0 +1,100 @@ +--- +# Docker installation tasks + +- name: Check if Docker is already installed + ansible.builtin.command: docker --version + register: docker_installed + changed_when: false + failed_when: false + +- name: Install Docker + when: docker_installed.rc != 0 + block: + - name: Install Docker dependencies + ansible.builtin.apt: + name: + - apt-transport-https + - ca-certificates + - curl + - gnupg + - lsb-release + state: present + update_cache: yes + become: yes + + - name: Create directory for Docker GPG key + ansible.builtin.file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + become: yes + + - name: Add Docker GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + keyring: /etc/apt/keyrings/docker.gpg + state: present + become: yes + + - name: Add Docker repository + ansible.builtin.apt_repository: + repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + state: present + filename: docker + become: yes + + - name: Install Docker Engine + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: present + update_cache: yes + become: yes + +- name: Add Forgejo user to Docker group + ansible.builtin.user: + name: "{{ forgejo_user }}" + groups: docker + append: yes + become: yes + +- name: Ensure Docker service is started and enabled + ansible.builtin.systemd: + name: docker + state: started + enabled: yes + daemon_reload: yes + become: yes + +- name: Configure Docker daemon + ansible.builtin.copy: + dest: /etc/docker/daemon.json + content: | + { + "log-driver": "json-file", + "log-opts": { + "max-size": "10m", + "max-file": "3" + }, + "storage-driver": "overlay2", + "userland-proxy": false, + "live-restore": true + } + mode: '0644' + become: yes + notify: Restart Docker + +- name: Verify Docker installation + ansible.builtin.command: docker run --rm hello-world + register: docker_test + changed_when: false + become: yes + +- name: Display Docker version + ansible.builtin.debug: + msg: "Docker is installed and working" + when: docker_test.rc == 0 diff --git a/ansible/roles/forgejo/tasks/forgejo.yml b/ansible/roles/forgejo/tasks/forgejo.yml new file mode 100644 index 0000000..219d9b3 --- /dev/null +++ b/ansible/roles/forgejo/tasks/forgejo.yml @@ -0,0 +1,138 @@ +--- +# Forgejo deployment tasks + +- name: Ensure Forgejo data directories have correct ownership + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ forgejo_uid }}" + group: "{{ forgejo_gid 
}}" + mode: '0755' + recurse: yes + become: yes + loop: + - "{{ forgejo_data_path }}" + - "{{ forgejo_config_path }}" + - "{{ forgejo_custom_path }}" + +- name: Create .ssh directory for Forgejo + ansible.builtin.file: + path: "{{ forgejo_data_path }}/git/.ssh" + state: directory + owner: "{{ forgejo_uid }}" + group: "{{ forgejo_gid }}" + mode: '0700' + become: yes + +- name: Create Forgejo configuration from template + ansible.builtin.template: + src: app.ini.j2 + dest: "{{ forgejo_config_path }}/app.ini" + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0640' + become: yes + notify: Restart Forgejo + +- name: Create Docker Compose file + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ forgejo_base_path }}/docker-compose.yml" + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0640' + become: yes + notify: Restart Forgejo + +- name: Pull Forgejo Docker image + community.docker.docker_image: + name: "{{ forgejo_docker_image }}:{{ forgejo_version }}" + source: pull + become: yes + +- name: Start Forgejo with Docker Compose + community.docker.docker_compose_v2: + project_src: "{{ forgejo_base_path }}" + state: present + become: yes + register: forgejo_started + +- name: Wait for Forgejo to be ready + ansible.builtin.uri: + url: "http://localhost:{{ forgejo_http_port }}" + status_code: 200 + register: forgejo_health + until: forgejo_health.status == 200 + retries: 30 + delay: 5 + ignore_errors: yes + +- name: Get Forgejo container logs if startup failed + ansible.builtin.command: + cmd: docker logs forgejo --tail 50 + register: forgejo_logs + become: yes + when: forgejo_health.status is not defined or forgejo_health.status != 200 + +- name: Show Forgejo container logs + ansible.builtin.debug: + var: forgejo_logs.stdout_lines + when: forgejo_logs is defined and forgejo_logs.stdout_lines is defined + +- name: Fail if Forgejo is not ready + ansible.builtin.fail: + msg: "Forgejo failed to start. Check logs above." + when: forgejo_health.status is not defined or forgejo_health.status != 200 + +- name: Check if admin user exists + ansible.builtin.command: + cmd: docker exec --user git forgejo forgejo admin user list --admin + register: admin_user_check + become: yes + changed_when: false + failed_when: false + +- name: Create admin user + ansible.builtin.command: + cmd: > + docker exec --user git forgejo forgejo admin user create + --admin + --username "{{ forgejo_admin_username }}" + --password "{{ forgejo_admin_password }}" + --email "{{ forgejo_admin_email }}" + --must-change-password=false + become: yes + when: forgejo_admin_username not in admin_user_check.stdout + register: admin_created + no_log: yes + +- name: Display admin credentials + ansible.builtin.debug: + msg: | + ===================================================== + ADMIN USER CREATED + ===================================================== + Username: {{ forgejo_admin_username }} + Email: {{ forgejo_admin_email }} + Password: (from your secrets.yml vault) + + IMPORTANT: Change this password after first login! 
+ ===================================================== + when: admin_created is defined and admin_created.changed + +- name: Create Forgejo systemd service + ansible.builtin.template: + src: forgejo.service.j2 + dest: /etc/systemd/system/forgejo.service + mode: '0644' + become: yes + notify: + - Reload Systemd + - Restart Forgejo + +- name: Enable Forgejo service + ansible.builtin.systemd: + name: forgejo + enabled: yes + daemon_reload: yes + become: yes diff --git a/ansible/roles/forgejo/tasks/main.yml b/ansible/roles/forgejo/tasks/main.yml new file mode 100644 index 0000000..71c77e3 --- /dev/null +++ b/ansible/roles/forgejo/tasks/main.yml @@ -0,0 +1,94 @@ +--- +# Main tasks for Forgejo deployment + +- name: Include system preparation tasks + ansible.builtin.include_tasks: prepare.yml + tags: + - prepare + - system + +- name: Include Tailscale VPN setup tasks + ansible.builtin.include_tasks: tailscale.yml + when: forgejo_enable_tailscale | bool + tags: + - tailscale + - security + - vpn + +- name: Include volume setup tasks + ansible.builtin.include_tasks: volume.yml + when: forgejo_use_external_volume | bool + tags: + - volume + - storage + +- name: Include Docker installation tasks + ansible.builtin.include_tasks: docker.yml + tags: + - docker + - install + +- name: Include PostgreSQL setup tasks + ansible.builtin.include_tasks: postgres.yml + when: forgejo_db_type == 'postgres' + tags: + - postgres + - database + +- name: Include Redis setup tasks + ansible.builtin.include_tasks: redis.yml + when: forgejo_use_redis | bool + tags: + - redis + - cache + +# Ensure PostgreSQL is restarted with new config before Forgejo connects +- name: Flush handlers before starting Forgejo + ansible.builtin.meta: flush_handlers + +- name: Include Forgejo configuration tasks + ansible.builtin.include_tasks: forgejo.yml + tags: + - forgejo + - config + +- name: Include Caddy setup tasks + ansible.builtin.include_tasks: caddy.yml + tags: + - caddy + - webserver + +- name: Include SSL certificate tasks + ansible.builtin.include_tasks: ssl.yml + when: forgejo_enable_letsencrypt | bool + tags: + - ssl + - certificates + +- name: Include backup configuration tasks + ansible.builtin.include_tasks: backup.yml + when: forgejo_enable_backups | bool + tags: + - backup + +- name: Include restore tasks + ansible.builtin.include_tasks: restore.yml + when: forgejo_restore_from_backup | bool + tags: + - restore + - never # Only run when explicitly requested + +- name: Include monitoring setup tasks + ansible.builtin.include_tasks: monitoring.yml + when: forgejo_enable_prometheus | bool + tags: + - monitoring + - prometheus + +- name: Include UFW firewall configuration tasks + ansible.builtin.include_tasks: ufw.yml + when: forgejo_enable_ufw | bool + tags: + - ufw + - firewall + - security diff --git a/ansible/roles/forgejo/tasks/monitoring.yml b/ansible/roles/forgejo/tasks/monitoring.yml new file mode 100644 index 0000000..c33537d --- /dev/null +++ b/ansible/roles/forgejo/tasks/monitoring.yml @@ -0,0 +1,66 @@ +--- +# Prometheus monitoring setup for Forgejo +# This is INTERNAL monitoring - metrics are only accessible locally or via authenticated endpoint + +- name: Create monitoring directory + ansible.builtin.file: + path: "{{ forgejo_base_path }}/monitoring" + state: directory + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0755' + become: yes + +- name: Create Prometheus configuration + ansible.builtin.template: + src: prometheus.yml.j2 + dest: "{{ forgejo_base_path }}/monitoring/prometheus.yml" 
+ owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0644' + become: yes + notify: Restart Prometheus + +- name: Create Prometheus Docker Compose override + ansible.builtin.template: + src: docker-compose.monitoring.yml.j2 + dest: "{{ forgejo_base_path }}/docker-compose.monitoring.yml" + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0644' + become: yes + notify: Restart Prometheus + +- name: Create Prometheus data directory + ansible.builtin.file: + path: "{{ forgejo_base_path }}/monitoring/data" + state: directory + owner: "65534" # nobody user in Prometheus container + group: "65534" + mode: '0755' + become: yes + +- name: Start Prometheus container + community.docker.docker_compose_v2: + project_src: "{{ forgejo_base_path }}" + files: + - docker-compose.yml + - docker-compose.monitoring.yml + state: present + become: yes + +- name: Display monitoring access information + ansible.builtin.debug: + msg: | + Prometheus monitoring is now enabled! + + Internal access (from server): + - Prometheus UI: http://localhost:9090 + - Forgejo metrics: http://localhost:3000/metrics (requires token) + + The metrics endpoint is protected by a token configured in your secrets.yml + (vault_forgejo_metrics_token). Use this token in the Authorization header + or as a query parameter: /metrics?token=YOUR_TOKEN + + Prometheus scrapes Forgejo metrics every 15 seconds. + Data is retained for 15 days by default. diff --git a/ansible/roles/forgejo/tasks/postgres.yml b/ansible/roles/forgejo/tasks/postgres.yml new file mode 100644 index 0000000..4a928f9 --- /dev/null +++ b/ansible/roles/forgejo/tasks/postgres.yml @@ -0,0 +1,163 @@ +--- +# PostgreSQL setup tasks + +- name: Install PostgreSQL + ansible.builtin.apt: + name: + - "postgresql-{{ postgres_version }}" + - "postgresql-contrib-{{ postgres_version }}" + - python3-psycopg2 + state: present + update_cache: yes + become: yes + +- name: Ensure PostgreSQL is started and enabled + ansible.builtin.systemd: + name: postgresql + state: started + enabled: yes + become: yes + +- name: Create PostgreSQL data directory + ansible.builtin.file: + path: "{{ postgres_data_dir }}" + state: directory + owner: postgres + group: postgres + mode: '0700' + become: yes + when: forgejo_use_external_volume | bool + +- name: Check if PostgreSQL database exists + ansible.builtin.command: + cmd: psql -U postgres -lqt + register: postgres_db_list + changed_when: false + become: yes + become_user: postgres + +- name: Create Forgejo PostgreSQL database + community.postgresql.postgresql_db: + name: "{{ forgejo_db_name }}" + encoding: UTF8 + lc_collate: en_US.UTF-8 + lc_ctype: en_US.UTF-8 + template: template0 + state: present + become: yes + become_user: postgres + when: forgejo_db_name not in postgres_db_list.stdout + +- name: Create Forgejo PostgreSQL user + community.postgresql.postgresql_user: + name: "{{ forgejo_db_user }}" + password: "{{ forgejo_db_password }}" + state: present + become: yes + become_user: postgres + no_log: yes + +- name: Grant database privileges to Forgejo user + community.postgresql.postgresql_privs: + database: "{{ forgejo_db_name }}" + roles: "{{ forgejo_db_user }}" + type: database + privs: ALL + become: yes + become_user: postgres + +- name: Grant schema privileges to Forgejo user + community.postgresql.postgresql_privs: + database: "{{ forgejo_db_name }}" + roles: "{{ forgejo_db_user }}" + type: schema + objs: public + privs: ALL + become: yes + become_user: postgres + +- name: Set Forgejo user as owner of public schema + 
community.postgresql.postgresql_owner: + db: "{{ forgejo_db_name }}" + new_owner: "{{ forgejo_db_user }}" + obj_name: public + obj_type: schema + become: yes + become_user: postgres + +- name: Configure PostgreSQL for optimal performance + ansible.builtin.lineinfile: + path: "/etc/postgresql/{{ postgres_version }}/main/postgresql.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + become: yes + loop: + - { regexp: '^max_connections', line: "max_connections = {{ postgres_max_connections }}" } + - { regexp: '^shared_buffers', line: "shared_buffers = {{ postgres_shared_buffers }}" } + - { regexp: '^effective_cache_size', line: "effective_cache_size = {{ postgres_effective_cache_size }}" } + - { regexp: '^maintenance_work_mem', line: "maintenance_work_mem = 128MB" } + - { regexp: '^checkpoint_completion_target', line: "checkpoint_completion_target = 0.9" } + - { regexp: '^wal_buffers', line: "wal_buffers = 16MB" } + - { regexp: '^default_statistics_target', line: "default_statistics_target = 100" } + - { regexp: '^random_page_cost', line: "random_page_cost = 1.1" } + - { regexp: '^effective_io_concurrency', line: "effective_io_concurrency = 200" } + - { regexp: '^work_mem', line: "work_mem = 8MB" } + - { regexp: '^min_wal_size', line: "min_wal_size = 1GB" } + - { regexp: '^max_wal_size', line: "max_wal_size = 4GB" } + notify: Restart PostgreSQL + +- name: Configure PostgreSQL to listen on all interfaces + ansible.builtin.lineinfile: + path: "/etc/postgresql/{{ postgres_version }}/main/postgresql.conf" + regexp: "^#?listen_addresses" + line: "listen_addresses = '*'" + state: present + become: yes + notify: Restart PostgreSQL + +- name: Configure PostgreSQL authentication + ansible.builtin.lineinfile: + path: "/etc/postgresql/{{ postgres_version }}/main/pg_hba.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + become: yes + loop: + - regexp: '^local\s+all\s+postgres' + line: 'local all postgres peer' + - regexp: '^local\s+all\s+all' + line: 'local all all peer' + - regexp: '^host\s+all\s+all\s+127\.0\.0\.1' + line: 'host all all 127.0.0.1/32 scram-sha-256' + - regexp: '^host\s+all\s+all\s+::1' + line: 'host all all ::1/128 scram-sha-256' + notify: Restart PostgreSQL + +- name: Allow Docker network to connect to PostgreSQL + ansible.builtin.lineinfile: + path: "/etc/postgresql/{{ postgres_version }}/main/pg_hba.conf" + line: 'host all all 172.16.0.0/12 scram-sha-256' + insertafter: '^host\s+all\s+all\s+127' + state: present + become: yes + notify: Restart PostgreSQL + +- name: Enable PostgreSQL extensions + community.postgresql.postgresql_ext: + name: "{{ item }}" + db: "{{ forgejo_db_name }}" + state: present + become: yes + become_user: postgres + loop: + - pg_trgm + - btree_gin + +- name: Create PostgreSQL backup script + ansible.builtin.template: + src: postgres_backup.sh.j2 + dest: /usr/local/bin/postgres_backup.sh + mode: '0755' + become: yes + when: forgejo_enable_backups | bool diff --git a/ansible/roles/forgejo/tasks/prepare.yml b/ansible/roles/forgejo/tasks/prepare.yml new file mode 100644 index 0000000..7d22c29 --- /dev/null +++ b/ansible/roles/forgejo/tasks/prepare.yml @@ -0,0 +1,194 @@ +--- +# System preparation tasks + +- name: Update apt cache + ansible.builtin.apt: + update_cache: yes + cache_valid_time: 3600 + become: yes + +- name: Upgrade all packages + ansible.builtin.apt: + upgrade: safe + become: yes + tags: + - upgrade + +- name: Install system packages + ansible.builtin.apt: + name: "{{ system_packages }}" + state: 
present + become: yes + +- name: Check if Forgejo group exists + ansible.builtin.getent: + database: group + key: "{{ forgejo_group }}" + register: forgejo_group_check + ignore_errors: yes + become: yes + +- name: Create Forgejo system group + ansible.builtin.group: + name: "{{ forgejo_group }}" + gid: "{{ forgejo_gid }}" + system: yes + state: present + become: yes + when: forgejo_group_check.failed | default(false) + +- name: Ensure Forgejo group exists (if already created with different GID) + ansible.builtin.group: + name: "{{ forgejo_group }}" + system: yes + state: present + become: yes + when: not (forgejo_group_check.failed | default(false)) + +- name: Check if Forgejo user exists + ansible.builtin.getent: + database: passwd + key: "{{ forgejo_user }}" + register: forgejo_user_check + ignore_errors: yes + become: yes + +- name: Create Forgejo system user + ansible.builtin.user: + name: "{{ forgejo_user }}" + uid: "{{ forgejo_uid }}" + group: "{{ forgejo_group }}" + system: yes + shell: /bin/bash + home: "{{ forgejo_base_path }}" + create_home: no + state: present + become: yes + when: forgejo_user_check.failed | default(false) + +- name: Ensure Forgejo user exists (if already created with different UID) + ansible.builtin.user: + name: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + system: yes + shell: /bin/bash + home: "{{ forgejo_base_path }}" + create_home: no + state: present + become: yes + when: not (forgejo_user_check.failed | default(false)) + +- name: Create Forgejo directory structure + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0755' + become: yes + loop: + - "{{ forgejo_base_path }}" + - "{{ forgejo_data_path }}" + - "{{ forgejo_config_path }}" + - "{{ forgejo_custom_path }}" + - "{{ forgejo_backup_path }}" + - "{{ forgejo_data_path }}/git" + - "{{ forgejo_data_path }}/attachments" + - "{{ forgejo_data_path }}/lfs" + - "{{ forgejo_data_path }}/avatars" + +- name: Configure system limits for Forgejo + ansible.builtin.pam_limits: + domain: "{{ forgejo_user }}" + limit_type: "{{ item.limit_type }}" + limit_item: "{{ item.limit_item }}" + value: "{{ item.value }}" + become: yes + loop: + - { limit_type: 'soft', limit_item: 'nofile', value: '65535' } + - { limit_type: 'hard', limit_item: 'nofile', value: '65535' } + - { limit_type: 'soft', limit_item: 'nproc', value: '65535' } + - { limit_type: 'hard', limit_item: 'nproc', value: '65535' } + +- name: Configure kernel parameters + ansible.builtin.sysctl: + name: "{{ item.name }}" + value: "{{ item.value }}" + state: present + reload: yes + become: yes + loop: + - { name: 'net.core.somaxconn', value: '1024' } + - { name: 'net.ipv4.tcp_max_syn_backlog', value: '2048' } + - { name: 'net.ipv4.ip_forward', value: '1' } + - { name: 'vm.swappiness', value: '10' } + - { name: 'fs.file-max', value: '65535' } + +# NOTE: UFW firewall configuration is handled by ufw.yml +# We only set up minimal rules here for Docker access during deployment +# The full secure configuration (Tailscale-only SSH) is applied in ufw.yml + +- name: Install UFW + ansible.builtin.apt: + name: ufw + state: present + become: yes + when: ansible_os_family == "Debian" + +- name: Allow Docker network to access host services + community.general.ufw: + rule: allow + from_ip: 172.16.0.0/12 + comment: "Allow Docker containers to access host services (PostgreSQL, etc.)" + become: yes + when: ansible_os_family == "Debian" + +- name: Set timezone to UTC + 
community.general.timezone: + name: UTC + become: yes + +- name: Enable automatic security updates + ansible.builtin.apt: + name: unattended-upgrades + state: present + become: yes + +- name: Configure unattended upgrades + ansible.builtin.copy: + dest: /etc/apt/apt.conf.d/50unattended-upgrades + content: | + Unattended-Upgrade::Allowed-Origins { + "${distro_id}:${distro_codename}-security"; + "${distro_id}ESMApps:${distro_codename}-apps-security"; + "${distro_id}ESM:${distro_codename}-infra-security"; + }; + Unattended-Upgrade::AutoFixInterruptedDpkg "true"; + Unattended-Upgrade::MinimalSteps "true"; + Unattended-Upgrade::Remove-Unused-Dependencies "true"; + Unattended-Upgrade::Automatic-Reboot "false"; + mode: '0644' + become: yes + +- name: Ensure SSH is properly configured + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + validate: '/usr/sbin/sshd -t -f %s' + become: yes + loop: + - { regexp: '^PermitRootLogin', line: 'PermitRootLogin prohibit-password' } + - { regexp: '^PasswordAuthentication', line: 'PasswordAuthentication no' } + - { regexp: '^PubkeyAuthentication', line: 'PubkeyAuthentication yes' } + notify: Restart sshd + when: ansible_connection != 'local' + +- name: Create systemd service for sshd + ansible.builtin.systemd: + name: sshd + enabled: yes + state: started + become: yes + when: ansible_connection != 'local' diff --git a/ansible/roles/forgejo/tasks/redis.yml b/ansible/roles/forgejo/tasks/redis.yml new file mode 100644 index 0000000..7211d32 --- /dev/null +++ b/ansible/roles/forgejo/tasks/redis.yml @@ -0,0 +1,40 @@ +--- +# Redis setup tasks + +- name: Install Redis + ansible.builtin.apt: + name: + - redis-server + - redis-tools + state: present + update_cache: yes + become: yes + +- name: Configure Redis + ansible.builtin.lineinfile: + path: /etc/redis/redis.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + become: yes + loop: + - { regexp: '^bind', line: 'bind 127.0.0.1 ::1' } + - { regexp: '^protected-mode', line: 'protected-mode yes' } + - { regexp: '^maxmemory', line: 'maxmemory 256mb' } + - { regexp: '^maxmemory-policy', line: 'maxmemory-policy allkeys-lru' } + - { regexp: '^save', line: 'save 900 1' } + notify: Restart Redis + +- name: Ensure Redis is started and enabled + ansible.builtin.systemd: + name: redis-server + state: started + enabled: yes + become: yes + +- name: Test Redis connection + ansible.builtin.command: + cmd: redis-cli ping + register: redis_ping + changed_when: false + failed_when: redis_ping.stdout != "PONG" diff --git a/ansible/roles/forgejo/tasks/ssl.yml b/ansible/roles/forgejo/tasks/ssl.yml new file mode 100644 index 0000000..2edecbf --- /dev/null +++ b/ansible/roles/forgejo/tasks/ssl.yml @@ -0,0 +1,29 @@ +--- +# SSL/TLS setup for Caddy +# Note: Caddy handles Let's Encrypt certificates automatically! +# This file only sets up log directories and verifies configuration. + +- name: Create Caddy log directory + ansible.builtin.file: + path: /var/log/caddy + state: directory + owner: caddy + group: caddy + mode: '0755' + become: yes + +- name: Verify Caddy is configured for HTTPS + ansible.builtin.debug: + msg: > + Caddy will automatically obtain and renew TLS certificates for {{ forgejo_domain }} + using Let's Encrypt. The email {{ letsencrypt_email }} will be used for renewal + notifications. No manual certificate management is required. 
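+
+# Optional post-deployment check (a sketch, not required by the role): it assumes the
+# DNS record for {{ forgejo_domain }} already points at this server and simply waits
+# until Caddy answers over HTTPS with a trusted certificate. Failures are ignored so
+# slow DNS propagation does not abort the whole play. It mirrors the "Wait for Forgejo
+# to be ready" pattern used in forgejo.yml.
+- name: Wait for a valid HTTPS certificate on the configured domain
+  ansible.builtin.uri:
+    url: "https://{{ forgejo_domain }}"
+    validate_certs: yes
+    status_code: 200
+  register: https_cert_check
+  until: https_cert_check.status == 200
+  retries: 10
+  delay: 30
+  ignore_errors: yes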
+ +- name: Ensure Caddy data directory exists (for certificates) + ansible.builtin.file: + path: /var/lib/caddy/.local/share/caddy + state: directory + owner: caddy + group: caddy + mode: '0700' + become: yes diff --git a/ansible/roles/forgejo/tasks/tailscale.yml b/ansible/roles/forgejo/tasks/tailscale.yml new file mode 100644 index 0000000..304716f --- /dev/null +++ b/ansible/roles/forgejo/tasks/tailscale.yml @@ -0,0 +1,76 @@ +--- +# Tailscale VPN installation and configuration +# Provides secure access to SSH and internal services + +- name: Install prerequisites for Tailscale + ansible.builtin.apt: + name: + - curl + - gnupg + - apt-transport-https + state: present + update_cache: yes + become: yes + +- name: Add Tailscale GPG key + ansible.builtin.shell: | + curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/noble.noarmor.gpg | tee /usr/share/keyrings/tailscale-archive-keyring.gpg > /dev/null + args: + creates: /usr/share/keyrings/tailscale-archive-keyring.gpg + become: yes + +- name: Add Tailscale repository + ansible.builtin.shell: | + curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/noble.tailscale-keyring.list | tee /etc/apt/sources.list.d/tailscale.list > /dev/null + args: + creates: /etc/apt/sources.list.d/tailscale.list + become: yes + +- name: Install Tailscale + ansible.builtin.apt: + name: tailscale + state: present + update_cache: yes + become: yes + +- name: Enable Tailscale service + ansible.builtin.systemd: + name: tailscaled + state: started + enabled: yes + become: yes + +- name: Check if Tailscale is already authenticated + ansible.builtin.command: tailscale status + register: tailscale_status + ignore_errors: yes + changed_when: false + become: yes + +- name: Display Tailscale authentication instructions + ansible.builtin.debug: + msg: | + =============================================================== + TAILSCALE AUTHENTICATION REQUIRED + =============================================================== + + Tailscale is installed but needs to be authenticated. + + SSH into the server and run: + sudo tailscale up --ssh + + This will: + 1. Open a browser URL for authentication + 2. Connect to your Tailnet + 3. 
Enable Tailscale SSH (optional but recommended) + + For headless servers, use an auth key: + sudo tailscale up --authkey=tskey-auth-XXXXX + + Generate an auth key at: https://login.tailscale.com/admin/settings/keys + + After authentication, you can access this server via: + - Tailscale IP (shown in 'tailscale ip') + - Tailscale hostname (from admin console) + =============================================================== + when: tailscale_status.rc != 0 diff --git a/ansible/roles/forgejo/tasks/ufw.yml b/ansible/roles/forgejo/tasks/ufw.yml new file mode 100644 index 0000000..2ec830c --- /dev/null +++ b/ansible/roles/forgejo/tasks/ufw.yml @@ -0,0 +1,142 @@ +--- +# UFW Firewall configuration for Forgejo +# Restricts SSH access to Tailscale interface only +# Only exposes HTTP/HTTPS to the public internet + +- name: Install UFW + ansible.builtin.apt: + name: ufw + state: present + update_cache: yes + become: yes + tags: + - install + - ufw + +- name: Deploy Forgejo UFW application profile + ansible.builtin.template: + src: ufw-forgejo.j2 + dest: /etc/ufw/applications.d/forgejo + owner: root + group: root + mode: '0644' + become: yes + tags: + - configure + - ufw + +- name: Reset UFW to default (clean slate) + community.general.ufw: + state: reset + become: yes + tags: + - configure + - ufw + +- name: Set default incoming policy to deny + community.general.ufw: + direction: incoming + policy: deny + become: yes + tags: + - configure + - ufw + +- name: Set default outgoing policy to allow + community.general.ufw: + direction: outgoing + policy: allow + become: yes + tags: + - configure + - ufw + +- name: Allow all traffic on Tailscale interface + community.general.ufw: + rule: allow + interface: "{{ tailscale_interface }}" + direction: in + comment: "Allow all Tailscale traffic (SSH, monitoring, internal services)" + become: yes + tags: + - configure + - ufw + +- name: Allow Docker network to access host services + community.general.ufw: + rule: allow + from_ip: 172.16.0.0/12 + comment: "Allow Docker containers to access host services (PostgreSQL, etc.)" + become: yes + tags: + - configure + - ufw + +# Public-facing ports (Caddy handles HTTPS) +- name: Allow HTTP (Caddy) + community.general.ufw: + rule: allow + port: "80" + proto: tcp + comment: "HTTP - Caddy (redirects to HTTPS)" + become: yes + tags: + - configure + - ufw + +- name: Allow HTTPS (Caddy) + community.general.ufw: + rule: allow + port: "443" + proto: tcp + comment: "HTTPS - Caddy/Forgejo" + become: yes + tags: + - configure + - ufw + +# Git SSH is only accessible via Tailscale (through the interface rule above) +# Regular SSH is only accessible via Tailscale (through the interface rule above) + +- name: Enable UFW logging + community.general.ufw: + logging: "on" + become: yes + tags: + - configure + - ufw + +- name: Enable UFW + community.general.ufw: + state: enabled + become: yes + tags: + - configure + - ufw + +- name: Display UFW security configuration + ansible.builtin.debug: + msg: | + =============================================================== + FIREWALL CONFIGURED - SECURITY SUMMARY + =============================================================== + + PUBLIC ACCESS (from anywhere): + - Port 80/tcp (HTTP - redirects to HTTPS) + - Port 443/tcp (HTTPS - Forgejo web interface) + + TAILSCALE-ONLY ACCESS (via {{ tailscale_interface }}): + - Port 22/tcp (SSH - system administration) + - Port 2222/tcp (Git SSH - clone/push/pull) + - Port 3000/tcp (Forgejo internal - for debugging) + - Port 9090/tcp (Prometheus - if enabled) + 
- All other internal services + + Git clone URLs: + - HTTPS (public): https://{{ forgejo_domain }}/user/repo.git + - SSH (Tailscale): git@:user/repo.git + + To access SSH after this change: + ssh root@ + + =============================================================== diff --git a/ansible/roles/forgejo/tasks/volume.yml b/ansible/roles/forgejo/tasks/volume.yml new file mode 100644 index 0000000..2da713f --- /dev/null +++ b/ansible/roles/forgejo/tasks/volume.yml @@ -0,0 +1,60 @@ +--- +# External volume setup tasks + +- name: Check if volume device exists + ansible.builtin.stat: + path: "{{ forgejo_volume_device }}" + register: volume_device + +- name: Fail if volume device not found + ansible.builtin.fail: + msg: "Volume device {{ forgejo_volume_device }} not found" + when: not volume_device.stat.exists + +- name: Check if volume is already formatted + ansible.builtin.command: + cmd: "blkid {{ forgejo_volume_device }}" + register: volume_formatted + changed_when: false + failed_when: false + +- name: Format volume with ext4 + ansible.builtin.filesystem: + fstype: ext4 + dev: "{{ forgejo_volume_device }}" + become: yes + when: volume_formatted.rc != 0 + +- name: Create mount point + ansible.builtin.file: + path: "{{ forgejo_volume_mount }}" + state: directory + mode: '0755' + become: yes + +- name: Mount volume + ansible.posix.mount: + path: "{{ forgejo_volume_mount }}" + src: "{{ forgejo_volume_device }}" + fstype: ext4 + opts: defaults,nofail + state: mounted + become: yes + +- name: Update data path to use volume + ansible.builtin.set_fact: + forgejo_data_path: "{{ forgejo_volume_mount }}/data" + +- name: Create data directories on volume + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ forgejo_user }}" + group: "{{ forgejo_group }}" + mode: '0755' + become: yes + loop: + - "{{ forgejo_data_path }}" + - "{{ forgejo_data_path }}/git" + - "{{ forgejo_data_path }}/attachments" + - "{{ forgejo_data_path }}/lfs" diff --git a/ansible/roles/forgejo/tasks/{restore.yml,monitoring.yml} b/ansible/roles/forgejo/tasks/{restore.yml,monitoring.yml} new file mode 100644 index 0000000..e69de29 diff --git a/ansible/roles/forgejo/templates/Caddyfile.j2 b/ansible/roles/forgejo/templates/Caddyfile.j2 new file mode 100644 index 0000000..b380fd4 --- /dev/null +++ b/ansible/roles/forgejo/templates/Caddyfile.j2 @@ -0,0 +1,63 @@ +# Caddyfile for Forgejo +# Caddy automatically obtains and renews TLS certificates via Let's Encrypt + +{% if forgejo_enable_letsencrypt %} +{{ forgejo_domain }} { + # Reverse proxy to Forgejo + reverse_proxy localhost:{{ forgejo_http_port }} { + # WebSocket support (needed for real-time features) + header_up X-Real-IP {remote_host} + header_up X-Forwarded-For {remote_host} + header_up X-Forwarded-Proto {scheme} + + # Timeouts for large Git operations + transport http { + read_timeout 600s + write_timeout 600s + } + } + + # Security headers + header { + Strict-Transport-Security "max-age=31536000; includeSubDomains" + X-Frame-Options "SAMEORIGIN" + X-Content-Type-Options "nosniff" + X-XSS-Protection "1; mode=block" + } + + # Request body size for large uploads (Git push, LFS) + request_body { + max_size 100MB + } + + # Logging + log { + output file /var/log/caddy/forgejo_access.log { + roll_size 100mb + roll_keep 5 + } + format json + } + + # TLS configuration (automatic via Let's Encrypt) + tls {{ letsencrypt_email }} +} +{% else %} +# HTTP-only configuration (not recommended for production) +:80 { + reverse_proxy localhost:{{ forgejo_http_port }} { + 
header_up X-Real-IP {remote_host} + header_up X-Forwarded-For {remote_host} + header_up X-Forwarded-Proto {scheme} + } + + request_body { + max_size 100MB + } + + log { + output file /var/log/caddy/forgejo_access.log + format json + } +} +{% endif %} diff --git a/ansible/roles/forgejo/templates/app.ini.j2 b/ansible/roles/forgejo/templates/app.ini.j2 new file mode 100644 index 0000000..0ab83cb --- /dev/null +++ b/ansible/roles/forgejo/templates/app.ini.j2 @@ -0,0 +1,219 @@ +; Forgejo Configuration File +; Generated by Ansible + +APP_NAME = Forgejo: {{ forgejo_domain }} +RUN_MODE = prod +RUN_USER = {{ forgejo_user }} +WORK_PATH = /data/gitea + +[repository] +ROOT = /data/git/repositories +SCRIPT_TYPE = bash +DEFAULT_BRANCH = main +DEFAULT_PRIVATE = last +MAX_CREATION_LIMIT = -1 +ENABLE_PUSH_CREATE_USER = true +ENABLE_PUSH_CREATE_ORG = true +DISABLE_HTTP_GIT = {{ forgejo_disable_http_git | lower }} + +[repository.local] +LOCAL_COPY_PATH = /data/gitea/tmp/local-repo + +[repository.upload] +ENABLED = true +TEMP_PATH = /data/gitea/uploads +FILE_MAX_SIZE = 100 +MAX_FILES = 10 + +[lfs] +ENABLED = {{ forgejo_enable_lfs | lower }} +PATH = /data/lfs +MAX_FILE_SIZE = {{ forgejo_lfs_max_file_size }} + +[server] +; Forgejo listens on HTTP internally; Caddy handles TLS termination +PROTOCOL = http +DOMAIN = {{ forgejo_domain }} +ROOT_URL = {{ forgejo_protocol }}://{{ forgejo_domain }}/ +HTTP_ADDR = 0.0.0.0 +HTTP_PORT = 3000 +DISABLE_SSH = false +SSH_DOMAIN = {{ forgejo_domain }} +SSH_PORT = {{ forgejo_ssh_port }} +SSH_LISTEN_PORT = 22 +OFFLINE_MODE = false +APP_DATA_PATH = /data/gitea +LANDING_PAGE = explore +LFS_START_SERVER = {{ forgejo_enable_lfs | lower }} + +[database] +DB_TYPE = {{ forgejo_db_type }} +; Use host.docker.internal to reach host PostgreSQL from container +HOST = host.docker.internal:{{ forgejo_db_port }} +NAME = {{ forgejo_db_name }} +USER = {{ forgejo_db_user }} +PASSWD = {{ forgejo_db_password }} +SCHEMA = +SSL_MODE = disable +CHARSET = utf8mb4 +LOG_SQL = false +MAX_IDLE_CONNS = 30 +MAX_OPEN_CONNS = 100 +CONN_MAX_LIFETIME = 3600 + +[security] +INSTALL_LOCK = true +SECRET_KEY = {{ vault_forgejo_secret_key | default('') }} +INTERNAL_TOKEN = {{ vault_forgejo_internal_token | default('') }} +PASSWORD_COMPLEXITY = lower,upper,digit,spec +MIN_PASSWORD_LENGTH = 10 +PASSWORD_HASH_ALGO = argon2 + +[service] +DISABLE_REGISTRATION = {{ forgejo_disable_registration | lower }} +REQUIRE_SIGNIN_VIEW = {{ forgejo_require_signin_view | lower }} +REGISTER_EMAIL_CONFIRM = {{ forgejo_enable_email | lower }} +ENABLE_NOTIFY_MAIL = {{ forgejo_enable_email | lower }} +DEFAULT_KEEP_EMAIL_PRIVATE = true +DEFAULT_ALLOW_CREATE_ORGANIZATION = true +DEFAULT_ORG_VISIBILITY = private +ENABLE_CAPTCHA = true +ENABLE_TIMETRACKING = true +DEFAULT_ENABLE_TIMETRACKING = true +ENABLE_USER_HEATMAP = true + +[service.explore] +REQUIRE_SIGNIN_VIEW = {{ forgejo_require_signin_view | lower }} +DISABLE_USERS_PAGE = false + +{% if forgejo_enable_email %} +[mailer] +ENABLED = true +SMTP_ADDR = {{ forgejo_email_host }} +SMTP_PORT = {{ forgejo_email_port }} +FROM = {{ forgejo_email_from }} +USER = {{ forgejo_email_user }} +PASSWD = {{ forgejo_email_password }} +SUBJECT_PREFIX = [{{ forgejo_domain }}] +MAILER_TYPE = smtp +IS_TLS_ENABLED = true +{% endif %} + +[session] +PROVIDER = file +PROVIDER_CONFIG = /data/gitea/sessions +COOKIE_SECURE = {{ (forgejo_protocol == 'https') | lower }} +COOKIE_NAME = i_like_forgejo +COOKIE_DOMAIN = {{ forgejo_domain }} +GC_INTERVAL_TIME = 86400 +SESSION_LIFE_TIME = 86400 + +[picture] 
+DISABLE_GRAVATAR = {{ forgejo_disable_gravatar | lower }} +ENABLE_FEDERATED_AVATAR = false + +[attachment] +ENABLED = true +PATH = /data/attachments +MAX_SIZE = 100 +MAX_FILES = 10 + +[time] +DEFAULT_UI_LOCATION = UTC + +[log] +MODE = console, file +LEVEL = {{ forgejo_log_level }} +ROOT_PATH = /data/gitea/log +ENABLE_XORM_LOG = false + +[log.console] +LEVEL = {{ forgejo_log_level }} +COLORIZE = false + +[log.file] +LEVEL = {{ forgejo_log_level }} +FILE_NAME = forgejo.log +MAX_SIZE_SHIFT = 28 +DAILY_ROTATE = true +MAX_DAYS = 7 + +[git] +MAX_GIT_DIFF_LINES = 1000 +MAX_GIT_DIFF_LINE_CHARACTERS = 5000 +MAX_GIT_DIFF_FILES = 100 +GC_ARGS = + +[git.timeout] +DEFAULT = 360 +MIGRATE = 600 +MIRROR = 300 +CLONE = 300 +PULL = 300 +GC = 60 + +{% if forgejo_enable_2fa %} +[two_factor] +ENABLED = true +{% endif %} + +[openid] +ENABLE_OPENID_SIGNIN = false +ENABLE_OPENID_SIGNUP = false + +[cron] +ENABLED = true +RUN_AT_START = false + +[cron.update_mirrors] +SCHEDULE = @every 10m + +[cron.repo_health_check] +SCHEDULE = @every 24h +TIMEOUT = 60s + +[cron.check_repo_stats] +SCHEDULE = @every 24h + +[cron.cleanup_hook_task_table] +SCHEDULE = @every 24h +CLEANUP_TYPE = OlderThan +OLDER_THAN = 168h + +[cron.update_migration_poster_id] +SCHEDULE = @every 24h + +[cron.sync_external_users] +SCHEDULE = @every 24h +UPDATE_EXISTING = true + +[api] +ENABLE_SWAGGER = false +MAX_RESPONSE_ITEMS = 50 +DEFAULT_PAGING_NUM = 30 +DEFAULT_GIT_TREES_PER_PAGE = 1000 +DEFAULT_MAX_BLOB_SIZE = 10485760 + +[oauth2] +ENABLED = true +JWT_SECRET = {{ vault_forgejo_jwt_secret | default('') }} + +[webhook] +QUEUE_LENGTH = 1000 +DELIVER_TIMEOUT = 15 +SKIP_TLS_VERIFY = false +PAGING_NUM = 10 + +[metrics] +ENABLED = {{ forgejo_enable_prometheus | lower }} +TOKEN = {{ vault_forgejo_metrics_token | default('') }} + +[task] +QUEUE_TYPE = channel +QUEUE_LENGTH = 10000 +QUEUE_CONN_STR = +QUEUE_BATCH_NUMBER = 20 + +[indexer] +ISSUE_INDEXER_TYPE = db +REPO_INDEXER_ENABLED = true diff --git a/ansible/roles/forgejo/templates/docker-compose.monitoring.yml.j2 b/ansible/roles/forgejo/templates/docker-compose.monitoring.yml.j2 new file mode 100644 index 0000000..298f219 --- /dev/null +++ b/ansible/roles/forgejo/templates/docker-compose.monitoring.yml.j2 @@ -0,0 +1,27 @@ +# Docker Compose override for Prometheus monitoring +# Generated by Ansible +# This file extends the main docker-compose.yml + +services: + prometheus: + image: prom/prometheus:latest + container_name: prometheus + restart: unless-stopped + volumes: + - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - ./monitoring/data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--storage.tsdb.retention.time=15d' + - '--web.enable-lifecycle' + ports: + # Only bind to localhost for security - not exposed externally + - "127.0.0.1:{{ prometheus_port | default(9090) }}:9090" + networks: + - forgejo-network + +networks: + forgejo-network: + external: true + name: {{ forgejo_base_path | basename }}_default diff --git a/ansible/roles/forgejo/templates/docker-compose.yml.j2 b/ansible/roles/forgejo/templates/docker-compose.yml.j2 new file mode 100644 index 0000000..aac85f6 --- /dev/null +++ b/ansible/roles/forgejo/templates/docker-compose.yml.j2 @@ -0,0 +1,76 @@ +services: + forgejo: + image: {{ forgejo_docker_image }}:{{ forgejo_version }} + container_name: forgejo + restart: unless-stopped + environment: + - USER_UID={{ forgejo_uid }} + - USER_GID={{ forgejo_gid }} + - FORGEJO__database__DB_TYPE={{ forgejo_db_type }} + 
- FORGEJO__database__HOST=host.docker.internal:{{ forgejo_db_port }} + - FORGEJO__database__NAME={{ forgejo_db_name }} + - FORGEJO__database__USER={{ forgejo_db_user }} + - FORGEJO__database__PASSWD={{ forgejo_db_password }} +{% if forgejo_use_redis %} + - FORGEJO__cache__ENABLED=true + - FORGEJO__cache__ADAPTER=redis + - FORGEJO__cache__HOST=redis://redis:{{ redis_port }}/0 + - FORGEJO__session__PROVIDER=redis + - FORGEJO__session__PROVIDER_CONFIG=redis://redis:{{ redis_port }}/0 +{% endif %} + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - {{ forgejo_data_path }}/git:/data/git + - {{ forgejo_data_path }}/attachments:/data/attachments + - {{ forgejo_data_path }}/lfs:/data/lfs + - {{ forgejo_config_path }}/app.ini:/data/gitea/conf/app.ini + - {{ forgejo_custom_path }}:/data/gitea/custom + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + ports: + - "127.0.0.1:{{ forgejo_http_port }}:3000" + - "{{ forgejo_ssh_port }}:22" + networks: + - forgejo +{% if forgejo_use_redis %} + depends_on: + - redis +{% endif %} + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:3000/api/healthz"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + +{% if forgejo_use_redis %} + redis: + image: redis:7-alpine + container_name: forgejo-redis + restart: unless-stopped + volumes: + - {{ forgejo_data_path }}/redis:/data + networks: + - forgejo + command: redis-server --appendonly yes + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "3" + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 30s + timeout: 3s + retries: 3 +{% endif %} + +networks: + forgejo: + driver: bridge diff --git a/ansible/roles/forgejo/templates/forgejo.service.j2 b/ansible/roles/forgejo/templates/forgejo.service.j2 new file mode 100644 index 0000000..8a4605e --- /dev/null +++ b/ansible/roles/forgejo/templates/forgejo.service.j2 @@ -0,0 +1,19 @@ +[Unit] +Description=Forgejo Git Server (Docker Compose) +Documentation=https://forgejo.org/docs/latest/ +After=docker.service +Requires=docker.service +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +WorkingDirectory={{ forgejo_base_path }} +ExecStart=/usr/bin/docker compose up -d +ExecStop=/usr/bin/docker compose down +ExecReload=/usr/bin/docker compose restart +TimeoutStartSec=300 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/roles/forgejo/templates/forgejo_backup.sh.j2 b/ansible/roles/forgejo/templates/forgejo_backup.sh.j2 new file mode 100644 index 0000000..58be61a --- /dev/null +++ b/ansible/roles/forgejo/templates/forgejo_backup.sh.j2 @@ -0,0 +1,33 @@ +#!/bin/bash +# Forgejo Backup Script +# Generated by Ansible + +set -e + +BACKUP_DIR="{{ forgejo_backup_path }}" +TIMESTAMP=$(date +%Y%m%dT%H%M%S) +LOG_FILE="/var/log/forgejo-backup.log" + +echo "[$(date)] Starting Forgejo backup..." 
| tee -a "$LOG_FILE" + +# Create database backup +pg_dump -U {{ forgejo_db_user }} {{ forgejo_db_name }} | gzip > "$BACKUP_DIR/database-$TIMESTAMP.sql.gz" +echo "[$(date)] Database backed up" | tee -a "$LOG_FILE" + +# Backup repositories +tar -czf "$BACKUP_DIR/repositories-$TIMESTAMP.tar.gz" -C {{ forgejo_data_path }} git +echo "[$(date)] Repositories backed up" | tee -a "$LOG_FILE" + +# Backup configuration +tar -czf "$BACKUP_DIR/config-$TIMESTAMP.tar.gz" {{ forgejo_config_path }} {{ forgejo_base_path }}/docker-compose.yml +echo "[$(date)] Configuration backed up" | tee -a "$LOG_FILE" + +# Backup data +tar -czf "$BACKUP_DIR/data-$TIMESTAMP.tar.gz" -C {{ forgejo_data_path }} attachments lfs avatars +echo "[$(date)] Data backed up" | tee -a "$LOG_FILE" + +# Clean old backups +find "$BACKUP_DIR" -type f -name "*.gz" -mtime +{{ forgejo_backup_retention_days }} -delete +echo "[$(date)] Old backups cleaned" | tee -a "$LOG_FILE" + +echo "[$(date)] Backup completed successfully" | tee -a "$LOG_FILE" diff --git a/ansible/roles/forgejo/templates/postgres_backup.sh.j2 b/ansible/roles/forgejo/templates/postgres_backup.sh.j2 new file mode 100644 index 0000000..8534b2a --- /dev/null +++ b/ansible/roles/forgejo/templates/postgres_backup.sh.j2 @@ -0,0 +1,24 @@ +#!/bin/bash +# PostgreSQL Backup Script +# Generated by Ansible + +set -e + +BACKUP_DIR="{{ forgejo_backup_path }}" +TIMESTAMP=$(date +%Y%m%dT%H%M%S) +LOG_FILE="/var/log/postgres-backup.log" + +# Ensure backup directory exists +mkdir -p "$BACKUP_DIR" + +echo "[$(date)] Starting PostgreSQL backup..." | tee -a "$LOG_FILE" + +# Create database backup +sudo -u postgres pg_dump {{ forgejo_db_name }} | gzip > "$BACKUP_DIR/postgres-$TIMESTAMP.sql.gz" + +echo "[$(date)] PostgreSQL backup completed: postgres-$TIMESTAMP.sql.gz" | tee -a "$LOG_FILE" + +# Clean old PostgreSQL backups (keep last {{ forgejo_backup_retention_days }} days) +find "$BACKUP_DIR" -type f -name "postgres-*.sql.gz" -mtime +{{ forgejo_backup_retention_days }} -delete + +echo "[$(date)] Old PostgreSQL backups cleaned" | tee -a "$LOG_FILE" diff --git a/ansible/roles/forgejo/templates/prometheus.yml.j2 b/ansible/roles/forgejo/templates/prometheus.yml.j2 new file mode 100644 index 0000000..b17a009 --- /dev/null +++ b/ansible/roles/forgejo/templates/prometheus.yml.j2 @@ -0,0 +1,42 @@ +# Prometheus configuration for Forgejo monitoring +# Generated by Ansible + +global: + scrape_interval: 15s + evaluation_interval: 15s + +scrape_configs: + # Forgejo metrics endpoint + - job_name: 'forgejo' + scheme: http + static_configs: + - targets: ['forgejo:3000'] + metrics_path: /metrics + bearer_token: '{{ vault_forgejo_metrics_token | default("") }}' + + # Prometheus self-monitoring + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + +{% if forgejo_db_type == 'postgres' %} + # PostgreSQL metrics (if postgres_exporter is enabled) + # Uncomment and configure if you add postgres_exporter + # - job_name: 'postgres' + # static_configs: + # - targets: ['postgres_exporter:9187'] +{% endif %} + +{% if forgejo_use_redis %} + # Redis metrics (if redis_exporter is enabled) + # Uncomment and configure if you add redis_exporter + # - job_name: 'redis' + # static_configs: + # - targets: ['redis_exporter:9121'] +{% endif %} + + # Node metrics (if node_exporter is enabled) + # Uncomment and configure if you add node_exporter + # - job_name: 'node' + # static_configs: + # - targets: ['node_exporter:9100'] diff --git a/ansible/roles/forgejo/templates/ufw-forgejo.j2 
b/ansible/roles/forgejo/templates/ufw-forgejo.j2 new file mode 100644 index 0000000..a42feb8 --- /dev/null +++ b/ansible/roles/forgejo/templates/ufw-forgejo.j2 @@ -0,0 +1,14 @@ +[Forgejo] +title=Forgejo Git Forge +description=Forgejo self-hosted Git service (web interface) +ports=80,443/tcp + +[Forgejo-SSH] +title=Forgejo Git SSH +description=Forgejo Git operations over SSH +ports={{ forgejo_ssh_port }}/tcp + +[Forgejo-Full] +title=Forgejo Full +description=Forgejo web interface and Git SSH +ports=80,443,{{ forgejo_ssh_port }}/tcp diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md new file mode 100644 index 0000000..ecebcb7 --- /dev/null +++ b/docs/CONFIGURATION.md @@ -0,0 +1,569 @@ +# Configuration Reference + +This document explains all configuration options for the Forgejo self-hosting setup. + +## Table of Contents + +- [Domain Configuration](#domain-configuration) +- [Secrets (Vault Variables)](#secrets-vault-variables) +- [Feature Flags](#feature-flags) +- [Security (Tailscale + UFW)](#security-tailscale--ufw) +- [S3/Object Storage](#s3object-storage) +- [Database Configuration](#database-configuration) +- [Email Configuration](#email-configuration) +- [Monitoring (Prometheus)](#monitoring-prometheus) +- [Backup Configuration](#backup-configuration) + +--- + +## Domain Configuration + +The domain is configured in **one primary location**: + +**File:** `ansible/inventory/production/hosts.yml` + +```yaml +forgejo_domain: git.yourdomain.com +``` + +This domain is used for: + +- HTTPS certificate (automatically obtained via Caddy/Let's Encrypt) +- Forgejo web interface URL +- Git clone URLs: `https://git.yourdomain.com/user/repo.git` +- SSH clone URLs: `git@git.yourdomain.com:user/repo.git` +- Email "From" addresses + +**Before deployment**, ensure your DNS is configured: + +``` +git.yourdomain.com. IN A +git.yourdomain.com. IN AAAA +``` + +--- + +## Secrets (Vault Variables) + +All secrets are stored in `ansible/playbooks/vars/secrets.yml` encrypted with Ansible Vault. + +### How to Set Up Secrets + +```bash +# 1. Copy the example file +cp ansible/playbooks/vars/secrets.yml.example ansible/playbooks/vars/secrets.yml + +# 2. Edit with your values +nano ansible/playbooks/vars/secrets.yml + +# 3. Encrypt it +ansible-vault encrypt ansible/playbooks/vars/secrets.yml + +# 4. To edit later +ansible-vault edit ansible/playbooks/vars/secrets.yml +``` + +### Secret Variables Explained + +| Variable | Purpose | How to Generate | +|----------|---------|-----------------| +| `vault_forgejo_db_password` | PostgreSQL database password for Forgejo | `openssl rand -base64 32` | +| `vault_forgejo_admin_password` | Initial admin account password | Choose a strong password | +| `vault_forgejo_secret_key` | Used for CSRF tokens, session cookies, and encryption. Must be 64+ characters. 
| `openssl rand -base64 48` | +| `vault_forgejo_internal_token` | Used for internal API calls between Forgejo components | `openssl rand -base64 48` | +| `vault_forgejo_jwt_secret` | Signs JWT tokens for OAuth2 and API authentication | `openssl rand -base64 32` | +| `vault_forgejo_metrics_token` | Required to access `/metrics` endpoint (if Prometheus enabled) | `openssl rand -base64 32` | +| `vault_email_password` | SMTP password (if email enabled) | Your email provider password | +| `vault_s3_access_key` | S3-compatible storage access key (if S3 enabled) | From your cloud provider | +| `vault_s3_secret_key` | S3-compatible storage secret key (if S3 enabled) | From your cloud provider | + +### Example secrets.yml + +```yaml +--- +vault_forgejo_db_password: "xK9mN2pL8qR5tW7vY3zB1cD4fG6hJ0kM" +vault_forgejo_admin_password: "MySecureAdminPassword123!" +vault_forgejo_secret_key: "aB3cD5eF7gH9iJ1kL3mN5oP7qR9sT1uV3wX5yZ7aB9cD1eF3gH5iJ7kL9mN1oP" +vault_forgejo_internal_token: "qW2eR4tY6uI8oP0aS2dF4gH6jK8lZ0xC2vB4nM6qW8eR0tY2uI4oP6aS8dF0g" +vault_forgejo_jwt_secret: "mN3bV5cX7zL9kJ1hG3fD5sA7pO9iU1yT" +vault_forgejo_metrics_token: "pR0mE7hEuS_t0K3n_H3r3" +vault_email_password: "" +vault_s3_access_key: "" +vault_s3_secret_key: "" +``` + +--- + +## Feature Flags + +Configure features in `ansible/inventory/production/hosts.yml`: + +### Core Features + +| Flag | Default | Description | +|------|---------|-------------| +| `forgejo_enable_letsencrypt` | `true` | Automatic HTTPS via Let's Encrypt (handled by Caddy) | +| `forgejo_enable_lfs` | `true` | Git Large File Storage support | +| `forgejo_enable_2fa` | `true` | Allow users to enable Two-Factor Authentication | +| `forgejo_use_redis` | `true` | Use Redis for caching (recommended for performance) | +| `forgejo_enable_backups` | `true` | Enable automated daily backups | +| `forgejo_enable_prometheus` | `false` | Enable internal Prometheus metrics collection | + +### Access Control + +| Flag | Default | Description | +|------|---------|-------------| +| `forgejo_disable_registration` | `false` | Disable public user registration (invite-only) | +| `forgejo_require_signin_view` | `false` | Require login to view public repositories | + +### Optional Services + +| Flag | Default | Description | +|------|---------|-------------| +| `forgejo_enable_email` | `false` | Enable email notifications (requires SMTP config) | +| `forgejo_enable_s3` | `false` | Use S3-compatible storage for LFS and attachments | + +### Example Configuration + +```yaml +# In ansible/inventory/production/hosts.yml +forgejo_prod: + # ... other settings ... + + # Enable all recommended features + forgejo_enable_letsencrypt: true + forgejo_enable_lfs: true + forgejo_enable_2fa: true + forgejo_use_redis: true + forgejo_enable_backups: true + + # Enable monitoring + forgejo_enable_prometheus: true + + # Private instance (no public registration) + forgejo_disable_registration: true + forgejo_require_signin_view: false +``` + +--- + +## Security (Tailscale + UFW) + +This setup uses Tailscale VPN and UFW firewall to secure your Forgejo instance: + +- **SSH access**: Only via Tailscale (not exposed to the public internet) +- **Git SSH (port 2222)**: Only via Tailscale +- **Web interface**: Public via HTTPS (ports 80/443) +- **Internal services**: Only via Tailscale (Prometheus, database, etc.) + +### Enable Security Features + +```yaml +# In ansible/inventory/production/hosts.yml +forgejo_enable_tailscale: true +forgejo_enable_ufw: true +``` + +### How It Works + +1. 
**Tailscale** creates a secure mesh VPN network +2. **UFW** is configured to: + - Allow all traffic on the `tailscale0` interface + - Allow only HTTP/HTTPS (80/443) from the public internet + - Block SSH from the public internet + +### Post-Deployment: Authenticate Tailscale + +After deployment, SSH into the server (while SSH is still open) and authenticate Tailscale: + +```bash +# SSH into server (before UFW locks down SSH) +ssh root@ + +# Authenticate Tailscale +sudo tailscale up --ssh + +# This will print a URL - open it in your browser to authenticate +``` + +For headless/automated setup, use an auth key: + +```bash +sudo tailscale up --authkey=tskey-auth-XXXXX +``` + +Generate auth keys at: https://login.tailscale.com/admin/settings/keys + +### Accessing Your Server After Setup + +Once UFW is configured, SSH is only accessible via Tailscale: + +```bash +# Via Tailscale IP +ssh root@100.x.x.x + +# Via Tailscale hostname (from admin console) +ssh root@your-server.tailnet-name.ts.net + +# Via Tailscale SSH (if enabled with --ssh) +tailscale ssh root@your-server +``` + +### Git Clone URLs + +| Method | URL | Access | +|--------|-----|--------| +| HTTPS | `https://git.yourdomain.com/user/repo.git` | Public | +| SSH | `git@:user/repo.git` | Tailscale only | + +### Firewall Rules Summary + +| Port | Protocol | Access | Purpose | +|------|----------|--------|---------| +| 80 | TCP | Public | HTTP (redirects to HTTPS) | +| 443 | TCP | Public | HTTPS (Forgejo web) | +| 22 | TCP | Tailscale only | System SSH | +| 2222 | TCP | Tailscale only | Git SSH | +| 3000 | TCP | Tailscale only | Forgejo internal | +| 9090 | TCP | Tailscale only | Prometheus | + +### Disabling Security Features + +If you need public SSH access (not recommended): + +```yaml +forgejo_enable_tailscale: false +forgejo_enable_ufw: false +``` + +Or configure UFW manually after deployment. + +--- + +## S3/Object Storage + +S3-compatible object storage can be used for: + +1. **Git LFS** - Large file storage +2. **Backups** - Off-site backup storage +3. **Attachments** - Issue/PR attachments (future) + +### What is S3? + +S3 (Simple Storage Service) is an object storage protocol. Both **Scaleway** and **Hetzner** offer S3-compatible storage: + +- **Scaleway**: Object Storage (S3-compatible) +- **Hetzner**: Object Storage (S3-compatible, in beta) + +### Setting Up S3 Storage + +#### For Scaleway + +1. **Create storage via Terraform** (already included): + + ```bash + make terraform-apply PROVIDER=scaleway + ``` + +2. **Get credentials from Terraform output**: + + ```bash + cd terraform/scaleway/storage + terragrunt output access_key + terragrunt output secret_key + ``` + +3. **Configure in inventory**: + + ```yaml + forgejo_enable_s3: true + forgejo_s3_endpoint: https://s3.fr-par.scw.cloud + forgejo_s3_bucket: your-project-production-lfs + forgejo_s3_region: fr-par + ``` + +4. **Add credentials to secrets.yml**: + + ```yaml + vault_s3_access_key: "SCWXXXXXXXXXXXXXXXXX" + vault_s3_secret_key: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + ``` + +#### For Hetzner + +Hetzner Object Storage is S3-compatible: + +1. **Create a storage box** in Hetzner Cloud Console + +2. **Configure in inventory**: + + ```yaml + forgejo_enable_s3: true + forgejo_s3_endpoint: https://fsn1.your-objectstorage.com + forgejo_s3_bucket: forgejo-lfs + forgejo_s3_region: fsn1 + ``` + +3. 
**Add credentials to secrets.yml** + +### S3 for Backups + +To upload backups to S3: + +```yaml +# In inventory +forgejo_backup_to_s3: true +forgejo_backup_s3_bucket: your-project-production-backups +``` + +Then run: + +```bash +make backup-to-s3 +``` + +--- + +## Database Configuration + +PostgreSQL is the recommended database. + +### Settings + +| Variable | Default | Description | +|----------|---------|-------------| +| `forgejo_db_type` | `postgres` | Database type (postgres recommended) | +| `forgejo_db_host` | `localhost` | Database host | +| `forgejo_db_port` | `5432` | Database port | +| `forgejo_db_name` | `forgejo` | Database name | +| `forgejo_db_user` | `forgejo` | Database user | + +The password is in `vault_forgejo_db_password`. + +### PostgreSQL Tuning + +Default tuning in `ansible/roles/forgejo/defaults/main.yml`: + +```yaml +postgres_version: "16" +postgres_max_connections: 100 +postgres_shared_buffers: "256MB" +postgres_effective_cache_size: "1GB" +``` + +For larger instances, adjust these based on available RAM. + +--- + +## Email Configuration + +Enable email for notifications, password resets, and registration confirmation. + +### Settings + +```yaml +# In inventory +forgejo_enable_email: true +forgejo_email_host: smtp.example.com +forgejo_email_port: 587 +forgejo_email_user: noreply@yourdomain.com + +# In secrets.yml +vault_email_password: "your-smtp-password" +``` + +### Common SMTP Providers + +**Gmail (with App Password)**: + +```yaml +forgejo_email_host: smtp.gmail.com +forgejo_email_port: 587 +forgejo_email_user: your-email@gmail.com +``` + +**Mailgun**: + +```yaml +forgejo_email_host: smtp.mailgun.org +forgejo_email_port: 587 +forgejo_email_user: postmaster@your-domain.mailgun.org +``` + +**SendGrid**: + +```yaml +forgejo_email_host: smtp.sendgrid.net +forgejo_email_port: 587 +forgejo_email_user: apikey +# vault_email_password should be your SendGrid API key +``` + +--- + +## Monitoring (Prometheus) + +Internal Prometheus monitoring for your Forgejo instance. + +### Enable Monitoring + +```yaml +# In inventory +forgejo_enable_prometheus: true +``` + +### What Gets Monitored + +- **Forgejo metrics**: HTTP requests, Git operations, users, repos, issues +- **Prometheus self-monitoring**: Scrape health + +### Accessing Metrics + +Prometheus is **internal only** (bound to localhost:9090). To access: + +1. **SSH tunnel**: + + ```bash + ssh -L 9090:localhost:9090 root@your-server + ``` + + Then open http://localhost:9090 + +2. **Forgejo metrics endpoint**: + + ``` + https://git.yourdomain.com/metrics?token=YOUR_METRICS_TOKEN + ``` + + The token is `vault_forgejo_metrics_token`. 
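+
+As an optional sanity check for either access path (the domain and token below are placeholders; the real token lives in `vault_forgejo_metrics_token`), you can verify from your workstation that metrics are reachable:
+
+```bash
+# Confirm the Forgejo metrics endpoint answers with the configured token
+curl -fsS "https://git.yourdomain.com/metrics?token=YOUR_METRICS_TOKEN" | head -20
+
+# Through the SSH tunnel from option 1, list the Prometheus scrape targets
+curl -fsS "http://localhost:9090/api/v1/targets" | python3 -m json.tool | head -40
+```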
+ +### Adding Grafana (Optional) + +To add Grafana dashboards, extend the monitoring setup: + +```yaml +# Create docker-compose.grafana.yml manually +services: + grafana: + image: grafana/grafana:latest + container_name: grafana + ports: + - "127.0.0.1:3001:3000" + volumes: + - grafana-data:/var/lib/grafana + environment: + - GF_SECURITY_ADMIN_PASSWORD=your-grafana-password + networks: + - forgejo-network +``` + +--- + +## Backup Configuration + +### Settings + +| Variable | Default | Description | +|----------|---------|-------------| +| `forgejo_enable_backups` | `true` | Enable automated backups | +| `forgejo_backup_schedule` | `0 2 * * *` | Cron schedule (default: 2 AM daily) | +| `forgejo_backup_retention_days` | `30` | Days to keep local backups | +| `forgejo_backup_to_s3` | `false` | Upload backups to S3 | +| `forgejo_backup_s3_bucket` | `""` | S3 bucket for backups | + +### What Gets Backed Up + +1. **PostgreSQL database** - Full SQL dump +2. **Git repositories** - All repository data +3. **Configuration** - app.ini, docker-compose.yml +4. **User data** - Attachments, LFS files, avatars + +### Backup Commands + +```bash +# Manual backup +make backup + +# Backup and upload to S3 +make backup-to-s3 + +# Restore from backup +make restore +# You'll be prompted for the backup timestamp +``` + +### Backup Location + +Local backups are stored in: `/opt/forgejo/backups/` + +Files: + +- `database-TIMESTAMP.sql.gz` +- `repositories-TIMESTAMP.tar.gz` +- `config-TIMESTAMP.tar.gz` +- `data-TIMESTAMP.tar.gz` + +--- + +## Quick Reference: Enable Everything + +For a fully-featured setup with all options enabled: + +```yaml +# ansible/inventory/production/hosts.yml +forgejo-prod: + ansible_host: YOUR_SERVER_IP + ansible_user: root + + # Domain + forgejo_domain: git.yourdomain.com + + # Core + forgejo_version: "9.0.2" + forgejo_enable_letsencrypt: true + forgejo_enable_lfs: true + forgejo_enable_2fa: true + forgejo_use_redis: true + + # Database + forgejo_db_type: postgres + + # Backups + forgejo_enable_backups: true + forgejo_backup_retention_days: 30 + forgejo_backup_to_s3: true + forgejo_backup_s3_bucket: your-backup-bucket + + # S3 Storage + forgejo_enable_s3: true + forgejo_s3_endpoint: https://s3.fr-par.scw.cloud + forgejo_s3_bucket: your-lfs-bucket + forgejo_s3_region: fr-par + + # Email + forgejo_enable_email: true + forgejo_email_host: smtp.mailgun.org + forgejo_email_port: 587 + forgejo_email_user: noreply@yourdomain.com + + # Monitoring + forgejo_enable_prometheus: true + + # Access + forgejo_disable_registration: false # Set to true for invite-only + forgejo_require_signin_view: false +``` + +Then in `ansible/playbooks/vars/secrets.yml`: + +```yaml +vault_forgejo_db_password: "GENERATED_PASSWORD" +vault_forgejo_admin_password: "YOUR_ADMIN_PASSWORD" +vault_forgejo_secret_key: "64_CHAR_GENERATED_KEY" +vault_forgejo_internal_token: "GENERATED_TOKEN" +vault_forgejo_jwt_secret: "GENERATED_SECRET" +vault_forgejo_metrics_token: "GENERATED_TOKEN" +vault_email_password: "YOUR_SMTP_PASSWORD" +vault_s3_access_key: "YOUR_S3_ACCESS_KEY" +vault_s3_secret_key: "YOUR_S3_SECRET_KEY" +``` diff --git a/docs/OPERATIONS.md b/docs/OPERATIONS.md new file mode 100644 index 0000000..cace1d9 --- /dev/null +++ b/docs/OPERATIONS.md @@ -0,0 +1,574 @@ +# Operations Guide - Forgejo Self-Hosting + +Complete guide for day-to-day operations and maintenance. + +## Table of Contents + +1. [Daily Operations](#daily-operations) +2. [Backup & Recovery](#backup--recovery) +3. [Updates & Upgrades](#updates--upgrades) +4. 
[Monitoring](#monitoring) +5. [Troubleshooting](#troubleshooting) +6. [Security Operations](#security-operations) +7. [Scaling](#scaling) +8. [Disaster Recovery](#disaster-recovery) + +## Daily Operations + +### Health Checks + +```bash +# Quick health check +make health + +# Full status +make status + +# View recent logs +make logs | tail -100 +``` + +### User Management + +```bash +# SSH into server +make ssh + +# List users +docker exec --user git forgejo forgejo admin user list + +# Create user +docker exec --user git forgejo forgejo admin user create \ + --username newuser \ + --password 'SecureP@ssw0rd' \ + --email user@example.com + +# Change user password +docker exec --user git forgejo forgejo admin user change-password \ + --username user \ + --password 'NewP@ssw0rd' + +# Make user admin +docker exec --user git forgejo forgejo admin user change-admin \ + --username user \ + --admin +``` + +### Repository Management + +```bash +# Sync mirrors +docker exec --user git forgejo forgejo admin repo-sync-releases + +# Rebuild indexes +docker exec --user git forgejo forgejo admin regenerate hooks +docker exec --user git forgejo forgejo admin regenerate keys +``` + +## Backup & Recovery + +### Manual Backup + +```bash +# Create immediate backup +make backup + +# Backup with S3 upload +make backup-to-s3 + +# Verify backup +make ssh +ls -lh /opt/forgejo/backups/ +``` + +### Automated Backups + +Backups run automatically at 2 AM daily. + +**Check backup status:** +```bash +make ssh +tail -f /var/log/forgejo-backup.log +``` + +**Modify schedule:** +```yaml +# In inventory or vars +forgejo_backup_schedule: "0 3 * * *" # 3 AM daily +``` + +### Restore from Backup + +```bash +# List available backups +make ssh +ls /opt/forgejo/backups/ + +# Restore (timestamp format: 20240115T120000) +make restore +# Enter backup timestamp when prompted + +# Force restore without confirmation +make restore-force + +# Restore from S3 +make restore-from-s3 +``` + +### Backup Verification + +```bash +# Test restore in separate environment +make ssh +cd /opt/forgejo/backups + +# Verify database backup +gunzip -c database-TIMESTAMP.sql.gz | head -100 + +# Verify repositories backup +tar -tzf repositories-TIMESTAMP.tar.gz | head -20 +``` + +## Updates & Upgrades + +### Update Forgejo + +```bash +# Standard update (includes backup) +make update + +# Update without backup (not recommended) +make update-no-backup +``` + +**Update process:** +1. Creates pre-update backup +2. Stops Forgejo service +3. Pulls new Docker image +4. Starts service +5. Runs database migrations +6. 
Verifies health + +### Update System Packages + +```bash +make ssh +sudo apt update +sudo apt upgrade -y +sudo reboot # If kernel updated +``` + +### Update Infrastructure + +```bash +# Review changes +make terraform-plan PROVIDER=scaleway + +# Apply updates +make terraform-apply PROVIDER=scaleway +``` + +## Monitoring + +### Log Management + +```bash +# Forgejo logs +make logs + +# Follow logs (real-time) +make logs | tail -f + +# Nginx access logs +make logs-nginx + +# System logs +make ssh +sudo journalctl -u forgejo -f + +# Docker logs +docker logs forgejo --tail 100 -f +``` + +### Performance Monitoring + +```bash +# Check resource usage +make ssh + +# CPU and memory +htop + +# Disk usage +df -h +du -sh /opt/forgejo/* + +# Docker stats +docker stats forgejo + +# PostgreSQL status +sudo systemctl status postgresql +``` + +### Database Monitoring + +```bash +make ssh + +# Connect to PostgreSQL +sudo -u postgres psql forgejo + +# Check database size +SELECT pg_size_pretty(pg_database_size('forgejo')); + +# Check table sizes +SELECT + schemaname, + tablename, + pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS size +FROM pg_tables +WHERE schemaname NOT IN ('pg_catalog', 'information_schema') +ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC +LIMIT 10; + +# Active connections +SELECT count(*) FROM pg_stat_activity; +``` + +## Troubleshooting + +### Service Won't Start + +```bash +# Check Docker status +make ssh +docker ps -a +docker logs forgejo + +# Check system resources +free -h +df -h + +# Restart service +make restart + +# Full rebuild if needed +docker compose down +docker compose up -d +``` + +### Database Connection Issues + +```bash +make ssh + +# Check PostgreSQL +sudo systemctl status postgresql + +# Check connections +sudo -u postgres psql -c "SELECT * FROM pg_stat_activity;" + +# Restart PostgreSQL +sudo systemctl restart postgresql + +# Check logs +sudo tail -f /var/log/postgresql/postgresql-16-main.log +``` + +### SSL Certificate Issues + +```bash +make ssh + +# Check certificate status +sudo certbot certificates + +# Renew manually +sudo certbot renew --force-renewal + +# Check Nginx configuration +sudo nginx -t +sudo systemctl restart nginx + +# Check certificate files +sudo ls -la /etc/letsencrypt/live/git.yourdomain.com/ +``` + +### Performance Issues + +```bash +# Check slow queries +make ssh +sudo -u postgres psql forgejo + +# Enable slow query logging +ALTER SYSTEM SET log_min_duration_statement = 1000; +SELECT pg_reload_conf(); + +# View slow queries +sudo tail -f /var/log/postgresql/postgresql-16-main.log + +# Vacuum database +sudo -u postgres vacuumdb --analyze forgejo +``` + +### Disk Space Issues + +```bash +make ssh + +# Check space +df -h + +# Find large directories +du -h /opt/forgejo | sort -rh | head -20 + +# Clean Docker +docker system prune -a + +# Clean old backups +find /opt/forgejo/backups -type f -mtime +30 -delete + +# Clean logs +sudo journalctl --vacuum-time=7d +``` + +## Security Operations + +### Security Audit + +```bash +make ssh + +# Check for security updates +sudo apt update +sudo apt list --upgradable + +# Review firewall rules +sudo ufw status verbose + +# Check open ports +sudo netstat -tulpn + +# Review SSH configuration +cat /etc/ssh/sshd_config + +# Check failed login attempts +sudo grep "Failed password" /var/log/auth.log | tail -20 +``` + +### Rotate Secrets + +```bash +# Generate new secrets +openssl rand -base64 48 + +# Update vault +make ansible-vault-edit + +# Redeploy with new secrets +make 
deploy +``` + +### SSL Certificate Renewal + +```bash +# Auto-renewal is configured, but to force: +make ssh +sudo certbot renew --force-renewal +sudo systemctl reload nginx +``` + +### Security Updates + +```bash +# Enable automatic security updates (already configured) +make ssh +sudo cat /etc/apt/apt.conf.d/50unattended-upgrades + +# Check update history +sudo cat /var/log/unattended-upgrades/unattended-upgrades.log +``` + +## Scaling + +### Vertical Scaling (More Resources) + +```bash +# Update instance type +vim terraform/scaleway/compute/terraform.tfvars +# Change: instance_type = "DEV1-L" # 8GB RAM + +make terraform-apply PROVIDER=scaleway + +# Service restarts automatically +``` + +### Database Optimization + +```bash +make ssh +sudo -u postgres psql forgejo + +# Optimize settings +ALTER SYSTEM SET shared_buffers = '512MB'; +ALTER SYSTEM SET effective_cache_size = '2GB'; +ALTER SYSTEM SET maintenance_work_mem = '256MB'; + +SELECT pg_reload_conf(); +``` + +### Add Storage Volume + +```bash +# In inventory +forgejo_use_external_volume: true +forgejo_volume_device: /dev/sdb + +# Redeploy +make deploy-tags TAGS=volume +``` + +## Disaster Recovery + +### Complete Failure Recovery + +```bash +# 1. Create new infrastructure +make terraform-apply PROVIDER=scaleway + +# 2. Update inventory with new IP +vim ansible/inventory/production/hosts.yml + +# 3. Update DNS records +# Point domain to new IP + +# 4. Deploy with restore +# First, copy backup timestamp +make ssh +ls /opt/forgejo/backups/ # Note timestamp + +# Then restore +make restore +# Enter timestamp when prompted + +# 5. Verify +make health +``` + +### Backup Storage Migration + +```bash +# Copy backups to new location +make ssh +rsync -avz /opt/forgejo/backups/ /new/backup/location/ + +# Update backup configuration +vim ansible/playbooks/vars/main.yml +# forgejo_backup_path: /new/backup/location + +make deploy-tags TAGS=backup +``` + +### Database Recovery from Corruption + +```bash +make ssh + +# Stop Forgejo +docker compose down + +# Backup corrupted database +sudo -u postgres pg_dump forgejo > /tmp/forgejo-corrupted.sql + +# Drop and recreate +sudo -u postgres dropdb forgejo +sudo -u postgres createdb forgejo + +# Restore from latest backup +gunzip -c /opt/forgejo/backups/database-TIMESTAMP.sql.gz | \ + sudo -u postgres psql forgejo + +# Restart Forgejo +docker compose up -d +``` + +## Best Practices + +### Daily Checklist + +- [ ] Check service health: `make health` +- [ ] Review logs for errors: `make logs | grep -i error` +- [ ] Verify backups ran: `ls -lt /opt/forgejo/backups/ | head -5` +- [ ] Check disk space: `df -h` + +### Weekly Checklist + +- [ ] Review security logs: `sudo grep "Failed" /var/log/auth.log` +- [ ] Check for updates: `sudo apt update && apt list --upgradable` +- [ ] Test backup restore (in dev environment) +- [ ] Review performance metrics +- [ ] Check SSL certificate expiry + +### Monthly Checklist + +- [ ] Full security audit +- [ ] Review and update firewall rules +- [ ] Rotate secrets if needed +- [ ] Review and optimize database +- [ ] Update documentation +- [ ] Test disaster recovery procedures + +## Emergency Contacts + +Keep these handy: + +```bash +# Quick recovery commands +make health # Check status +make restart # Restart service +make logs # View logs +make backup # Create backup +make restore # Restore from backup +make ssh # SSH access + +# Emergency rollback +make update # Includes backup +# If issues: make restore # Roll back +``` + +## Useful Scripts + +### Health Check Script + 
+```bash +#!/bin/bash +# Save as: health-check.sh + +echo "=== Forgejo Health Check ===" +echo "Date: $(date)" +echo "" + +echo "1. Service Status:" +docker ps | grep forgejo + +echo "" +echo "2. Disk Space:" +df -h | grep -E "Filesystem|/dev/vda1|/dev/sda1" + +echo "" +echo "3. Memory Usage:" +free -h + +echo "" +echo "4. Latest Backup:" +ls -lth /opt/forgejo/backups/*.tar.gz | head -1 + +echo "" +echo "5. HTTP Status:" +curl -s -o /dev/null -w "%{http_code}" http://localhost:3000 +``` + +--- + +**Remember**: Always test procedures in a development environment first! diff --git a/docs/QUICKSTART.md b/docs/QUICKSTART.md new file mode 100644 index 0000000..e048c60 --- /dev/null +++ b/docs/QUICKSTART.md @@ -0,0 +1,190 @@ +# Quick Start Guide - 15 Minutes to Forgejo + +Get your Forgejo instance running in 15 minutes. + +## Prerequisites Check (2 minutes) + +```bash +# Install required tools (if not installed) +# macOS: +brew install terraform terragrunt ansible + +# Ubuntu/Debian: +sudo apt-get install terraform ansible # (or look for instructions to do this using a virtualenv for ansible) +curl -L https://github.com/gruntwork-io/terragrunt/releases/download/v0.50.0/terragrunt_linux_amd64 -o /usr/local/bin/terragrunt +chmod +x /usr/local/bin/terragrunt + +# Verify installations +make check-deps +``` + +## Step 1: Cloud Provider Setup (3 minutes) + +### Option A: Scaleway + +1. Create account at https://console.scaleway.com +2. Generate API credentials: Console → IAM → API Keys +3. Export credentials: + ```bash + export SCW_ACCESS_KEY="SCW..." + export SCW_SECRET_KEY="..." + export SCW_DEFAULT_PROJECT_ID="..." #bear in mind, you might want a project ID other than the default + ``` + +### Option B: Hetzner + +1. Create account at https://console.hetzner.cloud +2. Generate API token: Security → API Tokens +3. Export token: + ```bash + export HCLOUD_TOKEN="..." + ``` + +## Step 2: Configuration (5 minutes) + +### Configure Domain + +```bash +# terraform/scaleway/compute/terraform.tfvars (or hetzner) +domain_name = "git.yourdomain.com" +``` + +### Generate Secrets + +```bash +# Generate strong passwords +openssl rand -base64 32 # Database password +openssl rand -base64 32 # Admin password +openssl rand -base64 48 # Secret key +openssl rand -base64 48 # Internal token +openssl rand -base64 48 # JWT secret +``` + +### Configure Secrets + +```bash +cd ansible/playbooks/vars +cp secrets.yml.example secrets.yml + +# Edit with generated passwords +$EDITOR secrets.yml + +# Encrypt +ansible-vault encrypt secrets.yml +# Enter vault password (remember this!) +``` + +### Update Inventory + +```bash +$EDITOR ansible/inventory/production/hosts.yml +``` + +Change: +- `forgejo_domain: git.yourdomain.com` +- `letsencrypt_email: your@email.com` + +## Step 3: Deploy (5 minutes) + +```bash +# Create infrastructure +make terraform-apply PROVIDER=scaleway # or hetzner + +# Get server IP +make terraform-output PROVIDER=scaleway + +# Create DNS A record +# git.yourdomain.com → + +# Wait 2 minutes for DNS propagation + +# Update inventory with server IP +$EDITOR ansible/inventory/production/hosts.yml +# Change: ansible_host: + +# Deploy Forgejo +make deploy +# Enter vault password when prompted +``` + +## Step 4: Access (1 minute) + +Visit: `https://git.yourdomain.com` + +**First Login:** + +1. Complete installation wizard +2. Login with admin credentials from vault +3. Create your first repository! 
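+
+Optionally, verify DNS and TLS from your workstation before moving on (the hostname below is a placeholder):
+
+```bash
+# DNS should resolve to your server's public IP
+dig +short git.yourdomain.com
+
+# The web interface should answer over HTTPS (a redirect to the login page is also fine)
+curl -sI https://git.yourdomain.com | head -1
+```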
+ +## Next Steps + +- [ ] Configure SSH key: Settings → SSH Keys +- [ ] Create organization +- [ ] Import repositories +- [ ] Set up webhooks +- [ ] Configure CI/CD with Forgejo Actions +- [ ] Invite team members + +## Troubleshooting + +**Can't connect to server?** + +```bash +make ansible-ping +``` + +**SSL certificate not working?** +- Wait 5 minutes for Let's Encrypt +- Check DNS: `dig git.yourdomain.com` + +**Service not starting?** + +```bash +make logs +make status +``` + +**Need to start over?** + +```bash +make terraform-destroy PROVIDER=scaleway +# Then start from Step 3 +``` + +## Daily Operations + +```bash +# Create backup +make backup + +# Update Forgejo +make update + +# View logs +make logs + +# SSH into server +make ssh + +# Check health +make health +``` + +## Cost Per Month (in January 2026) + +- **Scaleway**: ~€9/month +- **Hetzner**: ~€8/month + +**vs GitHub Enterprise**: €19/user/month + +## Support + +- Check logs: `make logs` +- View status: `make status` +- Full docs: `README.md` +- Troubleshooting: `README.md#troubleshooting` + +--- + +That's it! You now have a production-ready Forgejo instance running on European infrastructure. 🎉 diff --git a/setup-wizard.sh b/setup-wizard.sh new file mode 100755 index 0000000..e4ee34b --- /dev/null +++ b/setup-wizard.sh @@ -0,0 +1,1327 @@ +#!/usr/bin/env bash +# +# Forgejo Self-Hosting Setup Wizard +# Interactive setup script for deploying Forgejo on Scaleway or Hetzner +# +# This wizard guides you through: +# 1. Checking dependencies +# 2. Configuring secrets (Ansible Vault) +# 3. Setting up cloud provider credentials +# 4. Creating infrastructure (Terraform/Terragrunt) +# 5. Configuring Ansible inventory +# 6. Deploying Forgejo with proper Tailscale/UFW ordering +# +# Works on: macOS, Debian, Ubuntu +# + +set -euo pipefail + +# ============================================================================== +# Configuration +# ============================================================================== + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ANSIBLE_DIR="${SCRIPT_DIR}/ansible" +TERRAFORM_DIR="${SCRIPT_DIR}/terraform" +INVENTORY_FILE="${ANSIBLE_DIR}/inventory/production/hosts.yml" +SECRETS_FILE="${ANSIBLE_DIR}/playbooks/vars/secrets.yml" +SECRETS_EXAMPLE="${ANSIBLE_DIR}/playbooks/vars/secrets.yml.example" +STATE_FILE="${SCRIPT_DIR}/.wizard-state" + +# Point Ansible to our config file +export ANSIBLE_CONFIG="${ANSIBLE_DIR}/ansible.cfg" + +# State tracking (compatible with bash 3.x - no associative arrays) +VAULT_PASSWORD="" + +load_state() { + # State is read directly from file when needed (bash 3.x compatible) + true +} + +save_state() { + local key="$1" + local value="$2" + + # Create file if it doesn't exist + if [[ ! 
-f "$STATE_FILE" ]]; then + echo "# Wizard state - auto-generated" >"$STATE_FILE" + fi + + # Remove existing key if present, then add new value + if grep -q "^${key}=" "$STATE_FILE" 2>/dev/null; then + # Key exists, update it (macOS/Linux compatible) + local temp_file="${STATE_FILE}.tmp" + grep -v "^${key}=" "$STATE_FILE" >"$temp_file" + echo "${key}=${value}" >>"$temp_file" + mv "$temp_file" "$STATE_FILE" + else + # Key doesn't exist, append it + echo "${key}=${value}" >>"$STATE_FILE" + fi +} + +get_state() { + local key="$1" + if [[ -f "$STATE_FILE" ]]; then + # Use || true to prevent exit on grep finding no matches (exit code 1) + grep "^${key}=" "$STATE_FILE" 2>/dev/null | cut -d'=' -f2- | head -1 || true + fi +} + +# Check if a step is marked as "done" +is_step_done() { + [[ "$(get_state "$1")" == "done" ]] +} + +# Check if a step has any value set (for steps that store actual values like provider, domain) +has_state() { + local value + value="$(get_state "$1")" + [[ -n "$value" ]] +} + +# ============================================================================== +# Colors and Formatting (works on macOS and Linux) +# ============================================================================== + +# Check if terminal supports colors +if [[ -t 1 ]] && command -v tput &>/dev/null && [[ $(tput colors 2>/dev/null || echo 0) -ge 8 ]]; then + RED=$(tput setaf 1) + GREEN=$(tput setaf 2) + YELLOW=$(tput setaf 3) + BLUE=$(tput setaf 4) + MAGENTA=$(tput setaf 5) + CYAN=$(tput setaf 6) + BOLD=$(tput bold) + DIM=$(tput dim) + RESET=$(tput sgr0) +else + RED="" + GREEN="" + YELLOW="" + BLUE="" + MAGENTA="" + CYAN="" + BOLD="" + DIM="" + RESET="" +fi + +# ============================================================================== +# Helper Functions +# ============================================================================== + +print_banner() { + clear + echo "${CYAN}${BOLD}" + echo "╔═══════════════════════════════════════════════════════════════════════════╗" + echo "║ ║" + echo "║ ███████╗ ██████╗ ██████╗ ██████╗ ███████╗ ██╗ ██████╗ ║" + echo "║ ██╔════╝██╔═══██╗██╔══██╗██╔════╝ ██╔════╝ ██║██╔═══██╗ ║" + echo "║ █████╗ ██║ ██║██████╔╝██║ ███╗█████╗ ██║██║ ██║ ║" + echo "║ ██╔══╝ ██║ ██║██╔══██╗██║ ██║██╔══╝ ██ ██║██║ ██║ ║" + echo "║ ██║ ╚██████╔╝██║ ██║╚██████╔╝███████╗╚█████╔╝╚██████╔╝ ║" + echo "║ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚════╝ ╚═════╝ ║" + echo "║ ║" + echo "║ Self-Hosting Setup Wizard (by Dumontix) ║" + echo "║ ║" + echo "╚═══════════════════════════════════════════════════════════════════════════╝" + echo "${RESET}" + echo "" +} + +print_step() { + local step_num="$1" + local step_title="$2" + + # Clear screen for each new step (cleaner experience) + clear + + # Show mini header + echo "${CYAN}${BOLD}Forgejo Setup Wizard${RESET}" + echo "" + echo "${BLUE}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + echo "${BLUE}${BOLD} Step ${step_num}: ${step_title}${RESET}" + echo "${BLUE}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + echo "" +} + +print_info() { + echo "${CYAN}ℹ ${RESET}$1" +} + +print_success() { + echo "${GREEN}✓ ${RESET}$1" +} + +print_warning() { + echo "${YELLOW}⚠ ${RESET}$1" +} + +print_error() { + echo "${RED}✗ ${RESET}$1" +} + +print_action() { + echo "${MAGENTA}▶ ${RESET}$1" +} + +press_enter() { + echo "" + echo -n "${DIM}Press Enter to continue...${RESET}" + read -r &2 + else + echo -n "${YELLOW}? 
${RESET}${prompt}: " >&2 + fi + read -r result &2 + read -rs result &2 # New line after hidden input + echo "$result" +} + +ask_choice() { + local prompt="$1" + shift + local options=("$@") + local choice + + # Output menu to stderr so it's not captured by $() + echo "${YELLOW}? ${RESET}${prompt}" >&2 + for i in "${!options[@]}"; do + echo " ${CYAN}$((i + 1))${RESET}) ${options[$i]}" >&2 + done + echo -n " Enter choice [1-${#options[@]}]: " >&2 + # Read from /dev/tty to ensure we read from terminal + read -r choice /dev/null; then + openssl rand -base64 "$1" | tr -d '\n' + elif [[ -r /dev/urandom ]]; then + head -c "$1" /dev/urandom | base64 | tr -d '\n' | head -c "$1" + else + print_error "Cannot generate random password" + exit 1 + fi +} + +open_editor() { + local file="$1" + local editor="${EDITOR:-${VISUAL:-}}" + + # Try to find a suitable editor + if [[ -z "$editor" ]]; then + for cmd in nano vim vi; do + if command -v "$cmd" &>/dev/null; then + editor="$cmd" + break + fi + done + fi + + if [[ -z "$editor" ]]; then + print_error "No editor found. Please set \$EDITOR environment variable." + print_info "For now, please edit the file manually: ${file}" + press_enter + return 1 + fi + + print_action "Opening ${file} in ${editor}..." + "$editor" "$file" +} + +run_command() { + local description="$1" + shift + + print_action "${description}..." + echo "${DIM} \$ $*${RESET}" + + if "$@"; then + print_success "${description} completed" + return 0 + else + print_error "${description} failed" + return 1 + fi +} + +# ============================================================================== +# Step Functions +# ============================================================================== + +check_dependencies() { + print_step "1" "Checking Dependencies" + + local missing=() + local deps=(terraform terragrunt ansible ansible-playbook ansible-vault make ssh ssh-keygen) + + for dep in "${deps[@]}"; do + if command -v "$dep" &>/dev/null; then + print_success "$dep found: $(command -v "$dep")" + else + print_error "$dep not found" + missing+=("$dep") + fi + done + + echo "" + + if [[ ${#missing[@]} -gt 0 ]]; then + echo "" + print_error "Missing dependencies: ${missing[*]}" + echo "" + print_info "Please install the missing tools:" + echo "" + echo " ${BOLD}macOS (with Homebrew):${RESET}" + echo " brew install terraform terragrunt ansible make" + echo "" + echo " ${BOLD}Ubuntu/Debian:${RESET}" + echo " sudo apt update" + echo " sudo apt install make ansible" + echo " # Install terraform and terragrunt from their websites" + echo "" + echo " ${BOLD}on a Python virtualenv:${RESET} (likely can be done with UV, I am not sure how)" + echo " python3 -m venv .venv;. .venv/bin/activate" + echo " python3 -m pip install ansible" + echo " # Install terraform and terragrunt from their websites" + echo "" + exit 1 + fi + + print_success "All dependencies satisfied!" 
+} + +select_ssh_key() { + print_step "1b" "Select SSH Key" + + # Find all SSH keys (private keys without .pub extension that have a matching .pub) + local ssh_keys=() + local key_names=() + + for key in ~/.ssh/id_* ~/.ssh/*.pem; do + # Skip if glob didn't match + [[ -e "$key" ]] || continue + # Skip public keys + [[ "$key" == *.pub ]] && continue + # Skip known_hosts and config files + [[ "$key" == *known_hosts* ]] && continue + [[ "$key" == *config* ]] && continue + [[ "$key" == *authorized_keys* ]] && continue + # Check if it's a file (not directory) + [[ -f "$key" ]] || continue + + ssh_keys+=("$key") + # Get a friendly name (just the filename) + key_names+=("$(basename "$key")") + done + + if [[ ${#ssh_keys[@]} -eq 0 ]]; then + print_warning "No SSH keys found in ~/.ssh/" + if ask_yes_no "Generate a new SSH key?"; then + local key_name + key_name=$(ask_input "Key name" "id_ed25519") + ssh-keygen -t ed25519 -f ~/.ssh/"$key_name" -N "" + SSH_KEY=~/.ssh/"$key_name" + print_success "SSH key generated: ${SSH_KEY}" + else + print_error "SSH key is required for server access" + exit 1 + fi + elif [[ ${#ssh_keys[@]} -eq 1 ]]; then + SSH_KEY="${ssh_keys[0]}" + print_info "Using only available SSH key: ${SSH_KEY}" + else + print_info "Found ${#ssh_keys[@]} SSH keys:" + echo "" + + # Display keys with numbers + local i=1 + for key in "${ssh_keys[@]}"; do + local pub_key="${key}.pub" + local key_info="" + if [[ -f "$pub_key" ]]; then + # Extract comment/email from public key + key_info=$(awk '{print $3}' "$pub_key" 2>/dev/null || true) + fi + if [[ -n "$key_info" ]]; then + echo " ${CYAN}${i}${RESET}) $(basename "$key") ${DIM}(${key_info})${RESET}" + else + echo " ${CYAN}${i}${RESET}) $(basename "$key")" + fi + i=$((i + 1)) + done + echo "" + + local choice + echo -n " Select SSH key [1-${#ssh_keys[@]}]: " >&2 + read -r choice /dev/null | grep -q '^\$ANSIBLE_VAULT' +} + +read_vault_secret() { + local file="$1" + local key="$2" + local password="$3" + + # Decrypt and extract value using ansible-vault + echo "$password" | ansible-vault view "$file" --vault-password-file=/dev/stdin 2>/dev/null | grep "^${key}:" | sed "s/^${key}:[[:space:]]*//" | tr -d '"' | tr -d "'" +} + +configure_secrets() { + print_step "3" "Configure Secrets" + + # Check if secrets file exists and is encrypted + if [[ -f "$SECRETS_FILE" ]] && is_vault_encrypted "$SECRETS_FILE"; then + print_info "Encrypted secrets file found: ${SECRETS_FILE}" + echo "" + + local choice + choice=$(ask_choice "What would you like to do?" "Use existing secrets (enter vault password to view admin password)" "Generate new secrets (overwrites existing)") + + if [[ "$choice" == "0" ]]; then + # Read existing secrets + print_info "Enter your Ansible Vault password to read existing secrets." + echo "" + + local vault_pass + vault_pass=$(ask_secret "Vault password") + + if [[ -z "$vault_pass" ]]; then + print_error "No password entered" + return 1 + fi + + # Verify password by trying to view the file + local admin_password + admin_password=$(read_vault_secret "$SECRETS_FILE" "vault_forgejo_admin_password" "$vault_pass") + + if [[ -z "$admin_password" ]]; then + print_error "Could not decrypt secrets. Wrong password?" + if ask_yes_no "Try again?" "y"; then + configure_secrets + return $? + fi + return 1 + fi + + # Show admin password + echo "" + echo "${GREEN}${BOLD}┌─────────────────────────────────────────────────────────────────┐${RESET}" + echo "${GREEN}${BOLD}│ Secrets decrypted successfully! 
│${RESET}" + echo "${GREEN}${BOLD}│ │${RESET}" + echo "${GREEN}${BOLD}│ Admin Username: admin │${RESET}" + echo "${GREEN}${BOLD}│ Admin Password: ${admin_password} ${RESET}" + echo "${GREEN}${BOLD}│ │${RESET}" + echo "${GREEN}${BOLD}└─────────────────────────────────────────────────────────────────┘${RESET}" + echo "" + + print_success "Using existing secrets" + return 0 + fi + # Fall through to generate new secrets + print_warning "Existing secrets will be overwritten." + fi + + print_info "Secrets are stored encrypted with Ansible Vault." + print_info "You'll need to remember your vault password!" + echo "" + + # Copy example file + if [[ -f "$SECRETS_EXAMPLE" ]]; then + cp "$SECRETS_EXAMPLE" "$SECRETS_FILE" + else + print_error "Secrets example file not found: ${SECRETS_EXAMPLE}" + exit 1 + fi + + # Generate passwords + print_action "Generating secure passwords..." + + local db_password + local admin_password + local secret_key + local internal_token + local jwt_secret + local metrics_token + + db_password=$(generate_password 32) + admin_password=$(generate_password 24) + secret_key=$(generate_password 48) + internal_token=$(generate_password 48) + jwt_secret=$(generate_password 32) + metrics_token=$(generate_password 24) + + # Update secrets file + if [[ "$(uname)" == "Darwin" ]]; then + # macOS sed requires different syntax + sed -i '' "s|CHANGE_ME_STRONG_PASSWORD_HERE|${db_password}|g" "$SECRETS_FILE" + sed -i '' "s|CHANGE_ME_ADMIN_PASSWORD_HERE|${admin_password}|g" "$SECRETS_FILE" + sed -i '' "s|CHANGE_ME_SECRET_KEY_64_CHARS_MINIMUM_XXXXXXXXXXXXXXXXX|${secret_key}|g" "$SECRETS_FILE" + sed -i '' "s|CHANGE_ME_INTERNAL_TOKEN_XXXXXXXXXXXXXXXXXXXXXXXXX|${internal_token}|g" "$SECRETS_FILE" + sed -i '' "s|CHANGE_ME_JWT_SECRET_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|${jwt_secret}|g" "$SECRETS_FILE" + sed -i '' "s|CHANGE_ME_METRICS_TOKEN_XXXXXXXXX|${metrics_token}|g" "$SECRETS_FILE" + else + # Linux sed + sed -i "s|CHANGE_ME_STRONG_PASSWORD_HERE|${db_password}|g" "$SECRETS_FILE" + sed -i "s|CHANGE_ME_ADMIN_PASSWORD_HERE|${admin_password}|g" "$SECRETS_FILE" + sed -i "s|CHANGE_ME_SECRET_KEY_64_CHARS_MINIMUM_XXXXXXXXXXXXXXXXX|${secret_key}|g" "$SECRETS_FILE" + sed -i "s|CHANGE_ME_INTERNAL_TOKEN_XXXXXXXXXXXXXXXXXXXXXXXXX|${internal_token}|g" "$SECRETS_FILE" + sed -i "s|CHANGE_ME_JWT_SECRET_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|${jwt_secret}|g" "$SECRETS_FILE" + sed -i "s|CHANGE_ME_METRICS_TOKEN_XXXXXXXXX|${metrics_token}|g" "$SECRETS_FILE" + fi + + print_success "Passwords generated and saved" + + # Show admin password + echo "" + echo "${YELLOW}${BOLD}┌─────────────────────────────────────────────────────────────────┐${RESET}" + echo "${YELLOW}${BOLD}│ IMPORTANT: Save your admin password! │${RESET}" + echo "${YELLOW}${BOLD}│ │${RESET}" + echo "${YELLOW}${BOLD}│ Admin Username: admin │${RESET}" + echo "${YELLOW}${BOLD}│ Admin Password: ${admin_password} │${RESET}" + echo "${YELLOW}${BOLD}│ │${RESET}" + echo "${YELLOW}${BOLD}│ You can change this later in the Forgejo web interface. │${RESET}" + echo "${YELLOW}${BOLD}└─────────────────────────────────────────────────────────────────┘${RESET}" + echo "" + + # Encrypt with Ansible Vault + print_info "Now you need to encrypt the secrets file with Ansible Vault." + print_info "Choose a strong password and remember it!" 
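+    # ansible-vault prompts twice for a new vault password; the same password is required
+    # for every later deploy (--ask-vault-pass), so keep it somewhere safe.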
+ echo "" + + if ansible-vault encrypt "$SECRETS_FILE"; then + print_success "Secrets encrypted successfully" + else + print_error "Failed to encrypt secrets" + exit 1 + fi +} + +configure_cloud_credentials() { + print_step "4" "Configure Cloud Provider Credentials" + + if [[ "$PROVIDER" == "scaleway" ]]; then + print_info "You need Scaleway API credentials." + print_info "Get them from: https://console.scaleway.com/iam/api-keys" + echo "" + + if [[ -z "${SCW_ACCESS_KEY:-}" ]]; then + SCW_ACCESS_KEY=$(ask_input "Scaleway Access Key") + export SCW_ACCESS_KEY + else + print_success "SCW_ACCESS_KEY already set: ${SCW_ACCESS_KEY:0:8}..." + fi + + if [[ -z "${SCW_SECRET_KEY:-}" ]]; then + SCW_SECRET_KEY=$(ask_input "Scaleway Secret Key") + export SCW_SECRET_KEY + else + print_success "SCW_SECRET_KEY already set" + fi + + echo "" + print_info "Project ID determines where resources are created." + print_info "Find it at: https://console.scaleway.com/project/settings" + print_info " (Click on the project you want, then Settings → Project ID)" + echo "" + + if [[ -n "${SCW_DEFAULT_PROJECT_ID:-}" ]]; then + print_info "Current project ID: ${SCW_DEFAULT_PROJECT_ID}" + if ask_yes_no "Use this project?" "y"; then + print_success "Using existing project ID" + else + SCW_DEFAULT_PROJECT_ID=$(ask_input "Scaleway Project ID") + export SCW_DEFAULT_PROJECT_ID + fi + else + SCW_DEFAULT_PROJECT_ID=$(ask_input "Scaleway Project ID") + export SCW_DEFAULT_PROJECT_ID + fi + + else # hetzner + print_info "You need a Hetzner Cloud API token." + print_info "Get it from: https://console.hetzner.cloud/ → Security → API Tokens" + echo "" + + if [[ -z "${HCLOUD_TOKEN:-}" ]]; then + HCLOUD_TOKEN=$(ask_input "Hetzner API Token") + export HCLOUD_TOKEN + else + print_success "HCLOUD_TOKEN already set" + fi + fi + + print_success "Cloud credentials configured" +} + +configure_domain() { + print_step "5" "Configure Domain" + + print_info "Your Forgejo instance needs a domain name." + print_info "Example: git.yourdomain.com" + echo "" + + DOMAIN=$(ask_input "Enter your domain" "git.example.com") + + print_info "After deployment, you'll need to create DNS records:" + echo "" + echo " ${DOMAIN} IN A " + echo " ${DOMAIN} IN AAAA " + echo "" + + # Create inventory file from example if it doesn't exist + local inventory_example="${ANSIBLE_DIR}/inventory/production/hosts.yml.example" + if [[ ! -f "$INVENTORY_FILE" ]] && [[ -f "$inventory_example" ]]; then + print_action "Creating inventory file from example..." + cp "$inventory_example" "$INVENTORY_FILE" + fi + + # Update inventory file + if [[ -f "$INVENTORY_FILE" ]]; then + if [[ "$(uname)" == "Darwin" ]]; then + sed -i '' "s|forgejo_domain:.*|forgejo_domain: ${DOMAIN}|g" "$INVENTORY_FILE" + else + sed -i "s|forgejo_domain:.*|forgejo_domain: ${DOMAIN}|g" "$INVENTORY_FILE" + fi + print_success "Domain configured in inventory" + else + print_error "Could not find or create inventory file" + return 1 + fi + + export DOMAIN +} + +create_infrastructure() { + print_step "6" "Create Infrastructure" + + print_info "This will create the following resources on ${PROVIDER}:" + echo "" + if [[ "$PROVIDER" == "scaleway" ]]; then + echo " - DEV1-M compute instance" + echo " - 50GB block storage volume" + echo " - Security group" + echo " - Reserved IP address" + echo " - SSH key registration" + else + echo " - CPX21 compute instance" + echo " - Block volume" + echo " - Firewall rules" + echo " - SSH key registration" + fi + echo "" + + if ! 
ask_yes_no "Create infrastructure now?"; then + print_warning "Skipping infrastructure creation" + return 0 + fi + + cd "$TERRAFORM_DIR/$PROVIDER/compute" + + # Update terraform.tfvars with the SSH public key and domain + local tfvars_file="terraform.tfvars" + if [[ -n "${SSH_PUBLIC_KEY:-}" ]]; then + print_action "Configuring SSH key in Terraform..." + + # Create or update terraform.tfvars with the SSH key + if [[ -f "$tfvars_file" ]]; then + # Remove existing ssh_public_key line if present + grep -v '^ssh_public_key' "$tfvars_file" >"${tfvars_file}.tmp" || true + mv "${tfvars_file}.tmp" "$tfvars_file" + fi + + # Append SSH public key + echo "" >>"$tfvars_file" + echo "# SSH public key (added by setup wizard)" >>"$tfvars_file" + echo "ssh_public_key = \"${SSH_PUBLIC_KEY}\"" >>"$tfvars_file" + + print_success "SSH public key configured" + else + print_warning "No SSH public key available - you may need to add it manually to your cloud provider" + fi + + # Update domain in tfvars if set + if [[ -n "${DOMAIN:-}" ]]; then + if [[ -f "$tfvars_file" ]]; then + if [[ "$(uname)" == "Darwin" ]]; then + sed -i '' "s|^domain_name = .*|domain_name = \"${DOMAIN}\"|" "$tfvars_file" + else + sed -i "s|^domain_name = .*|domain_name = \"${DOMAIN}\"|" "$tfvars_file" + fi + fi + fi + + # Initialize + run_command "Initializing Terraform" terragrunt init + + # Plan + print_action "Planning infrastructure..." + terragrunt plan + + echo "" + if ! ask_yes_no "Apply this plan?"; then + print_warning "Infrastructure creation cancelled" + return 1 + fi + + # Apply + run_command "Creating infrastructure" terragrunt apply -auto-approve + + # Get outputs + echo "" + print_success "Infrastructure created!" + echo "" + + SERVER_IP=$(terragrunt output -raw server_ip 2>/dev/null || terragrunt output -raw server_ipv4 2>/dev/null || echo "") + + if [[ -n "$SERVER_IP" ]]; then + echo "${GREEN}${BOLD}┌─────────────────────────────────────────────────────────────────┐${RESET}" + echo "${GREEN}${BOLD}│ Server IP: ${SERVER_IP} ${RESET}" + echo "${GREEN}${BOLD}└─────────────────────────────────────────────────────────────────┘${RESET}" + echo "" + + # Update inventory + if [[ "$(uname)" == "Darwin" ]]; then + sed -i '' "s|ansible_host:.*|ansible_host: ${SERVER_IP}|g" "$INVENTORY_FILE" + else + sed -i "s|ansible_host:.*|ansible_host: ${SERVER_IP}|g" "$INVENTORY_FILE" + fi + print_success "Server IP updated in inventory" + + export SERVER_IP + fi + + echo "" + print_warning "IMPORTANT: Create DNS records now!" + echo "" + echo " ${DOMAIN} IN A ${SERVER_IP}" + echo "" + print_info "DNS propagation may take a few minutes." + + press_enter + + cd "$SCRIPT_DIR" +} + +wait_for_server() { + print_step "7" "Waiting for Server" + + print_info "Waiting for server to be ready..." + print_info "Using SSH key: ${SSH_KEY}" + + local max_attempts=30 + local attempt=1 + + while [[ $attempt -le $max_attempts ]]; do + echo -n " Attempt ${attempt}/${max_attempts}: " + if ssh -i "$SSH_KEY" -o ConnectTimeout=5 -o StrictHostKeyChecking=no -o BatchMode=yes "root@${SERVER_IP}" "echo 'Server ready'" 2>/dev/null; then + echo "" + print_success "Server is ready!" + return 0 + fi + echo "waiting..." 
+ sleep 10 + ((attempt++)) + done + + print_error "Server not reachable after ${max_attempts} attempts" + print_info "You may need to wait longer or check your cloud provider console" + + if ask_yes_no "Continue anyway?"; then + return 0 + fi + return 1 +} + +deploy_base() { + print_step "8" "Deploy Base Configuration" + + print_info "This will deploy:" + echo " - System packages and configuration" + echo " - Docker" + echo " - PostgreSQL database" + echo " - Redis cache" + echo " - Forgejo application" + echo " - Caddy web server (HTTPS)" + echo " - Tailscale VPN" + echo "" + print_warning "Tailscale will be installed but NOT UFW yet!" + print_info "We'll configure UFW after you authenticate Tailscale." + echo "" + + if ! ask_yes_no "Start deployment?"; then + return 1 + fi + + # Deploy everything except UFW (run from ansible dir to pick up ansible.cfg) + cd "$ANSIBLE_DIR" + + print_action "Running Ansible deployment (without UFW)..." + ansible-playbook \ + -i "inventory/production/hosts.yml" \ + "playbooks/deploy.yml" \ + --ask-vault-pass \ + --skip-tags "ufw,firewall" + + cd "$SCRIPT_DIR" + + print_success "Base deployment completed!" +} + +configure_tailscale() { + print_step "9" "Configure Tailscale VPN" + + echo "" + echo "${YELLOW}${BOLD}╔═══════════════════════════════════════════════════════════════════════════╗${RESET}" + echo "${YELLOW}${BOLD}║ TAILSCALE AUTHENTICATION REQUIRED ║${RESET}" + echo "${YELLOW}${BOLD}╚═══════════════════════════════════════════════════════════════════════════╝${RESET}" + echo "" + print_info "Tailscale is installed but needs to be authenticated." + print_info "This connects your server to your private Tailscale network." + echo "" + print_warning "After this step, SSH will ONLY be accessible via Tailscale!" + echo "" + + echo "You have two options:" + echo "" + echo " ${BOLD}Option 1: Interactive (recommended)${RESET}" + echo " 1. SSH into the server: ssh -i ${SSH_KEY} root@${SERVER_IP}" + echo " 2. Run: sudo tailscale up --ssh" + echo " 3. Open the URL shown in your browser to authenticate" + echo "" + echo " ${BOLD}Option 2: Auth Key (for automation)${RESET}" + echo " 1. Generate a key at: https://login.tailscale.com/admin/settings/keys" + echo " 2. SSH into the server: ssh -i ${SSH_KEY} root@${SERVER_IP}" + echo " 3. Run: sudo tailscale up --authkey=tskey-auth-XXXXX" + echo "" + + if ask_yes_no "Open SSH session to configure Tailscale now?"; then + print_action "Opening SSH session..." + print_info "Run: sudo tailscale up --ssh" + print_info "Then exit the SSH session when done." + echo "" + ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "root@${SERVER_IP}" || true + fi + + echo "" + print_warning "VERIFY: Is Tailscale authenticated and connected?" + echo "" + + if ! ask_yes_no "Have you successfully authenticated Tailscale?"; then + print_error "Tailscale must be authenticated before enabling UFW!" + print_info "Without Tailscale, you will be locked out of your server." + echo "" + + if ! ask_yes_no "Try again?"; then + print_warning "Skipping UFW configuration. Your server SSH is still publicly accessible." + print_info "Run 'make deploy-tags TAGS=ufw' later after configuring Tailscale." + return 1 + fi + + configure_tailscale + return $? + fi + + print_success "Tailscale configured!" + + # Verify Tailscale connection + print_action "Verifying Tailscale connection..." 
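+    # Query the node's Tailscale IPv4 over the public SSH path, which is still open because UFW has not been enabled yet.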
+ local tailscale_ip + tailscale_ip=$(ssh -i "$SSH_KEY" -o StrictHostKeyChecking=no "root@${SERVER_IP}" "tailscale ip -4" 2>/dev/null || echo "") + + if [[ -n "$tailscale_ip" ]]; then + echo "" + echo "${GREEN}${BOLD}┌─────────────────────────────────────────────────────────────────┐${RESET}" + echo "${GREEN}${BOLD}│ Tailscale IP: ${tailscale_ip} ${RESET}" + echo "${GREEN}${BOLD}│ │${RESET}" + echo "${GREEN}${BOLD}│ After UFW is enabled, use this IP for SSH: │${RESET}" + echo "${GREEN}${BOLD}│ ssh root@${tailscale_ip} ${RESET}" + echo "${GREEN}${BOLD}└─────────────────────────────────────────────────────────────────┘${RESET}" + echo "" + export TAILSCALE_IP="$tailscale_ip" + fi + + return 0 +} + +enable_firewall() { + print_step "10" "Enable UFW Firewall" + + print_warning "This will enable the UFW firewall with the following rules:" + echo "" + echo " ${GREEN}PUBLIC ACCESS:${RESET}" + echo " - Port 80 (HTTP) - Caddy/Let's Encrypt" + echo " - Port 443 (HTTPS) - Forgejo web interface" + echo "" + echo " ${YELLOW}TAILSCALE-ONLY ACCESS:${RESET}" + echo " - Port 22 (SSH) - System administration" + echo " - Port 2222 (Git SSH) - Git clone/push/pull" + echo " - All other internal services" + echo "" + print_warning "After this, public SSH will be blocked!" + print_info "You will need to use Tailscale to access SSH." + echo "" + + if ! ask_yes_no "Enable UFW firewall now?"; then + print_warning "Skipping UFW. Your SSH is still publicly accessible." + print_info "Run 'make deploy-tags TAGS=ufw' later to enable UFW." + return 0 + fi + + # Run from ansible dir to pick up ansible.cfg + cd "$ANSIBLE_DIR" + + print_action "Enabling UFW firewall..." + ansible-playbook \ + -i "inventory/production/hosts.yml" \ + "playbooks/deploy.yml" \ + --ask-vault-pass \ + --tags "ufw,firewall" + + cd "$SCRIPT_DIR" + + print_success "UFW firewall enabled!" + + # Verify we can still connect via Tailscale + if [[ -n "${TAILSCALE_IP:-}" ]]; then + print_action "Verifying Tailscale SSH access..." + if ssh -i "$SSH_KEY" -o ConnectTimeout=10 -o StrictHostKeyChecking=no "root@${TAILSCALE_IP}" "echo 'Tailscale SSH works!'" 2>/dev/null; then + print_success "Tailscale SSH access confirmed!" + else + print_warning "Could not verify Tailscale SSH. Please test manually." + fi + fi +} + +show_completion() { + print_step "✓" "Setup Complete!" + + echo "" + echo "${GREEN}${BOLD}╔═══════════════════════════════════════════════════════════════════════════╗${RESET}" + echo "${GREEN}${BOLD}║ ║${RESET}" + echo "${GREEN}${BOLD}║ 🎉 Congratulations! Your Forgejo instance is ready! 
║${RESET}" + echo "${GREEN}${BOLD}║ ║${RESET}" + echo "${GREEN}${BOLD}╚═══════════════════════════════════════════════════════════════════════════╝${RESET}" + echo "" + + echo "${CYAN}${BOLD}Access Your Forgejo:${RESET}" + echo "" + echo " Web Interface: https://${DOMAIN}" + echo " Admin Username: admin" + echo " Admin Password: (the one shown earlier)" + echo "" + + echo "${CYAN}${BOLD}SSH Access (via Tailscale):${RESET}" + echo "" + if [[ -n "${TAILSCALE_IP:-}" ]]; then + echo " ssh -i ${SSH_KEY} root@${TAILSCALE_IP}" + else + echo " ssh -i ${SSH_KEY} root@" + fi + echo "" + + echo "${CYAN}${BOLD}Git Clone URLs:${RESET}" + echo "" + echo " HTTPS: https://${DOMAIN}/user/repo.git" + echo " SSH: git@:user/repo.git" + echo "" + + echo "${CYAN}${BOLD}Useful Commands:${RESET}" + echo "" + echo " make status # Check service status" + echo " make logs # View Forgejo logs" + echo " make backup # Create backup" + echo " make update # Update Forgejo" + echo "" + + echo "${CYAN}${BOLD}Documentation:${RESET}" + echo "" + echo " docs/CONFIGURATION.md - All configuration options" + echo " docs/OPERATIONS.md - Operations guide" + echo " README.md - Quick reference" + echo "" + + echo "${YELLOW}${BOLD}Next Steps:${RESET}" + echo "" + echo " 1. Wait for DNS propagation (check: dig ${DOMAIN})" + echo " 2. Access https://${DOMAIN} in your browser" + echo " 3. Log in with admin credentials" + echo " 4. Configure 2FA for the admin account" + echo " 5. Create your first repository!" + echo "" +} + +# ============================================================================== +# Main +# ============================================================================== + +show_resume_status() { + echo "" + echo "${CYAN}${BOLD}Previous session detected. Completed steps:${RESET}" + echo "" + + # Check each step individually (some use is_step_done, some use has_state) + # Steps that store "done": dependencies, secrets, cloud_credentials, infrastructure, server_ready, base_deploy, tailscale, firewall + # Steps that store values: provider, domain, server_ip + + if is_step_done "dependencies"; then + echo " ${GREEN}✓${RESET} Check Dependencies" + else + echo " ${DIM}○${RESET} Check Dependencies" + fi + + if has_state "ssh_key"; then + echo " ${GREEN}✓${RESET} SSH Key ($(basename "$(get_state ssh_key)"))" + else + echo " ${DIM}○${RESET} SSH Key" + fi + + if has_state "provider"; then + echo " ${GREEN}✓${RESET} Choose Provider ($(get_state provider))" + else + echo " ${DIM}○${RESET} Choose Provider" + fi + + if is_step_done "secrets"; then + echo " ${GREEN}✓${RESET} Configure Secrets" + else + echo " ${DIM}○${RESET} Configure Secrets" + fi + + if is_step_done "cloud_credentials"; then + echo " ${GREEN}✓${RESET} Cloud Credentials" + else + echo " ${DIM}○${RESET} Cloud Credentials" + fi + + if has_state "domain"; then + echo " ${GREEN}✓${RESET} Configure Domain ($(get_state domain))" + else + echo " ${DIM}○${RESET} Configure Domain" + fi + + if is_step_done "infrastructure"; then + echo " ${GREEN}✓${RESET} Create Infrastructure" + else + echo " ${DIM}○${RESET} Create Infrastructure" + fi + + if is_step_done "server_ready"; then + echo " ${GREEN}✓${RESET} Wait for Server" + else + echo " ${DIM}○${RESET} Wait for Server" + fi + + if is_step_done "base_deploy"; then + echo " ${GREEN}✓${RESET} Base Deployment" + else + echo " ${DIM}○${RESET} Base Deployment" + fi + + if is_step_done "tailscale"; then + echo " ${GREEN}✓${RESET} Tailscale Setup" + else + echo " ${DIM}○${RESET} Tailscale Setup" + fi + + if is_step_done 
"firewall"; then + echo " ${GREEN}✓${RESET} Enable Firewall" + else + echo " ${DIM}○${RESET} Enable Firewall" + fi + + # Show server IP if set + local saved_ip + saved_ip=$(get_state "server_ip") + if [[ -n "$saved_ip" ]]; then + echo "" + echo "${CYAN}${BOLD}Server IP:${RESET} ${saved_ip}" + fi + echo "" +} + +main() { + # Load previous state if exists + load_state + + print_banner + + # Check if resuming + if [[ -f "$STATE_FILE" ]]; then + show_resume_status + + echo "" + local choice + choice=$(ask_choice "What would you like to do?" "Continue from where you left off" "Start fresh (reset all progress)") + + if [[ "$choice" == "1" ]]; then + # Reset state + rm -f "$STATE_FILE" + print_info "Starting fresh..." + echo "" + else + # Restore saved values + PROVIDER=$(get_state "provider") + DOMAIN=$(get_state "domain") + SERVER_IP=$(get_state "server_ip") + TAILSCALE_IP=$(get_state "tailscale_ip") + SSH_KEY=$(get_state "ssh_key") + SSH_PUBLIC_KEY=$(get_state "ssh_public_key") + export PROVIDER DOMAIN SERVER_IP TAILSCALE_IP SSH_KEY SSH_PUBLIC_KEY + print_info "Resuming previous session..." + echo "" + fi + else + echo "This wizard will guide you through setting up your own Forgejo instance." + echo "It takes about 10-15 minutes to complete." + echo "" + + if ! ask_yes_no "Ready to begin?"; then + echo "Setup cancelled." + exit 0 + fi + fi + + # Step 1: Dependencies + if ! is_step_done "dependencies"; then + check_dependencies + save_state "dependencies" "done" + press_enter + else + print_info "Skipping dependencies check (already completed)" + fi + + # Step 1b: SSH Key selection (stores path, not "done") + if ! has_state "ssh_key"; then + select_ssh_key + save_state "ssh_key" "$SSH_KEY" + # Store public key content for terraform (base64 encoded to handle newlines/spaces) + if [[ -n "${SSH_PUBLIC_KEY:-}" ]]; then + save_state "ssh_public_key" "$SSH_PUBLIC_KEY" + fi + press_enter + else + SSH_KEY=$(get_state "ssh_key") + SSH_PUBLIC_KEY=$(get_state "ssh_public_key") + export SSH_KEY SSH_PUBLIC_KEY + print_info "Using SSH key: ${SSH_KEY}" + fi + + # Step 2: Provider (stores actual value, not "done") + if ! has_state "provider"; then + choose_provider + save_state "provider" "$PROVIDER" + press_enter + else + print_info "Skipping provider selection (using: ${PROVIDER})" + fi + + # Step 3: Secrets + if ! is_step_done "secrets"; then + configure_secrets + save_state "secrets" "done" + press_enter + else + print_info "Skipping secrets configuration (already completed)" + fi + + # Step 4: Cloud Credentials + # Skip cloud credentials entirely if infrastructure is already created + # (remaining steps are Ansible-only and don't need cloud provider access) + if is_step_done "infrastructure"; then + print_info "Skipping cloud credentials (infrastructure already exists)" + elif ! is_step_done "cloud_credentials"; then + configure_cloud_credentials + save_state "cloud_credentials" "done" + press_enter + else + print_info "Skipping cloud credentials (already configured)" + # Re-prompt for credentials since they're not persisted (security) + configure_cloud_credentials + fi + + # Step 5: Domain (stores actual value, not "done") + if ! has_state "domain"; then + configure_domain + save_state "domain" "$DOMAIN" + press_enter + else + print_info "Skipping domain configuration (using: ${DOMAIN})" + fi + + # Step 6: Infrastructure + if ! 
is_step_done "infrastructure"; then + create_infrastructure + if [[ -n "${SERVER_IP:-}" ]]; then + save_state "infrastructure" "done" + save_state "server_ip" "$SERVER_IP" + fi + else + print_info "Skipping infrastructure creation (already created)" + fi + + # Ensure we have server IP + if [[ -z "${SERVER_IP:-}" ]]; then + SERVER_IP=$(ask_input "Enter server IP address") + save_state "server_ip" "$SERVER_IP" + export SERVER_IP + fi + + # Step 7: Wait for Server + if ! is_step_done "server_ready"; then + wait_for_server + save_state "server_ready" "done" + press_enter + else + print_info "Skipping server wait (already verified)" + fi + + # Step 8: Base Deployment + if ! is_step_done "base_deploy"; then + deploy_base + save_state "base_deploy" "done" + press_enter + else + print_info "Skipping base deployment (already deployed)" + fi + + # Step 9: Tailscale + if ! is_step_done "tailscale"; then + if configure_tailscale; then + save_state "tailscale" "done" + if [[ -n "${TAILSCALE_IP:-}" ]]; then + save_state "tailscale_ip" "$TAILSCALE_IP" + fi + fi + else + print_info "Skipping Tailscale configuration (already configured)" + fi + + # Step 10: Firewall + if ! is_step_done "firewall"; then + if is_step_done "tailscale"; then + enable_firewall + save_state "firewall" "done" + else + print_warning "Skipping firewall - Tailscale must be configured first" + fi + else + print_info "Skipping firewall setup (already enabled)" + fi + + show_completion + + # Clean up state file on successful completion + if is_step_done "firewall" || is_step_done "tailscale"; then + if ask_yes_no "Setup complete! Remove progress tracking file?" "y"; then + rm -f "$STATE_FILE" + fi + fi +} + +# Run main function +main "$@" diff --git a/terraform/hetzner/compute/terraform.tfvars.example b/terraform/hetzner/compute/terraform.tfvars.example new file mode 100644 index 0000000..277eef5 --- /dev/null +++ b/terraform/hetzner/compute/terraform.tfvars.example @@ -0,0 +1,12 @@ +# Hetzner Forgejo Server Configuration +# Copy this file to terraform.tfvars and update with your values: +# cp terraform.tfvars.example terraform.tfvars + +# Server type (cpx21 = 4 vCPU, 8GB RAM | cpx31 = 8 vCPU, 16GB RAM) +server_type = "cpx21" + +# SSH keys to add (leave empty to use all keys in account) +# ssh_keys = ["my-key-name"] + +# Domain name for Forgejo +# domain_name = "git.example.com" diff --git a/terraform/hetzner/compute/terragrunt.hcl b/terraform/hetzner/compute/terragrunt.hcl new file mode 100644 index 0000000..61d9a6e --- /dev/null +++ b/terraform/hetzner/compute/terragrunt.hcl @@ -0,0 +1,304 @@ +# Include root configuration +include "root" { + path = find_in_parent_folders("root.hcl") +} + +# Terragrunt configuration +terraform { + source = "." +} + +# Generate the main Terraform configuration +generate "main" { + path = "main.tf" + if_exists = "overwrite" + contents = < 0 ? 
0 : 1 +} + +# Network for private communication +resource "hcloud_network" "forgejo" { + name = "$${var.project_name}-$${var.environment}-network" + ip_range = "10.0.0.0/16" + labels = var.common_labels +} + +resource "hcloud_network_subnet" "forgejo" { + network_id = hcloud_network.forgejo.id + type = "cloud" + network_zone = "eu-central" + ip_range = "10.0.1.0/24" +} + +# Firewall +resource "hcloud_firewall" "forgejo" { + name = "$${var.project_name}-$${var.environment}-firewall" + labels = var.common_labels + + rule { + direction = "in" + protocol = "tcp" + port = "22" + source_ips = [ + "0.0.0.0/0", + "::/0" + ] + description = "SSH access" + } + + rule { + direction = "in" + protocol = "tcp" + port = "80" + source_ips = [ + "0.0.0.0/0", + "::/0" + ] + description = "HTTP" + } + + rule { + direction = "in" + protocol = "tcp" + port = "443" + source_ips = [ + "0.0.0.0/0", + "::/0" + ] + description = "HTTPS" + } + + rule { + direction = "in" + protocol = "tcp" + port = "2222" + source_ips = [ + "0.0.0.0/0", + "::/0" + ] + description = "SSH alternative port" + } + + rule { + direction = "in" + protocol = "icmp" + source_ips = [ + "0.0.0.0/0", + "::/0" + ] + description = "ICMP (ping)" + } +} + +# Placement Group for better availability +resource "hcloud_placement_group" "forgejo" { + name = "$${var.project_name}-$${var.environment}" + type = "spread" + labels = var.common_labels +} + +# Cloud-init configuration +data "cloudinit_config" "forgejo" { + gzip = false + base64_encode = false + + part { + content_type = "text/cloud-config" + content = yamlencode({ + package_update = true + package_upgrade = true + + packages = [ + "apt-transport-https", + "ca-certificates", + "curl", + "gnupg", + "lsb-release", + "python3", + "python3-pip", + "ufw" + ] + + write_files = [ + { + path = "/etc/sysctl.d/99-forgejo.conf" + content = <<-SYSCTL + # Forgejo optimizations + net.core.somaxconn = 1024 + net.ipv4.tcp_max_syn_backlog = 2048 + net.ipv4.ip_forward = 1 + vm.swappiness = 10 + fs.file-max = 65535 + SYSCTL + } + ] + + runcmd = [ + "sysctl -p /etc/sysctl.d/99-forgejo.conf", + "systemctl enable ssh", + "ufw --force enable", + "ufw allow 22/tcp", + "ufw allow 80/tcp", + "ufw allow 443/tcp", + "ufw allow 2222/tcp" + ] + }) + } +} + +# Server +resource "hcloud_server" "forgejo" { + name = "$${var.project_name}-$${var.environment}" + server_type = var.server_type + image = "ubuntu-24.04" + location = var.location + ssh_keys = length(var.ssh_keys) > 0 ? 
var.ssh_keys : data.hcloud_ssh_keys.all[0].ssh_keys[*].id + placement_group_id = hcloud_placement_group.forgejo.id + user_data = data.cloudinit_config.forgejo.rendered + + labels = var.common_labels + + public_net { + ipv4_enabled = true + ipv6_enabled = true + } + + firewall_ids = [hcloud_firewall.forgejo.id] + + # Attach to private network + network { + network_id = hcloud_network.forgejo.id + ip = "10.0.1.5" + } + + depends_on = [hcloud_network_subnet.forgejo] +} + +# Volume for data persistence +resource "hcloud_volume" "forgejo_data" { + name = "$${var.project_name}-$${var.environment}-data" + size = 50 + location = var.location + format = "ext4" + labels = var.common_labels +} + +resource "hcloud_volume_attachment" "forgejo_data" { + volume_id = hcloud_volume.forgejo_data.id + server_id = hcloud_server.forgejo.id + automount = false # We'll mount via Ansible for better control +} + +# Outputs +output "server_id" { + description = "Server ID" + value = hcloud_server.forgejo.id +} + +output "server_name" { + description = "Server name" + value = hcloud_server.forgejo.name +} + +output "server_ipv4" { + description = "Server IPv4 address" + value = hcloud_server.forgejo.ipv4_address +} + +output "server_ipv6" { + description = "Server IPv6 address" + value = hcloud_server.forgejo.ipv6_address +} + +output "server_private_ip" { + description = "Server private IP" + value = hcloud_server.forgejo.network[0].ip +} + +output "volume_id" { + description = "Data volume ID" + value = hcloud_volume.forgejo_data.id +} + +output "volume_device" { + description = "Volume device path" + value = "/dev/disk/by-id/scsi-0HC_Volume_$${hcloud_volume.forgejo_data.id}" +} + +output "network_id" { + description = "Network ID" + value = hcloud_network.forgejo.id +} + +output "ssh_command" { + description = "SSH command to connect" + value = "ssh root@$${hcloud_server.forgejo.ipv4_address}" +} + +output "dns_records" { + description = "DNS records to create" + value = var.domain_name != "" ? { + ipv4 = "$${var.domain_name} IN A $${hcloud_server.forgejo.ipv4_address}" + ipv6 = "$${var.domain_name} IN AAAA $${hcloud_server.forgejo.ipv6_address}" + } : {} +} +EOF +} + +# Generate cloudinit provider +generate "cloudinit_provider" { + path = "cloudinit.tf" + if_exists = "overwrite" + contents = < 0 ? scaleway_instance_server.forgejo.private_ips[0].address : null +} + +output "security_group_id" { + description = "Security group ID" + value = scaleway_instance_security_group.forgejo.id +} + +output "volume_id" { + description = "Data volume ID" + value = scaleway_block_volume.forgejo_data.id +} + +output "ssh_command" { + description = "SSH command to connect to server" + value = "ssh root@$${scaleway_instance_ip.forgejo.address}" +} + +output "dns_record" { + description = "DNS A record to create" + value = var.domain_name != "" ? 
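
One detail worth calling out in these generate blocks: Terraform interpolations are written as $${var.project_name}, $${hcloud_volume.forgejo_data.id} and so on. The doubled dollar sign is HCL's escape for a literal ${...} inside the Terragrunt heredoc, so Terragrunt leaves the expression alone and the generated main.tf ends up with a normal Terraform interpolation. A minimal, self-contained illustration of the same pattern (the generate block, file name and variable here are made up and not part of this template):

    # Illustration only; not part of the template.
    locals {
      note = "rendered by Terragrunt"
    }

    generate "example" {
      path      = "example.tf"
      if_exists = "overwrite"
      contents  = <<EOF
    # "${local.note}" is expanded while Terragrunt renders this heredoc.
    # "$${var.server_type}" is escaped once, so the generated example.tf
    # contains a plain Terraform interpolation to be resolved at plan time.
    output "chosen_server_type" {
      value = "$${var.server_type}"
    }
    EOF
    }

Forgetting the escape typically shows up as a Terragrunt evaluation error about an unknown variable while parsing terragrunt.hcl, rather than a Terraform error, which is a useful tell when debugging these blocks.
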
"$${var.domain_name} IN A $${scaleway_instance_ip.forgejo.address}" : "No domain configured" +} +EOF +} + +# Dependencies +dependency "storage" { + config_path = "../storage" + skip_outputs = true + mock_outputs = { + bucket_name = "forgejo-storage" + } +} diff --git a/terraform/scaleway/root.hcl b/terraform/scaleway/root.hcl new file mode 100644 index 0000000..66d2da4 --- /dev/null +++ b/terraform/scaleway/root.hcl @@ -0,0 +1,70 @@ +# Scaleway Root Configuration +# This file contains common configuration for all Scaleway resources + +locals { + # Project configuration + project_name = "forgejo" + environment = "production" + + # Scaleway configuration + region = "fr-par" + zone = "fr-par-1" + + # Tags for resource organization + common_tags = { + Project = "forgejo" + Environment = "production" + ManagedBy = "terragrunt" + } +} + +# Generate provider configuration +generate "provider" { + path = "provider.tf" + if_exists = "overwrite_terragrunt" + contents = <