diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2442877
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,33 @@
+# === Terraform state ===
+*.tfstate.backup
+
+# Crash logs
+crash.log
+crash.*.log
+
+# === Terraform working dir ===
+.terraform/
+.terraform.*
+
+# === Terraform lock files ===
+# The lock file pins provider versions for reproducible runs.
+# Comment out the next line if you do want to commit it.
+.terraform.lock.hcl
+
+# === Local override files ===
+# These are machine/developer specific, never commit them
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# === Sensitive variable files ===
+# (add your own if you keep secrets in *.tfvars)
+*.tfvars
+*.tfvars.json
+*.auto.tfvars
+*.auto.tfvars.json
+# === Other noise ===
+*.bak
+*.swp
+*.tmp
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..1b3b62b
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,299 @@
+stages:
+ - deploy
+
+deploy_watchtower:
+ stage: deploy
+ tags:
+ - shared
+ script:
+ - |
+ set -euo pipefail
+
+ echo "=== [1] Preparing deploy directory ==="
+ mkdir -p /root/docker
+ rm -rf /root/docker/watchtower
+ cp -r watchtower /root/docker/watchtower
+
+ echo "=== [2] Bringing up Watchtower with docker compose ==="
+ cd /root/docker/watchtower
+ docker compose -f watchtower.yml up -d
+
+ CONTAINER_NAME="watchtower"
+
+ echo "=== [3] Waiting a bit for container to (re)start ==="
+ sleep 5
+
+ echo "=== [4] Checking container state ==="
+ docker ps -a --filter "name=${CONTAINER_NAME}"
+ STATUS="$(docker inspect -f '{{.State.Status}}' "${CONTAINER_NAME}")" || STATUS="unknown"
+ echo "Container '${CONTAINER_NAME}' status: ${STATUS}"
+
+ if [ "${STATUS}" != "running" ]; then
+ echo "ERROR: Container '${CONTAINER_NAME}' is not running (status=${STATUS})."
+ echo "Recent logs for ${CONTAINER_NAME}:"
+ docker logs --tail=100 "${CONTAINER_NAME}" || echo "No logs found for ${CONTAINER_NAME}"
+ exit 1
+ fi
+
+ echo "Container '${CONTAINER_NAME}' is running ✅"
+
+ echo "=== [5] Waiting for HEALTHCHECK to become healthy (if defined) ==="
+ MAX_WAIT_SECONDS=120
+ SLEEP_INTERVAL=5
+ ELAPSED=0
+
+ while true; do
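+ # .State.Health is only present when the image or compose file defines a HEALTHCHECK,
+ # so an empty value here means "no healthcheck" rather than a failure.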
+ HEALTH_STATUS="$(docker inspect -f '{{ if .State.Health }}{{ .State.Health.Status }}{{ end }}' "${CONTAINER_NAME}" || true)"
+
+ if [ -z "${HEALTH_STATUS}" ]; then
+ echo "No HEALTHCHECK defined for '${CONTAINER_NAME}', skipping health verification."
+ break
+ fi
+
+ echo "Current health status for '${CONTAINER_NAME}': ${HEALTH_STATUS} (elapsed: ${ELAPSED}s)"
+
+ if [ "${HEALTH_STATUS}" = "healthy" ]; then
+ echo "Container '${CONTAINER_NAME}' health is healthy ✅"
+ break
+ fi
+
+ if [ "${HEALTH_STATUS}" = "unhealthy" ]; then
+ echo "ERROR: Container '${CONTAINER_NAME}' health is 'unhealthy'."
+ docker inspect "${CONTAINER_NAME}" | grep -A5 -B2 '"Health"' || true
+ docker logs --tail=100 "${CONTAINER_NAME}" || true
+ exit 1
+ fi
+
+ if [ "${ELAPSED}" -ge "${MAX_WAIT_SECONDS}" ]; then
+ echo "ERROR: Container '${CONTAINER_NAME}' health did not become 'healthy' within ${MAX_WAIT_SECONDS}s (last status='${HEALTH_STATUS}')."
+ docker inspect "${CONTAINER_NAME}" | grep -A5 -B2 '"Health"' || true
+ docker logs --tail=100 "${CONTAINER_NAME}" || true
+ exit 1
+ fi
+
+ sleep "${SLEEP_INTERVAL}"
+ ELAPSED=$((ELAPSED + SLEEP_INTERVAL))
+ done
+
+ echo "=== [6] Deployment completed successfully ✅ ==="
+ only:
+ - main
+deploy_jellyfin:
+ stage: deploy
+ tags:
+ - shared
+ script:
+ - |
+ set -euo pipefail
+
+ DEPLOY_DIR="/root/docker/jellyfin"
+ COMPOSE_FILE="${DEPLOY_DIR}/jellyfin.yml"
+ CONTAINER_NAME="jellyfin"
+
+ echo "=== [1] Preparing deploy directory (safe) ==="
+ # We ONLY touch /root/docker/jellyfin, never your data directories.
+ mkdir -p "${DEPLOY_DIR}"
+
+ # Copy just the compose file from the repo to the deploy dir
+ cp jellyfin/jellyfin.yml "${COMPOSE_FILE}"
+
+ echo "=== [2] Bringing up Jellyfin with docker compose ==="
+ cd "${DEPLOY_DIR}"
+ docker compose -f jellyfin.yml pull
+ docker compose -f jellyfin.yml up -d
+
+ echo "=== [3] Checking container state ==="
+ docker ps -a --filter "name=${CONTAINER_NAME}"
+
+ STATUS="$(docker inspect -f '{{.State.Status}}' "${CONTAINER_NAME}")" || STATUS="unknown"
+ echo "Container '${CONTAINER_NAME}' status: ${STATUS}"
+
+ if [ "${STATUS}" != "running" ]; then
+ echo "ERROR: Container '${CONTAINER_NAME}' is not running (status=${STATUS})."
+ echo "Recent logs for ${CONTAINER_NAME}:"
+ docker logs --tail=100 "${CONTAINER_NAME}" || echo "No logs found for ${CONTAINER_NAME}"
+ exit 1
+ fi
+
+ echo "Container '${CONTAINER_NAME}' is running ✅"
+
+ echo "=== [4] Deployment of Jellyfin completed successfully ✅ ==="
+ only:
+ - main
+deploy_plex:
+ stage: deploy
+ tags:
+ - shared
+ script:
+ - |
+ set -euo pipefail
+
+ DEPLOY_DIR="/root/docker/plex"
+ COMPOSE_FILE="${DEPLOY_DIR}/plex.yml"
+ CONTAINER_NAME="plex"
+
+ echo "=== [1] Preparing deploy directory for Plex (safe) ==="
+ # Only ever touch /root/docker/plex, never your media/config paths.
+ mkdir -p "${DEPLOY_DIR}"
+
+ # Copy just the compose file from the repo into the deploy dir
+ cp plex/plex.yml "${COMPOSE_FILE}"
+
+ echo "=== [2] Bringing up Plex with docker compose ==="
+ cd "${DEPLOY_DIR}"
+ docker compose -f plex.yml pull
+ docker compose -f plex.yml up -d
+
+ echo "=== [3] Checking Plex container state ==="
+ docker ps -a --filter "name=${CONTAINER_NAME}"
+
+ STATUS="$(docker inspect -f '{{.State.Status}}' "${CONTAINER_NAME}")" || STATUS="unknown"
+ echo "Container '${CONTAINER_NAME}' status: ${STATUS}"
+
+ if [ "${STATUS}" != "running" ]; then
+ echo "ERROR: Container '${CONTAINER_NAME}' is not running (status=${STATUS})."
+ echo "Recent logs for ${CONTAINER_NAME}:"
+ docker logs --tail=100 "${CONTAINER_NAME}" || echo "No logs found for ${CONTAINER_NAME}"
+ exit 1
+ fi
+
+ echo "Plex container '${CONTAINER_NAME}' is running ✅"
+ echo "=== [4] Plex deployment completed successfully ✅ ==="
+ only:
+ - main
+
+deploy_wg_easy:
+ stage: deploy
+ tags:
+ - shared
+ script:
+ - |
+ set -euo pipefail
+
+ DEPLOY_DIR="/root/docker/wg-easy"
+ COMPOSE_FILE="${DEPLOY_DIR}/wg-easy.yml"
+ CONTAINER_NAME="wg-easy"
+
+ echo "=== [1] Preparing deploy directory for wg-easy (safe) ==="
+ mkdir -p "${DEPLOY_DIR}"
+ cp wg-easy/wg-easy.yml "${COMPOSE_FILE}"
+
+ echo "=== [2] Bringing up wg-easy with docker compose ==="
+ cd "${DEPLOY_DIR}"
+ docker compose -f wg-easy.yml pull
+ docker compose -f wg-easy.yml up -d
+
+ echo "=== [3] Checking wg-easy container state ==="
+ docker ps -a --filter "name=${CONTAINER_NAME}"
+ STATUS="$(docker inspect -f '{{.State.Status}}' "${CONTAINER_NAME}")" || STATUS="unknown"
+ echo "Container '${CONTAINER_NAME}' status: ${STATUS}"
+
+ if [ "${STATUS}" != "running" ]; then
+ echo "ERROR: Container '${CONTAINER_NAME}' is not running (status=${STATUS})."
+ echo "Recent logs for ${CONTAINER_NAME}:"
+ docker logs --tail=100 "${CONTAINER_NAME}" || echo "No logs found for ${CONTAINER_NAME}"
+ exit 1
+ fi
+
+ echo "wg-easy container '${CONTAINER_NAME}' is running ✅"
+ echo "=== [4] wg-easy deployment completed successfully ✅ ==="
+ only:
+ - main
+deploy_adguard:
+ stage: deploy
+ tags:
+ - shared # make sure your runner has this tag
+ script:
+ - |
+ set -euo pipefail
+
+ DEPLOY_DIR="/root/docker/adguard"
+ COMPOSE_FILE="${DEPLOY_DIR}/adguard.yml"
+ CONTAINER_NAME="adguardhome"
+
+ echo "=== [1] Preparing deploy directory for AdGuard Home ==="
+ # Only manage /root/docker/adguard — we do NOT touch Docker volumes.
+ mkdir -p "${DEPLOY_DIR}"
+
+ # Copy the compose file from the repo to the deploy directory
+ cp adguard/adguard.yml "${COMPOSE_FILE}"
+
+ echo "=== [2] Running docker compose (pull + up -d) ==="
+ cd "${DEPLOY_DIR}"
+ docker compose -f adguard.yml pull
+ docker compose -f adguard.yml up -d
+
+ echo "=== [3] Checking container state ==="
+ docker ps -a --filter "name=${CONTAINER_NAME}"
+ STATUS="$(docker inspect -f '{{.State.Status}}' "${CONTAINER_NAME}")" || STATUS="unknown"
+ echo "Container '${CONTAINER_NAME}' status: ${STATUS}"
+
+ if [ "${STATUS}" != "running" ]; then
+ echo "❌ ERROR: AdGuard Home is not running (status=${STATUS})"
+ echo "Recent logs:"
+ docker logs --tail=100 "${CONTAINER_NAME}" || echo "No logs found"
+ exit 1
+ fi
+
+ echo "✅ AdGuard Home container is running."
+
+ echo "=== [4] Verifying static IP on hurricane network ==="
+ # Ask Docker which IP the container has on the "hurricane" network specifically.
+ IP_ON_HURRICANE="$(docker inspect -f '{{ with (index .NetworkSettings.Networks "hurricane") }}{{ .IPAddress }}{{ end }}' "${CONTAINER_NAME}" 2>/dev/null || true)"
+
+ # Fallback: take whatever IP Docker reports if the network-specific lookup returned nothing.
+ if [ -z "${IP_ON_HURRICANE}" ]; then
+ IP_ON_HURRICANE="$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${CONTAINER_NAME}" || true)"
+ fi
+
+ echo "AdGuard Home IP (as seen by Docker): ${IP_ON_HURRICANE}"
+
+ echo "=== [5] Deployment completed successfully ✅ ==="
+ only:
+ - main # run this deploy only when pushing to main
+
+deploy_portainer:
+ stage: deploy
+ tags:
+ - shared
+ script: |
+ set -euo pipefail
+
+ echo "=== [1] Preparing deploy directory for Portainer ==="
+ mkdir -p /root/docker/portainer
+ cp portainer/portainer.yml /root/docker/portainer/portainer.yml
+
+ echo "=== [2] Bringing up Portainer ==="
+ cd /root/docker/portainer
+ docker compose -f portainer.yml pull
+ docker compose -f portainer.yml up -d
+
+ echo "=== [3] Checking container status ==="
+ sleep 3
+ docker ps --filter "name=portainer"
+ only:
+ - main
+
+deploy_nextcloud:
+ stage: deploy
+ tags:
+ - shared
+ script: |
+ set -euo pipefail
+
+ echo "=== [1] Preparing nextcloud deploy directory ==="
+ mkdir -p /root/docker/nextcloud
+
+ echo "Copying compose and env files..."
+ cp nextcloud/nextcloud.yml /root/docker/nextcloud/nextcloud.yml
+ cp nextcloud/.env /root/docker/nextcloud/.env
+
+ echo "=== [2] Bringing up Nextcloud with docker compose ==="
+ cd /root/docker/nextcloud
+ docker compose -f nextcloud.yml pull
+ docker compose -f nextcloud.yml up -d
+
+ echo "=== [3] Checking Nextcloud container status ==="
+ sleep 5
+ docker ps --filter "name=nextcloud"
+ only:
+ - main
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 0000000..73a0fac
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,3 @@
+[defaults]
+interpreter_python = auto_silent
+host_key_checking = False
\ No newline at end of file
diff --git a/ansible/inventory.ini b/ansible/inventory.ini
new file mode 100644
index 0000000..2d3d63d
--- /dev/null
+++ b/ansible/inventory.ini
@@ -0,0 +1,12 @@
+[linux]
+general ansible_host=100.120.57.49 ansible_port=54321
+k3s1 ansible_host=100.117.166.126 ansible_port=54321
+k3s2 ansible_host=100.64.200.58 ansible_port=54321
+k3s3 ansible_host=100.83.32.18 ansible_port=54321
+loadbalancer ansible_host=100.75.102.81 ansible_port=54321
+nl ansible_host=100.92.32.17 ansible_port=54321
+pve ansible_host=100.102.23.33
+storage01 ansible_host=100.92.109.78
+ovh ansible_host=p.h-y.st
+uk ansible_host=100.122.107.18 ansible_port=54321
+us ansible_host=100.126.105.9 ansible_port=54321
diff --git a/ansible/mount.sh b/ansible/mount.sh
new file mode 100644
index 0000000..30507bf
--- /dev/null
+++ b/ansible/mount.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+ansible-playbook -i inventory.ini ssh.yml --vault-password-file ~/.vault_pass.txt
+ansible-playbook -i inventory.ini update.yml --vault-password-file ~/.vault_pass.txt
+ansible-playbook -i inventory.ini fail2ban.yml --vault-password-file ~/.vault_pass.txt
+ansible-playbook -i inventory.ini smb.yml --vault-password-file ~/.vault_pass.txt
\ No newline at end of file
diff --git a/ansible/ssh.yml b/ansible/ssh.yml
new file mode 100644
index 0000000..457897f
--- /dev/null
+++ b/ansible/ssh.yml
@@ -0,0 +1,108 @@
+---
+- name: Phase 1 - Bootstrap SSH Keys and Packages
+ hosts: linux
+ remote_user: root
+ vars_files:
+ - vault.yml
+ tasks:
+ - name: Ensure .ssh directory exists
+ file:
+ path: /root/.ssh
+ state: directory
+ mode: '0700'
+
+ - name: Deploy SSH Identity (Private and Public Keys)
+ copy:
+ dest: "/root/.ssh/{{ item.file }}"
+ content: "{{ item.content }}"
+ mode: "{{ item.mode }}"
+ loop:
+ - { file: 'id_ed25519', content: "{{ my_private_key }}", mode: '0400' }
+ - { file: 'id_ed25519.pub', content: "{{ my_public_key }}", mode: '0644' }
+ when: my_private_key is defined
+
+ - name: Authorize Public Key for Root
+ authorized_key:
+ user: root
+ key: "{{ my_public_key }}"
+ when: my_public_key is defined
+
+ - name: Configure Passwordless Sudo for Zeshan
+ copy:
+ dest: /etc/sudoers.d/zeshan
+ content: "zeshan ALL=(ALL) NOPASSWD: ALL"
+ mode: '0440'
+
+- name: Phase 2 - Secure SSH Port
+ hosts: linux
+ become: yes
+ vars:
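+ # Keep in sync with the ansible_port values in inventory.ini.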
+ new_ssh_port: 54321
+ tasks:
+ - name: Handle SELinux for custom SSH port (RHEL)
+ block:
+ - name: Install SELinux management tools
+ package:
+ name: policycoreutils-python-utils
+ state: present
+ - name: Allow SSH on custom port in SELinux
+ seport:
+ ports: "{{ new_ssh_port }}"
+ proto: tcp
+ setype: ssh_port_t
+ state: present
+ when: ansible_os_family == 'RedHat'
+
+ - name: Configure SSH Port in sshd_config
+ lineinfile:
+ path: /etc/ssh/sshd_config
+ regexp: '^#?Port\s+'
+ line: "Port {{ new_ssh_port }}"
+ notify: Restart SSH
+
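+ # On hosts where sshd is socket-activated (e.g. newer Ubuntu releases), ssh.socket
+ # owns the listening port and the Port directive above is ignored, so the socket
+ # override below is needed as well.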
+ - name: Handle Systemd Socket Activation (Debian/Ubuntu)
+ block:
+ - name: Check if SSH socket exists
+ stat:
+ path: /lib/systemd/system/ssh.socket
+ register: ssh_socket_file
+
+ - name: Create socket override directory
+ file:
+ path: /etc/systemd/system/ssh.socket.d
+ state: directory
+ when: ssh_socket_file.stat.exists
+
+ - name: Set Port in Systemd Socket Override
+ copy:
+ dest: /etc/systemd/system/ssh.socket.d/addresses.conf
+ content: |
+ [Socket]
+ ListenStream=
+ ListenStream={{ new_ssh_port }}
+ when: ssh_socket_file.stat.exists
+ notify:
+ - Reload Systemd
+ - Stop SSH Service
+ - Restart SSH Socket
+
+ handlers:
+ - name: Reload Systemd
+ systemd:
+ daemon_reload: yes
+
+ - name: Stop SSH Service
+ service:
+ name: ssh
+ state: stopped
+ when: ansible_os_family == 'Debian'
+
+ - name: Restart SSH Socket
+ service:
+ name: ssh.socket
+ state: restarted
+
+ - name: Restart SSH
+ service:
+ name: "{{ (ansible_os_family == 'Debian') | ternary('ssh', 'sshd') }}"
+ state: restarted
diff --git a/ansible/update.yml b/ansible/update.yml
new file mode 100644
index 0000000..1aaffe9
--- /dev/null
+++ b/ansible/update.yml
@@ -0,0 +1,65 @@
+---
+- name: Universal Linux System Maintenance
+ hosts: linux
+ remote_user: root
+ # Gather facts once at the start to determine OS family
+ gather_facts: yes
+
+ tasks:
+ # --- DEBIAN / UBUNTU / PROXMOX ---
+ - name: Debian-based Maintenance
+ when: ansible_os_family == "Debian"
+ block:
+ - name: Update apt cache and upgrade all packages
+ apt:
+ upgrade: dist
+ update_cache: yes
+ cache_valid_time: 3600
+
+ - name: Install baseline toolset (Debian)
+ apt:
+ name:
+ - htop
+ - make
+ - git
+ - curl
+ - samba
+ - fail2ban
+ - sshpass
+ - sudo
+ state: present
+
+ - name: Remove obsolete packages and kernels
+ apt:
+ autoremove: yes
+ autoclean: yes
+
+ # --- RHEL / ALMALINUX / ROCKY ---
+ - name: RedHat-based Maintenance
+ when: ansible_os_family == "RedHat"
+ block:
+ - name: Upgrade all packages (DNF)
+ dnf:
+ name: "*"
+ state: latest
+ update_cache: yes
+
+ - name: Install baseline toolset (RHEL)
+ dnf:
+ name: [htop, make, nano, git, curl, fail2ban, samba, sshpass]
+ state: present
+
+ - name: Clean DNF metadata and cache
+ command: dnf clean all
+ changed_when: false
+
+ # --- FINAL CHECK ---
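+ # /var/run/reboot-required is a Debian/Ubuntu convention; on RHEL hosts the file
+ # never exists, so the notice below is simply skipped there.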
+ - name: Check if reboot is required
+ stat:
+ path: /var/run/reboot-required
+ register: reboot_required_file
+
+ - name: Notify if reboot is needed
+ debug:
+ msg: "Host {{ inventory_hostname }} requires a reboot to apply updates."
+ when: reboot_required_file.stat.exists
\ No newline at end of file
diff --git a/ansible/vault.yml b/ansible/vault.yml
new file mode 100644
index 0000000..cad0439
--- /dev/null
+++ b/ansible/vault.yml
@@ -0,0 +1,50 @@
+$ANSIBLE_VAULT;1.1;AES256
+64376334353039653233386464663633646238333537623265623334633061633337353161376638
+6532373239376635333664653866343239393062316439650a353063653131363166353931333237
+66646661393663376263323565626331353137323330343664633230373732616566353231623631
+6263376364633036630a393839306239383066623436356235393263373438623630396139326536
+32636630336136646636623932383337386331616230663063366337303836633065346239616261
+32333439363664306235366366346238653735383231393530633833323131333937663065353738
+34666135656366313633373839376138396566653132353637616261343264366436326337373130
+30323435613438363432393431313666653063323333633338626265313531356164633764343837
+34363466616462626436633939303538333531336537393131306136336663636538303739343030
+63393930343530626363666364626537636231343334393132386537386537356237396634323234
+34366464313864636639623037613666326430376239386439646665333966653938613465366565
+35616462333762386532616634656534616365643030653335353132636462633666363639353738
+31636435623333656262323565383161316164363239643531616162623865666266616639366365
+36376634386535333765383366323939386133633230373539343936376239646465373266313635
+31303266353732616533663433626635383962626566396233323265316437326238326131336538
+30623365633765383138306537303266336436386631396435663366626531656230336565376630
+32303933343131346161633437363738363336336666666264653532316334633833653134383738
+32373730343833376334376439376166376666356265366565306562303539333061633061353861
+62353533326238313961613864346432313937356430373134626265366638393036373930326364
+34323630313964396165393761613764643633643738353838626238636433366134613333633565
+65346635356538386164303034323332303736353038656364373435343037336536363439393733
+39333833623033393062346261376666643236663132343265613438313639326532383132636338
+36623463383935306164656435383938376138323637653964306364313534306564363562633130
+39386236373333643165363231306132663036373232653236316230356533373338363636306639
+39313130626337353663633465643030363466356238626364363132353033366237353834376564
+65346638383538313861343163653435633734343230313737623136373832323537616438633130
+62623536343935333539646537383930633339316535343364623230353861336239373535633136
+64366231656238633233333834663831366538653837613137643163613330333538373362343733
+62373839396666366266646435336561643031393763396366646262666165616130656437396238
+64666236623534353536356562616132646561623165346265326231363963323365393336316565
+66363963346164623962393463646130323138613132383436396562613137343034303634323237
+36326364333731633233393265306462303038383762356233346266663363653034623164353236
+31656339353264316337633166383734343335303331376261626531366165326239363836316566
+36666466653230393137316234306363356433613964393065313563303062363065393232613265
+34333764336434373361326462643739363361376231623265323564653963313337616363353936
+32313934343037333836653235653761623133373635626433313935613734653336376633653933
+65353063323431646437643032383835613865343130336366646338663430346336396163396538
+30386339663930666539346666303664663836353435666164346635383237343431633730616336
+34636539343466346433373138323664653561343532383536313738633831613931383635323434
+30316136383434346437633562383934353764366537646566646239653136623163393130326538
+37393439383232363266646564623134323439353834353139303066633539363738303932623436
+36373439363961613337666532633933336566386330353534306363313436333763333465373861
+33386537346462656430373363303235663565313538353732303064363365343734626237393830
+30316362333738636237393733313234336536643338626134613065363862643962333836663639
+37343637326538363635343032353936333938666430346633323966653438636265356532313066
+37336131396138636438383163613933353130623837346561386638366562343862376266323833
+37316231346532623934303962633365656433663661333062303033656133336563356435333164
+34653735303865396330653931336362353334373935653566326166623863616461616635636231
+3239
diff --git a/client.ovpn b/client.ovpn
new file mode 100644
index 0000000..7e855e1
--- /dev/null
+++ b/client.ovpn
@@ -0,0 +1,78 @@
+client
+proto udp
+explicit-exit-notify
+remote 154.41.135.47 8080
+dev tun
+resolv-retry infinite
+nobind
+persist-key
+persist-tun
+remote-cert-tls server
+verify-x509-name server_cyni6qc4D05RIAyn name
+auth SHA256
+auth-nocache
+cipher AES-128-GCM
+tls-client
+tls-version-min 1.2
+tls-cipher TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256
+ignore-unknown-option block-outside-dns
+setenv opt block-outside-dns # Prevent Windows 10 DNS leak
+verb 3
+
+-----BEGIN CERTIFICATE-----
+MIIB1zCCAX2gAwIBAgIUcv+sBGhEfd1XbXBndZcYoaVvB4cwCgYIKoZIzj0EAwIw
+HjEcMBoGA1UEAwwTY25fcDhocDdNWGV3eFZzdWtJQTAeFw0yNTEwMDUxODIyNTJa
+Fw0zNTEwMDMxODIyNTJaMB4xHDAaBgNVBAMME2NuX3A4aHA3TVhld3hWc3VrSUEw
+WTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATBFzEmd5ULOahTH2L7cB87t/WqkmOw
+035tgz6BmPzuIeJhPfnlbSUO1PwtdcjVbAVYxIUiWUGFa+3Y7kVXz1C3o4GYMIGV
+MAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFLsU5suHhUaH2i86fkSeKlC8BZFnMFkG
+A1UdIwRSMFCAFLsU5suHhUaH2i86fkSeKlC8BZFnoSKkIDAeMRwwGgYDVQQDDBNj
+bl9wOGhwN01YZXd4VnN1a0lBghRy/6wEaER93VdtcGd1lxihpW8HhzALBgNVHQ8E
+BAMCAQYwCgYIKoZIzj0EAwIDSAAwRQIhAKTF9TUEK6qvR9eKdsm+g+jUOPgSa2oz
+8hCMVmYBFE5TAiA+RRfPqAHwzKeXXodSrw7PsACKpdPeUX29U6QRxdqvrA==
+-----END CERTIFICATE-----
+
+
+-----BEGIN CERTIFICATE-----
+MIIB2TCCAX+gAwIBAgIRAOrCPSzfYpHv2Bbgwps7LbYwCgYIKoZIzj0EAwIwHjEc
+MBoGA1UEAwwTY25fcDhocDdNWGV3eFZzdWtJQTAeFw0yNTEwMDUxODIzMDJaFw0z
+NTEwMDMxODIzMDJaMBExDzANBgNVBAMMBnJvdXRlcjBZMBMGByqGSM49AgEGCCqG
+SM49AwEHA0IABFjSYA3bpnjB0fm/o45ay71tSCqvlllj7YllWEdwI2NDqtmSA2S6
+MjA4doQ1hiKCxLuk4tj1bZHNy5a2805nmiajgaowgacwCQYDVR0TBAIwADAdBgNV
+HQ4EFgQUl2uced7PlC3MB/2OhiVPEbaNcWkwWQYDVR0jBFIwUIAUuxTmy4eFRofa
+Lzp+RJ4qULwFkWehIqQgMB4xHDAaBgNVBAMME2NuX3A4aHA3TVhld3hWc3VrSUGC
+FHL/rARoRH3dV21wZ3WXGKGlbweHMBMGA1UdJQQMMAoGCCsGAQUFBwMCMAsGA1Ud
+DwQEAwIHgDAKBggqhkjOPQQDAgNIADBFAiAycBNgtvXYK103GWt1pr11EyqrFc37
+/g+5LzRN4E+CbAIhANCS7h72bw+t1Swk7UPSYiUOJPS6ZjxETXOr0W78FHwx
+-----END CERTIFICATE-----
+
+
+-----BEGIN PRIVATE KEY-----
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgRZ9xWBze4ypq7ipO
+klOaIuJDTiiKocZT6SxVZT41LEmhRANCAARY0mAN26Z4wdH5v6OOWsu9bUgqr5ZZ
+Y+2JZVhHcCNjQ6rZkgNkujIwOHaENYYigsS7pOLY9W2RzcuWtvNOZ5om
+-----END PRIVATE KEY-----
+
+
+#
+# 2048 bit OpenVPN static key
+#
+-----BEGIN OpenVPN Static key V1-----
+6d95023c7fe6dd1793518359f8bfce71
+5de38910df988a47d4253bf3fe68629b
+71d27ac6b49aaef68a22f6d3b80a63a3
+d1d57565d78f78ae4cae0b782169facd
+9a3245acbbb4a80059c758bb066433e8
+b080ed19a1523344f0b3785a429b5f76
+b0c2051a3d90437b624c09b1959e3b73
+dddcf6b34282a5e84595b053f0d50340
+d8ac7b9fd4fecd4a45350cede6c55fe2
+22ded165b513c567adebc6181cd4fef9
+dbd5387e25b83f9b23be67e1866c22ac
+3cd5961b401dcc6d6a022248dbe927e4
+56ba8a804f01804ea8c7d48e19e96e55
+6c47f25b9affcc71a6ae6beadd287e53
+0aaa69d91c004cc5a22cf8a53bdf7b76
+f1a3de39124c9cdc90f04ba549e578e4
+-----END OpenVPN Static key V1-----
+
diff --git a/cloudflare/Dockerfile b/cloudflare/Dockerfile
new file mode 100644
index 0000000..637aed6
--- /dev/null
+++ b/cloudflare/Dockerfile
@@ -0,0 +1,18 @@
+FROM alpine:latest
+
+# Install tools
+RUN apk add --no-cache curl bash jq
+
+# Copy the script
+COPY cloudflare-ddns.sh /usr/local/bin/cloudflare-ddns.sh
+
+# Strip Windows (CRLF) line endings in case the script was saved on Windows
+RUN sed -i 's/\r$//' /usr/local/bin/cloudflare-ddns.sh
+
+# Make it executable
+RUN chmod +x /usr/local/bin/cloudflare-ddns.sh
+
+# Setup cron (Runs every 5 minutes and logs to Docker)
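+# Cron output is redirected to /proc/1/fd/1 (PID 1's stdout) so it appears in `docker logs`.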
+RUN echo "*/5 * * * * /usr/local/bin/cloudflare-ddns.sh > /proc/1/fd/1 2>&1" > /etc/crontabs/root
+
+CMD ["crond", "-f", "-l", "2"]
\ No newline at end of file
diff --git a/cloudflare/checkip.sh b/cloudflare/checkip.sh
new file mode 100644
index 0000000..c97d7b4
--- /dev/null
+++ b/cloudflare/checkip.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+LOG_FILE="/var/log/public_ip_monitor.log"
+LAST_IP_FILE="/var/log/last_ip.txt"
+MAKE_DIR="/root/hurricane/cloudflare/zones" # CHANGE THIS to your Makefile directory
+
+# Make sure log files exist
+touch "$LOG_FILE"
+touch "$LAST_IP_FILE"
+
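+# Long-running loop: check the public IP every 5 minutes and re-run Terraform when it changes.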
+while true; do
+ TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
+ echo "============================================" | tee -a "$LOG_FILE"
+ echo "[$TIMESTAMP] Checking public IP..." | tee -a "$LOG_FILE"
+
+ IP=""
+ METHOD=""
+
+ # Try api.ipify.org
+ echo "[$TIMESTAMP] Trying api.ipify.org..." | tee -a "$LOG_FILE"
+ IP=$(curl -s --max-time 10 https://api.ipify.org)
+
+ if [[ -n "$IP" ]]; then
+ METHOD="api.ipify.org"
+ echo "[$TIMESTAMP] SUCCESS: Retrieved IP: $IP" | tee -a "$LOG_FILE"
+ else
+ echo "[$TIMESTAMP] FAILED: api.ipify.org did not return an IP." | tee -a "$LOG_FILE"
+
+ echo "[$TIMESTAMP] Trying ifconfig.me..." | tee -a "$LOG_FILE"
+ IP=$(curl -s --max-time 10 https://ifconfig.me)
+
+ if [[ -n "$IP" ]]; then
+ METHOD="ifconfig.me"
+ echo "[$TIMESTAMP] SUCCESS: Retrieved IP: $IP" | tee -a "$LOG_FILE"
+ else
+ echo "[$TIMESTAMP] FAILED: ifconfig.me did not return an IP." | tee -a "$LOG_FILE"
+
+ echo "[$TIMESTAMP] Trying dig opendns..." | tee -a "$LOG_FILE"
+ IP=$(dig +short myip.opendns.com @resolver1.opendns.com)
+
+ if [[ -n "$IP" ]]; then
+ METHOD="dig opendns"
+ echo "[$TIMESTAMP] SUCCESS: Retrieved IP: $IP" | tee -a "$LOG_FILE"
+ else
+ IP="FAILED TO RESOLVE"
+ METHOD="All methods failed"
+ echo "[$TIMESTAMP] ERROR: All methods failed to retrieve IP." | tee -a "$LOG_FILE"
+ fi
+ fi
+ fi
+
+ if [[ "$IP" == "FAILED TO RESOLVE" ]]; then
+ echo "[$TIMESTAMP] ERROR: Could not determine public IP." | tee -a "$LOG_FILE"
+ else
+ # Read previous IP
+ LAST_IP=$(cat "$LAST_IP_FILE")
+
+ if [[ "$IP" != "$LAST_IP" ]]; then
+ echo "[$TIMESTAMP] Detected IP change: $LAST_IP --> $IP" | tee -a "$LOG_FILE"
+
+ echo "[$TIMESTAMP] Running 'make apply' in $MAKE_DIR..." | tee -a "$LOG_FILE"
+ cd "$MAKE_DIR"
+ make apply >> "$LOG_FILE" 2>&1
+
+ echo "$IP" > "$LAST_IP_FILE"
+ echo "[$TIMESTAMP] 'make apply' completed." | tee -a "$LOG_FILE"
+ else
+ echo "[$TIMESTAMP] No change detected. No action taken." | tee -a "$LOG_FILE"
+ fi
+ fi
+
+ echo "[$TIMESTAMP] Sleeping 5 minutes..." | tee -a "$LOG_FILE"
+ sleep 300
+done
+
+
diff --git a/cloudflare/cloudflare-ddns.sh b/cloudflare/cloudflare-ddns.sh
new file mode 100644
index 0000000..3bc4e34
--- /dev/null
+++ b/cloudflare/cloudflare-ddns.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+echo "[$(date)] --- DDNS Check Started ---"
+
+# 1. Validate variables are not empty
+if [ -z "$CF_API_TOKEN" ] || [ -z "$CF_ZONE_ID" ] || [ -z "$CF_RECORD_NAME" ]; then
+ echo "ERROR: One or more environment variables (TOKEN, ZONE_ID, RECORD_NAME) are missing!"
+ exit 1
+fi
+
+# 2. Get current public IP (Trying two services for reliability)
+NEW_IP=$(curl -s https://api.ipify.org || curl -s https://ifconfig.me/ip)
+
+if [[ ! $NEW_IP =~ ^[0-9]{1,3}(\.[0-9]{1,3}){3}$ ]]; then
+ echo "ERROR: Could not get a valid Public IP."
+ exit 1
+fi
+
+# 3. Get the current Record info from Cloudflare
+RECORD_DATA=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records?name=$CF_RECORD_NAME&type=A" \
+ -H "Authorization: Bearer $CF_API_TOKEN" \
+ -H "Content-Type: application/json")
+
+# Extract Record ID and Current IP from the JSON response
+RECORD_ID=$(echo "$RECORD_DATA" | jq -r '.result[0].id // empty')
+OLD_IP=$(echo "$RECORD_DATA" | jq -r '.result[0].content // empty')
+
+if [ -z "$RECORD_ID" ]; then
+ echo "ERROR: Could not find DNS record for $CF_RECORD_NAME. Check your Zone ID and Name."
+ exit 1
+fi
+
+# 4. Compare and Update
+if [ "$NEW_IP" = "$OLD_IP" ]; then
+ echo "IP is still $OLD_IP. No update needed."
+else
+ echo "IP changed from $OLD_IP to $NEW_IP. Updating Cloudflare..."
+
+ UPDATE_RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records/$RECORD_ID" \
+ -H "Authorization: Bearer $CF_API_TOKEN" \
+ -H "Content-Type: application/json" \
+ --data "{\"type\":\"A\",\"name\":\"$CF_RECORD_NAME\",\"content\":\"$NEW_IP\",\"ttl\":120,\"proxied\":false}")
+
+ SUCCESS=$(echo "$UPDATE_RESPONSE" | jq -r '.success')
+
+ if [ "$SUCCESS" = "true" ]; then
+ echo "SUCCESS: Cloudflare updated to $NEW_IP"
+ else
+ echo "FAILURE: Update failed. Response: $UPDATE_RESPONSE"
+ fi
+fi
\ No newline at end of file
diff --git a/cloudflare/docker-compose.yml b/cloudflare/docker-compose.yml
new file mode 100644
index 0000000..82d67f6
--- /dev/null
+++ b/cloudflare/docker-compose.yml
@@ -0,0 +1,9 @@
+services:
+ cloudflare-updater:
+ build: .
+ container_name: cloudflare-ddns
+ restart: always
+ environment:
+ - CF_API_TOKEN=tDRW0bR8oiRI3xLYAOIGT_FVqIejif7hqk93W2Sc
+ - CF_ZONE_ID=0f670677e7c36e9fe8f8e6a1d1c72cbf
+ - CF_RECORD_NAME=home.ztariq.com
\ No newline at end of file
diff --git a/cloudflare/terraform.sh b/cloudflare/terraform.sh
new file mode 100644
index 0000000..e73fbf2
--- /dev/null
+++ b/cloudflare/terraform.sh
@@ -0,0 +1,27 @@
+docker run --rm -it -v "$PWD":/app -w /app hashicorp/terraform:latest init
+export AWS_ACCESS_KEY_ID="696EwxMMRUABP"
+export AWS_SECRET_ACCESS_KEY="Ow5uqEka8Uzk0ea4Ag4wPacO4tiz5MsQV3JF4GuK"
+export AWS_DEFAULT_REGION="eus3"
+export AWS_EC2_METADATA_DISABLED=true
+aws --endpoint-url https://YOUR-S3-ENDPOINT s3 ls s3://terraform
+
+
+docker run --rm -it \
+ -v "$PWD":/app -w /app \
+ -e AWS_ACCESS_KEY_ID \
+ -e AWS_SECRET_ACCESS_KEY \
+ -e AWS_DEFAULT_REGION=us-east-1 \
+ hashicorp/terraform:latest apply
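+
+# Illustrative only: the zone configs declare cloudflare_apitoken, zone_id and
+# current_ip, which could be passed as -var flags (values below are placeholders):
+# docker run --rm -it -v "$PWD":/app -w /app hashicorp/terraform:latest apply \
+#   -var "cloudflare_apitoken=$CF_API_TOKEN" \
+#   -var "zone_id=$CF_ZONE_ID" \
+#   -var "current_ip=$(curl -s https://api.ipify.org)"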
+
+# Ports: 8384 = Web UI, 22000/tcp = sync, 22000/udp = QUIC, 21027/udp = local discovery
+docker run -d \
+ --name syncthing \
+ -e PUID=1000 \
+ -e PGID=1000 \
+ -e TZ=Europe/London \
+ -p 8384:8384 \
+ -p 22000:22000/tcp \
+ -p 22000:22000/udp \
+ -p 21027:21027/udp \
+ -v ./sync/config:/config \
+ -v ./sync/data:/data \
+ lscr.io/linuxserver/syncthing:latest
diff --git a/cloudflare/zones/azuredevops/backend.tf b/cloudflare/zones/azuredevops/backend.tf
new file mode 100644
index 0000000..e69de29
diff --git a/cloudflare/zones/azuredevops/main.tf b/cloudflare/zones/azuredevops/main.tf
new file mode 100644
index 0000000..b11622c
--- /dev/null
+++ b/cloudflare/zones/azuredevops/main.tf
@@ -0,0 +1,41 @@
+terraform {
+ required_providers {
+ cloudflare = {
+ source = "cloudflare/cloudflare"
+ version = "~> 5"
+ }
+ }
+}
+
+provider "cloudflare" {
+ api_token = var.cloudflare_apitoken
+}
+locals {
+ azure_records = {
+ root_a = { name = "@", type = "A", content = "185.139.7.37", ttl = 1, proxied = true }
+ root_uk_aaaa = { name = "@", type = "AAAA", content = "2a12:ab46:5344:fd::a", ttl = 1, proxied = true }
+
+ autodiscover = { name = "autodiscover", type = "CNAME", content = "eu1.workspace.org.", ttl = 360, proxied = false }
+ mail = { name = "mail", type = "CNAME", content = "eu1.workspace.org.", ttl = 360, proxied = false }
+ mail_mx = { name = "@", type = "MX", content = "eu1.workspace.org.", priority = 10, ttl = 360, proxied = false }
+
+ spf_txt = { name = "@", content = "v=spf1 include:_spf.workspace.org -all", type = "TXT", ttl = 3600 }
+ verify_txt = { name = "workspace-verification", content = "44856072-5cde-458d-86c9-c8f86c0ab7bd", type = "TXT", ttl = 360 }
+ dkim_txt = { name = "cf8DDF69382578883._domainKey", content = "v=DKIM1;k=rsa;h=sha256;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr/Mu/P1bfiMIGkHNnvhLB1oVcAaSOg4QoKTCF9N6F/eVV7JCoERTSSHiMyS74V/xq0i3kUJYjspFgrXKicVaEl6jHmRJ4jSyb2b52frWzLakW1SB9LJwXZ/n0PDm90iSPToQOEvQTSl+pg9B9RWfhqr3Tv5hz9YvsjQP1tn7yNwJSbyhU944PWZimu0ryqwAQyLGNP+CsIeMTinwe0B8Rdtc52TusInwhcMddL9XgGYi/IsWsuri85R5yvzIOKk/sklfuDHOSQoCap7RW+Lm22B/DzC0spdjV42n0k4tGtv6Rz0bYT/2DpcqRVIQd9EAcTeUFq3qOYZCHsN0Q+iS2QIDAQAB", type = "TXT", ttl = 3600 }
+ dmarc_txt = { name = "_dmarc", content = "v=DMARC1; p=quarantine; rua=mailto:postmaster@azuredevops.co.uk; ruf=mailto:postmaster@azuredevops.co.uk; fo=1; adkim=s; aspf=s", type = "TXT", ttl = 3600 }
+ }
+}
+
+
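+# Every record must define ttl; proxied and priority are optional and are defaulted via lookup().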
+resource "cloudflare_dns_record" "this" {
+ for_each = local.azure_records
+
+ zone_id = var.zone_id
+ name = each.value.name
+ content = each.value.content
+ type = each.value.type
+ ttl = each.value.ttl
+
+ proxied = lookup(each.value, "proxied", false)
+ priority = lookup(each.value, "priority", null)
+}
\ No newline at end of file
diff --git a/cloudflare/zones/azuredevops/terraform.tfstate b/cloudflare/zones/azuredevops/terraform.tfstate
new file mode 100644
index 0000000..b283d5d
--- /dev/null
+++ b/cloudflare/zones/azuredevops/terraform.tfstate
@@ -0,0 +1,258 @@
+{
+ "version": 4,
+ "terraform_version": "1.13.3",
+ "serial": 59,
+ "lineage": "87d4a795-db19-508c-0f53-1e6bdd9b5d93",
+ "outputs": {},
+ "resources": [
+ {
+ "mode": "managed",
+ "type": "cloudflare_dns_record",
+ "name": "this",
+ "provider": "provider[\"registry.terraform.io/cloudflare/cloudflare\"]",
+ "instances": [
+ {
+ "index_key": "autodiscover",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "eu1.workspace.org.",
+ "created_on": "2025-10-03T10:35:51Z",
+ "data": null,
+ "id": "2c03ba416645db773ec3b0bdc7514d1e",
+ "meta": "{}",
+ "modified_on": "2025-10-04T13:37:12Z",
+ "name": "autodiscover.azuredevops.co.uk",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": false,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "CNAME",
+ "zone_id": "d2697ef5d69f322186bdbc812fdad150"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "dkim_txt",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "v=DKIM1;k=rsa;h=sha256;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr/Mu/P1bfiMIGkHNnvhLB1oVcAaSOg4QoKTCF9N6F/eVV7JCoERTSSHiMyS74V/xq0i3kUJYjspFgrXKicVaEl6jHmRJ4jSyb2b52frWzLakW1SB9LJwXZ/n0PDm90iSPToQOEvQTSl+pg9B9RWfhqr3Tv5hz9YvsjQP1tn7yNwJSbyhU944PWZimu0ryqwAQyLGNP+CsIeMTinwe0B8Rdtc52TusInwhcMddL9XgGYi/IsWsuri85R5yvzIOKk/sklfuDHOSQoCap7RW+Lm22B/DzC0spdjV42n0k4tGtv6Rz0bYT/2DpcqRVIQd9EAcTeUFq3qOYZCHsN0Q+iS2QIDAQAB",
+ "created_on": "2025-10-03T10:35:51Z",
+ "data": null,
+ "id": "d5bf0503497e76584ffbe03173f8d644",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:35:51Z",
+ "name": "cf8DDF69382578883._domainKey",
+ "priority": null,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 3600,
+ "type": "TXT",
+ "zone_id": "d2697ef5d69f322186bdbc812fdad150"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "dmarc_txt",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "v=DMARC1; p=quarantine; rua=mailto:postmaster@azuredevops.co.uk; ruf=mailto:postmaster@azuredevops.co.uk; fo=1; adkim=s; aspf=s",
+ "created_on": "2025-10-03T10:35:50Z",
+ "data": null,
+ "id": "50d45619b699b9482803d7f4a624127a",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:35:50Z",
+ "name": "_dmarc.azuredevops.co.uk",
+ "priority": null,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 3600,
+ "type": "TXT",
+ "zone_id": "d2697ef5d69f322186bdbc812fdad150"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "mail",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "eu1.workspace.org.",
+ "created_on": "2025-10-03T10:35:51Z",
+ "data": null,
+ "id": "9f0db2a3b0ad84d1037580611c84a348",
+ "meta": "{}",
+ "modified_on": "2025-10-04T13:37:12Z",
+ "name": "mail.azuredevops.co.uk",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": false,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "CNAME",
+ "zone_id": "d2697ef5d69f322186bdbc812fdad150"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "root_a",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "185.139.7.37",
+ "created_on": "2025-10-03T10:35:51Z",
+ "data": null,
+ "id": "9868e7bf3f8cc58583eda37a9f45434f",
+ "meta": "{}",
+ "modified_on": "2025-10-04T13:37:12Z",
+ "name": "azuredevops.co.uk",
+ "priority": null,
+ "proxiable": true,
+ "proxied": true,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "d2697ef5d69f322186bdbc812fdad150"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "root_uk_aaaa",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "2a12:ab46:5344:fd::a",
+ "created_on": "2025-10-04T13:33:21Z",
+ "data": null,
+ "id": "a9db6975674cf2e19cc3e99cf79a2904",
+ "meta": "{}",
+ "modified_on": "2025-10-04T13:37:12Z",
+ "name": "azuredevops.co.uk",
+ "priority": null,
+ "proxiable": true,
+ "proxied": true,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "AAAA",
+ "zone_id": "d2697ef5d69f322186bdbc812fdad150"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "spf_txt",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "v=spf1 include:_spf.workspace.org -all",
+ "created_on": "2025-10-03T10:35:50Z",
+ "data": null,
+ "id": "7fef7251bb88c2b501343951705cdb3b",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:35:50Z",
+ "name": "azuredevops.co.uk",
+ "priority": null,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 3600,
+ "type": "TXT",
+ "zone_id": "d2697ef5d69f322186bdbc812fdad150"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "verify_txt",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "44856072-5cde-458d-86c9-c8f86c0ab7bd",
+ "created_on": "2025-10-03T10:35:50Z",
+ "data": null,
+ "id": "43850a81936261408d3cd135c064c199",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:35:50Z",
+ "name": "workspace-verification.azuredevops.co.uk",
+ "priority": null,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "TXT",
+ "zone_id": "d2697ef5d69f322186bdbc812fdad150"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ }
+ ]
+ }
+ ],
+ "check_results": null
+}
diff --git a/cloudflare/zones/azuredevops/variables.tf b/cloudflare/zones/azuredevops/variables.tf
new file mode 100644
index 0000000..d8be6af
--- /dev/null
+++ b/cloudflare/zones/azuredevops/variables.tf
@@ -0,0 +1,15 @@
+variable "cloudflare_apitoken" {
+ description = "Cloudflare API token"
+ type = string
+ sensitive = true
+}
+
+variable "zone_id" {
+ description = "Cloudflare zone ID"
+ type = string
+}
+
+variable "current_ip" {
+ description = "Current public IP address"
+ type = string
+}
\ No newline at end of file
diff --git a/cloudflare/zones/dreamartdecor/backend.tf b/cloudflare/zones/dreamartdecor/backend.tf
new file mode 100644
index 0000000..4c16d8c
--- /dev/null
+++ b/cloudflare/zones/dreamartdecor/backend.tf
@@ -0,0 +1,6 @@
+terraform {
+ backend "pg" {
+ conn_str = "postgres://zeshan:Shan33779488@100.115.152.20:5432/terraform?sslmode=disable"
+ schema_name = "dreamartdecor-state"
+ }
+}
diff --git a/cloudflare/zones/dreamartdecor/main.tf b/cloudflare/zones/dreamartdecor/main.tf
new file mode 100644
index 0000000..7f3581d
--- /dev/null
+++ b/cloudflare/zones/dreamartdecor/main.tf
@@ -0,0 +1,78 @@
+terraform {
+ required_providers {
+ cloudflare = {
+ source = "cloudflare/cloudflare"
+ version = "5.8.2"
+ }
+ }
+}
+
+provider "cloudflare" {
+ api_token = var.cloudflare_apitoken
+}
+locals {
+ dream_records = {
+ dream_mail = {
+ name = "mail"
+ content = "168.119.13.219"
+ type = "A"
+ ttl = 86400
+ }
+ dream_mx = {
+ name = "@"
+ content = "mail.dreamartdecor.com"
+ type = "MX"
+ ttl = 86400
+ priority = 10
+ }
+ dream_autoconfig = {
+ name = "autoconfig"
+ content = "mail.dreamartdecor.com"
+ type = "CNAME"
+ ttl = 86400
+ }
+ dream_mail_aaaa = {
+ name = "mail"
+ content = "2a01:4f8:242:4460::2"
+ type = "AAAA"
+ ttl = 86400
+ }
+ dream_txt_spf = {
+ name = "@"
+ content = "v=spf1 a mx ip4:168.119.13.219 ip6:2a01:4f8:242:4460::2 ~all"
+ type = "TXT"
+ ttl = 86400
+ }
+ dream_txt_dkim = {
+ name = "x._domainkey"
+ content = "v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1GW9inpeYtcxMWY3JjUnVFzsPgKCZOfOCETVvk5wWOYZr9LJGz0YnJu3xGIZeJiFWDOgGV/xorzlcAzDqumh58cYPkDIzYVgbOp8vw1qS+a3iKMtRM99kyadEmUDyKjHk11HiCADNaEAgCD1vaKlQzRGAmdP15XhFC7xprSPQPAi6z/l2Iy3wsLdMpYR9P+tiSpS0msI86PBj4Kj5JRuzyHMw4YCqLRKMOIXTKO/zBWOAJOc/eKbjMyTT/iUJe9YE5yuzUHSZNT57aTHIGGadhFhMrkCNVFMyuCGZFt7fCF+Xzvu0iljYK/Uw4Zru73fTaUtq8SMcnvLjj7lm0fpvwIDAQAB"
+ type = "TXT"
+ ttl = 86400
+ }
+ dream_dmarc = {
+ name = "_dmarc"
+ content = "v=DMARC1; p=none; rua=mailto:4a937e10a8e144c89cb11f1272c159c0@dmarc-reports.cloudflare.net"
+ type = "TXT"
+ ttl = 86400
+ }
+ dream_www = {
+ name = "www"
+ content = "dreamartdecor.com"
+ type = "CNAME"
+ ttl = 1
+ proxied = true
+ }
+ }
+}
+resource "cloudflare_dns_record" "this" {
+ for_each = local.dream_records
+
+ zone_id = var.zone_id
+ name = each.value.name
+ content = each.value.content
+ type = each.value.type
+ ttl = each.value.ttl
+
+ proxied = lookup(each.value, "proxied", false)
+ priority = lookup(each.value, "priority", null)
+}
\ No newline at end of file
diff --git a/cloudflare/zones/dreamartdecor/variables.tf b/cloudflare/zones/dreamartdecor/variables.tf
new file mode 100644
index 0000000..d8be6af
--- /dev/null
+++ b/cloudflare/zones/dreamartdecor/variables.tf
@@ -0,0 +1,15 @@
+variable "cloudflare_apitoken" {
+ description = "Cloudflare API token"
+ type = string
+ sensitive = true
+}
+
+variable "zone_id" {
+ description = "Cloudflare zone ID"
+ type = string
+}
+
+variable "current_ip" {
+ description = "Current public IP address"
+ type = string
+}
\ No newline at end of file
diff --git a/cloudflare/zones/ztariq/backend.tf b/cloudflare/zones/ztariq/backend.tf
new file mode 100644
index 0000000..e69de29
diff --git a/cloudflare/zones/ztariq/main.tf b/cloudflare/zones/ztariq/main.tf
new file mode 100644
index 0000000..a5f9596
--- /dev/null
+++ b/cloudflare/zones/ztariq/main.tf
@@ -0,0 +1,83 @@
+terraform {
+ required_providers {
+ cloudflare = {
+ source = "cloudflare/cloudflare"
+ version = "~> 5"
+ }
+ }
+}
+
+provider "cloudflare" {
+ api_token = var.cloudflare_apitoken
+}
+
+locals {
+ ztariq_records = {
+ # --- A Records ---
+ beszel = { name = "beszel", type = "A", content = "198.23.169.195", ttl = 1, proxied = true }
+ ca = { name = "ca", type = "A", content = "154.12.117.17", ttl = 1, proxied = false }
+ nc = { name = "nc", type = "A", content = "154.12.117.17", ttl = 1, proxied = false }
+ nl = { name = "nl", type = "A", content = "62.84.172.70", ttl = 1, proxied = false }
+ reg = { name = "reg", type = "A", content = "154.12.117.17", ttl = 1, proxied = false }
+ tea = { name = "tea", type = "A", content = "198.23.169.195", ttl = 1, proxied = false }
+ uk = { name = "uk", type = "A", content = "185.139.7.37", ttl = 1, proxied = false }
+ uk2 = { name = "uk2", type = "A", content = "154.41.135.47", ttl = 1, proxied = false }
+ uptime = { name = "uptime", type = "A", content = "198.23.169.195", ttl = 1, proxied = true }
+ us = { name = "us", type = "A", content = "198.23.169.195", ttl = 1, proxied = false }
+ root_a = { name = "@", type = "A", content = "185.139.7.37", ttl = 1, proxied = true }
+ # --- AAAA Records ---
+ nl_aaaa = { name = "nl", type = "AAAA", content = "2a12:bec4:1821:f0::a", ttl = 1, proxied = false }
+ root_uk_aaaa = { name = "@", type = "AAAA", content = "2a12:ab46:5344:fd::a", ttl = 1, proxied = true }
+ root_uk_uk = { name = "uk", type = "AAAA", content = "2a12:ab46:5344:fd::a", ttl = 1, proxied = false }
+
+ # --- CNAME Records ---
+ autodiscover = { name = "autodiscover", type = "CNAME", content = "eu1.workspace.org.", ttl = 360, proxied = false }
+ mail = { name = "mail", type = "CNAME", content = "eu1.workspace.org.", ttl = 360, proxied = false }
+
+ # --- MX Records ---
+ mx_root = { name = "@", type = "MX", content = "eu1.workspace.org.", priority = 10, ttl = 360 }
+
+ # --- TXT Records ---
+ dmarc = {
+ name = "_dmarc"
+ type = "TXT"
+ content = "v=DMARC1; p=quarantine; rua=mailto:postmaster@ztariq.com; ruf=mailto:postmaster@ztariq.com; fo=1; adkim=s; aspf=s"
+ ttl = 3600
+ }
+ dkim = {
+ name = "nd8ddf6995beebee4._domainkey"
+ type = "TXT"
+ content = "v=DKIM1; k=rsa; h=sha256; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoi3yX1W5V6a9QbEXo00k9JCZ8Vew5rQEanHLIY0cOxCauAIZZIrhQsexZ0j45EFVtfMrBHeddUtolVSSDHvvJg49HzJqWsKOsN061uBgmdN69JEtzme04pRmz/7H+3Y0QDUSYDd+ffYzWaouplFqGuhYkQ5QG2J1JzofcetuAkQICIgWStcOO+av5WoyTdxfqsY64d/XFP4PZJJHX0XA1P2YaSuyNF5c7nv/+a9A6F5+OrgZhFNNWjUurkKKhFzhbR82BUPTXVuG3EI5wSQcIYjhXgINagsmvVyPL1XP584qtnq0ScGysSkh0T3Vhg/Kob9eHX1du7mZj7G0z3PHmwIDAQAB"
+ ttl = 360
+ }
+ workspace_verification = {
+ name = "workspace-verification"
+ type = "TXT"
+ content = "f23716dd-2ad6-4dd4-8867-112e3c4c318d"
+ ttl = 360
+ }
+ spf = {
+ name = "@"
+ type = "TXT"
+ content = "v=spf1 include:_spf.workspace.org -all"
+ ttl = 360
+ }
+ }
+}
+
+resource "cloudflare_dns_record" "ztariq" {
+ for_each = local.ztariq_records
+
+ zone_id = var.zone_id
+ name = each.value.name
+ type = each.value.type
+ content = each.value.content
+ ttl = each.value.ttl
+
+ proxied = lookup(each.value, "proxied", null)
+ priority = lookup(each.value, "priority", null)
+
+ lifecycle {
+ prevent_destroy = false
+ }
+}
diff --git a/cloudflare/zones/ztariq/terraform.tfstate b/cloudflare/zones/ztariq/terraform.tfstate
new file mode 100644
index 0000000..c37c323
--- /dev/null
+++ b/cloudflare/zones/ztariq/terraform.tfstate
@@ -0,0 +1,648 @@
+{
+ "version": 4,
+ "terraform_version": "1.13.3",
+ "serial": 67,
+ "lineage": "86dbab99-bb75-e967-6f01-8134ccc693e6",
+ "outputs": {},
+ "resources": [
+ {
+ "mode": "managed",
+ "type": "cloudflare_dns_record",
+ "name": "ztariq",
+ "provider": "provider[\"registry.terraform.io/cloudflare/cloudflare\"]",
+ "instances": [
+ {
+ "index_key": "autodiscover",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "eu1.workspace.org.",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "1da92323f9f6e5a00e02df0edac16554",
+ "meta": "{}",
+ "modified_on": "2025-10-09T08:38:58Z",
+ "name": "autodiscover.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": false,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "CNAME",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "beszel",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "198.23.169.195",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "e1824f42449cb3fed3024633819bd345",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:38Z",
+ "name": "beszel.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": true,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "ca",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "154.12.117.17",
+ "created_on": "2025-10-03T10:41:37Z",
+ "data": null,
+ "id": "8eedfb649e973cacfcb117155ccbca61",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:37Z",
+ "name": "ca.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "dkim",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "v=DKIM1; k=rsa; h=sha256; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoi3yX1W5V6a9QbEXo00k9JCZ8Vew5rQEanHLIY0cOxCauAIZZIrhQsexZ0j45EFVtfMrBHeddUtolVSSDHvvJg49HzJqWsKOsN061uBgmdN69JEtzme04pRmz/7H+3Y0QDUSYDd+ffYzWaouplFqGuhYkQ5QG2J1JzofcetuAkQICIgWStcOO+av5WoyTdxfqsY64d/XFP4PZJJHX0XA1P2YaSuyNF5c7nv/+a9A6F5+OrgZhFNNWjUurkKKhFzhbR82BUPTXVuG3EI5wSQcIYjhXgINagsmvVyPL1XP584qtnq0ScGysSkh0T3Vhg/Kob9eHX1du7mZj7G0z3PHmwIDAQAB",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "e7d739b620e7de9ddf4acc1d35d9104e",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:38Z",
+ "name": "nd8ddf6995beebee4._domainkey.ztariq.com",
+ "priority": null,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "TXT",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "dmarc",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "v=DMARC1; p=quarantine; rua=mailto:postmaster@ztariq.com; ruf=mailto:postmaster@ztariq.com; fo=1; adkim=s; aspf=s",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "d687c8ca6f4cd2057d1607c77797b8dd",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:38Z",
+ "name": "_dmarc.ztariq.com",
+ "priority": null,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 3600,
+ "type": "TXT",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "mail",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "eu1.workspace.org.",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "8adb370489931da4b8726c8142b468b2",
+ "meta": "{}",
+ "modified_on": "2025-10-09T08:38:58Z",
+ "name": "mail.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": false,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "CNAME",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "mx_root",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "eu1.workspace.org.",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "1853b26b8d52a41c5ef0f4212c41696f",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:38Z",
+ "name": "ztariq.com",
+ "priority": 10,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "MX",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "nc",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "154.12.117.17",
+ "created_on": "2025-10-03T10:41:37Z",
+ "data": null,
+ "id": "9188fe0d253b8094f19320d4afff3d9a",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:37Z",
+ "name": "nc.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "nl",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "62.84.172.70",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "18b04c8ced7b1ae3bcf5fc873e1fbdf8",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:38Z",
+ "name": "nl.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "nl_aaaa",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "2a12:bec4:1821:f0::a",
+ "created_on": "2025-10-03T10:41:37Z",
+ "data": null,
+ "id": "f6bffdbdb7f94832d39186a147687fe8",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:37Z",
+ "name": "nl.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "AAAA",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "reg",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "154.12.117.17",
+ "created_on": "2025-10-09T08:38:58Z",
+ "data": null,
+ "id": "2f92ab4eb7475d3c9b678f49abbc9ae3",
+ "meta": "{}",
+ "modified_on": "2025-10-09T08:38:58Z",
+ "name": "reg",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "root_a",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "185.139.7.37",
+ "created_on": "2025-10-03T10:41:37Z",
+ "data": null,
+ "id": "1d1e80fd88cbed6b00ddf0ac4d856e0f",
+ "meta": "{}",
+ "modified_on": "2025-10-04T13:37:26Z",
+ "name": "ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": true,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "root_uk_aaaa",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "2a12:ab46:5344:fd::a",
+ "created_on": "2025-10-04T13:30:31Z",
+ "data": null,
+ "id": "69fa5646418278ab2d865b509349f85d",
+ "meta": "{}",
+ "modified_on": "2025-10-04T14:52:55Z",
+ "name": "ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": true,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "AAAA",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "root_uk_uk",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "2a12:ab46:5344:fd::a",
+ "created_on": "2025-10-04T13:32:56Z",
+ "data": null,
+ "id": "041c4d5e021eeb72b16ce82b3215d114",
+ "meta": "{}",
+ "modified_on": "2025-10-04T14:52:55Z",
+ "name": "uk.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "AAAA",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "spf",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "v=spf1 include:_spf.workspace.org -all",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "a7dc36bb0d2542c1d9534e70af352a70",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:38Z",
+ "name": "ztariq.com",
+ "priority": null,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "TXT",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "tea",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "198.23.169.195",
+ "created_on": "2025-10-03T10:41:37Z",
+ "data": null,
+ "id": "12be831a0a9cfcaac555f82acbabec70",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:37Z",
+ "name": "tea.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "uk",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "185.139.7.37",
+ "created_on": "2025-10-03T10:41:37Z",
+ "data": null,
+ "id": "c932fd20294dd7e63dd49bcbb42dd46d",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:37Z",
+ "name": "uk.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "uk2",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "154.41.135.47",
+ "created_on": "2025-10-05T13:44:38Z",
+ "data": null,
+ "id": "3e85a02d4d60fdd2746eded881daf70b",
+ "meta": "{}",
+ "modified_on": "2025-10-05T14:03:16Z",
+ "name": "uk2.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "uptime",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "198.23.169.195",
+ "created_on": "2025-10-03T10:41:38Z",
+ "data": null,
+ "id": "79f47fe2d5ec8575b215fddd6bbb1f6b",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:38Z",
+ "name": "uptime.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": true,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "us",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "198.23.169.195",
+ "created_on": "2025-10-03T10:41:37Z",
+ "data": null,
+ "id": "3532e6cb018f85319c15430387aba340",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:37Z",
+ "name": "us.ztariq.com",
+ "priority": null,
+ "proxiable": true,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 1,
+ "type": "A",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ },
+ {
+ "index_key": "workspace_verification",
+ "schema_version": 0,
+ "attributes": {
+ "comment": null,
+ "comment_modified_on": null,
+ "content": "f23716dd-2ad6-4dd4-8867-112e3c4c318d",
+ "created_on": "2025-10-03T10:41:37Z",
+ "data": null,
+ "id": "396e522c02d0fe0b716f174d2cdca4e3",
+ "meta": "{}",
+ "modified_on": "2025-10-03T10:41:37Z",
+ "name": "workspace-verification.ztariq.com",
+ "priority": null,
+ "proxiable": false,
+ "proxied": false,
+ "settings": {
+ "flatten_cname": null,
+ "ipv4_only": null,
+ "ipv6_only": null
+ },
+ "tags": [],
+ "tags_modified_on": null,
+ "ttl": 360,
+ "type": "TXT",
+ "zone_id": "0f670677e7c36e9fe8f8e6a1d1c72cbf"
+ },
+ "sensitive_attributes": [],
+ "identity_schema_version": 0
+ }
+ ]
+ }
+ ],
+ "check_results": null
+}
diff --git a/cloudflare/zones/ztariq/variables.tf b/cloudflare/zones/ztariq/variables.tf
new file mode 100644
index 0000000..d8be6af
--- /dev/null
+++ b/cloudflare/zones/ztariq/variables.tf
@@ -0,0 +1,15 @@
+variable "cloudflare_apitoken" {
+ description = "Cloudflare API token"
+ type = string
+ sensitive = true
+}
+
+variable "zone_id" {
+ description = "Cloudflare zone ID"
+ type = string
+}
+
+variable "current_ip" {
+ description = "Current public IP address"
+ type = string
+}
\ No newline at end of file
diff --git a/cv archive/index2.html b/cv archive/index2.html
new file mode 100644
index 0000000..5144bfe
--- /dev/null
+++ b/cv archive/index2.html
@@ -0,0 +1,497 @@
+
+
+
+
+ Zeshan Tariq – DevOps · SRE · SOC
+
+
+
+
+
+
+
+
+ ZESHAN · AZURE · KUBERNETES
+
+
+ Single-page profile · CV first
+
+
+
+
+
+
+ Zeshan Tariq
+
+
+ SOC · SRE · DEVOPS · DEVSECOPS · KUBERNETES
+
+
+ Cloud & platform engineer focused on Azure , Kubernetes ,
+ and secure automation . This page is the signal — the details live in my CV.
+
+
+
+
+
+ Available for remote roles
+
+
Azure · AKS · Terraform
+
CI/CD · GitHub · GitLab
+
SOC · Sentinel · SRE
+
+
+
+
+
+
+
+
+
+
+ azure-kubernetes-engineer.ts
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ import { AzureKubernetesServices } from '@azure/aks';
+
+
+ import { Engineer, Experience } from '@professional/core';
+
+
+ export
+ class
+ AzureKubernetesEngineer
+ implements Engineer {
+
+
+ constructor(private azure: AzureKubernetesServices) {}
+
+
+ experience
+ : Experience = {
+ years
+ :
+ 8
+ ,
+ specialties
+ :
+
+ ['devops','sre','soc','devsecops']
+
+ };
+
+ deploySolution(solution: any) {
+ return
+
+ this.azure.deploy({ solution, orchestration: 'Kubernetes',
+
+
+ cloud: 'Azure' });
+
+ }
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ MTTR < 30m
+ SLO 99.9%
+ AKS clusters: 12
+ Pipelines: 40+
+ Alerts tuned: yes
+
+
+
+
+
+
+
+
+
+ © Zeshan Tariq
+ Dark single-page · SVG-driven hero
+
+
+
+
+
+
diff --git a/cv archive/index3.html b/cv archive/index3.html
new file mode 100644
index 0000000..480175d
--- /dev/null
+++ b/cv archive/index3.html
@@ -0,0 +1,331 @@
+
+
+
+
+ Zeshan Tariq – DevOps · SRE · SOC
+
+
+
+
+
+
+
+
+
+
+ Cloud & platform engineer focused on Azure ,
+ Kubernetes , and secure automation .
+ For full details, see the CV.
+
+
+
+
+
+
+
+
+ azure-kubernetes-engineer.ts
+
+
+
+
+
+
+
+
+
+
+ src/azure-kubernetes-engineer.ts
+
+
+
+
+
+ import { AzureKubernetesServices } from '@azure/kubernetes-engine';
+
+
+ import { Engineer, Experience } from '@professional/core';
+
+
+ export
+ class
+ AzureKubernetesEngineer
+ implements
+ Engineer {
+
+
+ constructor(private azure: AzureKubernetesServices) {}
+
+
+ experience
+ : Experience = { years: 8,
+ specialties
+
+ : ['application dev','config mgmt','cloud']
+
+ };
+
+
+ deploySolution(solution: any) { return this.azure.deploy({ solution }); }
+
+
+
+
+
+
+
+
+ © Zeshan Tariq · Minimal dark profile
+
+
+
+
+
+
diff --git a/cv archive/index4.html b/cv archive/index4.html
new file mode 100644
index 0000000..1278b9d
--- /dev/null
+++ b/cv archive/index4.html
@@ -0,0 +1,281 @@
+
+
+
+
+ ZT – DevOps · SRE · SOC
+
+
+
+
+
+
+
+
+
+
+
+ Cloud & platform engineer focused on Azure ,
+ Kubernetes , and secure automation .
+ For full details, see the CV.
+
+
+
+
+
+
+
+
+ azure-kubernetes-engineer.ts
+
+
+
+
+
+
+
+
+
+ src/azure-kubernetes-engineer.ts
+
+
+
+
+ import { AzureKubernetesServices } from '@azure/kubernetes-engine';
+
+
+ import { Engineer, Experience } from '@professional/core';
+
+
+ export
+ class
+ AzureKubernetesEngineer
+ implements
+ Engineer {
+
+
+ constructor(private azure: AzureKubernetesServices) {}
+
+
+ experience
+ : Experience = { years: 8,
+ specialties
+
+ : ['application dev','config mgmt','cloud']
+
+ };
+
+
+ deploySolution(solution: any) { return this.azure.deploy({ solution }); }
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cv archive/index5.html b/cv archive/index5.html
new file mode 100644
index 0000000..43ca4b5
--- /dev/null
+++ b/cv archive/index5.html
@@ -0,0 +1,491 @@
+
+
+
+
+ Zeshan Tariq – DevOps · SRE · SOC
+
+
+
+
+
+
+
+
+ Zeshan Tariq · DevOps · SRE · SOC
+
+
+
+ Available for remote roles · contract & permanent
+
+
+
+
+
+
+
+
+
+ Profile loaded: SOC / SRE / DevOps / DevSecOps / Kubernetes
+
+
+
+
+
+
+ Who are you and what do you do?
+
+
U
+
+
+
+
+
+
+
Zeshan Tariq
+
SOC · SRE · DevOps · DevSecOps · Kubernetes
+
+ I’m a cloud & platform engineer focused on Azure ,
+ Kubernetes , and secure automation — combining
+ SOC practices with SRE principles to keep systems
+ reliable and secure.
+
+
+
Azure · AKS
+
Kubernetes · Docker
+
Terraform · CI/CD
+
Sentinel · SOC
+
+
+
+
+
+
+
+ Can I see the full details of your experience?
+
+
U
+
+
+
+
+
+
+ Absolutely. The CV carries all the detail (roles, dates, full stack, and examples).
+
+
+ Download CV
+
+
+
+
+
+
+
+
+
+
+ In a sentence or two, what kind of work do you do?
+
+
U
+
+
+
+
+
+
+ I design, build and operate Azure and Kubernetes platforms,
+ automate infrastructure with Terraform/Bicep , run
+ CI/CD pipelines , and use SOC tooling
+ and SRE metrics to keep services healthy.
+
+
+
+
+
+
+ Show me a small code sample that represents how you think.
+
+
U
+
+
+
+
+
+
+ Here’s a condensed TypeScript-style example that reflects what I do with Azure, AKS and
+ translating requirements into platform configuration:
+
import { AzureKubernetesServices } from '@azure/kubernetes-engine';
+import { Engineer, Experience } from '@professional/core';
+
+export class AzureKubernetesEngineer implements Engineer {
+ constructor(private azureServices: AzureKubernetesServices) {}
+
+ experience: Experience = {
+ years: 8,
+ sectors: ['government', 'consultancy', 'finance'],
+ specialties: [
+ 'application development',
+ 'configuration management',
+ 'public cloud deployment',
+ ],
+ };
+
+ deploySolution(solution: any) {
+ return this.azureServices.deploy({
+ solution,
+ orchestration: 'Kubernetes',
+ cloud: 'Azure',
+ });
+ }
+
+ bridgeBusinessAndTechnology(requirements: any) {
+ return this.azureServices.configure(requirements);
+ }
+}
+
+
+
+
+
+
+ How do I contact you if I want to talk about a role?
+
+
U
+
+
+
+
+
+
+
+
+ © Zeshan Tariq
+ Minimal chat-style profile · CV-first
+
+
+
+
+
+
diff --git a/cv/Dockerfile b/cv/Dockerfile
new file mode 100644
index 0000000..10bf945
--- /dev/null
+++ b/cv/Dockerfile
@@ -0,0 +1,2 @@
+FROM nginx:alpine
+COPY . /usr/share/nginx/html/
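+
+# Assumed build/publish step (cv/compose.yml pulls r.h-y.st/cv:latest):
+#   docker build -t r.h-y.st/cv:latest . && docker push r.h-y.st/cv:latest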
diff --git a/cv/compose.yml b/cv/compose.yml
new file mode 100644
index 0000000..9cc43cf
--- /dev/null
+++ b/cv/compose.yml
@@ -0,0 +1,11 @@
+services:
+ cvsite:
+ image: r.h-y.st/cv:latest
+ container_name: zeshan
+ restart: always
+ networks:
+ - hurricane
+
+networks:
+ hurricane:
+ external: true
\ No newline at end of file
diff --git a/cv/index.html b/cv/index.html
new file mode 100644
index 0000000..7d4bcbd
--- /dev/null
+++ b/cv/index.html
@@ -0,0 +1,564 @@
+
+
+
+
+ Zeshan Tariq – Azure DevOps · SRE · SOC · Kubernetes Engineer
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Zeshan Tariq – Azure DevOps, SRE, SOC & Kubernetes Engineer
+
+
+
+
+
+
+ Zeshan Tariq · DevOps · SRE · SOC
+
+
+
+ Available for remote contract & permanent roles
+
+
+
+
+
+
+
+
+
+ Profile: SOC / SRE / DevOps / DevSecOps / Kubernetes
+
+
+
+
+
+
+ Who are you and what do you do?
+
+
U
+
+
+
+
+
+
+
Zeshan Tariq
+
SOC · SRE · DevOps · DevSecOps · Kubernetes
+
+ I’m a cloud & platform engineer focused on Azure ,
+ Kubernetes , and secure automation —
+ combining SOC practices with SRE principles
+ to keep systems reliable and secure.
+
+
+
Azure · AKS
+
Kubernetes · Docker
+
Terraform · CI/CD
+
Sentinel · SOC
+
+
+
+
+
+
+
+ Where can I see your full experience and roles?
+
+
U
+
+
+
+
+
+
+
+
+ In practical terms, what kind of work do you do?
+
+
U
+
+
+
+
+
+
+ I design and operate Azure & AKS platforms , automate
+ infrastructure with Terraform/Bicep , build
+ CI/CD pipelines , and use SOC tooling
+ and SRE metrics to keep services secure and reliable.
+
+
+
+
+
+
+ Show me a small code sample that represents how you think.
+
+
U
+
+
+
+
+
+
+ A condensed TypeScript-style example that reflects what I do with Azure, AKS,
+ and translating requirements into platform configuration:
+
import { AzureKubernetesServices } from '@azure/kubernetes-engine';
+import { Engineer, Experience } from '@professional/core';
+
+export class AzureKubernetesEngineer implements Engineer {
+ constructor(private azureServices: AzureKubernetesServices) {}
+
+ experience: Experience = {
+ years: 8,
+ sectors: ['government', 'consultancy', 'finance'],
+ specialties: [
+ 'application development',
+ 'configuration management',
+ 'public cloud deployment',
+ ],
+ };
+
+ deploySolution(solution: any) {
+ return this.azureServices.deploy({
+ solution,
+ orchestration: 'Kubernetes',
+ cloud: 'Azure',
+ });
+ }
+
+ bridgeBusinessAndTechnology(requirements: any) {
+ return this.azureServices.configure(requirements);
+ }
+}
+
+
+
+
+
+
+ How do I contact you about a role?
+
+
U
+
+
+
+
+
+
+
+
+ © Zeshan Tariq
+ Chat-style profile · CV-first
+
+
+
+
+
+
diff --git a/cv/zt1125.docx b/cv/zt1125.docx
new file mode 100644
index 0000000..3cfed45
Binary files /dev/null and b/cv/zt1125.docx differ
diff --git a/hurricane/.env b/hurricane/.env
new file mode 100644
index 0000000..ddef663
--- /dev/null
+++ b/hurricane/.env
@@ -0,0 +1,68 @@
+# General settings
+WG_HOST=39.33779.xyz
+PASSWORD=c0bba9d2-7207-4c18-8133-9c3b01c7514c
+TZ=Europe/London
+WEBPASSWORD=c0bba9d2-7207-4c18-8133-9c3b01c7514c
+PIHOLE=C759tw1j
+
+# Nginx Proxy Manager settings
+NGINX_HTTP_PORT=80
+NGINX_HTTPS_PORT=443
+NGINX_ADMIN_PORT=81
+GITLAB_ROOT_PASSWORD=Shan33779488@!
+POSTGRES_PASSWORD=Shan33779488@!
+SMTP_PASSWORD=Shan33779488@!
+# WireGuard settings
+WG_UDP_PORT=51820
+WG_TCP_PORT=51821
+
+# Homepage service settings
+HOMEPAGE_PORT=3000
+
+# gitlab
+GITLAB_EXTERNAL_URL=https://41.33779.xyz
+DB_NAME=gitlabhq_production
+GITLAB_ROOT=KGdB3tMltcRMl/JghwO/jYM5xbomWSziDjcW2pVh/H0=
+DB_USER=gitlab
+DB_PASS=a656c343-5deb-4355-80c2-c6ad7e5bbf93
+POSTGRES_VERSION=latest
+# gitlab Email configuration
+EMAIL_ENABLED=true
+SMTP_HOST=mail.zeshan.uk
+SMTP_PORT=465
+SMTP_USER=tariq@zeshan.uk
+SMTP_PASS=Shan33779488@!
+SMTP_DOMAIN=zeshan.uk
+SMTP_AUTHENTICATION=login
+SMTP_ENABLE_STARTTLS_AUTO=true
+SMTP_TLS=true
+SMTP_OPENSSL_VERIFY_MODE=none
+GITLAB_EMAIL_FROM=tariq@zeshan.uk
+GITLAB_EMAIL_REPLY_TO=noreply@zeshan.uk
+HOSTBRR=^;B*3Al+w.5{
+# Placeholder SMTP template values (commented out so they do not override the real SMTP settings above)
+#SMTP_ADDRESS=smtp.example.com
+#SMTP_PORT=587
+#SMTP_USER_NAME=smtp_user@example.com
+#SMTP_PASSWORD=smtp_password
+#SMTP_DOMAIN=example.com
+
+# MariaDB settings for Nextcloud
+MYSQL_ROOT_PASSWORD=ce5ef083-99c2-4a8d-bbaa-47728ea144b1
+MYSQL_PASSWORD=d4808430-7aab-4637-b883-3a6c757fde0f
+MYSQL_DATABASE=nextcloud
+MYSQL_USER=nextcloud
+# Nextcloud settings
+TRUSTED_PROXIES=ncloud.zeshan.uk
+OVERWRITECLIURL=https://ncloud.zeshan.uk
+OVERWRITEPROTOCOL=https
+NEXTCLOUD_TRUSTED_DOMAINS=ncloud.zeshan.uk
+OVERWRITEHOST=ncloud.zeshan.uk
+
+# code-server
+CODE_SERVER_PASSWORD=b681e77c-9ac8-480c-a1dd-1f5c5542f4fd
+CODE_SERVER_SUDO_PASSWORD=b681e77c-9ac8-480c-a1dd-1f5c5542f4fd
+PROXY_DOMAIN=code.zeshan.uk
+# Adjust the workspace path as needed
+WORKSPACE_PATH=/mnt/data/code-server/workspace
+CODE_SERVER_CONFIF_PATH=/mnt/data/code-server/config
\ No newline at end of file
diff --git a/hurricane/Makefile b/hurricane/Makefile
new file mode 100644
index 0000000..754bddc
--- /dev/null
+++ b/hurricane/Makefile
@@ -0,0 +1,38 @@
+# Makefile for restic backups
+
+# Hard-coded settings
+RESTIC_REPOSITORY := /mnt/data/OneDrive/backup/us
+RESTIC_PASSWORD := Shan33779488
+RESTIC_BIN := /usr/bin/restic
+
+# Sources to back up
+SOURCES := /root/docker/ /var/lib/docker/volumes/
+
+# Common flags
+BACKUP_FLAGS := --verbose
+CHECK_FLAGS := --read-data-subset=10%
+FORGET_FLAGS := --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --prune
+
+.PHONY: backup check forget-prune init print-env cron-install cron-remove
+
+print-env:
+ @echo "Repository: $(RESTIC_REPOSITORY)"; echo "Sources: $(SOURCES)"; echo "Restic: $(RESTIC_BIN)"; echo "Backup flags: $(BACKUP_FLAGS)" # [web:70]
+
+init:
+ RESTIC_REPOSITORY=$(RESTIC_REPOSITORY) RESTIC_PASSWORD=$(RESTIC_PASSWORD) $(RESTIC_BIN) init || true
+
+backup:
+ RESTIC_REPOSITORY=$(RESTIC_REPOSITORY) RESTIC_PASSWORD=$(RESTIC_PASSWORD) $(RESTIC_BIN) backup $(SOURCES) $(BACKUP_FLAGS)
+
+check:
+ RESTIC_REPOSITORY=$(RESTIC_REPOSITORY) RESTIC_PASSWORD=$(RESTIC_PASSWORD) $(RESTIC_BIN) check $(CHECK_FLAGS)
+
+forget-prune:
+ RESTIC_REPOSITORY=$(RESTIC_REPOSITORY) RESTIC_PASSWORD=$(RESTIC_PASSWORD) $(RESTIC_BIN) forget $(FORGET_FLAGS)
+
+# Install a root crontab entry to run backup every 6 hours at minute 0, with logging
+cron-install:
+ @(crontab -l 2>/dev/null; echo '0 */6 * * * RESTIC_REPOSITORY=$(RESTIC_REPOSITORY) RESTIC_PASSWORD=$(RESTIC_PASSWORD) $(RESTIC_BIN) backup $(SOURCES) $(BACKUP_FLAGS) >> /var/log/restic-backup.log 2>&1') | crontab -
+
+cron-remove:
+ @crontab -l | grep -v '$(RESTIC_BIN) backup' | crontab - || true
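+
+# Optional helper (sketch, not part of the original setup): list existing snapshots.
+# Add "snapshots" to .PHONY above if you adopt it.
+snapshots:
+ RESTIC_REPOSITORY=$(RESTIC_REPOSITORY) RESTIC_PASSWORD=$(RESTIC_PASSWORD) $(RESTIC_BIN) snapshots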
diff --git a/hurricane/ac_runner_mac.sh b/hurricane/ac_runner_mac.sh
new file mode 100644
index 0000000..73f3311
--- /dev/null
+++ b/hurricane/ac_runner_mac.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# === CONFIG ===
+GITEA_URL="${GITEA_URL:-https://git.azuredevops.co.uk}" # or export GITEA_URL=...
+REG_TOKEN="tQ6K7y2adPYg7Nh1gt0pWrYbBOxkPPcHjfcvfqHE" # must export REG_TOKEN
+HOSTNAME_VAL="$(hostname)"
+RUNNER_NAME="${RUNNER_NAME:-${HOSTNAME_VAL}-runner}"
+RUNNER_LABELS="${RUNNER_LABELS:-${HOSTNAME_VAL}}"
+WORK_DIR="${WORK_DIR:-$HOME/act_runner}"
+VERSION="v0.2.13"
+FILE_VER="${VERSION#v}"
+ARCH="darwin-amd64" # for Intel Macs. Use darwin-arm64 on Apple Silicon
+BINARY_URL="https://gitea.com/gitea/act_runner/releases/download/${VERSION}/act_runner-${FILE_VER}-${ARCH}"
+
+if [[ -z "$REG_TOKEN" ]]; then
+ echo "❌ REG_TOKEN not set. export REG_TOKEN= and re-run."
+ exit 1
+fi
+
+mkdir -p "$WORK_DIR"
+cd "$WORK_DIR"
+
+# --- download binary ---
+if [[ ! -x "${WORK_DIR}/act_runner" ]]; then
+ echo "⬇️ Downloading act_runner ${VERSION} for ${ARCH}…"
+ curl -fL "$BINARY_URL" -o act_runner
+ chmod +x act_runner
+fi
+
+# --- config.yml ---
+if [[ ! -f "${WORK_DIR}/config.yml" ]]; then
+ cat > "${WORK_DIR}/config.yml" < and re-run."
+ exit 1
+fi
+
+mkdir -p "$WORK_DIR"
+cd "$WORK_DIR"
+
+# --- download binary ---
+if [[ ! -x "${WORK_DIR}/act_runner" ]]; then
+ echo "⬇️ Downloading act_runner ${VERSION}…"
+ curl -fL "$BINARY_URL" -o act_runner
+ chmod +x act_runner
+fi
+
+# --- config.yml (Docker default image + hostname label) ---
+if [[ ! -f "${WORK_DIR}/config.yml" ]]; then
+ cat > "${WORK_DIR}/config.yml" < "/etc/systemd/system/${SERVICE_NAME}.service" < and re-run."
+ exit 1
+fi
+
+mkdir -p "$WORK_DIR"
+cd "$WORK_DIR"
+
+# --- download binary ---
+if [[ ! -x "${WORK_DIR}/act_runner" ]]; then
+ echo "⬇️ Downloading act_runner ${VERSION}…"
+ curl -fL "$BINARY_URL" -o act_runner
+ chmod +x act_runner
+fi
+
+# --- config.yml (host executor + leaseweb label) ---
+if [[ ! -f "${WORK_DIR}/config.yml" ]]; then
+ cat > "${WORK_DIR}/config.yml" < "/etc/systemd/system/${SERVICE_NAME}.service" < /etc/fail2ban/jail.local' <> /etc/hosts
+ echo "Hostname changed to $new_hostname. You may need to re-login for prompt update."
+else
+ echo "Hostname change aborted."
+fi
diff --git a/hurricane/jellyfin.yml b/hurricane/jellyfin.yml
new file mode 100644
index 0000000..16f2266
--- /dev/null
+++ b/hurricane/jellyfin.yml
@@ -0,0 +1,18 @@
+services:
+ jellyfin:
+ image: lscr.io/linuxserver/jellyfin:latest
+ container_name: jellyfin
+ environment:
+ - PUID=1000
+ - PGID=1000
+ - TZ=Europe/London
+ volumes:
+ - /root/jellyfin/config:/config
+ - /mnt/media/shared/media/tv:/data/tvshows
+ - /mnt/media/shared/media/movies:/data/movies
+ ports:
+ - "8096:8096" # HTTP Web UI
+ - "8920:8920" # HTTPS Web UI (optional)
+ - "7359:7359/udp" # Service discovery (optional)
+ - "1900:1900/udp" # DLNA (optional)
+ restart: unless-stopped
diff --git a/hurricane/minio.yml b/hurricane/minio.yml
new file mode 100644
index 0000000..372c2eb
--- /dev/null
+++ b/hurricane/minio.yml
@@ -0,0 +1,21 @@
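+# Note: this is a service fragment; nest it under an existing top-level "services:" key
+# and make sure the external "hurricane" network is declared in that compose file.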
+ minio:
+ image: minio/minio:latest
+ container_name: minio
+ command: server /data --console-address ":9001"
+ restart: unless-stopped
+ environment:
+ MINIO_ROOT_USER: ${MINIO_ROOT_USER}
+ MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
+ # Set these if you’ll access MinIO via a reverse proxy (highly recommended)
+ MINIO_SERVER_URL: ${MINIO_SERVER_URL:-} # e.g. https://s3.ztariq.com
+ MINIO_BROWSER_REDIRECT_URL: ${MINIO_BROWSER_REDIRECT_URL:-} # e.g. https://console.ztariq.com
+ healthcheck:
+ test: ["CMD", "curl", "-fsS", "http://localhost:9000/minio/health/live"]
+ interval: 15s
+ timeout: 3s
+ retries: 20
+ volumes:
+ - ./minio_data:/data
+ - ./minio_config:/root/.minio
+ networks:
+ - hurricane
\ No newline at end of file
diff --git a/hurricane/mount.sh b/hurricane/mount.sh
new file mode 100644
index 0000000..53f3844
--- /dev/null
+++ b/hurricane/mount.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+# Prompt the user for the filesystem type
+echo "Select the filesystem type:"
+echo "1. NTFS"
+echo "2. EXT4"
+read -p "Enter your choice (1 or 2): " FS_CHOICE
+
+# Set filesystem type and default mount options based on user choice
+case $FS_CHOICE in
+ 1)
+ FS_TYPE="ntfs"
+ MOUNT_OPTIONS="defaults"
+ ;;
+ 2)
+ FS_TYPE="ext4"
+ MOUNT_OPTIONS="defaults"
+ ;;
+ *)
+ echo "Invalid choice. Exiting."
+ exit 1
+ ;;
+esac
+
+# Prompt the user for the UUID
+read -p "Enter the UUID: " UUID
+
+# Prompt the user for the mount point
+read -p "Enter the mount point (e.g., /mnt/data): " MOUNT_POINT
+
+# Check if the mount point directory exists, and if not, create it
+if [ ! -d "$MOUNT_POINT" ]; then
+ echo "Creating mount point directory..."
+ sudo mkdir -p "$MOUNT_POINT"
+ if [ $? -ne 0 ]; then
+ echo "Failed to create mount point directory. Please check your permissions."
+ exit 1
+ fi
+fi
+
+# Check if the UUID is already in /etc/fstab
+if grep -q "$UUID" /etc/fstab; then
+ echo "UUID $UUID is already in /etc/fstab. Skipping."
+else
+ # Add the volume to /etc/fstab
+ echo "UUID=$UUID $MOUNT_POINT $FS_TYPE $MOUNT_OPTIONS 0 0" | sudo tee -a /etc/fstab > /dev/null
+ if [ $? -eq 0 ]; then
+ echo "Added UUID $UUID to /etc/fstab for a $FS_TYPE volume."
+ else
+ echo "Failed to add UUID to /etc/fstab. Please check your permissions."
+ exit 1
+ fi
+fi
+
+# Mount the volume
+echo "Attempting to mount the $FS_TYPE volume..."
+sudo mount -a
+if [ $? -ne 0 ]; then
+ echo "Failed to mount. Please check the UUID and your permissions."
+ exit 1
+fi
+
+# Check if the mount was successful
+if mount | grep -q "$MOUNT_POINT"; then
+ echo "$FS_TYPE volume with UUID $UUID has been successfully mounted to $MOUNT_POINT."
+else
+ echo "Failed to mount $FS_TYPE volume with UUID $UUID to $MOUNT_POINT."
+ exit 1
+fi
diff --git a/hurricane/mount_smb.sh b/hurricane/mount_smb.sh
new file mode 100644
index 0000000..cefa94c
--- /dev/null
+++ b/hurricane/mount_smb.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+set -euo pipefail
+
+# Script to mount multiple CIFS shares and handle credentials separately per mount
+# Compatible with Ubuntu, Debian, RHEL, CentOS, Fedora, AlmaLinux
+
+# Detect OS type
+detect_os() {
+ if [ -f /etc/os-release ]; then
+ . /etc/os-release
+ echo "Detected OS: $NAME ($ID)"
+ OS=$ID
+ else
+ echo "Unsupported OS"
+ exit 1
+ fi
+}
+
+# Install CIFS utilities
+install_cifs_utils() {
+ echo "Installing CIFS utilities..."
+ if [[ "$OS" == "ubuntu" || "$OS" == "debian" ]]; then
+ apt-get update -y
+ apt-get install -y cifs-utils samba
+ elif [[ "$OS" == "rhel" || "$OS" == "centos" || "$OS" == "fedora" || "$OS" == "almalinux" ]]; then
+ yum update -y
+ yum install -y cifs-utils samba
+ else
+ echo "Unsupported OS"
+ exit 1
+ fi
+}
+
+# Prompt user for multiple CIFS mount inputs
+handle_mounts() {
+ while true; do
+ echo
+ read -p "Enter the CIFS share address (e.g., //server/share): " cifs_share
+ read -p "Enter the mount point directory (default: /mnt/media): " mount_point
+ mount_point=${mount_point:-/mnt/media}
+
+ mkdir -p "$mount_point"
+
+ read -p "Enter the username: " username
+ read -sp "Enter the password: " password
+ echo
+
+ cred_file="/etc/samba/credentials_$(basename "$mount_point")"
+ echo -e "username=$username\npassword=$password" > "$cred_file"
+ chmod 600 "$cred_file"
+ echo "Credentials stored at $cred_file"
+
+ echo "Mounting $cifs_share at $mount_point..."
+
+ # Try SMB 3.0 first
+ if ! mount -t cifs "$cifs_share" "$mount_point" \
+ -o credentials="$cred_file",vers=3.0,iocharset=utf8,uid=1000,gid=1000,file_mode=0660,dir_mode=0770; then
+ echo "SMB 3.0 failed, retrying with SMB 3.1.1..."
+ if ! mount -t cifs "$cifs_share" "$mount_point" \
+ -o credentials="$cred_file",vers=3.1.1,iocharset=utf8,uid=1000,gid=1000,file_mode=0660,dir_mode=0770; then
+ echo "❌ Failed to mount $cifs_share, please check credentials or network."
+ exit 1
+ fi
+ fi
+
+ echo "✅ Mounted successfully."
+
+ read -p "Add this mount to /etc/fstab for automount at boot? (y/n): " add_fstab
+ if [[ "$add_fstab" =~ ^[Yy]$ ]]; then
+ fstab_entry="$cifs_share $mount_point cifs credentials=$cred_file,vers=3.0,iocharset=utf8,uid=1000,gid=1000,file_mode=0660,dir_mode=0770 0 0"
+ if ! grep -qsF "$fstab_entry" /etc/fstab; then
+ echo "$fstab_entry" >> /etc/fstab
+ echo "Added to /etc/fstab"
+ else
+ echo "Entry already exists in /etc/fstab"
+ fi
+ fi
+
+ read -p "Do you want to add another CIFS mount? (y/n): " more
+ [[ "$more" =~ ^[Yy]$ ]] || break
+ done
+}
+
+# Main execution
+detect_os
+install_cifs_utils
+handle_mounts
+
+echo "🎉 All operations completed."
diff --git a/hurricane/nc-backup.sh b/hurricane/nc-backup.sh
new file mode 100644
index 0000000..fbf2994
--- /dev/null
+++ b/hurricane/nc-backup.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+BACKUP_DIR="/mnt/s3bucket/nc-bk"
+
+echo "🛑 Stopping all containers..."
+docker stop $(docker ps -q) || true
+
+echo "🧹 Clearing old backup..."
+rm -rf "$BACKUP_DIR"
+mkdir -p "$BACKUP_DIR"
+
+for vol in nextcloud_aio_apache \
+ nextcloud_aio_database \
+ nextcloud_aio_database_dump \
+ nextcloud_aio_mastercontainer \
+ nextcloud_aio_nextcloud \
+ nextcloud_aio_nextcloud_data \
+ nextcloud_aio_onlyoffice \
+ nextcloud_aio_redis; do
+ echo "📦 Backing up $vol..."
+ docker run --rm \
+ -v ${vol}:/volume \
+ -v "$BACKUP_DIR":/backup \
+ alpine tar -czf /backup/${vol}.tar.gz -C /volume .
+done
+
+echo "✅ Backup complete! Files saved in: $BACKUP_DIR"
+ls -lh "$BACKUP_DIR"
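+
+# Note: all containers remain stopped after the backup; start them again manually
+# (e.g. docker start $(docker ps -aq)) or run nc-restore.sh, which starts them at the end.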
diff --git a/hurricane/nc-restore.sh b/hurricane/nc-restore.sh
new file mode 100644
index 0000000..e9cf2d8
--- /dev/null
+++ b/hurricane/nc-restore.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+BACKUP_DIR="/mnt/s3bucket/nc-bk"
+
+VOLUMES=(
+ nextcloud_aio_apache
+ nextcloud_aio_database
+ nextcloud_aio_database_dump
+ nextcloud_aio_mastercontainer
+ nextcloud_aio_nextcloud
+ nextcloud_aio_nextcloud_data
+ nextcloud_aio_onlyoffice
+ nextcloud_aio_redis
+)
+
+echo "🛑 Stopping all containers..."
+docker stop $(docker ps -q) || true
+
+for vol in "${VOLUMES[@]}"; do
+ ARCHIVE="$BACKUP_DIR/${vol}.tar.gz"
+
+ # Check if volume exists
+ if docker volume inspect "$vol" >/dev/null 2>&1; then
+ echo "📂 Volume $vol already exists"
+ else
+ echo "🆕 Creating volume $vol..."
+ docker volume create "$vol" >/dev/null
+ fi
+
+ # Restore from archive if present
+ if [ -f "$ARCHIVE" ]; then
+ echo "♻️ Restoring $vol from $ARCHIVE..."
+ docker run --rm \
+ -v ${vol}:/volume \
+ -v "$BACKUP_DIR":/backup \
+ alpine sh -c "rm -rf /volume/* && tar -xzf /backup/${vol}.tar.gz -C /volume"
+ else
+ echo "⚠️ No archive found for $vol, skipping."
+ fi
+done
+
+echo "🚀 Starting all containers..."
+docker start $(docker ps -aq) || true
+
+echo "✅ Restore complete and containers started!"
diff --git a/hurricane/pi-hole.yml b/hurricane/pi-hole.yml
new file mode 100644
index 0000000..9991bc7
--- /dev/null
+++ b/hurricane/pi-hole.yml
@@ -0,0 +1,29 @@
+version: '3.8'
+
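+# Note: despite the filename (pi-hole.yml), this stack deploys AdGuard Home rather than Pi-hole.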
+services:
+ adguardhome:
+ image: adguard/adguardhome
+ container_name: adguardhome
+ restart: always
+ volumes:
+ - adguard_work:/opt/adguardhome/work
+ - adguard_conf:/opt/adguardhome/conf
+ ports:
+ - "53:53/tcp"
+ - "53:53/udp"
+ - "67:67/udp"
+ - "68:68/udp"
+ - "8082:80/tcp"
+ - "8443:443/tcp"
+ - "8443:443/udp"
+ - "3000:3000/tcp"
+ - "853:853/tcp"
+ - "784:784/udp"
+ - "853:853/udp"
+ - "8853:8853/udp"
+ - "5443:5443/tcp"
+ - "5443:5443/udp"
+
+volumes:
+ adguard_work:
+ adguard_conf:
diff --git a/hurricane/pivpn-fedora-nas-setup.md b/hurricane/pivpn-fedora-nas-setup.md
new file mode 100644
index 0000000..2d7e2de
--- /dev/null
+++ b/hurricane/pivpn-fedora-nas-setup.md
@@ -0,0 +1,185 @@
+# 📘 Guide: Remote Access via PiVPN, Fedora Gateway, and Home NAS
+
+## 1. 🎯 Goal
+
+Allow a remote machine (like a VPS in the US or a laptop in Spain) to connect into your **PiVPN WireGuard server** and then securely access devices on your **home LAN** (such as a NAS).
+
+**End Flow:**
+```
+Remote Client (Spain/US VPS)
+ ⇩ (WireGuard VPN)
+PiVPN Server (UK, 10.138.135.1)
+ ⇩ (peer routing)
+Fedora VM (VPN: 10.138.135.3 / LAN: 192.168.1.40)
+ ⇩
+NAS (192.168.1.207, 192.168.1.216, etc.)
+```
+
+---
+
+## 2. 🛠️ Prerequisites
+
+Before starting, make sure you have:
+
+### Knowledge
+- Ability to log into your servers via SSH.
+- Basic Linux familiarity (running commands, editing files).
+
+### Tools
+- A **PiVPN server** already set up and working with WireGuard.
+- A **Fedora VM** on your home LAN with:
+ - One interface in the VPN (`10.138.135.3`).
+ - One interface in the home LAN (`192.168.1.40`).
+- A **NAS** or devices on your home LAN (`192.168.1.x`).
+- At least one **remote client** (US VPS, laptop in Spain, etc.).
+
+---
+
+## 3. 🚦 Step-by-Step Setup
+
+### Step 1: Connect Remote Client to VPN
+- Place config in `/etc/wireguard/vps.conf`.
+- Bring it up:
+ ```bash
+ sudo wg-quick up vps
+ ```
+- Verify:
+ ```bash
+ sudo wg show vps
+ ```
+
+✅ You should see a handshake with the PiVPN server.
+
+---
+
+### Step 2: Ensure PiVPN allows peer-to-peer
+By default, VPN clients can only talk to the server. Enable forwarding:
+
+1. On **PiVPN server** (`/etc/wireguard/wg0.conf`):
+ ```ini
+ PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -A FORWARD -o wg0 -j ACCEPT
+ PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -D FORWARD -o wg0 -j ACCEPT
+ ```
+2. Apply:
+ ```bash
+ sudo sysctl -w net.ipv4.ip_forward=1
+ sudo systemctl restart wg-quick@wg0
+ ```
+
+---
+
+### Step 3: Advertise the home LAN via Fedora
+On the **PiVPN server**, edit the Fedora peer:
+
+```ini
+[Peer]
+PublicKey =
+AllowedIPs = 10.138.135.3/32, 192.168.1.0/24
+```
+
+This tells the VPN server: *traffic for 192.168.1.x should go to Fedora*.
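+
+To apply the edited peer without restarting the whole interface (which would drop existing tunnels), one option, assuming `wg0` is managed by wg-quick on the PiVPN server, is:
+
+```bash
+sudo bash -c 'wg syncconf wg0 <(wg-quick strip wg0)'
+```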
+
+---
+
+### Step 4: Route home LAN via VPN clients
+On your **remote client** (US VPS, Spain laptop), edit config:
+
+```ini
+[Peer]
+PublicKey =
+AllowedIPs = 10.138.135.0/24, 192.168.1.0/24
+```
+
+Reconnect:
+```bash
+sudo wg-quick down vps
+sudo wg-quick up vps
+```
+
+Now the client knows to send home LAN traffic into the VPN.
+
+---
+
+### Step 5: Enable forwarding & NAT on Fedora
+Fedora needs to pass packets between VPN (`fedora`) and LAN (`ens18`).
+
+```bash
+# Enable forwarding
+sudo sysctl -w net.ipv4.ip_forward=1
+
+# Allow forwarding
+sudo iptables -A FORWARD -i fedora -o ens18 -j ACCEPT
+sudo iptables -A FORWARD -i ens18 -o fedora -j ACCEPT
+
+# NAT VPN subnet → Fedora LAN IP
+sudo iptables -t nat -A POSTROUTING -s 10.138.135.0/24 -o ens18 -j MASQUERADE
+```
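+
+These settings do not survive a reboot on their own. A minimal way to persist the forwarding flag (the NAT rule itself is covered in section 4) is a sysctl drop-in:
+
+```bash
+echo 'net.ipv4.ip_forward = 1' | sudo tee /etc/sysctl.d/99-ipforward.conf
+sudo sysctl --system
+```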
+
+---
+
+### Step 6: Test connectivity
+From **remote client**:
+```bash
+ping 192.168.1.40 # Fedora LAN IP
+ping 192.168.1.207 # NAS
+ping 192.168.1.216 # Another LAN device
+```
+
+✅ If Fedora responds but NAS doesn’t → NAT rule missing.
+✅ If all respond → setup complete.
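+
+Beyond ping, you can check that the NAS service itself answers; for example, assuming the NAS exposes SMB on TCP 445:
+
+```bash
+nc -vz 192.168.1.207 445
+```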
+
+---
+
+## 4. 🔄 Making NAT Permanent (Fedora)
+
+By default, Fedora forgets iptables rules after reboot.
+
+### Option A (simple, legacy)
+```bash
+sudo dnf install -y iptables-services
+sudo service iptables save
+sudo systemctl enable iptables
+```
+
+### Option B (modern, nftables)
+Edit `/etc/nftables.conf`:
+```nft
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 100;
+ ip saddr 10.138.135.0/24 oifname "ens18" masquerade
+ }
+}
+```
+
+Apply:
+```bash
+sudo systemctl enable nftables
+sudo systemctl restart nftables
+```
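+
+You can verify the rule is loaded with:
+
+```bash
+sudo nft list table ip nat
+```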
+
+---
+
+## 5. 🔎 Troubleshooting Flow
+
+```
+[1] VPN up? → sudo wg show
+ ↓
+[2] Can ping Fedora VPN IP (10.138.135.3)?
+ ↓
+[3] Can ping Fedora LAN IP (192.168.1.40)?
+ ↓
+[4] Can ping NAS (192.168.1.x)?
+ ↙ ↘
+ No → Check NAT Yes → Success 🎉
+```
+
+---
+
+## 6. ✅ End State
+
+- Remote clients reach Fedora and all devices on home LAN.
+- Fedora acts as NAT gateway.
+- NAT persists across reboot.
+- SSH works on Fedora via port `54321`.
+- No DNS/IP leaks — traffic exits via PiVPN server.
diff --git a/hurricane/qbt.sh b/hurricane/qbt.sh
new file mode 100644
index 0000000..9f5b735
--- /dev/null
+++ b/hurricane/qbt.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# Run qBittorrent (hotio image) with its built-in PIA WireGuard VPN and automatic port forwarding.
+docker run -d \
+ --name=qbittorrent \
+ -e PUID=1000 \
+ -e PGID=1000 \
+ -e TZ="Europe/London" \
+ -e VPN_ENABLED=true \
+ -e VPN_PROVIDER=pia \
+ -e VPN_PIA_USER=p0363376 \
+ -e VPN_PIA_PASS=cq89D59uVf \
+ -e VPN_PIA_PREFERRED_REGION \
+ -e VPN_AUTO_PORT_FORWARD=true \
+ -e VPN_LAN_NETWORK="100.64.0.0/10,172.18.0.0/16,192.168.0.0/24" \
+ -e VPN_NAMESERVERS="194.169.169.169" \
+ -p 8080:8080 \
+ -v ./qbittorrent:/config \
+ --cap-add=NET_ADMIN \
+ --device /dev/net/tun:/dev/net/tun \
+ --sysctl net.ipv4.conf.all.src_valid_mark=1 \
+ --sysctl net.ipv6.conf.all.disable_ipv6=1 \
+ --sysctl net.ipv4.conf.all.rp_filter=2 \
+ --sysctl net.ipv4.conf.default.rp_filter=2 \
+ ghcr.io/hotio/qbittorrent:latest
\ No newline at end of file
diff --git a/hurricane/qbt.yml b/hurricane/qbt.yml
new file mode 100644
index 0000000..3a341db
--- /dev/null
+++ b/hurricane/qbt.yml
@@ -0,0 +1,39 @@
+services:
+ qbittorrent:
+ container_name: qbittorrent
+ image: ghcr.io/hotio/qbittorrent
+ ports:
+ - "8000:8080"
+ environment:
+ - PUID=1000
+ - PGID=1000
+ - UMASK=002
+ - TZ=Etc/UTC
+ - WEBUI_PORTS=8080/tcp,8080/udp
+ - VPN_ENABLED=true
+ - VPN_CONF=wg0
+ - VPN_PROVIDER=pia
+ - VPN_LAN_NETWORK=192.168.4.0/24
+ - VPN_LAN_LEAK_ENABLED=false
+ - VPN_EXPOSE_PORTS_ON_LAN
+ - VPN_AUTO_PORT_FORWARD=true
+ - VPN_AUTO_PORT_FORWARD_TO_PORTS=
+ - VPN_KEEP_LOCAL_DNS=false
+ - VPN_FIREWALL_TYPE=auto
+ - VPN_HEALTHCHECK_ENABLED=true
+ - VPN_PIA_USER=p0363376
+ - VPN_PIA_PASS=cq89D59uVf
+ - VPN_PIA_PREFERRED_REGION
+ - VPN_PIA_DIP_TOKEN=no
+ - VPN_PIA_PORT_FORWARD_PERSIST=false
+ - PRIVOXY_ENABLED=false
+ - UNBOUND_ENABLED=false
+ volumes:
+ - /root/data/qbt/config:/config
+ - /root/data/qbt/data:/data
+ cap_add:
+ - NET_ADMIN
+ sysctls:
+ - net.ipv4.conf.all.src_valid_mark=1
+ - net.ipv6.conf.all.disable_ipv6=1
+ restart: always
\ No newline at end of file
diff --git a/hurricane/restic-backup.sh b/hurricane/restic-backup.sh
new file mode 100644
index 0000000..d9450da
--- /dev/null
+++ b/hurricane/restic-backup.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# === Restic Config ===
+export RESTIC_REPOSITORY="/mnt/windows/OneDrive/backup/pi"
+export RESTIC_PASSWORD_FILE="/root/.restic-pass"
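+# Assumes /root/.restic-pass already exists (chmod 600) and contains only the repository password.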
+
+# === Logging ===
+LOGFILE="/var/log/restic-backup.log"
+
+echo "[$(date)] === Restic backup started ===" | tee -a "$LOGFILE"
+
+# === Run backup ===
+restic backup \
+ /mnt/data/docker/ \
+ /mnt/data/nextcloud/ \
+ >> "$LOGFILE" 2>&1
+
+# === Retention: Keep only last 2 days ===
+restic forget \
+ --keep-within 48h \
+ --prune \
+ >> "$LOGFILE" 2>&1
+
+echo "[$(date)] === Restic backup finished ===" | tee -a "$LOGFILE"
diff --git a/hurricane/restic.sh b/hurricane/restic.sh
new file mode 100644
index 0000000..d8863a7
--- /dev/null
+++ b/hurricane/restic.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+export RESTIC_PASSWORD='Shan33779488@@'
+export RESTIC_REPOSITORY='/mnt/raid1/backup/restic'
+
+LOG_FILE="/var/log/restic_backup.log"
+BACKUP_DATE=$(date +%F)
+
+# Safety check
+if ! mountpoint -q /mnt/raid1; then
+ echo "$(date): RAID not mounted. Aborting." >> "$LOG_FILE"
+ exit 1
+fi
+
+# Init repo if not exists
+if [ ! -d "$RESTIC_REPOSITORY" ]; then
+ restic init >> "$LOG_FILE" 2>&1
+fi
+
+{
+ echo "$(date): Starting Restic backup..."
+ restic backup /var/lib/nextcloud /var/lib/docker --tag "$BACKUP_DATE" --verbose
+
+ echo "$(date): Forgetting old backups..."
+ restic forget --keep-last 1 --prune
+
+ echo "$(date): Checking repo integrity..."
+ restic check
+
+ echo "$(date): Backup complete."
+} >> "$LOG_FILE" 2>&1
diff --git a/hurricane/setup-samba.sh b/hurricane/setup-samba.sh
new file mode 100644
index 0000000..8f98040
--- /dev/null
+++ b/hurricane/setup-samba.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# === PROMPT USER ===
+read -rp "📂 Enter the folder you want to share (absolute path): " SHARE_PATH
+read -rp "📍 Enter the mount point name (e.g., data, backups, media): " MOUNT_POINT
+read -rp "👤 Enter the Samba username: " SMB_USER
+
+# Ensure SHARE_PATH is absolute
+if [[ "$SHARE_PATH" != /* ]]; then
+ echo "❌ Please provide an absolute path (starting with /)"
+ exit 1
+fi
+
+# Detect distro family
+if [ -f /etc/os-release ]; then
+ . /etc/os-release
+ DISTRO_FAMILY=$ID
+else
+ echo "❌ Cannot detect OS type"
+ exit 1
+fi
+
+install_samba() {
+ if ! command -v smbd >/dev/null 2>&1 && ! command -v smb >/dev/null 2>&1; then
+ echo "📦 Installing Samba..."
+ case "$DISTRO_FAMILY" in
+ ubuntu|debian)
+ apt-get update -y
+ DEBIAN_FRONTEND=noninteractive apt-get install -y samba samba-common-bin
+ ;;
+ almalinux|rocky|centos|rhel|fedora)
+ dnf install -y samba samba-client
+ ;;
+ *)
+ echo "❌ Unsupported distro: $DISTRO_FAMILY"
+ exit 1
+ ;;
+ esac
+ fi
+}
+
+# Install Samba
+install_samba
+
+# Stop AD-DC service if it’s installed
+systemctl disable --now samba-ad-dc 2>/dev/null || true
+
+# Ensure directory exists
+mkdir -p "$SHARE_PATH"
+
+# Ensure user exists (no home, no login)
+if ! id -u "$SMB_USER" >/dev/null 2>&1; then
+ echo "ℹ️ Creating system user $SMB_USER (no home, no login)"
+ useradd -M -s /usr/sbin/nologin "$SMB_USER"
+fi
+
+# Set directory ownership
+chown -R "$SMB_USER":"$SMB_USER" "$SHARE_PATH"
+
+# Backup smb.conf once
+if [ -f /etc/samba/smb.conf ] && [ ! -f /etc/samba/smb.conf.bak ]; then
+ cp -a /etc/samba/smb.conf /etc/samba/smb.conf.bak
+fi
+
+# Use the mount point name as the share name
+SHARE_NAME="$MOUNT_POINT"
+
+# Remove any old section with same name
+awk -v name="[$SHARE_NAME]" '
+ BEGIN {skip=0}
+ /^\[/ {
+ if ($0 == name) {skip=1; next}
+ else {skip=0}
+ }
+ skip==0 {print}
+' /etc/samba/smb.conf > /etc/samba/smb.conf.tmp
+mv /etc/samba/smb.conf.tmp /etc/samba/smb.conf
+
+# Append new share config
+cat <<EOF >> /etc/samba/smb.conf
+
+[$SHARE_NAME]
+ path = $SHARE_PATH
+ browseable = yes
+ read only = no
+ guest ok = no
+ valid users = $SMB_USER
+ create mask = 0664
+ directory mask = 0775
+EOF
+
+# Validate config
+testparm -s >/dev/null
+
+# Prompt for Samba password
+echo "🔑 Set a Samba password for $SMB_USER:"
+smbpasswd -a "$SMB_USER"
+smbpasswd -e "$SMB_USER"
+
+# Open firewall
+if command -v ufw >/dev/null 2>&1; then
+ ufw allow Samba || true
+elif command -v firewall-cmd >/dev/null 2>&1; then
+ firewall-cmd --permanent --add-service=samba || true
+ firewall-cmd --reload || true
+fi
+
+# Enable and start services
+case "$DISTRO_FAMILY" in
+ ubuntu|debian)
+ systemctl enable --now smbd
+ systemctl enable --now nmbd 2>/dev/null || true
+ ;;
+ almalinux|rocky|centos|rhel|fedora)
+ systemctl enable --now smb
+ systemctl enable --now nmb
+ ;;
+esac
+
+IP_ADDR="$(hostname -I | awk '{print $1}')"
+
+echo
+echo "✅ Samba share created!"
+echo " Path: $SHARE_PATH"
+echo " User: $SMB_USER"
+echo " Share: \\\\$IP_ADDR\\$SHARE_NAME"
+echo
+echo "Test with: smbclient -L //$IP_ADDR -U $SMB_USER"
diff --git a/hurricane/sshfs_mount.sh b/hurricane/sshfs_mount.sh
new file mode 100644
index 0000000..daef08d
--- /dev/null
+++ b/hurricane/sshfs_mount.sh
@@ -0,0 +1 @@
+#!/usr/bin/env bash
+sudo sshfs -o allow_other,port=54321,reconnect,ServerAliveInterval=15,ServerAliveCountMax=3 root@hurricane.tail872446.ts.net:/mnt/raid /mnt/raid
diff --git a/hurricane/wg-easy-hostbrr.yml b/hurricane/wg-easy-hostbrr.yml
new file mode 100644
index 0000000..8521826
--- /dev/null
+++ b/hurricane/wg-easy-hostbrr.yml
@@ -0,0 +1,31 @@
+services:
+ wg-easy:
+ container_name: wg-easy
+ image: ghcr.io/wg-easy/wg-easy
+ environment:
+ WG_HOST: hostbrr.azuredevops.co.uk
+ PASSWORD_HASH: '$$2a$$12$$nMu2fSWPkmefVLdIf68Qle/QK/9oZx/Jmp2HfjZmo6R8V/AtVKD82'
+ PORT: 51821
+ WG_PORT: 53298
+ LANG: en
+ WG_DEFAULT_DNS: '94.140.14.14, 94.140.15.15'
+ ports:
+ - "53298:53298/udp" # Changed from 51820 to 53298
+ - "51821:51821/tcp"
+ volumes:
+ - wg-easy-data:/etc/wireguard
+ cap_add:
+ - NET_ADMIN
+ - SYS_MODULE
+ sysctls:
+ net.ipv4.conf.all.src_valid_mark: 1
+ net.ipv4.ip_forward: 1
+ restart: always
+ networks:
+ - hostbrr_hurricane
+volumes:
+ wg-easy-data:
+
+networks:
+ hostbrr_hurricane:
+ external: true # This assumes the network already exists. You can create it with: docker network create hostbrr_hurricane
diff --git a/jellyfin/jellyfin.yml b/jellyfin/jellyfin.yml
new file mode 100644
index 0000000..6fa6604
--- /dev/null
+++ b/jellyfin/jellyfin.yml
@@ -0,0 +1,19 @@
+services:
+ jellyfin:
+ image: lscr.io/linuxserver/jellyfin:latest
+ container_name: jellyfin
+ environment:
+ - PUID=1000
+ - PGID=1000
+ - TZ=Europe/London
+ volumes:
+ - /root/docker/jellyfin/config:/config
+ - /mnt/data/media/tv:/data/tvshows
+ - /mnt/data/media/movies:/data/movies
+ restart: unless-stopped
+ networks:
+ - hurricane
+
+networks:
+ hurricane:
+ external: true
\ No newline at end of file
diff --git a/ovh/compose.yml b/ovh/compose.yml
new file mode 100644
index 0000000..e69de29
diff --git a/zeshancv/Dockerfile b/zeshancv/Dockerfile
new file mode 100644
index 0000000..4e54eea
--- /dev/null
+++ b/zeshancv/Dockerfile
@@ -0,0 +1,6 @@
+FROM caddy:2-alpine
+
+# Copy your static site into Caddy's default web root
+COPY . /usr/share/caddy
+
+EXPOSE 80
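+
+# The compose file in this directory expects a locally built image named "zeshan":
+#   docker build -t zeshan .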
diff --git a/zeshancv/docker-compose.yml b/zeshancv/docker-compose.yml
new file mode 100644
index 0000000..6ad48d7
--- /dev/null
+++ b/zeshancv/docker-compose.yml
@@ -0,0 +1,13 @@
+services:
+ zeshancv:
+ image: zeshan
+ container_name: zeshancv
+ restart: unless-stopped
+ expose:
+ - "80" # internal only; NPM will reach this over the huricane network
+ networks:
+ - hurricane
+
+networks:
+ hurricane:
+ external: true # assumes the network already exists: `docker network create hurricane`
diff --git a/zeshancv/index.html b/zeshancv/index.html
new file mode 100644
index 0000000..f2fc142
--- /dev/null
+++ b/zeshancv/index.html
@@ -0,0 +1,564 @@
+
+
+
+
+ Zeshan Tariq – Azure DevOps · SRE · SOC · Kubernetes Engineer
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Zeshan Tariq – Azure DevOps, SRE, SOC & Kubernetes Engineer
+
+
+
+
+
+
+ Zeshan Tariq · DevOps · SRE · SOC
+
+
+
+ Available for remote contract & permanent roles
+
+
+
+
+
+
+
+
+
+ Profile: SOC / SRE / DevOps / DevSecOps / Kubernetes
+
+
+
+
+
+
+ Who are you and what do you do?
+
+
U
+
+
+
+
+
+
+
Zeshan Tariq
+
SOC · SRE · DevOps · DevSecOps · Kubernetes
+
+ I’m a cloud & platform engineer focused on Azure ,
+ Kubernetes , and secure automation —
+ combining SOC practices with SRE principles
+ to keep systems reliable and secure.
+
+
+
Azure · AKS
+
Kubernetes · Docker
+
Terraform · CI/CD
+
Sentinel · SOC
+
+
+
+
+
+
+
+ Where can I see your full experience and roles?
+
+
U
+
+
+
+
+
+
+
+
+ In practical terms, what kind of work do you do?
+
+
U
+
+
+
+
+
+
+ I design and operate Azure & AKS platforms , automate
+ infrastructure with Terraform/Bicep , build
+ CI/CD pipelines , and use SOC tooling
+ and SRE metrics to keep services secure and reliable.
+
+
+
+
+
+
+ Show me a small code sample that represents how you think.
+
+
U
+
+
+
+
+
+
+ A condensed TypeScript-style example that reflects what I do with Azure, AKS,
+ and translating requirements into platform configuration:
+
import { AzureKubernetesServices } from '@azure/kubernetes-engine';
+import { Engineer, Experience } from '@professional/core';
+
+export class AzureKubernetesEngineer implements Engineer {
+ constructor(private azureServices: AzureKubernetesServices) {}
+
+ experience: Experience = {
+ years: 8,
+ sectors: ['government', 'consultancy', 'finance'],
+ specialties: [
+ 'application development',
+ 'configuration management',
+ 'public cloud deployment',
+ ],
+ };
+
+ deploySolution(solution: any) {
+ return this.azureServices.deploy({
+ solution,
+ orchestration: 'Kubernetes',
+ cloud: 'Azure',
+ });
+ }
+
+ bridgeBusinessAndTechnology(requirements: any) {
+ return this.azureServices.configure(requirements);
+ }
+}
+
+
+
+
+
+
+ How do I contact you about a role?
+
+
U
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/zeshancv/zt1125.docx b/zeshancv/zt1125.docx
new file mode 100644
index 0000000..3cfed45
Binary files /dev/null and b/zeshancv/zt1125.docx differ