diff --git a/.gitea/workflows/action.yml b/.gitea/workflows/action.yml index 1a6ece3..ffad8d9 100644 --- a/.gitea/workflows/action.yml +++ b/.gitea/workflows/action.yml @@ -20,6 +20,8 @@ runs: - name: Configure container id: init run: | + repository="${{ inputs.repo }}" + echo "repo=${repository##*/}" >> "$GITEA_ENV" source repo/config.env echo "ip=$IP" >> $GITEA_ENV echo "id=$ID" >> $GITEA_ENV @@ -51,18 +53,18 @@ runs: with: repository: "${{ inputs.repo }}" ref: "${{ gitea.ref_name }}" - path: repo + path: "${{ env.repo }}" - name: Checkout libraries uses: https://gitea.com/actions/checkout@v4 with: repository: 'main/libraries' ref: 'main' - path: 'repo/libraries' + path: "${{ env.repo }}/libraries" - name: Configure container run: | - tar cz . | ssh -o StrictHostKeyChecking=no -i /share/.ssh/${{ env.id }} config@${{ env.ip }} \ - 'sudo tar xz -C /tmp && sudo IP=${{ env.ip }} ID=${{ env.id }} HOST=${{ vars.HOST }} LOGIN=${{ vars.LOGIN }} PASSWORD=${{ vars.PASSWORD }} PWD="$(pwd)" \ - cinc-client -l info --local-mode --chef-license accept --config-option cookbook_path="/tmp" $( [[ -f "/tmp/repo/config.json" ]] && echo "-j /tmp/repo/config.json" ) -o repo' + tar -c "${{ env.repo }}" -cz . | ssh -o StrictHostKeyChecking=no -i "/share/.ssh/${{ env.id }}" "config@${{ env.ip }}" 'sudo tar xz -C /tmp + sudo -E IP=${{ env.ip }} ID=${{ env.id }} HOST=${{ vars.HOST }} LOGIN=${{ vars.LOGIN }} PASSWORD=${{ vars.PASSWORD }} PWD="$(pwd)" \ + cinc-client --local-mode $CONFIG_ARGS --config-option cookbook_path="/tmp" -o "${{ env.repo }}"' shell: bash diff --git a/.gitea/workflows/pipeline.yml b/.gitea/workflows/pipeline.yml index cf31f81..9419403 100644 --- a/.gitea/workflows/pipeline.yml +++ b/.gitea/workflows/pipeline.yml @@ -2,7 +2,6 @@ on: workflow_dispatch: push: branches: [ release, main ] - jobs: init: @@ -51,11 +50,11 @@ jobs: boot: ${{ needs.init.outputs.boot }} mount: ${{ needs.init.outputs.mount }} share: true - if: ${{ gitea.ref == 'refs/heads/release' }} + if: ${{ gitea.ref == 'refs/heads/release' }} share: runs-on: [ "shell" ] - needs: init + needs: [ init, base ] steps: - name: Checkout repository uses: https://gitea.com/actions/checkout@v4 @@ -70,15 +69,14 @@ jobs: login: ${{ vars.LOGIN }} password: ${{ vars.PASSWORD }} run: | - tar -c config -cz . | \ - ssh -o StrictHostKeyChecking=no -i "/share/.ssh/${id}" "config@${ip}" \ - 'sudo tar xz -C /tmp && sudo env IP="'"${ip}"'" ID="'"${id}"'" MOUNT="'"${mount}"'" LOGIN="'"${login}"'" PASSWORD="'"${password}"'" \ - cinc-client -l info --local-mode --chef-license accept --config-option cookbook_path="[\"'"/tmp/config"'\", \"'"/tmp/config/libs"'\"]" -o share' - if: ${{ gitea.ref == 'refs/heads/release' }} + tar -c config -cz . | ssh -o StrictHostKeyChecking=no -i "/share/.ssh/${id}" "config@${ip}" 'sudo tar xz -C /tmp + sudo -E IP="'"${ip}"'" ID="'"${id}"'" LOGIN="'"${login}"'" PASSWORD="'"${password}"'" \ + cinc-client --local-mode --config-option cookbook_path="[\"/tmp/config\", \"/tmp/config/libs\"]" -o share' + if: ${{ gitea.ref != 'refs/heads/release' || success() }} config: runs-on: [ "shell" ] - needs: [init] + needs: [ init, base ] steps: - name: Checkout repository uses: https://gitea.com/actions/checkout@v4 @@ -91,11 +89,10 @@ jobs: id: ${{ needs.init.outputs.id }} login: ${{ vars.LOGIN }} password: ${{ vars.PASSWORD }} + email: ${{ vars.EMAIL }} run: | - git -C config submodule update --init --recursive - tar -c config -cz . 
| \ - ssh -o StrictHostKeyChecking=no -i "/share/.ssh/${id}" "config@${ip}" \ - 'sudo tar xz -C /tmp && sudo env \ - IP="'"${ip}"'" ID="'"${id}"'" LOGIN="'"${login}"'" PASSWORD="'"${password}"'" PWD="/tmp/config" \ - cinc-client -l info --local-mode --chef-license accept --config-option \ - cookbook_path="/tmp/config" $( [ -f /tmp/config/local/config.json ] && echo "-j /tmp/config/local/config.json" ) -o config' \ No newline at end of file + git -C config submodule update --remote --recursive + tar -c config -cz . | ssh -o StrictHostKeyChecking=no -i "/share/.ssh/${id}" "config@${ip}" 'sudo tar xz -C /tmp + CONFIG_ARGS=; [ -f /tmp/config/local/config.json ] && CONFIG_ARGS="-j /tmp/config/local/config.json" + sudo -E IP="'"${ip}"'" ID="'"${id}"'" LOGIN="'"${login}"'" PASSWORD="'"${password}"'" EMAIL="'"${email}"'" PWD=/tmp/config \ + cinc-client --local-mode $CONFIG_ARGS --config-option cookbook_path="/tmp/config" -o config' diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5d8cdf4..5d90be4 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -75,8 +75,8 @@ jobs: - name: Mock config.json run: | cat < ./local/config.json - { "proxmox": { "host": "localhost", "api": "test", "user": "test", "token": "test", "secret": "test" }, - "login": "test", "password": "test", "email": "test@example.com", "pub": "" } + { "proxmox": { "host": "localhost", "user": "test", "password": "test" }, + "login": "test", "password": "test", "email": "test@example.com" } EOF - name: Deploy config env: @@ -84,6 +84,6 @@ jobs: ID: ${{ needs.init.outputs.id }} PWD: ${{ github.workspace }} run: | - sudo env IP="$IP" ID="$ID" PWD="$PWD" \ - cinc-client -l info --local-mode --chef-license accept --config-option cookbook_path="." \ - -j ./local/config.json -o config + sudo env IP="$IP" ID="$ID" PWD="$PWD" HOME="/app" \ + cinc-client -l info --local-mode --chef-license accept --config-option \ + cookbook_path="." 
-j ./local/config.json -o config diff --git a/.gitignore b/.gitignore index fb180bb..a65d4a9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,7 @@ **/.git/ -config.json - +local/config.json local/*.hash .DS_Store -.idea \ No newline at end of file +.idea diff --git a/README.md b/README.md index bc3f8dd..8a2497a 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ -[![Build Status](https://github.com/stevius10/Proxmox-GitOps/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/stevius10/Proxmox-GitOps/actions/workflows/build.yml) -[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![Build Status](https://github.com/stevius10/Proxmox-GitOps/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/stevius10/Proxmox-GitOps/actions/workflows/build.yml) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) + +[![PVE 8.4](https://img.shields.io/badge/PVE-8.4-orange)](https://www.proxmox.com/) [![PVE 9.0](https://img.shields.io/badge/PVE-9.0-orange)](https://www.proxmox.com/) + ## Table of Contents - [Overview](#overview) @@ -8,10 +10,11 @@ - [Design](#design) - [Trade-offs](#trade-offs) - [Usage](#usage) - - [Requirements](#requirements) - [Lifecycle](#lifecycle) + - [Self-contained Monorepository](#self-contained-monorepository) + - [Requirements](#requirements) - [Getting Started](#getting-started) - - [Development and Container Extension](#development-and-container-extension) + - [Development and Extension](#development-and-extension) --- @@ -56,41 +59,44 @@ This system implements stateless infrastructure management on Proxmox VE, ensuri - **Git Convention vs. Infrastructure State:** Uses Git as a state engine rather than versioning in volatile, stateless contexts. Monorepository representation, however, encapsulates the entire infrastructure as a self-contained asset suited for version control. +- **API Token Restriction vs. Automation:** With Proxmox 9, stricter privilege separation prevents privileged containers from mounting shares via API token; automation capabilities, however, are mainly within the root user context. As a consequence, root user-based API access takes precedence over token-based authentication. + ## Usage -### Requirements +### Lifecycle -- Docker -- Proxmox VE 8.4 -- Proxmox API token -- See [Wiki](https://github.com/stevius10/Proxmox-GitOps/wiki) for recommendations +#### Self-contained Monorepository -### Lifecycle +- `git clone --recurse-submodules` + - e. 
g., **Version-Controlled Mirroring** -- **Self-contained Monorepository** Artifact for **Version-Controlled Mirroring** - - `git clone --recurse-submodules` (store /share for persistence, disable network access for security) +- **Backup**: See [Self-contained Monorepository](#self-contained-monorepository) + - store /share for persistence -- **Backup**: See previous +- **Update**: See [Self-contained Monorepository](#self-contained-monorepository), and redeploy merged -- **Update**: See previous, and redeploy merged +- **Rollback**: See [Self-contained Monorepository](#self-contained-monorepository), or set `snapshot` branch to `release` at runtime -- **Rollback**: See previous, or set `snapshot` to `release` at runtime +### Requirements + +- Docker +- Proxmox VE 8.4-9.0 +- See [Wiki](https://github.com/stevius10/Proxmox-GitOps/wiki) for recommendations ### Getting Started -- Set **credentials and Proxmox API token** in [`local/.config.json`](local/.config.json) as `./local/config.json` +- Set **Proxmox** and **global usage credentials** in [`local/.config.json`](local/.config.json) as [`./local/config.json`](https://github.com/stevius10/Proxmox-GitOps/wiki/Example-Configuration#file-localconfigjson) +- Ensure **container configuration** in [`config.env`](config.env) and [**verify storage**](https://github.com/stevius10/Proxmox-GitOps/wiki/Example-Configuration#file-configenv) - Run `./local/run.sh` for local Docker environment -- Accept the Pull Request at `localhost:8080/main/config/pulls/1` to deploy on Proxmox VE +- Accept the Pull Request at `localhost:8080/main/config` to deploy on Proxmox VE
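As an illustration, a minimal `./local/config.json` can follow the shape of the mock configuration used in the build workflow above; all values below are placeholders, and no fields beyond that mock are assumed:

```json
{
  "proxmox": { "host": "<proxmox-host>", "user": "<proxmox-user>", "password": "<proxmox-password>" },
  "login": "<admin-login>", "password": "<admin-password>", "email": "user@example.com"
}
```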

*(overview: "in a nutshell")*

-#### Development and Container Extension - -Reusable container definitions are stored in the [`libs`](libs) folder. +#### Development and Extension -Copy an example container (like [`libs/broker`](libs/broker) or [`libs/proxy`](libs/proxy)) as a template, or create a new container lib from scratch and follow these steps: +Reusable container definitions are stored in the [`libs`](libs) folder. Copy an example container (like [`libs/broker`](libs/broker) or [`libs/proxy`](libs/proxy)) as a template, or create a new container lib from scratch and follow these steps: - Add `config.env` to your container's root directory (e.g. `./libs/apache`): ```dotenv @@ -104,25 +110,8 @@ DISK=local-lvm:8 BOOT=yes ``` -- Paste generic pipeline in container's `.gitea/workflows`: -```yaml -on: - workflow_dispatch: - push: - branches: [ release, main, develop ] - -jobs: - include: - runs-on: shell - steps: - - id: init - uses: main/config/.gitea/workflows@main - with: - repo: ${{ gitea.repository }} - ref: ${{ gitea.ref_name }} -``` - - Add your cookbook to the container definition root: + ```ruby # libs/apache/recipes/default.rb package 'apache2' @@ -130,18 +119,20 @@ package 'apache2' file '/var/www/html/index.html' do content "
Hello from #{Env.get(node, 'login')}
" mode '0644' - owner 'app' # see base/roles/base/tasks/main.yml - group 'app' # each container is configured identically + owner Default.user(self) # see base/roles/base/tasks/main.yml + group Default.group(self) # each container is configured identically end -Common.application 'apache2' # provided by convention +Common.application(self, 'apache2') # provided by convention ``` - Optionally, use `Env.get()` and `Env.set()` to access Gitea environment variables. -- a) **Deploy**: Push to the `release` branch of a new repository +

+ Global Environment

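As a sketch of these optional helpers (attribute names here are made up for illustration): `Env.get` resolves a value from node attributes, then the process environment, then a Gitea variable, and `Env.set` writes an organization-level variable through the Gitea API, as implemented in `config/libraries/env.rb`:

```ruby
# hypothetical usage inside libs/apache/recipes/default.rb
admin = Env.get(node, 'login')              # node['login'] -> ENV['LOGIN'] -> Gitea variable 'login'
Env.set(node, 'apache_configured', 'true')  # creates or updates the Gitea org variable via the API
```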
-- b) **Add to Monorepository**: Redeploy +- Add to Monorepository and redeploy. The container can be tested locally running `./local/run.sh [container]` (_wip_) diff --git a/base/.gitea/workflows/action.yml b/base/.gitea/workflows/action.yml index 4189e1c..6cebea2 100644 --- a/base/.gitea/workflows/action.yml +++ b/base/.gitea/workflows/action.yml @@ -43,12 +43,13 @@ runs: ansible-playbook default.yml -e "ip=${{ inputs.ip }}" -e "id=${{ inputs.id }}" \ -e "hostname=${{ inputs.hostname }}" -e "cores=${{ inputs.cores }}" -e "memory=${{ inputs.memory }}" \ -e "swap=${{ inputs.swap }}" -e "disk=${{ inputs.disk }}" -e "boot=${{ inputs.boot }}" \ - -e "mount=${{ inputs.mount }}" -e "share=${{ inputs.share }}" + -e "mount=${{ inputs.mount }}" -e "share=${{ env.PROXMOX_PASSWORD && inputs.share || 'false' }}" env: HOST: ${{ vars.HOST }} LOGIN: ${{ vars.LOGIN }} PASSWORD: ${{ vars.PASSWORD }} PROXMOX_HOST: ${{ vars.PROXMOX_HOST }} PROXMOX_USER: ${{ vars.PROXMOX_USER }} + PROXMOX_PASSWORD: ${{ vars.PROXMOX_PASSWORD }} PROXMOX_TOKEN: ${{ vars.PROXMOX_TOKEN }} PROXMOX_SECRET: ${{ vars.PROXMOX_SECRET }} \ No newline at end of file diff --git a/base/default.yml b/base/default.yml index be1e98d..dd18ae3 100644 --- a/base/default.yml +++ b/base/default.yml @@ -3,220 +3,23 @@ hosts: localhost gather_facts: no vars: - os: "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" - keys_dir: "/share/.ssh" + os: "{{ OS | default('local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst') }}" + key_dir: "{{ KEYS_DIR | default('/share/.ssh') }}" tasks: - - name: Check container configuration - ansible.builtin.stat: - path: "config.json" - register: config_file - - - name: Load container configuration - ansible.builtin.include_vars: - file: "config.json" - name: config - when: config_file.stat.exists - - - name: Set Proxmox configuration - ansible.builtin.set_fact: - PROXMOX_HOST: "{{ lookup('env', 'PROXMOX_HOST') }}" - PROXMOX_USER: "{{ lookup('env', 'PROXMOX_USER') }}" - PROXMOX_TOKEN: "{{ lookup('env', 'PROXMOX_TOKEN') }}" - PROXMOX_SECRET: "{{ lookup('env', 'PROXMOX_SECRET') }}" - when: - - (config_file.stat.exists and config.proxmox is defined) or (not config_file.stat.exists) - - - name: Ensure container keys directory on host - ansible.builtin.file: - path: "{{ keys_dir }}" + - name: Ensure key directory + file: + path: "{{ key_dir }}" state: directory - name: Generate container key on host community.crypto.openssh_keypair: - path: "{{ keys_dir }}/{{ id }}" + path: "{{ key_dir }}/{{ id }}" type: ed25519 force: false - - name: Check container existence - community.general.proxmox_vm_info: - api_host: "{{ PROXMOX_HOST }}" - api_user: "{{ PROXMOX_USER }}" - api_token_id: "{{ PROXMOX_TOKEN }}" - api_token_secret: "{{ PROXMOX_SECRET }}" - api_port: 8006 - node: "pve" - vmid: "{{ id }}" - register: container_info - delegate_to: localhost - ignore_errors: yes - - - name: Set container existence - set_fact: - container_exists: "{{ container_info is success and (container_info.proxmox_vms | length) > 0 }}" - - - name: Stop container - community.general.proxmox: - api_host: "{{ PROXMOX_HOST }}" - api_user: "{{ PROXMOX_USER }}" - api_token_id: "{{ PROXMOX_TOKEN }}" - api_token_secret: "{{ PROXMOX_SECRET }}" - api_port: 8006 - vmid: "{{ id }}" - node: "pve" - state: stopped - force: yes - delegate_to: localhost - ignore_errors: yes - when: container_exists - - - name: Wait for container to be stopped - uri: - url: "https://{{ PROXMOX_HOST }}:8006/api2/json/nodes/pve/lxc/{{ id }}/status/current" - method: GET - headers: - 
Authorization: "PVEAPIToken={{ PROXMOX_USER }}!{{ PROXMOX_TOKEN }}={{ PROXMOX_SECRET }}" - validate_certs: no - register: container_status - until: container_status.json.data.status == "stopped" - retries: 5 - delay: 2 - ignore_errors: true - delegate_to: localhost - when: container_exists - - - name: Remove container - community.general.proxmox: - api_host: "{{ PROXMOX_HOST }}" - api_user: "{{ PROXMOX_USER }}" - api_token_id: "{{ PROXMOX_TOKEN }}" - api_token_secret: "{{ PROXMOX_SECRET }}" - api_port: 8006 - vmid: "{{ id }}" - node: "pve" - state: absent - delegate_to: localhost - ignore_errors: yes - register: removal - when: container_exists - - - name: Wait for container to be removed - uri: - url: "https://{{ PROXMOX_HOST }}:8006/api2/json/nodes/pve/lxc/{{ id }}/status/current" - method: GET - headers: - Authorization: "PVEAPIToken={{ PROXMOX_USER }}!{{ PROXMOX_TOKEN }}={{ PROXMOX_SECRET }}" - validate_certs: no - register: container_status - until: - - container_status.status == 500 - - "'does not exist' in container_status.json.message" - retries: 5 - delay: 4 - failed_when: false - delegate_to: localhost - when: container_exists - - - name: Set shared mounts - set_fact: - mounts: "{{ mounts | default({}) | combine({ 'mp' ~ idx: 'local:' ~ item.split(':')[1] ~ ',mp=/share/' ~ (item.split(':')[0] if item.split(':')[0] != 'share' else '') }) }}" - loop: "{{ (mount | default('')).split(',') | select('match', '^.+:.+$') }}" - loop_control: - index_var: idx - when: - - mount is defined - - (share | default(false, true) | bool) - - (mount | trim | length) > 0 - - - name: Create container - community.general.proxmox: - api_host: "{{ PROXMOX_HOST }}" - api_user: "{{ PROXMOX_USER }}" - api_token_id: "{{ PROXMOX_TOKEN }}" - api_token_secret: "{{ PROXMOX_SECRET }}" - api_port: 8006 - vmid: "{{ id }}" - node: "pve" - hostname: "{{ hostname }}" - ostemplate: "{{ os }}" - cores: "{{ cores }}" - memory: "{{ memory }}" - pubkey: "{{ lookup('file', [keys_dir, id ~ '.pub'] | path_join) }}" - swap: "{{ swap }}" - disk: "{{ disk }}" - netif: - net0: "name=eth0,gw=192.168.178.1,ip={{ ip }}/24,bridge=vmbr0" - mounts: "{{ mounts if (share | default(false, true) | bool) else {} }}" - unprivileged: >- - {{ true if (share | default(false) | bool) else false - if (mount is defined and (mount | trim | length > 0)) else true }} - onboot: "{{ boot }}" - state: present - delegate_to: localhost - - - name: Wait for container to be created - uri: - url: "https://{{ PROXMOX_HOST }}:8006/api2/json/nodes/pve/lxc/{{ id }}/config" - method: GET - headers: - Authorization: "PVEAPIToken={{ PROXMOX_USER }}!{{ PROXMOX_TOKEN }}={{ PROXMOX_SECRET }}" - validate_certs: no - register: container_status - until: - - container_status.status == 200 - - "'hostname' in container_status.json.data" - retries: 5 - delay: 6 - ignore_errors: yes - delegate_to: localhost - - - name: Start container - community.general.proxmox: - api_host: "{{ PROXMOX_HOST }}" - api_user: "{{ PROXMOX_USER }}" - api_token_id: "{{ PROXMOX_TOKEN }}" - api_token_secret: "{{ PROXMOX_SECRET }}" - api_port: 8006 - vmid: "{{ id }}" - hostname: "{{ hostname }}" - node: "pve" - state: started - delegate_to: localhost - - - name: Wait for container to be connectable - wait_for: - port: 22 - host: "{{ ip }}" - timeout: 180 - delegate_to: localhost - - - name: Check technical user configured - command: "ssh -o BatchMode=yes -o ConnectTimeout=5 -i {{ keys_dir }}/{{ id }} config@{{ ip }} echo success" - register: ssh_config_test - failed_when: false - changed_when: false 
- when: container_exists - - - name: Set technical user - set_fact: - ssh_user: "{{ 'config' if (container_exists and (ssh_config_test.rc | default(1)) == 0) else 'root' }}" - - - name: Add container - add_host: + - name: Container + ansible.builtin.include_role: name: container - id: "{{ id }}" - ansible_host: "{{ ip }}" - ansible_user: "{{ ssh_user }}" - ansible_ssh_private_key_file: "{{ [keys_dir, id] | path_join }}" - ansible_ssh_common_args: '-o StrictHostKeyChecking=no' - public_key: "{{ lookup('file', [keys_dir, id ~ '.pub'] | path_join) }}" - private_key: "{{ lookup('file', [keys_dir, id] | path_join) }}" - - - name: Reset known hosts - ansible.builtin.known_hosts: - name: "{{ ip }}" - state: absent - delegate_to: localhost - become: false - name: Remote container configuration hosts: container @@ -239,4 +42,4 @@ when: - not (share | default(false) | bool) - mount is defined - tags: mounts \ No newline at end of file + tags: mounts diff --git a/base/roles/base/files/environment b/base/roles/base/files/environment new file mode 100644 index 0000000..cffbfd4 --- /dev/null +++ b/base/roles/base/files/environment @@ -0,0 +1,8 @@ +CHEF_NODE_NAME=$(hostname -f) + +CHEF_LICENSE=accept + +CHEF_LOG_LEVEL=info +CHEF_LOG_LOCATION=STDOUT + +CHEF_CACHE_PATH=/var/cache/cinc \ No newline at end of file diff --git a/base/roles/base/files/.bashrc b/base/roles/base/files/profile.sh similarity index 100% rename from base/roles/base/files/.bashrc rename to base/roles/base/files/profile.sh diff --git a/base/roles/base/tasks/access.yml b/base/roles/base/tasks/access.yml new file mode 100644 index 0000000..f103c76 --- /dev/null +++ b/base/roles/base/tasks/access.yml @@ -0,0 +1,79 @@ +- name: SSH configuration + notify: Restart SSH + when: (configure_ssh | default(true) | bool) + block: + - name: Disable root login + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: "^#?PermitRootLogin" + line: "PermitRootLogin no" + backup: yes + state: present + + - name: Disable password authentication + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: "^#?PasswordAuthentication" + line: "PasswordAuthentication no" + backup: yes + state: present + + - name: Enable public key authentication + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: '^#?PubkeyAuthentication' + line: 'PubkeyAuthentication yes' + state: present + backup: yes + + - name: Authorized keys + block: + - name: Create global authorized keys + file: + path: /etc/ssh/authorized_keys + state: directory + owner: root + group: root + mode: "0755" + + - name: Set global authorized keys + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: '^#?AuthorizedKeysFile' + line: 'AuthorizedKeysFile /etc/ssh/authorized_keys/%u' + state: present + backup: yes + +- name: Container accessibility + block: + - name: Store container key + ansible.builtin.set_fact: + private_key: "{{ private_key ~ (private_key.endswith('\n') | ternary('', '\n')) }}" + public_key: "{{ public_key ~ (public_key.endswith('\n') | ternary('', '\n')) }}" + + - name: Save container keys + copy: + content: "{{ item.content }}" + dest: "{{ key_dir }}/{{ item.name }}" + owner: app + group: config + mode: "{{ item.mode }}" + loop: + - { name: "{{ id }}", content: "{{ private_key }}", mode: "0600" } + - { name: "{{ id }}.pub", content: "{{ public_key }}", mode: "0644" } + + - name: Verify container key + shell: "ssh-keygen -y -f {{ key_dir }}/{{ id }} | diff - {{ key_dir }}/{{ id }}.pub" + register: verify + changed_when: false + 
failed_when: verify.rc != 0 + + - name: Set container authorized keys + ansible.posix.authorized_key: + user: "{{ item }}" + key: "{{ public_key }}" + state: present + exclusive: no + manage_dir: false + path: "/etc/ssh/authorized_keys/{{ item }}" + loop: "{{ ssh_users }}" diff --git a/base/roles/base/tasks/defaults.yml b/base/roles/base/tasks/defaults.yml new file mode 100644 index 0000000..6a351ec --- /dev/null +++ b/base/roles/base/tasks/defaults.yml @@ -0,0 +1,31 @@ +- name: User defaults + block: + + - name: Set environment + copy: + src: environment + dest: /etc/environment + owner: root + group: root + mode: '0644' + + - name: Set profile + copy: + src: profile.sh + dest: /etc/profile.d/10-profile.sh + owner: root + group: root + mode: '0644' + + - name: User + block: + - name: Set root bashrc + copy: + dest: /root/.bashrc + content: | + for f in /etc/profile.d/*.sh; do + . "$f" + done + owner: root + group: root + mode: '0644' \ No newline at end of file diff --git a/base/roles/base/tasks/main.yml b/base/roles/base/tasks/main.yml index 618da86..c011dcd 100644 --- a/base/roles/base/tasks/main.yml +++ b/base/roles/base/tasks/main.yml @@ -1,5 +1,9 @@ - name: Base container configuration block: + - name: Set key directory + set_fact: + key_dir: "{{ KEYS_DIR | default('/share/.ssh') }}" + - name: Update system apt: update_cache: true @@ -13,14 +17,21 @@ name: "{{ default_packages }}" state: present + - name: Create default groups + group: + name: "{{ item }}" + state: present + loop: "{{ default_groups }}" + - name: Create default users user: name: "{{ item.name }}" shell: "{{ item.shell | default('/bin/bash') }}" groups: "{{ item.groups | default(omit) }}" - create_home: yes + create_home: "{{ item.create_home | default(true) }}" + home: "{{ item.home | default(omit) }}" state: present - loop: "{{ users }}" + loop: "{{ default_users }}" register: created_users - name: Configure Sudo @@ -36,127 +47,18 @@ create: yes loop: "{{ ssh_users }}" -- name: Install configuration management - block: - - name: Get latest Cinc version - ansible.builtin.uri: - url: https://downloads.cinc.sh/files/stable/cinc/ - return_content: yes - register: download_configuration_management_version - retries: 5 - delay: 3 - until: download_configuration_management_version.status == 200 - changed_when: false - - - name: Get configuration management version - ansible.builtin.set_fact: - cinc_version: "{{ download_configuration_management_version.content | regex_findall('href=\"(\\d+\\.\\d+\\.\\d+)/\"') | sort | last }}" - - - name: Download configuration management - ansible.builtin.get_url: - url: "https://downloads.cinc.sh/files/stable/cinc/{{ cinc_version }}/debian/12/cinc_{{ cinc_version }}-1_{{ arch | default('amd64') }}.deb" - dest: /tmp/cinc.deb - validate_certs: no - register: download_configuration_management_result - retries: 5 - delay: 3 - until: download_configuration_management_result is succeeded - - - name: Install configuration management - ansible.builtin.apt: - deb: /tmp/cinc.deb - state: present - -- name: System Preparation - block: - - name: Create application directory - file: - path: "/app" - state: directory - owner: app - group: app - mode: 0755 +- name: Ensure files and directories + include_tasks: state.yml + vars: + extra: + - { path: "/app", state: "directory", mode: "0755", owner: "app", group: "config" } + - { path: "/app/.ssh", state: "directory", mode: "0711", owner: "app", group: "config" } -- name: SSH configuration - notify: Restart SSH - when: (configure_ssh | default(true) | 
bool) - block: - - name: Disable SSH password authentication - ansible.builtin.lineinfile: - path: /etc/ssh/sshd_config - regexp: "^#?PasswordAuthentication" - line: "PasswordAuthentication no" - backup: yes - state: present +- name: Container accessibility + import_tasks: access.yml - - name: Disable SSH root login - ansible.builtin.lineinfile: - path: /etc/ssh/sshd_config - regexp: "^#?PermitRootLogin" - line: "PermitRootLogin no" - backup: yes - state: present - -- name: SSH key management - block: - - name: Store container key - ansible.builtin.set_fact: - private_key: "{{ private_key ~ (private_key.endswith('\n') | ternary('', '\n')) }}" - public_key: "{{ public_key ~ (public_key.endswith('\n') | ternary('', '\n')) }}" +- name: Configuration management + import_tasks: tools.yml - - name: Ensure container key directory - file: - path: "/root/.ssh" - state: directory - owner: root - group: root - recurse: yes - - - name: Save container private key - copy: - content: "{{ private_key }}" - dest: "/root/id_rsa" - owner: root - group: root - mode: 0600 - - - name: Save container public key - copy: - content: "{{ public_key }}" - dest: "/root/id_rsa.pub" - owner: root - group: root - mode: 0644 - - - name: Verify container key - shell: ssh-keygen -y -f /root/id_rsa | diff - /root/id_rsa.pub - register: verify - changed_when: false - failed_when: verify.rc != 0 - - - name: Set container authorized key - ansible.posix.authorized_key: - user: "{{ item }}" - key: "{{ public_key }}" - state: present - exclusive: no - loop: "{{ ssh_users }}" - -- name: Customization - block: - - name: Copy bash configuration for root - copy: - src: .bashrc - dest: /root/.bashrc - owner: root - group: root - mode: '0644' - - - name: Copy bash configuration for users - copy: - src: .bashrc - dest: "/home/{{ item }}/.bashrc" - owner: "{{ item }}" - group: "{{ item }}" - mode: '0644' - loop: "{{ ssh_users }}" +- name: User default settings + import_tasks: defaults.yml diff --git a/base/roles/base/tasks/state.yml b/base/roles/base/tasks/state.yml new file mode 100644 index 0000000..e2c4794 --- /dev/null +++ b/base/roles/base/tasks/state.yml @@ -0,0 +1,38 @@ +- name: Set files and directories + set_fact: + defaults: + - { path: "{{ key_dir }}", state: "directory", mode: "0711", owner: "app", group: "config" } + - { path: "{{ key_dir }}/{{ id }}", state: "file", mode: "0600", owner: "app", group: "config" } + - { path: "{{ key_dir }}/{{ id }}.pub", state: "file", mode: "0644", owner: "app", group: "config" } + when: defaults is not defined + +- name: Gather files and directories + set_fact: + objects: "{{ defaults + (extra | default([])) }}" + +- name: Ensure directories exists + file: + path: "{{ item.path }}" + state: directory + owner: "{{ item.owner }}" + group: "{{ item.group }}" + mode: "{{ item.mode }}" + loop: "{{ objects }}" + when: item.state == 'directory' + +- name: Check if files exist + stat: + path: "{{ item.path }}" + register: file + loop: "{{ objects }}" + when: item.state == 'file' + +- name: Ensure permissions on existing files + file: + path: "{{ item.item.path }}" + state: file + owner: "{{ item.item.owner }}" + group: "{{ item.item.group }}" + mode: "{{ item.item.mode }}" + loop: "{{ file.results }}" + when: item.stat is defined and item.stat.exists diff --git a/base/roles/base/tasks/tools.yml b/base/roles/base/tasks/tools.yml new file mode 100644 index 0000000..e845e73 --- /dev/null +++ b/base/roles/base/tasks/tools.yml @@ -0,0 +1,30 @@ +- name: Install configuration management + block: + - name: 
Get latest Cinc version + ansible.builtin.uri: + url: https://downloads.cinc.sh/files/stable/cinc/ + return_content: yes + register: download_configuration_management_version + retries: 5 + delay: 3 + until: download_configuration_management_version.status == 200 + changed_when: false + + - name: Get configuration management version + ansible.builtin.set_fact: + cinc_version: "{{ download_configuration_management_version.content | regex_findall('href=\"(\\d+\\.\\d+\\.\\d+)/\"') | sort | last }}" + + - name: Download configuration management + ansible.builtin.get_url: + url: "https://downloads.cinc.sh/files/stable/cinc/{{ cinc_version }}/debian/12/cinc_{{ cinc_version }}-1_{{ arch | default('amd64') }}.deb" + dest: /tmp/cinc.deb + validate_certs: no + register: download_configuration_management_result + retries: 5 + delay: 3 + until: download_configuration_management_result is succeeded + + - name: Install configuration management + ansible.builtin.apt: + deb: /tmp/cinc.deb + state: present diff --git a/base/roles/base/vars/main.yml b/base/roles/base/vars/main.yml index 8cc7b17..e83319e 100644 --- a/base/roles/base/vars/main.yml +++ b/base/roles/base/vars/main.yml @@ -2,15 +2,13 @@ default_packages: - vim - wget - sudo - - net-tools - - procps - curl - - ansible-core - - ansible -users: - - { name: "app", create_home: false } - - { name: "config", groups: ["root", "sudo"], create_home: true } +default_groups: ["config"] + +default_users: + - { name: "app", groups: ["config"], create_home: false, home: "/app" } + - { name: "config", groups: ["config", "root", "sudo"], create_home: false, home: "/app" } - { name: "user", shell: "/bin/bash", groups: ["root", "sudo"], create_home: true } ssh_users: diff --git a/base/roles/container/defaults/main.yml b/base/roles/container/defaults/main.yml new file mode 100644 index 0000000..fa57cc2 --- /dev/null +++ b/base/roles/container/defaults/main.yml @@ -0,0 +1,14 @@ +proxmox_auth: >- + {{ (PROXMOX_PASSWORD | default('') == '') | ternary( + {'Authorization': 'PVEAPIToken=' + PROXMOX_USER + '!' 
+ PROXMOX_TOKEN + '=' + PROXMOX_SECRET}, + {'Cookie': 'PVEAuthCookie=' + proxmox_login.json.data.ticket, + 'CSRFPreventionToken': proxmox_login.json.data.CSRFPreventionToken}) }} + +proxmox_cred: + api_host: "{{ PROXMOX_HOST }}" + api_user: "{{ PROXMOX_USER }}" + api_password: "{{ PROXMOX_PASSWORD | default(omit) }}" + api_token_id: "{{ (PROXMOX_PASSWORD | default('') == '') | ternary(PROXMOX_TOKEN, omit) }}" + api_token_secret: "{{ (PROXMOX_PASSWORD | default('') == '') | ternary(PROXMOX_SECRET, omit) }}" + api_port: 8006 + node: "pve" \ No newline at end of file diff --git a/base/roles/container/tasks/add.yml b/base/roles/container/tasks/add.yml new file mode 100644 index 0000000..e732146 --- /dev/null +++ b/base/roles/container/tasks/add.yml @@ -0,0 +1,28 @@ +- name: Container addition + block: + - name: Check config user + command: "ssh -o BatchMode=yes -o ConnectTimeout=5 -i {{ key_dir }}/{{ id }} config@{{ ip }} echo success" + register: ssh_config_test + failed_when: false + changed_when: false + when: container_exists + + - name: Set configuration user + set_fact: + ssh_user: "{{ 'config' if (container_exists and (ssh_config_test.rc | default(1)) == 0) else 'root' }}" + + - name: Add container + add_host: + name: container + id: "{{ id }}" + ansible_host: "{{ ip }}" + ansible_user: "{{ ssh_user }}" + ansible_ssh_private_key_file: "{{ [key_dir, id] | path_join }}" + ansible_ssh_common_args: '-o StrictHostKeyChecking=no' + public_key: "{{ lookup('file', [key_dir, id ~ '.pub'] | path_join) }}" + private_key: "{{ lookup('file', [key_dir, id] | path_join) }}" + + - name: Reset known hosts + ansible.builtin.known_hosts: + name: "{{ ip }}" + state: absent diff --git a/base/roles/container/tasks/create.yml b/base/roles/container/tasks/create.yml new file mode 100644 index 0000000..688fd68 --- /dev/null +++ b/base/roles/container/tasks/create.yml @@ -0,0 +1,61 @@ +- name: Container creation + block: + - name: Init mounts + set_fact: + mounts: "{{ mounts | default({}) }}" + + - name: Set mounts + set_fact: + mounts: >- + {{ mounts | default({}) | combine({('mp'~idx): + ((item.split(':')[:-1] | join(':')) ~ ',mp=' ~ (item.split(':')[-1]) ~ ',ro=0,acl=1') }) }} + loop: "{{ mount.split(',') | select('search', ':') }}" + loop_control: + index_var: idx + when: + - mount is defined + - mount | trim | length > 0 + + - name: Set passthrough + set_fact: + mounts: "{{ mounts | combine({ ('dev' ~ idx): item }) }}" + loop: "{{ mount.split(',') | select('match', '^/[^:]+$') }}" + loop_control: + index_var: idx + when: + - mount is defined + - mount | trim | length > 0 + + - name: Create container + community.general.proxmox: &create_container + vmid: "{{ id }}" + hostname: "{{ hostname }}" + ostemplate: "{{ os }}" + cores: "{{ cores }}" + memory: "{{ memory }}" + pubkey: "{{ lookup('file', [key_dir, id ~ '.pub'] | path_join) }}" + swap: "{{ swap }}" + disk: "{{ disk }}" + mounts: >- + {{ (mounts if mounts and (PROXMOX_PASSWORD is defined and PROXMOX_PASSWORD != '') else omit) }} + features: >- + {{ (['mount=cifs'] if (share | default(false)) and (mount | default('') | trim != '') + and (PROXMOX_PASSWORD is defined and PROXMOX_PASSWORD != '') else omit) }} + unprivileged: "{{ (share | default(false) and mount | default('') | trim != '') | ternary(false, true) }}" + netif: + net0: "name=eth0,gw=192.168.178.1,ip={{ ip }}/24,bridge=vmbr0" + onboot: "{{ boot }}" + state: present + register: container_creation + + - name: Download container os if missing + include_tasks: os.yml + when: + - 
container_creation.failed + - os in (container_creation.msg | default('') ) + + - name: Retry create container with os downloaded + community.general.proxmox: + <<: *create_container + register: container_creation + when: os_available is defined and os in (os_available.json.data | map(attribute='volid') | list) diff --git a/base/roles/container/tasks/env.yml b/base/roles/container/tasks/env.yml new file mode 100644 index 0000000..764d277 --- /dev/null +++ b/base/roles/container/tasks/env.yml @@ -0,0 +1,40 @@ +- name: Environment + block: + - name: Configuration file + block: + - name: Get configuration file + ansible.builtin.stat: + path: "config.json" + register: config_file + + - name: Load configuration file + ansible.builtin.include_vars: + file: "config.json" + name: config + when: config_file.stat.exists + + - name: Proxmox configuration + block: + - name: Set Proxmox environment + ansible.builtin.set_fact: + PROXMOX_HOST: "{{ lookup('env', 'PROXMOX_HOST') }}" + PROXMOX_USER: "{{ lookup('env', 'PROXMOX_USER') }}" + PROXMOX_PASSWORD: "{{ lookup('env', 'PROXMOX_PASSWORD') }}" + PROXMOX_TOKEN: "{{ lookup('env', 'PROXMOX_TOKEN') }}" + PROXMOX_SECRET: "{{ lookup('env', 'PROXMOX_SECRET') }}" + when: + - (config_file.stat.exists and config.proxmox is defined) or (not config_file.stat.exists) + + - name: Get root ticket + block: + - name: Get Proxmox ticket + uri: + url: "https://{{ PROXMOX_HOST }}:8006/api2/json/access/ticket" + method: POST + body: + username: "{{ PROXMOX_USER }}" + password: "{{ PROXMOX_PASSWORD }}" + body_format: json + validate_certs: no + register: proxmox_login + when: (PROXMOX_PASSWORD | default('')) | trim != '' diff --git a/base/roles/container/tasks/main.yml b/base/roles/container/tasks/main.yml new file mode 100644 index 0000000..357c855 --- /dev/null +++ b/base/roles/container/tasks/main.yml @@ -0,0 +1,26 @@ +- name: Environment + include_tasks: env.yml + +- name: Container orchestration + block: + - name: Check container + community.general.proxmox_vm_info: + vmid: "{{ id }}" + register: container_exists + + - name: Remove existing container + include_tasks: remove.yml + when: container_exists is success and (container_exists.proxmox_vms | length) > 0 + + - name: Create container + include_tasks: create.yml + + - name: Start container + include_tasks: start.yml + + - name: Add container + include_tasks: add.yml + + module_defaults: + community.general.proxmox: "{{ proxmox_cred }}" + community.general.proxmox_vm_info: "{{ proxmox_cred }}" \ No newline at end of file diff --git a/base/roles/container/tasks/os.yml b/base/roles/container/tasks/os.yml new file mode 100644 index 0000000..087d442 --- /dev/null +++ b/base/roles/container/tasks/os.yml @@ -0,0 +1,26 @@ +- name: Container OS + block: + - name: Download container os if missing + uri: + url: "https://{{ PROXMOX_HOST }}:8006/api2/json/nodes/pve/storage/local/download-url" + method: POST + headers: "{{ proxmox_auth | combine({'Content-Type': 'application/json'}) }}" + body_format: json + body: + url: "http://download.proxmox.com/images/system/{{ os.split('/')[-1] }}" + filename: "{{ os.split('/')[-1] }}" + content: "vztmpl" + validate_certs: no + register: os_download + + - name: Wait for os to be downloaded + uri: + url: "https://{{ PROXMOX_HOST }}:8006/api2/json/nodes/pve/storage/local/content" + method: GET + headers: "{{ proxmox_auth }}" + validate_certs: no + register: os_available + until: os in (os_available.json.data | map(attribute='volid') | list) + retries: 10 + delay: 6 + when: os_download is 
defined diff --git a/base/roles/container/tasks/remove.yml b/base/roles/container/tasks/remove.yml new file mode 100644 index 0000000..e6979f9 --- /dev/null +++ b/base/roles/container/tasks/remove.yml @@ -0,0 +1,38 @@ +- name: Container removal + block: + - name: Stop container + community.general.proxmox: + vmid: "{{ id }}" + state: stopped + force: yes + + - name: Wait for container to be stopped + uri: + url: "https://{{ PROXMOX_HOST }}:8006/api2/json/nodes/pve/lxc/{{ id }}/status/current" + method: GET + headers: "{{ proxmox_auth }}" + validate_certs: no + register: container_status + until: container_status.json.data.status == "stopped" + retries: 5 + delay: 2 + + - name: Remove container + community.general.proxmox: + vmid: "{{ id }}" + state: absent + register: removal + + - name: Wait for container to be removed + uri: + url: "https://{{ PROXMOX_HOST }}:8006/api2/json/nodes/pve/lxc/{{ id }}/status/current" + method: GET + headers: "{{ proxmox_auth }}" + validate_certs: no + register: container_status + until: + - container_status.status == 500 + - "'does not exist' in container_status.json.message" + retries: 5 + delay: 4 + failed_when: false diff --git a/base/roles/container/tasks/start.yml b/base/roles/container/tasks/start.yml new file mode 100644 index 0000000..17a8493 --- /dev/null +++ b/base/roles/container/tasks/start.yml @@ -0,0 +1,27 @@ +- name: Container start + block: + - name: Wait for container to be created + uri: + url: "https://{{ PROXMOX_HOST }}:8006/api2/json/nodes/pve/lxc/{{ id }}/config" + method: GET + headers: "{{ proxmox_auth }}" + validate_certs: no + register: container_status + until: + - container_status.status == 200 + - "'hostname' in container_status.json.data" + retries: 5 + delay: 6 + ignore_errors: yes + + - name: Start container + community.general.proxmox: + vmid: "{{ id }}" + hostname: "{{ hostname }}" + state: started + + - name: Wait for container to be connectable + wait_for: + port: 22 + host: "{{ ip }}" + timeout: 180 diff --git a/base/roles/mount/tasks/main.yml b/base/roles/mount/tasks/main.yml index 673c09a..826adda 100644 --- a/base/roles/mount/tasks/main.yml +++ b/base/roles/mount/tasks/main.yml @@ -31,4 +31,5 @@ label: "{{ item }}" - name: Restart mount - ansible.builtin.command: mount -a \ No newline at end of file + ansible.builtin.command: mount -a + ignore_errors: yes # avoid permission error if token usage \ No newline at end of file diff --git a/config.env b/config.env index 98213aa..bccbd62 100644 --- a/config.env +++ b/config.env @@ -4,6 +4,6 @@ HOSTNAME=config CORES=4 MEMORY=6144 SWAP=1024 -DISK=local-lvm:16 -MOUNT=share:32 -BOOT=yes \ No newline at end of file +DISK=local-lvm:16 # adjust if needed +MOUNT=/mnt/ext:/share/files # adjust +BOOT=yes diff --git a/config/attributes/default.rb b/config/attributes/default.rb index 14d9ccf..cd8edab 100644 --- a/config/attributes/default.rb +++ b/config/attributes/default.rb @@ -1,28 +1,32 @@ -default['host'] = ENV['IP'].to_s.presence ? ENV['IP'] : "127.0.0.1" default['id'] = ENV['ID'] -default['key'] = ENV['KEY'].to_s.presence ? 
ENV['KEY'] : "/share/.ssh/#{node['id']}" +default['host'] = ENV['IP'].to_s.presence || "127.0.0.1" +default['key'] = ENV['KEY'].to_s.presence || "/share/.ssh/#{node['id']}" -default['git']['app']['user'] = 'app' -default['git']['app']['group'] = 'app' +default['app']['user'] = Default.user(node, default: true) +default['app']['group'] = Default.group(node, default: true) +default['app']['config'] = Default.config(node, default: true) -default['git']['dir']['install'] = '/app/git' -default['git']['dir']['data'] = '/app/git/data' -default['git']['home'] = "/home/#{node['git']['app']['user']}/git" -default['git']['workspace'] = '/share/workspace' +default['git']['conf']['customize'] = true +default['git']['conf']['repo'] = [ "./", "./base", "./config/libraries", "./libs" ] + +default['git']['dir']['app'] = '/app/git' +default['git']['dir']['home'] = Dir.home(node['app']['user']) || ENV['HOME'] || '/app' +default['git']['dir']['workspace'] = "#{node['git']['dir']['home']}/workspace" default['git']['port']['http'] = 8080 default['git']['port']['ssh'] = 2222 -default['git']['version'] = "v1" -default['git']['host'] = "http://#{node['host']}:#{node['git']['port']['http']}" -default['git']['endpoint'] = "http://#{node['host']}:#{node['git']['port']['http']}/api/#{node['git']['version']}" +default['git']['host']['http'] = "http://#{node['host']}:#{node['git']['port']['http']}" +default['git']['host']['ssh'] = "#{node['host']}:#{node['git']['port']['ssh']}" + +default['git']['api']['version'] = "v1" +default['git']['api']['endpoint'] = "http://#{node['host']}:#{node['git']['port']['http']}/api/#{node['git']['api']['version']}" default['git']['org']['main'] = 'main' default['git']['org']['stage'] = 'stage' -default['git']['repo']['ssh'] = "#{node['host']}:#{node['git']['port']['ssh']}" -default['runner']['dir']['install'] = '/app/runner' +# Runner + +default['runner']['dir']['app'] = '/app/runner' default['runner']['dir']['cache'] = '/tmp' -default['runner']['file']['marker'] = "#{node['runner']['dir']['install']}/.runner" -default['runner']['labels'] = 'shell' -default['git']['repositories'] = [ "./", "./base", "./config/libraries", "./libs" ] +default['runner']['conf']['label'] = 'shell' diff --git a/config/libraries/common.rb b/config/libraries/common.rb index 16633f1..c8b1562 100644 --- a/config/libraries/common.rb +++ b/config/libraries/common.rb @@ -11,12 +11,12 @@ def self.packages(ctx, *pkgs, action: :install) end def self.directories(ctx, dirs, opts = {}) - dirs = Array(dirs) - owner = opts[:owner] || 'root' - group = opts[:group] || 'root' - mode = opts[:mode] || '0755' - recursive = opts.key?(:recursive) ? opts[:recursive] : true - recreate = opts[:recreate] || false + dirs = Array(dirs).compact.uniq + owner = opts[:owner] || Default.user(ctx) + group = opts[:group] || Default.group(ctx) + mode = opts[:mode] || '0755' + recursive = true + recreate = !!opts[:recreate] || false if recreate sort_dir(dirs).each { |dir| delete_dir(ctx, dir) } @@ -27,16 +27,18 @@ def self.directories(ctx, dirs, opts = {}) # System def self.daemon(ctx, name) - ctx.find_resource!(:execute, name) - rescue Chef::Exceptions::ResourceNotFound - ctx.execute name do + Ctx.find(ctx, :execute, name) do command 'systemctl daemon-reload' - action :nothing + action :nothing end end - def self.application(ctx, name, user: 'app', group: user, exec: nil, cwd: nil, unit: {}, action: [:enable, :start], restart: 'on-failure', subscribe: nil, reload: 'systemd_reload') - if exec || !unit.empty? 
+ def self.application(ctx, name, user: nil, group: nil, exec: nil, cwd: nil, unit: {}, action: [:enable, :start], restart: 'on-failure', subscribe: nil, reload: 'systemd_reload') + user ||= Default.user(ctx) + group ||= Default.group(ctx) + user = user.to_s + group = group.to_s + if exec || unit.present? daemon(ctx, reload) service = { @@ -65,133 +67,37 @@ def self.application(ctx, name, user: 'app', group: user, exec: nil, cwd: nil, u "[#{section}]\n#{lines}" end.join("\n\n") - ctx.file "/etc/systemd/system/#{name}.service" do + Ctx.dsl(ctx).file "/etc/systemd/system/#{name}.service" do owner 'root' group 'root' mode '0644' content unit_content - notifies :run, "execute[#{reload}]", :immediately end - end + Ctx.find(ctx, :execute, reload) - ctx.service name do - action action - Array(subscribe).flatten.each { |ref| subscribes :restart, ref, :delayed } if subscribe end - end - - def self.arch(node) - case node['kernel']['machine'].to_s - when /arm64|aarch64/ - 'arm64' - when /armv6|armv7l/ - 'armv7' - else - 'amd64' - end - end - - # Remote - - def self.request(uri, user: nil, pass: nil, headers: {}, method: Net::HTTP::Get, body: nil, expect: false) - req = method.new(u = URI(uri)) - req.basic_auth(user, pass) if user && pass - req.body = body if body - headers.each { |k, v| req[k] = v } - response = Net::HTTP.start(u.host, u.port, use_ssl: u.scheme == 'https') { |http| http.request(req) } - Chef::Log.info("[#{__method__}] request #{uri}: #{response.code} #{response.message}") - - if response.is_a?(Net::HTTPSuccess) - return expect ? true : response - end - if response.is_a?(Net::HTTPRedirection) - loc = response['location'] - loc = "#{u.scheme}://#{u.host}#{loc}" if loc&.start_with?('/') - return request(loc, user: user, pass: pass, headers: headers, method: method, body: body, expect: expect) - end - - expect ? false : response - end - - def self.latest(url) - request(url).body[/title>.*?v?([0-9]+\.[0-9]+(?:\.[0-9]+)?)/, 1].to_s || "latest" - end - def self.download(ctx, path, url:, owner: 'root', group: 'root', mode: '0644', action: :create) - ctx.remote_file path do - source url.respond_to?(:call) ? lazy { url.call } : url - owner owner - group group - mode mode + Ctx.dsl(ctx).service name do action action + Array(subscribe).flatten.each { |ref| subscribes :restart, ref, :delayed } if subscribe end - end - # Utility - - def self.wait(condition = nil, timeout: 20, sleep_interval: 5, &block) - return Kernel.sleep(condition) if condition.is_a?(Integer) - return Timeout.timeout(timeout) { block.call } if block_given? - raise ArgumentError unless condition - - Timeout.timeout(timeout) do - loop do - ok = false - if condition =~ %r{^https?://} - uri = URI(condition) - http = Net::HTTP.new(uri.host, uri.port) - http.use_ssl = (uri.scheme == 'https') - http.verify_mode = OpenSSL::SSL::VERIFY_NONE if uri.scheme == 'https' - begin - res = http.get(uri.path.empty? ? '/' : uri.path) - ok = res.is_a?(Net::HTTPSuccess) || res.is_a?(Net::HTTPRedirection) - rescue - ok = false - end - else - host_port = condition.include?('@') ? 
condition.split('@', 2).last : condition - host, port = host_port.split(':', 2) - port = (port || '80').to_i - begin - TCPSocket.new(host, port).close - ok = true - rescue - ok = false - end - end - break if ok - sleep sleep_interval - end - end - true - rescue Timeout::Error, StandardError - false end - # Helper - def self.create_dir(ctx, dir, owner, group, mode, recursive) - ctx.directory dir do - owner owner - group group - mode mode - recursive recursive - action :create - end + ctx.directory dir do owner owner; group group; mode mode; recursive recursive end + rescue => e + Logs.warn("Skip create #{dir}: #{e}") end - def self.delete_dir(ctx, dir) - ctx.directory dir do - action :delete - recursive true - only_if { ::Dir.exist?(dir) } - end - end + def self.delete_dir(ctx, dir) + ctx.directory dir do action :delete; recursive true; only_if { ::Dir.exist?(dir) } end + rescue => e + Logs.warn("Skip delete #{dir}: #{e}") + end def self.sort_dir(dirs) - Array(dirs).sort_by { |d| -d.count('/') } + Array(dirs).compact.sort_by { |d| -d.count('/') } end - private_class_method :create_dir, :delete_dir - -end +end \ No newline at end of file diff --git a/config/libraries/constants.rb b/config/libraries/constants.rb new file mode 100644 index 0000000..b6ff880 --- /dev/null +++ b/config/libraries/constants.rb @@ -0,0 +1,12 @@ +module Constants + + HEADER_JSON = { + 'Content-Type' => 'application/json', + 'Accept' => 'application/json' + }.freeze + + HEADER_FORM = { + 'Content-Type' => 'application/x-www-form-urlencoded' + }.freeze + +end \ No newline at end of file diff --git a/config/libraries/default.rb b/config/libraries/default.rb new file mode 100644 index 0000000..6bb17ea --- /dev/null +++ b/config/libraries/default.rb @@ -0,0 +1,61 @@ +module Default + + def self.user(ctx, default: nil) + node = Ctx.node(ctx) + @user ||= (default.presence ? 'app' : presence_or(Env.get(node, "app_user"), user(node, default: true))).to_s + end + + def self.group(ctx, default: nil) + node = Ctx.node(ctx) + @group ||= (default.presence ? 'config' : presence_or(Env.get(node, "app_group"), group(node, default: true))).to_s + end + + def self.config(ctx, default: nil) + node = Ctx.node(ctx) + @config ||= (default.presence ? 'config' : presence_or(Env.get(node, "app_config"), config(node, default: true))).to_s + end + + def self.presence_or(var, default) + var.to_s.presence || default.to_s + end + +end + +module Ctx + + def self.node(obj) + return obj.node if defined?(Context) && obj.is_a?(Context) + return obj.node if obj.respond_to?(:node) + if obj.respond_to?(:run_context) && obj.run_context && obj.run_context.respond_to?(:node) + return obj.run_context.node + end + obj + end + + def self.dsl(obj) + return obj if obj.respond_to?(:file) + rctx = rc(obj) + return obj unless rctx + cb = obj.respond_to?(:cookbook_name) ? obj.cookbook_name : nil + rn = obj.respond_to?(:recipe_name) ? 
obj.recipe_name : nil + Chef::Recipe.new(cb, rn, rctx) + end + + def self.rc(obj) + return obj.run_context if obj.respond_to?(:run_context) + return obj if defined?(Chef::RunContext) && obj.is_a?(Chef::RunContext) + nil + end + + def self.find(obj, type, name, &block) + rctx = rc(obj) + if rctx && rctx.respond_to?(:resource_collection) + begin + return rctx.resource_collection.find("#{type}[#{name}]") + rescue Chef::Exceptions::ResourceNotFound + end + end + dsl(obj).public_send(type, name, &block) + end + +end \ No newline at end of file diff --git a/config/libraries/env.rb b/config/libraries/env.rb index c8e1de3..aaa65a1 100644 --- a/config/libraries/env.rb +++ b/config/libraries/env.rb @@ -4,72 +4,76 @@ module Env - def self.creds(node, login_key = 'login', password_key = 'password') - [login = ENV[login_key.upcase] || node[login_key.to_sym], pass = ENV[password_key.upcase] || node[password_key.to_sym]].tap { Chef::Log.info(mask(login)); Chef::Log.info(mask(pass)) } + def self.creds(ctx, login = 'login', password = 'password') + node = Ctx.node(ctx) + user ||= ENV[login.upcase] || node[login.to_sym] + pass ||= ENV[password.upcase] || node[password.to_sym] + return user, pass end - def self.get(node, key) - Chef::Log.info("[#{__method__}] #{key}: #{mask(val = node[key].to_s.presence || ENV[key.to_s.upcase].presence || get_variable(node, key))}"); val - rescue => e - Chef::Log.warn("[#{__method__}] #{e.message} node[#{key}]: #{node[key].inspect} ENV[#{key}]: #{ENV[key.to_s.upcase].inspect}") - end - - def self.get_variable(node, key) - JSON.parse(request(node, key).body)['data'] - rescue => e - Chef::Log.warn("[#{__method__}] #{e.message} failed get '#{key}' on #{endpoint(node)} node[#{key}]: #{node[key].inspect} ENV[#{key}]: #{ENV[key.to_s.upcase].inspect}") - end - - def self.set_variable(node, key, val) - request(node, key, { name: key, value: val.to_s }.to_json) - rescue => e - Chef::Log.warn("[#{__method__}] #{e.message} failed set '#{key}' on #{endpoint(node)} node[#{key}]: #{node[key].inspect} ENV[#{key}]: #{ENV[key.to_s.upcase].inspect}") - raise + def self.get(ctx, key) + Logs.try!("get '#{key}'") do + node = Ctx.node(ctx) + env_key = ENV[key.to_s.upcase] + return node[key] if node[key].present? + return env_key if env_key.present? + return get_variable(ctx, key) + end end - class << self - alias_method :set, :set_variable + def self.get_variable(ctx, key, repo: nil) + Logs.try!("get variable '#{key}'", [:repo, repo]) do + request(Ctx.node(ctx), key, repo: repo).json['data'] + end end - private_class_method def self.or_default(var, default) - var.to_s.presence ? 
var.to_s : default.to_s - end + def self.set_variable(ctx, key, val, repo: nil) + Logs.try!("set variable '#{key}' to #{val.try(:mask) || val}", [:val, val, :repo, repo], raise: true) do + request(Ctx.node(ctx), key, body: ({ name: key, value: val.to_s }.to_json), repo: repo, expect: true) + end + end; class << self; alias_method :set, :set_variable; end - private_class_method def self.endpoint(node, port=or_default(node.dig('git', 'port', 'http'), '8080')) - or_default(node.dig('git', 'endpoint'), -"http://#{or_default(node['host'].to_s.presence || ENV['HOST'].to_s.presence, '127.0.0.1')}:#{port}/api/#{or_default(node.dig('git', 'version'), 'v1')}") + def self.endpoint(ctx) + node = Ctx.node(ctx) + host = Default.presence_or(node.dig('git', 'host', 'http'), "http://#{Default.presence_or(Env.get(node, 'host'), '127.0.0.1')}:#{Default.presence_or(node.dig('git', 'port', 'http'), 8080)}") + "#{host}/api/#{Default.presence_or(node.dig('git', 'api', 'version'), 'v1')}" end - private_class_method def self.request(node, key, body = nil) - uri = URI("#{endpoint(node)}/orgs/#{or_default(node.dig('git', 'repo', 'org'), 'main')}/actions/variables/#{key}") - (body ? [Net::HTTP::Put, Net::HTTP::Post] : [Net::HTTP::Get]).each do |m| - req = m.new(uri) - req.basic_auth(*creds(node)) - req['Content-Type'] = 'application/json' - req.body = body if body - response = Net::HTTP.start(uri.host, uri.port) { |h| h.request(req) } - Chef::Log.info("[#{__method__}] request #{uri}: #{response.code} #{response.message}") - return response unless body && response.code.to_i == 404 + def self.request(ctx, key, body: nil, repo: nil, expect: false) + user, pass = creds(ctx) + owner = Default.presence_or(ctx.dig('git', 'org', 'main'), 'main') + uri = URI("#{endpoint(ctx)}/#{repo.to_s.strip.size>0 ? "repos/#{owner}/#{repo.to_s}" : "orgs/#{owner}"}/actions/variables/#{key}") + response = Utils.request(uri, user: user, pass: pass, headers: {}, method: Net::HTTP::Get, expect: (body.present? or expect), log: false) + if body.present? + method = (response ? Net::HTTP::Put : Net::HTTP::Post) + response = Utils.request(uri, user: user, pass: pass, headers: Constants::HEADER_JSON, method: method, body: body, expect: expect) end + return response end -end - -class Object - def blank? - respond_to?(:empty?) ? empty? : !self - end - - def presence - blank? ? nil : self + def self.dump(ctx, *keys, repo: nil) + Logs.try!("dump variables", [:repo, repo], raise: true) do + node = Ctx.node(ctx) + keys.flatten.each do |key| + value = node[key] + next if value.blank? + case value + when Hash + value.each do |subkey, subvalue| + next if subvalue.blank? + Env.set_variable(node, "#{key}_#{subkey}", subvalue, repo: repo) + end + when Array + value.each_with_index do |item, i| + next if item.blank? + Env.set_variable(node, "#{key}_#{i}", item, repo: repo) + end + else + Env.set_variable(node, key, value, repo: repo) + end + end + true + end end -end - -class NilClass - def blank?; true; end - def presence; nil; end -end -def mask(str) - str.to_s.length <= 2 ? 
'*' * str.to_s.length : "#{str[0]}#{'*' * (str.length - 2)}#{str[-1]}" end \ No newline at end of file diff --git a/config/libraries/logs.rb b/config/libraries/logs.rb new file mode 100644 index 0000000..9f2b7de --- /dev/null +++ b/config/libraries/logs.rb @@ -0,0 +1,63 @@ +module Logs + + def self.log(msg, level: :info) + c = callsite + label = method_label(c) + if label + Chef::Log.send(level, FORMAT_WITH % [label, msg, File.basename(c.path), c.lineno]) + else + Chef::Log.send(level, FORMAT_NO % [msg, File.basename(c.path), c.lineno]) + end + end + + def self.info(msg); log(msg) end; def self.warn(msg); log(msg, level: :warn) end; def self.error(msg); log(msg, level: :error) end + def self.info?(msg, result: true); log(msg); result; end + def self.request(uri, response); info("requested #{uri}: #{response&.code} #{response&.message}"); return response end + def self.return(msg); log(msg.to_s); return msg end + + def self.debug(msg, *pairs, ctx: nil, level: :info) + flat = pairs.flatten + raise ArgumentError, "debug requires key value pairs (#{flat.length}: #{flat.inspect})" unless flat.length.even? + input = flat.each_slice(2).to_h.transform_keys(&:to_s) + input['env'] = ENV.to_h; input['ctx'] = ctx.respond_to?(:to_h) ? ctx.to_h : ctx + payload = input.map { |k, v| "#{k}=#{v.inspect}" }.join(" ") + log([msg, payload].reject { |s| s.blank? }.join(" "), level: level) + end + + def self.try!(msg, *pairs, e:nil, ctx: nil, raise: false) + yield + rescue => e + debug("failed: #{msg}: #{e.message}", *(pairs.flatten), ctx: ctx, level: (raise ? :error : :warn)) + raise("[#{method_label(callsite)}] #{e.message if e} #{msg}") if raise + end + + def self.request!(uri, response, valid=[], msg: nil, ctx: nil) + res = try!("failed request", [:uri, uri, :response, response]) do + if valid.presence # if valid status code required + raise("[#{method_label(callsite)}] #{e.message if e} #{msg}") unless + (valid == true && response.is_a?(Net::HTTPSuccess) or valid.include?(response.code.to_i)) + end + response ? "#{response.code} #{response.message}" : response + end + debug("[#{msg}] responded #{res}", [:uri, uri, :response, response], ctx: ctx) + return response + end + + # Helper + + FORMAT_WITH = "\e[1m[%s] %s (%s:%d)\e[0m" + FORMAT_NO = "%s (%s:%d)" + IGNORES = [__FILE__, %r{libraries}] + + def self.callsite + s = caller_locations(2,60) + s.find { |l| IGNORES.none? { |ig| ig.is_a?(Regexp) ? l.path =~ ig : l.path == ig } } || s.first + end + + def self.method_label(loc) + label = (loc.respond_to?(:label) ? loc.label : loc.to_s).sub(/block.*in /, '') + return nil if label == 'from_file' + label + end + +end \ No newline at end of file diff --git a/config/libraries/patch.rb b/config/libraries/patch.rb new file mode 100644 index 0000000..edac41a --- /dev/null +++ b/config/libraries/patch.rb @@ -0,0 +1,58 @@ +require 'json' + +class Object + def blank?; respond_to?(:empty?) ? empty? : !self; end + def present?; !blank?; end + def presence; blank? ? nil : self; end + def presence_in(collection); return nil unless collection&.respond_to?(:include?); collection.include?(self) ? self : nil; end + def try(method_name = nil, *args, &block) + return nil if nil?; return instance_eval(&block) if block + return nil unless method_name && respond_to?(method_name, true) + public_send(method_name, *args) + end +end + +class NilClass + def blank?; true; end + def present?; false; end + def presence; nil; end +end + +# Data types + +class String + def blank?; strip.empty? 
end + def squish; strip.gsub(/\s+/, " ") end + def mask; (length <= 4) ? '*' * length : "#{self[0]}#{self[1]}#{'*' * (length - 4)}#{self[-2]}#{self[-1]}" ; end +end + +class Integer + def minutes; self * 60; end + def hours; self * 3600; end +end + +class Hash + def slice(*keys); keys.each_with_object({}) { |k,h| h[k] = self[k] if key?(k) }; end + def except(*keys); dup.tap { |h| keys.each { |k| h.delete(k) } }; end + def json(*a); JSON.generate(self, *a); end + def mask; JSON.generate(transform_values { |v| v.is_a?(String) ? v.mask : v.to_s }) end +end + +class Array + def mask; JSON.generate(map { |v| v.is_a?(String) ? v.mask : v.to_s }) end +end + +# Extension + +class Net::HTTPResponse + def json(symbolize_names: false, allow_blank: false, validate_content_type: false) + ct = self['content-type'] + return nil if validate_content_type && !(ct && ct.downcase.include?('application/json')) + s = body.to_s + return nil if allow_blank && s.strip.empty? + JSON.parse(s, symbolize_names: symbolize_names) + rescue + self + end + +end \ No newline at end of file diff --git a/config/libraries/utils.rb b/config/libraries/utils.rb new file mode 100644 index 0000000..eba3ac4 --- /dev/null +++ b/config/libraries/utils.rb @@ -0,0 +1,156 @@ +require 'digest' +require 'fileutils' +require 'json' +require 'net/http' +require 'openssl' +require 'shellwords' +require 'socket' +require 'timeout' +require 'tmpdir' +require 'uri' + +module Utils + + # General + + def self.wait(condition = nil, timeout: 20, sleep_interval: 5, &block) + return Kernel.sleep(condition) if condition.is_a?(Integer) + return Timeout.timeout(timeout) { block.call } if block_given? + return Kernel.sleep(timeout) if condition.nil? + Timeout.timeout(timeout) do + loop do + ok = false + if condition =~ %r{^https?://} + uri = URI(condition) + http = Net::HTTP.new(uri.host, uri.port) + http.use_ssl = (uri.scheme == 'https') + http.verify_mode = OpenSSL::SSL::VERIFY_NONE if uri.scheme == 'https' + begin + res = http.get(uri.path.empty? ? '/' : uri.path) + ok = res.is_a?(Net::HTTPSuccess) || res.is_a?(Net::HTTPRedirection) + rescue + ok = false + end + else + host_port = condition.include?('@') ? condition.split('@', 2).last : condition + host, port = host_port.split(':', 2) + port = (port || '80').to_i + begin + TCPSocket.new(host, port).close + ok = true + rescue + ok = false + end + end + break if ok + sleep sleep_interval + end + end + true + rescue Timeout::Error, StandardError + false + end + + # System + + def self.arch(ctx) + case ctx['kernel']['machine'].to_s + when /arm64|aarch64/ + 'arm64' + when /armv6|armv7l/ + 'armv7' + else + 'amd64' + end + end + + def self.snapshot(ctx, dir, snapshot_dir: '/share/snapshots', name: ctx.cookbook_name, restore: false) + timestamp = Time.now.strftime('%H%M-%d%m%y') + snapshot = File.join(snapshot_dir, name, "#{name}-#{timestamp}.tar.gz") + md5_dir = ->(path) { + entries = Dir.glob("#{path}/**/*", File::FNM_DOTMATCH) + files = entries.reject { |f| File.directory?(f) || File.basename(f).start_with?('._') } + Digest::MD5.new.tap { |md5| files.sort.each { |f| File.open(f, 'rb') { |io| md5.update(io.read) } } }.hexdigest } + verify = ->(archive, compare_dir) { + Dir.mktmpdir do |tmp| + Logs.try!("snapshot extraction", [:archive, archive, :tmp, tmp], raise: true) do + system("tar -xzf #{Shellwords.escape(archive)} -C #{Shellwords.escape(tmp)}") or raise("tar failed") + end + roots = Dir.children(tmp).map { |e| File.join(tmp, e) }.select { |p| File.directory?(p) } + base = roots.size == 1 ? 
roots.first : File.join(tmp, File.basename(compare_dir)) + base = roots.first unless Dir.exist?(base) + md5_base = md5_dir.(base) + md5_compare = Dir.exist?(compare_dir) ? md5_dir.(compare_dir) : '' + raise("verify snapshot failed") unless md5_base == md5_compare + end + true + } + if restore + latest = Dir[File.join(snapshot_dir, name, "#{name}*.tar.gz")].max_by { |f| File.mtime(f) } + return true unless latest && ::File.exist?(latest) # no snapshot available + FileUtils.rm_rf(dir) + FileUtils.mkdir_p(File.dirname(dir)) + Logs.try!("snapshot restore", [:dir, dir, :archive, latest], raise: true) do + system("tar -xzf #{Shellwords.escape(latest)} -C #{Shellwords.escape(File.dirname(dir))}") or raise("tar extract failed") + end + end + return true unless Dir.exist?(dir) # true for convenience in idempotency + FileUtils.mkdir_p(File.dirname(snapshot)) + Logs.try!("snapshot creation", [:dir, dir, :snapshot, snapshot], raise: true) do + system("tar -czf #{Shellwords.escape(snapshot)} -C #{Shellwords.escape(File.dirname(dir))} #{Shellwords.escape(File.basename(dir))}") or raise("tar compress failed") + end + return verify.(snapshot, dir) + end + + def self.proxmox(uri, ctx, path) + host = Env.get(ctx, 'proxmox_host'); user = Env.get(ctx, 'proxmox_user'); pass = Env.get(ctx, 'proxmox_password') + token = Env.get(ctx, 'proxmox_token'); secret = Env.get(ctx, 'proxmox_secret') + + url = "https://#{host}:8006/api2/json/#{path}" + if pass && !pass.empty? + response = request(uri="https://#{host}:8006/api2/json/access/ticket", method: Net::HTTP::Post, + body: URI.encode_www_form(username: user, password: pass), headers: Constants::HEADER_FORM) + Logs.request!(uri, response, true, msg: "Proxmox ticket could not be retrieved") + headers = { 'Cookie' => "PVEAuthCookie=#{response.json['data']['ticket']}" } + else + headers = { 'Authorization' => "PVEAPIToken=#{user}!#{token}=#{secret}" } + end + request(url, headers: headers).json['data'] + end + + # Remote + + def self.request(uri, user: nil, pass: nil, headers: {}, method: Net::HTTP::Get, body: nil, expect: false, log: true) + u = URI(uri) + req = method.new(u) + req.basic_auth(user, pass) if user && pass + req.body = body if body + headers.each { |k, v| req[k] = v } + response = Net::HTTP.start(u.host, u.port, use_ssl: u.scheme == 'https') { |http| http.request(req) } + if response.is_a?(Net::HTTPRedirection) && response['location'] + loc = response['location'] + loc = loc.start_with?('http://', 'https://') ? loc : (loc.start_with?('/') ? "#{u.scheme}://#{u.host}#{loc}" : URI.join("#{u.scheme}://#{u.host}#{u.path}", loc).to_s) + response = request(loc, user: user, pass: pass, headers: headers, method: method, body: body, expect: expect, log: log) + end + if log + tag = log.is_a?(String) ? " #{log}" : "" + Logs.request("#{u}#{tag} (#{body})", response) + end + return expect ? response.is_a?(Net::HTTPSuccess) : response + end + + def self.download(ctx, path, url:, owner: Default.user(ctx), group: Default.group(ctx), mode: '0754', action: :create) + ctx.remote_file path do + source url.respond_to?(:call) ? 
lazy { url.call } : url + owner owner + group group + mode mode + action action + end + end + + def self.latest(url) + (request(url).body[/title>.*?v?([0-9]+\.[0-9]+(?:\.[0-9]+)?)/, 1] || "latest").to_s + end + +end \ No newline at end of file diff --git a/config/recipes/config.rb b/config/recipes/config.rb index b477db6..2465b87 100644 --- a/config/recipes/config.rb +++ b/config/recipes/config.rb @@ -1,122 +1,78 @@ +login = Env.get(self, 'login') +password = Env.get(self, 'password') +email = Env.get(self, 'email') + ruby_block 'config_wait_http' do - block do Common.wait("127.0.0.1:#{node['git']['port']['http']}", timeout: 15, sleep_interval: 1) end + block do Utils.wait("127.0.0.1:#{node['git']['port']['http']}", timeout: 15, sleep_interval: 1) end end execute 'config_set_user' do - user node['git']['app']['user'] + user node['app']['user'] command <<-EOH - login="#{Env.get(node, 'login')}" - base="#{node['git']['dir']['install']}/gitea admin user --config #{node['git']['dir']['install']}/app.ini" - user="--username #{Env.get(node, 'login')} --password #{Env.get(node, 'password')}" - create="--email #{Env.get(node, 'email')} --admin --must-change-password=false" - if $base list | awk '{print $2}' | grep -q "^#{Env.get(node, 'login')}$"; then + login="#{login}" + base="#{node['git']['dir']['app']}/gitea admin user --config #{node['git']['dir']['app']}/app.ini" + user="--username #{login} --password #{password}" + create="--email #{email} --admin --must-change-password=false" + if $base list | awk '{print $2}' | grep -q "^#{login}$"; then $base delete $user fi $base create $create $user EOH - not_if { Common.request("#{node['git']['endpoint']}/user", user: Env.get(node, 'login'), pass: Env.get(node, 'password'), expect: true) } + not_if { Utils.request("#{node['git']['api']['endpoint']}/user", user: login, pass: password, expect: true) } end ruby_block 'config_set_key' do block do - require 'json' - login = Env.get(node, 'login') - password = Env.get(node, 'password') - url = "#{node['git']['endpoint']}/admin/users/#{login}/keys" key = ::File.read("#{node['key']}.pub").strip + uri = "#{node['git']['api']['endpoint']}/user/keys" - (JSON.parse(Common.request(url, user: login, pass: password).body) rescue []).each do |k| - Common.request("#{url}/#{k['id']}", method: Net::HTTP::Delete, user: login, pass: password) if k['key'] && k['key'].strip == key + Utils.request(uri, user: login, pass: password).json.each do |k| + Utils.request("#{uri}/#{k['id']}", user: login, pass: password, + method: Net::HTTP::Delete) if k['key'] && k['key'].strip == key end - result = Common.request(url, body: { title: "config-#{login}", key: key }.to_json, - user: login, pass: password, method: Net::HTTP::Post, headers: { 'Content-Type' => 'application/json' }) - raise "Set new key failed (#{result.code}): #{result.body}" unless [201, 422].include?(result.code.to_i) + + response = Utils.request(uri, body: { title: login, key: key }.json, user: login, pass: password, method: Net::HTTP::Post, headers: Constants::HEADER_JSON) + Logs.request!(uri, response, [201, 422], msg: "set key") end - action :run only_if { ::File.exist?("#{node['key']}.pub") } not_if do next false unless ::File.exist?("#{node['key']}.pub") begin - resp = Common.request("#{node['git']['endpoint']}/admin/users/#{Env.get(node, 'login')}/keys", user: Env.get(node, 'login'), pass: Env.get(node, 'password')) - (JSON.parse(resp.body) rescue []).any? 
{ |k| k['key'] && k['key'].strip == ::File.read("#{node['key']}.pub").strip } + response = Utils.request("#{node['git']['api']['endpoint']}/user/keys", user: login, pass: password) + response.json.any? { |k| k['key'] && k['key'].strip == ::File.read("#{node['key']}.pub").strip } end end end -directory "/home/#{node['git']['app']['user']}/.ssh" do - owner node['git']['app']['user'] - group node['git']['app']['group'] - mode '0700' - action :create -end - -file "/home/#{node['git']['app']['user']}/.ssh/config" do - content <<~CONF - Host #{node['host']} - HostName #{node['host']} - IdentityFile #{node['key']} - StrictHostKeyChecking no - CONF - owner node['git']['app']['user'] - group node['git']['app']['group'] - mode '0600' - action :create_if_missing -end - -ruby_block 'config_wait_ssh' do - block do Common.wait("#{Env.get(node, 'login')}@#{node['host']}:#{node['git']['port']['ssh']}") end -end - execute 'config_git_safe_directory' do command <<-SH git config --global --add safe.directory "*" && \ git config --system --add safe.directory "*" SH - environment 'HOME' => "/home/#{node['git']['app']['user']}" - action :run end execute 'config_git_user' do command <<-SH - git config --global user.name "#{Env.get(node, 'login')}" - git config --global user.email "#{Env.get(node, 'email')}" + git config --global user.name "#{login}" + git config --global user.email "#{email}" git config --global core.excludesfile #{ENV['PWD']}/.gitignore SH - user node['git']['app']['user'] - environment 'HOME' => "/home/#{node['git']['app']['user']}" - action :run + user node['app']['user'] end [node['git']['org']['main'], node['git']['org']['stage']].each do |org| ruby_block "config_git_org_#{org}" do block do - require 'json' - status_code = (result = Common.request("#{node['git']['endpoint']}/orgs", - method: Net::HTTP::Post, headers: { 'Content-Type' => 'application/json' }, - user: Env.get(node, 'login'), pass: Env.get(node, 'password'), - body: { username: org }.to_json - )).code.to_i - raise "HTTP #{status_code}: #{result.body}" unless [201, 409, 422].include? status_code + (response = Utils.request(uri="#{node['git']['api']['endpoint']}/orgs", + method: Net::HTTP::Post, headers: Constants::HEADER_JSON, + body: { username: org }.json, user: login, pass: password, )) + Logs.request!(uri, response, [201, 409, 422], msg: "create organization '#{org}'") end - action :run end end ruby_block 'config_git_environment' do block do - %w(proxmox login password email host).each do |parent_key| - value = node[parent_key] - next if value.nil? || value.to_s.strip.empty? - if value.is_a?(Hash) - value.each do |subkey, subvalue| - next if subvalue.nil? || subvalue.to_s.strip.empty? 
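Review note: the pattern used throughout the rewritten config recipe is that Utils.request performs the HTTP call and Logs.request! then checks the response against an allow-list of status codes, replacing the old inline Net::HTTP/raise blocks. A minimal sketch of that pattern using the same helpers (the organization name below is a placeholder, not taken from the diff):

# Sketch only: idempotent create via allow-listed status codes.
# 201 = created, 409/422 = already exists; other codes are flagged as a failed request by Logs.request!.
uri = "#{node['git']['api']['endpoint']}/orgs"
response = Utils.request(uri,
  method: Net::HTTP::Post, headers: Constants::HEADER_JSON,
  user: Env.get(self, 'login'), pass: Env.get(self, 'password'),
  body: { username: 'example-org' }.json)   # 'example-org' is illustrative
Logs.request!(uri, response, [201, 409, 422], msg: "create organization 'example-org'")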
- combined_key = "#{parent_key}_#{subkey}" - Env.set_variable(Chef.run_context.node, combined_key, subvalue) - end - else - Env.set_variable(Chef.run_context.node, parent_key, value) - end - end + Env.dump(self, 'proxmox', 'host', 'app', 'login', 'password', 'email') end - action :run -end \ No newline at end of file +end diff --git a/config/recipes/customize.rb b/config/recipes/customize.rb new file mode 100644 index 0000000..7dbc7e3 --- /dev/null +++ b/config/recipes/customize.rb @@ -0,0 +1 @@ +Chef::Log.info("") \ No newline at end of file diff --git a/config/recipes/default.rb b/config/recipes/default.rb index a44c90d..a4f03d8 100644 --- a/config/recipes/default.rb +++ b/config/recipes/default.rb @@ -3,3 +3,4 @@ include_recipe 'config::runner' include_recipe 'config::config' include_recipe 'config::repo' +include_recipe('config::customize') if node['git']['conf']['customize'] \ No newline at end of file diff --git a/config/recipes/git.rb b/config/recipes/git.rb index 94635e3..a37ada9 100644 --- a/config/recipes/git.rb +++ b/config/recipes/git.rb @@ -1,20 +1,20 @@ -Common.download(self, "#{node['git']['dir']['install']}/gitea", - url: -> { ver = Common.latest('https://github.com/go-gitea/gitea/releases/latest') - "https://github.com/go-gitea/gitea/releases/download/v#{ver}/gitea-#{ver}-linux-#{Common.arch(node)}" }, - owner: node['git']['app']['user'], - group: node['git']['app']['group'], - mode: '0755' ) +Utils.download(self, "#{node['git']['dir']['app']}/gitea", + url: -> { ver = Utils.latest('https://github.com/go-gitea/gitea/releases/latest') + "https://github.com/go-gitea/gitea/releases/download/v#{ver}/gitea-#{ver}-linux-#{Utils.arch(node)}" } ) -template "#{node['git']['dir']['install']}/app.ini" do +template "#{node['git']['dir']['app']}/app.ini" do source 'git_app.ini.erb' - owner node['git']['app']['user'] - group node['git']['app']['group'] + owner node['app']['user'] + group node['app']['group'] mode '0644' + variables(host: node['host'], app_user: node['app']['user'] , ssh_user: node['app']['config'], + app_dir: node['git']['dir']['app'], home_dir: node['git']['dir']['home'], + http_port: node['git']['port']['http'], ssh_port: node['git']['port']['ssh'] ) action :create_if_missing end Common.application(self, 'gitea', - user: node['git']['app']['user'], cwd: node['git']['dir']['data'], - exec: "#{node['git']['dir']['install']}/gitea web --config #{node['git']['dir']['install']}/app.ini", - unit: { 'Service' => { 'Environment' => "USER=#{node['git']['app']['user']} HOME=/home/#{node['git']['app']['user']}" } }, - subscribe: ["template[#{node['git']['dir']['install']}/app.ini]", "remote_file[#{node['git']['dir']['install']}/gitea]"] ) + user: node['app']['user'] , cwd: node['git']['dir']['data'], + exec: "#{node['git']['dir']['app']}/gitea web --config #{node['git']['dir']['app']}/app.ini", + unit: { 'Service' => { 'Environment' => "USER=#{node['app']['user'] } HOME=#{node['git']['dir']['home']}" } }, + subscribe: ["template[#{node['git']['dir']['app']}/app.ini]", "remote_file[#{node['git']['dir']['app']}/gitea]"] ) diff --git a/config/recipes/prepare.rb b/config/recipes/prepare.rb index 4f05777..7ec7aa8 100644 --- a/config/recipes/prepare.rb +++ b/config/recipes/prepare.rb @@ -1,52 +1,31 @@ -# Filesystem +Common.directories(self, [ (app = node['git']['dir']['app']), + node['git']['dir']['workspace'], node['runner']['dir']['app'], + "#{app}/custom", "#{app}/data", "#{app}/gitea-repositories", "#{app}/log" ]) -Common.directories(self, [ "/home/#{node['git']['app']['user']}", 
- "#{node['git']['home']}", - "#{node['git']['dir']['install']}", - "#{node['git']['dir']['data']}", - "#{node['git']['dir']['data']}/custom", - "#{node['git']['dir']['data']}/data", - "#{node['git']['dir']['data']}/data/gitea-repositories", - "#{node['git']['dir']['data']}/log", - "#{node['git']['dir']['data']}/custom/conf", - "#{node['runner']['dir']['install']}", - "#{::File.dirname(node['key'])}", - "#{node['git']['workspace']}" -], owner: node['git']['app']['user'], group: node['git']['app']['group']) - -Common.packages(self, %w(git acl python3-pip ansible nodejs npm python3-proxmoxer)) +Common.packages(self, %w(git acl python3-pip ansible ansible-core nodejs npm python3-proxmoxer)) execute 'prepare_install_ansible' do command 'python3 -m pip install --upgrade ansible --break-system-packages' + environment 'HOME' => '/tmp' end execute 'prepare_install_ansible_galaxy' do command 'LC_ALL=C.UTF-8 ansible-galaxy collection install community.general' + environment 'HOME' => '/tmp' user 'root' not_if "ansible-galaxy collection list | grep community.general" end # Self-management -file node['key'] do - content lazy { ::File.read('/root/id_rsa') } - owner node['git']['app']['user'] - group node['git']['app']['group'] +file "#{node['git']['dir']['home']}/.ssh/config" do + content <<~CONF + Host #{node['host']} + HostName #{node['host']} + IdentityFile #{node['key']} + StrictHostKeyChecking no + CONF + owner node['app']['user'] + group node['app']['group'] mode '0600' - sensitive true - action :create - only_if { ::File.exist?('/root/id_rsa') } - not_if { ::File.exist?(node['key']) } -end - -file "#{node['key']}.pub" do - content lazy { ::File.read('/root/id_rsa.pub') } - owner node['git']['app']['user'] - group node['git']['app']['group'] - mode '0644' - action :create - only_if { ::File.exist?('/root/id_rsa.pub') } - not_if { ::File.exist?("#{node['key']}.pub") } end - - diff --git a/config/recipes/repo.rb b/config/recipes/repo.rb index 98b67cb..30b4cdb 100644 --- a/config/recipes/repo.rb +++ b/config/recipes/repo.rb @@ -1,124 +1,116 @@ require 'find' require 'fileutils' -home = "/home/#{node['git']['app']['user']}" -source = ENV['PWD'] -destination = node['git']['workspace'] +source = ENV['PWD'] || Dir.pwd +destination = node['git']['dir']['workspace'] working = "#{destination}/workdir" -Common.directories(self, [destination, working], recreate: true, - owner: node['git']['app']['user'], group: node['git']['app']['group']) +is_bootstrap = ['127.0.0.1', 'localhost', '::1'].include?((Env.get(self, 'host'))) -(repositories = node['git']['repositories'] +Common.directories(self, [destination, working], recreate: true) + +(repositories = node['git']['conf']['repo'] .flat_map { |r| (r == './libs') ? Dir.glob(File.join(source, r, '*')).select { |d| File.directory?(d) }.map { |p| p.sub(source, '.') } : r } .sort_by { |r| r == "./" ? 1 : 0 }).each do |repository| # dynamically resolved libs before monorepo monorepo = (repository == "./") - path_source = monorepo ? source : File.expand_path(repository, source) + path_source = monorepo ? source : File.expand_path(repository.to_s, source.to_s) name_repo = File.basename(path_source) path_working = "#{working}/#{name_repo}" path_destination = monorepo ? 
destination : File.expand_path(name_repo, destination) - Chef::Log.info("#{repository} (#{name_repo})") - ruby_block "repo_exists_#{name_repo}" do + only_if { Logs.info?("#{repository} (#{name_repo})") } block do node.run_state["#{name_repo}_repo_exists"] = - (Common.request("#{node['git']['endpoint']}/repos/#{node['git']['org']['main']}/#{name_repo}", - user: Env.get(node, 'login'), pass: Env.get(node, 'password')) - ).code.to_i != 404 + (Utils.request("#{node['git']['api']['endpoint']}/repos/#{node['git']['org']['main']}/#{name_repo}", + user: Env.get(self, 'login'), pass: Env.get(self, 'password'))).code.to_i != 404 end - action :run end execute "repo_exists_snapshot_create_#{name_repo}" do command <<-EOH - if git ls-remote ssh://#{node['git']['app']['user']}@#{node['git']['repo']['ssh']}/#{node['git']['org']['main']}/#{name_repo}.git HEAD | grep -q .; then - git clone --recurse-submodules ssh://#{node['git']['app']['user']}@#{node['git']['repo']['ssh']}/#{node['git']['org']['main']}/#{name_repo}.git #{path_working} + if git ls-remote ssh://#{node['app']['user'] }@#{node['git']['host']['ssh']}/#{node['git']['org']['main']}/#{name_repo}.git HEAD | grep -q .; then + git clone --recurse-submodules ssh://#{node['app']['user'] }@#{node['git']['host']['ssh']}/#{node['git']['org']['main']}/#{name_repo}.git #{path_working} cd #{path_working} && git submodule update --init --recursive find . -type d -name .git -exec rm -rf {} + else mkdir -p #{path_working} fi EOH - user node['git']['app']['user'] - environment 'HOME' => home - only_if { Chef::Log.info("[#{repository} (#{name_repo})]: delete repository after snapshot") + user node['app']['user'] + only_if { Logs.info("[#{repository} (#{name_repo})] delete stored repository") node.run_state["#{name_repo}_repo_exists"] } end ruby_block "repo_exists_reset_#{name_repo}" do block do - unless [204, 404].include?(status_code = (result = Common.request("#{node['git']['endpoint']}/repos/#{node['git']['org']['main']}/#{name_repo}", - method: Net::HTTP::Delete, user: Env.get(node, 'login'), pass: Env.get(node, 'password'))).code.to_i) - raise "Failed to delete #{name_repo} (#{status_code}): #{result.body}" - end + uri = "#{node['git']['api']['endpoint']}/repos/#{node['git']['org']['main']}/#{name_repo}" + response = Utils.request(uri, method: Net::HTTP::Delete, user: Env.get(self, 'login'), pass: Env.get(self, 'password')) + Logs.request!(uri, response, [204, 404], msg: "Delete #{name_repo}") end - action :run only_if { node.run_state["#{name_repo}_repo_exists"] } end - Chef::Log.info("[#{repository} (#{name_repo})]: request repository") ruby_block "repo_request_#{name_repo}" do + only_if { Logs.info?("[#{repository} (#{name_repo})] create repository") } block do - require 'json' - (result = Common.request( - "#{node['git']['endpoint']}/admin/users/#{node['git']['org']['main']}/repos", - method: Net::HTTP::Post, headers: { 'Content-Type' => 'application/json' }, - user: Env.get(node, 'login'), pass: Env.get(node, 'password'), - body: { name: name_repo, private: false, auto_init: false, default_branch: 'main' }.to_json - )).code.to_i == 201 or raise "Error creating repository '#{name_repo}': #{result.code} #{result.message} #{result.body}" + uri="#{node['git']['api']['endpoint']}/admin/users/#{node['git']['org']['main']}/repos" + response = Utils.request(uri, method: Net::HTTP::Post, headers: Constants::HEADER_JSON, + user: Env.get(self, 'login'), pass: Env.get(self, 'password'), + body: { name: name_repo, private: false, auto_init: false, default_branch: 
'main' }.json ) + + Logs.request!(uri, response, [201], msg: "Create repository '#{name_repo}'") + response.json end - action :run end execute "repo_git_init_#{name_repo}" do command <<-EOH mkdir -p #{path_destination} && cd #{path_destination} && git init -b main EOH - user node['git']['app']['user'] - environment 'HOME' => home + user node['app']['user'] end template "#{path_destination}/.git/config" do source 'repo_config.erb' - owner node['git']['app']['user'] - group node['git']['app']['group'] + owner node['app']['user'] + group node['app']['group'] mode '0644' - variables(repo: name_repo, git_user: node['git']['app']['user']) - action :create + variables(repo: name_repo, + config: node['app']['config'], + org: node['git']['org']['main'], + ssh: node['git']['host']['ssh']) only_if { ::File.directory?("#{path_destination}/.git") } end - Chef::Log.info("[#{repository} (#{name_repo})]: base commit") execute "repo_git_empty_#{name_repo}" do + only_if { Logs.info?("[#{repository} (#{name_repo})] base commit") } command <<-EOH git commit --allow-empty -m "base commit [skip ci]" && git checkout -b release git push -u origin main && git push -u origin release EOH cwd path_destination - user node['git']['app']['user'] - environment 'HOME' => home + user node['app']['user'] end execute "repo_exists_snapshot_push_#{name_repo}" do command <<-EOH cp -r #{path_destination}/.git #{path_working} - cd #{path_working} && git checkout -b snapshot && git add -A + cd #{path_working} && git checkout -b snapshot && git add -A git commit --allow-empty -m "snapshot [skip ci]" git push -f origin snapshot && (rm -rf #{path_working} || true) EOH cwd path_destination - user node['git']['app']['user'] - environment 'HOME' => home - only_if { Chef::Log.info("[#{repository} (#{name_repo})]: snapshot commit") + user node['app']['user'] + only_if { Logs.info("[#{repository} (#{name_repo})]: snapshot commit") node.run_state["#{name_repo}_repo_exists"] } end ruby_block "repo_files_#{name_repo}" do block do - Find.find(path_source) do |path_src| + Find.find(path_source) do |path_src| next if path_src =~ /(^|\/)\.git(\/|$)/ || path_src =~ /(^|\/)\.gitmodules(\/|$)/ path_src_rel = path_src.sub(/^#{Regexp.escape(path_source)}\/?/, '') path_dst = File.join(path_destination, path_src_rel) @@ -129,16 +121,29 @@ FileUtils.cp(path_src, path_dst, verbose: true) end end - FileUtils.chown_R(node['git']['app']['user'], node['git']['app']['group'], path_destination) + FileUtils.chown_R(node['app']['user'] , node['app']['group'], path_destination) end - action :run + end + + directory "#{path_destination}/.gitea/workflows" do + recursive true + end + + template "#{path_destination}/.gitea/workflows/pipeline.yml" do + source 'repo_pipeline.yml.erb' + owner node['app']['user'] + group node['app']['group'] + mode '0644' + only_if { repository.include?('libs/') and File.exist?("#{path_destination}/config.env") } + not_if { File.exist?("#{path_destination}/.gitea/workflows/pipeline.yml") } end if monorepo submodules = repositories.reject { |r| r == "./" } # without itself - Chef::Log.info("#{repository} (monorepository): referencing #{submodules}") + ruby_block 'repo_mono_submodule_rewritten' do + only_if { Logs.info?("#{repository} (monorepository): referencing #{submodules}") } block do path_dst_gitmodules = File.join(destination, '.gitmodules') if File.exist?(path_dst_gitmodules) @@ -147,7 +152,6 @@ .gsub(/(url\s*=\s*http:\/\/)([^:\/\s]+)/) { "#{$1}#{node['host']}" }) end end - action :run end # Submodule handling in Monorepository @@ 
-155,7 +159,7 @@ path_module = submodule.sub(%r{^\./}, '') module_name = File.basename(path_module) - module_url = "#{node['git']['host']}/#{node['git']['org']['main']}/#{module_name}.git" + module_url = "#{node['git']['host']['http']}/#{node['git']['org']['main']}/#{module_name}.git" # delete module files in last ordered monorepository directory File.join(path_destination, path_module) do @@ -163,33 +167,31 @@ action :delete end - Chef::Log.info("#{repository} (monorepository): referencing #{path_module} (#{module_name})") - execute "repo_mono_submodule_references_#{module_name}" do - cwd path_destination - user node['git']['app']['user'] - environment 'HOME' => home + only_if { Logs.info?("#{repository} (monorepository): referencing #{path_module} (#{module_name})") } command <<-EOH if ! git config --file .gitmodules --get-regexp path | grep -q "^submodule\\.#{module_name}\\.path"; then echo "submodule add: #{module_url} -> #{path_module}" git submodule add #{module_url} #{path_module} fi git submodule update --init --recursive - # bootstrap only - if [ "#{Env.get(node, 'host')}" = "127.0.0.1" ] && [ -f local/config.json ]; then + # pass variables in bootstrap + if [ "#{is_bootstrap}" = "true" ] && [ -f local/config.json ]; then git add -f local/config.json fi EOH + cwd path_destination + user node['app']['user'] end end end # Repositories + execute "repo_push_#{name_repo}" do cwd path_destination - user node['git']['app']['user'] - environment 'HOME' => home + user node['app']['user'] command <<-EOH git add --all if ! git diff --quiet || ! git diff --cached --quiet; then @@ -197,8 +199,8 @@ git push -f origin HEAD:main sleep 3 if ! git ls-remote origin refs/for/release | grep -q "$(git rev-parse HEAD)"; then - if { [ "#{repository}" != "./" ] && [ "#{Env.get(node, 'host')}" != "127.0.0.1" ]; } || \ - { [ "#{repository}" = "./" ] && [ "#{Env.get(node, 'host')}" = "127.0.0.1" ]; }; then + if { [ "#{repository}" != "./" ] && [ "#{is_bootstrap}" = "false" ]; } || \ + { [ "#{repository}" = "./" ] && [ "#{is_bootstrap}" = "true" ]; }; then git push origin HEAD:refs/for/release \ -o topic="release" \ -o title="Release Pull Request" \ @@ -208,39 +210,41 @@ fi fi EOH - action :run - end - - directory path_destination do - action :delete - recursive true - only_if { ::Dir.exist?(path_destination) } end # Fork as stage repository ruby_block "repo_stage_fork_clean_#{name_repo}" do block do - if Common.request("#{node['git']['endpoint']}/repos/#{node['git']['org']['main']}/#{name_repo}", - user: Env.get(node, 'login'), pass: Env.get(node, 'password')).code.to_i != 404 - status_code = (Common.request("#{node['git']['endpoint']}/repos/#{node['git']['org']['stage']}/#{name_repo}", - method: Net::HTTP::Delete, user: Env.get(node, 'login'), pass: Env.get(node, 'password'))).code.to_i - raise "Failed to clean test/#{name_repo} (#{status_code})" unless [204, 404].include?(status_code) + uri="#{node['git']['api']['endpoint']}/repos/#{node['git']['org']['main']}/#{name_repo}" + if Utils.request(uri, user: Env.get(self, 'login'), pass: Env.get(self, 'password')).code.to_i != 404 + response = Utils.request(uri="#{node['git']['api']['endpoint']}/repos/#{node['git']['org']['stage']}/#{name_repo}", + method: Net::HTTP::Delete, user: Env.get(self, 'login'), pass: Env.get(self, 'password')) + Logs.request!(uri, response, [204, 404], msg: "Clean #{node['git']['org']['stage']}/#{name_repo}") end end - action :run end ruby_block "repo_stage_fork_create_#{name_repo}" do block do - status_code = 
Common.request("#{node['git']['endpoint']}/repos/#{node['git']['org']['main']}/#{name_repo}/forks", - method: Net::HTTP::Post, headers: { 'Content-Type' => 'application/json' }, - user: Env.get(node, 'login'), pass: Env.get(node, 'password'), - body: { name: name_repo, organization: node['git']['org']['stage'] }.to_json - ).code.to_i - raise "Forking to #{node['git']['org']['stage']}/#{name_repo} failed (#{status_code})" unless [201, 202].include?(status_code) + uri="#{node['git']['api']['endpoint']}/repos/#{node['git']['org']['main']}/#{name_repo}/forks" + Logs.request!(uri, Utils.request(uri, method: Net::HTTP::Post, headers: Constants::HEADER_JSON, + user: Env.get(self, 'login'), pass: Env.get(self, 'password'), + body: { name: name_repo, organization: node['git']['org']['stage'] }.json ), + [201, 202], msg: "Fork to #{node['git']['org']['stage']}/#{name_repo}") end - action :run end + directory path_destination do + action :delete + recursive true + only_if { ::Dir.exist?(path_destination) } + end + +end + +ruby_block "#{cookbook_name}_env_dump" do + block do + Env.dump(self, 'git', 'runner', repo: cookbook_name) + end end diff --git a/config/recipes/runner.rb b/config/recipes/runner.rb index c0d67ca..cf4e689 100644 --- a/config/recipes/runner.rb +++ b/config/recipes/runner.rb @@ -1,66 +1,49 @@ -Common.directories(self, node['runner']['dir']['install'], owner: node['git']['app']['user'], group: node['git']['app']['group']) +Common.directories(self, node['runner']['dir']['app']) -Common.download(self, "#{node['runner']['dir']['install']}/act_runner", - url: -> { ver = Common.latest('https://gitea.com/gitea/act_runner/releases/latest') - "https://gitea.com/gitea/act_runner/releases/download/v#{ver}/act_runner-#{ver}-linux-#{Common.arch(node)}" }, - owner: node['git']['app']['user'], - group: node['git']['app']['group'], - mode: '0755' -) +Utils.download(self, "#{node['runner']['dir']['app']}/act_runner", + url: -> { ver = Utils.latest('https://gitea.com/gitea/act_runner/releases/latest') + "https://gitea.com/gitea/act_runner/releases/download/v#{ver}/act_runner-#{ver}-linux-#{Utils.arch(node)}" } ) -template "#{node['runner']['dir']['install']}/config.yaml" do +template "#{node['runner']['dir']['app']}/config.yaml" do source 'runner.config.yaml.erb' - owner node['git']['app']['user'] - group node['git']['app']['group'] + owner node['app']['user'] + group node['app']['group'] mode '0644' - action :create end Common.application(self, 'runner', - user: node['git']['app']['user'], action: [:enable], cwd: node['runner']['dir']['install'], - exec: "#{node['runner']['dir']['install']}/act_runner daemon --config #{node['runner']['dir']['install']}/config.yaml", - subscribe: ["template[#{node['runner']['dir']['install']}/config.yaml]", "remote_file[#{node['runner']['dir']['install']}/act_runner]"] ) + user: node['app']['user'] , action: [:enable], cwd: node['runner']['dir']['app'], + exec: "#{node['runner']['dir']['app']}/act_runner daemon --config #{node['runner']['dir']['app']}/config.yaml", + subscribe: ["template[#{node['runner']['dir']['app']}/config.yaml]", "remote_file[#{node['runner']['dir']['app']}/act_runner]"] ) ruby_block 'runner_register' do block do - require 'net/http' uri = URI("http://localhost:#{node['git']['port']['http']}") - connected = 15.times.any? 
do - begin - res = Common.request(uri) - res.is_a?(Net::HTTPSuccess) || res.is_a?(Net::HTTPRedirection) - rescue Errno::ECONNREFUSED, SocketError - false - ensure - sleep 5 - end + Logs.try!("Gitea not responding", [:uri, uri], raise: true) do + connected = 15.times.any? do + begin + res = Utils.request(uri, expect: true) + rescue Errno::ECONNREFUSED, SocketError + false + ensure; sleep 5; end + end unless connected end - raise 'Gitea not responding' unless connected (token = Mixlib::ShellOut.new( - "#{node['git']['dir']['install']}/gitea actions --config #{node['git']['dir']['install']}/app.ini generate-runner-token", - user: node['git']['app']['user'], - environment: { 'HOME' => "/home/#{node['git']['app']['user']}" } - )).run_command + "#{node['git']['dir']['app']}/gitea actions --config #{node['git']['dir']['app']}/app.ini generate-runner-token", + user: node['app']['user'])).run_command token.error! - token = token.stdout.strip (register = Mixlib::ShellOut.new( - "#{node['runner']['dir']['install']}/act_runner register " \ + "#{node['runner']['dir']['app']}/act_runner register " \ "--instance http://localhost:#{node['git']['port']['http']} " \ - "--token #{token} " \ - "--no-interactive " \ - "--labels shell " \ - "--config #{node['runner']['dir']['install']}/config.yaml", - cwd: node['runner']['dir']['install'], - user: node['git']['app']['user'], - environment: { 'HOME' => "/home/#{node['git']['app']['user']}" } + "--token #{token.stdout.strip} --no-interactive " \ + "--config #{node['runner']['dir']['app']}/config.yaml --labels #{node['runner']['conf']['label']} ", + cwd: node['runner']['dir']['app'], + user: node['app']['user'] )).run_command register.error! - - # File.write(node['runner']['marker_file'], Time.now.to_s) end - # not_if { ::File.exist?(node['runner']['marker_file']) } # stability over convention end Common.application(self, 'runner') diff --git a/config/templates/git_app.ini.erb b/config/templates/git_app.ini.erb index 72ff8d3..560d27d 100644 --- a/config/templates/git_app.ini.erb +++ b/config/templates/git_app.ini.erb @@ -1,19 +1,21 @@ ; Gitea Application Configuration APP_NAME = Gitea -RUN_USER = <%= node['git']['app']['user'] %> +RUN_USER = <%= @app_user %> RUN_MODE = prod [server] -DOMAIN = <%= node['host'] %> -HTTP_PORT = <%= node['git']['port']['http'] %> -ROOT_URL = http://<%= node['host'] %>:<%= node['git']['port']['http'] %>/ -SSH_PORT = <%= node['git']['port']['ssh'] %> +DOMAIN = <%= @host %> +HTTP_PORT = <%= @http_port %> +ROOT_URL = http://<%= @host %>:<%= @http_port %>/ +SSH_PORT = <%= @ssh_port %> START_SSH_SERVER = true +BUILTIN_SSH_SERVER_USER = <%= @ssh_user %> +HTTP_ADDR = 0.0.0.0 [database] DB_TYPE = sqlite3 -PATH = <%= node['git']['dir']['data'] %>/gitea.db +PATH = <%= @app_dir %>/gitea.db [security] INSTALL_LOCK = true @@ -25,7 +27,7 @@ DEFAULT_ACTIONS_URL=self LOG_RETENTION_DAYS = 7 [repository] -ROOT = <%= node['git']['dir']['data'] %>/gitea-repositories +ROOT = <%= @app_dir %>/gitea-repositories REPOSITORY_AVATAR_FALLBACK = none DISABLE_STARS = true DISABLE_MIGRATIONS = true @@ -40,10 +42,11 @@ SHOW_USER_EMAIL = false DISABLE_AVATAR = true [git] +HOME_PATH=<%= @home_dir %> MAX_GIT_DIFF_LINES = 500 MAX_GIT_DIFF_FILES = 50 GC_ARGS = --aggressive [log] MODE = console -LEVEL = Warn \ No newline at end of file +LEVEL = Error \ No newline at end of file diff --git a/config/templates/repo_config.erb b/config/templates/repo_config.erb index 47746ff..4616285 100644 --- a/config/templates/repo_config.erb +++ b/config/templates/repo_config.erb @@ -5,7 
+5,7 @@ logallrefupdates = true [remote "origin"] - url = ssh://<%= node['git']['app']['user'] %>@<%= node['git']['repo']['ssh'] %>/<%= node['git']['org']['main'] %>/<%= @repo %>.git + url = ssh://<%= @config %>@<%= @ssh %>/<%= @org %>/<%= @repo %>.git fetch = +refs/heads/*:refs/remotes/origin/* [branch "main"] diff --git a/libs/assistant/.gitea/workflows/pipeline.yml b/config/templates/repo_pipeline.yml.erb similarity index 100% rename from libs/assistant/.gitea/workflows/pipeline.yml rename to config/templates/repo_pipeline.yml.erb diff --git a/config/templates/runner.config.yaml.erb b/config/templates/runner.config.yaml.erb index f19c5c9..1f126e9 100644 --- a/config/templates/runner.config.yaml.erb +++ b/config/templates/runner.config.yaml.erb @@ -1,5 +1,5 @@ name: <%= node['hostname'] %> -labels: ["<%= node['runner']['labels'] %>"] +labels: ["<%= node['runner']['conf']['label'] %>"] log: level: warn @@ -8,7 +8,7 @@ runner: file: .runner capacity: 1 timeout: 1h - labels: ["<%= node['runner']['labels'] %>"] + labels: ["<%= node['runner']['conf']['label'] %>"] cache: enabled: false diff --git a/docs/environment.png b/docs/environment.png new file mode 100644 index 0000000..502e4e6 Binary files /dev/null and b/docs/environment.png differ diff --git a/libs/assistant/attributes/default.rb b/libs/assistant/attributes/default.rb index f0c9eb6..e635991 100644 --- a/libs/assistant/attributes/default.rb +++ b/libs/assistant/attributes/default.rb @@ -1,9 +1,9 @@ -default['ip'] = "#{ENV['IP']}" +default['ip'] = "#{ENV['IP']}" -default['app']['user'] = 'app' -default['app']['group'] = 'app' +default['app']['user'] = Default.user(node) +default['app']['group'] = Default.group(node) -default['homeassistant']['dir']['venv'] = '/app/venv' +default['homeassistant']['dir']['venv'] = '/app/venv' default['homeassistant']['dir']['config'] = '/app/homeassistant' -default['configurator']['dir'] = '/app/configurator' \ No newline at end of file +default['configurator']['dir'] = '/app/configurator' \ No newline at end of file diff --git a/libs/assistant/recipes/default.rb b/libs/assistant/recipes/default.rb index d7a65de..76818a6 100644 --- a/libs/assistant/recipes/default.rb +++ b/libs/assistant/recipes/default.rb @@ -1,3 +1,7 @@ +Env.dump(self, cookbook_name, repo: cookbook_name) + +Common.directories(self, [node['homeassistant']['dir']['config'], '/app/uv-cache']) + execute 'fix_broken_apt' do command 'apt-get --fix-broken install -y' ignore_failure true @@ -10,16 +14,13 @@ nfs-common wget libncurses5-dev libgdbm-dev libnss3-dev libreadline-dev libsqlite3-dev libbz2-dev python3-venv]) -Common.directories(self, [node['homeassistant']['dir']['config'], '/app/uv-cache'], owner: node['app']['user'], group: node['app']['group']) - link '/config' do to node['homeassistant']['dir']['config'] owner node['app']['user'] group node['app']['group'] - action :create end -Common.download(self, '/tmp/Python-3.13.5.tgz', url: 'https://www.python.org/ftp/python/3.13.5/Python-3.13.5.tgz') +Utils.download(self, '/tmp/Python-3.13.5.tgz', url: 'https://www.python.org/ftp/python/3.13.5/Python-3.13.5.tgz') bash 'install_python3135' do cwd '/tmp' @@ -41,7 +42,7 @@ user node['app']['user'] group node['app']['group'] environment( - 'HOME' => "/home/#{node['app']['user']}", + # 'HOME' => "/home/#{node['app']['user']}", 'UV_CACHE_DIR' => '/app/uv-cache' ) not_if { ::File.exist?("#{node['homeassistant']['dir']['venv']}/bin/activate") } @@ -51,9 +52,7 @@ command "#{node['homeassistant']['dir']['venv']}/bin/python -m ensurepip" user 
node['app']['user'] group node['app']['group'] - environment( - 'HOME' => "/home/#{node['app']['user']}" - ) + # environment('HOME' => "/home/#{node['app']['user']}") not_if { ::File.exist?("#{node['homeassistant']['dir']['venv']}/bin/pip") } end @@ -87,12 +86,10 @@ not_if { ::File.exist?("#{node['configurator']['dir']}/bin/hass-configurator") } end -Common.application(self, 'homeassistant', - user: node['app']['user'], cwd: node['homeassistant']['dir']['config'], +Common.application(self, 'homeassistant', cwd: node['homeassistant']['dir']['config'], exec: "#{node['homeassistant']['dir']['venv']}/bin/python3 -m homeassistant --config #{node['homeassistant']['dir']['config']}", unit: { 'Service' => { 'RestartForceExitStatus' => '100', 'Environment' => "PATH=#{node['homeassistant']['dir']['venv']}/bin:/usr/local/bin:/usr/bin:/usr/local/bin/uv" } } ) -Common.application(self, 'hass-configurator', - user: node['app']['user'], cwd: node['homeassistant']['dir']['config'], +Common.application(self, 'hass-configurator', cwd: node['homeassistant']['dir']['config'], exec: "#{node['configurator']['dir']}/bin/hass-configurator -s -e -b #{node['homeassistant']['dir']['config']}" ) \ No newline at end of file diff --git a/libs/bridge/.config.json b/libs/bridge/.config.json deleted file mode 100644 index 4c885df..0000000 --- a/libs/bridge/.config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "serial": "", - "adapter": "" -} \ No newline at end of file diff --git a/libs/bridge/.gitea/workflows/pipeline.yml b/libs/bridge/.gitea/workflows/pipeline.yml deleted file mode 100644 index ff44609..0000000 --- a/libs/bridge/.gitea/workflows/pipeline.yml +++ /dev/null @@ -1,14 +0,0 @@ -on: - workflow_dispatch: - push: - branches: [ release, main, develop ] - -jobs: - include: - runs-on: shell - steps: - - id: init - uses: main/config/.gitea/workflows@main - with: - repo: ${{ gitea.repository }} - ref: ${{ gitea.ref_name }} diff --git a/libs/bridge/attributes/default.rb b/libs/bridge/attributes/default.rb index 6f81745..e59d6c7 100644 --- a/libs/bridge/attributes/default.rb +++ b/libs/bridge/attributes/default.rb @@ -1,9 +1,9 @@ -default['app']['user'] = 'app' -default['app']['group'] = 'app' +default['app']['user'] = Default.user(node) +default['app']['group'] = Default.group(node) default['bridge']['port'] = 8080 -default['bridge']['serial'] = Env.get(node, 'serial') || '/dev/serial/by-id/' -default['bridge']['adapter'] = Env.get(node, 'adapter') || '' +default['bridge']['adapter'] = Env.get(node, 'adapter') || 'zstack' # overwrite +default['bridge']['serial'] = Env.get(node, 'serial') || '/dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_1c27822ced5fec11a1d52e5f25bfaa52-if00-port0' default['bridge']['dir'] = '/app/bridge' default['bridge']['data'] = "#{node['bridge']['dir']}/data" diff --git a/libs/bridge/config.env b/libs/bridge/config.env index 7a6cf03..67e39e2 100644 --- a/libs/bridge/config.env +++ b/libs/bridge/config.env @@ -5,5 +5,5 @@ CORES=2 MEMORY=2048 SWAP=512 DISK=local-lvm:8 -MOUNT=share +MOUNT=share,/dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_1c27822ced5fec11a1d52e5f25bfaa52-if00-port0 BOOT=yes \ No newline at end of file diff --git a/libs/bridge/recipes/default.rb b/libs/bridge/recipes/default.rb index f81cfc4..e193367 100644 --- a/libs/bridge/recipes/default.rb +++ b/libs/bridge/recipes/default.rb @@ -1,6 +1,16 @@ +Env.dump(self, cookbook_name, repo: cookbook_name) + +login = Env.get(self, 'login') +password = Env.get(self, 'password') +broker = Env.get(self, 'broker') + 
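Review note on the Env.dump call introduced at the top of this recipe: per the library code added earlier in this change, Env.dump flattens the named node attribute into repository-scoped Gitea Actions variables, one per non-blank sub-key. For this cookbook it behaves roughly like the sketch below (variable names are derived from the attribute keys; the listed examples are illustrative):

# Sketch of Env.dump(self, cookbook_name, repo: cookbook_name) for the 'bridge' cookbook:
# node['bridge'] is a Hash, so every non-blank sub-key becomes one repo-scoped variable.
node['bridge'].each do |subkey, subvalue|
  next if subvalue.blank?
  Env.set_variable(node, "bridge_#{subkey}", subvalue, repo: 'bridge')
  # e.g. bridge_port, bridge_adapter, bridge_serial, bridge_dir, bridge_data
end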
Common.packages(self, %w[unzip curl]) -Common.directories(self, [node['bridge']['dir'], node['bridge']['data']], owner: node['app']['user'], group: node['app']['group']) +group 'dialout' do + action :modify + members [node['app']['user']] + append true +end execute 'setup_node' do command 'curl -fsSL https://deb.nodesource.com/setup_20.x | bash -' @@ -11,64 +21,54 @@ package "nodejs" -group 'dialout' do - action :modify - members [node['app']['user']] - append true -end - -execute 'enable_corepack' do - command 'corepack enable && corepack prepare pnpm --activate' +execute 'install_pnpm' do + command 'npm i -g pnpm@9' not_if 'which pnpm' end -z2m_file = Common.download(self, "/tmp/zigbee2mqtt.zip", - url: -> { ver = Common.latest('https://github.com/Koenkk/zigbee2mqtt/releases/latest') - "https://github.com/Koenkk/zigbee2mqtt/archive/refs/tags/#{ver}.zip" }, - owner: node['git']['app']['user'], - group: node['git']['app']['group'], - mode: '0644' -) -z2m_file.notifies :stop, "service[zigbee2mqtt]", :immediately if resources("service[zigbee2mqtt]") rescue nil -z2m_file.notifies :run, 'execute[create_backup]', :immediately -z2m_file.notifies :run, 'execute[zigbee2mqtt_extract]', :immediately -z2m_file.notifies :run, 'execute[install_dependencies]', :delayed -z2m_file.notifies :run, 'execute[zigbee2mqtt_build]', :delayed - -execute 'create_backup' do - command "tar -czf #{node['bridge']['dir']}/backup_$(date +%Y%m%d%H%M%S).tar.gz -C #{node['bridge']['dir']} . && find #{node['bridge']['dir']} -name 'backup_*.tar.gz' -type f | head -n -3 | xargs rm -f || true" - user node['app']['user'] - group node['app']['group'] - cwd node['bridge']['dir'] - only_if { ::Dir.exist?("#{node['bridge']['dir']}/node_modules") } - action :nothing -end +installed_version = ::File.exist?("#{node['bridge']['dir']}/.version") ? ::File.read("#{node['bridge']['dir']}/.version").strip : nil +Logs.info("installed version: #{installed_version}") -execute 'zigbee2mqtt_extract' do - command lazy { "unzip -o #{"/tmp/zigbee2mqtt.zip"} -d #{node['bridge']['dir']} && mv #{node['bridge']['dir']}/zigbee2mqtt*/* #{node['bridge']['dir']}/ && rm -rf #{node['bridge']['dir']}/zigbee2mqtt" } - user node['app']['user'] - group node['app']['group'] - only_if { ::File.exist?("/tmp/zigbee2mqtt.zip") } - action :nothing -end +latest_version = Utils.latest('https://github.com/Koenkk/zigbee2mqtt/releases/latest') +Logs.info("latest version: #{latest_version}") if latest_version -execute 'install_dependencies' do - command 'pnpm install --frozen-lockfile' - user node['app']['user'] - group node['app']['group'] - cwd node['bridge']['dir'] - environment('HOME' => "/home/#{node['app']['user']}") - action :nothing +latest_version = false unless installed_version.nil? || Gem::Version.new(latest_version) > Gem::Version.new(installed_version) + +update_needed = false +if latest_version + update_needed = installed_version.nil? 
|| Gem::Version.new(latest_version) > Gem::Version.new(installed_version) end -execute 'zigbee2mqtt_build' do - command 'pnpm build' - user node['app']['user'] - group node['app']['group'] - cwd node['bridge']['dir'] - environment('HOME' => "/home/#{node['app']['user']}") - action :nothing - notifies :restart, "service[zigbee2mqtt]", :delayed +Common.directories(self, [node['bridge']['dir'], node['bridge']['data']], recreate: update_needed) + +if update_needed + if latest_version && ::File.exist?("/etc/systemd/system/zigbee2mqtt.service") + execute 'stop_zigbee2mqtt' do + command 'systemctl stop zigbee2mqtt || true' + action :run + end + end + + Utils.download(self, "/tmp/zigbee2mqtt.zip", + url: "https://github.com/Koenkk/zigbee2mqtt/archive/refs/tags/#{latest_version}.zip") + + execute 'zigbee2mqtt_files' do + command lazy { "unzip -o /tmp/zigbee2mqtt.zip -d #{node['bridge']['dir']} && mv #{node['bridge']['dir']}/zigbee2mqtt*/* #{node['bridge']['dir']}/ && rm -rf #{node['bridge']['dir']}/zigbee2mqtt*" } + user node['app']['user'] + group node['app']['group'] + only_if { ::File.exist?('/tmp/zigbee2mqtt.zip') } + notifies :run, 'execute[zigbee2mqtt_build]', :immediately + end + + execute 'zigbee2mqtt_build' do + command 'pnpm install --frozen-lockfile && pnpm build' + user node['app']['user'] + group node['app']['group'] + environment('HOME' => '/tmp') + cwd node['bridge']['dir'] + action :nothing + end + end template "#{node['bridge']['data']}/configuration.yaml" do @@ -82,44 +82,18 @@ adapter: node['bridge']['adapter'], data_dir: node['bridge']['data'], logs_dir: node['bridge']['logs'], - broker_host: Env.get(node, 'broker'), - broker_user: Env.get(node, 'login'), - broker_password: Env.get(node, 'password') + broker_host: broker, + broker_user: login, + broker_password: password ) - not_if { ::File.exist?("#{node['bridge']['data']}/configuration.yaml") } - notifies :restart, "service[zigbee2mqtt]", :delayed + only_if { latest_version && !::File.exist?("#{node['bridge']['data']}/configuration.yaml") } end -Common.application(self, 'zigbee2mqtt', - user: node['app']['user'], cwd: node['bridge']['dir'], +ruby_block "restore_snapshot_if_exists" do + block { Utils.snapshot(self, node['bridge']['data'], restore: true) } +end + +Common.application(self, 'zigbee2mqtt', cwd: node['bridge']['dir'], exec: "/usr/bin/node #{node['bridge']['dir']}/index.js", unit: { 'Service' => { 'Environment' => 'NODE_ENV=production', 'PermissionsStartOnly' => 'true', 'ExecStartPre' => "-/bin/chown #{node['app']['user']}:#{node['app']['group']} #{node['bridge']['serial']}" } } ) - -# removed due root user permissions: -# -# ruby_block 'proxmox_config' do -# block do -# require 'net/http' -# require 'openssl' -# require 'json' -# -# proxmox_host = Env.get(node, 'proxmox_host') -# proxmox_user = Env.get(node, 'proxmox_user') -# proxmox_token = Env.get(node, 'proxmox_token') -# proxmox_secret = Env.get(node, 'proxmox_secret') -# -# uri = URI("https://#{proxmox_host}:8006/api2/json/nodes/pve/lxc/#{ENV['ID']}/config") -# http = Net::HTTP.new(uri.hostname, uri.port) -# http.use_ssl = true -# http.verify_mode = OpenSSL::SSL::VERIFY_NONE -# -# req = Net::HTTP::Put.new(uri.request_uri) -# req['Authorization'] = "PVEAPIToken=#{proxmox_user}!#{proxmox_token}=#{proxmox_secret}" -# req['Content-Type'] = 'application/json' -# req.body = { dev0: node['bridge']['serial'] }.to_json -# -# http.request(req) -# end -# action :run -# end \ No newline at end of file diff --git a/libs/broker/.gitea/workflows/pipeline.yml 
b/libs/broker/.gitea/workflows/pipeline.yml deleted file mode 100644 index ff44609..0000000 --- a/libs/broker/.gitea/workflows/pipeline.yml +++ /dev/null @@ -1,14 +0,0 @@ -on: - workflow_dispatch: - push: - branches: [ release, main, develop ] - -jobs: - include: - runs-on: shell - steps: - - id: init - uses: main/config/.gitea/workflows@main - with: - repo: ${{ gitea.repository }} - ref: ${{ gitea.ref_name }} diff --git a/libs/broker/attributes/default.rb b/libs/broker/attributes/default.rb index 1d011ec..e8fd9ca 100644 --- a/libs/broker/attributes/default.rb +++ b/libs/broker/attributes/default.rb @@ -1,7 +1,7 @@ default['ip'] = "#{ENV['IP']}" -default['app']['user'] = 'app' -default['app']['group'] = 'app' +default['app']['user'] = Default.user(node) +default['app']['group'] = Default.group(node) default['broker']['port'] = 1883 diff --git a/libs/broker/recipes/default.rb b/libs/broker/recipes/default.rb index 9982bd8..4c23ab5 100644 --- a/libs/broker/recipes/default.rb +++ b/libs/broker/recipes/default.rb @@ -1,20 +1,18 @@ -Env.set(node, 'broker', "mqtt://#{node['ip']}:#{node['broker']['port']}") +Env.dump(self, cookbook_name, repo: cookbook_name) -package 'mosquitto' +Common.directories(self, [node['broker']['dir']['data'], node['broker']['dir']['log']]) + +Env.set(self, 'broker', "mqtt://#{node['ip']}:#{node['broker']['port']}") -Common.directories(self, [node['broker']['dir']['data'], node['broker']['dir']['log']], owner: node['app']['user'], group: node['app']['group']) +package 'mosquitto' template node['broker']['file']['config'] do source 'mosquitto.conf.erb' owner node['app']['user'] group node['app']['group'] mode '0644' - variables({ - port: node['broker']['port'], - data_dir: node['broker']['dir']['data'], - log_dir: node['broker']['dir']['log'], - user_file: node['broker']['file']['user'] - }) + variables({ port: node['broker']['port'], user_file: node['broker']['file']['user'], + data_dir: node['broker']['dir']['data'], log_dir: node['broker']['dir']['log'] }) notifies :restart, 'service[mosquitto]', :delayed end @@ -22,15 +20,14 @@ owner node['app']['user'] group node['app']['group'] mode '0640' - action :create_if_missing end -execute "user-add_#{Env.get(node, 'login')}" do - command "mosquitto_passwd -b #{node['broker']['file']['user']} '#{Env.get(node, 'login')}' '#{Env.get(node, 'password')}'" +execute "user_add_#{Env.get(self, 'login')}" do + command "mosquitto_passwd -b #{node['broker']['file']['user']} '#{Env.get(self, 'login')}' '#{Env.get(self, 'password')}'" user 'root' sensitive true end -Common.application(self, 'mosquitto', user: node['app']['user'], +Common.application(self, 'mosquitto', exec: "/usr/sbin/mosquitto -c #{node['broker']['file']['config']}", subscribe: "template[#{node['broker']['file']['config']}]" ) \ No newline at end of file diff --git a/libs/proxy/.gitea/workflows/pipeline.yml b/libs/proxy/.gitea/workflows/pipeline.yml deleted file mode 100644 index ff44609..0000000 --- a/libs/proxy/.gitea/workflows/pipeline.yml +++ /dev/null @@ -1,14 +0,0 @@ -on: - workflow_dispatch: - push: - branches: [ release, main, develop ] - -jobs: - include: - runs-on: shell - steps: - - id: init - uses: main/config/.gitea/workflows@main - with: - repo: ${{ gitea.repository }} - ref: ${{ gitea.ref_name }} diff --git a/libs/proxy/attributes/default.rb b/libs/proxy/attributes/default.rb index 1d9ac5d..b50c5af 100644 --- a/libs/proxy/attributes/default.rb +++ b/libs/proxy/attributes/default.rb @@ -1,10 +1,11 @@ -default['ip'] = ENV['IP'] +default['ip'] = ENV['IP'] 
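Review note on the broker/bridge hand-off a few hunks above: libs/broker publishes its MQTT endpoint with Env.set, and libs/bridge reads it back with Env.get(self, 'broker'). Under the Env library added in this change, the publish side goes through the Gitea Actions variables API roughly as sketched below (the endpoint value is illustrative; how Env.get resolves the value is defined outside this diff):

# Publisher (libs/broker): store the endpoint as an org-level Actions variable.
Env.set(self, 'broker', "mqtt://#{node['ip']}:#{node['broker']['port']}")
# Env.set is an alias for Env.set_variable, which GETs and then PUTs/POSTs
#   <api endpoint>/orgs/<org>/actions/variables/broker
# (or repos/<org>/<repo>/actions/variables/broker when repo: is given).

# Consumer (libs/bridge):
broker = Env.get(self, 'broker')   # e.g. "mqtt://10.0.0.5:1883" -- illustrative value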
-default['app']['user'] = 'app' -default['app']['group'] = 'app' +default['app']['user'] = Default.user(node) +default['app']['group'] = Default.group(node) -default['proxy']['config']['domain'] = 'lan' -default['proxy']['dir']['app'] = '/app/proxy' -default['proxy']['dir']['logs'] = '/app/proxy/logs' -default['proxy']['download'] = 'https://github.com/caddyserver/caddy/releases/download/v2.7.6/caddy_2.7.6_linux_amd64.tar.gz' +default['proxy']['dir']['app'] = '/app/proxy' +default['proxy']['dir']['logs'] = '/app/proxy/logs' +default['proxy']['config']['domain'] = 'lan' + +default['proxy']['download'] = 'https://github.com/caddyserver/caddy/releases/download/v2.7.6/caddy_2.7.6_linux_amd64.tar.gz' diff --git a/libs/proxy/recipes/default.rb b/libs/proxy/recipes/default.rb index 505b3b3..6a40274 100644 --- a/libs/proxy/recipes/default.rb +++ b/libs/proxy/recipes/default.rb @@ -1,46 +1,21 @@ -package 'caddy' - -Common.directories(self, [node['proxy']['dir']['app'], node['proxy']['dir']['logs']], owner: node['app']['user'], group: node['app']['group']) +Env.dump(self, cookbook_name, repo: cookbook_name) -ruby_block 'fetch_proxmox_containers' do - block do - require 'net/http' - require 'openssl' - require 'json' +Common.directories(self, [node['proxy']['dir']['app'], node['proxy']['dir']['logs']]) - proxmox_host = Env.get(node, 'proxmox_host') - proxmox_user = Env.get(node, 'proxmox_user') - proxmox_token = Env.get(node, 'proxmox_token') - proxmox_secret = Env.get(node, 'proxmox_secret') - - def fetch_data(uri, user, token, secret) - http = Net::HTTP.new(uri.hostname, uri.port) - http.use_ssl = true - http.verify_mode = OpenSSL::SSL::VERIFY_NONE - req = Net::HTTP::Get.new(uri.request_uri) - req['Authorization'] = "PVEAPIToken=#{user}!#{token}=#{secret}" - res = http.request(req) - raise "API-Fehler #{res.code}" unless res.is_a?(Net::HTTPSuccess) - JSON.parse(res.body)['data'] - end - - uri_lxc = URI("https://#{proxmox_host}:8006/api2/json/nodes/pve/lxc") - containers = fetch_data(uri_lxc, proxmox_user, proxmox_token, proxmox_secret) - # .select { |c| c['status'] == 'running' } +package 'caddy' - proxy_hosts = containers.map do |c| - vmid = c['vmid'] - name = c['name'] - uri_config = URI("https://#{proxmox_host}:8006/api2/json/nodes/pve/lxc/#{vmid}/config") - config = fetch_data(uri_config, proxmox_user, proxmox_token, proxmox_secret) +ruby_block 'proxmox_containers' do + block do + domain = node['proxy']['config']['domain'] + node.run_state['proxy_hosts'] = Utils.proxmox(URI, node, 'nodes/pve/lxc').map do |state| + vmid = state['vmid'] + name = state['name'] + config = Utils.proxmox(URI, node, "nodes/pve/lxc/#{vmid}/config") ip = config['net0'] ? 
diff --git a/libs/share/attributes/default.rb b/libs/share/attributes/default.rb
index b764cb4..7e6cf91 100644
--- a/libs/share/attributes/default.rb
+++ b/libs/share/attributes/default.rb
@@ -1,9 +1,8 @@
 include_attribute 'config::default'

-default['mount'] = ENV['MOUNT'].to_s.split(',').each_with_object({}) do |entry, hash|
-  name, size = entry.split(':')
-  hash[name.strip] = {
-    'path' => name == 'share' ? '/share' : "/share/#{name}",
-    'size' => size.to_i
-  }
-end
\ No newline at end of file
+default['share']['mount'] = [
+  '/share'
+]
+
+default['share']['user']['uid'] = 100000
+default['share']['user']['gid'] = 100000
diff --git a/libs/share/recipes/default.rb b/libs/share/recipes/default.rb
index f7db09a..1f3130b 100644
--- a/libs/share/recipes/default.rb
+++ b/libs/share/recipes/default.rb
@@ -1,34 +1,26 @@
-login = Env.get(node, 'login')
-password = Env.get(node, 'password')
-
 Common.packages(self, %w[samba samba-common samba-client])

-node['mount'].each do |name|
-  path = (name == 'share' ? '/share' : "/share/#{name}")
+login = Env.get(self, 'login')
+password = Env.get(self, 'password')

-  directory path do
-    owner node['git']['app']['user']
-    group node['git']['app']['group']
-    mode '2775'
-    recursive true
-    action :create
-  end
+group login do
+  gid node['share']['user']['gid']
 end

-template '/etc/samba/smb.conf' do
-  source 'smb.conf.erb'
-  variables(
-    login: login,
-    user: node['git']['app']['user'],
-    group: node['git']['app']['group'],
-    share: node['mount']
-  )
-  notifies :restart, 'service[smb]'
+user login do
+  uid node['share']['user']['uid']
+  gid node['share']['user']['gid']
+  shell '/bin/false'
+  manage_home false
 end

-execute "create_user_#{login}" do
-  command "useradd --no-create-home --shell /bin/false #{login}"
-  not_if "id -u #{login}"
+Array(node.dig('share','mount')).each do |path|
+  directory path do
+    owner login
+    group login
+    mode '2775'
+    recursive false
+  end
 end

 execute "create_samba_#{login}" do
@@ -36,6 +28,12 @@
   not_if "pdbedit -L | grep -w #{login}"
 end

+template '/etc/samba/smb.conf' do
+  source 'smb.conf.erb'
+  variables(login: login, shares: Array(node['share']['mount']))
+  notifies :restart, 'service[smb]'
+end
+
 service 'smb' do
   action [:enable, :start]
 end
diff --git a/libs/share/templates/smb.conf.erb b/libs/share/templates/smb.conf.erb
index b5e0a59..2a9bf90 100644
--- a/libs/share/templates/smb.conf.erb
+++ b/libs/share/templates/smb.conf.erb
@@ -1,16 +1,36 @@
 [global]
+server min protocol = SMB2
+server max protocol = SMB3
+
 security = user
 passdb backend = tdbsam
 map to guest = bad user
 usershare allow guests = no

-<% @share.each do |name, config| %>
-  [<%= name %>]
-  path = <%= config['path'] || "/share/#{name}" %>
+socket options = TCP_NODELAY SO_RCVBUF=524288 SO_SNDBUF=524288
+
+aio read size = 1
+aio write size = 1
+
+vfs objects = catia fruit streams_xattr
+fruit:metadata = stream
+fruit:resource = file
+fruit:model = MacSamba
+fruit:locking = none
+
+dirsort = yes
+
+fruit:posix_rename = yes
+fruit:veto_appledouble = no
+fruit:nfs_aces = no
+
+<% Array(@shares).each do |path| -%>
+  [<%= File.basename(path.to_s.chomp('/')) %>]
+  path = <%= path %>
   valid users = <%= @login %>
-  force user = <%= @user %>
-  force group = <%= @group %>
+  force user = <%= @login %>
+  force group = <%= @login %>
   read only = no
   create mask = 0775
   directory mask = 2775
-<% end %>
+<% end -%>
\ No newline at end of file
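
Note: with the MOUNT-driven hash gone, the share list is just an array of absolute paths and the template derives each section name from the path itself, so the default ['/share'] renders a single [share] section. A quick check of the expression used in the template (the extra paths are purely illustrative):

['/share', '/share/media/', '/srv/backup'].each do |path|
  puts "#{path} -> [#{File.basename(path.chomp('/'))}]"
end
# /share        -> [share]
# /share/media/ -> [media]
# /srv/backup   -> [backup]
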
'/share' : "/share/#{name}") +login = Env.get(self, 'login') +password = Env.get(self, 'password') - directory path do - owner node['git']['app']['user'] - group node['git']['app']['group'] - mode '2775' - recursive true - action :create - end +group login do + gid node['share']['user']['gid'] end -template '/etc/samba/smb.conf' do - source 'smb.conf.erb' - variables( - login: login, - user: node['git']['app']['user'], - group: node['git']['app']['group'], - share: node['mount'] - ) - notifies :restart, 'service[smb]' +user login do + uid node['share']['user']['uid'] + gid node['share']['user']['gid'] + shell '/bin/false' + manage_home false end -execute "create_user_#{login}" do - command "useradd --no-create-home --shell /bin/false #{login}" - not_if "id -u #{login}" +Array(node.dig('share','mount')).each do |path| + directory path do + owner login + group login + mode '2775' + recursive false + end end execute "create_samba_#{login}" do @@ -36,6 +28,12 @@ not_if "pdbedit -L | grep -w #{login}" end +template '/etc/samba/smb.conf' do + source 'smb.conf.erb' + variables(login: login, shares: Array(node['share']['mount'])) + notifies :restart, 'service[smb]' +end + service 'smb' do action [:enable, :start] end diff --git a/libs/share/templates/smb.conf.erb b/libs/share/templates/smb.conf.erb index b5e0a59..2a9bf90 100644 --- a/libs/share/templates/smb.conf.erb +++ b/libs/share/templates/smb.conf.erb @@ -1,16 +1,36 @@ [global] +server min protocol = SMB2 +server max protocol = SMB3 + security = user passdb backend = tdbsam map to guest = bad user usershare allow guests = no -<% @share.each do |name, config| %> - [<%= name %>] - path = <%= config['path'] || "/share/#{name}" %> +socket options = TCP_NODELAY SO_RCVBUF=524288 SO_SNDBUF=524288 + +aio read size = 1 +aio write size = 1 + +vfs objects = catia fruit streams_xattr +fruit:metadata = stream +fruit:resource = file +fruit:model = MacSamba +fruit:locking = none + +dirsort = yes + +fruit:posix_rename = yes +fruit:veto_appledouble = no +fruit:nfs_aces = no + +<% Array(@shares).each do |path| -%> + [<%= File.basename(path.to_s.chomp('/')) %>] + path = <%= path %> valid users = <%= @login %> - force user = <%= @user %> - force group = <%= @group %> + force user = <%= @login %> + force group = <%= @login %> read only = no create mask = 0775 directory mask = 2775 -<% end %> +<% end -%> \ No newline at end of file diff --git a/local/.config.json b/local/.config.json index 9d0d47e..f1a1ff6 100644 --- a/local/.config.json +++ b/local/.config.json @@ -3,10 +3,8 @@ "proxmox": { "host": "", - "api": "", "user": "", - "token": "", - "secret": "" + "password": "" }, "login": "", diff --git a/local/Dockerfile b/local/Dockerfile index f96c60f..8d568ee 100644 --- a/local/Dockerfile +++ b/local/Dockerfile @@ -1,11 +1,7 @@ FROM debian:bookworm ARG TARGETARCH=arm64 -ENV IP=127.0.0.1 \ - ID=254 \ - ANSIBLE_ROLES_PATH=/tmp/base \ - LC_ALL=C.UTF-8 - +ENV IP=127.0.0.1 ID=254 ANSIBLE_ROLES_PATH=/tmp/config/base LC_ALL=C.UTF-8 STOPSIGNAL SIGRTMIN+3 RUN apt update -y && \ @@ -25,14 +21,14 @@ RUN rm -rf /lib/systemd/system/multi-user.target.wants/* && \ RUN python3 -m pip install --upgrade ansible --break-system-packages RUN ansible-galaxy collection install community.general --force -RUN mkdir -p /share/.ssh && ssh-keygen -t ed25519 -f "/share/.ssh/${ID}" -N '' +RUN mkdir -p /app/.ssh /share/.ssh && ssh-keygen -t ed25519 -f "/share/.ssh/${ID}" -N '' -COPY ../base/roles /tmp/base -RUN ansible localhost -m include_role -a name=base -e \ - "ip=${IP}" -e "id=${ID}" -e 
"configure_ssh=0" -e "arch=${TARGETARCH}" \ +COPY ../base/roles /tmp/config/base +RUN ansible localhost -m include_role -a name=base \ + -e "ip=${IP}" -e "id=${ID}" -e "KEYS_DIR=/share/.ssh" -e "configure_ssh=0" -e "arch=${TARGETARCH}" \ -e "private_key='$(cat /share/.ssh/${ID})\n'" -e "public_key='$(cat /share/.ssh/${ID}.pub)\n'" -RUN chown -R app:app /share +RUN chown -R app:config /app WORKDIR / diff --git a/local/run.sh b/local/run.sh index 8fd719f..4cc5210 100755 --- a/local/run.sh +++ b/local/run.sh @@ -30,17 +30,15 @@ esac export DOCKER_DEFAULT_PLATFORM="${DOCKER_DEFAULT_PLATFORM:-linux/${TARGETARCH}}" DOCKERFILE_PATH="${DEVELOP_DIR}/Dockerfile" -[[ -f "${DOCKERFILE_PATH}" ]] || fail "dockerfile_missing:${DOCKERFILE_PATH}" -DOCKERFILE_HASH=$(md5sum "${DOCKERFILE_PATH}" | awk '{print $1}') +BASE=$(find "base" -type f -not -path "*/.git/*" -print0 | sort -z | xargs -0 md5sum | md5sum | awk '{print $1}') +HASH=$(echo "$(md5sum "$DOCKERFILE_PATH" | awk '{print $1}')${BASE}" | md5sum | awk '{print $1}') STORED_HASH_FILE="${DEVELOP_DIR}/.${DOCKER_IMAGE_NAME}.hash" -STORED_HASH=$(cat "${STORED_HASH_FILE}" 2>/dev/null || true) +STORED_HASH=$(cat "$STORED_HASH_FILE" 2>/dev/null || true) -BUILD_NEEDED=false -if [[ -z "$(docker images -q "${DOCKER_IMAGE_NAME}")" || "${STORED_HASH}" != "${DOCKERFILE_HASH}" ]]; then - BUILD_NEEDED=true +if [[ -z "$(docker images -q "${DOCKER_IMAGE_NAME}")" || "$STORED_HASH" != "$HASH" ]]; then log "image" "build_required" docker build --build-arg TARGETARCH="$TARGETARCH" -t "$DOCKER_IMAGE_NAME" -f "$DOCKERFILE_PATH" "$PROJECT_DIR" || fail "build_failed" - echo "$DOCKERFILE_HASH" > "$STORED_HASH_FILE" + echo "$HASH" > "$STORED_HASH_FILE" log "image" "build_complete" fi @@ -62,12 +60,13 @@ CONTAINER_ID=$(docker run -d --privileged --cgroupns=host --tmpfs /tmp \ log "container" "started:${CONTAINER_ID}" sleep "$DOCKER_WAIT" -cmd="cinc-client -l debug --local-mode --config-option node_path=/tmp/nodes \ - --config-option cookbook_path=${COOKBOOK_PATH} ${CONFIG_FILE} --chef-license accept -o ${RECIPE}" -docker exec "$CONTAINER_ID" bash -c "$cmd" || log "error" "exec_failed" +command='sudo $(sudo -u config env) PWD=/tmp/config --preserve-env=ID \ + cinc-client -l info --local-mode --config-option node_path=/tmp/nodes \ + --config-option cookbook_path='"$COOKBOOK_PATH"' '"$CONFIG_FILE"' -o '"$RECIPE"'' +docker exec "$CONTAINER_ID" bash -c "$command" || log "error" "exec_failed" -[[ -z "${COOKBOOK_OVERRIDE}" ]] && cmd+="::repo" +[[ -z "${COOKBOOK_OVERRIDE}" ]] && command+="::repo" while true; do log "rerun" "$RECIPE" && read -r - docker exec "$CONTAINER_ID" bash -c "$cmd" || log "error" "exec_failed" + docker exec "$CONTAINER_ID" bash -c "$command" || log "error" "exec_failed" done