---
# Deploy Single Node OpenShift (SNO) on Proxmox
#
# Prerequisites:
#   ansible-galaxy collection install -r collections/requirements.yml
#   openshift-install is downloaded automatically during the sno play
#
# Inventory requirements:
#   sno.openshift.toal.ca - in 'openshift' group
#     host_vars: ocp_cluster_name, ocp_base_domain, ocp_version, sno_ip,
#                sno_gateway, sno_nameserver, sno_prefix_length, sno_vm_name,
#                sno_bridge, sno_vlan, proxmox_node, ...
#     secrets: vault_ocp_pull_secret (Red Hat pull secret JSON string)
#   proxmox_api - inventory host (ansible_host: proxmox.lab.toal.ca, ansible_port: 443)
#     Used as api_host / api_port source for community.proxmox modules
#   proxmox_host - inventory host (ansible_host: pve1.lab.toal.ca, ansible_connection: ssh)
#     delegate_to target for qm and file operations
#   gate.toal.ca - in 'opnsense' group
#     host_vars: opnsense_host, opnsense_api_key, opnsense_api_secret,
#                opnsense_api_port, haproxy_public_ip
#   group_vars/all: dme_account_key, dme_account_secret
#
# Play order (intentional — DNS must precede VM boot):
#   Play 1: proxmox  — Create SNO VM
#   Play 2: opnsense — Configure OPNsense local DNS overrides (api/api-int/apps)
#   Play 3: dns      — Configure public DNS records in DNS Made Easy
#   Play 4: sno      — Generate ISO, boot VM, wait for install
#
# Usage:
#   ansible-playbook playbooks/deploy_openshift.yml
#   ansible-playbook playbooks/deploy_openshift.yml --tags proxmox
#   ansible-playbook playbooks/deploy_openshift.yml --tags sno
#   ansible-playbook playbooks/deploy_openshift.yml --tags dns,opnsense
#   ansible-playbook playbooks/deploy_openshift.yml --tags opnsense,sno

# ---------------------------------------------------------------------------
# Play 1: Create SNO VM in Proxmox
# ---------------------------------------------------------------------------
- name: Create SNO VM in Proxmox
  hosts: sno.openshift.toal.ca
  gather_facts: false
  connection: local
  roles:
    - role: proxmox_sno_vm
      tags: proxmox

# ---------------------------------------------------------------------------
# Play 2: Configure OPNsense - Local DNS Overrides
#   Must run BEFORE booting the VM so that api.openshift.toal.ca resolves
#   from within the SNO node during bootstrap.
# ---------------------------------------------------------------------------
- name: Configure OPNsense DNS overrides for OpenShift
  hosts: gate.toal.ca
  gather_facts: false
  connection: local
  module_defaults:
    group/oxlorg.opnsense.all:
      firewall: "{{ opnsense_host }}"
      api_key: "{{ opnsense_api_key }}"
      api_secret: "{{ opnsense_api_secret }}"
      ssl_verify: "{{ opnsense_ssl_verify | default(false) }}"
      api_port: "{{ opnsense_api_port | default(omit) }}"
  vars:
    # Pull the cluster identity from the SNO host so the override entries
    # stay in sync with the values used in Plays 1, 3 and 4.
    __deploy_ocp_cluster_name: "{{ hostvars['sno.openshift.toal.ca']['ocp_cluster_name'] }}"
    __deploy_ocp_base_domain: "{{ hostvars['sno.openshift.toal.ca']['ocp_base_domain'] }}"
    __deploy_sno_ip: "{{ hostvars['sno.openshift.toal.ca']['sno_ip'] }}"
  tags: opnsense
  roles:
    - role: opnsense_dns_override
      opnsense_dns_override_entries:
        - hostname: "api.{{ __deploy_ocp_cluster_name }}"
          domain: "{{ __deploy_ocp_base_domain }}"
          value: "{{ __deploy_sno_ip }}"
          type: host
        - hostname: "api-int.{{ __deploy_ocp_cluster_name }}"
          domain: "{{ __deploy_ocp_base_domain }}"
          value: "{{ __deploy_sno_ip }}"
          type: host
        - domain: "apps.{{ __deploy_ocp_cluster_name }}.{{ __deploy_ocp_base_domain }}"
          value: "{{ __deploy_sno_ip }}"
          type: forward

# ---------------------------------------------------------------------------
# Play 3: Configure Public DNS Records in DNS Made Easy
# ---------------------------------------------------------------------------
- name: Configure public DNS records for OpenShift
  hosts: sno.openshift.toal.ca
  gather_facts: false
  connection: local
  vars:
    # Public records point at the HAProxy VIP on the firewall, not the SNO IP.
    __deploy_public_ip: "{{ hostvars['gate.toal.ca']['haproxy_public_ip'] }}"
  tags: dns
  roles:
    - role: dnsmadeeasy_record
      dnsmadeeasy_record_account_key: "{{ dme_account_key }}"
      dnsmadeeasy_record_account_secret: "{{ dme_account_secret }}"
      dnsmadeeasy_record_entries:
        - domain: "{{ ocp_base_domain }}"
          record_name: "api.{{ ocp_cluster_name }}"
          record_type: A
          record_value: "{{ __deploy_public_ip }}"
          record_ttl: "{{ ocp_dns_ttl }}"
        - domain: "{{ ocp_base_domain }}"
          record_name: "*.apps.{{ ocp_cluster_name }}"
          record_type: A
          record_value: "{{ __deploy_public_ip }}"
          record_ttl: "{{ ocp_dns_ttl }}"

# ---------------------------------------------------------------------------
# Play 4: Generate Agent ISO and deploy SNO (agent-based installer)
#
#   Uses `openshift-install agent create image` — no SaaS API, no SSO required.
#   The pull secret is the only Red Hat credential needed.
#   Credentials (kubeconfig, kubeadmin-password) are generated locally under
#   sno_install_dir/auth/ by openshift-install itself.
# ---------------------------------------------------------------------------
- name: Generate Agent ISO and Deploy SNO
  hosts: sno.openshift.toal.ca
  gather_facts: false
  connection: local
  vars:
    ocp_pull_secret: "{{ vault_ocp_pull_secret }}"
  tags: sno
  tasks:
    # ------------------------------------------------------------------
    # Step 0: Ensure sno_vm_id and sno_mac are populated.
    #   These are set as cacheable facts by the proxmox_sno_vm role, but
    #   in ephemeral EEs or when running --tags sno alone the cache is
    #   empty. Re-query Proxmox whenever either value is missing.
    # ------------------------------------------------------------------
    - name: Retrieve VM info from Proxmox (needed when fact cache is empty)
      community.proxmox.proxmox_vm_info:
        api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}"
        api_user: "{{ proxmox_api_user }}"
        api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}"
        api_token_id: "{{ proxmox_api_token_id }}"
        api_token_secret: "{{ proxmox_api_token_secret }}"
        validate_certs: "{{ proxmox_validate_certs }}"
        node: "{{ proxmox_node }}"
        name: "{{ sno_vm_name }}"
        type: qemu
        config: current
      register: __sno_vm_info
      when: (sno_vm_id | default('')) == '' or (sno_mac | default('')) == ''

    - name: Set sno_vm_id and sno_mac from live Proxmox query
      ansible.builtin.set_fact:
        sno_vm_id: "{{ __sno_vm_info.proxmox_vms[0].vmid }}"
        # net0 looks like "virtio=AA:BB:CC:DD:EE:FF,bridge=..."; extract the MAC.
        sno_mac: >-
          {{ __sno_vm_info.proxmox_vms[0].config.net0
             | regex_search('([0-9A-Fa-f]{2}(?::[0-9A-Fa-f]{2}){5})', '\1')
             | first }}
        cacheable: true
      when: __sno_vm_info is not skipped

    - name: Ensure local install directories exist
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        mode: "0750"
      loop:
        - "{{ sno_install_dir }}"
        - "{{ sno_install_dir }}/auth"

    # ------------------------------------------------------------------
    # Step 1: Check whether a fresh ISO already exists on Proxmox
    #   AND the local openshift-install state dir is intact.
    #   If the state dir is missing (e.g. /tmp was cleared),
    #   we must regenerate the ISO so wait-for has valid state.
    # ------------------------------------------------------------------
    - name: Check if ISO already exists on Proxmox and is less than 24 hours old
      ansible.builtin.stat:
        path: "{{ proxmox_iso_dir }}/{{ sno_iso_filename }}"
        get_checksum: false
      delegate_to: proxmox_host
      register: __proxmox_iso_stat

    - name: Check if local openshift-install state directory exists
      ansible.builtin.stat:
        path: "{{ sno_install_dir }}/.openshift_install_state"
        get_checksum: false
      register: __install_state_stat

    # NOTE: a >- templated set_fact stringifies to "True"/"False" unless
    # native Jinja types are enabled, so every consumer below applies
    # `| bool` rather than trusting the raw value.
    - name: Set fact - skip ISO build if recent ISO exists on Proxmox and local state is intact
      ansible.builtin.set_fact:
        __sno_iso_fresh: >-
          {{ __proxmox_iso_stat.stat.exists
             and (now(utc=true).timestamp() | int - __proxmox_iso_stat.stat.mtime | int) < 86400
             and __install_state_stat.stat.exists }}

    # ------------------------------------------------------------------
    # Step 2: Get openshift-install binary
    #   Always ensure the binary is present — needed for both ISO generation
    #   and wait-for-install-complete regardless of __sno_iso_fresh.
    #   Binaries are stored in sno_install_dir so they survive across runs
    #   when sno_install_dir is a mounted volume in an EE.
    # ------------------------------------------------------------------
    - name: Download openshift-install tarball
      ansible.builtin.get_url:
        url: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable-{{ ocp_version }}/openshift-install-linux.tar.gz"
        dest: "{{ sno_install_dir }}/openshift-install-{{ ocp_version }}.tar.gz"
        mode: "0644"
        checksum: "{{ ocp_install_checksum | default(omit) }}"
      register: __ocp_install_tarball

    - name: Extract openshift-install binary
      ansible.builtin.unarchive:
        src: "{{ sno_install_dir }}/openshift-install-{{ ocp_version }}.tar.gz"
        dest: "{{ sno_install_dir }}"
        remote_src: false
        include:
          - openshift-install
      when: __ocp_install_tarball is changed or not (sno_install_dir ~ '/openshift-install') is file

    - name: Download openshift-client tarball
      ansible.builtin.get_url:
        url: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable-{{ ocp_version }}/openshift-client-linux.tar.gz"
        dest: "{{ sno_install_dir }}/openshift-client-{{ ocp_version }}.tar.gz"
        mode: "0644"
        checksum: "{{ ocp_client_checksum | default(omit) }}"
      register: __ocp_client_tarball

    - name: Extract oc binary
      ansible.builtin.unarchive:
        src: "{{ sno_install_dir }}/openshift-client-{{ ocp_version }}.tar.gz"
        dest: "{{ sno_install_dir }}"
        remote_src: false
        include:
          - oc
      when: __ocp_client_tarball is changed or not (sno_install_dir ~ '/oc') is file

    # ------------------------------------------------------------------
    # Step 3: Template agent installer config files (skipped if ISO is fresh)
    # ------------------------------------------------------------------
    - name: Template install-config.yaml
      ansible.builtin.template:
        src: templates/install-config.yaml.j2
        dest: "{{ sno_install_dir }}/install-config.yaml"
        mode: "0640"
      when: not (__sno_iso_fresh | bool)
      no_log: true  # rendered file embeds the pull secret

    - name: Template agent-config.yaml
      ansible.builtin.template:
        src: templates/agent-config.yaml.j2
        dest: "{{ sno_install_dir }}/agent-config.yaml"
        mode: "0640"
      when: not (__sno_iso_fresh | bool)

    # ------------------------------------------------------------------
    # Step 4: Generate discovery ISO (skipped if ISO is fresh)
    #   Note: openshift-install consumes (moves) the config files into
    #   openshift-install-state/ — this is expected behaviour.
    # ------------------------------------------------------------------
    - name: Generate agent-based installer ISO
      ansible.builtin.command:
        cmd: "{{ sno_install_dir }}/openshift-install agent create image --dir {{ sno_install_dir }}"
      when: not (__sno_iso_fresh | bool)
      changed_when: true

    # ------------------------------------------------------------------
    # Step 5: Upload ISO to Proxmox and attach to VM
    # ------------------------------------------------------------------
    - name: Copy discovery ISO to Proxmox ISO storage
      ansible.builtin.copy:
        src: "{{ sno_install_dir }}/{{ sno_iso_filename }}"
        dest: "{{ proxmox_iso_dir }}/{{ sno_iso_filename }}"
        mode: "0644"
      delegate_to: proxmox_host
      when: not (__sno_iso_fresh | bool)

    - name: Attach ISO to VM as CDROM
      ansible.builtin.command:
        cmd: "qm set {{ sno_vm_id }} --ide2 {{ proxmox_iso_storage }}:iso/{{ sno_iso_filename }},media=cdrom"
      delegate_to: proxmox_host
      changed_when: true

    - name: Ensure boot order prefers disk, falls back to CDROM
      # order=scsi0;ide2: OVMF tries scsi0 first; on first boot the disk has
      # no EFI application so OVMF falls through to ide2 (the agent ISO).
      # After RHCOS writes its EFI entry to the disk, subsequent reboots boot
      # directly from scsi0 — the CDROM is never tried again, breaking the loop.
      ansible.builtin.command:
        cmd: "qm set {{ sno_vm_id }} --boot order=scsi0;ide2"
      delegate_to: proxmox_host
      changed_when: true

    # ------------------------------------------------------------------
    # Step 6: Boot the VM
    # ------------------------------------------------------------------
    - name: Start SNO VM
      community.proxmox.proxmox_kvm:
        api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}"
        api_user: "{{ proxmox_api_user }}"
        api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}"
        api_token_id: "{{ proxmox_api_token_id }}"
        api_token_secret: "{{ proxmox_api_token_secret }}"
        validate_certs: "{{ proxmox_validate_certs }}"
        node: "{{ proxmox_node }}"
        name: "{{ sno_vm_name }}"
        state: started

    # ------------------------------------------------------------------
    # Step 7: Persist credentials to Proxmox host
    #   The EE is ephemeral — copy auth files to a durable location before
    #   the container exits. sno_credentials_dir defaults to
    #   /root/sno-<cluster> on proxmox_host. The agent installer writes the
    #   auth/ files at image-creation time, so they exist before the wait.
    # ------------------------------------------------------------------
    - name: Create credentials directory on Proxmox host
      ansible.builtin.file:
        path: "{{ sno_credentials_dir }}"
        state: directory
        mode: "0700"
      delegate_to: proxmox_host

    - name: Copy kubeconfig to Proxmox host
      ansible.builtin.copy:
        src: "{{ sno_install_dir }}/auth/kubeconfig"
        dest: "{{ sno_credentials_dir }}/kubeconfig"
        mode: "0600"
      delegate_to: proxmox_host

    - name: Copy kubeadmin-password to Proxmox host
      ansible.builtin.copy:
        src: "{{ sno_install_dir }}/auth/kubeadmin-password"
        dest: "{{ sno_credentials_dir }}/kubeadmin-password"
        mode: "0600"
      delegate_to: proxmox_host

    # ------------------------------------------------------------------
    # Step 8: Wait for installation to complete (~60-90 min)
    #   Credentials land in sno_install_dir/auth/ automatically.
    #   Inline poll (poll: 30) is used rather than fire-and-forget async
    #   because the connection is local — no SSH timeout risk — and the
    #   poll: 0 + async_status pattern stores job state in ~/.ansible_async
    #   inside the EE container, which is lost if the EE is restarted.
    #   async is 6000 s so a worst-case ~90-min install is not killed;
    #   ensure your job/EE timeout is set to at least 6000 s (100 min).
    # ------------------------------------------------------------------
    - name: Wait for SNO installation to complete
      ansible.builtin.command:
        cmd: "{{ sno_install_dir }}/openshift-install agent wait-for install-complete --dir {{ sno_install_dir }} --log-level=info"
      async: 6000
      poll: 30
      changed_when: false  # observation only; the installer drives the change

    # ------------------------------------------------------------------
    # Step 9: Eject CDROM so the VM never boots the agent ISO again
    # ------------------------------------------------------------------
    - name: Eject CDROM after successful installation
      ansible.builtin.command:
        cmd: "qm set {{ sno_vm_id }} --ide2 none,media=cdrom"
      delegate_to: proxmox_host
      changed_when: true

    - name: Display post-install info
      ansible.builtin.debug:
        msg:
          - "SNO installation complete!"
          - "API URL : https://api.{{ ocp_cluster_name }}.{{ ocp_base_domain }}:6443"
          - "Console : https://console-openshift-console.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
          - "Kubeconfig : {{ sno_credentials_dir }}/kubeconfig (on proxmox_host)"
          - "kubeadmin pass : {{ sno_credentials_dir }}/kubeadmin-password (on proxmox_host)"
        verbosity: 1