From d11167b345ae78ea65670eea57769fc72ace7151 Mon Sep 17 00:00:00 2001 From: Patrick Toal Date: Mon, 23 Feb 2026 13:18:53 -0500 Subject: [PATCH] Basic Openshift deploy mostly working --- .ansible-lint | 3 +- ansible.cfg | 4 +- collections/requirements.yml | 14 + library/dnsmadeeasy.py | 718 --------------------- playbooks/create_gitea.yml | 2 +- playbooks/deploy_openshift.yml | 403 +++++++++++- playbooks/opnsense.yml | 10 +- playbooks/templates/agent-config.yaml.j2 | 34 + playbooks/templates/install-config.yaml.j2 | 27 + roles/proxmox_sno_vm/defaults/main.yml | 27 + roles/proxmox_sno_vm/tasks/main.yml | 84 +++ router-ca.crt | 0 12 files changed, 597 insertions(+), 729 deletions(-) create mode 100644 collections/requirements.yml delete mode 100644 library/dnsmadeeasy.py create mode 100644 playbooks/templates/agent-config.yaml.j2 create mode 100644 playbooks/templates/install-config.yaml.j2 create mode 100644 roles/proxmox_sno_vm/defaults/main.yml create mode 100644 roles/proxmox_sno_vm/tasks/main.yml create mode 100644 router-ca.crt diff --git a/.ansible-lint b/.ansible-lint index afceea6..7d85d2f 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -32,6 +32,7 @@ skip_list: # progressive: true mock_modules: - - community.general.proxmox_kvm + - community.proxmox.proxmox_kvm + - community.proxmox.proxmox_vm_info mock_roles: [] diff --git a/ansible.cfg b/ansible.cfg index d0f5940..69f5bc8 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,10 +1,10 @@ [defaults] # Inventory - override with -i or ANSIBLE_INVENTORY env var -inventory = /workspaces/inventory +inventory = /home/ptoal/Dev/inventories/toallab-inventory # Role and collection paths roles_path = roles -collections_path = /workspaces/collections:~/.ansible/collections +collections_path = ./collections:/workspaces/collections:~/.ansible/collections:/usr/share/ansible/collections # Interpreter discovery interpreter_python = auto_silent diff --git a/collections/requirements.yml b/collections/requirements.yml new file 
mode 100644 index 0000000..c538b67 --- /dev/null +++ b/collections/requirements.yml @@ -0,0 +1,14 @@ +--- +collections: + - name: community.general + - name: community.proxmox + - name: community.crypto + - name: netbox.netbox + - name: freeipa.ansible_freeipa + - name: redhat.satellite + - name: onepassword.connect + - name: davidban77.gns3 + - name: oxlorg.opnsense + source: https://github.com/O-X-L/ansible-opnsense + type: git + version: latest diff --git a/library/dnsmadeeasy.py b/library/dnsmadeeasy.py deleted file mode 100644 index e23408d..0000000 --- a/library/dnsmadeeasy.py +++ /dev/null @@ -1,718 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - -DOCUMENTATION = ''' ---- -module: dnsmadeeasy -version_added: "1.3" -short_description: Interface with dnsmadeeasy.com (a DNS hosting service). -description: - - > - Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or - monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/) -options: - account_key: - description: - - Account API Key. - required: true - - account_secret: - description: - - Account Secret Key. - required: true - - domain: - description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster - resolution - required: true - - sandbox: - description: - - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used. 
- type: bool - default: 'no' - version_added: 2.7 - - record_name: - description: - - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless - of the state argument. - - record_type: - description: - - Record type. - choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] - - record_value: - description: - - > - Record value. HTTPRED: , MX: , NS: , PTR: , - SRV: , TXT: " - - > - If record_value is not specified; no changes will be made and the record will be returned in 'result' - (in other words, this module can be used to fetch a record's current id, type, and ttl) - - record_ttl: - description: - - record's "Time to live". Number of seconds the record remains cached in DNS servers. - default: 1800 - - state: - description: - - whether the record should exist or not - required: true - choices: [ 'present', 'absent' ] - - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: 'yes' - version_added: 1.5.1 - - monitor: - description: - - If C(yes), add or change the monitor. This is applicable only for A records. - type: bool - default: 'no' - version_added: 2.4 - - systemDescription: - description: - - Description used by the monitor. - required: true - default: '' - version_added: 2.4 - - maxEmails: - description: - - Number of emails sent to the contact list by the monitor. - required: true - default: 1 - version_added: 2.4 - - protocol: - description: - - Protocol used by the monitor. - required: true - default: 'HTTP' - choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS'] - version_added: 2.4 - - port: - description: - - Port used by the monitor. 
- required: true - default: 80 - version_added: 2.4 - - sensitivity: - description: - - Number of checks the monitor performs before a failover occurs where Low = 8, Medium = 5,and High = 3. - required: true - default: 'Medium' - choices: ['Low', 'Medium', 'High'] - version_added: 2.4 - - contactList: - description: - - Name or id of the contact list that the monitor will notify. - - The default C('') means the Account Owner. - required: true - default: '' - version_added: 2.4 - - httpFqdn: - description: - - The fully qualified domain name used by the monitor. - version_added: 2.4 - - httpFile: - description: - - The file at the Fqdn that the monitor queries for HTTP or HTTPS. - version_added: 2.4 - - httpQueryString: - description: - - The string in the httpFile that the monitor queries for HTTP or HTTPS. - version_added: 2.4 - - failover: - description: - - If C(yes), add or change the failover. This is applicable only for A records. - type: bool - default: 'no' - version_added: 2.4 - - autoFailover: - description: - - If true, fallback to the primary IP address is manual after a failover. - - If false, fallback to the primary IP address is automatic after a failover. - type: bool - default: 'no' - version_added: 2.4 - - ip1: - description: - - Primary IP address for the failover. - - Required if adding or changing the monitor or failover. - version_added: 2.4 - - ip2: - description: - - Secondary IP address for the failover. - - Required if adding or changing the failover. - version_added: 2.4 - - ip3: - description: - - Tertiary IP address for the failover. - version_added: 2.4 - - ip4: - description: - - Quaternary IP address for the failover. - version_added: 2.4 - - ip5: - description: - - Quinary IP address for the failover. - version_added: 2.4 - -notes: - - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few - seconds of actual time by using NTP. 
- - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'. - These values can be be registered and used in your playbooks. - - Only A records can have a monitor or failover. - - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required. - - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required. - - The monitor and the failover will share 'port', 'protocol', and 'ip1' options. - -requirements: [ hashlib, hmac ] -author: "Brice Burgess (@briceburg)" -''' - -EXAMPLES = ''' -# fetch my.com domain records -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - register: response - -# create / ensure the presence of a record -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - -# update the previously created record -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_value: 192.0.2.23 - -# fetch a specific record -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - register: response - -# delete a record / ensure it is absent -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - record_type: A - state: absent - record_name: test - -# Add a failover -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - ip3: 127.0.0.4 - ip4: 127.0.0.5 - ip5: 127.0.0.6 - -# 
Add a monitor -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: yes - ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default - maxEmails: 1 - systemDescription: Monitor Test A record - contactList: my contact list - -# Add a monitor with http options -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: yes - ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default - maxEmails: 1 - systemDescription: Monitor Test A record - contactList: 1174 # contact list id - httpFqdn: http://my.com - httpFile: example - httpQueryString: some string - -# Add a monitor and a failover -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: True - ip1: 127.0.0.2 - ip2: 127.0.0.3 - monitor: yes - protocol: HTTPS - port: 443 - maxEmails: 1 - systemDescription: monitoring my.com status - contactList: emergencycontacts - -# Remove a failover -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - failover: no - -# Remove a monitor -- dnsmadeeasy: - account_key: key - account_secret: secret - domain: my.com - state: present - record_name: test - record_type: A - record_value: 127.0.0.1 - monitor: no -''' - -# ============================================ -# DNSMadeEasy module specific support methods. 
-# - -import json -import hashlib -import hmac -from time import strftime, gmtime - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six import string_types - - -class DME2(object): - - def __init__(self, apikey, secret, domain, sandbox, module): - self.module = module - - self.api = apikey - self.secret = secret - - if sandbox: - self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/' - self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl) - else: - self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' - - self.domain = str(domain) - self.domain_map = None # ["domain_name"] => ID - self.record_map = None # ["record_name"] => ID - self.records = None # ["record_ID"] => - self.all_records = None - self.contactList_map = None # ["contactList_name"] => ID - - # Lookup the domain ID if passed as a domain name vs. 
ID - if not self.domain.isdigit(): - self.domain = self.getDomainByName(self.domain)['id'] - - self.record_url = 'dns/managed/' + str(self.domain) + '/records' - self.monitor_url = 'monitor' - self.contactList_url = 'contactList' - - def _headers(self): - currTime = self._get_date() - hashstring = self._create_hash(currTime) - headers = {'x-dnsme-apiKey': self.api, - 'x-dnsme-hmac': hashstring, - 'x-dnsme-requestDate': currTime, - 'content-type': 'application/json'} - return headers - - def _get_date(self): - return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) - - def _create_hash(self, rightnow): - return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() - - def query(self, resource, method, data=None): - url = self.baseurl + resource - if data and not isinstance(data, string_types): - data = urlencode(data) - - response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) - if info['status'] not in (200, 201, 204): - self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) - - try: - return json.load(response) - except Exception: - return {} - - def getDomain(self, domain_id): - if not self.domain_map: - self._instMap('domain') - - return self.domains.get(domain_id, False) - - def getDomainByName(self, domain_name): - if not self.domain_map: - self._instMap('domain') - - return self.getDomain(self.domain_map.get(domain_name, 0)) - - def getDomains(self): - return self.query('dns/managed', 'GET')['data'] - - def getRecord(self, record_id): - if not self.record_map: - self._instMap('record') - - return self.records.get(record_id, False) - - # Try to find a single record matching this one. - # How we do this depends on the type of record. For instance, there - # can be several MX records for a single record_name while there can - # only be a single CNAME for a particular record_name. 
Note also that - # there can be several records with different types for a single name. - def getMatchingRecord(self, record_name, record_type, record_value): - # Get all the records if not already cached - if not self.all_records: - self.all_records = self.getRecords() - - if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]: - for result in self.all_records: - if result['name'] == record_name and result['type'] == record_type: - return result - return False - elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]: - for result in self.all_records: - if record_type == "MX": - value = record_value.split(" ")[1] - elif record_type == "SRV": - value = record_value.split(" ")[3] - else: - value = record_value - if result['name'] == record_name and result['type'] == record_type and result['value'] == value: - return result - return False - else: - raise Exception('record_type not yet supported') - - def getRecords(self): - return self.query(self.record_url, 'GET')['data'] - - def _instMap(self, type): - # @TODO cache this call so it's executed only once per ansible execution - map = {} - results = {} - - # iterate over e.g. self.getDomains() || self.getRecords() - for result in getattr(self, 'get' + type.title() + 's')(): - - map[result['name']] = result['id'] - results[result['id']] = result - - # e.g. self.domain_map || self.record_map - setattr(self, type + '_map', map) - setattr(self, type + 's', results) # e.g. 
self.domains || self.records - - def prepareRecord(self, data): - return json.dumps(data, separators=(',', ':')) - - def createRecord(self, data): - # @TODO update the cache w/ resultant record + id when impleneted - return self.query(self.record_url, 'POST', data) - - def updateRecord(self, record_id, data): - # @TODO update the cache w/ resultant record + id when impleneted - return self.query(self.record_url + '/' + str(record_id), 'PUT', data) - - def deleteRecord(self, record_id): - # @TODO remove record from the cache when impleneted - return self.query(self.record_url + '/' + str(record_id), 'DELETE') - - def getMonitor(self, record_id): - return self.query(self.monitor_url + '/' + str(record_id), 'GET') - - def updateMonitor(self, record_id, data): - return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data) - - def prepareMonitor(self, data): - return json.dumps(data, separators=(',', ':')) - - def getContactList(self, contact_list_id): - if not self.contactList_map: - self._instMap('contactList') - - return self.contactLists.get(contact_list_id, False) - - def getContactlists(self): - return self.query(self.contactList_url, 'GET')['data'] - - def getContactListByName(self, name): - if not self.contactList_map: - self._instMap('contactList') - - return self.getContactList(self.contactList_map.get(name, 0)) - -# =========================================== -# Module execution. 
-# - - -def main(): - - module = AnsibleModule( - argument_spec=dict( - account_key=dict(required=True), - account_secret=dict(required=True, no_log=True), - domain=dict(required=True), - sandbox=dict(default='no', type='bool'), - state=dict(required=True, choices=['present', 'absent']), - record_name=dict(required=False), - record_type=dict(required=False, choices=[ - 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), - record_value=dict(required=False), - record_ttl=dict(required=False, default=1800, type='int'), - monitor=dict(default='no', type='bool'), - systemDescription=dict(default=''), - maxEmails=dict(default=1, type='int'), - protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), - port=dict(default=80, type='int'), - sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), - contactList=dict(default=None), - httpFqdn=dict(required=False), - httpFile=dict(required=False), - httpQueryString=dict(required=False), - failover=dict(default='no', type='bool'), - autoFailover=dict(default='no', type='bool'), - ip1=dict(required=False), - ip2=dict(required=False), - ip3=dict(required=False), - ip4=dict(required=False), - ip5=dict(required=False), - validate_certs=dict(default='yes', type='bool'), - ), - required_together=[ - ['record_value', 'record_ttl', 'record_type'] - ], - required_if=[ - ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']], - ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']] - ] - ) - - protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6) - sensitivities = dict(Low=8, Medium=5, High=3) - - DME = DME2(module.params["account_key"], module.params[ - "account_secret"], module.params["domain"], module.params["sandbox"], module) - state = module.params["state"] - record_name = module.params["record_name"] - record_type = module.params["record_type"] - record_value = module.params["record_value"] - - # Follow Keyword 
Controlled Behavior - if record_name is None: - domain_records = DME.getRecords() - if not domain_records: - module.fail_json( - msg="The requested domain name is not accessible with this api_key; try using its ID if known.") - module.exit_json(changed=False, result=domain_records) - - # Fetch existing record + Build new one - current_record = DME.getMatchingRecord(record_name, record_type, record_value) - new_record = {'name': record_name} - for i in ["record_value", "record_type", "record_ttl"]: - if not module.params[i] is None: - new_record[i[len("record_"):]] = module.params[i] - # Special handling for mx record - if new_record["type"] == "MX": - new_record["mxLevel"] = new_record["value"].split(" ")[0] - new_record["value"] = new_record["value"].split(" ")[1] - - # Special handling for SRV records - if new_record["type"] == "SRV": - new_record["priority"] = new_record["value"].split(" ")[0] - new_record["weight"] = new_record["value"].split(" ")[1] - new_record["port"] = new_record["value"].split(" ")[2] - new_record["value"] = new_record["value"].split(" ")[3] - - # Fetch existing monitor if the A record indicates it should exist and build the new monitor - current_monitor = dict() - new_monitor = dict() - if current_record and current_record['type'] == 'A': - current_monitor = DME.getMonitor(current_record['id']) - - # Build the new monitor - for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails', - 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString', - 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']: - if module.params[i] is not None: - if i == 'protocol': - # The API requires protocol to be a numeric in the range 1-6 - new_monitor['protocolId'] = protocols[module.params[i]] - elif i == 'sensitivity': - # The API requires sensitivity to be a numeric of 8, 5, or 3 - new_monitor[i] = sensitivities[module.params[i]] - elif i == 'contactList': - # The module accepts either the name or the id of the contact 
list - contact_list_id = module.params[i] - if not contact_list_id.isdigit() and contact_list_id != '': - contact_list = DME.getContactListByName(contact_list_id) - if not contact_list: - module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id)) - contact_list_id = contact_list.get('id', '') - new_monitor['contactListId'] = contact_list_id - else: - # The module option names match the API field names - new_monitor[i] = module.params[i] - - # Compare new record against existing one - record_changed = False - if current_record: - for i in new_record: - if str(current_record[i]) != str(new_record[i]): - record_changed = True - new_record['id'] = str(current_record['id']) - - monitor_changed = False - if current_monitor: - for i in new_monitor: - if str(current_monitor.get(i)) != str(new_monitor[i]): - monitor_changed = True - - # Follow Keyword Controlled Behavior - if state == 'present': - # return the record if no value is specified - if "value" not in new_record: - if not current_record: - module.fail_json( - msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) - module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) - - # create record and monitor as the record does not exist - if not current_record: - record = DME.createRecord(DME.prepareRecord(new_record)) - if module.params['monitor']: - monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor)) - module.exit_json(changed=True, result=dict(record=record, monitor=monitor)) - else: - module.exit_json(changed=True, result=dict(record=record)) - - # update the record - updated = False - if record_changed: - DME.updateRecord(current_record['id'], DME.prepareRecord(new_record)) - updated = True - if monitor_changed: - DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor)) - updated = True - if updated: - module.exit_json(changed=True, result=dict(record=new_record, 
monitor=new_monitor)) - - # return the record (no changes) - module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor)) - - elif state == 'absent': - changed = False - # delete the record (and the monitor/failover) if it exists - if current_record: - DME.deleteRecord(current_record['id']) - module.exit_json(changed=True) - - # record does not exist, return w/o change. - module.exit_json(changed=changed) - - else: - module.fail_json( - msg="'%s' is an unknown value for the state argument" % state) - - -if __name__ == '__main__': - main() diff --git a/playbooks/create_gitea.yml b/playbooks/create_gitea.yml index d79b336..a01277f 100644 --- a/playbooks/create_gitea.yml +++ b/playbooks/create_gitea.yml @@ -23,7 +23,7 @@ name: toallab.infra.opnsense_service tasks_from: provision.yml module_defaults: - group/ansibleguy.opnsense.all: + group/oxlorg.opnsense.all: firewall: "{{ opnsense_host }}" api_key: "{{ opnsense_api_key }}" api_secret: "{{ opnsense_api_secret }}" diff --git a/playbooks/deploy_openshift.yml b/playbooks/deploy_openshift.yml index 6ade77b..4819a57 100644 --- a/playbooks/deploy_openshift.yml +++ b/playbooks/deploy_openshift.yml @@ -1,6 +1,405 @@ -- name: Deploy OpenShift on Proxmox - hosts: all +--- +# Deploy Single Node OpenShift (SNO) on Proxmox +# +# Prerequisites: +# ansible-galaxy collection install -r collections/requirements.yml +# openshift-install is downloaded automatically during the sno play +# +# Inventory requirements: +# sno.openshift.toal.ca - in 'openshift' group +# host_vars: ocp_cluster_name, ocp_base_domain, ocp_version, sno_ip, +# sno_gateway, sno_nameserver, sno_prefix_length, sno_vm_name, +# sno_bridge, sno_vlan, proxmox_node, ... 
+# secrets: vault_ocp_pull_secret (Red Hat pull secret JSON string) +# proxmox_api - inventory host (ansible_host: proxmox.lab.toal.ca, ansible_port: 443) +# Used as api_host / api_port source for community.proxmox modules +# proxmox_host - inventory host (ansible_host: pve1.lab.toal.ca, ansible_connection: ssh) +# delegate_to target for qm and file operations +# gate.toal.ca - in 'opnsense' group +# host_vars: opnsense_host, opnsense_api_key, opnsense_api_secret, +# opnsense_api_port, haproxy_public_ip +# group_vars/all: dme_account_key, dme_account_secret +# +# Play order (intentional — DNS must precede VM boot): +# Play 1: proxmox — Create SNO VM +# Play 2: opnsense — Configure OPNsense local DNS overrides (api/api-int/apps) +# Play 3: dns — Configure public DNS records in DNS Made Easy +# Play 4: sno — Generate ISO, boot VM, wait for install +# +# Usage: +# ansible-playbook playbooks/deploy_openshift.yml +# ansible-playbook playbooks/deploy_openshift.yml --tags proxmox +# ansible-playbook playbooks/deploy_openshift.yml --tags sno +# ansible-playbook playbooks/deploy_openshift.yml --tags dns,opnsense +# ansible-playbook playbooks/deploy_openshift.yml --tags opnsense,sno + +# --------------------------------------------------------------------------- +# Play 1: Create SNO VM in Proxmox +# --------------------------------------------------------------------------- +- name: Create SNO VM in Proxmox + hosts: sno.openshift.toal.ca gather_facts: false connection: local + + roles: + - role: proxmox_sno_vm + tags: proxmox + +# --------------------------------------------------------------------------- +# Play 2: Configure OPNsense - Local DNS Overrides +# Must run BEFORE booting the VM so that api.openshift.toal.ca resolves +# from within the SNO node during bootstrap. 
+# --------------------------------------------------------------------------- +- name: Configure OPNsense DNS overrides for OpenShift + hosts: gate.toal.ca + gather_facts: false + connection: local + + module_defaults: + group/oxlorg.opnsense.all: + firewall: "{{ opnsense_host }}" + api_key: "{{ opnsense_api_key }}" + api_secret: "{{ opnsense_api_secret }}" + ssl_verify: "{{ opnsense_ssl_verify | default(false) }}" + api_port: "{{ opnsense_api_port | default(omit) }}" + + tags: opnsense + tasks: + - name: Add Unbound host override for OCP API + oxlorg.opnsense.unbound_host: + hostname: "api.{{ ocp_cluster_name }}" + domain: "{{ ocp_base_domain }}" + value: "{{ sno_ip }}" + match_fields: + - hostname + - domain + state: present + delegate_to: localhost + vars: + ocp_cluster_name: "{{ hostvars['sno.openshift.toal.ca']['ocp_cluster_name'] }}" + ocp_base_domain: "{{ hostvars['sno.openshift.toal.ca']['ocp_base_domain'] }}" + sno_ip: "{{ hostvars['sno.openshift.toal.ca']['sno_ip'] }}" + + - name: Add Unbound host override for OCP API internal + oxlorg.opnsense.unbound_host: + hostname: "api-int.{{ ocp_cluster_name }}" + domain: "{{ ocp_base_domain }}" + value: "{{ sno_ip }}" + match_fields: + - hostname + - domain + state: present + delegate_to: localhost + vars: + ocp_cluster_name: "{{ hostvars['sno.openshift.toal.ca']['ocp_cluster_name'] }}" + ocp_base_domain: "{{ hostvars['sno.openshift.toal.ca']['ocp_base_domain'] }}" + sno_ip: "{{ hostvars['sno.openshift.toal.ca']['sno_ip'] }}" + + - name: Forward apps wildcard domain to SNO ingress + oxlorg.opnsense.unbound_forward: + domain: "apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}" + target: "{{ sno_ip }}" + state: present + delegate_to: localhost + vars: + ocp_cluster_name: "{{ hostvars['sno.openshift.toal.ca']['ocp_cluster_name'] }}" + ocp_base_domain: "{{ hostvars['sno.openshift.toal.ca']['ocp_base_domain'] }}" + sno_ip: "{{ hostvars['sno.openshift.toal.ca']['sno_ip'] }}" + +# 
--------------------------------------------------------------------------- +# Play 3: Configure Public DNS Records in DNS Made Easy +# --------------------------------------------------------------------------- +- name: Configure public DNS records for OpenShift + hosts: sno.openshift.toal.ca + gather_facts: false + connection: local + + tags: dns + + tasks: + + - name: Create A record for OpenShift API endpoint + community.general.dnsmadeeasy: + account_key: "{{ dme_account_key }}" + account_secret: "{{ dme_account_secret }}" + domain: "{{ ocp_base_domain }}" + record_name: "api.{{ ocp_cluster_name }}" + record_type: A + record_value: "{{ hostvars['gate.toal.ca']['haproxy_public_ip'] }}" + record_ttl: "{{ ocp_dns_ttl }}" + port: 443 + protocol: HTTPS + state: present + + - name: Create A record for OpenShift apps wildcard + community.general.dnsmadeeasy: + account_key: "{{ dme_account_key }}" + account_secret: "{{ dme_account_secret }}" + domain: "{{ ocp_base_domain }}" + record_name: "*.apps.{{ ocp_cluster_name }}" + record_type: A + record_value: "{{ hostvars['gate.toal.ca']['haproxy_public_ip'] }}" + record_ttl: "{{ ocp_dns_ttl }}" + port: 443 + protocol: HTTPS + state: present + +# --------------------------------------------------------------------------- +# Play 4: Generate Agent ISO and deploy SNO (agent-based installer) +# +# Uses `openshift-install agent create image` — no SaaS API, no SSO required. +# The pull secret is the only Red Hat credential needed. +# Credentials (kubeconfig, kubeadmin-password) are generated locally under +# sno_install_dir/auth/ by openshift-install itself. 
+# --------------------------------------------------------------------------- +- name: Generate Agent ISO and Deploy SNO + hosts: sno.openshift.toal.ca + gather_facts: false + connection: local + + vars: + ocp_pull_secret: "{{ vault_ocp_pull_secret }}" + + tags: sno + + tasks: + + # ------------------------------------------------------------------ + # Step 0: Ensure sno_vm_id and sno_mac are populated. + # These are set as cacheable facts by the proxmox_sno_vm role, but + # in ephemeral EEs or when running --tags sno alone the cache is + # empty. Re-query Proxmox whenever either value is missing. + # ------------------------------------------------------------------ + - name: Retrieve VM info from Proxmox (needed when fact cache is empty) + community.proxmox.proxmox_vm_info: + api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}" + api_user: "{{ proxmox_api_user }}" + api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}" + api_token_id: "{{ proxmox_api_token_id }}" + api_token_secret: "{{ proxmox_api_token_secret }}" + validate_certs: "{{ proxmox_validate_certs }}" + node: "{{ proxmox_node }}" + name: "{{ sno_vm_name }}" + type: qemu + config: current + register: _sno_vm_info + when: (sno_vm_id | default('')) == '' or (sno_mac | default('')) == '' + + - name: Set sno_vm_id and sno_mac from live Proxmox query + ansible.builtin.set_fact: + sno_vm_id: "{{ _sno_vm_info.proxmox_vms[0].vmid }}" + sno_mac: >- + {{ _sno_vm_info.proxmox_vms[0].config.net0 + | regex_search('([0-9A-Fa-f]{2}(?::[0-9A-Fa-f]{2}){5})', '\1') + | first }} + cacheable: true + when: _sno_vm_info is not skipped + + - name: Ensure local install directories exist + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: "0750" + loop: + - "{{ sno_install_dir }}" + - "{{ sno_install_dir }}/auth" + + # ------------------------------------------------------------------ + # Step 1: Check whether a fresh ISO already exists on Proxmox + # AND the local openshift-install state dir is 
intact. + # If the state dir is missing (e.g. /tmp was cleared), + # we must regenerate the ISO so wait-for has valid state. + # ------------------------------------------------------------------ + - name: Check if ISO already exists on Proxmox and is less than 24 hours old + ansible.builtin.stat: + path: "{{ proxmox_iso_dir }}/{{ sno_iso_filename }}" + get_checksum: false + delegate_to: proxmox_host + register: proxmox_iso_stat + + - name: Check if local openshift-install state directory exists + ansible.builtin.stat: + path: "{{ sno_install_dir }}/.openshift_install_state" + get_checksum: false + register: install_state_stat + + - name: Set fact - skip ISO build if recent ISO exists on Proxmox and local state is intact + ansible.builtin.set_fact: + sno_iso_fresh: >- + {{ + proxmox_iso_stat.stat.exists and + (now(utc=true).timestamp() | int - proxmox_iso_stat.stat.mtime | int) < 86400 and + install_state_stat.stat.exists + }} + + # ------------------------------------------------------------------ + # Step 2: Get openshift-install binary + # Always ensure the binary is present — needed for both ISO generation + # and wait-for-install-complete regardless of sno_iso_fresh. + # Binaries are stored in sno_install_dir so they survive across runs + # when sno_install_dir is a mounted volume in an EE. 
+ # ------------------------------------------------------------------ + - name: Download openshift-install tarball + ansible.builtin.get_url: + url: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable-{{ ocp_version }}/openshift-install-linux.tar.gz" + dest: "{{ sno_install_dir }}/openshift-install-{{ ocp_version }}.tar.gz" + mode: "0644" + checksum: "{{ ocp_install_checksum | default(omit) }}" + register: ocp_install_tarball + + - name: Extract openshift-install binary + ansible.builtin.unarchive: + src: "{{ sno_install_dir }}/openshift-install-{{ ocp_version }}.tar.gz" + dest: "{{ sno_install_dir }}" + remote_src: false + include: + - openshift-install + when: ocp_install_tarball.changed or not (sno_install_dir ~ '/openshift-install') is file + + - name: Download openshift-client tarball + ansible.builtin.get_url: + url: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable-{{ ocp_version }}/openshift-client-linux.tar.gz" + dest: "{{ sno_install_dir }}/openshift-client-{{ ocp_version }}.tar.gz" + mode: "0644" + checksum: "{{ ocp_client_checksum | default(omit) }}" + register: ocp_client_tarball + + - name: Extract oc binary + ansible.builtin.unarchive: + src: "{{ sno_install_dir }}/openshift-client-{{ ocp_version }}.tar.gz" + dest: "{{ sno_install_dir }}" + remote_src: false + include: + - oc + when: ocp_client_tarball.changed or not (sno_install_dir ~ '/oc') is file + + # ------------------------------------------------------------------ + # Step 3: Template agent installer config files (skipped if ISO is fresh) + # ------------------------------------------------------------------ + - name: Template install-config.yaml + ansible.builtin.template: + src: templates/install-config.yaml.j2 + dest: "{{ sno_install_dir }}/install-config.yaml" + mode: "0640" + when: not sno_iso_fresh + + - name: Template agent-config.yaml + ansible.builtin.template: + src: templates/agent-config.yaml.j2 + dest: "{{ sno_install_dir }}/agent-config.yaml" + 
mode: "0640" + when: not sno_iso_fresh + + # ------------------------------------------------------------------ + # Step 4: Generate discovery ISO (skipped if ISO is fresh) + # Note: openshift-install consumes (moves) the config files into + # openshift-install-state/ — this is expected behaviour. + # ------------------------------------------------------------------ + - name: Generate agent-based installer ISO + ansible.builtin.command: + cmd: "{{ sno_install_dir }}/openshift-install agent create image --dir {{ sno_install_dir }}" + when: not sno_iso_fresh + + # ------------------------------------------------------------------ + # Step 5: Upload ISO to Proxmox and attach to VM + # ------------------------------------------------------------------ + - name: Copy discovery ISO to Proxmox ISO storage + ansible.builtin.copy: + src: "{{ sno_install_dir }}/{{ sno_iso_filename }}" + dest: "{{ proxmox_iso_dir }}/{{ sno_iso_filename }}" + mode: "0644" + delegate_to: proxmox_host + when: not sno_iso_fresh + + - name: Attach ISO to VM as CDROM + ansible.builtin.command: + cmd: "qm set {{ sno_vm_id }} --ide2 {{ proxmox_iso_storage }}:iso/{{ sno_iso_filename }},media=cdrom" + delegate_to: proxmox_host + changed_when: true + + - name: Ensure boot order prefers disk, falls back to CDROM + # order=scsi0;ide2: OVMF tries scsi0 first; on first boot the disk has + # no EFI application so OVMF falls through to ide2 (the agent ISO). + # After RHCOS writes its EFI entry to the disk, subsequent reboots boot + # directly from scsi0 — the CDROM is never tried again, breaking the loop. 
+ ansible.builtin.command: + cmd: "qm set {{ sno_vm_id }} --boot order=scsi0;ide2" + delegate_to: proxmox_host + changed_when: true + + # ------------------------------------------------------------------ + # Step 6: Boot the VM + # ------------------------------------------------------------------ + - name: Start SNO VM + community.proxmox.proxmox_kvm: + api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}" + api_user: "{{ proxmox_api_user }}" + api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}" + api_token_id: "{{ proxmox_api_token_id }}" + api_token_secret: "{{ proxmox_api_token_secret }}" + validate_certs: "{{ proxmox_validate_certs }}" + node: "{{ proxmox_node }}" + name: "{{ sno_vm_name }}" + state: started + + # ------------------------------------------------------------------ + # Step 7: Persist credentials to Proxmox host + # The EE is ephemeral — copy auth files to a durable location before + # the container exits. sno_credentials_dir defaults to + # /root/sno- on proxmox_host. + # ------------------------------------------------------------------ + - name: Create credentials directory on Proxmox host + ansible.builtin.file: + path: "{{ sno_credentials_dir }}" + state: directory + mode: "0700" + delegate_to: proxmox_host + + - name: Copy kubeconfig to Proxmox host + ansible.builtin.copy: + src: "{{ sno_install_dir }}/auth/kubeconfig" + dest: "{{ sno_credentials_dir }}/kubeconfig" + mode: "0600" + delegate_to: proxmox_host + + - name: Copy kubeadmin-password to Proxmox host + ansible.builtin.copy: + src: "{{ sno_install_dir }}/auth/kubeadmin-password" + dest: "{{ sno_credentials_dir }}/kubeadmin-password" + mode: "0600" + delegate_to: proxmox_host + # ------------------------------------------------------------------ + # Step 8: Wait for installation to complete (~60-90 min) + # Credentials land in sno_install_dir/auth/ automatically. 
+ # Inline poll (poll: 30) is used rather than fire-and-forget async + # because the connection is local — no SSH timeout risk — and the + # poll: 0 + async_status pattern stores job state in ~/.ansible_async + # inside the EE container, which is lost if the EE is restarted. + # Ensure your job/EE timeout is set to at least 6000 s (100 min). + # ------------------------------------------------------------------ + - name: Wait for SNO installation to complete + ansible.builtin.command: + cmd: "{{ sno_install_dir }}/openshift-install agent wait-for install-complete --dir {{ sno_install_dir }} --log-level=info" + async: 5400 + poll: 30 + + # ------------------------------------------------------------------ + # Step 9: Eject CDROM so the VM never boots the agent ISO again + # ------------------------------------------------------------------ + - name: Eject CDROM after successful installation + ansible.builtin.command: + cmd: "qm set {{ sno_vm_id }} --ide2 none,media=cdrom" + delegate_to: proxmox_host + changed_when: true + + + - name: Display post-install info + ansible.builtin.debug: + msg: + - "SNO installation complete!" 
+ - "API URL : https://api.{{ ocp_cluster_name }}.{{ ocp_base_domain }}:6443" + - "Console : https://console-openshift-console.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}" + - "Kubeconfig : {{ sno_credentials_dir }}/kubeconfig (on proxmox_host)" + - "kubeadmin pass : {{ sno_credentials_dir }}/kubeadmin-password (on proxmox_host)" diff --git a/playbooks/opnsense.yml b/playbooks/opnsense.yml index f9a2e16..94d4b14 100644 --- a/playbooks/opnsense.yml +++ b/playbooks/opnsense.yml @@ -3,7 +3,7 @@ hosts: opnsense gather_facts: false module_defaults: - group/ansibleguy.opnsense.all: + group/oxlorg.opnsense.all: firewall: "{{ opnsense_host }}" api_key: "{{ opnsense_api_key }}" api_secret: "{{ opnsense_api_secret }}" @@ -12,7 +12,7 @@ tasks: - name: Install packages - ansibleguy.opnsense.package: + oxlorg.opnsense.package: name: - os-acme-client action: install @@ -24,13 +24,13 @@ tasks_from: setup.yml - name: Configure KEA DHCP Server - ansibleguy.opnsense.dhcp_general: + oxlorg.opnsense.dhcp_general: enabled: "{{ dhcp_enabled }}" interfaces: "{{ dhcp_interfaces }}" delegate_to: localhost - name: Add subnet - ansibleguy.opnsense.dhcp_subnet: + oxlorg.opnsense.dhcp_subnet: subnet: "{{ item.subnet }}" pools: "{{ item.pools }}" auto_options: false @@ -53,7 +53,7 @@ }} - name: Add DHCP Reservations - ansibleguy.opnsense.dhcp_reservation: + oxlorg.opnsense.dhcp_reservation: hostname: "{{ item.hostname }}" mac: "{{ item.mac }}" ip: "{{ item.address }}" diff --git a/playbooks/templates/agent-config.yaml.j2 b/playbooks/templates/agent-config.yaml.j2 new file mode 100644 index 0000000..8ec75fb --- /dev/null +++ b/playbooks/templates/agent-config.yaml.j2 @@ -0,0 +1,34 @@ +--- +# Generated by Ansible — do not edit by hand +# Source: playbooks/templates/agent-config.yaml.j2 +apiVersion: v1alpha1 +kind: AgentConfig +metadata: + name: {{ ocp_cluster_name }} +rendezvousIP: {{ sno_ip }} +hosts: + - hostname: master-0 + interfaces: + - name: primary + macAddress: "{{ sno_mac }}" + 
networkConfig: + interfaces: + - name: primary + type: ethernet + state: up + mac-address: "{{ sno_mac }}" + ipv4: + enabled: true + address: + - ip: {{ sno_ip }} + prefix-length: {{ sno_prefix_length }} + dhcp: false + dns-resolver: + config: + server: + - {{ sno_nameserver }} + routes: + config: + - destination: 0.0.0.0/0 + next-hop-address: {{ sno_gateway }} + next-hop-interface: primary diff --git a/playbooks/templates/install-config.yaml.j2 b/playbooks/templates/install-config.yaml.j2 new file mode 100644 index 0000000..bb2dd29 --- /dev/null +++ b/playbooks/templates/install-config.yaml.j2 @@ -0,0 +1,27 @@ +--- +# Generated by Ansible — do not edit by hand +# Source: playbooks/templates/install-config.yaml.j2 +apiVersion: v1 +baseDomain: {{ ocp_base_domain }} +metadata: + name: {{ ocp_cluster_name }} +networking: + networkType: OVNKubernetes + machineNetwork: + - cidr: {{ sno_machine_network }} + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + serviceNetwork: + - 172.30.0.0/16 +compute: + - name: worker + replicas: 0 +controlPlane: + name: master + replicas: 1 +platform: + none: {} +pullSecret: | # NOTE(review): to_json assumes ocp_pull_secret is a dict; if the vault stores the raw JSON string this double-encodes it — verify +  {{ ocp_pull_secret | ansible.builtin.to_json }} +sshKey: "{{ ocp_ssh_public_key }}" diff --git a/roles/proxmox_sno_vm/defaults/main.yml b/roles/proxmox_sno_vm/defaults/main.yml new file mode 100644 index 0000000..c4bf62a --- /dev/null +++ b/roles/proxmox_sno_vm/defaults/main.yml @@ -0,0 +1,27 @@ +--- +# Proxmox connection +# proxmox_api_host / proxmox_api_port are derived from the 'proxmox_api' +# inventory host (ansible_host / ansible_port). Do not set them here.
+proxmox_node: pve1 +proxmox_api_user: ansible@pam +proxmox_api_token_id: ansible +proxmox_api_token_secret: "{{ vault_proxmox_token_secret }}" +proxmox_validate_certs: false + +# Storage +proxmox_storage: local-lvm # VM disk storage pool +proxmox_iso_storage: local # ISO storage pool name (Proxmox) +proxmox_iso_dir: /var/lib/vz/template/iso # Filesystem path on proxmox_host +sno_credentials_dir: "/root/sno-{{ ocp_cluster_name }}" # Persistent credentials on proxmox_host + +# VM specification +sno_vm_name: "sno-{{ ocp_cluster_name }}" +sno_cpu: 8 +sno_memory_mb: 32768 +sno_disk_gb: 120 +sno_bridge: vmbr0 +sno_vlan: 40 +sno_mac: "" # Leave empty for auto-assignment. Set explicitly to pin MAC for static IP. + +# VM ID - leave 0 for auto-assign by Proxmox +sno_vm_id: 0 diff --git a/roles/proxmox_sno_vm/tasks/main.yml b/roles/proxmox_sno_vm/tasks/main.yml new file mode 100644 index 0000000..46ab077 --- /dev/null +++ b/roles/proxmox_sno_vm/tasks/main.yml @@ -0,0 +1,84 @@ +--- +# Create a Proxmox VM for Single Node OpenShift on VLAN40 (192.168.40.0/24). +# Uses q35 machine type with UEFI (required for SNO / RHCOS). +# An empty ide2 CD-ROM slot is created here so the boot order can reference it; +# the deploy_openshift.yml play loads the actual ISO into it after generation. 
+ +- name: Build net0 string + ansible.builtin.set_fact: + # Proxmox net format: model[=macaddr],bridge=[,tag=] + _sno_net0: >- + virtio{{ + '=' + sno_mac if sno_mac | length > 0 else '' + }},bridge={{ sno_bridge }},tag={{ sno_vlan }} + +- name: Create SNO VM in Proxmox + community.proxmox.proxmox_kvm: + api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}" + api_user: "{{ proxmox_api_user }}" + api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}" + api_token_id: "{{ proxmox_api_token_id }}" + api_token_secret: "{{ proxmox_api_token_secret }}" + validate_certs: "{{ proxmox_validate_certs }}" + node: "{{ proxmox_node }}" + vmid: "{{ sno_vm_id | default(omit, true) }}" + name: "{{ sno_vm_name }}" + cores: "{{ sno_cpu }}" + memory: "{{ sno_memory_mb }}" + cpu: host + numa_enabled: true + machine: q35 + bios: ovmf + efidisk0: + storage: "{{ proxmox_storage }}" + format: raw + efitype: 4m + pre_enrolled_keys: false + scsi: + scsi0: "{{ proxmox_storage }}:{{ sno_disk_gb }},format=raw,iothread=1,cache=writeback" + scsihw: virtio-scsi-single + ide: + ide2: none,media=cdrom + net: + net0: "{{ _sno_net0 }}" + boot: "order=scsi0;ide2" + onboot: true + state: present + register: proxmox_vm_result + +- name: Retrieve VM info + community.proxmox.proxmox_vm_info: + api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}" + api_user: "{{ proxmox_api_user }}" + api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}" + api_token_id: "{{ proxmox_api_token_id }}" + api_token_secret: "{{ proxmox_api_token_secret }}" + validate_certs: "{{ proxmox_validate_certs }}" + node: "{{ proxmox_node }}" + name: "{{ sno_vm_name }}" + type: qemu + config: current + register: proxmox_vm_info + retries: 5 # NOTE(review): no 'until' — add "until: proxmox_vm_info.proxmox_vms | length > 0" so retries guard against an empty result; retries without until only re-runs on failure, and only on ansible-core >= 2.16 + +- name: Set VM ID fact for subsequent plays + ansible.builtin.set_fact: + sno_vm_id: "{{ proxmox_vm_info.proxmox_vms[0].vmid }}" + cacheable: true + +- name: Extract MAC address from VM config + ansible.builtin.set_fact: + # net0 format: virtio=52:54:00:xx:xx:xx,bridge=vmbr0,tag=40 + 
sno_mac: >- + {{ proxmox_vm_info.proxmox_vms[0].config.net0 + | regex_search('([0-9A-Fa-f]{2}(?::[0-9A-Fa-f]{2}){5})', '\1') + | first }} + cacheable: true + when: sno_mac | length == 0 + +- name: Display VM details + ansible.builtin.debug: + msg: + - "VM Name : {{ sno_vm_name }}" + - "VM ID : {{ sno_vm_id }}" + - "MAC : {{ sno_mac }}" diff --git a/router-ca.crt b/router-ca.crt new file mode 100644 index 0000000..e69de29