Compare commits

...

5 Commits

Author SHA1 Message Date
d11167b345 Basic Openshift deploy mostly working 2026-02-23 13:18:53 -05:00
7a7c57d0bc WIP 2026-02-13 15:55:41 -05:00
7e75fa0199 Remove useless role. Basic setup for OpnSense 2025-05-22 14:34:15 -04:00
358f6b0067 fix 2024-10-03 11:31:49 -04:00
e13023b221 Fix typo 2024-10-03 11:26:22 -04:00
32 changed files with 968 additions and 959 deletions

38
.ansible-lint Normal file
View File

@@ -0,0 +1,38 @@
---
# ansible-lint configuration, read automatically by ansible-lint and by the
# pre-commit hook. "basic" is the gentlest built-in rule profile.
profile: basic
# Paths to exclude from linting
# (vendored third-party roles and locally installed collections — not ours to fix)
exclude_paths:
  - .ansible/
  - collections/ansible_collections/
  - roles/geerlingguy.java/
  - roles/oatakan.rhel_ovirt_template/
  - roles/oatakan.rhel_template_build/
  - roles/oatakan.windows_template_build/
  - roles/oatakan.windows_update/
  - roles/oatakan.windows_virtio/
  - roles/ikke_t.container_image_cleanup/
  - roles/ikke_t.podman_container_systemd/
  - roles/sage905.mineos/
  - roles/sage905.waterfall/
# Warn rather than fail on these during initial adoption
warn_list:
  - yaml[line-length]
  - name[casing]
  - fqcn[action-core]
  - no-changed-when
# Rules to skip entirely during initial adoption
skip_list:
  - role-name  # toal-common doesn't follow FQCN yet
# Use progressive mode: only flag new violations on changed files
# (useful for gradual adoption in existing projects)
# progressive: true
# Modules that are not installed in the lint environment but should be
# treated as known so their tasks still lint cleanly.
mock_modules:
  - community.proxmox.proxmox_kvm
  - community.proxmox.proxmox_vm_info
mock_roles: []

7
.claude/settings.json Normal file
View File

@@ -0,0 +1,7 @@
{
"permissions": {
"allow": [
"Bash(du:*)"
]
}
}

View File

@@ -0,0 +1,24 @@
{
"name": "ansible-dev-container-codespaces",
"image": "ghcr.io/ansible/community-ansible-dev-tools:latest",
"containerUser": "root",
"runArgs": [
"--security-opt",
"seccomp=unconfined",
"--security-opt",
"label=disable",
"--cap-add=SYS_ADMIN",
"--cap-add=SYS_RESOURCE",
"--device",
"/dev/fuse",
"--security-opt",
"apparmor=unconfined",
"--hostname=ansible-dev-container"
],
"updateRemoteUserUID": true,
"customizations": {
"vscode": {
"extensions": ["redhat.ansible","redhat.vscode-redhat-account"]
}
}
}

View File

@@ -0,0 +1,24 @@
{
"name": "ansible-dev-container-docker",
"image": "ghcr.io/ansible/community-ansible-dev-tools:latest",
"containerUser": "root",
"runArgs": [
"--security-opt",
"seccomp=unconfined",
"--security-opt",
"label=disable",
"--cap-add=SYS_ADMIN",
"--cap-add=SYS_RESOURCE",
"--device",
"/dev/fuse",
"--security-opt",
"apparmor=unconfined",
"--hostname=ansible-dev-container"
],
"updateRemoteUserUID": true,
"customizations": {
"vscode": {
"extensions": ["redhat.ansible","redhat.vscode-redhat-account"]
}
}
}

View File

@@ -0,0 +1,38 @@
{
"name": "ansible-dev-container-podman",
"image": "ghcr.io/ansible/community-ansible-dev-tools:latest",
"containerUser": "root",
"containerEnv": {
"REGISTRY_AUTH_FILE": "/container-auth.json"
},
"runArgs": [
"--cap-add=CAP_MKNOD",
"--cap-add=NET_ADMIN",
"--cap-add=SYS_ADMIN",
"--cap-add=SYS_RESOURCE",
"--device",
"/dev/fuse",
"--security-opt",
"seccomp=unconfined",
"--security-opt",
"label=disable",
"--security-opt",
"apparmor=unconfined",
"--security-opt",
"unmask=/sys/fs/cgroup",
"--userns=host",
"--hostname=ansible-dev-container",
"--env-file",
".env"
],
"customizations": {
"vscode": {
"extensions": ["redhat.ansible","redhat.vscode-redhat-account"]
}
},
"mounts": [
"source=${localEnv:XDG_RUNTIME_DIR}/containers/auth.json,target=/container-auth.json,type=bind,consistency=cached",
"source=${localEnv:HOME}/Dev/inventories/toallab-inventory,target=/workspaces/inventory,type=bind,consistency=cached",
"source=${localEnv:HOME}/Dev/ansible_collections/,target=/workspaces/collections/,type=bind,consistency=cached"
]
}

8
.gitignore vendored
View File

@@ -107,10 +107,18 @@ venv.bak/
# Ansible
*.retry
ansible-navigator.log
.ansible/
# Vendor roles (install via roles/requirements.yml)
roles/geerlingguy.*
roles/oatakan.*
roles/ikke_t.*
roles/sage905.*
.vscode/
keys/
collections/ansible_collections/
.vaultpw
context/
ansible-navigator.yml

View File

@@ -3,3 +3,26 @@ repos:
rev: v8.18.2
hooks:
- id: gitleaks
  # yamllint: pure YAML syntax/style checks, driven by the repo's .yamllint
  - repo: https://github.com/adrienverge/yamllint
    rev: v1.35.1
    hooks:
      - id: yamllint
        args: [--config-file, .yamllint]
        # Skip vendored roles and installed collections (verbose regex mode)
        exclude: |
          (?x)^(
            roles/geerlingguy\..*/|
            roles/oatakan\..*/|
            roles/ikke_t\..*/|
            roles/sage905\..*/|
            \.ansible/|
            collections/ansible_collections/
          )
  # ansible-lint: Ansible-aware checks on playbooks/roles
  - repo: https://github.com/ansible/ansible-lint
    rev: v25.1.3
    hooks:
      - id: ansible-lint
        # ansible-lint reads .ansible-lint for configuration
        additional_dependencies:
          - ansible-core>=2.15

39
.yamllint Normal file
View File

@@ -0,0 +1,39 @@
---
# yamllint configuration shared by the pre-commit hook and editors.
extends: default
rules:
  # Allow longer lines for readability in tasks
  line-length:
    max: 160
    level: warning
  # Allow both true/false and yes/no boolean styles
  truthy:
    allowed-values: ['true', 'false', 'yes', 'no']
    check-keys: false
  # Ansible uses double-bracket Jinja2 - allow in strings
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  # Allow some indentation flexibility for Ansible block style
  indentation:
    spaces: 2
    indent-sequences: true
    check-multi-line-strings: false
  # Comments should have a space after #
  comments:
    min-spaces-from-content: 1
  # Don't require document-start marker on every file
  document-start: disable
# Vendored roles and installed collections are excluded (shell-style patterns,
# one per line in this block scalar).
ignore: |
  roles/geerlingguy.*
  roles/oatakan.*
  roles/ikke_t.*
  roles/sage905.*
  .ansible/
  collections/ansible_collections/

44
ansible.cfg Normal file
View File

@@ -0,0 +1,44 @@
[defaults]
# Inventory - override with -i or ANSIBLE_INVENTORY env var
# NOTE(review): absolute path under one user's home directory — assumes this
# checkout runs on that workstation or a container with the same mount layout.
inventory = /home/ptoal/Dev/inventories/toallab-inventory
# Role and collection paths
roles_path = roles
collections_path = ./collections:/workspaces/collections:~/.ansible/collections:/usr/share/ansible/collections
# Interpreter discovery
interpreter_python = auto_silent
# Performance
# Smart gathering + a 1-hour on-disk fact cache lets repeat runs skip setup.
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp/ansible_fact_cache
fact_caching_timeout = 3600
# Output
stdout_callback = yaml
bin_ansible_callbacks = True
callbacks_enabled = profile_tasks
# SSH settings
# WARNING: host key checking is disabled here and in ssh_args below —
# acceptable only on a trusted lab network.
host_key_checking = False
timeout = 30
# Vault
# Presumably fetches the vault password from 1Password via the 'op' CLI —
# confirm the script's behavior before relying on it in CI.
vault_password_file = vault-id-from-op-client.sh
# Misc
retry_files_enabled = False
nocows = True
[inventory]
# Enable inventory plugins
enable_plugins = host_list, yaml, ini, auto, toml
[privilege_escalation]
become = False
become_method = sudo
[ssh_connection]
# Pipelining + ControlMaster multiplexing cut per-task SSH overhead.
pipelining = True
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no

View File

@@ -0,0 +1,10 @@
---
# Ansible collections required by this project.
# Install with: ansible-galaxy collection install -r <this file>
# (fix: removed duplicate redhat.satellite entry)
collections:
  - name: davidban77.gns3
  - name: netbox.netbox
  - name: freeipa.ansible_freeipa
  - name: redhat.satellite
  - name: community.general
  - name: community.crypto
  - name: onepassword.connect

View File

@@ -1,10 +1,14 @@
---
# Ansible collections required by this project.
# Install with: ansible-galaxy collection install -r <this file>
# (fix: removed duplicate entries for davidban77.gns3, community.general,
# redhat.satellite and community.crypto; first-occurrence order preserved)
collections:
  - name: davidban77.gns3
  - name: community.general
  - name: community.proxmox
  - name: community.crypto
  - name: netbox.netbox
  - name: freeipa.ansible_freeipa
  - name: redhat.satellite
  - name: onepassword.connect
  - name: oxlorg.opnsense
    source: https://github.com/O-X-L/ansible-opnsense
    type: git
    # NOTE(review): for type: git, 'version' must be a real branch/tag/commit
    # in that repo — confirm a 'latest' ref exists, or pin a tag.
    version: latest

View File

@@ -1,718 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dnsmadeeasy
version_added: "1.3"
short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
description:
- >
Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
options:
account_key:
description:
- Account API Key.
required: true
account_secret:
description:
- Account Secret Key.
required: true
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
resolution
required: true
sandbox:
description:
- Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
type: bool
default: 'no'
version_added: 2.7
record_name:
description:
- Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless
of the state argument.
record_type:
description:
- Record type.
choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
record_value:
description:
- >
Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
- >
If record_value is not specified; no changes will be made and the record will be returned in 'result'
(in other words, this module can be used to fetch a record's current id, type, and ttl)
record_ttl:
description:
- record's "Time to live". Number of seconds the record remains cached in DNS servers.
default: 1800
state:
description:
- whether the record should exist or not
required: true
choices: [ 'present', 'absent' ]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
version_added: 1.5.1
monitor:
description:
- If C(yes), add or change the monitor. This is applicable only for A records.
type: bool
default: 'no'
version_added: 2.4
systemDescription:
description:
- Description used by the monitor.
required: true
default: ''
version_added: 2.4
maxEmails:
description:
- Number of emails sent to the contact list by the monitor.
required: true
default: 1
version_added: 2.4
protocol:
description:
- Protocol used by the monitor.
required: true
default: 'HTTP'
choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
version_added: 2.4
port:
description:
- Port used by the monitor.
required: true
default: 80
version_added: 2.4
sensitivity:
description:
- Number of checks the monitor performs before a failover occurs where Low = 8, Medium = 5,and High = 3.
required: true
default: 'Medium'
choices: ['Low', 'Medium', 'High']
version_added: 2.4
contactList:
description:
- Name or id of the contact list that the monitor will notify.
- The default C('') means the Account Owner.
required: true
default: ''
version_added: 2.4
httpFqdn:
description:
- The fully qualified domain name used by the monitor.
version_added: 2.4
httpFile:
description:
- The file at the Fqdn that the monitor queries for HTTP or HTTPS.
version_added: 2.4
httpQueryString:
description:
- The string in the httpFile that the monitor queries for HTTP or HTTPS.
version_added: 2.4
failover:
description:
- If C(yes), add or change the failover. This is applicable only for A records.
type: bool
default: 'no'
version_added: 2.4
autoFailover:
description:
- If true, fallback to the primary IP address is manual after a failover.
- If false, fallback to the primary IP address is automatic after a failover.
type: bool
default: 'no'
version_added: 2.4
ip1:
description:
- Primary IP address for the failover.
- Required if adding or changing the monitor or failover.
version_added: 2.4
ip2:
description:
- Secondary IP address for the failover.
- Required if adding or changing the failover.
version_added: 2.4
ip3:
description:
- Tertiary IP address for the failover.
version_added: 2.4
ip4:
description:
- Quaternary IP address for the failover.
version_added: 2.4
ip5:
description:
- Quinary IP address for the failover.
version_added: 2.4
notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
seconds of actual time by using NTP.
- This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
These values can be be registered and used in your playbooks.
- Only A records can have a monitor or failover.
- To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
- To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
- The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''
EXAMPLES = '''
# fetch my.com domain records
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
register: response
# create / ensure the presence of a record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
# update the previously created record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_value: 192.0.2.23
# fetch a specific record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
register: response
# delete a record / ensure it is absent
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
record_type: A
state: absent
record_name: test
# Add a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
ip3: 127.0.0.4
ip4: 127.0.0.5
ip5: 127.0.0.6
# Add a monitor
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: yes
ip1: 127.0.0.2
protocol: HTTP # default
port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: my contact list
# Add a monitor with http options
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: yes
ip1: 127.0.0.2
protocol: HTTP # default
port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: 1174 # contact list id
httpFqdn: http://my.com
httpFile: example
httpQueryString: some string
# Add a monitor and a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
monitor: yes
protocol: HTTPS
port: 443
maxEmails: 1
systemDescription: monitoring my.com status
contactList: emergencycontacts
# Remove a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: no
# Remove a monitor
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: no
'''
# ============================================
# DNSMadeEasy module specific support methods.
#
import json
import hashlib
import hmac
from time import strftime, gmtime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six import string_types
class DME2(object):
def __init__(self, apikey, secret, domain, sandbox, module):
self.module = module
self.api = apikey
self.secret = secret
if sandbox:
self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
else:
self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
self.domain = str(domain)
self.domain_map = None # ["domain_name"] => ID
self.record_map = None # ["record_name"] => ID
self.records = None # ["record_ID"] => <record>
self.all_records = None
self.contactList_map = None # ["contactList_name"] => ID
# Lookup the domain ID if passed as a domain name vs. ID
if not self.domain.isdigit():
self.domain = self.getDomainByName(self.domain)['id']
self.record_url = 'dns/managed/' + str(self.domain) + '/records'
self.monitor_url = 'monitor'
self.contactList_url = 'contactList'
def _headers(self):
currTime = self._get_date()
hashstring = self._create_hash(currTime)
headers = {'x-dnsme-apiKey': self.api,
'x-dnsme-hmac': hashstring,
'x-dnsme-requestDate': currTime,
'content-type': 'application/json'}
return headers
def _get_date(self):
return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
def _create_hash(self, rightnow):
return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
def query(self, resource, method, data=None):
url = self.baseurl + resource
if data and not isinstance(data, string_types):
data = urlencode(data)
response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
if info['status'] not in (200, 201, 204):
self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
try:
return json.load(response)
except Exception:
return {}
def getDomain(self, domain_id):
if not self.domain_map:
self._instMap('domain')
return self.domains.get(domain_id, False)
def getDomainByName(self, domain_name):
if not self.domain_map:
self._instMap('domain')
return self.getDomain(self.domain_map.get(domain_name, 0))
def getDomains(self):
return self.query('dns/managed', 'GET')['data']
def getRecord(self, record_id):
if not self.record_map:
self._instMap('record')
return self.records.get(record_id, False)
# Try to find a single record matching this one.
# How we do this depends on the type of record. For instance, there
# can be several MX records for a single record_name while there can
# only be a single CNAME for a particular record_name. Note also that
# there can be several records with different types for a single name.
def getMatchingRecord(self, record_name, record_type, record_value):
# Get all the records if not already cached
if not self.all_records:
self.all_records = self.getRecords()
if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
for result in self.all_records:
if result['name'] == record_name and result['type'] == record_type:
return result
return False
elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
for result in self.all_records:
if record_type == "MX":
value = record_value.split(" ")[1]
elif record_type == "SRV":
value = record_value.split(" ")[3]
else:
value = record_value
if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
return result
return False
else:
raise Exception('record_type not yet supported')
def getRecords(self):
return self.query(self.record_url, 'GET')['data']
def _instMap(self, type):
# @TODO cache this call so it's executed only once per ansible execution
map = {}
results = {}
# iterate over e.g. self.getDomains() || self.getRecords()
for result in getattr(self, 'get' + type.title() + 's')():
map[result['name']] = result['id']
results[result['id']] = result
# e.g. self.domain_map || self.record_map
setattr(self, type + '_map', map)
setattr(self, type + 's', results) # e.g. self.domains || self.records
def prepareRecord(self, data):
return json.dumps(data, separators=(',', ':'))
def createRecord(self, data):
# @TODO update the cache w/ resultant record + id when impleneted
return self.query(self.record_url, 'POST', data)
def updateRecord(self, record_id, data):
# @TODO update the cache w/ resultant record + id when impleneted
return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
def deleteRecord(self, record_id):
# @TODO remove record from the cache when impleneted
return self.query(self.record_url + '/' + str(record_id), 'DELETE')
def getMonitor(self, record_id):
return self.query(self.monitor_url + '/' + str(record_id), 'GET')
def updateMonitor(self, record_id, data):
return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
def prepareMonitor(self, data):
return json.dumps(data, separators=(',', ':'))
def getContactList(self, contact_list_id):
if not self.contactList_map:
self._instMap('contactList')
return self.contactLists.get(contact_list_id, False)
def getContactlists(self):
return self.query(self.contactList_url, 'GET')['data']
def getContactListByName(self, name):
if not self.contactList_map:
self._instMap('contactList')
return self.getContactList(self.contactList_map.get(name, 0))
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_key=dict(required=True),
account_secret=dict(required=True, no_log=True),
domain=dict(required=True),
sandbox=dict(default='no', type='bool'),
state=dict(required=True, choices=['present', 'absent']),
record_name=dict(required=False),
record_type=dict(required=False, choices=[
'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
record_value=dict(required=False),
record_ttl=dict(required=False, default=1800, type='int'),
monitor=dict(default='no', type='bool'),
systemDescription=dict(default=''),
maxEmails=dict(default=1, type='int'),
protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
port=dict(default=80, type='int'),
sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
contactList=dict(default=None),
httpFqdn=dict(required=False),
httpFile=dict(required=False),
httpQueryString=dict(required=False),
failover=dict(default='no', type='bool'),
autoFailover=dict(default='no', type='bool'),
ip1=dict(required=False),
ip2=dict(required=False),
ip3=dict(required=False),
ip4=dict(required=False),
ip5=dict(required=False),
validate_certs=dict(default='yes', type='bool'),
),
required_together=[
['record_value', 'record_ttl', 'record_type']
],
required_if=[
['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
]
)
protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
sensitivities = dict(Low=8, Medium=5, High=3)
DME = DME2(module.params["account_key"], module.params[
"account_secret"], module.params["domain"], module.params["sandbox"], module)
state = module.params["state"]
record_name = module.params["record_name"]
record_type = module.params["record_type"]
record_value = module.params["record_value"]
# Follow Keyword Controlled Behavior
if record_name is None:
domain_records = DME.getRecords()
if not domain_records:
module.fail_json(
msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
module.exit_json(changed=False, result=domain_records)
# Fetch existing record + Build new one
current_record = DME.getMatchingRecord(record_name, record_type, record_value)
new_record = {'name': record_name}
for i in ["record_value", "record_type", "record_ttl"]:
if not module.params[i] is None:
new_record[i[len("record_"):]] = module.params[i]
# Special handling for mx record
if new_record["type"] == "MX":
new_record["mxLevel"] = new_record["value"].split(" ")[0]
new_record["value"] = new_record["value"].split(" ")[1]
# Special handling for SRV records
if new_record["type"] == "SRV":
new_record["priority"] = new_record["value"].split(" ")[0]
new_record["weight"] = new_record["value"].split(" ")[1]
new_record["port"] = new_record["value"].split(" ")[2]
new_record["value"] = new_record["value"].split(" ")[3]
# Fetch existing monitor if the A record indicates it should exist and build the new monitor
current_monitor = dict()
new_monitor = dict()
if current_record and current_record['type'] == 'A':
current_monitor = DME.getMonitor(current_record['id'])
# Build the new monitor
for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
if module.params[i] is not None:
if i == 'protocol':
# The API requires protocol to be a numeric in the range 1-6
new_monitor['protocolId'] = protocols[module.params[i]]
elif i == 'sensitivity':
# The API requires sensitivity to be a numeric of 8, 5, or 3
new_monitor[i] = sensitivities[module.params[i]]
elif i == 'contactList':
# The module accepts either the name or the id of the contact list
contact_list_id = module.params[i]
if not contact_list_id.isdigit() and contact_list_id != '':
contact_list = DME.getContactListByName(contact_list_id)
if not contact_list:
module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
contact_list_id = contact_list.get('id', '')
new_monitor['contactListId'] = contact_list_id
else:
# The module option names match the API field names
new_monitor[i] = module.params[i]
# Compare new record against existing one
record_changed = False
if current_record:
for i in new_record:
if str(current_record[i]) != str(new_record[i]):
record_changed = True
new_record['id'] = str(current_record['id'])
monitor_changed = False
if current_monitor:
for i in new_monitor:
if str(current_monitor.get(i)) != str(new_monitor[i]):
monitor_changed = True
# Follow Keyword Controlled Behavior
if state == 'present':
# return the record if no value is specified
if "value" not in new_record:
if not current_record:
module.fail_json(
msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
# create record and monitor as the record does not exist
if not current_record:
record = DME.createRecord(DME.prepareRecord(new_record))
if module.params['monitor']:
monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
else:
module.exit_json(changed=True, result=dict(record=record))
# update the record
updated = False
if record_changed:
DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
updated = True
if monitor_changed:
DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
updated = True
if updated:
module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))
# return the record (no changes)
module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
elif state == 'absent':
changed = False
# delete the record (and the monitor/failover) if it exists
if current_record:
DME.deleteRecord(current_record['id'])
module.exit_json(changed=True)
# record does not exist, return w/o change.
module.exit_json(changed=changed)
else:
module.fail_json(
msg="'%s' is an unknown value for the state argument" % state)
if __name__ == '__main__':
main()

View File

@@ -1,59 +0,0 @@
---
- name: VM Provisioning
hosts: tag_ansible:&tag_tower
connection: local
collections:
- redhat.rhv
tasks:
- block:
- name: Obtain SSO token from username / password credentials
ovirt_auth:
url: "{{ ovirt_url }}"
username: "{{ ovirt_username }}"
password: "{{ ovirt_password }}"
- name: Disks Created
ovirt_disk:
auth: "{{ ovirt_auth }}"
description: "Boot Disk for {{ inventory_hostname }}"
interface: virtio
size: 120GiB
storage_domain: nas_iscsi
bootable: True
wait: true
name: "{{ inventory_hostname }}_disk0"
state: present
- name: VM Created
ovirt_vm:
- name: Add NIC to VM
ovirt_nic:
state: present
vm:
name: mynic
interface: e1000
mac_address: 00:1a:4a:16:01:56
profile: ovirtmgmt
network: ovirtmgmt
- name: Plug NIC to VM
redhat.rhv.ovirt_nic:
state: plugged
vm: myvm
name: mynic
always:
- name: Always revoke the SSO token
ovirt_auth:
state: absent
ovirt_auth: "{{ ovirt_auth }}"
# - name: VM Configuration
# - name: Automation Platform Installer
# - name:

View File

@@ -1,12 +0,0 @@
- name: Create an ovirt windows template
hosts: windows_template_base
gather_facts: false
connection: local
become: false
vars:
ansible_python_interpreter: "{{ ansible_playbook_python }}"
roles:
- oatakan.windows_ovirt_template

View File

@@ -0,0 +1,31 @@
---
# Provision supporting infrastructure for the Gitea server:
#   1. a public CNAME in DNS Made Easy pointing service_dns_name at gate.toal.ca
#   2. the matching OPNsense service configuration
# Hosts come from the 'gitea' inventory group; service_dns_name is expected
# from inventory (presumably host_vars — confirm).
- name: Create Gitea Server
  hosts: gitea
  gather_facts: false
  vars:
    # Split e.g. "git.example.com" into hostname "git" and domain "example.com"
    dnsmadeeasy_hostname: "{{ service_dns_name.split('.') | first }}"
    dnsmadeeasy_domain: "{{ service_dns_name.split('.',1) |last }}"
    dnsmadeeasy_record_type: CNAME
    dnsmadeeasy_record_value: gate.toal.ca.
    dnsmadeeasy_record_ttl: 600
    opnsense_service_hostname: "{{ dnsmadeeasy_hostname }}"
    opnsense_service_domain: "{{ dnsmadeeasy_domain }}"
  tasks:
    - name: Configure DNS
      ansible.builtin.import_role:
        name: toallab.infra.dnsmadeeasy
        tasks_from: provision.yml
    - name: Configure Service
      ansible.builtin.import_role:
        name: toallab.infra.opnsense_service
        tasks_from: provision.yml
  # Shared connection parameters for every oxlorg.opnsense module call above
  module_defaults:
    group/oxlorg.opnsense.all:
      firewall: "{{ opnsense_host }}"
      api_key: "{{ opnsense_api_key }}"
      api_secret: "{{ opnsense_api_secret }}"
      ssl_verify: "{{ opnsense_ssl_verify }}"
      api_port: "{{ opnsense_api_port|default(omit) }}"

View File

@@ -0,0 +1,405 @@
---
# Deploy Single Node OpenShift (SNO) on Proxmox
#
# Prerequisites:
# ansible-galaxy collection install -r collections/requirements.yml
# openshift-install is downloaded automatically during the sno play
#
# Inventory requirements:
# sno.openshift.toal.ca - in 'openshift' group
# host_vars: ocp_cluster_name, ocp_base_domain, ocp_version, sno_ip,
# sno_gateway, sno_nameserver, sno_prefix_length, sno_vm_name,
# sno_bridge, sno_vlan, proxmox_node, ...
# secrets: vault_ocp_pull_secret (Red Hat pull secret JSON string)
# proxmox_api - inventory host (ansible_host: proxmox.lab.toal.ca, ansible_port: 443)
# Used as api_host / api_port source for community.proxmox modules
# proxmox_host - inventory host (ansible_host: pve1.lab.toal.ca, ansible_connection: ssh)
# delegate_to target for qm and file operations
# gate.toal.ca - in 'opnsense' group
# host_vars: opnsense_host, opnsense_api_key, opnsense_api_secret,
# opnsense_api_port, haproxy_public_ip
# group_vars/all: dme_account_key, dme_account_secret
#
# Play order (intentional — DNS must precede VM boot):
# Play 1: proxmox — Create SNO VM
# Play 2: opnsense — Configure OPNsense local DNS overrides (api/api-int/apps)
# Play 3: dns — Configure public DNS records in DNS Made Easy
# Play 4: sno — Generate ISO, boot VM, wait for install
#
# Usage:
# ansible-playbook playbooks/deploy_openshift.yml
# ansible-playbook playbooks/deploy_openshift.yml --tags proxmox
# ansible-playbook playbooks/deploy_openshift.yml --tags sno
# ansible-playbook playbooks/deploy_openshift.yml --tags dns,opnsense
# ansible-playbook playbooks/deploy_openshift.yml --tags opnsense,sno
# ---------------------------------------------------------------------------
# Play 1: Create SNO VM in Proxmox
# ---------------------------------------------------------------------------
# Thin wrapper play: all the Proxmox work lives in the proxmox_sno_vm role.
- name: Create SNO VM in Proxmox
  hosts: sno.openshift.toal.ca
  gather_facts: false
  connection: local
  roles:
    # Role-level tag so `--tags proxmox` selects exactly this role's tasks.
    - role: proxmox_sno_vm
      tags: proxmox
# ---------------------------------------------------------------------------
# Play 2: Configure OPNsense - Local DNS Overrides
# Must run BEFORE booting the VM so that api.openshift.toal.ca resolves
# from within the SNO node during bootstrap.
# ---------------------------------------------------------------------------
- name: Configure OPNsense DNS overrides for OpenShift
  hosts: gate.toal.ca
  gather_facts: false
  connection: local
  tags: opnsense
  vars:
    # Hoisted from per-task vars: all three tasks resolved the exact same
    # three values from the SNO host, so look them up once at play level.
    ocp_cluster_name: "{{ hostvars['sno.openshift.toal.ca']['ocp_cluster_name'] }}"
    ocp_base_domain: "{{ hostvars['sno.openshift.toal.ca']['ocp_base_domain'] }}"
    sno_ip: "{{ hostvars['sno.openshift.toal.ca']['sno_ip'] }}"
  module_defaults:
    # Shared connection parameters for every oxlorg.opnsense call below.
    group/oxlorg.opnsense.all:
      firewall: "{{ opnsense_host }}"
      api_key: "{{ opnsense_api_key }}"
      api_secret: "{{ opnsense_api_secret }}"
      ssl_verify: "{{ opnsense_ssl_verify | default(false) }}"
      api_port: "{{ opnsense_api_port | default(omit) }}"
  tasks:
    # api.<cluster>.<domain> → SNO node IP (used during bootstrap).
    - name: Add Unbound host override for OCP API
      oxlorg.opnsense.unbound_host:
        hostname: "api.{{ ocp_cluster_name }}"
        domain: "{{ ocp_base_domain }}"
        value: "{{ sno_ip }}"
        match_fields:
          - hostname
          - domain
        state: present
      delegate_to: localhost
    # api-int.<cluster>.<domain> → SNO node IP (internal API name).
    - name: Add Unbound host override for OCP API internal
      oxlorg.opnsense.unbound_host:
        hostname: "api-int.{{ ocp_cluster_name }}"
        domain: "{{ ocp_base_domain }}"
        value: "{{ sno_ip }}"
        match_fields:
          - hostname
          - domain
        state: present
      delegate_to: localhost
    # The *.apps wildcard is handled by forwarding the whole apps
    # subdomain to the SNO ingress IP rather than a host override.
    - name: Forward apps wildcard domain to SNO ingress
      oxlorg.opnsense.unbound_forward:
        domain: "apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
        target: "{{ sno_ip }}"
        state: present
      delegate_to: localhost
# ---------------------------------------------------------------------------
# Play 3: Configure Public DNS Records in DNS Made Easy
# ---------------------------------------------------------------------------
- name: Configure public DNS records for OpenShift
  hosts: sno.openshift.toal.ca
  gather_facts: false
  connection: local
  tags: dns
  tasks:
    # Both public names point at the gateway's public HAProxy IP
    # (haproxy_public_ip on gate.toal.ca). One looped task replaces the
    # two previously duplicated, field-for-field identical tasks.
    - name: Create A records for OpenShift API endpoint and apps wildcard
      community.general.dnsmadeeasy:
        account_key: "{{ dme_account_key }}"
        account_secret: "{{ dme_account_secret }}"
        domain: "{{ ocp_base_domain }}"
        record_name: "{{ item }}"
        record_type: A
        record_value: "{{ hostvars['gate.toal.ca']['haproxy_public_ip'] }}"
        record_ttl: "{{ ocp_dns_ttl }}"
        # NOTE(review): port/protocol belong to DNS Made Easy's
        # monitor/failover feature; without `monitor: true` they appear to
        # have no effect on a plain A record — confirm and drop if unused.
        port: 443
        protocol: HTTPS
        state: present
      loop:
        - "api.{{ ocp_cluster_name }}"
        - "*.apps.{{ ocp_cluster_name }}"
# ---------------------------------------------------------------------------
# Play 4: Generate Agent ISO and deploy SNO (agent-based installer)
#
# Uses `openshift-install agent create image` — no SaaS API, no SSO required.
# The pull secret is the only Red Hat credential needed.
# Credentials (kubeconfig, kubeadmin-password) are generated locally under
# sno_install_dir/auth/ by openshift-install itself.
# ---------------------------------------------------------------------------
- name: Generate Agent ISO and Deploy SNO
  hosts: sno.openshift.toal.ca
  gather_facts: false
  connection: local
  vars:
    ocp_pull_secret: "{{ vault_ocp_pull_secret }}"
  tags: sno
  tasks:
    # ------------------------------------------------------------------
    # Step 0: Ensure sno_vm_id and sno_mac are populated.
    # These are set as cacheable facts by the proxmox_sno_vm role, but
    # in ephemeral EEs or when running --tags sno alone the cache is
    # empty. Re-query Proxmox whenever either value is missing.
    # FIX: group_vars default sno_vm_id to 0 (not ''), so the previous
    # `== ''` test never detected a missing vmid; treat the 0 and
    # empty-string sentinels explicitly as "missing".
    # ------------------------------------------------------------------
    - name: Retrieve VM info from Proxmox (needed when fact cache is empty)
      community.proxmox.proxmox_vm_info:
        api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}"
        api_user: "{{ proxmox_api_user }}"
        api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}"
        api_token_id: "{{ proxmox_api_token_id }}"
        api_token_secret: "{{ proxmox_api_token_secret }}"
        validate_certs: "{{ proxmox_validate_certs }}"
        node: "{{ proxmox_node }}"
        name: "{{ sno_vm_name }}"
        type: qemu
        config: current
      register: _sno_vm_info
      when: (sno_vm_id | default(0) | int) == 0 or (sno_mac | default('') | length) == 0
    - name: Set sno_vm_id and sno_mac from live Proxmox query
      ansible.builtin.set_fact:
        sno_vm_id: "{{ _sno_vm_info.proxmox_vms[0].vmid }}"
        # net0 looks like "virtio=52:54:00:xx:xx:xx,bridge=...": pull the
        # first MAC-shaped token out of the config string.
        sno_mac: >-
          {{ _sno_vm_info.proxmox_vms[0].config.net0
          | regex_search('([0-9A-Fa-f]{2}(?::[0-9A-Fa-f]{2}){5})', '\1')
          | first }}
        cacheable: true
      when: _sno_vm_info is not skipped
    - name: Ensure local install directories exist
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        mode: "0750"
      loop:
        - "{{ sno_install_dir }}"
        - "{{ sno_install_dir }}/auth"
    # ------------------------------------------------------------------
    # Step 1: Check whether a fresh ISO already exists on Proxmox
    #         AND the local openshift-install state dir is intact.
    #         If the state dir is missing (e.g. /tmp was cleared),
    #         we must regenerate the ISO so wait-for has valid state.
    # ------------------------------------------------------------------
    - name: Check if ISO already exists on Proxmox and is less than 24 hours old
      ansible.builtin.stat:
        path: "{{ proxmox_iso_dir }}/{{ sno_iso_filename }}"
        get_checksum: false
      delegate_to: proxmox_host
      register: proxmox_iso_stat
    - name: Check if local openshift-install state directory exists
      ansible.builtin.stat:
        path: "{{ sno_install_dir }}/.openshift_install_state"
        get_checksum: false
      register: install_state_stat
    - name: Set fact - skip ISO build if recent ISO exists on Proxmox and local state is intact
      ansible.builtin.set_fact:
        # "Fresh" = present on Proxmox, younger than 24 h, and the local
        # installer state still exists (wait-for needs that state).
        sno_iso_fresh: >-
          {{
            proxmox_iso_stat.stat.exists and
            (now(utc=true).timestamp() | int - proxmox_iso_stat.stat.mtime | int) < 86400 and
            install_state_stat.stat.exists
          }}
    # ------------------------------------------------------------------
    # Step 2: Get openshift-install binary
    # Always ensure the binary is present — needed for both ISO generation
    # and wait-for-install-complete regardless of sno_iso_fresh.
    # Binaries are stored in sno_install_dir so they survive across runs
    # when sno_install_dir is a mounted volume in an EE.
    # ------------------------------------------------------------------
    - name: Download openshift-install tarball
      ansible.builtin.get_url:
        url: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable-{{ ocp_version }}/openshift-install-linux.tar.gz"
        dest: "{{ sno_install_dir }}/openshift-install-{{ ocp_version }}.tar.gz"
        mode: "0644"
        checksum: "{{ ocp_install_checksum | default(omit) }}"
      register: ocp_install_tarball
    - name: Extract openshift-install binary
      ansible.builtin.unarchive:
        src: "{{ sno_install_dir }}/openshift-install-{{ ocp_version }}.tar.gz"
        dest: "{{ sno_install_dir }}"
        remote_src: false
        include:
          - openshift-install
      when: ocp_install_tarball is changed or not (sno_install_dir ~ '/openshift-install') is file
    - name: Download openshift-client tarball
      ansible.builtin.get_url:
        url: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable-{{ ocp_version }}/openshift-client-linux.tar.gz"
        dest: "{{ sno_install_dir }}/openshift-client-{{ ocp_version }}.tar.gz"
        mode: "0644"
        checksum: "{{ ocp_client_checksum | default(omit) }}"
      register: ocp_client_tarball
    - name: Extract oc binary
      ansible.builtin.unarchive:
        src: "{{ sno_install_dir }}/openshift-client-{{ ocp_version }}.tar.gz"
        dest: "{{ sno_install_dir }}"
        remote_src: false
        include:
          - oc
      when: ocp_client_tarball is changed or not (sno_install_dir ~ '/oc') is file
    # ------------------------------------------------------------------
    # Step 3: Template agent installer config files (skipped if ISO is fresh)
    # ------------------------------------------------------------------
    - name: Template install-config.yaml
      ansible.builtin.template:
        src: templates/install-config.yaml.j2
        dest: "{{ sno_install_dir }}/install-config.yaml"
        mode: "0640"
      when: not sno_iso_fresh
    - name: Template agent-config.yaml
      ansible.builtin.template:
        src: templates/agent-config.yaml.j2
        dest: "{{ sno_install_dir }}/agent-config.yaml"
        mode: "0640"
      when: not sno_iso_fresh
    # ------------------------------------------------------------------
    # Step 4: Generate discovery ISO (skipped if ISO is fresh)
    # Note: openshift-install consumes (moves) the config files into
    # openshift-install-state/ — this is expected behaviour.
    # ------------------------------------------------------------------
    - name: Generate agent-based installer ISO
      ansible.builtin.command:
        cmd: "{{ sno_install_dir }}/openshift-install agent create image --dir {{ sno_install_dir }}"
      # Whenever this runs it rebuilds the ISO and rewrites installer state.
      changed_when: true
      when: not sno_iso_fresh
    # ------------------------------------------------------------------
    # Step 5: Upload ISO to Proxmox and attach to VM
    # ------------------------------------------------------------------
    - name: Copy discovery ISO to Proxmox ISO storage
      ansible.builtin.copy:
        src: "{{ sno_install_dir }}/{{ sno_iso_filename }}"
        dest: "{{ proxmox_iso_dir }}/{{ sno_iso_filename }}"
        mode: "0644"
      delegate_to: proxmox_host
      when: not sno_iso_fresh
    - name: Attach ISO to VM as CDROM
      ansible.builtin.command:
        cmd: "qm set {{ sno_vm_id }} --ide2 {{ proxmox_iso_storage }}:iso/{{ sno_iso_filename }},media=cdrom"
      delegate_to: proxmox_host
      changed_when: true
    - name: Ensure boot order prefers disk, falls back to CDROM
      # order=scsi0;ide2: OVMF tries scsi0 first; on first boot the disk has
      # no EFI application so OVMF falls through to ide2 (the agent ISO).
      # After RHCOS writes its EFI entry to the disk, subsequent reboots boot
      # directly from scsi0 — the CDROM is never tried again, breaking the loop.
      ansible.builtin.command:
        cmd: "qm set {{ sno_vm_id }} --boot order=scsi0;ide2"
      delegate_to: proxmox_host
      changed_when: true
    # ------------------------------------------------------------------
    # Step 6: Boot the VM
    # ------------------------------------------------------------------
    - name: Start SNO VM
      community.proxmox.proxmox_kvm:
        api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}"
        api_user: "{{ proxmox_api_user }}"
        api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}"
        api_token_id: "{{ proxmox_api_token_id }}"
        api_token_secret: "{{ proxmox_api_token_secret }}"
        validate_certs: "{{ proxmox_validate_certs }}"
        node: "{{ proxmox_node }}"
        name: "{{ sno_vm_name }}"
        state: started
    # ------------------------------------------------------------------
    # Step 7: Persist credentials to Proxmox host
    # The EE is ephemeral — copy auth files to a durable location before
    # the container exits. sno_credentials_dir defaults to
    # /root/sno-<cluster_name> on proxmox_host. The agent installer writes
    # auth/ while generating the ISO, so these files exist before Step 8.
    # ------------------------------------------------------------------
    - name: Create credentials directory on Proxmox host
      ansible.builtin.file:
        path: "{{ sno_credentials_dir }}"
        state: directory
        mode: "0700"
      delegate_to: proxmox_host
    - name: Copy kubeconfig to Proxmox host
      ansible.builtin.copy:
        src: "{{ sno_install_dir }}/auth/kubeconfig"
        dest: "{{ sno_credentials_dir }}/kubeconfig"
        mode: "0600"
      delegate_to: proxmox_host
    - name: Copy kubeadmin-password to Proxmox host
      ansible.builtin.copy:
        src: "{{ sno_install_dir }}/auth/kubeadmin-password"
        dest: "{{ sno_credentials_dir }}/kubeadmin-password"
        mode: "0600"
      delegate_to: proxmox_host
    # ------------------------------------------------------------------
    # Step 8: Wait for installation to complete (~60-90 min)
    # Credentials land in sno_install_dir/auth/ automatically.
    # Inline poll (poll: 30) is used rather than fire-and-forget async
    # because the connection is local — no SSH timeout risk — and the
    # poll: 0 + async_status pattern stores job state in ~/.ansible_async
    # inside the EE container, which is lost if the EE is restarted.
    # Ensure your job/EE timeout is set to at least 6000 s (100 min).
    # ------------------------------------------------------------------
    - name: Wait for SNO installation to complete
      ansible.builtin.command:
        cmd: "{{ sno_install_dir }}/openshift-install agent wait-for install-complete --dir {{ sno_install_dir }} --log-level=info"
      # Pure polling — never reports a change.
      changed_when: false
      # FIX: budget aligned with the 6000 s guidance above; the previous
      # 5400 s would kill the wait at exactly the 90-min upper bound.
      async: 6000
      poll: 30
    # ------------------------------------------------------------------
    # Step 9: Eject CDROM so the VM never boots the agent ISO again
    # ------------------------------------------------------------------
    - name: Eject CDROM after successful installation
      ansible.builtin.command:
        cmd: "qm set {{ sno_vm_id }} --ide2 none,media=cdrom"
      delegate_to: proxmox_host
      changed_when: true
    - name: Display post-install info
      ansible.builtin.debug:
        msg:
          - "SNO installation complete!"
          - "API URL : https://api.{{ ocp_cluster_name }}.{{ ocp_base_domain }}:6443"
          - "Console : https://console-openshift-console.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
          - "Kubeconfig : {{ sno_credentials_dir }}/kubeconfig (on proxmox_host)"
          - "kubeadmin pass : {{ sno_credentials_dir }}/kubeadmin-password (on proxmox_host)"

View File

@@ -1,32 +1,64 @@
---
- name: Get info on the existing host entries
hosts: localhost
- name: Configure DHCP
hosts: opnsense
gather_facts: false
module_defaults:
group/ansibleguy.opnsense.all:
firewall: '{{ lookup("env","OPNSENSE_HOST") }}'
api_key: '{{ lookup("env","OPNSENSE_API_KEY") }}'
api_secret: '{{ lookup("env","OPNSENSE_API_SECRET") }}'
api_port: 8443
ansibleguy.opnsense.unbound_host:
match_fields: ['description']
ansibleguy.opnsense.list:
target: 'unbound_host'
group/oxlorg.opnsense.all:
firewall: "{{ opnsense_host }}"
api_key: "{{ opnsense_api_key }}"
api_secret: "{{ opnsense_api_secret }}"
ssl_verify: false
api_port: "{{ opnsense_api_port|default(omit) }}"
tasks:
- name: Listing hosts # noqa args[module]
ansibleguy.opnsense.list:
target: 'unbound_host'
register: existing_entries
- name: Install packages
oxlorg.opnsense.package:
name:
- os-acme-client
action: install
delegate_to: localhost
- name: Printing entries
ansible.builtin.debug:
var: existing_entries.data
- name: Setup ACME Client
ansible.builtin.include_role:
name: toallab.infra.opnsense_service
tasks_from: setup.yml
- name: Generate csv from template
ansible.builtin.template:
src: ../templates/hosts.j2
mode: "0644"
dest: "/data/output.csv"
- name: Configure KEA DHCP Server
oxlorg.opnsense.dhcp_general:
enabled: "{{ dhcp_enabled }}"
interfaces: "{{ dhcp_interfaces }}"
delegate_to: localhost
- name: Add subnet
oxlorg.opnsense.dhcp_subnet:
subnet: "{{ item.subnet }}"
pools: "{{ item.pools }}"
auto_options: false
gateway: '{{ item.gateway }}'
dns: '{{ item.dns }}'
domain: '{{ item.domain }}'
reload: false
delegate_to: localhost
loop: "{{ dhcp_subnets }}"
- name: Get all dhcp_reservations_* variables from hostvars
ansible.builtin.set_fact:
all_dhcp_reservations: >-
{{
hostvars[inventory_hostname] | dict2items
| selectattr('key', 'match', '^dhcp_reservations_')
| map(attribute='value')
| flatten
| selectattr('type', 'match', 'static')
}}
- name: Add DHCP Reservations
oxlorg.opnsense.dhcp_reservation:
hostname: "{{ item.hostname }}"
mac: "{{ item.mac }}"
ip: "{{ item.address }}"
subnet: "{{ item.address | ansible.utils.ipsubnet(24) }}"
description: "{{ item.description | default('') }}"
reload: false
delegate_to: localhost
loop: "{{ all_dhcp_reservations }}"

View File

@@ -0,0 +1,34 @@
---
# Generated by Ansible — do not edit by hand
# Source: playbooks/templates/agent-config.yaml.j2
# FIX: string-valued Jinja substitutions are quoted so the rendered YAML
# is always read as strings by openshift-install (guards against implicit
# typing of odd-looking values); prefix-length stays unquoted (integer).
apiVersion: v1alpha1
kind: AgentConfig
metadata:
  name: "{{ ocp_cluster_name }}"
# Single-node cluster: the node's own IP is the rendezvous address.
rendezvousIP: "{{ sno_ip }}"
hosts:
  - hostname: master-0
    interfaces:
      # Must match the MAC assigned to the Proxmox VM's net0 device so the
      # agent binds the static config to the right NIC.
      - name: primary
        macAddress: "{{ sno_mac }}"
    networkConfig:
      interfaces:
        - name: primary
          type: ethernet
          state: up
          mac-address: "{{ sno_mac }}"
          ipv4:
            enabled: true
            address:
              - ip: "{{ sno_ip }}"
                prefix-length: {{ sno_prefix_length }}
            dhcp: false
      dns-resolver:
        config:
          server:
            - "{{ sno_nameserver }}"
      routes:
        config:
          - destination: 0.0.0.0/0
            next-hop-address: "{{ sno_gateway }}"
            next-hop-interface: primary

View File

@@ -0,0 +1,27 @@
---
# Generated by Ansible — do not edit by hand
# Source: playbooks/templates/install-config.yaml.j2
# FIX: string-valued Jinja substitutions are quoted so the rendered YAML
# is always read as strings (guards against implicit typing).
apiVersion: v1
baseDomain: "{{ ocp_base_domain }}"
metadata:
  name: "{{ ocp_cluster_name }}"
networking:
  networkType: OVNKubernetes
  machineNetwork:
    - cidr: "{{ sno_machine_network }}"
  clusterNetwork:
    - cidr: 10.128.0.0/14
      hostPrefix: 23
  serviceNetwork:
    - 172.30.0.0/16
# SNO topology: zero workers, a single schedulable control-plane node.
compute:
  - name: worker
    replicas: 0
controlPlane:
  name: master
  replicas: 1
platform:
  none: {}
# NOTE(review): to_json assumes ocp_pull_secret is a dict/mapping. The
# playbook header describes vault_ocp_pull_secret as a "JSON string" — if
# it really is a string, to_json double-encodes it (wraps it in quotes and
# escapes it) and the rendered pullSecret will be invalid. Confirm the
# vaulted value's type and drop the filter if it is already JSON text.
pullSecret: |
  {{ ocp_pull_secret | ansible.builtin.to_json }}
sshKey: "{{ ocp_ssh_public_key }}"

View File

@@ -0,0 +1,27 @@
---
# Proxmox connection
# proxmox_api_host / proxmox_api_port are derived from the 'proxmox_api'
# inventory host (ansible_host / ansible_port). Do not set them here.
proxmox_node: pve1
proxmox_api_user: ansible@pam
proxmox_api_token_id: ansible
proxmox_api_token_secret: "{{ vault_proxmox_token_secret }}"
# NOTE(review): TLS verification disabled — presumably a self-signed lab
# certificate on the Proxmox API; confirm.
proxmox_validate_certs: false
# Storage
proxmox_storage: local-lvm # VM disk storage pool
proxmox_iso_storage: local # ISO storage pool name (Proxmox)
proxmox_iso_dir: /var/lib/vz/template/iso # Filesystem path on proxmox_host
sno_credentials_dir: "/root/sno-{{ ocp_cluster_name }}" # Persistent credentials on proxmox_host
# VM specification
sno_vm_name: "sno-{{ ocp_cluster_name }}"
# 8 vCPU / 32 GiB / 120 GB — sized for Single Node OpenShift; TODO confirm
# against the minimum requirements of the target OCP version.
sno_cpu: 8
sno_memory_mb: 32768
sno_disk_gb: 120
sno_bridge: vmbr0
sno_vlan: 40
sno_mac: "" # Leave empty for auto-assignment. Set explicitly to pin MAC for static IP.
# VM ID - leave 0 for auto-assign by Proxmox
# (consumers treat 0 as "unset", e.g. `sno_vm_id | default(omit, true)`
# in the proxmox_sno_vm role).
sno_vm_id: 0

View File

@@ -0,0 +1,84 @@
---
# Create a Proxmox VM for Single Node OpenShift on VLAN40 (192.168.40.0/24).
# Uses q35 machine type with UEFI (required for SNO / RHCOS).
# An empty ide2 CD-ROM slot is created here so the boot order can reference it;
# the deploy_openshift.yml play loads the actual ISO into it after generation.
- name: Build net0 string
  ansible.builtin.set_fact:
    # Proxmox net format: model[=macaddr],bridge=<bridge>[,tag=<vlan>]
    # The "=MAC" part is only emitted when sno_mac is pinned.
    _sno_net0: >-
      virtio{{
      '=' + sno_mac if sno_mac | length > 0 else ''
      }},bridge={{ sno_bridge }},tag={{ sno_vlan }}
- name: Create SNO VM in Proxmox
  community.proxmox.proxmox_kvm:
    api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}"
    api_user: "{{ proxmox_api_user }}"
    api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token_secret }}"
    validate_certs: "{{ proxmox_validate_certs }}"
    node: "{{ proxmox_node }}"
    # default(omit, true) treats the 0 sentinel as unset → Proxmox
    # auto-assigns the vmid.
    vmid: "{{ sno_vm_id | default(omit, true) }}"
    name: "{{ sno_vm_name }}"
    cores: "{{ sno_cpu }}"
    memory: "{{ sno_memory_mb }}"
    cpu: host
    numa_enabled: true
    machine: q35
    bios: ovmf
    efidisk0:
      storage: "{{ proxmox_storage }}"
      format: raw
      efitype: 4m
      pre_enrolled_keys: false
    scsi:
      scsi0: "{{ proxmox_storage }}:{{ sno_disk_gb }},format=raw,iothread=1,cache=writeback"
    scsihw: virtio-scsi-single
    ide:
      # Empty CD-ROM slot; the deploy play attaches the agent ISO later.
      ide2: none,media=cdrom
    net:
      net0: "{{ _sno_net0 }}"
    boot: "order=scsi0;ide2"
    onboot: true
    state: present
  register: proxmox_vm_result
- name: Retrieve VM info
  community.proxmox.proxmox_vm_info:
    api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}"
    api_user: "{{ proxmox_api_user }}"
    api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token_secret }}"
    validate_certs: "{{ proxmox_validate_certs }}"
    node: "{{ proxmox_node }}"
    name: "{{ sno_vm_name }}"
    type: qemu
    config: current
  register: proxmox_vm_info
  # FIX: `retries` without `until` is ignored on ansible-core < 2.16, so
  # this task never actually re-polled. Make the retry condition explicit
  # and wait between attempts until the new VM shows up in the API.
  until: proxmox_vm_info.proxmox_vms | length > 0
  retries: 5
  delay: 2
- name: Set VM ID fact for subsequent plays
  ansible.builtin.set_fact:
    sno_vm_id: "{{ proxmox_vm_info.proxmox_vms[0].vmid }}"
    cacheable: true
- name: Extract MAC address from VM config
  ansible.builtin.set_fact:
    # net0 format: virtio=52:54:00:xx:xx:xx,bridge=vmbr0,tag=40
    sno_mac: >-
      {{ proxmox_vm_info.proxmox_vms[0].config.net0
      | regex_search('([0-9A-Fa-f]{2}(?::[0-9A-Fa-f]{2}){5})', '\1')
      | first }}
    cacheable: true
  when: sno_mac | length == 0
- name: Display VM details
  ansible.builtin.debug:
    msg:
      - "VM Name : {{ sno_vm_name }}"
      - "VM ID : {{ sno_vm_id }}"
      - "MAC : {{ sno_mac }}"

View File

@@ -1,40 +0,0 @@
Role Name
=========
Provisions home lab infrastructure.
Requirements
------------
Really, you need my home lab setup. This role isn't really reusable in that regard.
Role Variables
--------------
TBD
Dependencies
------------
My Home Lab
Example Playbook
----------------
TODO
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
MIT
Author Information
------------------
Patrick Toal - ptoal@takeflight.ca - https://toal.ca

View File

@@ -1,2 +0,0 @@
---
# defaults file for toallab.infrastructure

View File

@@ -1,2 +0,0 @@
---
# handlers file for toallab.infrastructure

View File

@@ -1,53 +0,0 @@
galaxy_info:
author: your name
description: your description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.4
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -1,31 +0,0 @@
---
# tasks file for toallab.infrastructure
- name: Backup IOS Config
connection: network_cli
become: yes
ios_config:
backup: yes
- name: Install base configuration
connection: network_cli
become: yes
ios_config:
lines:
- aaa new-model
- ip domain-name lan.toal.ca
- ip name-server 192.168.1.1
- no cdp run
- lldp run
- ip ssh authentication-retries 2
- ip ssh rsa keypair-name ssh2
- ip ssh version 2
- ntp server 0.ca.pool.ntp.org
- ntp server 0.pool.ntp.org
- ntp server ip 1.pool.ntp.org
- name: Save changes
become: yes
connection: network_cli
ios_config:
save_when: changed

View File

@@ -1,2 +0,0 @@
localhost

View File

@@ -1,5 +0,0 @@
---
- hosts: localhost
remote_user: root
roles:
- toallab.infrastructure

View File

@@ -1,2 +0,0 @@
---
# vars file for toallab.infrastructure

0
router-ca.crt Normal file
View File

View File

@@ -10,7 +10,7 @@
- name: Run Template
condition:
all:
- "event is defined"
actions:
- print_event:
pretty: true
- "true"
action:
print_event:
pretty: true

36
vault-id-from-op-client.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Ansible vault-password client script: resolves the vault passphrase for a
# given --vault-id from 1Password via the `op` CLI, so playbooks can run
# non-interactively, e.g.:
#   ansible-playbook site.yml --vault-id mylabel@vault-id-from-op-client.sh
# Requires `op` (signed in, with access to the LabSecrets vault) and `jq`
# on PATH. Prints the password on stdout; exits non-zero on any failure.
# Parse input arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--vault-id)
VAULT_ID="$2"
shift 2
;;
*)
echo "Usage: $0 --vault-id <vault id>" >&2
exit 1
;;
esac
done
# Validate vault ID
if [[ -z "$VAULT_ID" ]]; then
echo "Error: Missing required --vault-id argument" >&2
exit 1
fi
# Naming convention: the 1Password item is "<vault id> vault key" and its
# "password" field holds the vault passphrase.
ITEM_NAME="${VAULT_ID} vault key"
FIELD_NAME="password"
# Fetch the vault password from 1Password
# (--format=json yields an object whose .value is the field's contents;
# op's stderr is suppressed so only our own error message reaches the user)
VAULT_PASSWORD=$(op item get "$ITEM_NAME" --fields "$FIELD_NAME" --format=json --vault LabSecrets 2>/dev/null | jq -r '.value')
# Output the password or report error
# ("null" is what jq prints when .value is absent, so treat it as a miss)
if [[ -n "$VAULT_PASSWORD" && "$VAULT_PASSWORD" != "null" ]]; then
echo "$VAULT_PASSWORD"
else
echo "Error: Could not retrieve vault password for vault ID '$VAULT_ID' (item: '$ITEM_NAME')" >&2
exit 1
fi
fi