Update storage and Keycloak config

This commit is contained in:
2026-03-04 12:17:47 -05:00
parent d981b69669
commit d31b14cd72
28 changed files with 1433 additions and 205 deletions

View File

@@ -13,3 +13,4 @@ collections:
type: git type: git
version: latest version: latest
- name: middleware_automation.keycloak - name: middleware_automation.keycloak
- name: infra.aap_configuration

View File

@@ -0,0 +1,54 @@
# Session Summary: AAP Keycloak OIDC Configuration
Date: 2026-02-26
## Work Done
Added Keycloak OIDC authentication support for AAP 2.6 using the correct approach:
`infra.aap_configuration.gateway_authenticators` (AAP Gateway API) instead of CR extra_settings (wrong for 2.6).
## Files Changed
- `collections/requirements.yml` — Added `infra.aap_configuration`
- `playbooks/deploy_aap.yml` — Full rewrite:
- Play 0 (`aap_configure_keycloak`): Creates Keycloak OIDC client with correct callback URI `/accounts/profile/callback/`
- Play 1: Unchanged (installs AAP via `aap_operator` role)
- Play 2 (`aap_configure_oidc`): Fetches admin password from K8s secret, calls `infra.aap_configuration.gateway_authenticators`
- `roles/aap_operator/defaults/main.yml` — Removed OIDC vars (not role responsibility)
- `roles/aap_operator/meta/argument_specs.yml` — Removed OIDC var docs
- `roles/aap_operator/tasks/main.yml` — Removed OIDC include task (was wrong approach)
- `roles/aap_operator/tasks/configure_oidc.yml` — Replaced with redirect comment
## Key Decisions
- **OIDC must be configured via AAP Gateway API** (not CR extra_settings). AAP 2.5+ Gateway uses Django-based auth with `ansible_base.authentication` plugins.
- **authenticator type**: `ansible_base.authentication.authenticator_plugins.generic_oidc`
- **Callback URL**: `{aap_gateway_url}/accounts/profile/callback/` (not `/social/complete/oidc/`)
- **Admin password**: Fetched dynamically from K8s secret `{platform_name}-admin-password` (not stored separately in vault)
- **OIDC not in `aap_operator` role**: Kept as a separate playbook play (post-install concern)
## Variables Required in `aap` host_vars
```yaml
aap_gateway_url: "https://aap.apps.<cluster>.<domain>"
aap_oidc_issuer: "https://keycloak.toal.ca/realms/<realm>"
aap_oidc_client_id: aap # optional, default: aap
```
## Vault Variables
```
vault_aap_oidc_client_secret — OIDC client secret from Keycloak
vault_aap_deployer_token — K8s SA token (already required)
vault_keycloak_admin_password — required for Play 0
```
## Usage
```bash
# Step 1: Create Keycloak client (once, idempotent)
ansible-navigator run playbooks/deploy_aap.yml --tags aap_configure_keycloak
# Step 2: Deploy AAP
ansible-navigator run playbooks/deploy_aap.yml
# Step 3: Register OIDC authenticator in AAP Gateway
ansible-navigator run playbooks/deploy_aap.yml --tags aap_configure_oidc
```
## Open Items
- ASSUMED: `infra.aap_configuration` + its dependency `ansible.platform` are available or installable in `aap.toal.ca/ee-demo:latest`. If not, a custom EE rebuild is needed.
- The `aap-deployer` SA has `get` on secrets in `aap` namespace — confirmed via RBAC in `deploy_openshift.yml` Play 9.

View File

@@ -1,46 +0,0 @@
- name: Publish CVs
hosts: satellite1.mgmt.toal.ca
vars:
sat_env_name: Library
sat_org: Toal.ca
sat_publish_description: Automated CV Update
tasks:
- name: Pre-tasks | Find all CVs
redhat.satellite.resource_info:
username: "{{ satellite_admin_user }}"
password: "{{ satellite_admin_pass }}"
server_url: "{{ satellite_url }}"
organization: "{{ sat_org }}"
resource: content_views
validate_certs: no
register: raw_list_cvs
- name: Pre-tasks | Get resource information
set_fact:
list_all_cvs: "{{ raw_list_cvs['resources'] | json_query(jmesquery) | list }}"
vars:
jmesquery: "[*].{name: name, composite: composite, id: id}"
- name: Pre-tasks | Extract list of content views
set_fact:
sat6_content_views_list: "{{ sat6_content_views_list|default([]) }} + ['{{ item.name }}' ]"
loop: "{{ list_all_cvs | reject('search', 'Default Organization View') | list }}"
when: item.composite == false
- name: Publish content
redhat.satellite.content_view_version:
username: "{{ satellite_admin_user }}"
password: "{{ satellite_admin_pass }}"
server_url: "{{ satellite_url }}"
organization: "{{ sat_org }}"
content_view: "{{ item }}"
validate_certs: no
description: "{{ sat_publish_description }}"
lifecycle_environments:
- Library
- "{{ sat_env_name }}"
loop: "{{ sat6_content_views_list | list }}"
loop_control:
loop_var: "item"
register: cv_publish_sleeper

219
playbooks/deploy_aap.yml Normal file
View File

@@ -0,0 +1,219 @@
---
# Deploy Ansible Automation Platform on OpenShift
#
# Authenticates via the aap-deployer ServiceAccount token (not kubeadmin).
# The token is stored in 1Password and loaded via vault_aap_deployer_token.
#
# Prerequisites:
# - OpenShift cluster deployed (deploy_openshift.yml)
# - aap-deployer ServiceAccount provisioned:
# ansible-navigator run playbooks/deploy_openshift.yml --tags sno_deploy_service_accounts
# - SA token saved to 1Password as vault_aap_deployer_token
#
# Keycloak OIDC prerequisites (--tags aap_configure_keycloak,aap_configure_oidc):
# - Keycloak realm exists (configured via deploy_openshift.yml)
# - vault_aap_oidc_client_secret in 1Password (or it will be generated and displayed)
# - In host_vars for the aap host:
# aap_gateway_url: "https://aap.apps.<cluster>.<domain>"
# aap_oidc_client_id: aap
# aap_oidc_issuer: "https://keycloak.example.com/realms/<realm>"
# aap_oidc_public_key: "<RS256 public key from Keycloak realm Keys tab>"
#
# Play order:
# Play 0: aap_configure_keycloak — Create Keycloak OIDC client for AAP Gateway
# Play 1: (default) — Install AAP via aap_operator role
# Play 2: aap_configure_oidc — Configure OIDC Authentication Method in AAP Gateway
#
# Usage:
# ansible-navigator run playbooks/deploy_aap.yml
# ansible-navigator run playbooks/deploy_aap.yml --tags aap_configure_keycloak
# ansible-navigator run playbooks/deploy_aap.yml --tags aap_configure_oidc
# ansible-navigator run playbooks/deploy_aap.yml --tags aap_configure_keycloak,aap_configure_oidc
# ---------------------------------------------------------------------------
# Play 0: Create Keycloak OIDC client for AAP (optional)
# Runs on openshift hosts to access keycloak_url/keycloak_realm host vars.
# Creates the OIDC client in Keycloak with the correct AAP Gateway callback URI.
# ---------------------------------------------------------------------------
- name: Configure Keycloak OIDC client for AAP
hosts: openshift
gather_facts: false
connection: local
tags:
- never
- aap_configure_keycloak
vars:
__aap_keycloak_api_url: "{{ keycloak_url }}{{ keycloak_context | default('') }}"
__aap_oidc_client_id: "{{ aap_oidc_client_id | default('aap') }}"
# AAP operator generates the Gateway route as {platform_name}-{namespace}.apps.{cluster}.{domain}
# e.g. platform 'aap' in namespace 'aap' → aap-aap.apps.openshift.toal.ca
__aap_platform_name: "{{ aap_operator_platform_name | default('aap') }}"
__aap_namespace: "{{ aap_operator_namespace | default('aap') }}"
__aap_oidc_redirect_uris:
- "https://{{ __aap_platform_name }}-{{ __aap_namespace }}.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}/accounts/profile/callback/"
module_defaults:
middleware_automation.keycloak.keycloak_client:
auth_client_id: admin-cli
auth_keycloak_url: "{{ __aap_keycloak_api_url }}"
auth_realm: master
auth_username: "{{ keycloak_admin_user }}"
auth_password: "{{ vault_keycloak_admin_password }}"
validate_certs: "{{ keycloak_validate_certs | default(true) }}"
tasks:
- name: Set AAP OIDC client secret (vault value or generated)
ansible.builtin.set_fact:
__aap_oidc_client_secret: "{{ vault_aap_oidc_client_secret | default(lookup('community.general.random_string', length=32, special=false)) }}"
__aap_oidc_secret_generated: "{{ vault_aap_oidc_client_secret is not defined }}"
no_log: true
- name: Create AAP OIDC client in Keycloak
middleware_automation.keycloak.keycloak_client:
realm: "{{ keycloak_realm }}"
client_id: "{{ __aap_oidc_client_id }}"
name: "Ansible Automation Platform"
description: "OIDC client for AAP Gateway on {{ ocp_cluster_name }}.{{ ocp_base_domain }}"
enabled: true
protocol: openid-connect
public_client: false
standard_flow_enabled: true
implicit_flow_enabled: false
direct_access_grants_enabled: false
service_accounts_enabled: false
secret: "{{ __aap_oidc_client_secret }}"
redirect_uris: "{{ __aap_oidc_redirect_uris }}"
web_origins:
- "+"
protocol_mappers:
- name: groups
protocol: openid-connect
protocolMapper: oidc-group-membership-mapper
config:
full.path: "false"
id.token.claim: "true"
access.token.claim: "true"
userinfo.token.claim: "true"
claim.name: groups
state: present
no_log: "{{ keycloak_no_log | default(true) }}"
- name: Display generated client secret (save this to vault!)
ansible.builtin.debug:
msg:
- "*** GENERATED AAP OIDC CLIENT SECRET — SAVE THIS TO VAULT ***"
- "vault_aap_oidc_client_secret: {{ __aap_oidc_client_secret }}"
- ""
- "Save to 1Password and reference as vault_aap_oidc_client_secret."
when: __aap_oidc_secret_generated | bool
- name: Display Keycloak AAP OIDC configuration summary
ansible.builtin.debug:
msg:
- "Keycloak AAP OIDC client configured:"
- " Realm : {{ keycloak_realm }}"
- " Client : {{ __aap_oidc_client_id }}"
- " Issuer : {{ __aap_keycloak_api_url }}/realms/{{ keycloak_realm }}"
- " Redirect : {{ __aap_oidc_redirect_uris | join(', ') }}"
- ""
- "Set in host_vars for the aap host:"
- " aap_gateway_url: https://{{ __aap_platform_name }}-{{ __aap_namespace }}.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
- " aap_oidc_issuer: {{ __aap_keycloak_api_url }}/realms/{{ keycloak_realm }}"
- ""
- "Then run: --tags aap_configure_oidc to register the authenticator in AAP."
verbosity: 1
# ---------------------------------------------------------------------------
# Play 1: Install Ansible Automation Platform
# ---------------------------------------------------------------------------
- name: Install Ansible Automation Platform
hosts: aap
gather_facts: false
connection: local
pre_tasks:
- name: Verify aap-deployer token is available
ansible.builtin.assert:
that:
- vault_aap_deployer_token is defined
- vault_aap_deployer_token | length > 0
fail_msg: >-
vault_aap_deployer_token is not set. Provision the ServiceAccount with:
ansible-navigator run playbooks/deploy_openshift.yml --tags sno_deploy_service_accounts
Then save the displayed token to 1Password as vault_aap_deployer_token.
# environment:
# K8S_AUTH_HOST: "{{ aap_k8s_api_url }}"
# K8S_AUTH_API_KEY: "{{ vault_aap_deployer_token }}"
roles:
- role: aap_operator
# ---------------------------------------------------------------------------
# Play 2: Configure Keycloak OIDC Authentication Method in AAP Gateway (optional)
# Uses infra.aap_configuration.gateway_authenticators to register the OIDC
# provider via the AAP Gateway API. Run after Play 1 (AAP must be Running).
#
# Requires in host_vars for the aap host:
# aap_gateway_url: "https://aap.apps.<cluster>.<domain>"
# aap_oidc_issuer: "https://keycloak.example.com/realms/<realm>"
# aap_oidc_client_id: aap (optional, default: aap)
# aap_oidc_public_key: "<RS256 public key from Keycloak realm Keys tab>"
# Vault:
# vault_aap_oidc_client_secret — OIDC client secret from Keycloak
# ---------------------------------------------------------------------------
- name: Configure Keycloak OIDC Authentication in AAP Gateway
hosts: aap
gather_facts: false
connection: local
tags:
- never
- aap_configure_oidc
vars:
__aap_namespace: "{{ aap_operator_namespace | default('aap') }}"
__aap_platform_name: "{{ aap_operator_platform_name | default('aap') }}"
environment:
K8S_AUTH_HOST: "{{ aap_k8s_api_url }}"
K8S_AUTH_API_KEY: "{{ vault_aap_deployer_token }}"
pre_tasks:
- name: Fetch AAP admin password from K8s secret
kubernetes.core.k8s_info:
api_version: v1
kind: Secret
namespace: "{{ __aap_namespace }}"
name: "{{ __aap_platform_name }}-admin-password"
register: __aap_admin_secret
no_log: false
- name: Set AAP admin password fact
ansible.builtin.set_fact:
__aap_admin_password: "{{ __aap_admin_secret.resources[0].data.password | b64decode }}"
no_log: true
tasks:
- name: Configure Keycloak OIDC authenticator in AAP Gateway
ansible.builtin.include_role:
name: infra.aap_configuration.gateway_authenticators
vars:
aap_hostname: "{{ aap_gateway_url }}"
aap_username: "{{ aap_operator_admin_user | default('admin') }}"
aap_password: "{{ __aap_admin_password }}"
gateway_authenticators:
- name: Keycloak
type: ansible_base.authentication.authenticator_plugins.keycloak
slug: keycloak
enabled: true
configuration:
KEY: "{{ aap_oidc_client_id | default('aap') }}"
SECRET: "{{ vault_aap_oidc_client_secret }}"
PUBLIC_KEY: "{{ aap_oidc_public_key }}"
ACCESS_TOKEN_URL: "{{ aap_oidc_issuer }}/protocol/openid-connect/token"
AUTHORIZATION_URL: "{{ aap_oidc_issuer }}/protocol/openid-connect/auth"
GROUPS_CLAIM: "groups"
state: present

View File

@@ -8,11 +8,14 @@
# Inventory requirements: # Inventory requirements:
# sno.openshift.toal.ca - in 'openshift' group # sno.openshift.toal.ca - in 'openshift' group
# host_vars: ocp_cluster_name, ocp_base_domain, ocp_version, sno_ip, # host_vars: ocp_cluster_name, ocp_base_domain, ocp_version, sno_ip,
# sno_gateway, sno_nameserver, sno_prefix_length, sno_vm_name, # sno_gateway, sno_nameserver, sno_prefix_length, sno_machine_network,
# sno_bridge, sno_vlan, proxmox_node, keycloak_url, keycloak_realm, # sno_vm_name, sno_vnet, sno_storage_ip, sno_storage_ip_prefix_length,
# sno_storage_vnet, proxmox_node, keycloak_url, keycloak_realm,
# oidc_admin_groups, sno_deploy_letsencrypt_email, ... # oidc_admin_groups, sno_deploy_letsencrypt_email, ...
# secrets: vault_ocp_pull_secret, vault_keycloak_admin_password, # secrets: vault_ocp_pull_secret, vault_keycloak_admin_password,
# vault_oidc_client_secret (optional) # vault_oidc_client_secret (optional)
# optional: ocp_kubeconfig (defaults to ~/.kube/config; set to
# sno_install_dir/auth/kubeconfig for fresh installs)
# proxmox_api - inventory host (ansible_host, ansible_port) # proxmox_api - inventory host (ansible_host, ansible_port)
# proxmox_host - inventory host (ansible_host, ansible_connection: ssh) # proxmox_host - inventory host (ansible_host, ansible_connection: ssh)
# gate.toal.ca - in 'opnsense' group # gate.toal.ca - in 'opnsense' group
@@ -27,6 +30,11 @@
# Play 4: sno_deploy_install — Generate ISO, boot VM, wait for install # Play 4: sno_deploy_install — Generate ISO, boot VM, wait for install
# Play 5: keycloak — Configure Keycloak OIDC client # Play 5: keycloak — Configure Keycloak OIDC client
# Play 6: sno_deploy_oidc / sno_deploy_certmanager / sno_deploy_delete_kubeadmin # Play 6: sno_deploy_oidc / sno_deploy_certmanager / sno_deploy_delete_kubeadmin
# Play 7: sno_deploy_lvms — Install LVM Storage for persistent volumes
# Play 8: sno_deploy_nfs — Deploy in-cluster NFS provisioner (RWX StorageClass)
# Play 9: sno_deploy_service_accounts — Provision ServiceAccounts for app deployers
#
# AAP deployment is in a separate playbook: deploy_aap.yml
# #
# Usage: # Usage:
# ansible-navigator run playbooks/deploy_openshift.yml # ansible-navigator run playbooks/deploy_openshift.yml
@@ -35,6 +43,9 @@
# ansible-navigator run playbooks/deploy_openshift.yml --tags opnsense,dns # ansible-navigator run playbooks/deploy_openshift.yml --tags opnsense,dns
# ansible-navigator run playbooks/deploy_openshift.yml --tags keycloak,sno_deploy_oidc # ansible-navigator run playbooks/deploy_openshift.yml --tags keycloak,sno_deploy_oidc
# ansible-navigator run playbooks/deploy_openshift.yml --tags sno_deploy_certmanager # ansible-navigator run playbooks/deploy_openshift.yml --tags sno_deploy_certmanager
# ansible-navigator run playbooks/deploy_openshift.yml --tags sno_deploy_lvms
# ansible-navigator run playbooks/deploy_openshift.yml --tags sno_deploy_nfs
# ansible-navigator run playbooks/deploy_openshift.yml --tags sno_deploy_service_accounts
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Play 1: Create SNO VM in Proxmox # Play 1: Create SNO VM in Proxmox
@@ -244,7 +255,7 @@
connection: local connection: local
environment: environment:
KUBECONFIG: "{{ sno_install_dir }}/auth/kubeconfig" KUBECONFIG: "{{ ocp_kubeconfig | default('~/.kube/config') }}"
K8S_AUTH_VERIFY_SSL: "false" K8S_AUTH_VERIFY_SSL: "false"
tags: tags:
@@ -274,20 +285,80 @@
- sno_deploy_delete_kubeadmin - sno_deploy_delete_kubeadmin
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# Play 7: Install Ansible Automation Platform (opt-in via --tags aap) # Play 7: Install LVM Storage for persistent volumes
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
- name: Install Ansible Automation Platform - name: Configure LVM Storage for persistent volumes
hosts: sno.openshift.toal.ca
gather_facts: false
connection: local
tags: sno_deploy_lvms
environment:
KUBECONFIG: "{{ ocp_kubeconfig | default('~/.kube/config') }}"
K8S_AUTH_VERIFY_SSL: "false"
roles:
- role: lvms_operator
# ---------------------------------------------------------------------------
# Play 8: Deploy NFS provisioner for ReadWriteMany storage
# Set nfs_provisioner_external_server / nfs_provisioner_external_path to use
# a pre-existing NFS share (e.g. 192.168.129.100:/mnt/BIGPOOL/NoBackups/OCPNFS).
# When those are unset, an in-cluster NFS server is deployed; LVMS (Play 7) must
# have run first to provide the backing RWO PVC.
# ---------------------------------------------------------------------------
- name: Deploy in-cluster NFS provisioner
hosts: sno.openshift.toal.ca
gather_facts: false
connection: local
tags: sno_deploy_nfs
environment:
KUBECONFIG: "{{ ocp_kubeconfig | default('~/.kube/config') }}"
K8S_AUTH_VERIFY_SSL: "false"
roles:
- role: nfs_provisioner
# ---------------------------------------------------------------------------
# Play 9: Provision ServiceAccounts for application deployers
# ---------------------------------------------------------------------------
- name: Provision OpenShift service accounts
hosts: sno.openshift.toal.ca hosts: sno.openshift.toal.ca
gather_facts: false gather_facts: false
connection: local connection: local
environment: environment:
KUBECONFIG: "{{ sno_install_dir }}/auth/kubeconfig" KUBECONFIG: "{{ ocp_kubeconfig | default('~/.kube/config') }}"
K8S_AUTH_VERIFY_SSL: "false" K8S_AUTH_VERIFY_SSL: "false"
tags: tags:
- never - never
- aap - sno_deploy_service_accounts
roles: roles:
- role: aap_operator - role: ocp_service_account
ocp_service_account_name: aap-deployer
ocp_service_account_namespace: aap
ocp_service_account_cluster_role_rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list", "create", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "patch"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get", "list", "watch"]
- apiGroups: ["operators.coreos.com"]
resources: ["operatorgroups", "subscriptions", "clusterserviceversions"]
verbs: ["get", "list", "create", "patch", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch"]
- apiGroups: ["aap.ansible.com"]
resources: ["ansibleautomationplatforms"]
verbs: ["get", "list", "create", "patch", "watch"]

View File

@@ -4,20 +4,26 @@ aap_operator_namespace: aap
aap_operator_channel: "stable-2.6" aap_operator_channel: "stable-2.6"
aap_operator_source: redhat-operators aap_operator_source: redhat-operators
aap_operator_name: ansible-automation-platform-operator aap_operator_name: ansible-automation-platform-operator
aap_operator_wait_timeout: 600 aap_operator_wait_timeout: 1800
# --- Automation Controller --- # --- AnsibleAutomationPlatform CR ---
aap_operator_controller_enabled: true aap_operator_platform_name: aap
aap_operator_controller_name: controller
aap_operator_controller_replicas: 1
# --- Automation Hub --- # --- Components (set disabled: true to skip) ---
aap_operator_hub_enabled: true aap_operator_controller_disabled: false
aap_operator_hub_name: hub aap_operator_hub_disabled: false
aap_operator_eda_disabled: false
# --- Event-Driven Ansible (EDA) --- # --- Storage ---
aap_operator_eda_enabled: true # RWO StorageClass for PostgreSQL (all components)
aap_operator_eda_name: eda aap_operator_storage_class: lvms-vg-data
# RWX StorageClass for Hub file/artifact storage
aap_operator_hub_file_storage_class: nfs-client
aap_operator_hub_file_storage_size: 10Gi
# --- Admin --- # --- Admin ---
aap_operator_admin_user: admin aap_operator_admin_user: admin
# --- Routing (optional) ---
# Set to a custom hostname to override the auto-generated Controller route
# aap_operator_controller_route_host: aap.example.com

View File

@@ -1,13 +1,13 @@
--- ---
argument_specs: argument_specs:
main: main:
short_description: Install AAP via OpenShift OLM operator short_description: Install AAP via OpenShift OLM operator (AnsibleAutomationPlatform CR)
description: description:
- Installs the Ansible Automation Platform operator via OLM and - Installs the Ansible Automation Platform operator via OLM and creates a
creates AutomationController, AutomationHub, and EDA instances. single AnsibleAutomationPlatform CR that manages Controller, Hub, and EDA.
options: options:
aap_operator_namespace: aap_operator_namespace:
description: Namespace for the AAP operator and instances. description: Namespace for the AAP operator and platform instance.
type: str type: str
default: aap default: aap
aap_operator_channel: aap_operator_channel:
@@ -23,38 +23,45 @@ argument_specs:
type: str type: str
default: ansible-automation-platform-operator default: ansible-automation-platform-operator
aap_operator_wait_timeout: aap_operator_wait_timeout:
description: Seconds to wait for operator and instances to become ready. description: Seconds to wait for operator and platform to become ready.
type: int type: int
default: 600 default: 1800
aap_operator_controller_enabled: aap_operator_platform_name:
description: Whether to create an AutomationController instance. description: Name of the AnsibleAutomationPlatform CR.
type: bool
default: true
aap_operator_controller_name:
description: Name of the AutomationController CR.
type: str type: str
default: controller default: aap
aap_operator_controller_replicas: aap_operator_controller_disabled:
description: Number of Controller replicas. description: Set true to skip deploying Automation Controller.
type: int
default: 1
aap_operator_hub_enabled:
description: Whether to create an AutomationHub instance.
type: bool type: bool
default: true default: false
aap_operator_hub_name: aap_operator_hub_disabled:
description: Name of the AutomationHub CR. description: Set true to skip deploying Automation Hub.
type: str
default: hub
aap_operator_eda_enabled:
description: Whether to create an EDA Controller instance.
type: bool type: bool
default: true default: false
aap_operator_eda_name: aap_operator_eda_disabled:
description: Name of the EDA CR. description: Set true to skip deploying Event-Driven Ansible.
type: bool
default: false
aap_operator_storage_class:
description: StorageClass for PostgreSQL persistent volumes (RWO).
type: str type: str
default: eda default: lvms-vg-data
aap_operator_hub_file_storage_class:
description: StorageClass for Hub file/artifact storage (RWX).
type: str
default: nfs-client
aap_operator_hub_file_storage_size:
description: Size of the Hub file storage PVC.
type: str
default: 10Gi
aap_operator_admin_user: aap_operator_admin_user:
description: Admin username for Controller and Hub. description: Admin username for the platform.
type: str type: str
default: admin default: admin
aap_operator_controller_route_host:
description: >
Custom hostname for the Automation Controller Route.
When set, overrides the auto-generated route hostname (e.g. aap.example.com).
Leave unset to use the default apps subdomain route.
type: str
required: false

View File

@@ -0,0 +1,4 @@
---
# OIDC is configured via the AAP Gateway API, not via this role.
# See: playbooks/deploy_aap.yml --tags aap_configure_keycloak,aap_configure_oidc
# Uses: infra.aap_configuration.gateway_authenticators

View File

@@ -1,8 +1,8 @@
--- ---
# Install Ansible Automation Platform via OpenShift OLM operator. # Install Ansible Automation Platform via OpenShift OLM operator.
# #
# Deploys the AAP operator, then creates AutomationController, # Deploys the AAP operator, then creates a single AnsibleAutomationPlatform
# AutomationHub, and EDA instances based on enabled flags. # CR that manages Controller, Hub, and EDA as a unified platform.
# All tasks are idempotent (kubernetes.core.k8s state: present). # All tasks are idempotent (kubernetes.core.k8s state: present).
# ------------------------------------------------------------------ # ------------------------------------------------------------------
@@ -17,6 +17,28 @@
metadata: metadata:
name: "{{ aap_operator_namespace }}" name: "{{ aap_operator_namespace }}"
- name: Read global pull secret
kubernetes.core.k8s_info:
api_version: v1
kind: Secret
namespace: openshift-config
name: pull-secret
register: __aap_operator_global_pull_secret
- name: Copy pull secret to AAP namespace
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Secret
metadata:
name: redhat-operators-pull-secret
namespace: "{{ aap_operator_namespace }}"
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: "{{ __aap_operator_global_pull_secret.resources[0].data['.dockerconfigjson'] }}"
no_log: false
- name: Create OperatorGroup for AAP - name: Create OperatorGroup for AAP
kubernetes.core.k8s: kubernetes.core.k8s:
state: present state: present
@@ -27,6 +49,8 @@
name: "{{ aap_operator_name }}" name: "{{ aap_operator_name }}"
namespace: "{{ aap_operator_namespace }}" namespace: "{{ aap_operator_namespace }}"
spec: spec:
targetNamespaces:
- "{{ aap_operator_namespace }}"
upgradeStrategy: Default upgradeStrategy: Default
- name: Subscribe to AAP operator - name: Subscribe to AAP operator
@@ -48,142 +72,95 @@
# ------------------------------------------------------------------ # ------------------------------------------------------------------
# Step 2: Wait for operator to be ready # Step 2: Wait for operator to be ready
# ------------------------------------------------------------------ # ------------------------------------------------------------------
- name: Wait for AutomationController CRD to be available - name: Wait for AnsibleAutomationPlatform CRD to be available
kubernetes.core.k8s_info: kubernetes.core.k8s_info:
api_version: apiextensions.k8s.io/v1 api_version: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
name: automationcontrollers.automationcontroller.ansible.com name: ansibleautomationplatforms.aap.ansible.com
register: __aap_operator_crd register: __aap_operator_crd
until: __aap_operator_crd.resources | length > 0 until: __aap_operator_crd.resources | length > 0
retries: "{{ __aap_operator_wait_retries }}" retries: "{{ __aap_operator_wait_retries }}"
delay: 10 delay: 10
- name: Wait for AAP operator deployment to be ready - name: Wait for AAP operator deployments to be ready
kubernetes.core.k8s_info: kubernetes.core.k8s_info:
api_version: apps/v1 api_version: apps/v1
kind: Deployment kind: Deployment
namespace: "{{ aap_operator_namespace }}" namespace: "{{ aap_operator_namespace }}"
label_selectors: label_selectors:
- "app.kubernetes.io/name={{ aap_operator_name }}" - "operators.coreos.com/{{ aap_operator_name }}.{{ aap_operator_namespace }}"
register: __aap_operator_deploy register: __aap_operator_deploy
until: >- until: >-
__aap_operator_deploy.resources | length > 0 and __aap_operator_deploy.resources | length > 0 and
(__aap_operator_deploy.resources[0].status.readyReplicas | default(0)) >= 1 (__aap_operator_deploy.resources
| rejectattr('status.readyReplicas', 'undefined')
| selectattr('status.readyReplicas', '>=', 1)
| list | length) == (__aap_operator_deploy.resources | length)
retries: "{{ __aap_operator_wait_retries }}" retries: "{{ __aap_operator_wait_retries }}"
delay: 10 delay: 10
# ------------------------------------------------------------------ # ------------------------------------------------------------------
# Step 3: Create AutomationController instance # Step 3: Deploy the unified AnsibleAutomationPlatform
# ------------------------------------------------------------------ # ------------------------------------------------------------------
- name: Create AutomationController instance - name: Create AnsibleAutomationPlatform
kubernetes.core.k8s: kubernetes.core.k8s:
state: present state: present
definition: definition:
apiVersion: automationcontroller.ansible.com/v1beta1 apiVersion: aap.ansible.com/v1alpha1
kind: AutomationController kind: AnsibleAutomationPlatform
metadata: metadata:
name: "{{ aap_operator_controller_name }}" name: "{{ aap_operator_platform_name }}"
namespace: "{{ aap_operator_namespace }}" namespace: "{{ aap_operator_namespace }}"
spec: spec:
replicas: "{{ aap_operator_controller_replicas }}"
admin_user: "{{ aap_operator_admin_user }}" admin_user: "{{ aap_operator_admin_user }}"
when: aap_operator_controller_enabled | bool # PostgreSQL storage for all components (RWO)
database:
postgres_storage_class: "{{ aap_operator_storage_class }}"
# Component toggles and per-component config
controller:
disabled: "{{ aap_operator_controller_disabled | bool }}"
route_host: "{{ aap_operator_controller_route_host | default(omit) }}"
hub:
disabled: "{{ aap_operator_hub_disabled | bool }}"
# Hub file/artifact storage (RWX) — must be under hub:
storage_type: file
file_storage_storage_class: "{{ aap_operator_hub_file_storage_class }}"
file_storage_size: "{{ aap_operator_hub_file_storage_size }}"
eda:
disabled: "{{ aap_operator_eda_disabled | bool }}"
# ------------------------------------------------------------------ # ------------------------------------------------------------------
# Step 4: Create AutomationHub instance # Step 4: Wait for platform to be ready
# ------------------------------------------------------------------ # ------------------------------------------------------------------
- name: Create AutomationHub instance - name: Wait for AnsibleAutomationPlatform to be ready
kubernetes.core.k8s:
state: present
definition:
apiVersion: automationhub.ansible.com/v1beta1
kind: AutomationHub
metadata:
name: "{{ aap_operator_hub_name }}"
namespace: "{{ aap_operator_namespace }}"
spec:
admin_password_secret: ""
route_host: ""
when: aap_operator_hub_enabled | bool
# ------------------------------------------------------------------
# Step 5: Create EDA Controller instance
# ------------------------------------------------------------------
- name: Create EDA Controller instance
kubernetes.core.k8s:
state: present
definition:
apiVersion: eda.ansible.com/v1alpha1
kind: EDA
metadata:
name: "{{ aap_operator_eda_name }}"
namespace: "{{ aap_operator_namespace }}"
spec:
automation_server_url: "https://{{ aap_operator_controller_name }}-{{ aap_operator_namespace }}.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
when: aap_operator_eda_enabled | bool
# ------------------------------------------------------------------
# Step 6: Wait for instances to be ready
# ------------------------------------------------------------------
- name: Wait for AutomationController to be ready
kubernetes.core.k8s_info: kubernetes.core.k8s_info:
api_version: automationcontroller.ansible.com/v1beta1 api_version: aap.ansible.com/v1alpha1
kind: AutomationController kind: AnsibleAutomationPlatform
namespace: "{{ aap_operator_namespace }}" namespace: "{{ aap_operator_namespace }}"
name: "{{ aap_operator_controller_name }}" name: "{{ aap_operator_platform_name }}"
register: __aap_operator_controller_status register: __aap_operator_platform_status
ignore_errors: true
until: >- until: >-
__aap_operator_controller_status.resources | length > 0 and __aap_operator_platform_status.resources is defined and
(__aap_operator_controller_status.resources[0].status.conditions | default([]) __aap_operator_platform_status.resources | length > 0 and
(__aap_operator_platform_status.resources[0].status.conditions | default([])
| selectattr('type', '==', 'Running') | selectattr('type', '==', 'Running')
| selectattr('status', '==', 'True') | list | length > 0) | selectattr('status', '==', 'True') | list | length > 0)
retries: "{{ __aap_operator_wait_retries }}" retries: "{{ __aap_operator_wait_retries }}"
delay: 10 delay: 10
when: aap_operator_controller_enabled | bool
- name: Wait for AutomationHub to be ready
kubernetes.core.k8s_info:
api_version: automationhub.ansible.com/v1beta1
kind: AutomationHub
namespace: "{{ aap_operator_namespace }}"
name: "{{ aap_operator_hub_name }}"
register: __aap_operator_hub_status
until: >-
__aap_operator_hub_status.resources | length > 0 and
(__aap_operator_hub_status.resources[0].status.conditions | default([])
| selectattr('type', '==', 'Running')
| selectattr('status', '==', 'True') | list | length > 0)
retries: "{{ __aap_operator_wait_retries }}"
delay: 10
when: aap_operator_hub_enabled | bool
- name: Wait for EDA Controller to be ready
kubernetes.core.k8s_info:
api_version: eda.ansible.com/v1alpha1
kind: EDA
namespace: "{{ aap_operator_namespace }}"
name: "{{ aap_operator_eda_name }}"
register: __aap_operator_eda_status
until: >-
__aap_operator_eda_status.resources | length > 0 and
(__aap_operator_eda_status.resources[0].status.conditions | default([])
| selectattr('type', '==', 'Running')
| selectattr('status', '==', 'True') | list | length > 0)
retries: "{{ __aap_operator_wait_retries }}"
delay: 10
when: aap_operator_eda_enabled | bool
# ------------------------------------------------------------------ # ------------------------------------------------------------------
# Step 7: Display summary # Step 5: Display summary
# ------------------------------------------------------------------ # ------------------------------------------------------------------
- name: Display AAP deployment summary - name: Display AAP deployment summary
ansible.builtin.debug: ansible.builtin.debug:
msg: msg:
- "Ansible Automation Platform deployment complete!" - "Ansible Automation Platform deployment complete!"
- " Namespace : {{ aap_operator_namespace }}" - " Namespace : {{ aap_operator_namespace }}"
- " Controller : {{ aap_operator_controller_name + ' (enabled)' if aap_operator_controller_enabled else 'disabled' }}" - " Platform CR: {{ aap_operator_platform_name }}"
- " Hub : {{ aap_operator_hub_name + ' (enabled)' if aap_operator_hub_enabled else 'disabled' }}" - " Controller : {{ 'disabled' if aap_operator_controller_disabled else 'enabled' }}"
- " EDA : {{ aap_operator_eda_name + ' (enabled)' if aap_operator_eda_enabled else 'disabled' }}" - " Hub : {{ 'disabled' if aap_operator_hub_disabled else 'enabled' }}"
- " EDA : {{ 'disabled' if aap_operator_eda_disabled else 'enabled' }}"
- "" - ""
- "Admin password secret: {{ aap_operator_controller_name }}-admin-password" - "Admin password secret: {{ aap_operator_platform_name }}-admin-password"
- "Retrieve with: oc get secret {{ aap_operator_controller_name }}-admin-password -n {{ aap_operator_namespace }} -o jsonpath='{.data.password}' | base64 -d" - "Retrieve with: oc get secret {{ aap_operator_platform_name }}-admin-password -n {{ aap_operator_namespace }} -o jsonpath='{.data.password}' | base64 -d"

View File

@@ -0,0 +1,13 @@
---
# Defaults for the lvms_operator role: install the LVM Storage (LVMS)
# operator via OLM and create an LVMCluster with one volume group.
# --- OLM subscription ---
lvms_operator_namespace: openshift-storage  # namespace for operator and LVMCluster
lvms_operator_channel: "stable-4.21"  # OLM subscription channel
lvms_operator_source: redhat-operators  # OLM catalog source
lvms_operator_name: lvms-operator  # package name in the catalog
lvms_operator_wait_timeout: 300  # seconds to wait for operator / LVMCluster readiness
# --- LVMCluster ---
lvms_operator_vg_name: vg-data  # volume group (deviceClass) name
lvms_operator_device_paths:  # block devices backing the volume group
  - /dev/sdb
# LVMS names the generated StorageClass "lvms-<vg_name>"; keep in sync with vg_name above
lvms_operator_storage_class_name: lvms-vg-data

View File

@@ -0,0 +1,42 @@
---
# Argument spec for the lvms_operator role entry point; Ansible validates
# supplied variables against this schema when the role is invoked.
argument_specs:
  main:
    short_description: Install LVMS operator for persistent storage on OpenShift
    description:
      - Installs the LVM Storage operator via OLM and creates an LVMCluster
        with a volume group backed by specified block devices.
    options:
      lvms_operator_namespace:
        description: Namespace for the LVMS operator.
        type: str
        default: openshift-storage
      lvms_operator_channel:
        description: OLM subscription channel.
        type: str
        default: "stable-4.21"
      lvms_operator_source:
        description: OLM catalog source name.
        type: str
        default: redhat-operators
      lvms_operator_name:
        description: Operator package name in the catalog.
        type: str
        default: lvms-operator
      lvms_operator_wait_timeout:
        description: Seconds to wait for operator and LVMCluster to become ready.
        type: int
        default: 300
      lvms_operator_vg_name:
        description: Name of the volume group to create in the LVMCluster.
        type: str
        default: vg-data
      lvms_operator_device_paths:
        description: List of block device paths to include in the volume group.
        type: list
        elements: str
        default:
          - /dev/sdb
      lvms_operator_storage_class_name:
        description: Name of the StorageClass created by LVMS for this volume group.
        type: str
        default: lvms-vg-data

View File

@@ -0,0 +1,18 @@
---
# Galaxy metadata for the lvms_operator role.
galaxy_info:
  author: ptoal
  description: Install LVM Storage (LVMS) operator on OpenShift for persistent volumes
  license: MIT
  min_ansible_version: "2.16"
  platforms:
    - name: GenericLinux
      versions:
        - all
  galaxy_tags:
    - openshift
    - lvms
    - storage
    - operator
    - olm
# Role has no hard dependencies; kubernetes.core is expected via the collection requirements.
dependencies: []

View File

@@ -0,0 +1,135 @@
---
# Install LVM Storage (LVMS) operator via OpenShift OLM.
#
# Creates an LVMCluster with a volume group backed by the specified
# block devices, providing a StorageClass for persistent volume claims.
# All tasks are idempotent (kubernetes.core.k8s state: present).
# ------------------------------------------------------------------
# Step 1: Install LVMS operator via OLM
# ------------------------------------------------------------------
# Namespace must exist before the OperatorGroup/Subscription reference it.
- name: Create LVMS namespace
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: "{{ lvms_operator_namespace }}"
# OperatorGroup scopes the operator to its own namespace only.
- name: Create OperatorGroup for LVMS
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: operators.coreos.com/v1
      kind: OperatorGroup
      metadata:
        name: "{{ lvms_operator_name }}"
        namespace: "{{ lvms_operator_namespace }}"
      spec:
        targetNamespaces:
          - "{{ lvms_operator_namespace }}"
        upgradeStrategy: Default
# Subscription triggers OLM to install the operator from the catalog;
# Automatic approval means upgrades within the channel apply unattended.
- name: Subscribe to LVMS operator
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: operators.coreos.com/v1alpha1
      kind: Subscription
      metadata:
        name: "{{ lvms_operator_name }}"
        namespace: "{{ lvms_operator_namespace }}"
      spec:
        channel: "{{ lvms_operator_channel }}"
        installPlanApproval: Automatic
        name: "{{ lvms_operator_name }}"
        source: "{{ lvms_operator_source }}"
        sourceNamespace: openshift-marketplace
# ------------------------------------------------------------------
# Step 2: Wait for operator to be ready
# ------------------------------------------------------------------
# The CRD appearing is the earliest reliable signal that OLM has installed
# the operator; creating an LVMCluster before this would fail.
- name: Wait for LVMCluster CRD to be available
  kubernetes.core.k8s_info:
    api_version: apiextensions.k8s.io/v1
    kind: CustomResourceDefinition
    name: lvmclusters.lvm.topolvm.io
  register: __lvms_operator_crd
  until: __lvms_operator_crd.resources | length > 0
  retries: "{{ __lvms_operator_wait_retries }}"
  delay: 10
# Selects operator Deployments by the OLM-applied ownership label, then
# requires every matched Deployment to report at least one ready replica.
# rejectattr drops Deployments that have no readyReplicas field yet
# (status not populated), so selectattr never compares against undefined.
- name: Wait for LVMS operator deployment to be ready
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: Deployment
    namespace: "{{ lvms_operator_namespace }}"
    label_selectors:
      - "operators.coreos.com/{{ lvms_operator_name }}.{{ lvms_operator_namespace }}"
  register: __lvms_operator_deploy
  until: >-
    __lvms_operator_deploy.resources | length > 0 and
    (__lvms_operator_deploy.resources
     | rejectattr('status.readyReplicas', 'undefined')
     | selectattr('status.readyReplicas', '>=', 1)
     | list | length) == (__lvms_operator_deploy.resources | length)
  retries: "{{ __lvms_operator_wait_retries }}"
  delay: 10
# ------------------------------------------------------------------
# Step 3: Create LVMCluster
# ------------------------------------------------------------------
# One deviceClass marked default: true so PVCs without an explicit
# storageClassName land on this volume group.
- name: Create LVMCluster
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: lvm.topolvm.io/v1alpha1
      kind: LVMCluster
      metadata:
        name: lvms-cluster
        namespace: "{{ lvms_operator_namespace }}"
      spec:
        storage:
          deviceClasses:
            - name: "{{ lvms_operator_vg_name }}"
              default: true
              deviceSelector:
                paths: "{{ lvms_operator_device_paths }}"
              # Thin pool uses 90% of the VG and allows 10x overprovisioning
              # of thin volumes relative to physical capacity.
              thinPoolConfig:
                name: thin-pool
                sizePercent: 90
                overprovisionRatio: 10
# ------------------------------------------------------------------
# Step 4: Wait for LVMCluster to be ready
# ------------------------------------------------------------------
# LVMS reports overall health in .status.state; 'Ready' means the VG and
# thin pool were created on the selected devices.
- name: Wait for LVMCluster to be ready
  kubernetes.core.k8s_info:
    api_version: lvm.topolvm.io/v1alpha1
    kind: LVMCluster
    namespace: "{{ lvms_operator_namespace }}"
    name: lvms-cluster
  register: __lvms_operator_cluster_status
  until: >-
    __lvms_operator_cluster_status.resources | length > 0 and
    (__lvms_operator_cluster_status.resources[0].status.state | default('')) == 'Ready'
  retries: "{{ __lvms_operator_wait_retries }}"
  delay: 10
# The StorageClass is created asynchronously by the LVMS operator after the
# LVMCluster reports Ready, so a one-shot existence check can race a healthy
# deployment. Poll with until/retries; the task still fails (retries
# exhausted) if the StorageClass never appears.
- name: Verify StorageClass exists
  kubernetes.core.k8s_info:
    api_version: storage.k8s.io/v1
    kind: StorageClass
    name: "{{ lvms_operator_storage_class_name }}"
  register: __lvms_operator_sc
  until: __lvms_operator_sc.resources | length > 0
  retries: "{{ __lvms_operator_wait_retries }}"
  delay: 10
# Human-readable recap of what was provisioned; no state changes.
- name: Display LVMS summary
  ansible.builtin.debug:
    msg:
      - "LVM Storage deployment complete!"
      - "  Namespace     : {{ lvms_operator_namespace }}"
      - "  Volume Group  : {{ lvms_operator_vg_name }}"
      - "  Device Paths  : {{ lvms_operator_device_paths | join(', ') }}"
      - "  StorageClass  : {{ lvms_operator_storage_class_name }}"

View File

@@ -0,0 +1,3 @@
---
# Computed internal variables - do not override
# Number of 10-second polls that together cover lvms_operator_wait_timeout
# (matches the delay: 10 used on all wait tasks).
__lvms_operator_wait_retries: "{{ (lvms_operator_wait_timeout / 10) | int }}"

View File

@@ -0,0 +1,23 @@
---
# Defaults for the nfs_provisioner role. Two modes:
#   external  - point the provisioner at a pre-existing NFS share
#   in-cluster - deploy an NFS server pod backed by an RWO PVC (default)
# --- Namespace ---
nfs_provisioner_namespace: nfs-provisioner
# --- External NFS server (set these to use a pre-existing NFS share) ---
# When nfs_provisioner_external_server is set, the in-cluster NFS server is
# not deployed; the provisioner points directly at the external share.
nfs_provisioner_external_server: ""  # e.g. 192.168.129.100
nfs_provisioner_external_path: ""  # e.g. /mnt/BIGPOOL/NoBackups/OCPNFS
# --- Backing storage for in-cluster NFS server (ignored when external_server is set) ---
nfs_provisioner_storage_class: lvms-vg-data  # RWO class backing the server PVC
nfs_provisioner_storage_size: 50Gi
nfs_provisioner_server_image: registry.k8s.io/volume-nfs:0.8
nfs_provisioner_export_path: /exports  # path exported by the in-cluster server
# --- NFS provisioner ---
nfs_provisioner_name: nfs-client  # provisioner name written into the StorageClass
nfs_provisioner_storage_class_name: nfs-client  # RWX StorageClass name
nfs_provisioner_image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
# --- Wait ---
nfs_provisioner_wait_timeout: 300  # seconds to wait for deployments to become ready

View File

@@ -0,0 +1,67 @@
---
# Argument spec for the nfs_provisioner role entry point; Ansible validates
# supplied variables against this schema when the role is invoked.
argument_specs:
  main:
    short_description: Deploy NFS provisioner (external or in-cluster) for RWX storage on OpenShift
    description:
      - Deploys the nfs-subdir-external-provisioner and a ReadWriteMany StorageClass.
      - When nfs_provisioner_external_server is set, points directly at a pre-existing
        NFS share (no in-cluster NFS server pod is deployed).
      - When nfs_provisioner_external_server is empty, deploys an in-cluster NFS server
        pod backed by an LVMS PVC.
    options:
      nfs_provisioner_namespace:
        description: Namespace for the NFS provisioner (and optional in-cluster NFS server).
        type: str
        default: nfs-provisioner
      nfs_provisioner_external_server:
        description: >-
          IP or hostname of a pre-existing external NFS server. When set, the
          in-cluster NFS server pod is not deployed. Leave empty to use in-cluster mode.
        type: str
        default: ""
      nfs_provisioner_external_path:
        description: >-
          Exported path on the external NFS server.
          Required when nfs_provisioner_external_server is set.
        type: str
        default: ""
      nfs_provisioner_storage_class:
        description: >-
          StorageClass (RWO) for the in-cluster NFS server backing PVC.
          Ignored when nfs_provisioner_external_server is set.
        type: str
        default: lvms-vg-data
      nfs_provisioner_storage_size:
        description: >-
          Size of the in-cluster NFS server backing PVC.
          Ignored when nfs_provisioner_external_server is set.
        type: str
        default: 50Gi
      nfs_provisioner_name:
        description: Provisioner name written into the StorageClass.
        type: str
        default: nfs-client
      nfs_provisioner_storage_class_name:
        description: Name of the RWX StorageClass created by this role.
        type: str
        default: nfs-client
      nfs_provisioner_image:
        description: Container image for the nfs-subdir-external-provisioner.
        type: str
        default: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
      nfs_provisioner_server_image:
        description: >-
          Container image for the in-cluster NFS server.
          Ignored when nfs_provisioner_external_server is set.
        type: str
        default: registry.k8s.io/volume-nfs:0.8
      nfs_provisioner_export_path:
        description: >-
          Path exported by the in-cluster NFS server.
          Ignored when nfs_provisioner_external_server is set.
        type: str
        default: /exports
      nfs_provisioner_wait_timeout:
        description: Seconds to wait for deployments to become ready.
        type: int
        default: 300

View File

@@ -0,0 +1,17 @@
---
# Galaxy metadata for the nfs_provisioner role.
galaxy_info:
  author: ptoal
  description: Deploy in-cluster NFS server and provisioner for ReadWriteMany storage on OpenShift
  license: MIT
  min_ansible_version: "2.16"
  platforms:
    - name: GenericLinux
      versions:
        - all
  galaxy_tags:
    - openshift
    - nfs
    - storage
    - provisioner
# Role has no hard dependencies; kubernetes.core is expected via the collection requirements.
dependencies: []

View File

@@ -0,0 +1,394 @@
---
# Deploy nfs-subdir-external-provisioner on OpenShift, backed by either:
# (a) an external NFS server (set nfs_provisioner_external_server / nfs_provisioner_external_path)
# (b) an in-cluster NFS server pod backed by an LVMS RWO PVC (default)
#
# Architecture (in-cluster mode):
# - NFS server StatefulSet: backs exports with an LVMS RWO PVC
# - Service: exposes NFS server at a stable ClusterIP
# - nfs-subdir-external-provisioner: creates PVs on-demand under the export path
# - StorageClass: "nfs-client" with ReadWriteMany support
#
# Architecture (external mode, nfs_provisioner_external_server != ""):
# - In-cluster NFS server is NOT deployed
# - nfs-subdir-external-provisioner points directly at the external NFS share
# - StorageClass: "nfs-client" with ReadWriteMany support
#
# The in-cluster NFS server requires privileged SCC on OpenShift (kernel NFS).
# All tasks are idempotent (kubernetes.core.k8s state: present).
# ------------------------------------------------------------------
# Step 1: Namespace and RBAC
# ------------------------------------------------------------------
# Fail fast on an inconsistent external-mode configuration: with a server
# set but no export path, the role would otherwise deploy a provisioner
# pointed at an empty NFS path and fail much later at mount time.
- name: Validate external NFS configuration
  ansible.builtin.assert:
    that:
      - nfs_provisioner_external_path | length > 0
    fail_msg: >-
      nfs_provisioner_external_path must be set when
      nfs_provisioner_external_server is set
  when: nfs_provisioner_external_server | length > 0
# Namespace must exist before any namespaced resources below.
- name: Create NFS provisioner namespace
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: "{{ nfs_provisioner_namespace }}"
# ServiceAccount for the in-cluster NFS server pod (in-cluster mode only).
- name: Create NFS server ServiceAccount
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: nfs-server
        namespace: "{{ nfs_provisioner_namespace }}"
  when: nfs_provisioner_external_server | length == 0
# ServiceAccount for the provisioner Deployment (both modes).
- name: Create NFS provisioner ServiceAccount
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: nfs-provisioner
        namespace: "{{ nfs_provisioner_namespace }}"
# Kernel NFS serving needs the privileged SCC on OpenShift; grant "use"
# on that SCC to the server's ServiceAccount via ClusterRole + binding.
- name: Create ClusterRole to use privileged SCC (NFS server)
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: nfs-server-scc
      rules:
        - apiGroups: [security.openshift.io]
          resources: [securitycontextconstraints]
          verbs: [use]
          resourceNames: [privileged]
  when: nfs_provisioner_external_server | length == 0
- name: Bind privileged SCC ClusterRole to NFS server ServiceAccount
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: nfs-server-scc
      subjects:
        - kind: ServiceAccount
          name: nfs-server
          namespace: "{{ nfs_provisioner_namespace }}"
      roleRef:
        kind: ClusterRole
        name: nfs-server-scc
        apiGroup: rbac.authorization.k8s.io
  when: nfs_provisioner_external_server | length == 0
# The provisioner mounts the NFS export as a hostPath-style volume, so it
# needs the hostmount-anyuid SCC (created unconditionally: both modes).
- name: Create ClusterRole to use hostmount-anyuid SCC (NFS provisioner)
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: nfs-provisioner-scc
      rules:
        - apiGroups: [security.openshift.io]
          resources: [securitycontextconstraints]
          verbs: [use]
          resourceNames: [hostmount-anyuid]
- name: Bind hostmount-anyuid SCC ClusterRole to NFS provisioner ServiceAccount
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: nfs-provisioner-scc
      subjects:
        - kind: ServiceAccount
          name: nfs-provisioner
          namespace: "{{ nfs_provisioner_namespace }}"
      roleRef:
        kind: ClusterRole
        name: nfs-provisioner-scc
        apiGroup: rbac.authorization.k8s.io
# Core provisioner permissions: manage PVs, watch PVCs/StorageClasses,
# and emit events (mirrors the upstream nfs-subdir RBAC manifest).
- name: Create ClusterRole for NFS provisioner
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: nfs-provisioner-runner
      rules:
        - apiGroups: [""]
          resources: [persistentvolumes]
          verbs: [get, list, watch, create, delete]
        - apiGroups: [""]
          resources: [persistentvolumeclaims]
          verbs: [get, list, watch, update]
        - apiGroups: [storage.k8s.io]
          resources: [storageclasses]
          verbs: [get, list, watch]
        - apiGroups: [""]
          resources: [events]
          verbs: [create, update, patch]
- name: Bind ClusterRole to NFS provisioner ServiceAccount
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: run-nfs-provisioner
      subjects:
        - kind: ServiceAccount
          name: nfs-provisioner
          namespace: "{{ nfs_provisioner_namespace }}"
      roleRef:
        kind: ClusterRole
        name: nfs-provisioner-runner
        apiGroup: rbac.authorization.k8s.io
# Leader election uses endpoints in the provisioner's own namespace.
- name: Create Role for leader election
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: Role
      metadata:
        name: leader-locking-nfs-provisioner
        namespace: "{{ nfs_provisioner_namespace }}"
      rules:
        - apiGroups: [""]
          resources: [endpoints]
          verbs: [get, list, watch, create, update, patch]
- name: Bind leader election Role
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: RoleBinding
      metadata:
        name: leader-locking-nfs-provisioner
        namespace: "{{ nfs_provisioner_namespace }}"
      subjects:
        - kind: ServiceAccount
          name: nfs-provisioner
          namespace: "{{ nfs_provisioner_namespace }}"
      roleRef:
        kind: Role
        name: leader-locking-nfs-provisioner
        apiGroup: rbac.authorization.k8s.io
# ------------------------------------------------------------------
# Step 2: NFS server backing storage and StatefulSet (in-cluster mode only)
# ------------------------------------------------------------------
# RWO PVC that holds the actual exported data (in-cluster mode only).
- name: Create NFS server backing PVC
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        name: nfs-server-data
        namespace: "{{ nfs_provisioner_namespace }}"
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: "{{ nfs_provisioner_storage_size }}"
        storageClassName: "{{ nfs_provisioner_storage_class }}"
  when: nfs_provisioner_external_server | length == 0
# Single-replica StatefulSet: the RWO backing PVC can only be attached to
# one pod, and the privileged securityContext is required for kernel NFS.
- name: Deploy NFS server StatefulSet
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: apps/v1
      kind: StatefulSet
      metadata:
        name: nfs-server
        namespace: "{{ nfs_provisioner_namespace }}"
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: nfs-server
        serviceName: nfs-server
        template:
          metadata:
            labels:
              app: nfs-server
          spec:
            serviceAccountName: nfs-server
            containers:
              - name: nfs-server
                image: "{{ nfs_provisioner_server_image }}"
                ports:
                  - name: nfs
                    containerPort: 2049
                  - name: mountd
                    containerPort: 20048
                  - name: rpcbind
                    containerPort: 111
                securityContext:
                  privileged: true
                volumeMounts:
                  - name: nfs-data
                    mountPath: "{{ nfs_provisioner_export_path }}"
            volumes:
              - name: nfs-data
                persistentVolumeClaim:
                  claimName: nfs-server-data
  when: nfs_provisioner_external_server | length == 0
# Stable ClusterIP for NFS clients; the provisioner mounts via this IP.
- name: Create NFS server Service
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Service
      metadata:
        name: nfs-server
        namespace: "{{ nfs_provisioner_namespace }}"
      spec:
        selector:
          app: nfs-server
        ports:
          - name: nfs
            port: 2049
          - name: mountd
            port: 20048
          - name: rpcbind
            port: 111
  when: nfs_provisioner_external_server | length == 0
# ------------------------------------------------------------------
# Step 3: Wait for in-cluster NFS server to be ready (in-cluster mode only)
# ------------------------------------------------------------------
# Block until the in-cluster NFS server pod is serving (in-cluster mode only).
- name: Wait for NFS server to be ready
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: StatefulSet
    namespace: "{{ nfs_provisioner_namespace }}"
    name: nfs-server
  register: __nfs_provisioner_server_status
  until: >-
    __nfs_provisioner_server_status.resources | length > 0 and
    (__nfs_provisioner_server_status.resources[0].status.readyReplicas | default(0)) >= 1
  retries: "{{ __nfs_provisioner_wait_retries }}"
  delay: 10
  when: nfs_provisioner_external_server | length == 0
# ------------------------------------------------------------------
# Step 4: Resolve NFS server address, then deploy nfs-subdir-external-provisioner
# ------------------------------------------------------------------
# Exactly one of the next two set_fact tasks runs, leaving
# __nfs_provisioner_server_addr / __nfs_provisioner_server_path defined.
- name: Set NFS server address (external)
  ansible.builtin.set_fact:
    __nfs_provisioner_server_addr: "{{ nfs_provisioner_external_server }}"
    __nfs_provisioner_server_path: "{{ nfs_provisioner_external_path }}"
  when: nfs_provisioner_external_server | length > 0
# ClusterIP (not the Service DNS name) is used because the kernel NFS mount
# on the node does not resolve cluster DNS.
- name: Retrieve in-cluster NFS server ClusterIP
  kubernetes.core.k8s_info:
    api_version: v1
    kind: Service
    namespace: "{{ nfs_provisioner_namespace }}"
    name: nfs-server
  register: __nfs_provisioner_svc
  when: nfs_provisioner_external_server | length == 0
- name: Set NFS server address (in-cluster)
  ansible.builtin.set_fact:
    __nfs_provisioner_server_addr: "{{ __nfs_provisioner_svc.resources[0].spec.clusterIP }}"
    __nfs_provisioner_server_path: "{{ nfs_provisioner_export_path }}"
  when: nfs_provisioner_external_server | length == 0
# Recreate strategy: only one provisioner instance may hold the NFS root
# mount at a time, so the old pod must stop before the new one starts.
- name: Deploy nfs-subdir-external-provisioner
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: nfs-provisioner
        namespace: "{{ nfs_provisioner_namespace }}"
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: nfs-provisioner
        strategy:
          type: Recreate
        template:
          metadata:
            labels:
              app: nfs-provisioner
          spec:
            serviceAccountName: nfs-provisioner
            containers:
              - name: nfs-provisioner
                image: "{{ nfs_provisioner_image }}"
                env:
                  # PROVISIONER_NAME must match the StorageClass "provisioner" field.
                  - name: PROVISIONER_NAME
                    value: "{{ nfs_provisioner_name }}"
                  - name: NFS_SERVER
                    value: "{{ __nfs_provisioner_server_addr }}"
                  - name: NFS_PATH
                    value: "{{ __nfs_provisioner_server_path }}"
                volumeMounts:
                  - name: nfs-client-root
                    mountPath: /persistentvolumes
            volumes:
              - name: nfs-client-root
                nfs:
                  server: "{{ __nfs_provisioner_server_addr }}"
                  path: "{{ __nfs_provisioner_server_path }}"
# ------------------------------------------------------------------
# Step 5: Create StorageClass
# ------------------------------------------------------------------
# RWX-capable StorageClass served by the provisioner above.
# archiveOnDelete "false": released directories are removed, not archived.
- name: Create NFS StorageClass
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: storage.k8s.io/v1
      kind: StorageClass
      metadata:
        name: "{{ nfs_provisioner_storage_class_name }}"
      provisioner: "{{ nfs_provisioner_name }}"
      parameters:
        archiveOnDelete: "false"
      reclaimPolicy: Delete
      volumeBindingMode: Immediate
# ------------------------------------------------------------------
# Step 6: Wait for provisioner to be ready
# ------------------------------------------------------------------
# A ready provisioner pod implies the NFS root mount succeeded.
- name: Wait for NFS provisioner deployment to be ready
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: Deployment
    namespace: "{{ nfs_provisioner_namespace }}"
    name: nfs-provisioner
  register: __nfs_provisioner_deploy_status
  until: >-
    __nfs_provisioner_deploy_status.resources | length > 0 and
    (__nfs_provisioner_deploy_status.resources[0].status.readyReplicas | default(0)) >= 1
  retries: "{{ __nfs_provisioner_wait_retries }}"
  delay: 10
# Human-readable recap; no state changes.
- name: Display NFS provisioner summary
  ansible.builtin.debug:
    msg:
      - "NFS provisioner deployment complete!"
      - "  Namespace     : {{ nfs_provisioner_namespace }}"
      - "  NFS server    : {{ __nfs_provisioner_server_addr }}:{{ __nfs_provisioner_server_path }}"
      - "  Mode          : {{ 'external' if nfs_provisioner_external_server | length > 0 else 'in-cluster (LVMS-backed)' }}"
      - "  StorageClass  : {{ nfs_provisioner_storage_class_name }} (ReadWriteMany)"

View File

@@ -0,0 +1,3 @@
---
# Computed internal variables - do not override
# Number of 10-second polls that together cover nfs_provisioner_wait_timeout
# (matches the delay: 10 used on all wait tasks).
__nfs_provisioner_wait_retries: "{{ (nfs_provisioner_wait_timeout / 10) | int }}"

View File

@@ -0,0 +1,6 @@
---
# Defaults for the ocp_service_account role. The three commented-out
# variables below have no default and MUST be supplied by the caller
# (see meta/argument_specs.yml).
# ocp_service_account_name: ""                  # required — SA and ClusterRole name
# ocp_service_account_namespace: ""             # required — namespace for SA and token secret
# ocp_service_account_cluster_role_rules: []    # required — list of RBAC policy rules
ocp_service_account_create_namespace: true  # create the namespace if missing

View File

@@ -0,0 +1,29 @@
---
# Argument spec for the ocp_service_account role entry point; Ansible
# validates supplied variables against this schema when the role is invoked.
argument_specs:
  main:
    short_description: Create an OpenShift ServiceAccount with scoped ClusterRole
    description:
      - Creates a ServiceAccount, ClusterRole, ClusterRoleBinding, and a
        long-lived token Secret. The token is registered as
        __ocp_service_account_token for downstream use.
    options:
      ocp_service_account_name:
        description: Name for the ServiceAccount, ClusterRole, and ClusterRoleBinding.
        type: str
        required: true
      ocp_service_account_namespace:
        description: Namespace where the ServiceAccount and token Secret are created.
        type: str
        required: true
      ocp_service_account_cluster_role_rules:
        description: >-
          List of RBAC policy rules for the ClusterRole.
          Each item follows the Kubernetes PolicyRule schema
          (apiGroups, resources, verbs).
        type: list
        elements: dict
        required: true
      ocp_service_account_create_namespace:
        description: Whether to create the namespace if it does not exist.
        type: bool
        default: true

View File

@@ -0,0 +1,16 @@
---
# Galaxy metadata for the ocp_service_account role.
galaxy_info:
  author: ptoal
  description: Create an OpenShift ServiceAccount with ClusterRole and long-lived token
  license: MIT
  min_ansible_version: "2.16"
  platforms:
    - name: GenericLinux
      versions:
        - all
  galaxy_tags:
    - openshift
    - rbac
    - serviceaccount
# Role has no hard dependencies; kubernetes.core is expected via the collection requirements.
dependencies: []

View File

@@ -0,0 +1,111 @@
---
# Create an OpenShift ServiceAccount with a scoped ClusterRole and long-lived token.
#
# Requires: ocp_service_account_name, ocp_service_account_namespace,
# ocp_service_account_cluster_role_rules
#
# Registers: __ocp_service_account_token (decoded bearer token)
# Guard each "length" check with "is defined": piping an undefined variable
# through "| length" raises a raw templating error, hiding the assert's
# fail_msg. With the guard, a missing variable produces the clear message.
- name: Validate required variables
  ansible.builtin.assert:
    that:
      - ocp_service_account_name is defined
      - ocp_service_account_name | length > 0
      - ocp_service_account_namespace is defined
      - ocp_service_account_namespace | length > 0
      - ocp_service_account_cluster_role_rules is defined
      - ocp_service_account_cluster_role_rules | length > 0
    fail_msg: "ocp_service_account_name, ocp_service_account_namespace, and ocp_service_account_cluster_role_rules are required"
# Namespace creation is optional (skipped when the caller manages it).
- name: Create namespace {{ ocp_service_account_namespace }}
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: "{{ ocp_service_account_namespace }}"
  when: ocp_service_account_create_namespace | bool
- name: Create ServiceAccount {{ ocp_service_account_name }}
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: "{{ ocp_service_account_name }}"
        namespace: "{{ ocp_service_account_namespace }}"
        labels:
          app.kubernetes.io/managed-by: ocp-service-account-role
# ClusterRole rules are supplied verbatim by the caller (PolicyRule schema).
- name: Create ClusterRole {{ ocp_service_account_name }}
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: "{{ ocp_service_account_name }}"
        labels:
          app.kubernetes.io/managed-by: ocp-service-account-role
      rules: "{{ ocp_service_account_cluster_role_rules }}"
- name: Create ClusterRoleBinding {{ ocp_service_account_name }}
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: "{{ ocp_service_account_name }}"
        labels:
          app.kubernetes.io/managed-by: ocp-service-account-role
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: "{{ ocp_service_account_name }}"
      subjects:
        - kind: ServiceAccount
          name: "{{ ocp_service_account_name }}"
          namespace: "{{ ocp_service_account_namespace }}"
# The service-account-token Secret type makes the control plane mint a
# non-expiring token for the SA. NOTE(review): this is a long-lived
# credential — ensure rotation/revocation is handled operationally.
- name: Create long-lived token Secret for {{ ocp_service_account_name }}
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Secret
      metadata:
        name: "{{ ocp_service_account_name }}-token"
        namespace: "{{ ocp_service_account_namespace }}"
        labels:
          app.kubernetes.io/managed-by: ocp-service-account-role
          app.kubernetes.io/instance: "{{ ocp_service_account_name }}"
        annotations:
          kubernetes.io/service-account.name: "{{ ocp_service_account_name }}"
      type: kubernetes.io/service-account-token
# The token controller populates .data.token asynchronously; poll up to
# 12 x 5s = 60s. The "and" short-circuits, so resources[0] is never
# indexed while the list is empty.
- name: Wait for token to be populated
  kubernetes.core.k8s_info:
    api_version: v1
    kind: Secret
    namespace: "{{ ocp_service_account_namespace }}"
    name: "{{ ocp_service_account_name }}-token"
  register: __ocp_sa_token_secret
  until: >-
    __ocp_sa_token_secret.resources | length > 0 and
    (__ocp_sa_token_secret.resources[0].data.token | default('') | length > 0)
  retries: 12
  delay: 5
# no_log keeps the decoded token out of this task's output.
- name: Register SA token for downstream use
  ansible.builtin.set_fact:
    __ocp_service_account_token: "{{ __ocp_sa_token_secret.resources[0].data.token | b64decode }}"
  no_log: true
# NOTE(review): this deliberately prints the bearer token in clear text so
# the operator can store it in a vault — it will land in playbook output
# and any log aggregation. Confirm this trade-off is acceptable; the
# no_log above is otherwise defeated here.
- name: Display SA token for vault storage
  ansible.builtin.debug:
    msg:
      - "*** SERVICE ACCOUNT TOKEN — SAVE TO 1PASSWORD ***"
      - "ServiceAccount: {{ ocp_service_account_name }} ({{ ocp_service_account_namespace }})"
      - "Vault variable: vault_{{ ocp_service_account_name | regex_replace('-', '_') }}_token"
      - ""
      - "Token: {{ __ocp_service_account_token }}"

View File

@@ -19,11 +19,16 @@ sno_vm_name: "sno-{{ ocp_cluster_name }}"
sno_cpu: 8 sno_cpu: 8
sno_memory_mb: 32768 sno_memory_mb: 32768
sno_disk_gb: 120 sno_disk_gb: 120
sno_bridge: vmbr0 sno_pvc_disk_gb: 100
sno_vlan: 40 sno_vnet: ocp
sno_mac: "" sno_mac: "" # populated after VM creation; set here to pin MAC
sno_vm_id: 0 sno_vm_id: 0
sno_storage_ip: ""
sno_storage_ip_prefix_length: 24
sno_storage_vnet: storage
sno_storage_mac: "" # populated after VM creation; set here to pin MAC
# --- Installer --- # --- Installer ---
sno_install_dir: "/tmp/sno-{{ ocp_cluster_name }}" sno_install_dir: "/tmp/sno-{{ ocp_cluster_name }}"
sno_iso_filename: agent.x86_64.iso sno_iso_filename: agent.x86_64.iso

View File

@@ -62,17 +62,34 @@ argument_specs:
description: Primary disk size in gigabytes. description: Primary disk size in gigabytes.
type: int type: int
default: 120 default: 120
sno_bridge: sno_vnet:
description: Proxmox network bridge for the VM NIC. description: Proxmox SDN VNet name for the primary (OCP) NIC.
type: str type: str
default: vmbr0 default: ocp
sno_vlan:
description: VLAN tag for the VM NIC.
type: int
default: 40
sno_mac: sno_mac:
description: >- description: >-
MAC address to assign. Leave empty for auto-assignment by Proxmox. MAC address for the primary NIC. Leave empty for auto-assignment by Proxmox.
Set here to pin the MAC across VM recreations.
type: str
default: ""
sno_storage_ip:
description: >-
IP address for the secondary storage NIC. Leave empty to skip storage
interface configuration in agent-config.
type: str
default: ""
sno_storage_ip_prefix_length:
description: Prefix length for the storage NIC IP address.
type: int
default: 24
sno_storage_vnet:
description: Proxmox SDN VNet name for the secondary storage NIC.
type: str
default: storage
sno_storage_mac:
description: >-
MAC address for the storage NIC. Leave empty for auto-assignment by Proxmox.
Set here to pin the MAC across VM recreations.
type: str type: str
default: "" default: ""
sno_vm_id: sno_vm_id:

View File

@@ -8,7 +8,14 @@
__sno_deploy_net0: >- __sno_deploy_net0: >-
virtio{{ virtio{{
'=' + sno_mac if sno_mac | length > 0 else '' '=' + sno_mac if sno_mac | length > 0 else ''
}},bridge={{ sno_bridge }},tag={{ sno_vlan }} }},bridge={{ sno_vnet }}
- name: Build net1 (storage) string
ansible.builtin.set_fact:
__sno_deploy_net1: >-
virtio{{
'=' + sno_storage_mac if sno_storage_mac | length > 0 else ''
}},bridge={{ sno_storage_vnet }}
- name: Create SNO VM in Proxmox - name: Create SNO VM in Proxmox
community.proxmox.proxmox_kvm: community.proxmox.proxmox_kvm:
@@ -34,11 +41,13 @@
pre_enrolled_keys: false pre_enrolled_keys: false
scsi: scsi:
scsi0: "{{ proxmox_storage }}:{{ sno_disk_gb }},format=raw,iothread=1,cache=writeback" scsi0: "{{ proxmox_storage }}:{{ sno_disk_gb }},format=raw,iothread=1,cache=writeback"
scsi1: "{{ proxmox_storage }}:{{ sno_pvc_disk_gb }},format=raw,iothread=1,cache=writeback"
scsihw: virtio-scsi-single scsihw: virtio-scsi-single
ide: ide:
ide2: none,media=cdrom ide2: none,media=cdrom
net: net:
net0: "{{ __sno_deploy_net0 }}" net0: "{{ __sno_deploy_net0 }}"
net1: "{{ __sno_deploy_net1 }}"
boot: "order=scsi0;ide2" boot: "order=scsi0;ide2"
onboot: true onboot: true
state: present state: present
@@ -73,10 +82,20 @@
cacheable: true cacheable: true
when: sno_mac | length == 0 when: sno_mac | length == 0
- name: Extract storage MAC address from VM config
ansible.builtin.set_fact:
sno_storage_mac: >-
{{ __sno_deploy_vm_info.proxmox_vms[0].config.net1
| regex_search('([0-9A-Fa-f]{2}(?::[0-9A-Fa-f]{2}){5})', '\1')
| first }}
cacheable: true
when: sno_storage_mac | length == 0
- name: Display VM details - name: Display VM details
ansible.builtin.debug: ansible.builtin.debug:
msg: msg:
- "VM Name : {{ sno_vm_name }}" - "VM Name : {{ sno_vm_name }}"
- "VM ID : {{ sno_vm_id }}" - "VM ID : {{ sno_vm_id }}"
- "MAC : {{ sno_mac }}" - "MAC (net0) : {{ sno_mac }}"
- "MAC (net1) : {{ sno_storage_mac }}"
verbosity: 1 verbosity: 1

View File

@@ -11,6 +11,10 @@ hosts:
interfaces: interfaces:
- name: primary - name: primary
macAddress: "{{ sno_mac }}" macAddress: "{{ sno_mac }}"
{% if sno_storage_ip | length > 0 %}
- name: storage
macAddress: "{{ sno_storage_mac }}"
{% endif %}
networkConfig: networkConfig:
interfaces: interfaces:
- name: primary - name: primary
@@ -23,6 +27,18 @@ hosts:
- ip: {{ sno_ip }} - ip: {{ sno_ip }}
prefix-length: {{ sno_prefix_length }} prefix-length: {{ sno_prefix_length }}
dhcp: false dhcp: false
{% if sno_storage_ip | length > 0 %}
- name: storage
type: ethernet
state: up
mac-address: "{{ sno_storage_mac }}"
ipv4:
enabled: true
address:
- ip: {{ sno_storage_ip }}
prefix-length: {{ sno_storage_ip_prefix_length }}
dhcp: false
{% endif %}
dns-resolver: dns-resolver:
config: config:
server: server:

View File

@@ -23,6 +23,7 @@ fi
# Skip silently for the default vault ID (no named vault to look up) # Skip silently for the default vault ID (no named vault to look up)
if [[ "$VAULT_ID" == "default" ]]; then if [[ "$VAULT_ID" == "default" ]]; then
echo "default"
exit 0 exit 0
fi fi