Configure OIDC, make roles idempotent, and fix bugs. (Authored with assistance from Claude.ai)

This commit is contained in:
2026-02-25 13:20:12 -05:00
parent 995b7c4070
commit d981b69669
23 changed files with 2269 additions and 760 deletions

View File

@@ -0,0 +1,23 @@
---
# Default variables for the aap_operator role. All are overridable;
# see meta/argument_specs.yml for full descriptions.
# --- OLM subscription ---
aap_operator_namespace: aap
# Channel must exist in the catalog source below.
aap_operator_channel: "stable-2.6"
aap_operator_source: redhat-operators
aap_operator_name: ansible-automation-platform-operator
# Seconds to wait for the operator and each instance to become ready.
aap_operator_wait_timeout: 600
# --- Automation Controller ---
aap_operator_controller_enabled: true
aap_operator_controller_name: controller
aap_operator_controller_replicas: 1
# --- Automation Hub ---
aap_operator_hub_enabled: true
aap_operator_hub_name: hub
# --- Event-Driven Ansible (EDA) ---
aap_operator_eda_enabled: true
aap_operator_eda_name: eda
# --- Admin ---
# Admin username applied to the AutomationController CR spec.
aap_operator_admin_user: admin

View File

@@ -0,0 +1,60 @@
---
# Role argument specification for aap_operator; Ansible validates the
# role's variables against this spec automatically on role entry.
argument_specs:
  main:
    short_description: Install AAP via OpenShift OLM operator
    description:
      - Installs the Ansible Automation Platform operator via OLM and
        creates AutomationController, AutomationHub, and EDA instances.
    options:
      aap_operator_namespace:
        description: Namespace for the AAP operator and instances.
        type: str
        default: aap
      aap_operator_channel:
        description: OLM subscription channel.
        type: str
        default: "stable-2.6"
      aap_operator_source:
        description: OLM catalog source name.
        type: str
        default: redhat-operators
      aap_operator_name:
        description: Operator package name in the catalog.
        type: str
        default: ansible-automation-platform-operator
      aap_operator_wait_timeout:
        description: Seconds to wait for operator and instances to become ready.
        type: int
        default: 600
      aap_operator_controller_enabled:
        description: Whether to create an AutomationController instance.
        type: bool
        default: true
      aap_operator_controller_name:
        description: Name of the AutomationController CR.
        type: str
        default: controller
      aap_operator_controller_replicas:
        description: Number of Controller replicas.
        type: int
        default: 1
      aap_operator_hub_enabled:
        description: Whether to create an AutomationHub instance.
        type: bool
        default: true
      aap_operator_hub_name:
        description: Name of the AutomationHub CR.
        type: str
        default: hub
      aap_operator_eda_enabled:
        description: Whether to create an EDA Controller instance.
        type: bool
        default: true
      aap_operator_eda_name:
        description: Name of the EDA CR.
        type: str
        default: eda
      aap_operator_admin_user:
        description: Admin username for Controller and Hub.
        type: str
        default: admin

View File

@@ -0,0 +1,18 @@
---
# Ansible Galaxy metadata for the aap_operator role.
galaxy_info:
  author: ptoal
  description: Install Ansible Automation Platform via OpenShift OLM operator
  license: MIT
  min_ansible_version: "2.16"
  platforms:
    - name: GenericLinux
      versions:
        - all
  galaxy_tags:
    - openshift
    - aap
    - operator
    - olm
    - ansible
dependencies: []

View File

@@ -0,0 +1,189 @@
---
# Install Ansible Automation Platform via OpenShift OLM operator.
#
# Deploys the AAP operator, then creates AutomationController,
# AutomationHub, and EDA instances based on enabled flags.
# All tasks are idempotent (kubernetes.core.k8s state: present).
# ------------------------------------------------------------------
# Step 1: Install AAP operator via OLM
# ------------------------------------------------------------------
- name: Create AAP namespace
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: "{{ aap_operator_namespace }}"

# NOTE(review): no spec.targetNamespaces is set, so OLM treats this as an
# AllNamespaces-mode OperatorGroup — confirm that is intended; Red Hat's
# AAP install docs typically scope the group to the operator's own namespace.
- name: Create OperatorGroup for AAP
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: operators.coreos.com/v1
      kind: OperatorGroup
      metadata:
        name: "{{ aap_operator_name }}"
        namespace: "{{ aap_operator_namespace }}"
      spec:
        upgradeStrategy: Default

# Automatic installPlanApproval lets the operator upgrade itself within
# the subscribed channel without manual review.
- name: Subscribe to AAP operator
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: operators.coreos.com/v1alpha1
      kind: Subscription
      metadata:
        name: "{{ aap_operator_name }}"
        namespace: "{{ aap_operator_namespace }}"
      spec:
        channel: "{{ aap_operator_channel }}"
        installPlanApproval: Automatic
        name: "{{ aap_operator_name }}"
        source: "{{ aap_operator_source }}"
        sourceNamespace: openshift-marketplace
# ------------------------------------------------------------------
# Step 2: Wait for operator to be ready
# ------------------------------------------------------------------
# The CRD appearing confirms OLM has unpacked and installed the bundle.
- name: Wait for AutomationController CRD to be available
  kubernetes.core.k8s_info:
    api_version: apiextensions.k8s.io/v1
    kind: CustomResourceDefinition
    name: automationcontrollers.automationcontroller.ansible.com
  register: __aap_operator_crd
  until: __aap_operator_crd.resources | length > 0
  retries: "{{ __aap_operator_wait_retries }}"
  delay: 10

# NOTE(review): assumes the operator Deployment carries the label
# app.kubernetes.io/name=<package name> — verify against the installed CSV;
# some operator bundles label control-plane=controller-manager instead.
- name: Wait for AAP operator deployment to be ready
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: Deployment
    namespace: "{{ aap_operator_namespace }}"
    label_selectors:
      - "app.kubernetes.io/name={{ aap_operator_name }}"
  register: __aap_operator_deploy
  until: >-
    __aap_operator_deploy.resources | length > 0 and
    (__aap_operator_deploy.resources[0].status.readyReplicas | default(0)) >= 1
  retries: "{{ __aap_operator_wait_retries }}"
  delay: 10
# ------------------------------------------------------------------
# Step 3: Create AutomationController instance
# ------------------------------------------------------------------
# The operator generates an admin password secret named
# "<name>-admin-password" (referenced in the Step 7 summary).
- name: Create AutomationController instance
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: automationcontroller.ansible.com/v1beta1
      kind: AutomationController
      metadata:
        name: "{{ aap_operator_controller_name }}"
        namespace: "{{ aap_operator_namespace }}"
      spec:
        replicas: "{{ aap_operator_controller_replicas }}"
        admin_user: "{{ aap_operator_admin_user }}"
  when: aap_operator_controller_enabled | bool
# ------------------------------------------------------------------
# Step 4: Create AutomationHub instance
# ------------------------------------------------------------------
# Fix: the spec previously set admin_password_secret: "" and route_host: "",
# which explicitly overrides the operator defaults with empty values (e.g. a
# lookup of a Secret named ""). Omitting the fields lets the operator
# generate its own admin password secret and default route hostname.
- name: Create AutomationHub instance
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: automationhub.ansible.com/v1beta1
      kind: AutomationHub
      metadata:
        name: "{{ aap_operator_hub_name }}"
        namespace: "{{ aap_operator_namespace }}"
      spec: {}
  when: aap_operator_hub_enabled | bool
# ------------------------------------------------------------------
# Step 5: Create EDA Controller instance
# ------------------------------------------------------------------
# NOTE(review): automation_server_url is built from ocp_cluster_name and
# ocp_base_domain, which this role does not define — they must come from
# inventory/play vars. It also assumes the Controller route host is
# <name>-<namespace>.apps.<cluster>.<domain>; confirm against the Route
# actually created by the operator.
- name: Create EDA Controller instance
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: eda.ansible.com/v1alpha1
      kind: EDA
      metadata:
        name: "{{ aap_operator_eda_name }}"
        namespace: "{{ aap_operator_namespace }}"
      spec:
        automation_server_url: "https://{{ aap_operator_controller_name }}-{{ aap_operator_namespace }}.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
  when: aap_operator_eda_enabled | bool
# ------------------------------------------------------------------
# Step 6: Wait for instances to be ready
# ------------------------------------------------------------------
# An instance is considered ready when its status carries a condition of
# type "Running" with status "True". The default([]) guards against the
# CR having no status/conditions yet on early polls.
- name: Wait for AutomationController to be ready
  kubernetes.core.k8s_info:
    api_version: automationcontroller.ansible.com/v1beta1
    kind: AutomationController
    namespace: "{{ aap_operator_namespace }}"
    name: "{{ aap_operator_controller_name }}"
  register: __aap_operator_controller_status
  until: >-
    __aap_operator_controller_status.resources | length > 0 and
    (__aap_operator_controller_status.resources[0].status.conditions | default([])
    | selectattr('type', '==', 'Running')
    | selectattr('status', '==', 'True') | list | length > 0)
  retries: "{{ __aap_operator_wait_retries }}"
  delay: 10
  when: aap_operator_controller_enabled | bool

- name: Wait for AutomationHub to be ready
  kubernetes.core.k8s_info:
    api_version: automationhub.ansible.com/v1beta1
    kind: AutomationHub
    namespace: "{{ aap_operator_namespace }}"
    name: "{{ aap_operator_hub_name }}"
  register: __aap_operator_hub_status
  until: >-
    __aap_operator_hub_status.resources | length > 0 and
    (__aap_operator_hub_status.resources[0].status.conditions | default([])
    | selectattr('type', '==', 'Running')
    | selectattr('status', '==', 'True') | list | length > 0)
  retries: "{{ __aap_operator_wait_retries }}"
  delay: 10
  when: aap_operator_hub_enabled | bool

- name: Wait for EDA Controller to be ready
  kubernetes.core.k8s_info:
    api_version: eda.ansible.com/v1alpha1
    kind: EDA
    namespace: "{{ aap_operator_namespace }}"
    name: "{{ aap_operator_eda_name }}"
  register: __aap_operator_eda_status
  until: >-
    __aap_operator_eda_status.resources | length > 0 and
    (__aap_operator_eda_status.resources[0].status.conditions | default([])
    | selectattr('type', '==', 'Running')
    | selectattr('status', '==', 'True') | list | length > 0)
  retries: "{{ __aap_operator_wait_retries }}"
  delay: 10
  when: aap_operator_eda_enabled | bool
# ------------------------------------------------------------------
# Step 7: Display summary
# ------------------------------------------------------------------
# The admin password secret named below is the one the operator generates
# for the AutomationController instance (see Step 3).
- name: Display AAP deployment summary
  ansible.builtin.debug:
    msg:
      - "Ansible Automation Platform deployment complete!"
      - " Namespace : {{ aap_operator_namespace }}"
      - " Controller : {{ aap_operator_controller_name + ' (enabled)' if aap_operator_controller_enabled else 'disabled' }}"
      - " Hub : {{ aap_operator_hub_name + ' (enabled)' if aap_operator_hub_enabled else 'disabled' }}"
      - " EDA : {{ aap_operator_eda_name + ' (enabled)' if aap_operator_eda_enabled else 'disabled' }}"
      - ""
      - "Admin password secret: {{ aap_operator_controller_name }}-admin-password"
      - "Retrieve with: oc get secret {{ aap_operator_controller_name }}-admin-password -n {{ aap_operator_namespace }} -o jsonpath='{.data.password}' | base64 -d"

View File

@@ -0,0 +1,3 @@
---
# Computed internal variables - do not override
# Number of 10-second poll attempts adding up to aap_operator_wait_timeout.
# Templating renders a string; Ansible casts it back to int where `retries`
# consumes it.
__aap_operator_wait_retries: "{{ (aap_operator_wait_timeout / 10) | int }}"

View File

@@ -1,58 +0,0 @@
# proxmox_sno_vm
Creates a Proxmox virtual machine configured for Single Node OpenShift (SNO) deployment. The VM uses q35 machine type with UEFI boot (required for RHCOS), VirtIO NIC with optional VLAN tagging, and an empty CD-ROM slot for the agent installer ISO.
After creation the role retrieves the VM ID and MAC address, setting them as cacheable facts for use by subsequent plays.
## Requirements
- `community.proxmox` collection
- A `proxmox_api` inventory host with `ansible_host` and `ansible_port` set to the Proxmox API endpoint
## Role Variables
| Variable | Default | Description |
|---|---|---|
| `proxmox_node` | `pve1` | Proxmox cluster node |
| `proxmox_api_user` | `ansible@pam` | API username |
| `proxmox_api_token_id` | `ansible` | API token ID |
| `proxmox_api_token_secret` | *required* | API token secret (sensitive) |
| `proxmox_validate_certs` | `false` | Validate TLS certificates |
| `proxmox_storage` | `local-lvm` | Storage pool for VM disks |
| `proxmox_iso_storage` | `local` | Storage pool for ISOs |
| `proxmox_iso_dir` | `/var/lib/vz/template/iso` | ISO filesystem path on Proxmox host |
| `sno_credentials_dir` | `/root/sno-{{ ocp_cluster_name }}` | Credential persistence directory |
| `sno_vm_name` | `sno-{{ ocp_cluster_name }}` | VM name in Proxmox |
| `sno_cpu` | `8` | CPU cores |
| `sno_memory_mb` | `32768` | Memory in MB |
| `sno_disk_gb` | `120` | Disk size in GB |
| `sno_bridge` | `vmbr0` | Network bridge |
| `sno_vlan` | `40` | VLAN tag |
| `sno_mac` | `""` | MAC address (empty = auto-assign) |
| `sno_vm_id` | `0` | VM ID (0 = auto-assign) |
## Cacheable Facts Set
- `sno_vm_id` — assigned Proxmox VM ID
- `sno_mac` — assigned or detected MAC address
## Example Playbook
```yaml
- name: Create SNO VM in Proxmox
hosts: sno.openshift.toal.ca
gather_facts: false
connection: local
roles:
- role: proxmox_sno_vm
tags: proxmox
```
## License
MIT
## Author
ptoal

View File

@@ -1,27 +0,0 @@
---
# Proxmox connection
# proxmox_api_host / proxmox_api_port are derived from the 'proxmox_api'
# inventory host (ansible_host / ansible_port). Do not set them here.
proxmox_node: pve1
proxmox_api_user: ansible@pam
proxmox_api_token_id: ansible
proxmox_api_token_secret: "{{ vault_proxmox_token_secret }}"
proxmox_validate_certs: false
# Storage
proxmox_storage: local-lvm # VM disk storage pool
proxmox_iso_storage: local # ISO storage pool name (Proxmox)
proxmox_iso_dir: /var/lib/vz/template/iso # Filesystem path on proxmox_host
sno_credentials_dir: "/root/sno-{{ ocp_cluster_name }}" # Persistent credentials on proxmox_host
# VM specification
sno_vm_name: "sno-{{ ocp_cluster_name }}"
sno_cpu: 8
sno_memory_mb: 32768
sno_disk_gb: 120
sno_bridge: vmbr0
sno_vlan: 40
sno_mac: "" # Leave empty for auto-assignment. Set explicitly to pin MAC for static IP.
# VM ID - leave 0 for auto-assign by Proxmox
sno_vm_id: 0

View File

@@ -0,0 +1,51 @@
---
# --- Proxmox connection ---
# proxmox_api_host / proxmox_api_port are derived from the 'proxmox_api'
# inventory host (ansible_host / ansible_port). Do not set them here.
proxmox_node: pve1
proxmox_api_user: ansible@pam
proxmox_api_token_id: ansible
proxmox_api_token_secret: "{{ vault_proxmox_token_secret }}"
proxmox_validate_certs: false
# --- Storage ---
proxmox_storage: local-lvm
proxmox_iso_storage: local
proxmox_iso_dir: /var/lib/vz/template/iso
# Persistent credentials directory on the Proxmox host.
sno_credentials_dir: "/root/sno-{{ ocp_cluster_name }}"
# --- VM specification ---
sno_vm_name: "sno-{{ ocp_cluster_name }}"
sno_cpu: 8
sno_memory_mb: 32768
sno_disk_gb: 120
sno_bridge: vmbr0
sno_vlan: 40
# Empty = auto-assign; set explicitly to pin a MAC for static IP reservation.
sno_mac: ""
# 0 = let Proxmox auto-assign the VM ID.
sno_vm_id: 0
# --- Installer ---
sno_install_dir: "/tmp/sno-{{ ocp_cluster_name }}"
sno_iso_filename: agent.x86_64.iso
# --- OIDC ---
# Identity provider name shown on the OpenShift login page.
oidc_provider_name: keycloak
oidc_client_id: openshift
# OIDC groups to grant cluster-admin via ClusterRoleBinding.
oidc_admin_groups: []
# Path to a CA cert file; only needed when Keycloak uses a private CA.
oidc_ca_cert_file: ""
# --- Keycloak ---
keycloak_context: ""
# --- cert-manager ---
sno_deploy_certmanager_channel: "stable-v1"
sno_deploy_certmanager_source: redhat-operators
# Required by argument_specs (no usable default): ACME registration email.
sno_deploy_letsencrypt_email: ""
sno_deploy_letsencrypt_server: "https://acme-v02.api.letsencrypt.org/directory"
sno_deploy_letsencrypt_staging_server: "https://acme-staging-v02.api.letsencrypt.org/directory"
sno_deploy_letsencrypt_use_staging: false
sno_deploy_certmanager_wait_timeout: 300
sno_deploy_certificate_wait_timeout: 600
sno_deploy_certmanager_dns_provider: dnsmadeeasy
sno_deploy_webhook_image: "ghcr.io/ptoal/cert-manager-webhook-dnsmadeeasy:latest"
sno_deploy_webhook_group_name: "acme.toal.ca"

View File

@@ -1,10 +1,11 @@
---
argument_specs:
main:
short_description: Create a Proxmox VM for Single Node OpenShift
short_description: Deploy and configure Single Node OpenShift on Proxmox
description:
- Creates a q35/UEFI virtual machine in Proxmox suitable for SNO deployment.
- Retrieves the assigned VM ID and MAC address as cacheable facts.
- Creates a Proxmox VM, installs SNO via agent-based installer,
configures OIDC authentication, deploys cert-manager with LetsEncrypt,
and removes the kubeadmin user.
options:
proxmox_node:
description: Proxmox cluster node to create the VM on.
@@ -72,12 +73,38 @@ argument_specs:
sno_mac:
description: >-
MAC address to assign. Leave empty for auto-assignment by Proxmox.
Set explicitly to pin a MAC for static IP reservations.
type: str
default: ""
sno_vm_id:
description: >-
Proxmox VM ID. Set to 0 for auto-assignment.
Populated as a cacheable fact after VM creation.
description: Proxmox VM ID. Set to 0 for auto-assignment.
type: int
default: 0
sno_install_dir:
description: Local directory for openshift-install working files.
type: str
default: "/tmp/sno-{{ ocp_cluster_name }}"
sno_iso_filename:
description: Filename for the agent-based installer ISO.
type: str
default: agent.x86_64.iso
oidc_provider_name:
description: Identity provider name shown on OpenShift login page.
type: str
default: keycloak
oidc_client_id:
description: OIDC client ID registered in Keycloak.
type: str
default: openshift
oidc_admin_groups:
description: List of OIDC groups to grant cluster-admin via ClusterRoleBinding.
type: list
elements: str
default: []
sno_deploy_letsencrypt_email:
description: Email address for LetsEncrypt ACME account registration.
type: str
required: true
sno_deploy_certmanager_channel:
description: OLM subscription channel for cert-manager operator.
type: str
default: "stable-v1"

View File

@@ -1,7 +1,7 @@
---
galaxy_info:
author: ptoal
description: Create a Proxmox VM for Single Node OpenShift (SNO) deployment
description: Deploy and configure Single Node OpenShift (SNO) on Proxmox
license: MIT
min_ansible_version: "2.16"
platforms:
@@ -13,5 +13,7 @@ galaxy_info:
- openshift
- sno
- vm
- oidc
- certmanager
dependencies: []

View File

@@ -0,0 +1,542 @@
---
# Install cert-manager operator and configure LetsEncrypt certificates.
#
# Installs the Red Hat cert-manager operator via OLM, creates a ClusterIssuer
# for LetsEncrypt with DNS-01 challenges via DNS Made Easy, and provisions
# certificates for the ingress wildcard and API server.
# ------------------------------------------------------------------
# Step 1: Install cert-manager operator via OLM
# ------------------------------------------------------------------
- name: Ensure cert-manager-operator namespace exists
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: cert-manager-operator

# OwnNamespace install mode: the group targets only the operator namespace.
- name: Create OperatorGroup for cert-manager-operator
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: operators.coreos.com/v1
      kind: OperatorGroup
      metadata:
        name: cert-manager-operator
        namespace: cert-manager-operator
      spec:
        targetNamespaces:
          - cert-manager-operator

- name: Subscribe to cert-manager operator
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: operators.coreos.com/v1alpha1
      kind: Subscription
      metadata:
        name: openshift-cert-manager-operator
        namespace: cert-manager-operator
      spec:
        channel: "{{ sno_deploy_certmanager_channel }}"
        installPlanApproval: Automatic
        name: openshift-cert-manager-operator
        source: "{{ sno_deploy_certmanager_source }}"
        sourceNamespace: openshift-marketplace
# ------------------------------------------------------------------
# Step 2: Wait for cert-manager to be ready
# ------------------------------------------------------------------
- name: Wait for cert-manager CRDs to be available
  kubernetes.core.k8s_info:
    api_version: apiextensions.k8s.io/v1
    kind: CustomResourceDefinition
    name: certificates.cert-manager.io
  register: __sno_deploy_certmanager_crd
  until: __sno_deploy_certmanager_crd.resources | length > 0
  retries: "{{ (sno_deploy_certmanager_wait_timeout / 10) | int }}"
  delay: 10

# Note: the operand Deployment "cert-manager" lives in the cert-manager
# namespace, not in cert-manager-operator.
- name: Wait for cert-manager deployment to be ready
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: Deployment
    namespace: cert-manager
    name: cert-manager
  register: __sno_deploy_certmanager_deploy
  until: >-
    __sno_deploy_certmanager_deploy.resources | length > 0 and
    (__sno_deploy_certmanager_deploy.resources[0].status.readyReplicas | default(0)) >= 1
  retries: "{{ (sno_deploy_certmanager_wait_timeout / 10) | int }}"
  delay: 10
# ------------------------------------------------------------------
# Step 3: Create DNS Made Easy API credentials for DNS-01 challenges
# ------------------------------------------------------------------
# dme_account_key / dme_account_secret are expected from inventory or vault
# (not defined in this role); no_log keeps them out of task output.
- name: Create DNS Made Easy API credentials secret
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Secret
      metadata:
        name: dme-api-credentials
        namespace: cert-manager
      type: Opaque
      stringData:
        api-key: "{{ dme_account_key }}"
        secret-key: "{{ dme_account_secret }}"
  no_log: true
# ------------------------------------------------------------------
# Step 4: Deploy DNS Made Easy webhook solver
# ------------------------------------------------------------------
- name: Create webhook namespace
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: cert-manager-webhook-dnsmadeeasy

- name: Create webhook ServiceAccount
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: cert-manager-webhook-dnsmadeeasy
        namespace: cert-manager-webhook-dnsmadeeasy

# Lets the webhook read Secrets (its DME credentials) and the flow-control
# resources every aggregated API server must be able to watch.
- name: Create webhook ClusterRole
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: cert-manager-webhook-dnsmadeeasy
      rules:
        - apiGroups: [""]
          resources: ["secrets"]
          verbs: ["get", "list", "watch"]
        - apiGroups: ["flowcontrol.apiserver.k8s.io"]
          resources: ["flowschemas", "prioritylevelconfigurations"]
          verbs: ["list", "watch"]

- name: Create webhook ClusterRoleBinding
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: cert-manager-webhook-dnsmadeeasy
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: cert-manager-webhook-dnsmadeeasy
      subjects:
        - kind: ServiceAccount
          name: cert-manager-webhook-dnsmadeeasy
          namespace: cert-manager-webhook-dnsmadeeasy

# Standard aggregated-apiserver plumbing: allow the webhook to delegate
# authn/authz decisions to the main kube-apiserver.
- name: Create auth-delegator ClusterRoleBinding for webhook
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: cert-manager-webhook-dnsmadeeasy:auth-delegator
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: system:auth-delegator
      subjects:
        - kind: ServiceAccount
          name: cert-manager-webhook-dnsmadeeasy
          namespace: cert-manager-webhook-dnsmadeeasy

# Allow the webhook to read the extension-apiserver authentication
# ConfigMap in kube-system.
- name: Create authentication-reader RoleBinding for webhook
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: RoleBinding
      metadata:
        name: cert-manager-webhook-dnsmadeeasy:webhook-authentication-reader
        namespace: kube-system
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: Role
        name: extension-apiserver-authentication-reader
      subjects:
        - kind: ServiceAccount
          name: cert-manager-webhook-dnsmadeeasy
          namespace: cert-manager-webhook-dnsmadeeasy

# Allow cert-manager itself to create challenge payloads in the webhook's
# custom API group during DNS-01 solving.
- name: Create domain-solver ClusterRole for cert-manager
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: cert-manager-webhook-dnsmadeeasy:domain-solver
      rules:
        - apiGroups: ["{{ sno_deploy_webhook_group_name }}"]
          resources: ["*"]
          verbs: ["create"]

- name: Bind domain-solver to cert-manager ServiceAccount
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: cert-manager-webhook-dnsmadeeasy:domain-solver
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: cert-manager-webhook-dnsmadeeasy:domain-solver
      subjects:
        - kind: ServiceAccount
          name: cert-manager
          namespace: cert-manager

# Self-signed issuer used only to mint the webhook's own serving cert.
- name: Create self-signed Issuer for webhook TLS
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: cert-manager.io/v1
      kind: Issuer
      metadata:
        name: cert-manager-webhook-dnsmadeeasy-selfsign
        namespace: cert-manager-webhook-dnsmadeeasy
      spec:
        selfSigned: {}

# Serving certificate for the webhook (1 year, renewed 30 days early);
# dnsNames cover the in-cluster service name forms.
- name: Create webhook TLS certificate
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: cert-manager.io/v1
      kind: Certificate
      metadata:
        name: cert-manager-webhook-dnsmadeeasy-tls
        namespace: cert-manager-webhook-dnsmadeeasy
      spec:
        secretName: cert-manager-webhook-dnsmadeeasy-tls
        duration: 8760h
        renewBefore: 720h
        issuerRef:
          name: cert-manager-webhook-dnsmadeeasy-selfsign
          kind: Issuer
        dnsNames:
          - cert-manager-webhook-dnsmadeeasy
          - cert-manager-webhook-dnsmadeeasy.cert-manager-webhook-dnsmadeeasy
          - cert-manager-webhook-dnsmadeeasy.cert-manager-webhook-dnsmadeeasy.svc
# The webhook pod serves the solver API over HTTPS on 8443 using the TLS
# secret created above.
- name: Deploy webhook solver
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: cert-manager-webhook-dnsmadeeasy
        namespace: cert-manager-webhook-dnsmadeeasy
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: cert-manager-webhook-dnsmadeeasy
        template:
          metadata:
            labels:
              app: cert-manager-webhook-dnsmadeeasy
          spec:
            serviceAccountName: cert-manager-webhook-dnsmadeeasy
            containers:
              - name: webhook
                image: "{{ sno_deploy_webhook_image }}"
                args:
                  - --tls-cert-file=/tls/tls.crt
                  - --tls-private-key-file=/tls/tls.key
                  - --secure-port=8443
                ports:
                  - containerPort: 8443
                    name: https
                    protocol: TCP
                env:
                  # Must match the API group registered in the APIService
                  # and referenced by the ClusterIssuer solver config.
                  - name: GROUP_NAME
                    value: "{{ sno_deploy_webhook_group_name }}"
                livenessProbe:
                  httpGet:
                    path: /healthz
                    port: https
                    scheme: HTTPS
                  initialDelaySeconds: 5
                  periodSeconds: 10
                readinessProbe:
                  httpGet:
                    path: /healthz
                    port: https
                    scheme: HTTPS
                  initialDelaySeconds: 5
                  periodSeconds: 10
                resources:
                  requests:
                    cpu: 10m
                    memory: 32Mi
                  limits:
                    memory: 64Mi
                volumeMounts:
                  - name: certs
                    mountPath: /tls
                    readOnly: true
            volumes:
              - name: certs
                secret:
                  secretName: cert-manager-webhook-dnsmadeeasy-tls

# Exposes the webhook on 443 inside the cluster for the aggregation layer.
- name: Create webhook Service
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Service
      metadata:
        name: cert-manager-webhook-dnsmadeeasy
        namespace: cert-manager-webhook-dnsmadeeasy
      spec:
        type: ClusterIP
        ports:
          - port: 443
            targetPort: https
            protocol: TCP
            name: https
        selector:
          app: cert-manager-webhook-dnsmadeeasy
# Register the webhook API with the aggregation layer. Fix: the previous
# version set insecureSkipTLSVerify: true, so the kube-apiserver never
# verified the webhook's serving cert even though one is provisioned above.
# Let cert-manager's cainjector populate spec.caBundle from the webhook's
# Certificate instead (cert-manager.io/inject-ca-from: <ns>/<certificate>).
- name: Register webhook APIService
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: apiregistration.k8s.io/v1
      kind: APIService
      metadata:
        name: "v1alpha1.{{ sno_deploy_webhook_group_name }}"
        annotations:
          cert-manager.io/inject-ca-from: cert-manager-webhook-dnsmadeeasy/cert-manager-webhook-dnsmadeeasy-tls
      spec:
        group: "{{ sno_deploy_webhook_group_name }}"
        groupPriorityMinimum: 1000
        versionPriority: 15
        service:
          name: cert-manager-webhook-dnsmadeeasy
          namespace: cert-manager-webhook-dnsmadeeasy
        version: v1alpha1
# Fixed 30 x 10s = 300s budget for the webhook rollout (not tied to the
# configurable timeouts used elsewhere in this file).
- name: Wait for webhook deployment to be ready
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: Deployment
    namespace: cert-manager-webhook-dnsmadeeasy
    name: cert-manager-webhook-dnsmadeeasy
  register: __sno_deploy_webhook_deploy
  until: >-
    __sno_deploy_webhook_deploy.resources | length > 0 and
    (__sno_deploy_webhook_deploy.resources[0].status.readyReplicas | default(0)) >= 1
  retries: 30
  delay: 10
# ------------------------------------------------------------------
# Step 5: Create ClusterIssuer for LetsEncrypt
# ------------------------------------------------------------------
# NOTE(review): the issuer is always named letsencrypt-production even when
# __sno_deploy_letsencrypt_server_url (computed elsewhere) points at the
# staging endpoint via sno_deploy_letsencrypt_use_staging — confirm the
# fixed name is intentional.
- name: Create LetsEncrypt ClusterIssuer
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: cert-manager.io/v1
      kind: ClusterIssuer
      metadata:
        name: letsencrypt-production
      spec:
        acme:
          email: "{{ sno_deploy_letsencrypt_email }}"
          server: "{{ __sno_deploy_letsencrypt_server_url }}"
          privateKeySecretRef:
            name: letsencrypt-production-account-key
          solvers:
            - dns01:
                webhook:
                  groupName: "{{ sno_deploy_webhook_group_name }}"
                  solverName: dnsmadeeasy
                  config:
                    apiKeySecretRef:
                      name: dme-api-credentials
                      key: api-key
                    secretKeySecretRef:
                      name: dme-api-credentials
                      key: secret-key

# Ready condition confirms cert-manager registered the ACME account.
- name: Wait for ClusterIssuer to be ready
  kubernetes.core.k8s_info:
    api_version: cert-manager.io/v1
    kind: ClusterIssuer
    name: letsencrypt-production
  register: __sno_deploy_clusterissuer
  until: >-
    __sno_deploy_clusterissuer.resources | length > 0 and
    (__sno_deploy_clusterissuer.resources[0].status.conditions | default([])
    | selectattr('type', '==', 'Ready')
    | selectattr('status', '==', 'True') | list | length > 0)
  retries: 12
  delay: 10

# ------------------------------------------------------------------
# Step 6: Create Certificate resources
# ------------------------------------------------------------------
# 90-day certs (2160h) renewed 30 days (720h) before expiry; secrets are
# created in the namespaces that consume them (openshift-ingress /
# openshift-config).
- name: Create apps wildcard certificate
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: cert-manager.io/v1
      kind: Certificate
      metadata:
        name: apps-wildcard-cert
        namespace: openshift-ingress
      spec:
        secretName: apps-wildcard-tls
        issuerRef:
          name: letsencrypt-production
          kind: ClusterIssuer
        dnsNames:
          - "{{ __sno_deploy_apps_wildcard }}"
        duration: 2160h
        renewBefore: 720h

- name: Create API server certificate
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: cert-manager.io/v1
      kind: Certificate
      metadata:
        name: api-server-cert
        namespace: openshift-config
      spec:
        secretName: api-server-tls
        issuerRef:
          name: letsencrypt-production
          kind: ClusterIssuer
        dnsNames:
          - "{{ __sno_deploy_api_hostname }}"
        duration: 2160h
        renewBefore: 720h

# ------------------------------------------------------------------
# Step 7: Wait for certificates to be issued
# ------------------------------------------------------------------
# DNS-01 issuance can take minutes (propagation checks), hence the
# dedicated sno_deploy_certificate_wait_timeout budget.
- name: Wait for apps wildcard certificate to be ready
  kubernetes.core.k8s_info:
    api_version: cert-manager.io/v1
    kind: Certificate
    namespace: openshift-ingress
    name: apps-wildcard-cert
  register: __sno_deploy_apps_cert
  until: >-
    __sno_deploy_apps_cert.resources | length > 0 and
    (__sno_deploy_apps_cert.resources[0].status.conditions | default([])
    | selectattr('type', '==', 'Ready')
    | selectattr('status', '==', 'True') | list | length > 0)
  retries: "{{ (sno_deploy_certificate_wait_timeout / 10) | int }}"
  delay: 10

- name: Wait for API server certificate to be ready
  kubernetes.core.k8s_info:
    api_version: cert-manager.io/v1
    kind: Certificate
    namespace: openshift-config
    name: api-server-cert
  register: __sno_deploy_api_cert
  until: >-
    __sno_deploy_api_cert.resources | length > 0 and
    (__sno_deploy_api_cert.resources[0].status.conditions | default([])
    | selectattr('type', '==', 'Ready')
    | selectattr('status', '==', 'True') | list | length > 0)
  retries: "{{ (sno_deploy_certificate_wait_timeout / 10) | int }}"
  delay: 10
# ------------------------------------------------------------------
# Step 8: Patch IngressController and APIServer to use the certs
# ------------------------------------------------------------------
- name: Patch default IngressController to use LetsEncrypt cert
  kubernetes.core.k8s:
    state: present
    merge_type: merge
    definition:
      apiVersion: operator.openshift.io/v1
      kind: IngressController
      metadata:
        name: default
        namespace: openshift-ingress-operator
      spec:
        defaultCertificate:
          name: apps-wildcard-tls

- name: Patch APIServer to use LetsEncrypt cert
  kubernetes.core.k8s:
    state: present
    merge_type: merge
    definition:
      apiVersion: config.openshift.io/v1
      kind: APIServer
      metadata:
        name: cluster
      spec:
        servingCerts:
          namedCertificates:
            - names:
                - "{{ __sno_deploy_api_hostname }}"
              servingCertificate:
                name: api-server-tls

# ------------------------------------------------------------------
# Step 9: Wait for rollouts
# ------------------------------------------------------------------
# NOTE(review): a fixed 30s sleep is a heuristic — the kube-apiserver
# rollout may start later than this; consider polling the
# kube-apiserver clusteroperator instead.
- name: Wait for API server to begin restart
  ansible.builtin.pause:
    seconds: 30

# Done when all router replicas are both updated and ready.
- name: Wait for router pods to restart with new cert
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: Deployment
    namespace: openshift-ingress
    name: router-default
  register: __sno_deploy_router
  until: >-
    __sno_deploy_router.resources is defined and
    __sno_deploy_router.resources | length > 0 and
    (__sno_deploy_router.resources[0].status.updatedReplicas | default(0)) ==
    (__sno_deploy_router.resources[0].status.replicas | default(1)) and
    (__sno_deploy_router.resources[0].status.readyReplicas | default(0)) ==
    (__sno_deploy_router.resources[0].status.replicas | default(1))
  retries: 60
  delay: 10

# Shown only with -v or higher (verbosity: 1).
- name: Display cert-manager configuration summary
  ansible.builtin.debug:
    msg:
      - "cert-manager configuration complete!"
      - " ClusterIssuer : letsencrypt-production"
      - " Apps wildcard : {{ __sno_deploy_apps_wildcard }}"
      - " API cert : {{ __sno_deploy_api_hostname }}"
    verbosity: 1

View File

@@ -0,0 +1,145 @@
---
# Configure OpenShift OAuth with Keycloak OIDC.
#
# Prerequisites:
#   - SNO cluster installed and accessible
#   - Keycloak OIDC client created (Play 5 in deploy_openshift.yml)
#   - KUBECONFIG environment variable set or oc_kubeconfig defined

# ------------------------------------------------------------------
# Secret: Keycloak client secret in openshift-config namespace
# ------------------------------------------------------------------
# The client secret may have been captured earlier in the run as the
# __oidc_client_secret fact (presumably when the Keycloak client was
# created — confirm against deploy_openshift.yml); otherwise fall back
# to the vaulted value. no_log keeps the value out of task output.
- name: Set OIDC client secret value
  ansible.builtin.set_fact:
    __sno_deploy_oidc_client_secret_value: >-
      {{ hostvars[inventory_hostname]['__oidc_client_secret']
         | default(vault_oidc_client_secret) }}
  no_log: true
# Store the Keycloak client secret where the OAuth identityProvider
# (clientSecret.name) expects to find it.
- name: Create Keycloak client secret in openshift-config
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Secret
      metadata:
        name: "{{ __sno_deploy_oidc_secret_name }}"
        namespace: openshift-config
      type: Opaque
      stringData:
        clientSecret: "{{ __sno_deploy_oidc_client_secret_value }}"
  # Fix: was `no_log: false`, which let the rendered Secret definition
  # (including clientSecret) appear in verbose/diff output. The secret
  # must never reach logs — matches the set_fact task above.
  no_log: true
# ------------------------------------------------------------------
# CA bundle: only needed when Keycloak uses a private/internal CA
# ------------------------------------------------------------------
- name: Create CA bundle ConfigMap for Keycloak TLS
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: "{{ __sno_deploy_oidc_ca_configmap_name }}"
        namespace: openshift-config
      data:
        # The file lookup reads oidc_ca_cert_file on the controller.
        ca.crt: "{{ lookup('ansible.builtin.file', oidc_ca_cert_file) }}"
  when: oidc_ca_cert_file | default('') | length > 0

# ------------------------------------------------------------------
# OAuth cluster resource: add/replace Keycloak IdP entry
# ------------------------------------------------------------------
# Read the live spec first so identity providers configured outside
# this role are preserved when the full list is re-applied below.
- name: Get current OAuth cluster configuration
  kubernetes.core.k8s_info:
    api_version: config.openshift.io/v1
    kind: OAuth
    name: cluster
  register: __sno_deploy_current_oauth

# Build the identityProvider mapping in one Jinja expression. The
# optional `ca` key is merged into the openID section only when a
# private CA bundle was installed above; otherwise {} is merged (no-op).
- name: Build Keycloak OIDC identity provider definition
  ansible.builtin.set_fact:
    __sno_deploy_new_idp: >-
      {{
        {
          'name': oidc_provider_name,
          'mappingMethod': 'claim',
          'type': 'OpenID',
          'openID': (
            {
              'clientID': oidc_client_id,
              'clientSecret': {'name': __sno_deploy_oidc_secret_name},
              'issuer': __sno_deploy_oidc_issuer,
              'claims': {
                'preferredUsername': ['preferred_username'],
                'name': ['name'],
                'email': ['email'],
                'groups': ['groups']
              }
            } | combine(
              (oidc_ca_cert_file | default('') | length > 0) | ternary(
                {'ca': {'name': __sno_deploy_oidc_ca_configmap_name}}, {}
              )
            )
          )
        }
      }}

# Idempotent upsert: drop any existing entry with the same provider
# name, then append the freshly built definition.
- name: Build updated identity providers list
  ansible.builtin.set_fact:
    __sno_deploy_updated_idps: >-
      {{
        (__sno_deploy_current_oauth.resources[0].spec.identityProviders | default([])
         | selectattr('name', '!=', oidc_provider_name) | list)
        + [__sno_deploy_new_idp]
      }}
# The whole identityProviders list is replaced (merge patch replaces
# lists wholesale), which is why it was rebuilt from the live spec above.
- name: Apply updated OAuth cluster configuration
  kubernetes.core.k8s:
    state: present
    merge_type: merge
    definition:
      apiVersion: config.openshift.io/v1
      kind: OAuth
      metadata:
        name: cluster
      spec:
        identityProviders: "{{ __sno_deploy_updated_idps }}"

# NOTE(review): polled immediately after the patch — this assumes the
# authentication operator has already started rolling out the new
# deployment; a poll that races the operator could observe the previous,
# already-complete rollout. Confirm in practice.
- name: Wait for OAuth deployment to roll out
  ansible.builtin.command:
    cmd: "{{ __sno_deploy_oc }} rollout status deployment/oauth-openshift -n openshift-authentication --timeout=300s --insecure-skip-tls-verify"
  changed_when: false

# ------------------------------------------------------------------
# ClusterRoleBinding: grant cluster-admin to OIDC admin groups
# ------------------------------------------------------------------
# One CRB per group; characters outside [a-zA-Z0-9-] in the group name
# are mangled to '-' to keep the CRB name valid. delete_kubeadmin.yml
# looks these bindings up using the same mangling.
- name: Create ClusterRoleBinding for OIDC admin groups
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: "oidc-{{ item | regex_replace('[^a-zA-Z0-9-]', '-') }}-cluster-admin"
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: cluster-admin
      subjects:
        - apiGroup: rbac.authorization.k8s.io
          kind: Group
          # Subject keeps the original (unmangled) group name.
          name: "{{ item }}"
  loop: "{{ oidc_admin_groups }}"
  when: oidc_admin_groups | length > 0

# Shown only with -v (verbosity: 1).
- name: Display post-configuration summary
  ansible.builtin.debug:
    msg:
      - "OpenShift OIDC configuration complete!"
      - " Provider : {{ oidc_provider_name }}"
      - " Issuer : {{ __sno_deploy_oidc_issuer }}"
      - " Console : https://console-openshift-console.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
      - " Login : https://oauth-openshift.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
      - ""
      - "Note: OAuth pods are restarting — login may be unavailable for ~2 minutes."
    verbosity: 1

View File

@@ -1,13 +1,11 @@
---
# Create a Proxmox VM for Single Node OpenShift on VLAN40 (192.168.40.0/24).
# Create a Proxmox VM for Single Node OpenShift.
# Uses q35 machine type with UEFI (required for SNO / RHCOS).
# An empty ide2 CD-ROM slot is created here so the boot order can reference it;
# the deploy_openshift.yml play loads the actual ISO into it after generation.
# An empty ide2 CD-ROM slot is created for the agent installer ISO.
- name: Build net0 string
ansible.builtin.set_fact:
# Proxmox net format: model[=macaddr],bridge=<bridge>[,tag=<vlan>]
__proxmox_sno_vm_net0: >-
__sno_deploy_net0: >-
virtio{{
'=' + sno_mac if sno_mac | length > 0 else ''
}},bridge={{ sno_bridge }},tag={{ sno_vlan }}
@@ -40,11 +38,11 @@
ide:
ide2: none,media=cdrom
net:
net0: "{{ __proxmox_sno_vm_net0 }}"
net0: "{{ __sno_deploy_net0 }}"
boot: "order=scsi0;ide2"
onboot: true
state: present
register: __proxmox_sno_vm_result
register: __sno_deploy_vm_result
- name: Retrieve VM info
community.proxmox.proxmox_vm_info:
@@ -58,19 +56,18 @@
name: "{{ sno_vm_name }}"
type: qemu
config: current
register: __proxmox_sno_vm_info
register: __sno_deploy_vm_info
retries: 5
- name: Set VM ID fact for subsequent plays
ansible.builtin.set_fact:
sno_vm_id: "{{ __proxmox_sno_vm_info.proxmox_vms[0].vmid }}"
sno_vm_id: "{{ __sno_deploy_vm_info.proxmox_vms[0].vmid }}"
cacheable: true
- name: Extract MAC address from VM config
ansible.builtin.set_fact:
# net0 format: virtio=52:54:00:xx:xx:xx,bridge=vmbr0,tag=40
sno_mac: >-
{{ __proxmox_sno_vm_info.proxmox_vms[0].config.net0
{{ __sno_deploy_vm_info.proxmox_vms[0].config.net0
| regex_search('([0-9A-Fa-f]{2}(?::[0-9A-Fa-f]{2}){5})', '\1')
| first }}
cacheable: true

View File

@@ -0,0 +1,52 @@
---
# Delete the kubeadmin user after OIDC is configured and admin groups
# have cluster-admin. This is a security best practice.
#
# Safety checks:
#   1. Verify at least one group in oidc_admin_groups is configured
#   2. Verify ClusterRoleBindings exist for those groups
#   3. Verify the OAuth deployment is ready (OIDC login is available)
#   4. Only then delete the kubeadmin secret
- name: Fail if no admin groups are configured
  ansible.builtin.fail:
    msg: >-
      Cannot delete kubeadmin: oidc_admin_groups is empty.
      At least one OIDC group must have cluster-admin before kubeadmin can be removed.
  when: oidc_admin_groups | length == 0

# failed_when is evaluated per loop item, so a single missing binding
# (empty resources list for that group) fails the task. The name
# mangling must match the CRBs created in configure_oidc.yml.
- name: Verify OIDC admin ClusterRoleBindings exist
  kubernetes.core.k8s_info:
    api_version: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    name: "oidc-{{ item | regex_replace('[^a-zA-Z0-9-]', '-') }}-cluster-admin"
  loop: "{{ oidc_admin_groups }}"
  register: __sno_deploy_admin_crbs
  failed_when: __sno_deploy_admin_crbs.resources | length == 0

# Require at least one ready oauth-openshift replica so an OIDC login
# path exists before the break-glass credential is removed.
- name: Verify OAuth deployment is ready
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: Deployment
    namespace: openshift-authentication
    name: oauth-openshift
  register: __sno_deploy_oauth_status
  failed_when: >-
    __sno_deploy_oauth_status.resources | length == 0 or
    (__sno_deploy_oauth_status.resources[0].status.readyReplicas | default(0)) < 1

# kubeadmin login is backed by this secret; deleting it disables the
# user. state=absent is idempotent (changed=false when already gone).
- name: Delete kubeadmin secret
  kubernetes.core.k8s:
    api_version: v1
    kind: Secret
    namespace: kube-system
    name: kubeadmin
    state: absent
  register: __sno_deploy_kubeadmin_deleted

- name: Display kubeadmin deletion result
  ansible.builtin.debug:
    msg: >-
      {{ 'kubeadmin user deleted successfully. Login is now only available via OIDC.'
         if __sno_deploy_kubeadmin_deleted.changed
         else 'kubeadmin was already deleted.' }}

View File

@@ -0,0 +1,389 @@
---
# Generate Agent ISO and deploy SNO (agent-based installer).
#
# Uses `openshift-install agent create image` — no SaaS API, no SSO required.
# The pull secret is the only Red Hat credential needed.
# Credentials (kubeconfig, kubeadmin-password) are generated locally under
# sno_install_dir/auth/ by openshift-install itself.
#
# Idempotency: If the cluster API is already responding, all install steps
# are skipped. Credentials on Proxmox host are never overwritten once saved.

# ------------------------------------------------------------------
# Step 0: Ensure sno_vm_id and sno_mac are populated.
# These are set as cacheable facts by create_vm.yml, but in ephemeral
# EEs or when running --tags sno_deploy_install alone the cache is empty.
# ------------------------------------------------------------------
- name: Retrieve VM info from Proxmox (needed when fact cache is empty)
  community.proxmox.proxmox_vm_info:
    api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}"
    api_user: "{{ proxmox_api_user }}"
    api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token_secret }}"
    validate_certs: "{{ proxmox_validate_certs }}"
    node: "{{ proxmox_node }}"
    name: "{{ sno_vm_name }}"
    type: qemu
    config: current
  register: __sno_deploy_vm_info
  when: (sno_vm_id | default('')) == '' or (sno_mac | default('')) == ''

# Extract the MAC from the qemu net0 config string
# (format like "virtio=52:54:00:xx:xx:xx,bridge=...,tag=...").
- name: Set sno_vm_id and sno_mac from live Proxmox query
  ansible.builtin.set_fact:
    sno_vm_id: "{{ __sno_deploy_vm_info.proxmox_vms[0].vmid }}"
    sno_mac: >-
      {{ __sno_deploy_vm_info.proxmox_vms[0].config.net0
         | regex_search('([0-9A-Fa-f]{2}(?::[0-9A-Fa-f]{2}){5})', '\1')
         | first }}
    cacheable: true
  when: __sno_deploy_vm_info is not skipped

# ------------------------------------------------------------------
# Step 0b: Check if OpenShift is already deployed and responding.
# If the API is reachable, skip ISO generation, boot, and install.
# ------------------------------------------------------------------
# 401/403 also count as "alive": they prove the API server answered,
# even if /readyz requires auth on this cluster.
- name: Check if OpenShift cluster is already responding
  ansible.builtin.uri:
    url: "https://api.{{ ocp_cluster_name }}.{{ ocp_base_domain }}:6443/readyz"
    method: GET
    validate_certs: false
    status_code: [200, 401, 403]
    timeout: 10
  register: __sno_deploy_cluster_alive
  ignore_errors: true

# Connection errors (cluster absent) leave the task failed-but-ignored,
# so `is success` is false and the flag becomes false.
- name: Set cluster deployed flag
  ansible.builtin.set_fact:
    __sno_deploy_cluster_deployed: "{{ __sno_deploy_cluster_alive is success }}"

- name: Display cluster status
  ansible.builtin.debug:
    msg: >-
      {{ 'OpenShift cluster is already deployed and responding — skipping install steps.'
         if __sno_deploy_cluster_deployed | bool
         else 'OpenShift cluster is not yet deployed — proceeding with installation.' }}

# Always ensured, even when already deployed — post-install tasks need
# the auth/ directory for the kubeconfig recovery below.
- name: Ensure local install directories exist
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: "0750"
  loop:
    - "{{ sno_install_dir }}"
    - "{{ sno_install_dir }}/auth"
# ------------------------------------------------------------------
# Step 0c: When cluster is already deployed, ensure a valid kubeconfig
# exists so post-install tasks can authenticate to the API.
# Try in order: local file → Proxmox host backup → SSH to SNO node.
# After obtaining a kubeconfig, validate it against the API and fall
# through to the next source if credentials are expired.
# ------------------------------------------------------------------
- name: Check if local kubeconfig already exists
  ansible.builtin.stat:
    path: "{{ __sno_deploy_kubeconfig }}"
  register: __sno_deploy_local_kubeconfig
  when: __sno_deploy_cluster_deployed | bool

# `oc whoami` exits 0 only when the kubeconfig's credentials are
# accepted by the API server; failures are ignored here and examined
# via the registered result below.
- name: Validate local kubeconfig against API
  ansible.builtin.command:
    cmd: "oc whoami --kubeconfig={{ __sno_deploy_kubeconfig }} --insecure-skip-tls-verify"
  register: __sno_deploy_local_kubeconfig_valid
  ignore_errors: true
  changed_when: false
  when:
    - __sno_deploy_cluster_deployed | bool
    - __sno_deploy_local_kubeconfig.stat.exists | default(false)

# Fallback source #2: the backup saved on the Proxmox host in Step 8.
- name: Check if kubeconfig exists on Proxmox host
  ansible.builtin.stat:
    path: "{{ sno_credentials_dir }}/kubeconfig"
  delegate_to: proxmox_host
  register: __sno_deploy_proxmox_kubeconfig
  when:
    - __sno_deploy_cluster_deployed | bool
    - not (__sno_deploy_local_kubeconfig.stat.exists | default(false)) or
      (__sno_deploy_local_kubeconfig_valid is failed)

# fetch runs against the delegated host: it copies the Proxmox backup
# down to the controller path used by all later k8s tasks.
- name: Recover kubeconfig from Proxmox host
  ansible.builtin.fetch:
    src: "{{ sno_credentials_dir }}/kubeconfig"
    dest: "{{ __sno_deploy_kubeconfig }}"
    flat: true
  delegate_to: proxmox_host
  when:
    - __sno_deploy_cluster_deployed | bool
    - not (__sno_deploy_local_kubeconfig.stat.exists | default(false)) or
      (__sno_deploy_local_kubeconfig_valid is failed)
    - __sno_deploy_proxmox_kubeconfig.stat.exists | default(false)

# Validate the recovered copy the same way; the registered result also
# feeds the need-SSH-recovery decision below.
- name: Validate recovered Proxmox kubeconfig against API
  ansible.builtin.command:
    cmd: "oc whoami --kubeconfig={{ __sno_deploy_kubeconfig }} --insecure-skip-tls-verify"
  register: __sno_deploy_proxmox_kubeconfig_valid
  ignore_errors: true
  changed_when: false
  when:
    - __sno_deploy_cluster_deployed | bool
    - not (__sno_deploy_local_kubeconfig.stat.exists | default(false)) or
      (__sno_deploy_local_kubeconfig_valid is failed)
    - __sno_deploy_proxmox_kubeconfig.stat.exists | default(false)
# SSH recovery is the last resort: it is needed whenever neither the
# local kubeconfig nor the Proxmox-host backup produced a kubeconfig
# that authenticates against the API.
#
# Fix: the previous boolean enumerated exists/failed combinations and
# missed the case "local file exists but is invalid AND no Proxmox
# backup exists", leaving the flag false and the play continuing with
# a dead kubeconfig. Decide from the validation results instead: each
# `oc whoami` probe above is registered only when its source file
# existed and was tried, so a skipped probe has no `rc` and counts as
# failed via the default(1).
- name: Set flag - need SSH recovery
  ansible.builtin.set_fact:
    __sno_deploy_need_ssh_recovery: >-
      {{
        (__sno_deploy_cluster_deployed | bool) and
        ((__sno_deploy_local_kubeconfig_valid | default({})).rc | default(1)) != 0 and
        ((__sno_deploy_proxmox_kubeconfig_valid | default({})).rc | default(1)) != 0
      }}
# Last resort: read a node-local kubeconfig directly off the SNO host.
# NOTE(review): lb-ext.kubeconfig is presumably chosen because it is
# usable when the installer-generated admin kubeconfig has expired —
# confirm. Requires SSH access as core@{{ '{{ sno_ip }}' }}.
- name: Recover kubeconfig from SNO node via SSH
  ansible.builtin.command:
    cmd: >-
      ssh -o StrictHostKeyChecking=no core@{{ sno_ip }}
      sudo cat /etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs/lb-ext.kubeconfig
  register: __sno_deploy_recovered_kubeconfig
  when: __sno_deploy_need_ssh_recovery | bool

- name: Write recovered kubeconfig from SNO node
  ansible.builtin.copy:
    content: "{{ __sno_deploy_recovered_kubeconfig.stdout }}"
    dest: "{{ __sno_deploy_kubeconfig }}"
    mode: "0600"
  when:
    - __sno_deploy_recovered_kubeconfig is not skipped
    - __sno_deploy_recovered_kubeconfig.rc == 0

# Refresh the Proxmox backup so the next recovery can skip the SSH hop.
- name: Update kubeconfig backup on Proxmox host
  ansible.builtin.copy:
    src: "{{ __sno_deploy_kubeconfig }}"
    dest: "{{ sno_credentials_dir }}/kubeconfig"
    mode: "0600"
    backup: true
  delegate_to: proxmox_host
  when:
    - __sno_deploy_recovered_kubeconfig is not skipped
    - __sno_deploy_recovered_kubeconfig.rc == 0

# Hard stop: all three sources were exhausted without a working
# kubeconfig; nothing downstream can talk to the API.
- name: Fail if no valid kubeconfig could be obtained
  ansible.builtin.fail:
    msg: >-
      Cluster is deployed but no valid kubeconfig could be obtained.
      Tried: local file, Proxmox host ({{ sno_credentials_dir }}/kubeconfig),
      and SSH to core@{{ sno_ip }}. Cannot proceed with post-install tasks.
  when:
    - __sno_deploy_need_ssh_recovery | bool
    - __sno_deploy_recovered_kubeconfig is skipped or __sno_deploy_recovered_kubeconfig.rc != 0
# ------------------------------------------------------------------
# Step 1: Check whether a fresh ISO already exists on Proxmox
# AND the local openshift-install state dir is intact.
# ------------------------------------------------------------------
- name: Check if ISO already exists on Proxmox and is less than 24 hours old
  ansible.builtin.stat:
    path: "{{ proxmox_iso_dir }}/{{ sno_iso_filename }}"
    get_checksum: false
  delegate_to: proxmox_host
  register: __sno_deploy_iso_stat
  when: not __sno_deploy_cluster_deployed | bool

# The local install state must match the uploaded ISO; without it the
# ISO cannot be reused and a rebuild is required anyway.
- name: Check if local openshift-install state directory exists
  ansible.builtin.stat:
    path: "{{ sno_install_dir }}/.openshift_install_state"
    get_checksum: false
  register: __sno_deploy_state_stat
  when: not __sno_deploy_cluster_deployed | bool

# Freshness window: 24h (86400 s), compared against the ISO's mtime.
# Every stat lookup is guarded with default() because the tasks above
# are skipped entirely when the cluster is already live.
- name: Set fact - skip ISO build if recent ISO exists on Proxmox and local state is intact
  ansible.builtin.set_fact:
    __sno_deploy_iso_fresh: >-
      {{
        not (__sno_deploy_cluster_deployed | bool) and
        __sno_deploy_iso_stat.stat.exists | default(false) and
        (now(utc=true).timestamp() | int - __sno_deploy_iso_stat.stat.mtime | default(0) | int) < 86400 and
        __sno_deploy_state_stat.stat.exists | default(false)
      }}
# ------------------------------------------------------------------
# Step 2: Get openshift-install binary
# Always ensure the binary is present — needed for both ISO generation
# and wait-for-install-complete regardless of __sno_deploy_iso_fresh.
# ------------------------------------------------------------------
- name: Download openshift-install tarball
  ansible.builtin.get_url:
    url: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable-{{ ocp_version }}/openshift-install-linux.tar.gz"
    dest: "{{ sno_install_dir }}/openshift-install-{{ ocp_version }}.tar.gz"
    mode: "0644"
    # Optional integrity pin; omitted when no checksum var is set.
    checksum: "{{ ocp_install_checksum | default(omit) }}"
  register: __sno_deploy_install_tarball
  when: not __sno_deploy_cluster_deployed | bool

- name: Extract openshift-install binary
  ansible.builtin.unarchive:
    src: "{{ sno_install_dir }}/openshift-install-{{ ocp_version }}.tar.gz"
    dest: "{{ sno_install_dir }}"
    # Fix: get_url saved the tarball on the managed node, so unarchive
    # must read it there. remote_src=false would look for the path on
    # the controller, which only works when the play targets localhost.
    remote_src: true
    include:
      - openshift-install
  when: not __sno_deploy_cluster_deployed | bool and (__sno_deploy_install_tarball.changed or not (sno_install_dir ~ '/openshift-install') is file)

- name: Download openshift-client tarball
  ansible.builtin.get_url:
    url: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable-{{ ocp_version }}/openshift-client-linux.tar.gz"
    dest: "{{ sno_install_dir }}/openshift-client-{{ ocp_version }}.tar.gz"
    mode: "0644"
    checksum: "{{ ocp_client_checksum | default(omit) }}"
  register: __sno_deploy_client_tarball
  when: not __sno_deploy_cluster_deployed | bool

- name: Extract oc binary
  ansible.builtin.unarchive:
    src: "{{ sno_install_dir }}/openshift-client-{{ ocp_version }}.tar.gz"
    dest: "{{ sno_install_dir }}"
    # Fix: same remote_src correction as the installer tarball above.
    remote_src: true
    include:
      - oc
  when: not __sno_deploy_cluster_deployed | bool and (__sno_deploy_client_tarball.changed or not (sno_install_dir ~ '/oc') is file)
# ------------------------------------------------------------------
# Step 3: Template agent installer config files (skipped if ISO is fresh)
# ------------------------------------------------------------------
# no_log: the rendered install-config.yaml embeds the pull secret.
- name: Template install-config.yaml
  ansible.builtin.template:
    src: install-config.yaml.j2
    dest: "{{ sno_install_dir }}/install-config.yaml"
    mode: "0640"
  when: not __sno_deploy_cluster_deployed | bool and not __sno_deploy_iso_fresh | bool
  no_log: true

- name: Template agent-config.yaml
  ansible.builtin.template:
    src: agent-config.yaml.j2
    dest: "{{ sno_install_dir }}/agent-config.yaml"
    mode: "0640"
  when: not __sno_deploy_cluster_deployed | bool and not __sno_deploy_iso_fresh | bool

# ------------------------------------------------------------------
# Step 4: Generate discovery ISO (skipped if ISO is fresh)
# ------------------------------------------------------------------
- name: Generate agent-based installer ISO
  ansible.builtin.command:
    cmd: "{{ sno_install_dir }}/openshift-install agent create image --dir {{ sno_install_dir }}"
  when: not __sno_deploy_cluster_deployed | bool and not __sno_deploy_iso_fresh | bool

# ------------------------------------------------------------------
# Step 5: Upload ISO to Proxmox and attach to VM
# ------------------------------------------------------------------
- name: Copy discovery ISO to Proxmox ISO storage
  ansible.builtin.copy:
    src: "{{ sno_install_dir }}/{{ sno_iso_filename }}"
    dest: "{{ proxmox_iso_dir }}/{{ sno_iso_filename }}"
    mode: "0644"
  delegate_to: proxmox_host
  when: not __sno_deploy_cluster_deployed | bool and not __sno_deploy_iso_fresh | bool

# Attach and boot-order run even when the ISO is fresh, so a previously
# uploaded ISO is (re)attached before boot. qm set is not idempotence-
# aware, hence the unconditional changed_when: true.
- name: Attach ISO to VM as CDROM
  ansible.builtin.command:
    cmd: "qm set {{ sno_vm_id }} --ide2 {{ proxmox_iso_storage }}:iso/{{ sno_iso_filename }},media=cdrom"
  delegate_to: proxmox_host
  changed_when: true
  when: not __sno_deploy_cluster_deployed | bool

# command (not shell) is used, so the ';' in the boot order is passed
# literally to qm and never interpreted by a shell.
- name: Ensure boot order prefers disk, falls back to CDROM
  ansible.builtin.command:
    cmd: "qm set {{ sno_vm_id }} --boot order=scsi0;ide2"
  delegate_to: proxmox_host
  changed_when: true
  when: not __sno_deploy_cluster_deployed | bool
# ------------------------------------------------------------------
# Step 6: Boot the VM
# ------------------------------------------------------------------
# With boot order scsi0;ide2 an empty disk falls through to the agent
# ISO on first boot; once installed the node boots from disk.
- name: Start SNO VM
  community.proxmox.proxmox_kvm:
    api_host: "{{ hostvars['proxmox_api']['ansible_host'] }}"
    api_user: "{{ proxmox_api_user }}"
    api_port: "{{ hostvars['proxmox_api']['ansible_port'] }}"
    api_token_id: "{{ proxmox_api_token_id }}"
    api_token_secret: "{{ proxmox_api_token_secret }}"
    validate_certs: "{{ proxmox_validate_certs }}"
    node: "{{ proxmox_node }}"
    name: "{{ sno_vm_name }}"
    state: started
  when: not __sno_deploy_cluster_deployed | bool
# ------------------------------------------------------------------
# Step 7: Wait for installation to complete (~60-90 min)
# async 5400 keeps the SSH/connection from timing out during the long
# wait; poll 30 checks the background job every 30 s.
# ------------------------------------------------------------------
- name: Wait for SNO installation to complete
  ansible.builtin.command:
    cmd: "{{ sno_install_dir }}/openshift-install agent wait-for install-complete --dir {{ sno_install_dir }} --log-level=info"
  async: 5400
  poll: 30
  # Fix: `wait-for` only polls; without changed_when the task always
  # reported "changed", contradicting the idempotency noted in the
  # file header.
  changed_when: false
  when: not __sno_deploy_cluster_deployed | bool
# ------------------------------------------------------------------
# Step 8: Persist credentials to Proxmox host
# Only copy if credentials do not already exist on the remote host,
# to prevent overwriting valid credentials on re-runs.
# ------------------------------------------------------------------
- name: Create credentials directory on Proxmox host
  ansible.builtin.file:
    path: "{{ sno_credentials_dir }}"
    state: directory
    mode: "0700"
  delegate_to: proxmox_host

# kubeadmin-password is the sentinel for "credentials already saved";
# both copy tasks below are gated on it.
- name: Check if credentials already exist on Proxmox host
  ansible.builtin.stat:
    path: "{{ sno_credentials_dir }}/kubeadmin-password"
  delegate_to: proxmox_host
  register: __sno_deploy_remote_creds

- name: Copy kubeconfig to Proxmox host
  ansible.builtin.copy:
    src: "{{ sno_install_dir }}/auth/kubeconfig"
    dest: "{{ sno_credentials_dir }}/kubeconfig"
    mode: "0600"
    backup: true
  delegate_to: proxmox_host
  when: not __sno_deploy_remote_creds.stat.exists

- name: Copy kubeadmin-password to Proxmox host
  ansible.builtin.copy:
    src: "{{ sno_install_dir }}/auth/kubeadmin-password"
    dest: "{{ sno_credentials_dir }}/kubeadmin-password"
    mode: "0600"
    backup: true
  delegate_to: proxmox_host
  when: not __sno_deploy_remote_creds.stat.exists

# ------------------------------------------------------------------
# Step 9: Eject CDROM so the VM never boots the agent ISO again
# ------------------------------------------------------------------
- name: Eject CDROM after successful installation
  ansible.builtin.command:
    cmd: "qm set {{ sno_vm_id }} --ide2 none,media=cdrom"
  delegate_to: proxmox_host
  changed_when: true
  when: not __sno_deploy_cluster_deployed | bool

# Shown only with -v (verbosity: 1).
- name: Display post-install info
  ansible.builtin.debug:
    msg:
      - "SNO installation complete!"
      - "API URL : https://api.{{ ocp_cluster_name }}.{{ ocp_base_domain }}:6443"
      - "Console : https://console-openshift-console.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
      - "Kubeconfig : {{ sno_credentials_dir }}/kubeconfig (on proxmox_host)"
      - "kubeadmin pass : {{ sno_credentials_dir }}/kubeadmin-password (on proxmox_host)"
    verbosity: 1

View File

@@ -0,0 +1,41 @@
---
# Entry point for the sno_deploy role.
#
# Each phase is gated by tags so individual steps can be run with --tags.
# When invoked from deploy_openshift.yml, individual task files are
# called directly via include_role + tasks_from to control play ordering.
#
# Each include carries its tag twice on purpose:
#   - the outer `tags:` makes the include statement itself match --tags;
#   - `apply: tags:` propagates the tag onto the tasks inside the
#     included file (dynamic include_tasks does not pass tags down).
- name: Create SNO VM in Proxmox
  ansible.builtin.include_tasks:
    file: create_vm.yml
    apply:
      tags: sno_deploy_vm
  tags: sno_deploy_vm

- name: Install SNO via agent-based installer
  ansible.builtin.include_tasks:
    file: install.yml
    apply:
      tags: sno_deploy_install
  tags: sno_deploy_install

- name: Configure OpenShift OAuth with OIDC
  ansible.builtin.include_tasks:
    file: configure_oidc.yml
    apply:
      tags: sno_deploy_oidc
  tags: sno_deploy_oidc

- name: Configure cert-manager and LetsEncrypt certificates
  ansible.builtin.include_tasks:
    file: configure_certmanager.yml
    apply:
      tags: sno_deploy_certmanager
  tags: sno_deploy_certmanager

- name: Delete kubeadmin user
  ansible.builtin.include_tasks:
    file: delete_kubeadmin.yml
    apply:
      tags: sno_deploy_delete_kubeadmin
  tags: sno_deploy_delete_kubeadmin

View File

@@ -0,0 +1,34 @@
---
# Generated by Ansible — do not edit by hand
# Source: roles/sno_deploy/templates/agent-config.yaml.j2
#
# AgentConfig for a single-node deployment: one host with a static IPv4
# address (NMState-style networkConfig); on SNO the single node is also
# the rendezvous host.
apiVersion: v1alpha1
kind: AgentConfig
metadata:
  name: {{ ocp_cluster_name }}
rendezvousIP: {{ sno_ip }}
hosts:
  - hostname: master-0
    # The installer matches this host entry to the booted machine by
    # MAC; sno_mac is discovered from the Proxmox VM config.
    interfaces:
      - name: primary
        macAddress: "{{ sno_mac }}"
    networkConfig:
      interfaces:
        - name: primary
          type: ethernet
          state: up
          mac-address: "{{ sno_mac }}"
          ipv4:
            enabled: true
            address:
              - ip: {{ sno_ip }}
                prefix-length: {{ sno_prefix_length }}
            dhcp: false
      dns-resolver:
        config:
          server:
            - {{ sno_nameserver }}
      routes:
        config:
          # Default route via the network gateway.
          - destination: 0.0.0.0/0
            next-hop-address: {{ sno_gateway }}
            next-hop-interface: primary
View File

@@ -0,0 +1,27 @@
---
# Generated by Ansible — do not edit by hand
# Source: roles/sno_deploy/templates/install-config.yaml.j2
#
# install-config for a single-node cluster: 0 workers, 1 control-plane
# replica, platform "none" (agent-based installer).
apiVersion: v1
baseDomain: {{ ocp_base_domain }}
metadata:
  name: {{ ocp_cluster_name }}
networking:
  networkType: OVNKubernetes
  machineNetwork:
    - cidr: {{ sno_machine_network }}
  # Standard OpenShift defaults for the cluster-internal pod/service CIDRs.
  clusterNetwork:
    - cidr: 10.128.0.0/14
      hostPrefix: 23
  serviceNetwork:
    - 172.30.0.0/16
compute:
  - name: worker
    replicas: 0
controlPlane:
  name: master
  replicas: 1
platform:
  none: {}
# NOTE(review): assumes vault_ocp_pull_secret is a mapping — to_json then
# emits it as one JSON line inside the literal block. If the vaulted value
# is already a JSON *string*, to_json double-encodes it; confirm its type.
pullSecret: |
  {{ vault_ocp_pull_secret | ansible.builtin.to_json }}
sshKey: "{{ ocp_ssh_public_key }}"

View File

@@ -0,0 +1,13 @@
---
# Computed internal variables - do not override

# oc binary path; overridable via oc_binary for non-PATH installs.
__sno_deploy_oc: "{{ oc_binary | default('oc') }}"
# Admin kubeconfig written by openshift-install under the install dir.
__sno_deploy_kubeconfig: "{{ sno_install_dir }}/auth/kubeconfig"
# Name of the openshift-config Secret holding the Keycloak client secret.
__sno_deploy_oidc_secret_name: "{{ oidc_provider_name | lower }}"
# NOTE(review): unlike the secret name above, this is not lowercased —
# the ConfigMap name is invalid if oidc_provider_name has uppercase; confirm.
__sno_deploy_oidc_ca_configmap_name: "{{ oidc_provider_name }}-oidc-ca-bundle"
# Redirect URI to register on the Keycloak client; the trailing path
# segment must equal the identityProvider name in the OAuth resource.
__sno_deploy_oidc_redirect_uri: "https://oauth-openshift.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}/oauth2callback/{{ oidc_provider_name }}"
# Keycloak realm issuer URL used as the OIDC issuer.
__sno_deploy_oidc_issuer: "{{ keycloak_url }}{{ keycloak_context }}/realms/{{ keycloak_realm }}"
# External API and apps-router hostnames for this cluster.
__sno_deploy_api_hostname: "api.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
__sno_deploy_apps_wildcard: "*.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}"
# Select the LetsEncrypt staging vs production ACME endpoint.
__sno_deploy_letsencrypt_server_url: >-
  {{ sno_deploy_letsencrypt_use_staging | bool |
     ternary(sno_deploy_letsencrypt_staging_server, sno_deploy_letsencrypt_server) }}