VMware builds

2021-06-28 17:49:11 -04:00
parent 833f589d56
commit dfe9dea2ca
79 changed files with 1986 additions and 44 deletions

1
.gitignore vendored
View File

@@ -113,3 +113,4 @@ venv.bak/
keys/
collections/ansible_collections/
.vscode/
.vaultpw

1
bindep.txt Normal file
View File

@@ -0,0 +1 @@
python38-devel

View File

@@ -1,12 +1,14 @@
---
- name: VM Provisioning
hosts: tag_ansible:&tag_tower:&cluster_ToalLabRHV
hosts: tag_ansible:&tag_tower
connection: local
collections:
- redhat.rhv
tasks:
- block:
- name: Obtain SSO token from username / password credentials
redhat.rhv.ovirt_auth:
ovirt_auth:
url: "{{ ovirt_url }}"
username: "{{ ovirt_username }}"
password: "{{ ovirt_password }}"
@@ -24,7 +26,8 @@
state: present
- name: VM Created
ovirt_vm:
- name: Add NIC to VM

View File

@@ -9,22 +9,22 @@
architecture: x86_64
build: true
comment: "RHEL 8 Template"
compute_profile: "3-Large"
compute_resource: "Home Lab"
compute_attributes:
cpus: 4
memory_mb: 4096
content_view: "RHEL8"
compute_profile: "4c8g"
compute_attributes:
start: "1"
compute_resource: "ToalLab"
content_view: "composite-rhel8"
domain: "sandbox.toal.ca"
enabled: true
hostgroup: "Lab RHEL Hosts/RHEL 8"
kickstart_repository:
hostgroup: "RHEL8"
lifecycle_environment: "Library"
location: "Lab"
name: "rhel8build.sandbox.toal.ca"
operatingsystem: "Red Hat 8.3"
operatingsystem: "RedHat 8.3"
organization: "Toal.ca"
password: "{{ vault_sat6_pass }}"
server_url: "https://sat6.lab.toal.ca/"
subnet: "192.168.16.0"
username: "{{ vault_sat6_user }}"
server_url: "https://satellite1.mgmt.toal.ca/"
subnet: "sandbox"
username: "{{ satellite_admin_user }}"
password: "{{ satellite_admin_pass }}"
validate_certs: no

View File

@@ -1,4 +1,4 @@
# Playbook to build new VMs in RHV Cluster
# Playbook to build new VMs in RHV Cluste
# Currently only builds RHEL VMs
# Create Host
@@ -41,28 +41,28 @@
virtual_machine: "{{ inventory_hostname }}"
state: new
register: new_ip
when: primary_ipv4 is undefined
when: primary_ip4 is undefined
delegate_to: localhost
- set_fact:
primary_ipv4: "{{ new_ip.ip_address.address|ipaddr('address') }}"
primary_ip4: "{{ new_ip.ip_address.address|ipaddr('address') }}"
vm_hostname: "{{ inventory_hostname.split('.')[0] }}"
vm_domain: "{{ inventory_hostname.split('.',1)[1] }}"
delegate_to: localhost
when: primary_ipv4 is undefined
when: primary_ip4 is undefined
- name: Primary IPv4 Assigned in Netbox
netbox_virtual_machine:
netbox_url: "{{ netbox_api }}"
netbox_token: "{{ netbox_token }}"
data:
primary_ip4: "{{ primary_ipv4 }}"
primary_ip4: "{{ primary_ip4 }}"
name: "{{ inventory_hostname }}"
delegate_to: localhost
- name: Primary IPv4 Address
debug:
var: primary_ipv4
var: primary_ip4
- name: Ensure IP Address in IdM
ipadnsrecord:
@@ -171,7 +171,7 @@
hostgroup: "RHEL8/RHEL8 Sandbox"
organization: Toal.ca
location: Lab
ip: "{{ primary_ipv4 }}"
ip: "{{ primary_ip4 }}"
mac: "{{ interface_result.results[0].nic.mac.address }}" #fragile
build: "{{ vm_result.changed |ternary(true,false) }}"
validate_certs: no
@@ -224,10 +224,10 @@
# netbox_virtual_machine:
# data:
# name: "{{ inventory_hostname }}"
# primary_ip4: "{{ primary_ipv4 }}"
# primary_ip4: "{{ primary_ip4 }}"
# netbox_url: "{{ netbox_api }}"
# netbox_token: "{{ netbox_token }}"
# state: present
# delegate_to: localhost
#TODO: Clear Build tag
#TODO: Clear Build tag

View File

@@ -34,3 +34,4 @@ collections:
- name: community.crypto
source: https://galaxy.ansible.com
- name: onepassword.connect

25
context/Containerfile Normal file
View File

@@ -0,0 +1,25 @@
ARG ANSIBLE_RUNNER_IMAGE=quay.io/ansible/ansible-runner:stable-2.9-devel
ARG PYTHON_BUILDER_IMAGE=quay.io/ansible/python-builder:latest
FROM $ANSIBLE_RUNNER_IMAGE as galaxy
ADD _build/ansible.cfg ~/.ansible.cfg
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS=
ADD _build /build
WORKDIR /build
RUN ansible-galaxy role install -r requirements.yml --roles-path /usr/share/ansible/roles
RUN ansible-galaxy collection install $ANSIBLE_GALAXY_CLI_COLLECTION_OPTS -r requirements.yml --collections-path /usr/share/ansible/collections
FROM $PYTHON_BUILDER_IMAGE as builder
ADD _build/requirements_combined.txt /tmp/src/requirements.txt
ADD _build/bindep_combined.txt /tmp/src/bindep.txt
RUN assemble
FROM $ANSIBLE_RUNNER_IMAGE
COPY --from=galaxy /usr/share/ansible /usr/share/ansible
COPY --from=builder /output/ /output/
RUN /output/install-from-bindep && rm -rf /output/wheels

View File

@@ -0,0 +1,59 @@
[defaults]
# Use the YAML callback plugin.
stdout_callback = yaml
# Profile
#stdout_callback = profile_tasks
# Turn on pipelining for speed
pipelining = True
# Increase polling speed, to check completed tasks
poll_interval = 5
# Increase forks for more better parallelism. :)
forks = 50
# This can get annoying for non-production environments, so disable it.
host_key_checking = False
# These are annoying during playbook authoring / testing.
retry_files_enabled = False
callback_whitelist = timer,profile_tasks
nocows = 1
deprecation_warnings=False
#display_skipped_hosts=False
# Installs collections into [current dir]/ansible_collections/namespace/collection_name
#collections_paths = ./
force_valid_group_names = always
interpreter_python = auto
# Installs roles into [current dir]/roles/namespace.rolename
roles_path = ./roles
inventory=/Users/ptoal/.ansible/inventories/toallab/inventory
inventory_plugins = host_list, script, yaml, ini, auto
#vault_identity_list = toallab@/home/ptoal/.toallab.vault
[ssh_connection]
scp_if_ssh = True
ssh_args = -o ControlMaster=auto -o ControlPersist=300s -o PreferredAuthentications=publickey,password
control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
[persistent_connection]
connect_timeout = 60
ssh_type = libssh
[galaxy]
server_list = automation_hub, public, staging
[galaxy_server.published_repo]
url=https://hub.mgmt.toal.ca/api/galaxy/content/published/
token="1a8b080f80ce789e64bd81257cffef3f8368f4b5"
[galaxy_server.automation_hub]
url=https://cloud.redhat.com/api/automation-hub/
auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token
token="eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJhZDUyMjdhMy1iY2ZkLTRjZjAtYTdiNi0zOTk4MzVhMDg1NjYifQ.eyJpYXQiOjE2MTg4NDU3NTIsImp0aSI6IjNkYTBjMTRkLTAyMGEtNGYxNC05YTFlLWI4NzA5MWQ4NWM1OSIsImlzcyI6Imh0dHBzOi8vc3NvLnJlZGhhdC5jb20vYXV0aC9yZWFsbXMvcmVkaGF0LWV4dGVybmFsIiwiYXVkIjoiaHR0cHM6Ly9zc28ucmVkaGF0LmNvbS9hdXRoL3JlYWxtcy9yZWRoYXQtZXh0ZXJuYWwiLCJzdWIiOiJmOjUyOGQ3NmZmLWY3MDgtNDNlZC04Y2Q1LWZlMTZmNGZlMGNlNjpwdG9hbCIsInR5cCI6Ik9mZmxpbmUiLCJhenAiOiJjbG91ZC1zZXJ2aWNlcyIsIm5vbmNlIjoiNDVhOTA4NTUtZjNiYi00Mjg5LTlhZWMtY2VmNjQyNWZkZGNmIiwic2Vzc2lvbl9zdGF0ZSI6ImM0ZjRmODc1LTk3ZDMtNDljYy04MzIxLTQ3NGI1ZjFjYTg4NCIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIn0.KQCwoU5V46Pmi0F9dcGasHC02cKbh68whrYtCP3lz60"
[galaxy_server.public]
url=https://galaxy.ansible.com/
[galaxy_server.staging]
url=https://hub.mgmt.toal.ca/api/galaxy/content/staging/
token="1a8b080f80ce789e64bd81257cffef3f8368f4b5"

View File

@@ -0,0 +1,10 @@
openssl-devel [platform:rpm compile] # from collection ovirt.ovirt, redhat.rhv
gcc [platform:rpm compile] # from collection ovirt.ovirt, redhat.rhv
libcurl-devel [platform:rpm compile] # from collection ovirt.ovirt, redhat.rhv
libxml2-devel [platform:rpm compile] # from collection ovirt.ovirt, redhat.rhv
python3-pycurl [platform:rpm] # from collection ovirt.ovirt, redhat.rhv
python3-netaddr [platform:rpm] # from collection ovirt.ovirt, redhat.rhv
python3-jmespath [platform:rpm] # from collection ovirt.ovirt, redhat.rhv
python3-passlib [platform:rpm epel] # from collection ovirt.ovirt, redhat.rhv
qemu-img [platform:rpm] # from collection ovirt.ovirt, redhat.rhv
python38-devel # from collection user

View File

@@ -0,0 +1,37 @@
---
collections:
- name: davidban77.gns3
source: https://galaxy.ansible.com
- name: netbox.netbox
source: https://galaxy.ansible.com
- name: freeipa.ansible_freeipa
source: https://galaxy.ansible.com
# source: https://hub.mgmt.toal.ca/api/galaxy/content/published/
- name: ovirt.ovirt
source: https://galaxy.ansible.com
- name: redhat.rhv
source: https://cloud.redhat.com/api/automation-hub/
- name: redhat.satellite
source: https://cloud.redhat.com/api/automation-hub/
- name: community.general
source: https://galaxy.ansible.com
- name: jjaswanson4.install_satellite
source: https://galaxy.ansible.com
- name: jjaswanson4.configure_satellite
source: https://galaxy.ansible.com
- name: redhat.satellite
source: https://cloud.redhat.com/api/automation-hub/
- name: community.crypto
source: https://galaxy.ansible.com
- name: onepassword.connect

View File

@@ -0,0 +1,7 @@
gns3fy>=0.5.2 # from collection davidban77.gns3
ovirt-engine-sdk-python>=4.4.10,>=4.4.10 # from collection ovirt.ovirt,redhat.rhv
requests>=2.4.2 # from collection redhat.satellite
ipaddress # from collection redhat.satellite
pynetbox # from collection user
boto3 # from collection user
packaging # from collection user

12
execution-environment.yml Normal file
View File

@@ -0,0 +1,12 @@
---
version: 1
build_arg_defaults:
ANSIBLE_RUNNER_IMAGE: 'quay.io/ansible/ansible-runner:stable-2.9-devel'
ansible_config: '../../../.ansible.cfg'
dependencies:
galaxy: collections/requirements.yml
python: requirements.txt
system: bindep.txt

3
requirements.txt Normal file
View File

@@ -0,0 +1,3 @@
pynetbox
boto3
packaging

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:33 2021
install_date: Wed May 5 16:13:59 2021
version: 3.1.0

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:32 2021
install_date: Wed May 5 16:13:58 2021
version: 1.10.0

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:44 2021
install_date: Wed May 5 16:14:11 2021
version: master

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:42 2021
install_date: Wed May 5 16:14:10 2021
version: 2.1.0

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:45 2021
install_date: Wed May 5 16:14:13 2021
version: 1.3.0

View File

@@ -11,5 +11,7 @@ ansible_port: 22
instance_wait_retry_limit: 600
instance_wait_connection_timeout: 300
ip_wait_retry_limit: 600
wait_for_static_ip_assigned: yes

View File

@@ -1,2 +1,2 @@
install_date: Thu Apr 22 02:46:27 2021
install_date: Wed May 5 16:14:07 2021
version: ''

View File

@@ -20,6 +20,6 @@ galaxy_info:
- rhev
collections:
- ovirt.ovirt
- ovirt.ovirt:==1.3.1
dependencies: []

View File

@@ -47,7 +47,7 @@
until: deployed_instances.finished
retries: "{{ instance_wait_retry_limit }}"
delay: 10
no_log: false
no_log: true
with_items: "{{ deploy.results }}"
when:
- nodes is defined
@@ -108,6 +108,7 @@
nic_netmask: "{{ item.networks[0].netmask | default('') }}"
nic_gateway: "{{ item.networks[0].gateway | default('') }}"
nic_name: "{{ item.networks[0].nic_name | default(item.networks[0].device_name) | default('eth0') }}"
nic_on_boot: true
host_name: "{{ item.name }}.{{ item.domain | default('') }}"
dns_servers: "{{ item.dns_servers|join(' ') | default([]) }}"
custom_script: "{{ item.custom_script | default('') }}"
@@ -163,7 +164,6 @@
- name: assign tags to provisioned vms
ovirt_tag:
auth: "{{ ovirt_auth }}"
name: "{{ item.1 }}_{{ item.0.item.item[item.1] }}"
vms: ["{{ item.0.item.item.name }}"]
state: attached

View File

@@ -59,7 +59,7 @@
- nics.ovirt_nics[0].reported_devices[0].ips[0].address is defined
- nics.ovirt_nics[0].reported_devices[0].ips[0].version == 'v4'
- nics.ovirt_nics[0].reported_devices[0].ips[0].address == item.networks[0].ip
retries: 300
retries: "{{ ip_wait_retry_limit }}"
delay: 10
with_items: "{{ nodes }}"
when:

View File

@@ -59,7 +59,7 @@
- nics.ansible_facts.ovirt_nics[0].reported_devices[0].ips[0].address is defined
- nics.ansible_facts.ovirt_nics[0].reported_devices[0].ips[0].version == 'v4'
- nics.ansible_facts.ovirt_nics[0].reported_devices[0].ips[0].address == item.networks[0].ip
retries: 300
retries: "{{ ip_wait_retry_limit }}"
delay: 10
with_items: "{{ nodes }}"
when:

View File

@@ -1,2 +1,2 @@
install_date: Thu Apr 22 15:31:09 2021
install_date: Wed May 5 16:14:02 2021
version: ''

View File

@@ -0,0 +1,29 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 Orcun Atakan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,86 @@
# rhel_ovirt_template
This repo contains an Ansible role that builds a RHEL/CentOS VM template from an ISO file on Ovirt/RHV.
You can run this role as a part of CI/CD pipeline for building RHEL/CentOS templates on Ovirt/RHV from an ISO file.
> **_Note:_** This role is provided as an example only. Do not use this in production. You can fork/clone and add/remove steps for your environment based on your organization's security and operational requirements.
Requirements
------------
You need to have the following packages installed on your control machine:
- mkisofs
- genisoimage
You need to enable the qemu_cmdline hook on your RHV/oVirt environment; this is required to allow attaching multiple ISO files. Follow the instructions documented here:
https://www.ovirt.org/develop/developer-guide/vdsm/hook/qemucmdline.html
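As a rough sketch (not part of this role), enabling the hook with Ansible could look like the following; the steps mirror the linked oVirt documentation, while the host group names rhv_hosts and rhv_engine are assumptions:
- name: Install the qemucmdline VDSM hook on the hypervisors
  hosts: rhv_hosts
  become: yes
  tasks:
    - name: Ensure vdsm-hook-qemucmdline is installed
      package:
        name: vdsm-hook-qemucmdline
        state: present
- name: Expose the qemu_cmdline custom VM property on the engine
  hosts: rhv_engine
  become: yes
  tasks:
    - name: Define the qemu_cmdline custom property (per the oVirt hook docs)
      command: engine-config -s "UserDefinedVMProperties=qemu_cmdline=^.*$"
    - name: Restart ovirt-engine so the new property is picked up
      service:
        name: ovirt-engine
        state: restarted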
Before you can use this role, make sure the RHEL/CentOS install media ISO file has been uploaded to an ISO domain on your RHV/oVirt environment.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables in defaults/main.yml and vars/main.yml, and any variables that can or should be set via parameters to the role. Any variables read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
Import the ovirt.ovirt collection.
A list of roles that this role utilizes:
- oatakan.rhn
- oatakan.rhel_upgrade
- oatakan.rhel_template_build
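These dependencies can be pulled in with a requirements file; a minimal requirements.yml sketch (the public Galaxy source is an assumption) would be:
---
collections:
  - name: ovirt.ovirt
roles:
  - name: oatakan.rhn
  - name: oatakan.rhel_upgrade
  - name: oatakan.rhel_template_build
Install them with ansible-galaxy collection install -r requirements.yml and ansible-galaxy role install -r requirements.yml.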
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
# import ovirt.ovirt collections
- name: create a ovirt rhel template
hosts: all
gather_facts: False
connection: local
become: no
vars:
template_force: yes #overwrite existing template with the same name
export_ovf: no # export the template to export domain upon creation
local_account_password: ''
local_administrator_password: ''
linux_distro_name: rhel_81 # this needs to be one of the standard values see 'os_short_names' var
template_vm_name: rhel81-x64-v1
template_vm_root_disk_size: 10
template_vm_memory: 4096
template_vm_efi: false # you need to install efi file to use this, false should be fine in most cases
iso_file_name: '' # name of the iso file
ovirt_datacenter: '' # name of the datacenter
ovirt_cluster: '' # name of the cluster
ovirt_data_domain: '' # name of the data domain
ovirt_export_domain: '' # name of the export domain
ovirt_iso_domain: '' # this is deprecated as of 4.3 you can omit if not used
template_vm_network_name: ovirtmgmt
template_vm_ip_address: 192.168.10.95 # static ip is required
template_vm_netmask: 255.255.255.0
template_vm_gateway: 192.168.10.254
template_vm_domain: example.com
template_vm_dns_servers:
- 8.8.4.4
- 8.8.8.8
roles:
- oatakan.rhel_ovirt_template
License
-------
MIT
Author Information
------------------
Orcun Atakan

View File

@@ -0,0 +1,91 @@
---
install_updates: yes
instance_wait_retry_limit: 300
instance_wait_connection_timeout: 600
# this will remove existing template with the same name
template_force: no
template_found: no
export_ovf: no
datastore_iso_folder: iso
datastore_ova_folder: ova
remove_vm_on_error: yes
custom_efi_enabled: no
custom_efi_path: /usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd
qemu_second_cdrom_device_bus_type: ide
qemu_second_cdrom_device_bus_id: 3
qemu_second_cdrom_device_bus_unit: 0
local_administrator_password: Chang3MyP@ssw0rd21
local_account_username: ansible
local_account_password: Chang3MyP@ssw0rd21
linux_distro_name: rhel_77
iso_file_name: CentOS-7-x86_64-DVD-1908.iso
linux_ks_folder: rhel7
template_vm_name: centos77-x64-bigdisk_v1
template_vm_root_disk_size: 300
template_vm_root_disk_format: cow
template_vm_root_disk_interface: virtio
template_vm_memory: 4096
template_vm_cpu: 2
template_vm_guest_id: rhel_7x64
template_vm_efi: no
template_vm_network_name: ovirtmgmt
template_vm_ip_address: 192.168.10.96
template_vm_netmask: 255.255.255.0
template_vm_gateway: 192.168.10.254
template_vm_domain: example.com
template_vm_dns_servers:
- 8.8.4.4
- 8.8.8.8
template_convert_timeout: 600
template_convert_seal: no
template_selinux_enabled: no
ovirt_datacenter: mydatacenter
ovirt_cluster: production
ovirt_folder: template
ovirt_data_domain: data_domain
ovirt_export_domain: export_domain
ovirt_iso_domain: iso_domain
os_short_names:
rhel_77:
ks_folder: rhel7
guest_id: rhel_7x64
rhel_78:
ks_folder: rhel7
guest_id: rhel_7x64
rhel_80:
ks_folder: rhel8
guest_id: rhel_8x64
rhel_81:
ks_folder: rhel8
guest_id: rhel_8x64
rhel_82:
ks_folder: rhel8
guest_id: rhel_8x64
rhel_83:
ks_folder: rhel8
guest_id: rhel_8x64
centos_77:
ks_folder: rhel7
guest_id: rhel_7x64
centos_80:
ks_folder: rhel8
guest_id: rhel_8x64
centos_81:
ks_folder: rhel8
guest_id: rhel_8x64
centos_82:
ks_folder: rhel8
guest_id: rhel_8x64

View File

@@ -0,0 +1,2 @@
install_date: Wed May 5 16:14:09 2021
version: master

View File

@@ -0,0 +1,28 @@
---
galaxy_info:
author: Orcun Atakan
description: Ansible galaxy role for building a RHEL/CentOS VM template from an ISO file on Ovirt/RHV.
role_name: rhel_ovirt_template
company: Red Hat
license: MIT
min_ansible_version: 2.5
platforms:
- name: EL
versions:
- all
cloud_platforms:
- oVirt
galaxy_tags:
- rhel
- ovirt
- rhv
- cloud
- multicloud
- template
dependencies: []

View File

@@ -0,0 +1,10 @@
---
- name: convert to template
ovirt.ovirt.ovirt_template:
auth: "{{ ovirt_auth }}"
name: "{{ template.name }}"
vm: "{{ template.name }}"
cluster: "{{ providers.ovirt.cluster }}"
timeout: "{{ template_convert_timeout }}"
seal: "{{ template_convert_seal }}"
when: template is defined

View File

@@ -0,0 +1,22 @@
---
- block:
- name: remove iso file from data_domain
ovirt.ovirt.ovirt_disk:
auth: "{{ ovirt_auth }}"
name: "{{ iso_file }}"
storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
state: absent
rescue:
- include_tasks: wait_iso_disk_unlock_pre29.yml
when: ansible_version.full is version('2.9', '<')
- include_tasks: wait_iso_disk_unlock.yml
when: ansible_version.full is version('2.9', '>=')
- name: remove iso file from data_domain
ovirt.ovirt.ovirt_disk:
auth: "{{ ovirt_auth }}"
name: "{{ iso_file }}"
storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
state: absent

View File

@@ -0,0 +1,29 @@
---
- name: validate file
stat:
path: "{{ playbook_dir }}/{{ temp_directory }}/linux_{{ linux_distro_name }}_ks_autogen.iso"
get_checksum: no
register: iso_file_check
- name: upload iso file to data_domain
ovirt.ovirt.ovirt_disk:
auth: "{{ ovirt_auth }}"
name: "{{ iso_file }}"
upload_image_path: "{{ iso_file_check.stat.path }}"
storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
size: "{{ (iso_file_check.stat.size/1024/1024)|round(0, 'ceil')|int|string }}MiB"
wait: true
bootable: true
format: raw
content_type: iso
force: yes
register: disk_iso_file
when: iso_file_check.stat.exists
- name: set iso file disk id
set_fact:
ks_iso_file_disk_id: "{{ disk_iso_file.disk.id }}"
ks_iso_file_image_id: "{{ disk_iso_file.disk.image_id }}"
ovirt_datacenter_id: "{{ disk_iso_file.disk.quota.href | regex_replace('^/ovirt-engine/api/datacenters/(.*)/quotas.*$', '\\1') }}"
ovirt_datastore_id: "{{ disk_iso_file.disk.storage_domains[0].id }}"

View File

@@ -0,0 +1,20 @@
---
- name: export template to export domain
ovirt.ovirt.ovirt_template:
auth: "{{ ovirt_auth }}"
state: exported
name: "{{ template.name }}"
export_domain: "{{ providers.ovirt.export_domain }}"
cluster: "{{ providers.ovirt.cluster }}"
async: 7200
poll: 0
register: export_ovf_file
- name: wait for export to complete
async_status:
jid: "{{ export_ovf_file.ansible_job_id }}"
register: ovf
until: ovf.finished
retries: "{{ instance_wait_retry_limit }}"
delay: 10

View File

@@ -0,0 +1,138 @@
---
- name: obtain SSO token using username/password credentials
ovirt.ovirt.ovirt_auth:
url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
insecure: yes
- include_tasks: preflight_check_pre29.yml
when: ansible_version.full is version('2.9', '<')
- include_tasks: preflight_check.yml
when: ansible_version.full is version('2.9', '>=')
# remove existing template
- block:
- include_tasks: remove_template.yml
when:
- template_force|bool
- template_found|bool
- block:
- include_tasks: make_iso.yml
- include_tasks: provision_vm.yml
- name: refresh inventory
meta: refresh_inventory
- name: clear gathered facts
meta: clear_facts
- name: clear any host errors
meta: clear_host_errors
- name: add host
add_host:
hostname: template_vm
ansible_host: '{{ template_vm_ip_address }}'
host_key_checking: false
ansible_user: "{{ local_account_username }}"
ansible_password: "{{ local_account_password }}"
ansible_port: "{{ vm_ansible_port | default('22') }}"
ansible_ssh_common_args: '-o UserKnownHostsFile=/dev/null'
ansible_python_interpreter: auto
- name: run setup module
setup:
delegate_to: template_vm
connection: ssh
- block:
- include_role:
name: oatakan.rhn
apply:
delegate_to: template_vm
connection: ssh
become: yes
- include_role:
name: oatakan.rhel_upgrade
apply:
delegate_to: template_vm
connection: ssh
become: yes
when: install_updates|bool
- include_role:
name: oatakan.rhel_template_build
apply:
delegate_to: template_vm
connection: ssh
become: yes
vars:
target_ovirt: yes
always:
- include_role:
name: oatakan.rhn
apply:
delegate_to: template_vm
connection: ssh
become: yes
vars:
role_action: unregister
- name: force handlers to run before stopping the vm
meta: flush_handlers
- name: refresh SSO credentials
ovirt.ovirt.ovirt_auth:
url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
insecure: yes
- include_tasks: stop_vm.yml
- include_tasks: convert_to_template.yml
- include_tasks: export_ovf.yml
when: export_ovf|bool
rescue:
- name: refresh SSO credentials
ovirt.ovirt.ovirt_auth:
url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
insecure: yes
- include_tasks: remove_template.yml
when: remove_vm_on_error|bool
always:
- name: refresh SSO credentials
ovirt.ovirt.ovirt_auth:
url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
insecure: yes
- include_tasks: remove_vm.yml
- include_tasks: datastore_iso_remove.yml
- name: remove temporary directory
file:
path: "{{ temp_directory }}"
state: absent
- name: logout from oVirt
ovirt.ovirt.ovirt_auth:
state: absent
ovirt_auth: "{{ ovirt_auth }}"

View File

@@ -0,0 +1,29 @@
---
- block:
- name: create temporary directory
file:
path: "{{ temp_directory }}/ks_iso"
state: directory
- name: create ks.cfg file
template:
src: "{{ linux_ks_folder }}/ks.cfg.j2"
dest: "{{ temp_directory }}/ks_iso/ks.cfg"
- name: create iso
command: >
mkisofs -U -A "OEMDRV" -V "OEMDRV"
-volset "OEMDRV" -J -joliet-long -r -v -T
-o {{ playbook_dir }}/{{ temp_directory }}/linux_{{ linux_distro_name }}_ks_autogen.iso .
args:
chdir: "{{ playbook_dir }}/{{ temp_directory }}/ks_iso"
- include_tasks: datastore_upload.yml
always:
- name: remove temporary files
file:
path: "{{ temp_directory }}/{{ item }}"
state: absent
loop:
- linux_{{ linux_distro_name }}_ks_autogen.iso
- ks_iso/

View File

@@ -0,0 +1,70 @@
---
- name: get the datacenter name
ovirt.ovirt.ovirt_datacenter_info:
auth: "{{ ovirt_auth }}"
pattern: "Clusters.name = {{ providers.ovirt.cluster }}"
register: datacenter_info
- name: get storage information
ovirt.ovirt.ovirt_storage_domain_info:
auth: "{{ ovirt_auth }}"
pattern: "datacenter={{ datacenter_info.ovirt_datacenters[0].name }}"
register: storage_info
when:
- template_disk_storage is undefined
- name: get data domain
set_fact:
disk_storage_domain: "{{ storage_info.ovirt_storage_domains|json_query(the_query)|list|first|default(None) }}"
when:
- template_disk_storage is undefined
vars:
the_query: "[?type=='data']"
- name: get iso domain (deprecated as of oVirt/RHV 4.3)
set_fact:
iso_domain: "{{ storage_info.ovirt_storage_domains|json_query(the_query)|list|first|default(None) }}"
vars:
the_query: "[?type=='iso']"
- name: check if template already exists
ovirt.ovirt.ovirt_template_info:
auth: "{{ ovirt_auth }}"
pattern: "name={{ template.name }} and datacenter={{ datacenter_info.ovirt_datacenters[0].name }}"
register: template_info
- block:
- name: set template_found to yes
set_fact:
template_found: yes
- name: fail with message
fail:
msg: "Existing template found on ovirt/rhv: {{ template.name }}"
when: not template_force|bool
when:
- template_info.ovirt_templates is defined
- template_info.ovirt_templates | length > 0
- name: check iso file on data domain
ovirt.ovirt.ovirt_disk_info:
auth: "{{ ovirt_auth }}"
pattern: "name={{ iso_file_name }}"
register: ovirt_disk_main_iso
when: iso_file_name is defined
- name: set file id of the iso file
set_fact:
iso_file_id: "{{ ovirt_disk_main_iso.ovirt_disks[0].id }}"
when:
- ovirt_disk_main_iso.ovirt_disks | length > 0
- ovirt_disk_main_iso.ovirt_disks[0].id is defined
- ovirt_disk_main_iso.ovirt_disks[0].content_type == 'iso'
- name: fail with message
fail:
msg: "iso file ({{ iso_file_name }}) could not be found on the data domain and iso domain does not exists"
when:
- iso_file_id is undefined
- iso_domain is undefined or iso_domain|length == 0

View File

@@ -0,0 +1,67 @@
---
- name: get the datacenter name (<2.9)
ovirt_datacenter_facts:
auth: "{{ ovirt_auth }}"
pattern: "Clusters.name = {{ providers.ovirt.cluster }}"
- name: get storage information (<2.9)
ovirt_storage_domain_facts:
auth: "{{ ovirt_auth }}"
pattern: "datacenter={{ ovirt_datacenters[0].name }}"
when:
- template_disk_storage is undefined
- name: get data domain (<2.9)
set_fact:
disk_storage_domain: "{{ ovirt_storage_domains|json_query(the_query)|list|first }}"
when:
- template_disk_storage is undefined
vars:
the_query: "[?type=='data']"
- name: get iso domain (deprecated as of oVirt/RHV 4.3)
set_fact:
iso_domain: "{{ ovirt_storage_domains|json_query(the_query)|list|first }}"
vars:
the_query: "[?type=='iso']"
- name: check if template already exists (<2.9)
ovirt_template_facts:
auth: "{{ ovirt_auth }}"
pattern: "name={{ template.name }} and datacenter={{ ovirt_datacenters[0].name }}"
- block:
- name: set template_found to yes
set_fact:
template_found: yes
- name: fail with message
fail:
msg: "Existing template found on ovirt/rhv: {{ template.name }}"
when: not template_force|bool
when:
- ovirt_templates is defined
- ovirt_templates | length > 0
- name: check iso file on data domain
ovirt_disk_facts:
auth: "{{ ovirt_auth }}"
pattern: "name={{ iso_file_name }}"
when: iso_file_name is defined
- name: set file id of the iso file
set_fact:
iso_file_id: "{{ ovirt_disks[0].id }}"
when:
- ovirt_disks | length > 0
- ovirt_disks[0].id is defined
- ovirt_disks[0].content_type == 'iso'
- name: fail with message
fail:
msg: "iso file ({{ template.name }}) could not be found on the data domain and iso domain does not exists"
when:
- iso_file_id is undefined
- iso_domain is undefined or iso_domain|length == 0

View File

@@ -0,0 +1,123 @@
---
- name: provision a new vm
ovirt.ovirt.ovirt_vm:
auth: "{{ ovirt_auth }}"
name: "{{ template.name }}"
cluster: "{{ providers.ovirt.cluster|default('Default') }}"
state: present
wait: yes
memory: "{{ template.memory }}MiB"
cpu_sockets: "{{ template.cpu }}"
bios_type: "{{ template.bios_type | default(omit) }}"
boot_devices:
- hd
- cdrom
cd_iso: "{{ template.cd_iso }}"
type: server
high_availability: true
nics:
- name: nic1
profile_name: "{{ template.networks[0].name }}"
network: "{{ template.networks[0].name }}"
custom_properties: "{{ custom_properties | default(omit) }}"
operating_system: "{{ template_vm_guest_id | default(omit) }}"
async: 7200
poll: 0
register: deploy
- name: wait for instance creation to complete
async_status: jid="{{ deploy.ansible_job_id }}"
register: instance
until: instance.finished
retries: "{{ instance_wait_retry_limit }}"
delay: 10
- name: create a disk
ovirt.ovirt.ovirt_disk:
auth: "{{ ovirt_auth }}"
name: "{% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}"
vm_name: "{{ template.name }}"
size: "{{ item.size | default(omit) }}"
format: "{{ item.format | default(omit) }}"
interface: "{{ item.interface | default(omit) }}"
bootable: "{{ item.bootable | default(omit) }}"
storage_domain: "{{ item.storage_domain | default(omit) }}"
activate: yes
state: present
wait: yes
async: 7200
poll: 0
register: create_disks
loop: "{{ template.disks }}"
when:
- template is defined
- template.disks is defined
- name: wait for disk creation to complete
async_status:
jid: "{{ item.ansible_job_id }}"
register: disks_creation
until: disks_creation.finished
retries: "{{ instance_wait_retry_limit }}"
delay: 10
loop: "{{ create_disks.results }}"
when:
- template is defined
- create_disks.results is defined
- item.ansible_job_id is defined
- include_tasks: wait_disk_unlock_pre29.yml
when:
- ansible_version.full is version('2.9', '<')
- template is defined
- template.disks is defined
- disks_creation.results is defined
- include_tasks: wait_disk_unlock.yml
when:
- ansible_version.full is version('2.9', '>=')
- template is defined
- template.disks is defined
- disks_creation.results is defined
- name: assign tags to provisioned vms
ovirt.ovirt.ovirt_tag:
name: "{{ item }}_{{ instance.item.item[item] }}"
vms: ["{{ instance.item.item.name }}"]
state: attached
loop:
- app_name
- role
when:
- template is defined
- instance is defined
- instance.vm is defined
- instance.item.item[item] is defined
- name: start vm
ovirt.ovirt.ovirt_vm:
auth: "{{ ovirt_auth }}"
name: "{{ template.name }}"
cluster: "{{ providers.ovirt.cluster|default('Default') }}"
state: running
async: 7200
poll: 0
register: start
- name: wait for instance creation to complete
async_status: jid="{{ start.ansible_job_id }}"
register: instance
until: instance.finished
retries: "{{ instance_wait_retry_limit }}"
delay: 10
- name: waiting for server to come online
wait_for:
host: "{{ template.networks[0].ip }}"
port: "{{ template.ansible_port | default(vm_ansible_port) | default(ansible_port) | default('22') }}"
timeout: "{{ instance_wait_connection_timeout }}"
when:
- instance is changed
- template is defined
ignore_errors: yes

View File

@@ -0,0 +1,20 @@
---
- name: remove template
ovirt.ovirt.ovirt_template:
auth: "{{ ovirt_auth }}"
cluster: "{{ providers.ovirt.cluster }}"
name: "{{ template.name }}"
state: absent
async: 7200
poll: 0
register: undeploy
when: template is defined
- name: wait for template deletion to complete
async_status:
jid: "{{ undeploy.ansible_job_id }}"
register: instance
until: instance.finished
retries: "{{ instance_wait_retry_limit }}"
delay: 10

View File

@@ -0,0 +1,20 @@
---
- name: remove vm
ovirt.ovirt.ovirt_vm:
auth: "{{ ovirt_auth }}"
cluster: "{{ providers.ovirt.cluster }}"
name: "{{ template.name }}"
state: absent
async: 7200
poll: 0
register: undeploy
when: template is defined
- name: wait for vm deletion to complete
async_status:
jid: "{{ undeploy.ansible_job_id }}"
register: instance
until: instance.finished
retries: "{{ instance_wait_retry_limit }}"
delay: 10

View File

@@ -0,0 +1,45 @@
---
- block:
- name: shutdown guest vm
ovirt.ovirt.ovirt_vm:
auth: "{{ ovirt_auth }}"
cluster: "{{ providers.ovirt.cluster }}"
name: "{{ template.name }}"
state: stopped
async: 7200
poll: 0
register: shutdown
when: template is defined
- name: wait for server to stop responding
wait_for:
host: "{{ template_vm_ip_address }}"
port: "{{ vm_ansible_port | default('22') }}"
timeout: 120
state: stopped
- include_tasks: wait_vm_poweredoff_pre29.yml
when: ansible_version.full is version('2.9', '<')
- include_tasks: wait_vm_poweredoff.yml
when: ansible_version.full is version('2.9', '>=')
rescue:
- name: ignoring any error
debug:
msg: "ignoring error..."
- name: reconfigure vm
ovirt.ovirt.ovirt_vm:
auth: "{{ ovirt_auth }}"
cluster: "{{ providers.ovirt.cluster }}"
name: "{{ template.name }}"
boot_devices:
- hd
cd_iso: ""
custom_properties: "{{ custom_properties_efi if (template_vm_efi|bool and custom_efi_enabled|bool) else ([{}]) }}"
force: yes
state: present
when: template is defined

View File

@@ -0,0 +1,11 @@
---
- name: wait until the image is unlocked by the oVirt engine
ovirt.ovirt.ovirt_disk_info:
auth: "{{ ovirt_auth }}"
pattern: "name={% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}"
register: ovirt_disk_info
until: (ovirt_disk_info.ovirt_disks is defined) and (ovirt_disk_info.ovirt_disks | length > 0) and (ovirt_disk_info.ovirt_disks[0].status != "locked")
retries: 10
delay: 3
loop: "{{ template.disks }}"

View File

@@ -0,0 +1,10 @@
---
- name: wait until the image is unlocked by the oVirt engine (<2.9)
ovirt_disk_facts:
auth: "{{ ovirt_auth }}"
pattern: "name={% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}"
until: (ovirt_disks is defined) and (ovirt_disks | length > 0) and (ovirt_disks[0].status != "locked")
retries: 10
delay: 3
loop: "{{ template.disks }}"

View File

@@ -0,0 +1,11 @@
---
- name: wait until the disk is unlocked by the oVirt engine
ovirt.ovirt.ovirt_disk_info:
auth: "{{ ovirt_auth }}"
pattern: "name={{ iso_file }}"
register: ovirt_disk_info
until: (ovirt_disk_info.ovirt_disks is defined) and (ovirt_disk_info.ovirt_disks | length > 0) and (ovirt_disk_info.ovirt_disks[0].status != "locked")
retries: 10
delay: 3
when: iso_file is defined

View File

@@ -0,0 +1,10 @@
---
- name: wait until the disk is unlocked by the oVirt engine (<2.9)
ovirt_disk_facts:
auth: "{{ ovirt_auth }}"
pattern: "name={{ iso_file }}"
until: (ovirt_disks is defined) and (ovirt_disks | length > 0) and (ovirt_disks[0].status != "locked")
retries: 10
delay: 3
when: iso_file is defined

View File

@@ -0,0 +1,13 @@
---
- name: wait for vm status to be poweredoff
ovirt.ovirt.ovirt_vm_info:
auth: "{{ ovirt_auth }}"
pattern: name={{ template.name }} and cluster={{ providers.ovirt.cluster }}
register: ovirt_vm_info_result
until:
- ovirt_vm_info_result.ovirt_vms is defined
- ovirt_vm_info_result.ovirt_vms|length > 0
- ovirt_vm_info_result.ovirt_vms[0].status == 'down'
delay: 5
retries: 30

View File

@@ -0,0 +1,12 @@
---
- name: wait for vm status to be poweredoff
ovirt_vm_facts:
auth: "{{ ovirt_auth }}"
pattern: name={{ template.name }} and cluster={{ providers.ovirt.cluster }}
until:
- ovirt_vms is defined
- ovirt_vms|length > 0
- ovirt_vms[0].status == 'down'
delay: 5
retries: 30

View File

@@ -0,0 +1,131 @@
firewall --disabled
install
cdrom
lang en_US.UTF-8
keyboard us
timezone UTC
{% if template.networks is defined and template.networks[0].ip is defined and template.networks[0].gateway is defined and template.networks[0].netmask is defined %}
network --bootproto=static --ip={{ template.networks[0].ip }} --netmask={{ template.networks[0].netmask }} --gateway={{ template.networks[0].gateway }}{% if template.networks[0].dns_servers is defined %} --nameserver={{ template.networks[0].dns_servers|join(',') }}{% endif %}
{% else %}
network --bootproto=dhcp
{% endif %}
network --hostname=localhost.localdomain
rootpw {{ local_administrator_password }}
authconfig --enableshadow --passalgo=sha512
{% if template_selinux_enabled is undefined or not template_selinux_enabled %}
selinux --disabled
{% endif %}
text
skipx
logging --level=info
eula --agreed
bootloader --append="no_timer_check"
clearpart --all --initlabel
part /boot/efi --fstype="efi" --size=200 --fsoptions="umask=0077,shortname=winnt" --asprimary
part /boot --fstype="xfs" --size=1024 --asprimary
part pv.00 --fstype="lvmpv" --size=1 --grow --asprimary
volgroup vg00 --pesize=4096 pv.00
logvol swap --fstype="swap" --size=4096 --name=swap --vgname=vg00
logvol / --fstype="xfs" --size=1 --grow --name=root --vgname=vg00
#clearpart --all --initlabel
#part /boot/efi --fstype=efi --grow --maxsize=200 --size=20
#part /boot --size=1000 --asprimary --fstype=ext4 --label=boot --fsoptions=acl,user_xattr,errors=remount-ro,nodev,noexec,nosuid
#part pv.00 --size=1 --grow --asprimary
#volgroup vg00 pv.00
#logvol swap --name=swap --vgname=vg00 --size=4098
#logvol / --fstype=xfs --fsoptions=acl,user_xattr,errors=remount-ro --size=1 --grow --name=root --vgname=vg00
#bootloader --boot-drive=vda
#reqpart --add-boot
#part swap --size 4098 --asprimary
#part pv.01 --fstype xfs --size=1 --grow --asprimary volgroup VolGroup00 pv.01
#logvol / --fstype xfs --name=lv_root --vgname=VolGroup00 --size=32768
auth --useshadow --enablemd5
firstboot --disabled
services --enabled=NetworkManager,sshd
reboot
user --name={{ local_account_username }} --plaintext --password {{ local_account_password }} --groups={{ local_account_username }},wheel
%packages --ignoremissing --excludedocs
@Base
@Core
openssh-clients
sudo
openssl-devel
readline-devel
zlib-devel
kernel-headers
kernel-devel
gcc
make
perl
curl
wget
ntp
nfs-utils
net-tools
vim
curl
unbound-libs
bzip2
sshpass
-fprintd-pam
-intltool
-NetworkManager
-NetworkManager-tui
# unnecessary firmware
-aic94xx-firmware
-atmel-firmware
-b43-openfwwf
-bfa-firmware
-ipw2100-firmware
-ipw2200-firmware
-ivtv-firmware
-iwl100-firmware
-iwl1000-firmware
-iwl3945-firmware
-iwl4965-firmware
-iwl5000-firmware
-iwl5150-firmware
-iwl6000-firmware
-iwl6000g2a-firmware
-iwl6050-firmware
-libertas-usb8388-firmware
-ql2100-firmware
-ql2200-firmware
-ql23xx-firmware
-ql2400-firmware
-ql2500-firmware
-rt61pci-firmware
-rt73usb-firmware
-xorg-x11-drv-ati-firmware
-zd1211-firmware
%end
%post
# update root certs
# wget -O/etc/pki/tls/certs/ca-bundle.crt https://curl.haxx.se/ca/cacert.pem --no-check-certificate
# yum reinstall ca-certificates
# permit root login
sed -i s'/PermitRootLogin\ prohibit-password/PermitRootLogin\ yes'/g /etc/ssh/sshd_config
# sudo
yum install -y sudo
echo "{{ local_account_username }} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/{{ local_account_username }}
sed -i "s/^.*requiretty/#Defaults requiretty/" /etc/sudoers
{% if template_selinux_enabled is undefined or not template_selinux_enabled %}
sed -i s'/SELINUX=enforcing/SELINUX=disabled'/g /etc/selinux/config
{% endif %}
yum clean all
%end

View File

@@ -0,0 +1,113 @@
firewall --disabled
install
cdrom
lang en_US.UTF-8
keyboard us
timezone UTC
{% if template.networks is defined and template.networks[0].ip is defined and template.networks[0].gateway is defined and template.networks[0].netmask is defined %}
network --bootproto=static --ip={{ template.networks[0].ip }} --netmask={{ template.networks[0].netmask }} --gateway={{ template.networks[0].gateway }}{% if template.networks[0].dns_servers is defined %} --nameserver={{ template.networks[0].dns_servers|join(',') }}{% endif %}
{% else %}
network --bootproto=dhcp
{% endif %}
network --hostname=localhost.localdomain
rootpw {{ local_administrator_password }}
authconfig --enableshadow --passalgo=sha512
{% if template_selinux_enabled is undefined or not template_selinux_enabled %}
selinux --disabled
{% endif %}
text
skipx
logging --level=info
eula --agreed
bootloader --append="no_timer_check"
clearpart --all --initlabel
part /boot/efi --fstype="efi" --size=200 --fsoptions="umask=0077,shortname=winnt" --asprimary
part /boot --fstype="xfs" --size=1024 --asprimary
part pv.00 --fstype="lvmpv" --size=1 --grow --asprimary
volgroup vg00 --pesize=4096 pv.00
logvol swap --fstype="swap" --size=4096 --name=swap --vgname=vg00
logvol / --fstype="xfs" --size=1 --grow --name=root --vgname=vg00
auth --useshadow --enablemd5
firstboot --disabled
services --enabled=NetworkManager,sshd
reboot
# this doesn't seem to work in RHEL 8.0
#user --name={{ local_account_username }} --plaintext --password {{ local_account_password }} --groups={{ local_account_username }},wheel
%packages --ignoremissing --excludedocs
@Base
@Core
openssh-clients
sudo
openssl-devel
readline-devel
zlib-devel
kernel-headers
kernel-devel
gcc
make
perl
curl
wget
ntp
nfs-utils
net-tools
vim
curl
unbound-libs
bzip2
sshpass
openssl
# unnecessary firmware
-aic94xx-firmware
-atmel-firmware
-b43-openfwwf
-bfa-firmware
-ipw2100-firmware
-ipw2200-firmware
-ivtv-firmware
-iwl100-firmware
-iwl1000-firmware
-iwl3945-firmware
-iwl4965-firmware
-iwl5000-firmware
-iwl5150-firmware
-iwl6000-firmware
-iwl6000g2a-firmware
-iwl6050-firmware
-libertas-usb8388-firmware
-ql2100-firmware
-ql2200-firmware
-ql23xx-firmware
-ql2400-firmware
-ql2500-firmware
-rt61pci-firmware
-rt73usb-firmware
-xorg-x11-drv-ati-firmware
-zd1211-firmware
%end
%post
# update root certs
# wget -O/etc/pki/tls/certs/ca-bundle.crt https://curl.haxx.se/ca/cacert.pem --no-check-certificate
# yum reinstall ca-certificates
# sudo
groupadd {{ local_account_username }}
useradd -g {{ local_account_username }} -G {{ local_account_username }},wheel -d /home/{{ local_account_username }} -m -p $(openssl passwd -1 {{ local_account_password }}) {{ local_account_username }}
yum install -y sudo
echo "{{ local_account_username }} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/{{ local_account_username }}
sed -i "s/^.*requiretty/#Defaults requiretty/" /etc/sudoers
{% if template_selinux_enabled is undefined or not template_selinux_enabled %}
sed -i s'/SELINUX=enforcing/SELINUX=disabled'/g /etc/selinux/config
{% endif %}
yum clean all
%end

View File

@@ -0,0 +1 @@
localhost

View File

@@ -0,0 +1,7 @@
---
- hosts: localhost
gather_facts: False
connection: local
become: no
roles:
- ../.

View File

@@ -0,0 +1,61 @@
---
temp_directory: tmp{{ awx_job_id | default('') }}
iso_file: "linux_{{ linux_distro_name }}_ks{{ awx_job_id | default('') }}.iso"
export_dir: "{{ playbook_dir }}/{{ temp_directory }}"
providers:
ovirt:
datacenter: "{{ ovirt_datacenter }}"
cluster: "{{ ovirt_cluster }}"
data_domain: "{{ ovirt_data_domain }}"
export_domain: "{{ ovirt_export_domain }}"
iso_domain: "{{ ovirt_iso_domain }}"
template:
name: "{{ template_vm_name }}"
role: linux_template
app_name: linux_template_generate
domain: "{{ template_vm_domain }}"
disks:
- name: "{{ template_vm_name }}"
size: "{{ template_vm_root_disk_size }}GiB"
format: "{{ template_vm_root_disk_format }}"
interface: "{{ template_vm_root_disk_interface | default('virtio') }}"
bootable: yes
storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
memory: "{{ template_vm_memory }}"
cpu: "{{ template_vm_cpu }}"
bios_type: "{{ ('q35_ovmf') if (template_vm_efi|bool and not custom_efi_enabled|bool) else (omit) }}"
networks:
- name: "{{ template_vm_network_name }}"
ip: "{{ template_vm_ip_address }}"
netmask: "{{ template_vm_netmask }}"
gateway: "{{ template_vm_gateway }}"
domain: "{{ template_vm_domain }}"
device_type: e1000
dns_servers: "{{ template_vm_dns_servers }}"
cd_iso: "{{ iso_file_id | default(iso_file_name) }}" # if using data domain, file name does not work, need to use id
linux_ks_folder: "{{ os_short_names[(linux_distro_name|default('rhel_80'))].ks_folder | default('rhel8') }}"
template_vm_guest_id: "{{ os_short_names[(linux_distro_name|default('rhel_80'))].guest_id | default('rhel_8x64') }}"
qemu_cmdline_second_iso:
- -device
- ide-cd,bus={{ qemu_second_cdrom_device_bus_type }}.{{ qemu_second_cdrom_device_bus_id }},unit={{ qemu_second_cdrom_device_bus_unit }},drive=drive-ua-0001,id=ua-0001,bootindex=3
- -drive
- format=raw,if=none,id=drive-ua-0001,werror=report,rerror=report,readonly=on,file=/rhev/data-center/{{ ovirt_datacenter_id }}/{{ ovirt_datastore_id }}/images/{{ ks_iso_file_disk_id }}/{{ ks_iso_file_image_id }}
qemu_cmdline_efi:
- -drive
- if=pflash,format=raw,readonly,file={{ custom_efi_path }}
custom_properties:
- name: qemu_cmdline
value: "{{ ((qemu_cmdline_second_iso + qemu_cmdline_efi) | to_json) if (template_vm_efi|bool and custom_efi_enabled|bool) else (qemu_cmdline_second_iso | to_json) }}"
custom_properties_efi:
- name: qemu_cmdline
value: "{{ (qemu_cmdline_efi | to_json) if (template_vm_efi|bool and custom_efi_enabled|bool) else ('[]') }}"

View File

@@ -0,0 +1,24 @@
---
sudo: required
language: python
python: "2.7"
env:
- SITE=test.yml
before_install:
- sudo apt-get update -qq
install:
# Install Ansible.
- pip install ansible
# Add ansible.cfg to pick up roles path.
- "{ echo '[defaults]'; echo 'roles_path = ../'; } >> ansible.cfg"
script:
# Check the role/playbook's syntax.
- "ansible-playbook -i tests/inventory tests/$SITE --syntax-check"
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,2 @@
# oatakan.rhel_template_build
Ansible role to configure RHEL/CentOS via Packer Ansible provisioner
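A minimal sketch of a playbook the Packer Ansible provisioner could point at (the host pattern, become setting, and the target_ovirt toggle shown here are assumptions based on the role defaults):
---
- hosts: all
  become: yes
  vars:
    target_ovirt: yes  # enable oVirt/RHV-specific steps (cloud-init, guest agent); defaults to no
  roles:
    - oatakan.rhel_template_build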

View File

@@ -0,0 +1,19 @@
---
target_vagrant: no
target_ovirt: no
local_account_username: ansible
local_account_password: Chang3MyP@ssw0rd21
ovirt_guest_agent_service_name: ovirt-guest-agent
qemu_guest_agent_service_name: qemu-guest-agent
ovirt_guest_agent_package_name:
RedHat: rhevm-guest-agent-common
CentOS: ovirt-guest-agent-common
qemu_guest_agent_package_name: qemu-guest-agent
the_root_vgname: vg00
the_root_lvname: root

View File

@@ -0,0 +1,2 @@
install_date: Wed May 5 16:14:08 2021
version: master

View File

@@ -0,0 +1,23 @@
---
dependencies: []
galaxy_info:
author: oatakan
description: RedHat/CentOS template build.
role_name: rhel_template_build
company: "Red Hat"
license: "license (BSD, MIT)"
min_ansible_version: 2.4
platforms:
- name: EL
versions:
- 6
- 7
- 8
galaxy_tags:
- cloud
- system
- packaging
- development
- packer
- vmware

View File

@@ -0,0 +1,31 @@
---
- name: ensure cloud-init packages are installed
package:
name:
- cloud-init
- cloud-utils-growpart
- gdisk
- block:
- name: ensure cloud-init scripts directory exists
file:
path: /var/lib/cloud/scripts/per-instance
state: directory
mode: '0755'
- name: create growpart cloud-init script to grow partition on boot
template:
src: grow_part.sh.j2
dest: /var/lib/cloud/scripts/per-instance/grow_part.sh
mode: u=rwx,g=rx,o=rx
# when:
# - ansible_lvm is defined
# - ansible_lvm.lvs is defined
# - ansible_lvm.lvs[the_root_lvname] is defined
# - ansible_lvm.lvs[the_root_lvname].vg is defined
# - ansible_lvm.vgs is defined
# - ansible_lvm.pvs is defined
# - ansible_cmdline is defined
# - ansible_cmdline.root is defined
# - ansible_lvm.lvs[the_root_lvname].vg in ansible_cmdline.root

View File

@@ -0,0 +1,216 @@
---
- name: Get the current kernel release.
command: uname -r
changed_when: false
register: kernel_release
- name: Ensure necessary packages are installed.
yum:
name:
- wget
- perl
- cpp
- gcc
- make
- bzip2
- kernel-headers
- kernel-devel
- "kernel-devel-{{ kernel_release.stdout }}"
- cifs-utils
state: present
- name: Ensure libselinux-python package is installed.
yum:
name:
- libselinux-python
state: present
when: ansible_distribution_major_version|int < 8
- name: Ensure python3-libselinux package is installed.
yum:
name:
- python3-libselinux
state: present
when: ansible_distribution_major_version|int == 8
# Fix slow DNS.
- name: Fix slow DNS (adapted from Bento).
lineinfile:
dest: /etc/sysconfig/network
regexp: '^RES_OPTIONS'
line: 'RES_OPTIONS="single-request-reopen"'
state: present
# see https://fedoraproject.org/wiki/Changes/NetworkManager_keyfile_instead_of_ifcfg_rh
- name: ensure older style network config files for greater compatibility
copy:
dest: /etc/NetworkManager/conf.d/99-main-plugins.conf
content: |
[main]
plugins=ifcfg-rh
when: ansible_distribution_major_version|int == 8
- name: Restart network service (explicitly).
service:
name: network
state: restarted
when: ansible_distribution_major_version|int < 8
- name: Restart NetworkManager service (explicitly).
service:
name: NetworkManager
state: restarted
when: ansible_distribution_major_version|int == 8
- name: Ensure we can still connect
wait_for_connection:
# SSH daemon configuration.
- name: Configure SSH daemon.
lineinfile:
dest: /etc/ssh/sshd_config
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
state: present
loop:
- { regexp: '^UseDNS', line: 'UseDNS no' }
- { regexp: '^GSSAPIAuthentication', line: 'GSSAPIAuthentication no' }
# Local user SSH configuration.
- name: Configure local user .ssh directory.
file:
path: /home/{{ local_account_username }}/.ssh
state: directory
owner: "{{ local_account_username }}"
group: "{{ local_account_username }}"
mode: 0700
- name: Get Vagrant's public key.
get_url:
url: https://github.com/mitchellh/vagrant/raw/master/keys/vagrant.pub
dest: /home/{{ local_account_username }}/.ssh/authorized_keys
owner: "{{ local_account_username }}"
group: "{{ local_account_username }}"
mode: 0600
ignore_errors: yes
when: target_vagrant | bool
- name: autolabel on boot
command: fixfiles onboot
changed_when: False
- include_tasks: cloud-init.yml
when: target_ovirt | bool
- include_tasks: ovirt.yml
when: target_ovirt | bool
# VirtualBox tools installation.
- name: Check if VirtualBox is running the guest VM.
stat:
path: /home/{{ local_account_username }}/.vbox_version
register: virtualbox_check
- include_tasks: virtualbox.yml
when: virtualbox_check.stat.exists
# VMware tools installation.
- name: Check if VMWare is running the guest VM.
shell: |
set -o pipefail
cat /proc/scsi/scsi | grep VMware
changed_when: false
failed_when: false
register: vmware_check
- include_tasks: vmware.yml
when: vmware_check.rc == 0
# Cleanup tasks.
- name: Remove unneeded packages.
yum:
name:
- cpp
- kernel-devel
- kernel-headers
disablerepo: '*'
state: absent
- name: Clean up yum.
command: yum clean all
args:
warn: no
changed_when: false
- name: Flag the system for re-configuration
file:
path: /.unconfigured
state: touch
- name: Reset hostname to localhost.localdomain
copy:
content: 'localhost.localdomain'
dest: /etc/hostname
- name: Remove RedHat interface persistence (step 1).
file:
path: /etc/udev/rules.d/70-persistent-net.rules
state: absent
- name: Check for network config file
stat:
path: /etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4.interface | default('eth0') }}
register: network_config_file
- name: Remove RedHat interface persistence (step 2).
lineinfile:
dest: "{{ network_config_file.stat.path }}"
regexp: "{{ item }}"
state: absent
loop:
- '^HWADDR'
- '^UUID'
- '^IPADDR'
- '^NETMASK'
- '^GATEWAY'
when: network_config_file.stat.exists
- name: Set interface to DHCP
lineinfile:
dest: "{{ network_config_file.stat.path }}"
regexp: '^BOOTPROTO='
line: BOOTPROTO=dhcp
when: network_config_file.stat.exists
- name: Force logs to rotate (step 1)
shell: /usr/sbin/logrotate -f /etc/logrotate.conf
changed_when: false
- name: Find any log files to delete
find:
paths: /var/log
patterns:
- "*-????????"
- "*.gz"
register: find_log_files
- name: Force logs to rotate (step 2)
file:
path: "{{ item.path }}"
state: absent
loop: "{{ find_log_files.files }}"
- name: Clear audit log and wtmp (step 1)
shell: cat /dev/null > /var/log/audit/audit.log
changed_when: false
- name: Clear audit log and wtmp (step 2)
shell: cat /dev/null > /var/log/wtmp
changed_when: false
- name: Remove ssh-host files
command: rm -fr /etc/ssh/ssh_host_*
changed_when: false
args:
warn: false

View File

@@ -0,0 +1,48 @@
---
- name: import epel gpg key
rpm_key:
state: present
key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}
when: ansible_distribution == 'CentOS'
- name: ensure epel is installed
yum:
name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
state: present
register: install_epel
until: '"error" not in install_epel'
retries: 5
delay: 10
when: ansible_distribution == 'CentOS'
# rhevm-guest-agent-common package is not yet available for RHEL 8
- name: ensure ovirt guest agent package is installed
package:
name: "{{ ovirt_guest_agent_package_name[ansible_distribution] }}"
register: ovirt_package_installation
when: ansible_distribution_major_version|int < 8
# try installing qemu package on RHEL/CentOS 8 for now
- name: ensure qemu guest agent package is installed
package:
name: "{{ qemu_guest_agent_package_name }}"
when: ansible_distribution_major_version|int == 8
register: qemu_package_installation
ignore_errors: yes
- name: ensure ovirt guest agent is enabled
service:
name: "{{ ovirt_guest_agent_service_name }}"
enabled: yes
when:
- ansible_distribution_major_version|int < 8
- ovirt_package_installation is succeeded
- name: ensure qemu guest agent is enabled
service:
name: "{{ qemu_guest_agent_service_name }}"
enabled: yes
when:
- ansible_distribution_major_version|int == 8
- qemu_package_installation is succeeded

View File

@@ -0,0 +1,34 @@
---
- name: Get VirtualBox version.
slurp:
src: /home/{{ local_account_username }}/.vbox_version
register: get_virtualbox_version
- name: Set VirtualBox version.
set_fact:
virtualbox_version: "{{ get_virtualbox_version['content'] | b64decode }}"
- name: Mount VirtualBox guest additions ISO.
mount:
name: /tmp/vbox
src: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
opts: loop
state: mounted
fstype: iso9660
- name: Run VirtualBox guest additions installation.
shell: sh /tmp/vbox/VBoxLinuxAdditions.run
changed_when: false
failed_when: false
- name: Unmount VirtualBox guest additions ISO.
mount:
name: /tmp/vbox
src: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
state: absent
fstype: iso9660
- name: Delete VirtualBox guest additions ISO.
file:
path: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
state: absent

View File

@@ -0,0 +1,65 @@
---
- name: Add VMWare tools repository.
template:
src: vmware-tools.repo.j2
dest: /etc/yum.repos.d/vmware-tools.repo
- name: Import VMWare tools GPG keys.
rpm_key:
key: "https://packages.vmware.com/tools/keys/VMWARE-PACKAGING-GPG-RSA-KEY.pub"
state: present
- name: Create temporary directories for VMware tools.
file:
path: "/tmp/{{ item }}"
state: directory
loop:
- vmfusion
- vmfusion-archive
- name: Mount VMware tools.
mount:
name: /tmp/vmfusion
src: /home/{{ local_account_username }}/linux.iso
fstype: iso9660
opts: loop
state: mounted
- name: Find any VMwareTools file.
find:
paths: /tmp/vmfusion
patterns: "^VMwareTools-*.tar.gz"
use_regex: yes
register: vmware_tools_files
- block:
- name: Decompress VMware Tools installer into archive folder.
unarchive:
src: "{{ vmware_tools_files.files[0] }}"
dest: /tmp/vmfusion-archive
remote_src: yes
- name: Run the VMware tools installer.
shell: /tmp/vmfusion-archive/vmware-tools-distrib/vmware-install.pl --default
changed_when: false
when: vmware_tools_files.matched > 0
- name: Unmount VMware tools.
mount:
name: /tmp/vmfusion
src: /home/{{ local_account_username }}/linux.iso
fstype: iso9660
state: absent
- name: Remove temporary directories for VMware tools.
file:
path: "/tmp/{{ item }}"
state: absent
loop:
- vmfusion
- vmfusion-archive
- name: Delete VMware Tools.
file:
path: /home/{{ local_account_username }}/linux.iso
state: absent

View File

@@ -0,0 +1,16 @@
---
- name: Add vmhgfs module (RHEL 6).
template:
src: vmhgfs.modules.j2
dest: /etc/sysconfig/modules/vmhgfs.modules
mode: 0755
when: ansible_distribution_major_version|int <= 6
- name: Install open-vm-tools.
yum:
name: open-vm-tools
state: present
when: ansible_distribution_major_version|int >= 7
- include_tasks: vmware-tools.yml
when: ansible_distribution_major_version|int <= 6

View File

@@ -0,0 +1,13 @@
#!/bin/bash
the_root_vgname='{{ ansible_lvm.lvs[the_root_lvname].vg | default('vg00') }}'
the_root_lvname='{{ the_root_lvname | default('root') }}'
the_root_pvname=$(vgdisplay -v $the_root_vgname 2> /dev/null | awk '/PV Name/ {print $3}')
the_root_pv_partnum=$(echo $the_root_pvname | grep -o '[0-9]$')
the_root_pv_device=$(echo $the_root_pvname | grep -o '.*[^0-9]')
the_root_mount_point=$(lsblk -l -o NAME,MOUNTPOINT | grep $the_root_vgname-$the_root_lvname | awk '{print $2}')
/usr/bin/growpart $the_root_pv_device $the_root_pv_partnum
/usr/sbin/pvresize $the_root_pvname
/usr/sbin/lvextend /dev/mapper/$the_root_vgname-$the_root_lvname $the_root_pvname
/usr/sbin/xfs_growfs $the_root_mount_point

View File

@@ -0,0 +1 @@
modprobe vmhgfs

View File

@@ -0,0 +1,9 @@
[vmware-tools]
name=VMware Tools
{% if ansible_distribution_major_version == "7" %}
baseurl=http://packages.vmware.com/packages/rhel7/x86_64/
{% else %}
baseurl=http://packages.vmware.com/tools/esx/latest/rhel{{ ansible_distribution_major_version }}/$basearch
{% endif %}
enabled=1
gpgcheck=1

View File

@@ -0,0 +1 @@
localhost

View File

@@ -0,0 +1,6 @@
---
- hosts: localhost
remote_user: root
roles:
- oatakan.rhel_template_build

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:38 2021
install_date: Wed May 5 16:14:04 2021
version: master

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:35 2021
version: master
install_date: Wed May 5 16:14:00 2021
version: ''

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:36 2021
install_date: Wed May 5 16:14:01 2021
version: master

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 17:13:32 2021
install_date: Wed May 5 16:14:06 2021
version: master

View File

@@ -1,2 +1,2 @@
install_date: Wed Apr 21 16:48:40 2021
install_date: Wed May 5 16:14:05 2021
version: master

View File

@@ -16,6 +16,8 @@
- name: oatakan.windows_update
- name: oatakan.ansible-role-ovirt
src: git+https://github.com/oatakan/ansible-role-ovirt.git
- name: oatakan.rhel_template_build
- name: oatakan.rhel_ovirt_template
- name: ikke_t.podman_container_systemd
- name: ikke_t.container_image_cleanup

View File

@@ -1,3 +1,15 @@
---
- name: this is an example
- name: Task 2
- name: Create 1Password Secret
hosts: localhost
tasks:
- onepassword.connect.generic_item:
vault_id: "e63n3krpqx7qpohuvlyqpn6m34"
title: Lab Secrets Test
state: created
fields:
- label: Codeword
value: "hunter2"
section: "Personal Info"
field_type: concealed
# no_log: true
register: op_item

View File

@@ -0,0 +1,7 @@
{
"version": "1.0.0",
"plays": [],
"stdout": [],
"status": "failed",
"status_color": 9
}