Clean up some cruft

playbooks/bootstrap.yml (Normal file, 42 lines)
@@ -0,0 +1,42 @@
# Note: need to specify extra_vars, providing ansible_ssh_user and ansible_ssh_pass
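# Example invocation (hypothetical credentials, shown only to illustrate the
# extra_vars mentioned above):
#   ansible-playbook bootstrap.yml -e ansible_ssh_user=root -e ansible_ssh_pass='changeme'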

- name: Set up IPA Client
  hosts: tag_ipa_client
  become: yes
  collections:
    - freeipa.ansible_freeipa
  pre_tasks:
    - name: Set hostname
      hostname:
        name: "{{ inventory_hostname }}"

    - name: Attach subscriptions
      command: '/usr/bin/subscription-manager attach'
      register: result
      changed_when:
        - '"All installed products are covered by valid entitlements. No need to update subscriptions at this time." not in result.stdout'
      when: ansible_distribution == "RedHat"

  roles:
    - role: debian-freeipa-client
      when: ansible_os_family == "Debian"
    - role: ipaclient
      state: present
      when: ansible_os_family == "RedHat"

- name: Set up Basic Lab Packages
  hosts: all
  become: yes
  roles:
    - role: toal-common

- name: Packages
  hosts: all
  become: yes
  tasks:
    - name: Host Packages
      package:
        state: present
        name: "{{ host_packages }}"

playbooks/build_ansible.yml (Normal file, 59 lines)
@@ -0,0 +1,59 @@
---
- name: VM Provisioning
  hosts: tag_ansible:&tag_tower
  connection: local
  collections:
    - redhat.rhv

  tasks:
    - block:
        - name: Obtain SSO token from username / password credentials
          ovirt_auth:
            url: "{{ ovirt_url }}"
            username: "{{ ovirt_username }}"
            password: "{{ ovirt_password }}"

        - name: Disks Created
          ovirt_disk:
            auth: "{{ ovirt_auth }}"
            description: "Boot Disk for {{ inventory_hostname }}"
            interface: virtio
            size: 120GiB
            storage_domain: nas_iscsi
            bootable: True
            wait: true
            name: "{{ inventory_hostname }}_disk0"
            state: present

        - name: VM Created
          ovirt_vm:

        - name: Add NIC to VM
          ovirt_nic:
            state: present
            vm: myvm
            name: mynic
            interface: e1000
            mac_address: 00:1a:4a:16:01:56
            profile: ovirtmgmt
            network: ovirtmgmt

        - name: Plug NIC to VM
          redhat.rhv.ovirt_nic:
            state: plugged
            vm: myvm
            name: mynic

      always:
        - name: Always revoke the SSO token
          ovirt_auth:
            state: absent
            ovirt_auth: "{{ ovirt_auth }}"

# - name: VM Configuration
# - name: Automation Platform Installer
# - name:

playbooks/build_rhel_template.yml (Normal file, 30 lines)
@@ -0,0 +1,30 @@
---
- name: Create Build Host in Satellite
  hosts: localhost
  connection: local

  tasks:
    - name: Build Host
      redhat.satellite.host:
        architecture: x86_64
        build: true
        comment: "RHEL 8 Template"
        compute_profile: "4c8g"
        compute_attributes:
          start: "1"
        compute_resource: "ToalLab"
        content_view: "composite-rhel8"
        domain: "sandbox.toal.ca"
        enabled: true
        hostgroup: "RHEL8"
        lifecycle_environment: "Library"
        location: "Lab"
        name: "rhel8build.sandbox.toal.ca"
        operatingsystem: "RedHat 8.3"
        organization: "Toal.ca"
        server_url: "https://satellite1.mgmt.toal.ca/"
        subnet: "sandbox"
        username: "{{ satellite_admin_user }}"
        password: "{{ satellite_admin_pass }}"
        validate_certs: no

playbooks/build_windows_template.yml (Normal file, 12 lines)
@@ -0,0 +1,12 @@
- name: create an ovirt windows template
  hosts: windows_template_base
  gather_facts: False
  connection: local
  become: no

  vars:
    ansible_python_interpreter: "{{ ansible_playbook_python }}"

  roles:
    - oatakan.windows_ovirt_template

playbooks/buildvm.yml (Normal file, 233 lines)
@@ -0,0 +1,233 @@
# Playbook to build new VMs in RHV Cluster
# Currently only builds RHEL VMs

# Create Host

- name: Preflight checks
  hosts: tag_build
  gather_facts: false
  tasks:
    - assert:
        that:
          - site == "sagely_dc"
          - is_virtual

- name: Ensure Primary IP exists and is in DNS
  hosts: tag_build
  gather_facts: false
  collections:
    - netbox.netbox
    - freeipa.ansible_freeipa
    - redhat.rhv

  tasks:
    - name: Obtain SSO token for RHV
      ovirt_auth:
        url: "{{ ovirt_url }}"
        username: "{{ ovirt_username }}"
        insecure: true
        password: "{{ ovirt_password }}"
      delegate_to: localhost

    - name: Get unused IP Address from pool
      netbox_ip_address:
        netbox_url: "{{ netbox_api }}"
        netbox_token: "{{ netbox_token }}"
        data:
          prefix: 192.168.16.0/20
          assigned_object:
            name: eth0
            virtual_machine: "{{ inventory_hostname }}"
        state: new
      register: new_ip
      when: primary_ip4 is undefined
      delegate_to: localhost

    - set_fact:
        primary_ip4: "{{ new_ip.ip_address.address|ipaddr('address') }}"
        vm_hostname: "{{ inventory_hostname.split('.')[0] }}"
        vm_domain: "{{ inventory_hostname.split('.',1)[1] }}"
      delegate_to: localhost
      when: primary_ip4 is undefined

    - name: Primary IPv4 Assigned in Netbox
      netbox_virtual_machine:
        netbox_url: "{{ netbox_api }}"
        netbox_token: "{{ netbox_token }}"
        data:
          primary_ip4: "{{ primary_ip4 }}"
          name: "{{ inventory_hostname }}"
      delegate_to: localhost

    - name: Primary IPv4 Address
      debug:
        var: primary_ip4

    - name: Ensure IP Address in IdM
      ipadnsrecord:
        records:
          - name: "{{ vm_hostname }}"
            zone_name: "{{ vm_domain }}"
            record_type: A
            record_value:
              - "{{ new_ip.ip_address.address|ipaddr('address') }}"
            create_reverse: true
        ipaadmin_password: "{{ ipaadmin_password }}"
      delegate_to: idm1.mgmt.toal.ca

- name: Create VMs
  hosts: tag_build
  connection: local
  gather_facts: no
  collections:
    - netbox.netbox
    - redhat.rhv
  vars:
    # Workaround to get correct venv python interpreter
    ansible_python_interpreter: "{{ ansible_playbook_python }}"

  tasks:
    - name: Basic Disk Profile
      set_fact:
        vm_disks:
          - name: '{{ inventory_hostname }}_boot'
            bootable: true
            sparse: true
            descr: '{{ inventory_hostname }} Boot / Root disk'
            interface: virtio
            size: '{{ disk|default(40) }}'
            state: present
            storage_domain: "{{ rhv_storage_domain }}"
            activate: true
      when: vm_disks is not defined

    - name: Create VM Disks
      ovirt_disk:
        auth: '{{ ovirt_auth }}'
        name: '{{ item.name }}'
        description: '{{ item.descr }}'
        interface: '{{ item.interface }}'
        size: '{{ item.size|int * 1024000 }}'
        state: '{{ item.state }}'
        sparse: '{{ item.sparse }}'
        wait: true
        storage_domain: '{{ item.storage_domain }}'
      async: 300
      poll: 15
      loop: '{{ vm_disks }}'

    - set_fact:
        nb_query_filter: "slug={{ platform }}"
    - debug: msg='{{ query("netbox.netbox.nb_lookup", "platforms", api_filter=nb_query_filter, api_endpoint=netbox_api, token=netbox_token)[0].value.name }}'

    - name: Create VM in RHV
      ovirt_vm:
        auth: '{{ ovirt_auth }}'
        name: '{{ inventory_hostname }}'
        state: present
        memory: '{{ memory }}MiB'
        memory_guaranteed: '{{ (memory / 2)|int }}MiB'
        disks: '{{ vm_disks }}'
        cpu_cores: '{{ vcpus }}'
        cluster: '{{ cluster }}'
        # This is ugly. Can we do better?
        operating_system: '{{ query("netbox.netbox.nb_lookup", "platforms", api_filter=nb_query_filter, api_endpoint=netbox_api, token=netbox_token)[0].value.name }}'
        type: server
        graphical_console:
          protocol:
            - vnc
            - spice
        boot_devices:
          - hd
      async: 300
      poll: 15
      notify: PXE Boot
      register: vm_result

    - name: Assign NIC
      ovirt_nic:
        auth: '{{ ovirt_auth }}'
        interface: virtio
        mac_address: '{{ item.mac_address|default(omit) }}'
        name: '{{ item.name }}'
        profile: '{{ item.untagged_vlan.name }}'
        network: '{{ item.untagged_vlan.name }}' # This is fragile
        state: '{{ (item.enabled == True) |ternary("plugged","unplugged") }}'
        linked: yes
        vm: '{{ inventory_hostname }}'
      loop: '{{ interfaces }}'
      register: interface_result

    - debug: var=interface_result

    - name: Host configured in Satellite
      redhat.satellite.host:
        username: "{{ satellite_admin_user }}"
        password: "{{ satellite_admin_pass }}"
        server_url: "{{ satellite_url }}"
        name: "{{ inventory_hostname }}"
        hostgroup: "RHEL8/RHEL8 Sandbox"
        organization: Toal.ca
        location: Lab
        ip: "{{ primary_ip4 }}"
        mac: "{{ interface_result.results[0].nic.mac.address }}" # fragile
        build: "{{ vm_result.changed |ternary(true,false) }}"
        validate_certs: no

    - name: Assign interface MACs to Netbox
      netbox_vm_interface:
        netbox_url: "{{ netbox_api }}"
        netbox_token: "{{ netbox_token }}"
        data:
          name: "{{ item.nic.name }}"
          mac_address: "{{ item.nic.mac.address }}"
          virtual_machine: "{{ inventory_hostname }}"
      loop: "{{ interface_result.results }}"

  handlers:
    - name: PXE Boot
      ovirt_vm:
        auth: "{{ ovirt_auth }}"
        name: "{{ inventory_hostname }}"
        boot_devices:
          - network
        state: running
      register: vm_build_result

- name: Ensure VM is running and reachable
  hosts: tag_build
  gather_facts: no
  connection: local
  collections:
    - redhat.rhv
  vars:
    # Hack to work around virtualenv python interpreter
    ansible_python_interpreter: "{{ ansible_playbook_python }}"

  tasks:
    - name: VM is running
      ovirt_vm:
        auth: "{{ ovirt_auth }}"
        name: "{{ inventory_hostname }}"
        state: running
        boot_devices:
          - hd

    - name: Wait for SSH to be ready
      wait_for_connection:
        timeout: 1800
        sleep: 5

# - name: Ensure IP address is correct in Netbox
#   netbox_virtual_machine:
#     data:
#       name: "{{ inventory_hostname }}"
#       primary_ip4: "{{ primary_ip4 }}"
#     netbox_url: "{{ netbox_api }}"
#     netbox_token: "{{ netbox_token }}"
#     state: present
#   delegate_to: localhost

#TODO: Clear Build tag
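# Usage sketch (values hypothetical; memory, vcpus, cluster and disk normally
# come from inventory/group_vars):
#   ansible-playbook buildvm.yml -e memory=4096 -e vcpus=2 -e cluster=Default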

playbooks/create_host.yml (Normal file, 33 lines)
@@ -0,0 +1,33 @@
---
# Create Host
- name: Get IP Address from pool
  hosts: localhost

  tasks:
    - name: Get unused IP Address from pool
      netbox.netbox.netbox_ip_address:
        netbox_url: "{{ netbox_api }}"
        netbox_token: "{{ netbox_token }}"
        data:
          prefix: 192.168.16.0/20
        state: new
      register: new_ip

    - debug: var=new_ip

    - name: Create IP Address in IdM
      freeipa.ansible_freeipa.ipadnsrecord:
        records:
          - name: test1
            zone_name: sandbox.toal.ca
            record_type: A
            record_value:
              - "{{ new_ip.ip_address.address|ipaddr('address') }}"
            create_reverse: true
        ipaadmin_password: "{{ ipaadmin_password }}"
      delegate_to: idm1.mgmt.toal.ca

# Test Comment
# - name: Create Satellite Host
# - name: Create RHV VM
# - name:

playbooks/create_satellite_host.yml (Normal file, 25 lines)
@@ -0,0 +1,25 @@
---
- name: Create new VM / Host in Satellite
  hosts: sat6.lab.toal.ca
  vars:
    sat6_fqdn: "sat6.lab.toal.ca"
    sat6_user: "{{ vault_sat6_user }}"
    sat6_pass: "{{ vault_sat6_pass }}"
    sat6_organization: "Toal.ca"
    sat6_fail_on_existing: True
    sat6_power_on_delay: 60
    sat6_hosts:
      # VM
      - name: "rhel1"
        host_group: "Lab RHEL Hosts/RHEL 8"
        location: "Lab"
        ipv4: "192.168.16.10"
        domain: "sandbox.toal.ca"
        comment: "Demo Server"
        compute_resource: "Home Lab"
        compute_profile: "2-Medium"
        parameters:
          - name: "build_type"
            value: "httpd"
  roles:
    - ahuffman.sat6_create_hosts

playbooks/cvpublish.yml (Normal file, 46 lines)
@@ -0,0 +1,46 @@
- name: Publish CVs
  hosts: satellite1.mgmt.toal.ca
  vars:
    sat_env_name: Library
    sat_org: Toal.ca
    sat_publish_description: Automated CV Update

  tasks:
    - name: Pre-tasks | Find all CVs
      redhat.satellite.resource_info:
        username: "{{ satellite_admin_user }}"
        password: "{{ satellite_admin_pass }}"
        server_url: "{{ satellite_url }}"
        organization: "{{ sat_org }}"
        resource: content_views
        validate_certs: no
      register: raw_list_cvs

    - name: Pre-tasks | Get resource information
      set_fact:
        list_all_cvs: "{{ raw_list_cvs['resources'] | json_query(jmesquery) | list }}"
      vars:
        jmesquery: "[*].{name: name, composite: composite, id: id}"
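      # The JMESPath query trims each content-view record down to the fields
      # used below; entries come out shaped like (illustrative values only):
      #   {"name": "composite-rhel8", "composite": true, "id": 4}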

    - name: Pre-tasks | Extract list of content views
      set_fact:
        sat6_content_views_list: "{{ sat6_content_views_list|default([]) + [item.name] }}"
      loop: "{{ list_all_cvs | reject('search', 'Default Organization View') | list }}"
      when: item.composite == false

    - name: Publish content
      redhat.satellite.content_view_version:
        username: "{{ satellite_admin_user }}"
        password: "{{ satellite_admin_pass }}"
        server_url: "{{ satellite_url }}"
        organization: "{{ sat_org }}"
        content_view: "{{ item }}"
        validate_certs: no
        description: "{{ sat_publish_description }}"
        lifecycle_environments:
          - Library
          - "{{ sat_env_name }}"
      loop: "{{ sat6_content_views_list | list }}"
      loop_control:
        loop_var: "item"
      register: cv_publish_sleeper

playbooks/debug.yml (Normal file, 29 lines)
@@ -0,0 +1,29 @@
- name: Show Some Debugging
  hosts: localhost
  tasks:
    - name: Print Debugging info
      vars:
        msg: |
          Module Variables ("vars"):
          --------------------------
          {{ vars | to_nice_json }}

          Environment Variables ("environment"):
          --------------------------------------
          {{ environment | to_nice_json }}

          GROUP NAMES Variables ("group_names"):
          -------------------------------------
          {{ group_names | to_nice_json }}

          GROUPS Variables ("groups"):
          ----------------------------
          {{ groups | to_nice_json }}

          HOST Variables ("hostvars"):
          ----------------------------
          {{ hostvars | to_nice_json }}
      debug:
        msg: "{{ msg.split('\n') }}"
      tags: debug_info
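# Run just this output via its tag, e.g.:
#   ansible-playbook debug.yml --tags debug_info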

playbooks/execution-environment.yml (Normal file, 12 lines)
@@ -0,0 +1,12 @@
---
version: 1

build_arg_defaults:
  EE_BASE_IMAGE: 'registry.redhat.io/ansible-automation-platform-21/ee-supported-rhel8:latest'

ansible_config: '../../../.ansible.cfg'

dependencies:
  galaxy: collections/requirements.yml
  python: requirements.txt
  system: bindep.txt
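# Build sketch with ansible-builder (image tag is hypothetical):
#   ansible-builder build -f execution-environment.yml -t toallab-ee:latest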

playbooks/gitea.yml (Normal file, 41 lines)
@@ -0,0 +1,41 @@
- name: Ensure Gitea is running on Zenyatta
  become: yes
  hosts: zenyatta.lab.toal.ca
  vars:
    container_state: running
    container_name: gitea
    container_image: gitea/gitea:latest
    gitea_nfs_mountpoint: /mnt/gitea
    gitea_nfs_src: nas.lab.toal.ca:/mnt/BIGPOOL/BackedUp/git
    gitea_dir_owner: ptoal
    gitea_dir_group: ptoal
    container_run_args: >-
      --rm
      -p 3000:3000/tcp -p 3222:22/tcp
      -v "{{ gitea_nfs_mountpoint }}:/data"
      --hostname=gitea.mgmt.toal.ca
      --memory=1024M
    container_firewall_ports:
      - 3000/tcp
      - 3222/tcp

  tasks:
    - name: Ensure container data mount points
      tags: mount
      file:
        path: "{{ gitea_nfs_mountpoint }}"
        state: directory

    - name: ensure container NFS mounts from NAS
      tags: [ mount, nfs ]
      mount:
        src: "{{ gitea_nfs_src }}"
        path: "{{ gitea_nfs_mountpoint }}"
        fstype: nfs
        opts: rw,rsize=8192,wsize=8192,timeo=14,intr,vers=3
        state: mounted

    - name: ensure container state
      tags: container
      import_role:
        name: ikke_t.podman_container_systemd

playbooks/gitlab.yml (Normal file, 39 lines)
@@ -0,0 +1,39 @@
- name: Ensure GitLab is installed
  hosts: gitlab.lab.toal.ca
  become: true
  vars:
    gitlab_external_url: "http://gitlab.lab.toal.ca/"
    gitlab_git_data_dir: "/var/opt/gitlab/git-data"
    # gitlab_edition: "gitlab-ce"
    # gitlab_redirect_http_to_https: "false"
    # # LDAP Configuration.
    # gitlab_ldap_enabled: "true"
    # gitlab_ldap_host: "idm1.mgmt.toal.ca"
    # gitlab_ldap_port: "389"
    # gitlab_ldap_uid: "ldapauth"
    # gitlab_ldap_method: "start_tls"
    # gitlab_ldap_bind_dn: "uid=ldapauth,cn=sysaccounts,cn=etc,dc=idm,dc=toal,dc=ca"
    # gitlab_ldap_password: "growwaternapkin"
    # gitlab_ldap_base: "cn=users,cn=accounts,dc=idm,dc=toal,dc=ca"
    # # Email configuration.
    # gitlab_email_enabled: "true"
    # gitlab_email_from: "gitlab@takeflight.ca"
    # gitlab_email_display_name: "Gitlab"
    # gitlab_email_reply_to: "ptoal@takeflight.ca"
    # # SMTP Configuration
    # gitlab_smtp_enable: "true"
    # gitlab_smtp_address: "smtp.gmail.com"
    # gitlab_smtp_port: "587"
    # gitlab_smtp_user_name: "server"
    # gitlab_smtp_password: "ReJ3n_Dj9EB-j3b"
    # gitlab_smtp_domain: "takeflight.ca"
    # gitlab_smtp_authentication: "login"
    # gitlab_smtp_enable_starttls_auto: "true"
    # # gitlab_smtp_tls: "false"
    # gitlab_smtp_openssl_verify_mode: "none"
    # # gitlab_smtp_ca_path: "/etc/ssl/certs"
    # # gitlab_smtp_ca_file: "/etc/ssl/certs/ca-certificates.crt"
    # gitlab_nginx_listen_https: "false"

  roles:
    - { role: geerlingguy.gitlab }

playbooks/interface_config.yml (Normal file, 6 lines)
@@ -0,0 +1,6 @@
---
# Configure host interface and network switch
# First use-case is bonds
#
- name: Host Network
  hosts: "{{ }}"

playbooks/minecraft.yml (Normal file, 17 lines)
@@ -0,0 +1,17 @@
- name: Minecraft Systems - MineOS
  hosts: tag_mc_mineos:&tag_ansible
  become: true
  vars:
    # nodejs_version: "8.x"
    # mineos_repo: "https://github.com/sage905/mineos-node.git"
    # mineos_version: "pam_auth"
  roles:
    - ansible-role-nodejs
    - sage905.mineos

- name: Minecraft Systems - Mark2
  hosts: tag_mc_mark2:&tag_ansible
  become: true
  roles:
    - sage905.mark2
    - sage905.waterfall

playbooks/monitoring.yml (Normal file, 67 lines)
@@ -0,0 +1,67 @@
---
- name: Prometheus
  hosts: monitor.mgmt.toal.ca # Hard-coded for now
  become: yes
  vars:
    container_image: prom/prometheus
    container_name: prometheus
    container_state: running
    container_firewall_ports:
      - 8090/tcp
    container_run_args: >-
      -p 8090:8090
      -v /home/prometheus/etc:/etc/prometheus:Z
      -v /home/prometheus/data:/prometheus:Z
      -v /home/prometheus/console_libraries:/usr/share/prometheus/console_libraries:Z
      -v /home/prometheus/console_templates:/usr/share/prometheus/consoles:Z
  roles:
    - ikke_t.podman_container_systemd

  pre_tasks:
    - name: Directories exist
      file:
        path: '{{ item }}'
        state: directory
        owner: nobody
        group: nobody
      loop:
        - /home/prometheus/etc
        - /home/prometheus/data
        - /home/prometheus/console_libraries
        - /home/prometheus/console_templates
  post_tasks:
    - name: Firewall
      firewalld:
        state: enabled # accept connections on this port
        permanent: true
        port: 9090/tcp

- name: Grafana
  hosts: monitor.mgmt.toal.ca # Hard-coded for now
  become: yes
  vars:
    container_image: grafana/grafana
    container_name: grafana
    container_state: running
    container_firewall_ports:
      - 3000/tcp
    container_run_args: >-
      -p 3000:3000
      -v /home/grafana/data:/var/lib/grafana:Z
  pre_tasks:
    - name: Directories exist
      file:
        path: '{{ item }}'
        state: directory
        owner: nobody
        group: nobody
      loop:
        - /home/grafana/data
  roles:
    - ikke_t.podman_container_systemd
  post_tasks:
    - name: Firewall
      firewalld:
        state: enabled # accept connections on this port
        permanent: true
        port: 3000/tcp

playbooks/podhost.yml (Normal file, 15 lines)
@@ -0,0 +1,15 @@
- name: K8S installed and ready
  hosts: k8s
  become: yes

  tasks:
    - name: EPEL Repo enabled
      yum:
        name: epel-release
        state: present

    - name: Setting sebool container_manage_cgroup
      seboolean:
        name: container_manage_cgroup
        state: yes
        persistent: yes

playbooks/publish_cvs.yml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
---
- name: Publish CVs
  hosts: sat6.lab.toal.ca
  connection: local

  tasks:
    - name: Publish Content View
      redhat.satellite.content_view_version:
        username: "{{ vault_sat6_user }}"
        password: "{{ vault_sat6_pass }}"
        server_url: "https://{{ inventory_hostname }}"
        validate_certs: false
        content_view: "RHEL8"
        organization: "Toal.ca"
        lifecycle_environments:
          - Library

playbooks/reset_port.yml (Normal file, 32 lines)
@@ -0,0 +1,32 @@
- name: Reset Port
  hosts: switch01
  become_method: enable
  become: yes
  connection: network_cli
  gather_facts: no
  vars:
    switch_port: GigabitEthernet2/0/13

  tasks:
    - name: Simple Debug Task
      debug:
        msg: "Hello World"

    - name: Shut down port
      ios_interface:
        name: "{{ switch_port }}"
        enabled: False
        state: down
        delay: 20

    - name: Pause for 5 seconds
      pause:
        seconds: 5

    - name: Bring up port
      ios_interface:
        name: "{{ switch_port }}"
        enabled: True
        delay: 20
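# Target a different interface by overriding the default, e.g.:
#   ansible-playbook reset_port.yml -e switch_port=GigabitEthernet2/0/14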

playbooks/rhv_setup.yml (Normal file, 222 lines)
@@ -0,0 +1,222 @@
---
- name: Check for existing cert
  hosts: rhv.mgmt.toal.ca
  connection: local
  vars:
    ansible_python_interpreter: "/usr/bin/python3"

  tasks:
    - name: Information from existing key
      community.crypto.x509_certificate_info:
        path: "keys/{{ acme_certificate_domains|first }}.pem"
      ignore_errors: yes
      register: key_info

    - set_fact: have_valid_cert=false

    # Valid only if the cert expires more than 30 days (2592000 s) from now
    - set_fact:
        have_valid_cert: "{{ (key_info.not_after|to_datetime('%Y%m%d%H%M%SZ')).timestamp() > ansible_date_time.epoch|int + 2592000 }}"
      when:
        - not key_info.failed

# Probably want to split this out into a proper certificate management role for Toal.ca
- name: Request TLS Certificate from LetsEncrypt
  hosts: rhv.mgmt.toal.ca
  connection: local
  gather_facts: false
  # This doesn't belong here
  #vars:
  #  acme_certificate_root_certificate: https://letsencrypt.org/certs/trustid-x3-root.pem.txt

  pre_tasks:
    - name: Ensure Let's Encrypt Account Exists
      acme_account:
        state: present
        acme_directory: "{{ acme_directory }}"
        terms_agreed: true
        allow_creation: true
        contact:
          - mailto:ptoal@takeflight.ca
        account_key_content: "{{ acme_key }}"
        acme_version: 2

    - name: tmpfile for Account Key
      tempfile:
        state: file
      register: acme_tmp_key

    - name: Account Key to File
      copy:
        dest: "{{ acme_tmp_key.path }}"
        content: "{{ acme_key }}"
        mode: "600"

    - set_fact:
        acme_certificate_acme_account: "{{ acme_tmp_key.path }}"

  roles:
    - name: felixfontein.acme_certificate
      when: have_valid_cert is defined and not have_valid_cert

  post_tasks:
    - name: Remove tempfile
      file:
        path: "{{ acme_tmp_key.path }}"
        state: absent

- name: Install custom CA Certificate in RHV-M
  hosts: rhv.mgmt.toal.ca
  become: true
  vars:
    key_files_prefix: "keys/{{ acme_certificate_domains|first }}"
    ansible_python_interpreter: "/usr/bin/python3"

  tasks:
    - name: Certificate trust in store
      copy:
        src: "{{ key_files_prefix }}-rootchain.pem"
        dest: /etc/pki/ca-trust/source/anchors/
      register: rootchain_result
      notify:
        - update ca-trust
        - restart httpd

    - name: Certificate store updated
      command: /usr/bin/update-ca-trust
      when: rootchain_result.changed
      notify: restart httpd

    - name: Apache CA is file, not link
      file:
        path: /etc/pki/ovirt-engine/apache-ca.pem
        state: file
      register: apache_ca_stat

    - name: Apache CA link is removed
      file:
        path: /etc/pki/ovirt-engine/apache-ca.pem
        state: absent
      when: apache_ca_stat.state == "file"

    - name: CA Rootchain in Apache config
      copy:
        src: "{{ key_files_prefix }}-rootchain.pem"
        dest: /etc/pki/ovirt-engine/apache-ca.pem
        backup: yes
      notify: restart httpd

    - name: Private key installed
      copy:
        src: "{{ key_files_prefix }}.key"
        dest: "{{ item }}"
        backup: yes
        owner: root
        group: ovirt
        mode: 0640
      notify: restart httpd
      loop:
        - /etc/pki/ovirt-engine/keys/apache.key.nopass
        - /etc/pki/ovirt-engine/keys/websocket-proxy.key.nopass

    - name: Certificate installed
      copy:
        src: "{{ key_files_prefix }}.pem"
        dest: "{{ item }}"
        backup: yes
        owner: root
        group: ovirt
        mode: 0644
      notify: restart httpd
      loop:
        - /etc/pki/ovirt-engine/certs/websocket-proxy.cer
        - /etc/pki/ovirt-engine/certs/apache.cer

    - name: Trust Store Configuration
      copy:
        dest: /etc/ovirt-engine/engine.conf.d/99-custom-truststore.conf
        content: |
          ENGINE_HTTPS_PKI_TRUST_STORE="/etc/pki/java/cacerts"
          ENGINE_HTTPS_PKI_TRUST_STORE_PASSWORD=""
      notify:
        - restart ovn
        - restart ovirt-engine

    - name: Websocket Proxy configuration
      lineinfile:
        path: /etc/ovirt-engine/ovirt-websocket-proxy.conf.d/10-setup.conf
        state: present
        backup: yes
        line: "{{ item.name }}={{ item.value }}"
        regexp: "^{{ item.name }}="
      loop:
        - name: SSL_CERTIFICATE
          value: /etc/pki/ovirt-engine/certs/websocket-proxy.cer
        - name: SSL_KEY
          value: /etc/pki/ovirt-engine/keys/websocket-proxy.key.nopass
      # - SSL_CERTIFICATE=/etc/pki/ovirt-engine/certs/apache.cer
      # - SSL_KEY=/etc/pki/ovirt-engine/keys/apache.key.nopass
      notify:
        - restart ovirt-websocket-proxy

  handlers:
    - name: restart httpd
      service:
        name: httpd
        state: restarted

    - name: update ca-trust
      command: update-ca-trust

    - name: restart ovn
      service:
        name: ovirt-provider-ovn
        state: restarted

    - name: restart ovirt-engine
      service:
        name: ovirt-engine
        state: restarted

    - name: restart ovirt-websocket-proxy
      service:
        name: ovirt-websocket-proxy
        state: restarted

- name: Create RHV/ovirt VLANs
  hosts: rhv.mgmt.toal.ca
  connection: local
  vars:
    # Hack to work around virtualenv python interpreter
    ansible_python_interpreter: "{{ ansible_playbook_python }}"
  tasks:
    - name: Obtain SSO token for RHV
      ovirt_auth:
        state: present
        insecure: true

    - ovirt_network:
        auth: "{{ ovirt_auth }}"
        fetch_nested: true
        data_center: "{{ item.data_center }}"
        name: "{{ item.name }}"
        vlan_tag: "{{ item.vlan_tag|default(omit) }}"
        vm_network: "{{ item.vm_network }}"
        mtu: "{{ item.mtu }}"
        description: "{{ item.description }}"
      loop: "{{ ovirt_networks }}"
      register: networkinfo

    - debug: msg="{{ networkinfo }}"

- name: Reminder
  hosts: localhost
  connection: local
  tasks:
    - name: Reminder
      debug:
        msg: "Don't forget to add tso off gro off gso off lro off to i217-LM NICs (eg: Dell Optiplex)!"
      # https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html/administration_guide/sect-hosts_and_networking#Editing_Host_Network_Interfaces_and_Assigning_Logical_Networks_to_Hosts

playbooks/satellite.yml (Normal file, 316 lines)
@@ -0,0 +1,316 @@
# Playbook to install Satellite server on RHV

#TODO: Fix Partitioning, as /var/lib/pulp doesn't get its own partition now.

- name: Preflight Setup
  hosts: "{{ vm_name }}"
  gather_facts: no
  tasks:
    - name: Obtain SSO token from username / password credentials
      redhat.rhv.ovirt_auth:
        url: "{{ ovirt_url }}"
        username: "{{ ovirt_username }}"
        password: "{{ ovirt_password }}"
      delegate_to: localhost

    - name: Add host to satellite group
      add_host:
        hostname: '{{ vm_name }}'
        groups: satellite
      changed_when: false

    - name: Get VM Tags
      ovirt.ovirt.ovirt_tag_info:
        vm: "{{ vm_name }}"
      register: vmtags_result
      delegate_to: localhost
      ignore_errors: true

    - name: Add host to provisioned group
      add_host:
        hostname: '{{ vm_name }}'
        groups: provisioned
      when:
        - vmtags_result.ovirt_tags is defined
        - vmtags_result.ovirt_tags|length > 0
        - "'provisioned' in vmtags_result.ovirt_tags|map(attribute='name')|list"

- name: Build VM
  hosts: "{{ vm_name }}:!provisioned"
  gather_facts: no
  tasks:
    - name: ISO is uploaded to RHV
      redhat.rhv.ovirt_disk:
        name: "{{ rhel_iso_filename }}"
        upload_image_path: "{{ rhel_iso_path }}/{{ rhel_iso_filename }}"
        storage_domain: ssdvdo0
        size: 5GiB
        wait: true
        bootable: true
        format: raw
        content_type: iso
      register: iso_disk
      delegate_to: localhost

    - name: Remove known_hosts entry
      known_hosts:
        name: "{{ item }}"
        state: absent
      loop:
        - "{{ vm_name }}"
        - "{{ ansible_host }}"
      delegate_to: localhost

    - name: Create VM disk
      ovirt_disk:
        name: "{{ vm_name }}_Disk0"
        description: '{{ vm_name }} Primary Disk'
        interface: 'virtio_scsi'
        size: '{{ disk }}GiB'
        state: attached
        sparse: yes
        wait: true
        storage_domain: "ssdvdo0"
      async: 300
      poll: 15
      delegate_to: localhost

    - name: Create Satellite VM in RHV
      ovirt_vm:
        name: "{{ vm_name }}"
        state: present
        memory: "{{ memory }}GiB"
        disks:
          - name: "{{ vm_name }}_Disk0"
            activate: yes
            bootable: yes
        cpu_cores: "{{ vcpus }}"
        cluster: "{{ cluster }}"
        operating_system: "rhel_7x64"
        type: server
        graphical_console:
          protocol:
            - vnc
        boot_devices:
          - hd
      async: 300
      poll: 15
      register: vm_result
      delegate_to: localhost

    - name: Assign NIC
      ovirt_nic:
        interface: virtio
        name: nic1
        profile: ovirtmgmt
        network: ovirtmgmt
        state: plugged
        vm: "{{ vm_name }}"
      register: nic_result
      delegate_to: localhost

    - name: Create directory for initial boot files
      tempfile:
        state: directory
      register: kstmpdir
      delegate_to: localhost

    - name: Extract ISO files
      community.general.iso_extract:
        image: "{{ rhel_iso_path }}/{{ rhel_iso_filename }}"
        dest: "{{ kstmpdir.path }}"
        files:
          - isolinux/vmlinuz
          - isolinux/initrd.img
      delegate_to: localhost

    - name: Generate Kickstart File
      template:
        src: templates/ks.cfg
        dest: "/var/www/ks/{{ vm_name }}.cfg"
      become: yes
      delegate_to: webserver.mgmt.toal.ca

    - name: Temporary Directory
      file:
        path: "/tmp/{{ vm_name }}"
        state: directory
        mode: 0755
      delegate_to: "{{ vm_host }}"

    - name: Transfer files to Hypervisor
      copy:
        src: "{{ hostvars[vm_name].kstmpdir.path }}/{{ item }}"
        dest: "/tmp/{{ vm_name }}/{{ item }}"
      loop:
        - vmlinuz
        - initrd.img
      delegate_to: "{{ vm_host }}"

# NOTE: This is not idempotent
- name: First Boot
  hosts: "{{ vm_name }}:!provisioned"
  gather_facts: no

  tasks:
    - block:
        - name: Start VM with first-boot parameters
          ovirt_vm:
            name: "{{ vm_name }}"
            host: "{{ vm_host }}"
            kernel_params_persist: false
            cd_iso: "{{ iso_disk.id }}"
            kernel_path: "/tmp/{{ vm_name }}/vmlinuz"
            kernel_params: "ks=http://192.168.1.199/ks/{{ vm_name }}.cfg inst.stage2=hd:LABEL=RHEL-7.9\\x20Server.x86_64"
            initrd_path: "/tmp/{{ vm_name }}/initrd.img"
            state: running
          delegate_to: localhost

        - name: Wait for system to shut down after installation
          ovirt_vm_info:
            pattern: "name={{ vm_name }}"
          register: vm_info
          until: vm_info['ovirt_vms'][0]['status'] == "down"
          delay: 20
          retries: 60
          delegate_to: localhost

      when: hostvars[vm_name].vm_result.vm.status != 'up'

    - name: Power up VM
      ovirt_vm:
        name: "{{ vm_name }}"
        state: running
      delegate_to: localhost

    - name: VM is running
      ovirt_vm:
        name: "{{ vm_name }}"
        state: running
        boot_devices:
          - hd
      delegate_to: localhost

    - name: Set provisioned tag
      ovirt_tag:
        name: provisioned
        vms:
          - "{{ vm_name }}"
        state: present
      delegate_to: localhost

- name: OS Preparation
  hosts: "{{ vm_name }}"
  gather_facts: no

  tasks:
    - name: Set authentication for bootstrap
      no_log: True
      set_fact:
        ansible_ssh_user: "root"
        ansible_ssh_pass: "{{ initial_root_pass }}"

    - name: Wait for SSH to be ready
      wait_for_connection:
        timeout: 1800
        sleep: 5

    - name: Register System to Red Hat
      redhat_subscription:
        state: present
        username: "{{ rhn_username }}"
        password: "{{ rhn_password }}"
        # TODO This shouldn't be hard-coded
        pool_ids: 8a85f99c727637ad0172e1ba2856736d
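        # One way to avoid the hard-coded ID (sketch; pool name is hypothetical):
        #   pool: '^Red Hat Satellite Infrastructure Subscription$'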

    - name: Firewall
      firewalld:
        port: "{{ item }}"
        state: enabled
        permanent: yes
      loop:
        - "80/tcp"
        - "81/tcp"
        - "443/tcp"
        - "5647/tcp"
        - "8000/tcp"
        - "8140/tcp"
        - "9090/tcp"
        - "53/udp"
        - "53/tcp"
        - "67/udp"
        - "69/udp"
        - "5000/tcp"
      notify: Reload Firewall

  handlers:
    - name: Reload Firewall
      service:
        name: firewalld
        state: reloaded

- name: Set up IPA Client
  hosts: "{{ vm_name }}"
  become: yes
  vars:
    ipaclient_realm: IDM.TOAL.CA
    ipaclient_mkhomedir: true
    ipaclient_domain: "mgmt.toal.ca"
    ipaclient_ssh_trust_dns: yes
    ipaclient_all_ip_addresses: yes

  collections:
    - freeipa.ansible_freeipa
  pre_tasks:
    - name: Hostname is set
      hostname:
        name: "{{ vm_name }}"
  roles:
    - role: ipaclient
      state: present

#TODO Automatically set up DNS GSSAPI per: https://access.redhat.com/documentation/en-us/red_hat_satellite/6.8/html/installing_satellite_server_from_a_connected_network/configuring-external-services#configuring-external-idm-dns_satellite

- name: Set up Basic Lab Packages
  hosts: "{{ vm_name }}"
  become: yes
  roles:
    - role: toal-common

- name: Install Satellite Servers
  hosts: "{{ vm_name }}"
  become: true

  roles:
    - role: jjaswanson4.install_satellite.install_satellite

- name: Configure Satellite Servers
  hosts: "{{ vm_name }}"
  collections:
    - jjaswanson4.configure_satellite

  tasks:
    - name: include configure_foreman role with katello independent pieces
      include_role:
        name: configure_satellite_foreman
    - name: build satellite by organization
      include_role:
        name: configure_satellite_katello
      loop_control:
        loop_var: organization
      loop: "{{ satellite.katello }}"
    - name: do that again but for katello dependent pieces
      include_role:
        name: configure_satellite_foreman
      vars:
        requires_katello_content: true

# - name: Customize Satellite Installation
#   hosts: "{{ vm_name }}"

#   collections:
#     - freeipa.ansible_freeipa

#   tasks:
#     - name:

playbooks/site.yml (Normal file, 64 lines)
@@ -0,0 +1,64 @@
# Toal Lab Site Playbook
- name: Common Lab Machine Setup
  hosts: platform_linux,platform_fedora_linux
  become: true
  roles:
    - name: linux-system-roles.network
      when: network_connections is defined
    - name: toal-common

- name: Set Network OS from Netbox info.
  gather_facts: no
  hosts: switch01
  tasks:
    - name: Set network os type for Cisco
      set_fact: ansible_network_os="ios"
      when: "'Cisco IOS' in platforms"

- name: Configure infrastructure
  hosts: switch01
  become_method: enable
  connection: network_cli
  gather_facts: no

  roles:
    - toallab.infrastructure

- name: DHCP Server
  hosts: service_dhcp
  become: yes

  pre_tasks:
    # - name: Gather interfaces for dhcp service
    #   set_fact:
    #     dhcp_interfaces:
    # - name: Gather subnets
    #   set_fact:
    #     subnets: ""
    #     prefixes: "{{ query('netbox.netbox.nb_lookup', 'prefixes', api_endpoint=netbox_api, token=netbox_token) }}"

    # - ip: 192.168.222.0
    #   netmask: 255.255.255.128
    #   domain_name_servers:
    #     - 10.0.2.3
    #     - 10.0.2.4
    #   range_begin: 192.168.222.50
    #   range_end: 192.168.222.127
    # - ip: 192.168.222.128
    #   default_lease_time: 3600
    #   max_lease_time: 7200
    #   netmask: 255.255.255.128
    #   domain_name_servers: 10.0.2.3
    #   routers: 192.168.222.129
  roles:
    - name: sage905.netbox-to-dhcp

- name: Include Minecraft tasks
  import_playbook: minecraft.yml

# - name: Include Gitea tasks
#   import_playbook: gitea.yml

# - name: Include Pod Host
#   include: podhost.yml

playbooks/switch_config.yml (Normal file, 23 lines)
@@ -0,0 +1,23 @@
---
- name: Collect Netbox Data
  hosts: role_core-switch
  gather_facts: false
  collections:
    - netbox.netbox
  vars:
    api_endpoint: "{{ lookup('env','NETBOX_API') }}"
    api_token: "{{ lookup('env', 'NETBOX_TOKEN') }}"

  tasks:
    # - name: Gather Device Information
    #   set_fact:
    #     device_info: "{{ query('netbox.netbox.nb_lookup', 'devices', api_filter='name=' + inventory_hostname, api_endpoint=api_endpoint, token=api_token )[0] }}"
    - debug:
        var: interfaces

    # - name: Obtain list of devices from Netbox
    #   debug:
    #     var: >
    #       "Device {{ item.value.display_name }} (ID: {{ item.key }}) was
    #       manufactured by {{ item.value.device_type.manufacturer.name }}"
    #   loop: "{{ query('netbox.netbox.nb_lookup', 'devices', api_endpoint=api_endpoint, token=api_token ) }}"

playbooks/t.yml (Normal file, 45 lines)
@@ -0,0 +1,45 @@
---
- name: DHCP Server
  hosts: service_dhcp
  connection: local

  pre_tasks:
    # - name: Gather interfaces for dhcp service
    #   set_fact:
    #     dhcp_interfaces:
    # - name: Gather subnets
    #   set_fact:
    #     subnets: ""
    #     prefixes: "{{ query('netbox.netbox.nb_lookup', 'prefixes', api_endpoint=netbox_api, token=netbox_token) }}"

    # - ip: 192.168.222.0
    #   netmask: 255.255.255.128
    #   domain_name_servers:
    #     - 10.0.2.3
    #     - 10.0.2.4
    #   range_begin: 192.168.222.50
    #   range_end: 192.168.222.127
    # - ip: 192.168.222.128
    #   default_lease_time: 3600
    #   max_lease_time: 7200
    #   netmask: 255.255.255.128
    #   domain_name_servers: 10.0.2.3
    #   routers: 192.168.222.129
  roles:
    - name: sage905.netbox-to-dhcp

# tasks:
#   - debug:
#       var: services

#   # - name: Collect Prefix information for interfaces in DHCP Service

#   - name: Collect host interfaces
#     set_fact:
#       dhcp_prefixes: "{{ query('netbox.netbox.nb_lookup', 'prefixes', api_filter='', api_endpoint=netbox_api, token=netbox_token) }}"

#   - name: Test
#     debug: var=dhcp_service

{% for interface in (services|selectattr('name','eq','dhcp')|first).ipaddresses %}
{% set prefix=(query('netbox.netbox.nb_lookup', 'prefixes', api_filter='q=' + interface.address, api_endpoint=netbox_api, token=netbox_token)|first).value %}
subnet {{ prefix.prefix|ipaddr('net') }} netmask {{ prefix.prefix|ipaddr('netmask') }} {

playbooks/test.yml (Normal file, 15 lines)
@@ -0,0 +1,15 @@
---
- name: Create 1Password Secret
  hosts: localhost
  tasks:
    - onepassword.connect.generic_item:
        vault_id: "e63n3krpqx7qpohuvlyqpn6m34"
        title: Lab Secrets Test
        state: created
        fields:
          - label: Codeword
            value: "hunter2"
            section: "Personal Info"
            field_type: concealed
      # no_log: true
      register: op_item

playbooks/windows_ad.yml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
- name: Create Windows AD Server
  hosts: WinAD
  gather_facts: False
  connection: local
  become: no

  vars:
    ansible_python_interpreter: "{{ ansible_playbook_python }}"

  roles:
    - oatakan.ansible-role-ovirt

- name: Configure AD Controller
  hosts: WinAD
  become: yes
  roles:
    - oatakan.ansible-role-windows-ad-controller