Add certificate generation

This commit is contained in:
Patrick Toal
2019-08-31 19:22:32 -04:00
parent 1a207029eb
commit fa2d28367a
37 changed files with 2315 additions and 2 deletions

1
.gitignore vendored
View File

@@ -112,3 +112,4 @@ roles/toallab.infrastructure/backup/
roles/lightbulb-ansiblered-deck/
.vscode/
keys/

718
library/dnsmadeeasy.py Normal file
View File

@@ -0,0 +1,718 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dnsmadeeasy
version_added: "1.3"
short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
description:
- >
Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
options:
account_key:
description:
- Account API Key.
required: true
account_secret:
description:
- Account Secret Key.
required: true
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
resolution
required: true
sandbox:
description:
- Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
type: bool
default: 'no'
version_added: 2.7
record_name:
description:
- Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless
of the state argument.
record_type:
description:
- Record type.
choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
record_value:
description:
- >
Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
- >
If record_value is not specified; no changes will be made and the record will be returned in 'result'
(in other words, this module can be used to fetch a record's current id, type, and ttl)
record_ttl:
description:
- record's "Time to live". Number of seconds the record remains cached in DNS servers.
default: 1800
state:
description:
- whether the record should exist or not
required: true
choices: [ 'present', 'absent' ]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
version_added: 1.5.1
monitor:
description:
- If C(yes), add or change the monitor. This is applicable only for A records.
type: bool
default: 'no'
version_added: 2.4
systemDescription:
description:
- Description used by the monitor.
required: true
default: ''
version_added: 2.4
maxEmails:
description:
- Number of emails sent to the contact list by the monitor.
required: true
default: 1
version_added: 2.4
protocol:
description:
- Protocol used by the monitor.
required: true
default: 'HTTP'
choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
version_added: 2.4
port:
description:
- Port used by the monitor.
required: true
default: 80
version_added: 2.4
sensitivity:
description:
- Number of checks the monitor performs before a failover occurs where Low = 8, Medium = 5,and High = 3.
required: true
default: 'Medium'
choices: ['Low', 'Medium', 'High']
version_added: 2.4
contactList:
description:
- Name or id of the contact list that the monitor will notify.
- The default C('') means the Account Owner.
required: true
default: ''
version_added: 2.4
httpFqdn:
description:
- The fully qualified domain name used by the monitor.
version_added: 2.4
httpFile:
description:
- The file at the Fqdn that the monitor queries for HTTP or HTTPS.
version_added: 2.4
httpQueryString:
description:
- The string in the httpFile that the monitor queries for HTTP or HTTPS.
version_added: 2.4
failover:
description:
- If C(yes), add or change the failover. This is applicable only for A records.
type: bool
default: 'no'
version_added: 2.4
autoFailover:
description:
- If true, fallback to the primary IP address is manual after a failover.
- If false, fallback to the primary IP address is automatic after a failover.
type: bool
default: 'no'
version_added: 2.4
ip1:
description:
- Primary IP address for the failover.
- Required if adding or changing the monitor or failover.
version_added: 2.4
ip2:
description:
- Secondary IP address for the failover.
- Required if adding or changing the failover.
version_added: 2.4
ip3:
description:
- Tertiary IP address for the failover.
version_added: 2.4
ip4:
description:
- Quaternary IP address for the failover.
version_added: 2.4
ip5:
description:
- Quinary IP address for the failover.
version_added: 2.4
notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
seconds of actual time by using NTP.
- This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
These values can be be registered and used in your playbooks.
- Only A records can have a monitor or failover.
- To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
- To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
- The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''
EXAMPLES = '''
# fetch my.com domain records
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
register: response
# create / ensure the presence of a record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
# update the previously created record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_value: 192.0.2.23
# fetch a specific record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
register: response
# delete a record / ensure it is absent
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
record_type: A
state: absent
record_name: test
# Add a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
ip3: 127.0.0.4
ip4: 127.0.0.5
ip5: 127.0.0.6
# Add a monitor
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: yes
ip1: 127.0.0.2
protocol: HTTP # default
port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: my contact list
# Add a monitor with http options
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: yes
ip1: 127.0.0.2
protocol: HTTP # default
port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: 1174 # contact list id
httpFqdn: http://my.com
httpFile: example
httpQueryString: some string
# Add a monitor and a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
monitor: yes
protocol: HTTPS
port: 443
maxEmails: 1
systemDescription: monitoring my.com status
contactList: emergencycontacts
# Remove a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: no
# Remove a monitor
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: no
'''
# ============================================
# DNSMadeEasy module specific support methods.
#
import json
import hashlib
import hmac
from time import strftime, gmtime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six import string_types
class DME2(object):
    """Minimal client for the DNS Made Easy v2 REST API.

    Wraps the managed-record, monitor, and contact-list endpoints and
    lazily caches name->ID maps for domains, records, and contact lists.
    Failures from the API are reported through ``module.fail_json``.
    """

    def __init__(self, apikey, secret, domain, sandbox, module):
        self.module = module
        self.api = apikey
        self.secret = secret
        # Sandbox and production are separate services with separate credentials.
        if sandbox:
            self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
            self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
        else:
            self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
        self.domain = str(domain)
        self.domain_map = None      # ["domain_name"] => ID
        self.record_map = None      # ["record_name"] => ID
        self.records = None         # ["record_ID"] => <record>
        self.all_records = None
        self.contactList_map = None  # ["contactList_name"] => ID
        # Lookup the domain ID if passed as a domain name vs. ID
        if not self.domain.isdigit():
            self.domain = self.getDomainByName(self.domain)['id']
        self.record_url = 'dns/managed/' + str(self.domain) + '/records'
        self.monitor_url = 'monitor'
        self.contactList_url = 'contactList'

    def _headers(self):
        """Build the per-request auth headers required by the DME API."""
        currTime = self._get_date()
        hashstring = self._create_hash(currTime)
        headers = {'x-dnsme-apiKey': self.api,
                   'x-dnsme-hmac': hashstring,
                   'x-dnsme-requestDate': currTime,
                   'content-type': 'application/json'}
        return headers

    def _get_date(self):
        # RFC 1123 date in GMT; the API validates it against server time.
        return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())

    def _create_hash(self, rightnow):
        # HMAC-SHA1 of the request date, keyed with the account secret.
        return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()

    def query(self, resource, method, data=None):
        """Issue one API request; fail the module on any non-2xx status.

        Returns the decoded JSON body, or {} when the response has no body
        (e.g. HTTP 204 from a DELETE).
        """
        url = self.baseurl + resource
        if data and not isinstance(data, string_types):
            data = urlencode(data)

        response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
        if info['status'] not in (200, 201, 204):
            self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))

        try:
            return json.load(response)
        except Exception:
            return {}

    def getDomain(self, domain_id):
        """Return the cached domain dict for domain_id, or False."""
        if not self.domain_map:
            self._instMap('domain')

        return self.domains.get(domain_id, False)

    def getDomainByName(self, domain_name):
        """Return the domain dict matching domain_name, or False."""
        if not self.domain_map:
            self._instMap('domain')

        return self.getDomain(self.domain_map.get(domain_name, 0))

    def getDomains(self):
        return self.query('dns/managed', 'GET')['data']

    def getRecord(self, record_id):
        """Return the cached record dict for record_id, or False."""
        if not self.record_map:
            self._instMap('record')

        return self.records.get(record_id, False)

    # Try to find a single record matching this one.
    # How we do this depends on the type of record. For instance, there
    # can be several MX records for a single record_name while there can
    # only be a single CNAME for a particular record_name. Note also that
    # there can be several records with different types for a single name.
    def getMatchingRecord(self, record_name, record_type, record_value):
        # Get all the records if not already cached
        if not self.all_records:
            self.all_records = self.getRecords()

        if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
            # These types are unique per name, so name+type identifies the record.
            for result in self.all_records:
                if result['name'] == record_name and result['type'] == record_type:
                    return result
            return False
        elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
            # Several records may share name+type; match on the value too.
            for result in self.all_records:
                if record_type == "MX":
                    # MX record_value is "<priority> <target>"; compare the target.
                    value = record_value.split(" ")[1]
                elif record_type == "SRV":
                    # SRV record_value is "<priority> <weight> <port> <target>".
                    value = record_value.split(" ")[3]
                else:
                    value = record_value
                if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
                    return result
            return False
        else:
            raise Exception('record_type not yet supported')

    def getRecords(self):
        return self.query(self.record_url, 'GET')['data']

    def _instMap(self, type):
        # @TODO cache this call so it's executed only once per ansible execution
        map = {}
        results = {}

        # iterate over e.g. self.getDomains() || self.getRecords()
        for result in getattr(self, 'get' + type.title() + 's')():

            map[result['name']] = result['id']
            results[result['id']] = result

        # e.g. self.domain_map || self.record_map
        setattr(self, type + '_map', map)
        setattr(self, type + 's', results)  # e.g. self.domains || self.records

    def prepareRecord(self, data):
        # Compact JSON encoding expected by the API.
        return json.dumps(data, separators=(',', ':'))

    def createRecord(self, data):
        # @TODO update the cache w/ resultant record + id when implemented
        return self.query(self.record_url, 'POST', data)

    def updateRecord(self, record_id, data):
        # @TODO update the cache w/ resultant record + id when implemented
        return self.query(self.record_url + '/' + str(record_id), 'PUT', data)

    def deleteRecord(self, record_id):
        # @TODO remove record from the cache when implemented
        return self.query(self.record_url + '/' + str(record_id), 'DELETE')

    def getMonitor(self, record_id):
        # Monitors are keyed by the ID of the A record they watch.
        return self.query(self.monitor_url + '/' + str(record_id), 'GET')

    def updateMonitor(self, record_id, data):
        return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)

    def prepareMonitor(self, data):
        return json.dumps(data, separators=(',', ':'))

    def getContactList(self, contact_list_id):
        """Return the cached contact-list dict for contact_list_id, or False."""
        if not self.contactList_map:
            self._instMap('contactList')

        return self.contactLists.get(contact_list_id, False)

    def getContactlists(self):
        return self.query(self.contactList_url, 'GET')['data']

    def getContactListByName(self, name):
        """Return the contact-list dict matching name, or False."""
        if not self.contactList_map:
            self._instMap('contactList')

        return self.getContactList(self.contactList_map.get(name, 0))
# ===========================================
# Module execution.
#
def main():
    """Ansible entry point.

    Creates, updates, fetches, or deletes a DNS Made Easy record and, for
    A records, an optional monitor/failover configuration. Exits via
    module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            account_key=dict(required=True),
            account_secret=dict(required=True, no_log=True),
            domain=dict(required=True),
            sandbox=dict(default='no', type='bool'),
            state=dict(required=True, choices=['present', 'absent']),
            record_name=dict(required=False),
            record_type=dict(required=False, choices=[
                'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
            record_value=dict(required=False),
            record_ttl=dict(required=False, default=1800, type='int'),
            monitor=dict(default='no', type='bool'),
            systemDescription=dict(default=''),
            maxEmails=dict(default=1, type='int'),
            protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
            port=dict(default=80, type='int'),
            sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
            contactList=dict(default=None),
            httpFqdn=dict(required=False),
            httpFile=dict(required=False),
            httpQueryString=dict(required=False),
            failover=dict(default='no', type='bool'),
            autoFailover=dict(default='no', type='bool'),
            ip1=dict(required=False),
            ip2=dict(required=False),
            ip3=dict(required=False),
            ip4=dict(required=False),
            ip5=dict(required=False),
            validate_certs=dict(default='yes', type='bool'),
        ),
        required_together=[
            ['record_value', 'record_ttl', 'record_type']
        ],
        required_if=[
            ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
            ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
        ]
    )

    # The API encodes protocol and sensitivity as numeric codes.
    protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
    sensitivities = dict(Low=8, Medium=5, High=3)

    DME = DME2(module.params["account_key"], module.params[
        "account_secret"], module.params["domain"], module.params["sandbox"], module)
    state = module.params["state"]
    record_name = module.params["record_name"]
    record_type = module.params["record_type"]
    record_value = module.params["record_value"]

    # Follow Keyword Controlled Behavior:
    # without a record_name, return all records for the domain.
    if record_name is None:
        domain_records = DME.getRecords()
        if not domain_records:
            module.fail_json(
                msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
        module.exit_json(changed=False, result=domain_records)

    # Fetch existing record + Build new one
    current_record = DME.getMatchingRecord(record_name, record_type, record_value)
    new_record = {'name': record_name}
    for i in ["record_value", "record_type", "record_ttl"]:
        if not module.params[i] is None:
            new_record[i[len("record_"):]] = module.params[i]

    # Special handling for MX records: "<priority> <target>" splits into two
    # API fields. Use .get() because fetch-by-name calls omit record_type, in
    # which case the "type" key is absent and plain indexing would raise KeyError.
    if new_record.get("type") == "MX":
        new_record["mxLevel"] = new_record["value"].split(" ")[0]
        new_record["value"] = new_record["value"].split(" ")[1]

    # Special handling for SRV records: "<priority> <weight> <port> <target>".
    if new_record.get("type") == "SRV":
        new_record["priority"] = new_record["value"].split(" ")[0]
        new_record["weight"] = new_record["value"].split(" ")[1]
        new_record["port"] = new_record["value"].split(" ")[2]
        new_record["value"] = new_record["value"].split(" ")[3]

    # Fetch existing monitor if the A record indicates it should exist and
    # build the new monitor payload. Monitors/failover apply to A records only.
    current_monitor = dict()
    new_monitor = dict()
    if current_record and current_record['type'] == 'A':
        current_monitor = DME.getMonitor(current_record['id'])

        # Build the new monitor
        for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
                  'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
                  'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
            if module.params[i] is not None:
                if i == 'protocol':
                    # The API requires protocol to be a numeric in the range 1-6
                    new_monitor['protocolId'] = protocols[module.params[i]]
                elif i == 'sensitivity':
                    # The API requires sensitivity to be a numeric of 8, 5, or 3
                    new_monitor[i] = sensitivities[module.params[i]]
                elif i == 'contactList':
                    # The module accepts either the name or the id of the contact list
                    contact_list_id = module.params[i]
                    if not contact_list_id.isdigit() and contact_list_id != '':
                        contact_list = DME.getContactListByName(contact_list_id)
                        if not contact_list:
                            module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
                        contact_list_id = contact_list.get('id', '')
                    new_monitor['contactListId'] = contact_list_id
                else:
                    # The module option names match the API field names
                    new_monitor[i] = module.params[i]

    # Compare new record against existing one (string-compare to tolerate
    # int/str differences between module params and API responses).
    record_changed = False
    if current_record:
        for i in new_record:
            if str(current_record[i]) != str(new_record[i]):
                record_changed = True
        new_record['id'] = str(current_record['id'])

    monitor_changed = False
    if current_monitor:
        for i in new_monitor:
            if str(current_monitor.get(i)) != str(new_monitor[i]):
                monitor_changed = True

    # Follow Keyword Controlled Behavior
    if state == 'present':
        # return the record if no value is specified
        if "value" not in new_record:
            if not current_record:
                module.fail_json(
                    msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
            module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))

        # create record and monitor as the record does not exist
        if not current_record:
            record = DME.createRecord(DME.prepareRecord(new_record))
            if module.params['monitor']:
                monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
                module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
            else:
                module.exit_json(changed=True, result=dict(record=record))

        # update the record
        updated = False
        if record_changed:
            DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
            updated = True
        if monitor_changed:
            DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
            updated = True
        if updated:
            module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))

        # return the record (no changes)
        module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))

    elif state == 'absent':
        changed = False
        # delete the record (and the monitor/failover) if it exists
        if current_record:
            DME.deleteRecord(current_record['id'])
            module.exit_json(changed=True)

        # record does not exist, return w/o change.
        module.exit_json(changed=changed)

    else:
        module.fail_json(
            msg="'%s' is an unknown value for the state argument" % state)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()

View File

@@ -1,6 +1,41 @@
---
# Probably want to split this out into a proper certificate management role for Toal.ca
- name: Request TLS Certificate from LetsEncrypt
hosts: rhv.mgmt.toal.ca
connection: local
gather_facts: false
# This doesn't belong here
vars:
acme_email: ptoal@takeflight.ca
challenge: dns-01
dns_provider: dme
domains:
- rhv.mgmt.toal.ca
pre_tasks:
- name: Ensure Let's Encrypt Account Exists
acme_account:
state: present
terms_agreed: true
allow_creation: true
contact:
- mailto:ptoal@takeflight.ca
account_key_content: "{{ acme_key }}"
acme_version: 2
roles:
- acme-certificate
# - name: Install custom CA Certificate in RHV-M
# hosts: rhv.lab.toal.ca
# tasks:
# - name: Download LetsEncrypt Root/Intermediate Certificates
- name: Create RHV/ovirt VLANs
hosts: rhv.lab.toal.ca
hosts: rhv.mgmt.toal.ca
connection: local
vars:
# Hack to work around virtualenv python interpreter
@@ -11,7 +46,7 @@
fetch_nested: true
data_center: "{{ item.data_center }}"
name: "{{ item.name }}"
vlan_tag: "{{ item.vlan_tag }}"
vlan_tag: "{{ item.vlan_tag|default(omit) }}"
vm_network: "{{ item.vm_network }}"
mtu: "{{ item.mtu }}"
description: "{{ item.description }}"

1
roles/acme-certificate Symbolic link
View File

@@ -0,0 +1 @@
/Users/ptoal/Dev/acme-certificate/

6
roles/ovirt.manageiq/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
*retry
.tox
*.tar.gz
output/
ovirt-ansible-manageiq.spec
exported-artifacts/

View File

@@ -0,0 +1,31 @@
---
sudo: required
language: python
python:
- "2.7"
services:
- docker
env:
global:
- ANSIBLE_HOST_KEY_CHECKING="False"
# Install python-pip
addons:
apt:
packages:
- python-pip
install:
- pip install tox ansible docker-py yamllint
# Check ansible version
- ansible --version
script:
# Run syntax checks and linters
- tox
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,264 @@
Deploy ManageIQ in oVirt
==================================================
The `ovirt.manageiq` role downloads a ManageIQ/CloudForms QCOW image and deploys it into oVirt/Red Hat Virtualization (RHV).
The role also enables you to create a virtual machine and attach the ManageIQ disk, then wait for the ManageIQ system to initialize, and register oVirt as an infrastructure provider.
Note
----
Please note that when installing this role from Ansible Galaxy you are instructed to run following command:
```bash
$ ansible-galaxy install ovirt.manageiq
```
This will download the role to the directory with the same name as you specified on the
command line, in this case `ovirt.manageiq`. But note that it is case sensitive, so if you specify
for example `OVIRT.manageiq` it will download the same role, but it will add it to the directory named
`OVIRT.manageiq`, so you later always have to use this role with upper case prefix. So be careful how
you specify the name of the role on command line.
For the RPM installation we install three legacy names `oVirt.manageiq`, `ovirt.manageiq` and `ovirt-manageiq`.
So you can use any of these names. This documentation and examples in this repository are using name `ovirt.manageiq`.
`oVirt.manageiq` and `ovirt-manageiq` role names are deprecated.
Requirements
------------
* oVirt has to be 4.0.4 or higher.
* Ansible has to be 2.5 or higher.
* [ovirt-imageio](http://www.ovirt.org/develop/release-management/features/storage/image-upload/) must be installed and running.
* [oVirt Python SDK version 4](https://pypi.python.org/pypi/ovirt-engine-sdk-python/4.2.4).
Additionally, perform the following checks to ensure the required processes are running.
* Check whether `ovirt-imageio-proxy` is running on the engine:
```
systemctl status ovirt-imageio-proxy
```
* Check whether `ovirt-imageio-daemon` is running on the hosts:
```
systemctl status ovirt-imageio-daemon
```
You will also require the CA certificate of the engine. To do this, configure the `ovirt_ca` variable with the path to the CA certificate.
Limitations
-----------
* We do not support Ansible check mode (dry run), because this role uses a few modules (such as the `command` module)
  that do not support it. Once all modules used by this role support check mode, we will support it.
Role Variables
--------------
QCOW variables:
| Name | Default value | Description |
|---------------|----------------------------------------------------------|--------------------------------------------------------------|
| miq_qcow_url | http://releases.manageiq.org/manageiq-ovirt-gaprindashvili-3.qc2 | The URL of the ManageIQ QCOW image. |
| miq_image_path | /tmp/ | Path where the QCOW2 image will be downloaded to. If directory the base name of the URL on the remote server will be used. |
| miq_image_checksum | UNDEF | If a checksum is defined, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. Format: :, e.g. checksum="sha256:D98291AC[...]B6DC7B97". |
Engine login variables:
| Name | Default value | Description |
|---------------------|-------------------|-----------------------------------------|
| engine_user | UNDEF | The user to access the engine. |
| engine_password | UNDEF | The password of the 'engine_user'. |
| engine_fqdn | UNDEF | The FQDN of the engine. |
| engine_ca | UNDEF | The path to the engine's CA certificate.|
Virtual machine variables:
| Name | Default value | Description |
|-----------------------|---------------------|----------------------------------------------------------------|
| miq_vm_name | manageiq_gaprindashvili-3 | The name of the ManageIQ virtual machine. |
| miq_vm_cluster | Default | The cluster of the virtual machine. |
| miq_vm_memory | 16GiB | The virtual machine's system memory. |
| miq_vm_memory_guaranteed | UNDEF | Amount of minimal guaranteed memory of the Virtual Machine. miq_vm_memory_guaranteed parameter can't be lower than miq_vm_memory parameter. |
| miq_vm_memory_max | UNDEF | Upper bound of virtual machine memory up to which memory hot-plug can be performed. |
| miq_vm_cpu | 4 | The number of virtual machine CPU cores. |
| miq_vm_cpu_shares | UNDEF | Set a CPU shares for this Virtual Machine. |
| miq_vm_cpu_sockets | UNDEF | Number of virtual CPUs sockets of the Virtual Machine. |
| miq_vm_cpu_threads | UNDEF | Number of virtual CPUs threads of the Virtual Machine. |
| miq_vm_os | rhel_7x64 | The virtual machine operating system. |
| miq_vm_root_password | `miq_app_password` | The root password for the virtual machine. |
| miq_vm_cloud_init | UNDEF | The cloud init dictionary to be passed to the virtual machine. |
| miq_vm_high_availability | true | If yes ManageIQ virtual machine will be set as highly available. |
| miq_vm_high_availability_priority | 50 | Indicates the priority of the virtual machine inside the run and migration queues. The value is an integer between 0 and 100. The higher the value, the higher the priority. |
| miq_vm_delete_protected | true | If yes ManageIQ virtual machine will be set as delete protected. |
| miq_debug_create | false | If true log sensitive data, useful for debug purposes. |
| miq_wait_for_ip_version | v4 | Specify which IP version should be waited for. Either v4 or v6. |
Virtual machine main disks variables (e.g. operating system):
| Name | Default value | Description |
|---------------------|----------------------|-----------------------------------------|
| miq_vm_disk_name | `miq_vm_name` | The name of the virtual machine disk. |
| miq_vm_disk_storage | UNDEF | The target storage domain of the disk. |
| miq_vm_disk_size | Size of qcow disk | The virtual machine disk size. |
| miq_vm_disk_interface | virtio_scsi | The virtual machine disk interface type.|
| miq_vm_disk_format | cow | The format of the virtual machine disk. |
Virtual machine extra disks (e.g. database, log, tmp): a dict named
`miq_vm_disks` allows to describe each of the extra disks (see example
playbook). Note, that this works only with CFME.
For each disk, the following attributes can be set:
| Name | Default value | Description |
|-----------|---------------|----------------------------------------------------------------------|
| name | `miq_vm_name`_`type` | The name of the virtual machine disk. |
| size | UNDEF | The virtual machine disk size (`XXGiB`). |
| interface | virtio_scsi | The virtual machine disk interface type (`virtio` or `virtio_scsi`). `virtio_scsi` is recommended, as `virtio` has low limit of count of disks. |
| format | UNDEF | The format of the virtual machine disk (`raw` or `cow`). |
| timeout | UNDEF | Timeout of disk creation. |
Virtual machine NICs variables:
| Name | Default value | Description |
|---------------------|-------------------|------------------------------------------------------|
| miq_vm_nics | {'name': 'nic1', 'profile_name': 'ovirtmgmt', 'interface': 'virtio'} | List of dictionaries that defines the virtual machine network interfaces. |
The item in `miq_vm_nics` list of can contain following attributes:
| Name | Default value | |
|--------------------|----------------|----------------------------------------------|
| name | UNDEF | The name of the network interface. |
| interface | UNDEF | Type of the network interface. |
| mac_address | UNDEF | Custom MAC address of the network interface, by default it's obtained from MAC pool. |
| network | UNDEF | Logical network which the VM network interface should use. If network is not specified, then Empty network is used. |
| profile | UNDEF | Virtual network interface profile to be attached to VM network interface. |
ManageIQ variables:
| Name | Default value | Description |
|--------------------|---------------------|----------------------------------------------------------------------------|
| miq_app_username | admin | The username used to login to ManageIQ. |
| miq_app_password | smartvm | The password of the user specified in `miq_app_username`, used to login to ManageIQ. |
| miq_username | admin | Alias of `miq_app_username` for backward compatibility. |
| miq_password | smartvm | Alias of `miq_app_password` for backward compatibility. |
| miq_db_username | root | The username to connect to the database. |
| miq_db_password | `miq_app_password` | The password of user specific in username used to connect to the database. |
| miq_region | 0 | The ManageIQ region created in the database. Note: Works only with CFME. |
| miq_company | My Company | The company name of the appliance. |
| miq_disabled_roles | [] | List of ManageIQ server roles to disable on the appliance. |
| miq_enabled_roles | [] | List of ManageIQ server roles to enable on the appliance. |
Both on ManageIQ and CloudForms, the default enabled server roles are:
- `automate` - Automation Engine
- `database_operations` - Database Operations
- `event` - Event Monitor
- `ems_inventory` - Provider Inventory
- `ems_operations` - Provider Operations
- `reporting` - Reporting
- `scheduler` - Scheduler
- `smartstate` - SmartState Analysis
- `user_interface` - User Interface
- `websocket` - Websocket
- `web_services` - Web Services
RHV provider and RHV metrics variables:
| Name | Default value | Description |
|-----------------------|----------------|--------------------------------------------------------|
| miq_rhv_provider_name | RHV provider | Name of the RHV provider to be displayed in ManageIQ. |
| metrics_fqdn | UNDEF | FQDN of the oVirt/RHV metrics. |
| metrics_user | engine_history | The user used to connect to the metrics server. |
| metrics_password | "" | The password of the `metrics_user` . |
| metrics_port | 5432 | Port to connect to oVirt/RHV metrics. |
| metrics_db_name | ovirt_engine_history | Database name of the oVirt engine metrics database. |
Dependencies
------------
No.
Example Playbook
----------------
Note that for passwords you should use Ansible vault.
Here is an example how to deploy CFME:
```yaml
- name: Deploy CFME to oVirt engine
hosts: localhost
gather_facts: no
vars_files:
# Contains encrypted `engine_password` variable, encrypted using ansible-vault
- passwords.yml
vars:
engine_fqdn: ovirt-engine.example.com
engine_user: admin@internal
miq_qcow_url: https://cdn.example.com/cfme-rhevm-5.9.1.2-1.x86_64.qcow2
miq_vm_name: cfme_59
miq_vm_cluster: mycluster
miq_vm_cloud_init:
host_name: "{{ miq_vm_name }}"
miq_vm_disks:
database:
name: "{{ miq_vm_name }}_database"
size: 10GiB
interface: virtio_scsi
format: raw
log:
name: "{{ miq_vm_name }}_log"
size: 10GiB
interface: virtio_scsi
format: cow
tmp:
name: "{{ miq_vm_name }}_tmp"
size: 10GiB
interface: virtio_scsi
format: cow
miq_disabled_roles:
- smartstate
miq_enabled_roles:
- notifier
- ems_metrics_coordinator
- ems_metrics_collector
- ems_metrics_processor
- embedded_ansible
roles:
- ovirt.manageiq
```
Here is an example how to deploy ManageIQ:
```
---
- name: oVirt ManageIQ deployment
hosts: localhost
connection: local
gather_facts: false
vars_files:
# Contains encrypted `engine_password` and `metrics_password`
# varibale using ansible-vault
- passwords.yml
vars:
engine_fqdn: ovirt.example.com
engine_user: admin@internal
engine_cafile: /etc/pki/ovirt-engine/ca.pem
miq_qcow_url: http://releases.manageiq.org/manageiq-ovirt-gaprindashvili-5.qc2
miq_vm_name: manageiq_g5
miq_vm_cluster: mycluster
metrics_fqdn: metrics.example.com
metrics_port: 8443
metrics_user: admin
roles:
- ovirt.manageiq
```

View File

@@ -0,0 +1,6 @@
# oVirt CI (STDCI) configuration for this role.
# Target build distributions:
distros:
  - fc29
  - fc28
  - el7
# Branch of this repo -> oVirt release branches its builds feed into:
release_branches:
  master: [ "ovirt-master", "ovirt-4.3", "ovirt-4.2" ]

View File

@@ -0,0 +1,8 @@
Continuous Integration Scripts
==============================
This directory contains scripts for Continuous Integration provided by
[oVirt Jenkins](http://jenkins.ovirt.org/)
system and follows the standard defined in
[Build and test standards](http://www.ovirt.org/CI/Build_and_test_standards)
wiki page.

View File

@@ -0,0 +1 @@
build-artifacts.req

View File

@@ -0,0 +1,29 @@
#!/bin/bash -xe
# CI build-artifacts script: create a release tarball, build the SRPM and
# binary RPMs from it, and collect everything under exported-artifacts/
# for the CI system to archive.

# remove any previous artifacts
rm -rf output
make clean

# Get the tarball
./build.sh dist

# create the src.rpm, assuming the tarball is in the project's directory.
# FIX: the macro is "_topdir" — the previous "_topmdir" spelling defined an
# unused macro, so rpmbuild silently fell back to its default top directory.
rpmbuild \
    -D "_srcrpmdir $PWD/output" \
    -D "_topdir $PWD/rpmbuild" \
    -ts ./*.gz

# install any build requirements
yum-builddep output/*src.rpm

# create the rpms
rpmbuild \
    -D "_rpmdir $PWD/output" \
    -D "_topdir $PWD/rpmbuild" \
    --rebuild output/*.src.rpm

# Store any relevant artifacts in exported-artifacts for the ci system to
# archive
[[ -d exported-artifacts ]] || mkdir -p exported-artifacts
find output -iname \*rpm -exec mv "{}" exported-artifacts/ \;
mv ./*tar.gz exported-artifacts/

View File

@@ -0,0 +1,3 @@
yum-utils
ansible
git

View File

@@ -0,0 +1,29 @@
#!/bin/bash -xe
# CI script: build SRPM + RPMs from a freshly generated tarball and stage
# the results in exported-artifacts/ for archival.

# remove any previous artifacts
rm -rf output
rm -f ./*tar.gz

# Get the tarball
./build.sh dist

# create the src.rpm.
# FIX: the macro is "_topdir" — the previous "_topmdir" spelling defined an
# unused macro, so rpmbuild silently fell back to its default top directory.
rpmbuild \
    -D "_srcrpmdir $PWD/output" \
    -D "_topdir $PWD/rpmbuild" \
    -ts ./*.gz

# install any build requirements
yum-builddep output/*src.rpm

# create the rpms
rpmbuild \
    -D "_rpmdir $PWD/output" \
    -D "_topdir $PWD/rpmbuild" \
    --rebuild output/*.src.rpm

# Store any relevant artifacts in exported-artifacts for the ci system to
# archive
[[ -d exported-artifacts ]] || mkdir -p exported-artifacts
find output -iname \*rpm -exec mv "{}" exported-artifacts/ \;
mv ./*tar.gz exported-artifacts/

View File

@@ -0,0 +1,3 @@
yum-utils
git
ansible

View File

@@ -0,0 +1,27 @@
#!/bin/bash -xe
# CI check script: verify a patch still builds cleanly into SRPM + RPMs,
# exporting the results for inspection.

# remove any previous artifacts
rm -rf output
rm -f ./*tar.gz

# Get the tarball
./build.sh dist

# create the src.rpm.
# FIX: the macro is "_topdir" — the previous "_topmdir" spelling defined an
# unused macro, so rpmbuild silently fell back to its default top directory.
rpmbuild \
    -D "_srcrpmdir $PWD/output" \
    -D "_topdir $PWD/rpmbuild" \
    -ts ./*.gz

# install any build requirements
yum-builddep output/*src.rpm

# create the rpms
rpmbuild \
    -D "_rpmdir $PWD/output" \
    -D "_topdir $PWD/rpmbuild" \
    --rebuild output/*.src.rpm

[[ -d exported-artifacts ]] || mkdir -p exported-artifacts
find output -iname \*rpm -exec mv "{}" exported-artifacts/ \;
# Anchor the glob with ./ so a stray leading-dash filename cannot be
# parsed as an option, and keep the destination spelled as a directory.
mv ./*.tar.gz exported-artifacts/

60
roles/ovirt.manageiq/build.sh Executable file
View File

@@ -0,0 +1,60 @@
#!/bin/bash
# Build helper for the ovirt.manageiq role.
#
# Usage: ./build.sh dist|install
#   dist    - render the spec from its .in template and create the release tarball
#   install - copy the role content into $PKG_DATA_DIR (invoked from the RPM
#             %install section with buildroot-prefixed paths exported)

VERSION="1.1.14"
MILESTONE=
RPM_RELEASE="1"

ROLE_NAME="ovirt.manageiq"
PACKAGE_NAME="ovirt-ansible-manageiq"
PREFIX=/usr/local
DATAROOT_DIR=$PREFIX/share
ROLES_DATAROOT_DIR=$DATAROOT_DIR/ansible/roles
DOC_DIR=$DATAROOT_DIR/doc
PKG_DATA_DIR=${PKG_DATA_DIR:-$ROLES_DATAROOT_DIR/$PACKAGE_NAME}
PKG_DATA_DIR_ORIG=${PKG_DATA_DIR_ORIG:-$PKG_DATA_DIR}
PKG_DOC_DIR=${PKG_DOC_DIR:-$DOC_DIR/$PACKAGE_NAME}
ROLENAME_LEGACY="${ROLENAME_LEGACY:-$ROLES_DATAROOT_DIR/ovirt-manageiq}"
ROLENAME_LEGACY_UPPERCASE="${ROLENAME_LEGACY_UPPERCASE:-$ROLES_DATAROOT_DIR/oVirt.manageiq}"

RPM_VERSION=$VERSION
PACKAGE_VERSION=$VERSION
[ -n "$MILESTONE" ] && PACKAGE_VERSION+="_$MILESTONE"
# FIX: was "$PACKAGE$VERSION" — $PACKAGE is never defined anywhere in this
# script; the intended value is the full package version (with milestone).
DISPLAY_VERSION=$PACKAGE_VERSION

TARBALL="$PACKAGE_NAME-$PACKAGE_VERSION.tar.gz"

# Render the spec file from its template and archive all git-tracked files
# plus the generated spec into $TARBALL.
dist() {
    echo "Creating tar archive '$TARBALL' ... "
    sed \
        -e "s|@RPM_VERSION@|$RPM_VERSION|g" \
        -e "s|@RPM_RELEASE@|$RPM_RELEASE|g" \
        -e "s|@PACKAGE_NAME@|$PACKAGE_NAME|g" \
        -e "s|@PACKAGE_VERSION@|$PACKAGE_VERSION|g" \
        < ovirt-ansible-manageiq.spec.in > ovirt-ansible-manageiq.spec

    # Feed the tracked-file list to tar on stdin via /proc/self/fd/0.
    git ls-files | tar --files-from /proc/self/fd/0 -czf "$TARBALL" ovirt-ansible-manageiq.spec
    echo "tar archive '$TARBALL' created."
}

# Install the role content and create legacy-name symlinks so old role
# names (ovirt-manageiq, oVirt.manageiq) keep working.
install() {
    echo "Installing data..."
    mkdir -p $PKG_DATA_DIR
    mkdir -p $PKG_DOC_DIR

    # Create a symlink, so legacy role name does work:
    ln -f -s $PKG_DATA_DIR_ORIG $ROLENAME_LEGACY

    # Create a symlink, so legacy role name does work with upper case:
    ln -f -s $PKG_DATA_DIR_ORIG $ROLENAME_LEGACY_UPPERCASE

    cp -pR defaults/ $PKG_DATA_DIR
    cp -pR filter_plugins/ $PKG_DATA_DIR
    cp -pR library/ $PKG_DATA_DIR
    cp -pR tasks/ $PKG_DATA_DIR
    cp -pR templates/ $PKG_DATA_DIR
    cp -pR vars/ $PKG_DATA_DIR
    echo "Installation done."
}

# Dispatch on the first argument (dist|install).
$1

View File

@@ -0,0 +1,80 @@
---
# Default variables for the ovirt.manageiq role.

### This option disables no_log on tasks that handle sensitive data:
miq_debug_create: false

### Wait for IP version (v4 or v6):
miq_wait_for_ip_version: v4

### ManageIQ/CloudForms ###
# Directory (or full file path) the QCOW image is downloaded to:
miq_image_path: /tmp

# QCOW2 ManageIQ/CloudForms image URL:
miq_qcow_url: http://releases.manageiq.org/manageiq-ovirt-gaprindashvili-5.qc2

# ManageIQ/CloudForms application credentials.
# We keep miq_{username,password} for backward compatibility:
miq_username: admin
miq_password: smartvm
miq_app_username: "{{ miq_username }}"
miq_app_password: "{{ miq_password }}"

# ManageIQ/CloudForms database credentials:
miq_db_username: root
miq_db_password: "{{ miq_app_password }}"

# ManageIQ/CloudForms region:
miq_region: 0
miq_region_id: 1

# ManageIQ/CloudForms company name:
miq_company: My Company

# Providers:
miq_rhv_provider_name: RHV provider
miq_initialize: true

### oVirt/RHV ###
# VM variables:
miq_vm_name: manageiq_gaprindashvili-5
miq_vm_cluster: Default
miq_vm_memory: 16GiB
miq_vm_cpu: 4
miq_vm_os: rhel_7x64
miq_vm_root_password: "{{ miq_app_password }}"
miq_vm_high_availability: true
miq_vm_high_availability_priority: 50
miq_vm_delete_protected: true

# Vm disks (main/OS disk defaults):
miq_vm_disk_interface: virtio
miq_vm_disk_format: cow
# Flag set by the deploy tasks when the disk upload fails:
miq_disk_deploy_failed: false

# Additional disks.
# Default one is database disk.
miq_vm_disks:
  database:
    name: "{{ miq_vm_name }}_database"
    size: 50GiB
    interface: virtio_scsi
    format: raw
    timeout: 900

# Vm NICS:
miq_vm_nics:
  - name: nic1
    profile_name: ovirtmgmt
    interface: virtio

# Metrics connection defaults:
metrics_db_name: ovirt_engine_history
metrics_port: 5432
metrics_user: engine_history
metrics_password: ''

# ManageIQ/CloudForms server roles to disable/enable on the appliance:
miq_disabled_roles: []
miq_enabled_roles: []

# Command to initialize cloudforms
miq_init_cmd: "appliance_console_cli -i -r {{ miq_region }} -U {{ miq_db_username }} -p '{{ miq_db_password }}' -k -f"

View File

@@ -0,0 +1,48 @@
---
# Example playbook: deploy a CloudForms (CFME) appliance into RHV with
# separate database/log/tmp disks and a customized role set.
- name: RHV CFME deployment
  hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
    # Contains encrypted `engine_password` and `metrics_password`
    # variables, encrypted using ansible-vault
    - passwords.yml
  vars:
    engine_fqdn: ovirt-engine.example.com
    engine_user: admin@internal
    miq_vm_name: cfme_59
    miq_qcow_url: https://cdn.example.com/cfme-rhevm-5.9.1.2-1.x86_64.qcow2
    miq_vm_cluster: mycluster
    miq_vm_root_password: securepassword
    miq_vm_cloud_init:
      host_name: "{{ miq_vm_name }}"
    # Extra appliance disks (CFME only):
    miq_vm_disks:
      database:
        name: "{{ miq_vm_name }}_database"
        size: 10GiB
        interface: virtio_scsi
        format: raw
      log:
        name: "{{ miq_vm_name }}_log"
        size: 10GiB
        interface: virtio_scsi
        format: cow
      tmp:
        name: "{{ miq_vm_name }}_tmp"
        size: 10GiB
        interface: virtio_scsi
        format: raw
    miq_disabled_roles:
      - smartstate
    miq_enabled_roles:
      - notifier
      - ems_metrics_coordinator
      - ems_metrics_collector
      - ems_metrics_processor
      - embedded_ansible
  roles:
    - ovirt.manageiq

View File

@@ -0,0 +1,27 @@
---
# Example playbook: deploy a ManageIQ appliance into oVirt and register the
# oVirt metrics server as a data source.
- name: oVirt ManageIQ deployment
  hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
    # Contains encrypted `engine_password` and `metrics_password`
    # variables, encrypted using ansible-vault
    - passwords.yml
  vars:
    engine_fqdn: ovirt.example.com
    engine_user: admin@internal
    engine_cafile: /etc/pki/ovirt-engine/ca.pem
    miq_qcow_url: http://releases.manageiq.org/manageiq-ovirt-gaprindashvili-3.qc2
    miq_vm_name: manageiq_g3
    miq_vm_cluster: mycluster
    metrics_fqdn: metrics.example.com
    metrics_port: 8443
    metrics_user: admin
  roles:
    - ovirt.manageiq

View File

@@ -0,0 +1 @@
../

View File

@@ -0,0 +1,12 @@
---
# As an example this file is kept in plaintext; if you want to
# encrypt this file, please execute the following command:
#
# $ ansible-vault encrypt passwords.yml
#
# It will ask you for a password, which you must then pass to
# ansible interactively when executing the playbook:
#
# $ ansible-playbook myplaybook.yml --ask-vault-pass
#
engine_password: 123456

View File

@@ -0,0 +1,73 @@
#!/usr/bin/python
'Module to create filter to find IP addresses in VMs'
class FilterModule(object):
    """Ansible filters that pull reported IP addresses out of oVirt VM facts."""

    def filters(self):
        """Expose the filter names to Ansible."""
        return {
            'ovirtvmip': self.ovirtvmip,
            'ovirtvmips': self.ovirtvmips,
            'ovirtvmipv4': self.ovirtvmipv4,
            'ovirtvmipsv4': self.ovirtvmipsv4,
            'ovirtvmipv6': self.ovirtvmipv6,
            'ovirtvmipsv6': self.ovirtvmipsv6,
        }

    def ovirtvmip(self, ovirt_vms, attr=None):
        """Return the first reported IP of any version."""
        return self.__get_first_ip(self.ovirtvmips(ovirt_vms, attr))

    def ovirtvmips(self, ovirt_vms, attr=None):
        """Return all reported IPs of any version."""
        return self._parse_ips(ovirt_vms, attr=attr)

    def ovirtvmipv4(self, ovirt_vms, attr=None):
        """Return the first reported IPv4 address."""
        return self.__get_first_ip(self.ovirtvmipsv4(ovirt_vms, attr))

    def ovirtvmipsv4(self, ovirt_vms, attr=None):
        """Return all reported IPv4 addresses."""
        return self._parse_ips(ovirt_vms, lambda version: version == 'v4', attr)

    def ovirtvmipv6(self, ovirt_vms, attr=None):
        """Return the first reported IPv6 address."""
        return self.__get_first_ip(self.ovirtvmipsv6(ovirt_vms, attr))

    def ovirtvmipsv6(self, ovirt_vms, attr=None):
        """Return all reported IPv6 addresses."""
        return self._parse_ips(ovirt_vms, lambda version: version == 'v6', attr)

    def _parse_ips(self, ovirt_vms, version_condition=lambda version: True, attr=None):
        # Accept a single VM fact dict or a list of them.
        vms = ovirt_vms if isinstance(ovirt_vms, list) else [ovirt_vms]
        if attr is None:
            return self._parse_ips_aslist(vms, version_condition)
        return self._parse_ips_asdict(vms, version_condition, attr)

    @staticmethod
    def _parse_ips_asdict(ovirt_vms, version_condition=lambda version: True, attr=None):
        # Keyed by the requested VM attribute (e.g. 'name'); later VMs with a
        # duplicate key overwrite earlier ones.
        return {
            vm.get(attr): [
                ip.get('address')
                for device in vm.get('reported_devices', [])
                for ip in device.get('ips', [])
                if version_condition(ip.get('version'))
            ]
            for vm in ovirt_vms
        }

    @staticmethod
    def _parse_ips_aslist(ovirt_vms, version_condition=lambda version: True):
        return [
            ip.get('address')
            for vm in ovirt_vms
            for device in vm.get('reported_devices', [])
            for ip in device.get('ips', [])
            if version_condition(ip.get('version'))
        ]

    @staticmethod
    def __get_first_ip(res):
        # Empty lists and dict results are passed through unchanged.
        if isinstance(res, list) and res:
            return res[0]
        return res

View File

@@ -0,0 +1 @@
{install_date: 'Wed Aug 28 12:51:49 2019', version: 1.1.14}

View File

@@ -0,0 +1,20 @@
# Ansible Galaxy metadata for the ovirt.manageiq role.
galaxy_info:
  author: Ondra Machacek
  description: Role to deploy ManageIQ/CloudForms into oVirt/RHV.
  company: Red Hat, Inc.
  license: Apache License 2.0
  min_ansible_version: 2.5
  platforms:
    - name: EL
      versions:
        - 7
    - name: Fedora
      versions:
        - 24
  galaxy_tags: [manageiq, cloudforms, ovirt, rhv, rhev, virtualization]
dependencies: []

View File

@@ -0,0 +1,133 @@
# RPM spec template for the ovirt.manageiq role. The @...@ placeholders are
# substituted by build.sh (dist) when the final spec is generated.
%global rolename manageiq
%global roleprefix ovirt.
%global roleprefix_legacy ovirt-
%global roleprefix_legacy_uppercase oVirt.
%global ansible_roles_dir ansible/roles

Name: @PACKAGE_NAME@
Summary: Ansible role to create ManageIQ or CloudForms virtual machine from qcow image
Version: @RPM_VERSION@
Release: @RPM_RELEASE@%{?release_suffix}%{?dist}
Source0: http://resources.ovirt.org/pub/src/@PACKAGE_NAME@/@PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz
License: ASL 2.0
Group: Virtualization/Management
BuildArch: noarch
Url: http://www.ovirt.org
Requires: ansible >= 2.7.2

%description
This Ansible role provide funtionality to create ManageIQ or CloudForms virtual
machine from qcow image.

%pretrans -p <lua>
-- Remove the legacy directory before installing the symlink. This is known issue in RPM:
-- https://fedoraproject.org/wiki/Packaging:Directory_Replacement
path_dash = "%{_datadir}/%{ansible_roles_dir}/%{roleprefix_legacy}%{rolename}"
path_uppercase = "%{_datadir}/%{ansible_roles_dir}/%{roleprefix_legacy_uppercase}%{rolename}"
st = posix.stat(path_dash)
if st and st.type == "directory" then
  os.execute('rm -rf "'..path_dash..'"')
end
st = posix.stat(path_uppercase)
if st and st.type == "directory" then
  os.execute('rm -rf "'..path_uppercase..'"')
end

%prep
%setup -c -q

%build

%install
# Installation is delegated to build.sh; the exported variables point it at
# buildroot-prefixed destinations (legacy names become symlinks).
export PKG_DATA_DIR_ORIG=%{_datadir}/%{ansible_roles_dir}/%{roleprefix}%{rolename}
export PKG_DATA_DIR=%{buildroot}$PKG_DATA_DIR_ORIG
export PKG_DOC_DIR=%{buildroot}%{_pkgdocdir}
export ROLENAME_LEGACY=%{buildroot}%{_datadir}/%{ansible_roles_dir}/%{roleprefix_legacy}%{rolename}
export ROLENAME_LEGACY_UPPERCASE=%{buildroot}%{_datadir}/%{ansible_roles_dir}/%{roleprefix_legacy_uppercase}%{rolename}
sh build.sh install

%files
%{_datadir}/%{ansible_roles_dir}/%{roleprefix}%{rolename}
%{_datadir}/%{ansible_roles_dir}/%{roleprefix_legacy}%{rolename}
%{_datadir}/%{ansible_roles_dir}/%{roleprefix_legacy_uppercase}%{rolename}
%doc README.md
%doc examples/
%license LICENSE
%changelog
* Wed May 15 2019 Ondra Machacek <omachace@redhat.com> - 1.1.14-1
- Don't try to re-init miq when initiailized.
* Thu Nov 29 2018 Ondra Machacek <omachace@redhat.com> - 1.1.13-1
- Change default disks interface to virtio_scsi.
- Support ovirt.manageiq name.
- Require Ansible 2.7.2.
* Tue Sep 18 2018 Ondra Machacek <omachace@redhat.com> - 1.1.12-1
- Document additional disks works with CFME only. rhbz#1627020
- Skip region settings in case of ManageIQ. rhbz#1627018
- Don't fail if miq_vm_disk_storage is defined. rhbz#1624836
- RHV provider can be added using env vars. rhbz#1613914
- Document that dry mode is unsupported. rhbz#1614314
- Support using engine_fqdn instead of url. rhbz#1613914
- Use corect IDs for MiQ region number. rhbz#1592857
- Add miq_wait_for_ip_version variable. rhbz#1613723
- Add miq_debug_create to disable no_log. gh#62
- Add support for OVIRT_HOSTNAME env var.
- Add possibility to login external from role.
- Fix ternary operator in ovirt_auth. rhbz#1584772
* Mon Jul 9 2018 Ondra Machacek <omachace@redhat.com> - 1.1.11-1
- Can't set password via miq_app_password variable. rhbz#1590336
* Thu Jun 7 2018 Ondra Machacek <omachace@redhat.com> - 1.1.10-1
- Add /vars directory to RPM. rhbz@1588415
* Wed May 30 2018 Ondra Machacek <omachace@redhat.com> - 1.1.9-1
- Fix JSON generation for RHV provider registration
- Add default values for metrics variables
- Create appliance disk names based on it's type
- Add embedded ansible as a default service to example
- Disable cloud-init service after deployment
- Don't hardcode /tmp/ovirt_image_path
- Update to Gaprindashvili-3
- Add root user to cfme appliance inventory
- Add support to disable/enable roles on the appliance.
- Add support to set company name.
- Add support for log and tmp disks.
- Add support for different passwords: application, database and operating system.
* Thu May 3 2018 Ondra Machacek <omachace@redhat.com> - 1.1.8-1
- Use no_log=true for uri module.
* Tue Apr 10 2018 Ondra Machacek <omachace@redhat.com> - 1.1.7-1
- Require Ansible 2.5.
- Initialize CFME with default value.
- Update defaults to use Gaprindashvili-2.
* Thu Mar 1 2018 Ondra Machacek <omachace@redhat.com> - 1.1.6-1
- Ensure VM is running before checking ManageIQ API.
- Add variable for C&U Metrics Database Name.
- Pass if provider already exists.
- Verify API connectivity before fetching image.
- Move to the Gaprindashvili release of ManageIQ.
* Wed Jan 17 2018 Ondra Machacek <omachace@redhat.com> - 1.1.5-1
- Support RHV credentials for Ansible Tower/AWX.
* Tue Dec 12 2017 Ondra Machacek <omachace@redhat.com> - 1.1.4-1
- Add missing templates/ directory.
* Mon Dec 04 2017 Ondra Machacek <omachace@redhat.com> - 1.1.3-1
- Add %pretrans scriplet to remove legacy role name.
* Wed Nov 29 2017 Ondra Machacek <omachace@redhat.com> - 1.1.2-1
- Rename prefix ovirt. to oVirt. to sync with galaxy
* Thu Nov 16 2017 Ondra Machacek <omachace@redhat.com> - 1.1.0-1
- Initial release

View File

@@ -0,0 +1,15 @@
# Attach one extra appliance disk (item = database/log/tmp) to the
# ManageIQ/CloudForms VM, then append the matching device path (computed in
# the disk-device mapping tasks) to the appliance initialization command.
- name: Add {{ item }} disk for CFME
  ovirt_disk:
    auth: "{{ ovirt_auth }}"
    name: "{{ miq_vm_disks[item].name | default(miq_vm_name ~ '_' ~ item) }}"
    vm_name: "{{ miq_vm_name }}"
    interface: "{{ miq_vm_disks[item].interface | default('virtio_scsi') }}"
    size: "{{ miq_vm_disks[item].size | default(omit) }}"
    format: "{{ miq_vm_disks[item].format | default(omit) }}"
    timeout: "{{ miq_vm_disks[item].timeout | default(omit) }}"
    # Prefer a per-disk storage domain; otherwise fall back to the
    # auto-detected data domain or the role-level miq_vm_disk_storage.
    storage_domain: "{{ miq_vm_disks[item].storage | default(disk_storage_domain.name if disk_storage_domain is defined else miq_vm_disk_storage) }}"

- name: Add {{ item }} disk to CloudForms initialization command
  # no_log unless debugging — the init command embeds the DB password.
  no_log: "{{ not miq_debug_create }}"
  set_fact:
    miq_init_cmd2: "{{ miq_init_cmd2 }} {{ miq_init_cmd_options.disks[item] }} {{ miq_vm_disks_devices[item] }}"

View File

@@ -0,0 +1,73 @@
# Build miq_vm_disks_devices: a map from each appliance disk (database, log,
# tmp) to the guest device node it will appear as. virtio disks show up as
# /dev/vdX, virtio_scsi disks as /dev/sdX; letters are assigned in order
# after the OS disk (b, c, d), so the tmp device letter depends on whether a
# log disk is also configured.
- name: Set database disk to /dev/vdb if disk interface is virtio
  set_fact:
    miq_vm_disks_db_device: /dev/vdb
  when: "miq_vm_disks.database.interface == 'virtio'"

- name: Set database disk to /dev/sdb if disk interface is virtio_scsi
  set_fact:
    miq_vm_disks_db_device: /dev/sdb
  when: "miq_vm_disks.database.interface == 'virtio_scsi'"

- set_fact:
    miq_vm_disks_devices:
      database: "{{ miq_vm_disks_db_device }}"

- block:
    - name: Set log disk to /dev/vdc if disk interface is virtio
      set_fact:
        miq_vm_disks_log_device: /dev/vdc
      when: "miq_vm_disks.log.interface == 'virtio'"
    - name: Set log disk to /dev/sdc if disk interface is virtio_scsi
      set_fact:
        miq_vm_disks_log_device: /dev/sdc
      when: "miq_vm_disks.log.interface == 'virtio_scsi'"
    - set_fact:
        miq_vm_disks_log_device_dict:
          log: "{{ miq_vm_disks_log_device }}"
    - set_fact:
        miq_vm_disks_devices: "{{ miq_vm_disks_devices | combine(miq_vm_disks_log_device_dict) }}"
  when: "'log' in miq_vm_disks"

- block:
    # Without a log disk, tmp takes the slot right after the database disk.
    - block:
        - name: Set tmp disk to /dev/vdc if disk interface is virtio
          set_fact:
            miq_vm_disks_tmp_device: /dev/vdc
          when: "miq_vm_disks.tmp.interface == 'virtio'"
        - name: Set tmp disk to /dev/sdc if disk interface is virtio_scsi
          set_fact:
            miq_vm_disks_tmp_device: /dev/sdc
          when: "miq_vm_disks.tmp.interface == 'virtio_scsi'"
      when: "'log' not in miq_vm_disks"
    # With a log disk present, tmp shifts one letter down.
    - block:
        - name: Set tmp disk to /dev/vdd if disk interface is virtio
          set_fact:
            miq_vm_disks_tmp_device: /dev/vdd
          when: "miq_vm_disks.tmp.interface == 'virtio'"
        - name: Set tmp disk to /dev/sdd if disk interface is virtio_scsi
          set_fact:
            miq_vm_disks_tmp_device: /dev/sdd
          when: "miq_vm_disks.tmp.interface == 'virtio_scsi'"
      when: "'log' in miq_vm_disks"
    - set_fact:
        miq_vm_disks_tmp_device_dict:
          tmp: "{{ miq_vm_disks_tmp_device }}"
    - set_fact:
        miq_vm_disks_devices: "{{ miq_vm_disks_devices | combine(miq_vm_disks_tmp_device_dict) }}"
  when: "'tmp' in miq_vm_disks"

View File

@@ -0,0 +1,84 @@
# Download the ManageIQ/CloudForms QCOW image, validate it, work out the
# disk size and storage domain, and upload it as the appliance's OS disk.
- name: Check if {{ miq_image_path }} is directory
  stat:
    path: "{{ miq_image_path }}"
  register: image_path_st

- name: Download the qcow image
  get_url:
    url: "{{ miq_qcow_url }}"
    # If miq_image_path is a directory, append the URL's basename; the
    # regex_replace collapses an accidental double slash in the joined path.
    dest: "{{ image_path_st.stat.isdir | ternary( miq_image_path~'/'~miq_qcow_url.rpartition('/')[-1], miq_image_path) | regex_replace('//', '/') }}"
    checksum: "{{ miq_image_checksum | default(omit) }}"
  register: downloaded_file

- name: Check file type
  command: "/usr/bin/file {{ downloaded_file.dest | quote }}"
  changed_when: false
  register: filetype

- name: Fail if image is not qcow
  fail:
    msg: "The downloaded file is not a valid QCOW file."
  when: '"QCOW" not in filetype.stdout'

- name: Calculate image size in GiB
  set_fact:
    # NOTE(review): assumes the 6th whitespace-separated field of file(1)
    # output is the image's virtual size in bytes — fragile across file
    # versions; TODO confirm.
    miq_image_size_gib: "{{ filetype.stdout_lines[0].split()[5] | int // 2**30 }}"

#
# Find default disk size for miq disk:
#
- block:
    - name: Extract integer from miq_vm_disk_size
      set_fact:
        miq_vm_disk_size_gib: "{{ miq_vm_disk_size | regex_replace('GiB$') }}"
    - name: Fail if miq_vm_disk_size is less than qcow size
      fail:
        msg: "Setting a disk size ({{ miq_vm_disk_size }}) lower than the image size ({{ miq_image_size_gib }}GiB) may result in disk corruption."
      when: "miq_vm_disk_size_gib < miq_image_size_gib"
  when: "miq_vm_disk_size is defined"

#
# Find default data storage domain for Miq disk:
#
- block:
    - name: Fetch storages
      ovirt_storage_domain_facts:
        auth: "{{ ovirt_auth }}"
        pattern: "Clusters.name={{ miq_vm_cluster }} and status=active"
    - name: Find data domain
      set_fact:
        # Picks the first active data-type storage domain of the cluster.
        disk_storage_domain: "{{ ovirt_storage_domains | json_query(the_query) | list | first }}"
      vars:
        the_query: "[?type=='data']"
  when: miq_vm_disk_storage is undefined

- name: Check if VM already exists
  ovirt_vm_facts:
    auth: "{{ ovirt_auth }}"
    pattern: "name={{ miq_vm_name }}"

- block:
    - name: Deploy the qcow image to oVirt engine
      ovirt_disk:
        auth: "{{ ovirt_auth }}"
        name: "{{ miq_vm_disk_name | default(miq_vm_name) }}"
        interface: "{{ miq_vm_disk_interface }}"
        # '+' concatenates the GiB suffix; miq_image_size_gib is a string fact.
        size: "{{ miq_vm_disk_size | default(miq_image_size_gib + 'GiB') }}"
        format: "{{ miq_vm_disk_format }}"
        image_path: "{{ downloaded_file.dest }}"
        storage_domain: "{{ disk_storage_domain.name if disk_storage_domain is defined else miq_vm_disk_storage }}"
        # Only force-overwrite an existing disk when the VM does not exist yet.
        force: "{{ ovirt_vms | length == 0 }}"
      register: ovirt_disk
  rescue:
    # On upload failure, clean up the partial disk and record the failure so
    # later tasks can react.
    - name: Remove failed disk
      ovirt_disk:
        auth: "{{ ovirt_auth }}"
        state: absent
        name: "{{ miq_vm_disk_name | default(miq_vm_name) }}"
    - name: Set miq_disk_deploy_failed
      set_fact:
        miq_disk_deploy_failed: true

View File

@@ -0,0 +1,57 @@
# Register the freshly booted appliance as an ad-hoc inventory host so
# later tasks can delegate to it over SSH (factory root/smartvm creds —
# hidden from logs unless miq_debug_create is set).
- name: Add host alias of appliance
  no_log: "{{ not miq_debug_create }}"
  add_host:
    hostname: "{{ miq_ip_addr }}"
    ansible_host: "{{ miq_ip_addr }}"
    ansible_user: root
    ansible_password: smartvm
    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
  # add_host always reports "changed"; suppress that noise.
  changed_when: false
- name: Wait for SSH port of appliance
  wait_for:
    host: "{{ miq_ip_addr }}"
    port: 22
    delay: 10
# Determine whether the appliance ships the CFME package and whether its
# database was already initialized; run the init command only on a fresh
# CFME appliance.
- name: Fetch info about appliance
  command: "rpm -qi cfme"
  args:
    warn: no
  register: cfme_rpm
  ignore_errors: yes
  changed_when: false
  delegate_to: "{{ miq_ip_addr }}"
# A failing API call with the default password indicates the appliance
# database has not been set up yet.
- name: Check if ManageIQ/CloudForms was initialized
  no_log: "{{ not miq_debug_create }}"
  uri:
    url: "https://{{ miq_ip_addr }}/api/"
    validate_certs: no
    user: "{{ miq_app_username }}"
    password: smartvm
  register: init_miq_vm
  ignore_errors: yes
- block:
    - name: Set region id
      set_fact:
        # ManageIQ/CFME region N owns database ids starting at N * 10^12 + 1.
        miq_region_id: "{{ miq_region|int * 1000000000000 + 1 }}"
    - name: Initialize CloudForms
      command: "{{ miq_init_cmd2 }}"
      delegate_to: "{{ miq_ip_addr }}"
  # The block-level condition already includes init_miq_vm.failed, so the
  # formerly duplicated per-task `when` on the init command was dropped.
  when: "cfme_rpm.rc == 0 and init_miq_vm.failed"
# Replace the factory default root password and keep cloud-init from
# re-running on subsequent boots.
- name: Set root password of appliance
  no_log: "{{ not miq_debug_create }}"
  shell: echo '{{ miq_vm_root_password }}' | passwd --stdin root
  delegate_to: "{{ miq_ip_addr }}"
- name: Disable cloud-init service
  service:
    enabled: no
    name: cloud-init
  delegate_to: "{{ miq_ip_addr }}"

View File

@@ -0,0 +1,108 @@
---
- block:
## Initialize authentication parameters:
    # Build the API URL from engine_fqdn (or $OVIRT_HOSTNAME) when given;
    # otherwise engine_url falls back to $OVIRT_URL below. Note the filter
    # binds tighter than '~', so default() applies to engine_fqdn only.
    - set_fact:
        engine_url: "{{ 'https://' ~engine_fqdn | default(lookup('env', 'OVIRT_HOSTNAME')) ~ '/ovirt-engine/api' }}"
      when: engine_fqdn is defined or lookup('env', 'OVIRT_HOSTNAME')
    # Fill any still-unset credentials from the standard OVIRT_* env vars.
    - set_fact:
        engine_user: "{{ engine_user | default(lookup('env', 'OVIRT_USERNAME')) }}"
        engine_password: "{{ engine_password | default(lookup('env', 'OVIRT_PASSWORD')) }}"
        engine_url: "{{ engine_url | default(lookup('env', 'OVIRT_URL')) }}"
        engine_cafile: "{{ engine_cafile | default(lookup('env', 'OVIRT_CAFILE')) }}"
    # Obtain an SSO token only when the caller did not already provide
    # ovirt_auth; `loggedin` lets the always-section decide whether this
    # role owns the session and should log out.
    - name: Login to oVirt engine
      ovirt_auth:
        username: "{{ engine_user }}"
        password: "{{ engine_password }}"
        url: "{{ engine_url }}"
        ca_file: "{{ engine_cafile }}"
        # Without a CA bundle, fall back to unverified TLS.
        insecure: "{{ engine_cafile == '' }}"
      when: ovirt_auth is undefined or not ovirt_auth
      register: loggedin
      tags:
        - always
    # Uploads the qcow2 image as an oVirt disk; on success registers
    # ovirt_disk, on failure sets miq_disk_deploy_failed.
    - name: Deploy qcow disk
      include_tasks: deploy_qcow2.yml
    - block:
        - name: Create ManageIQ virtual machine
          ovirt_vm:
            auth: "{{ ovirt_auth }}"
            state: present
            name: "{{ miq_vm_name }}"
            cluster: "{{ miq_vm_cluster }}"
            memory: "{{ miq_vm_memory }}"
            # Optional sizing knobs are omitted entirely when unset so the
            # module/engine defaults apply.
            memory_max: "{{ miq_vm_memory_max | default(omit) }}"
            memory_guaranteed: "{{ miq_vm_memory_guaranteed | default(omit) }}"
            cpu_cores: "{{ miq_vm_cpu }}"
            cpu_shares: "{{ miq_vm_cpu_shares | default(omit) }}"
            cpu_sockets: "{{ miq_vm_cpu_sockets | default(omit) }}"
            cpu_threads: "{{ miq_vm_cpu_threads | default(omit) }}"
            operating_system: "{{ miq_vm_os }}"
            high_availability: "{{ miq_vm_high_availability }}"
            high_availability_priority: "{{ miq_vm_high_availability_priority }}"
            delete_protected: "{{ miq_vm_delete_protected }}"
            type: server
            disks:
              # Boot from the disk uploaded by deploy_qcow2.yml.
              - id: "{{ ovirt_disk.id }}"
                bootable: true
            nics: "{{ miq_vm_nics }}"
          register: create_vm
        # Work on a copy of miq_init_cmd; presumably the included disk tasks
        # append per-disk options to miq_init_cmd2 — verify in
        # cfme_add_disk.yml.
        - name: Duplicate miq_init_cmd variable to override it
          set_fact:
            miq_init_cmd2: "{{ miq_init_cmd }}"
        - include_tasks: cfme_identify_disk_device.yml
        # Attach one extra disk per requested type (database/log/tmp).
        - include_tasks: cfme_add_disk.yml
          when: "item in miq_vm_disks"
          with_items: "{{ miq_vm_disks_types }}"
        - name: Ensure virtual machine is running
          ovirt_vm:
            auth: "{{ ovirt_auth }}"
            state: running
            name: "{{ miq_vm_name }}"
            cloud_init: "{{ miq_vm_cloud_init | default(omit) }}"
        # Build the retry condition as a string so the requested IP version
        # (v4/v6) can be interpolated into the filter name.
        - set_fact:
            ip_cond: "ovirt_vms | ovirtvmip{{ miq_wait_for_ip_version }} | length > 0"
        - name: Wait for VM IP
          ovirt_vm_facts:
            auth: "{{ ovirt_auth }}"
            pattern: "name={{ miq_vm_name }}"
            fetch_nested: true
            nested_attributes: ips
          # Re-fetch facts until the VM reports an address of the requested
          # family; up to 10 tries, 10s apart.
          until: "ip_cond"
          retries: 10
          delay: 10
        - name: ManageIQ host IPv4 address
          set_fact:
            miq_ip_addr: "{{ ovirt_vms | ovirtvmipv4 }}"
          when: miq_wait_for_ip_version == 'v4'
        - name: ManageIQ host IPv6 address
          set_fact:
            miq_ip_addr: "{{ ovirt_vms | ovirtvmipv6 }}"
          when: miq_wait_for_ip_version == 'v6'
- block:
- include: init_cfme.yml
- include: wait_for_api.yml
when: "miq_initialize"
when: "not miq_disk_deploy_failed"
  always:
    # Release the SSO token only if this role created it; a caller-supplied
    # ovirt_auth (login task skipped) is left intact for the caller.
    - name: Logout from oVirt engine
      ovirt_auth:
        state: absent
        ovirt_auth: "{{ ovirt_auth }}"
      when: not loggedin.skipped | default(false)
      tags:
        - always

View File

@@ -0,0 +1,33 @@
---
# Reconcile the appliance's active server roles: fetch the current list,
# drop miq_disabled_roles, add miq_enabled_roles, then PATCH the result.
- name: Get the list of enabled roles
  uri:
    # NOTE(review): the server-settings endpoint is addressed with
    # miq_region_id (region * 10^12 + 1) — presumably the id of the first
    # server in the region; confirm for multi-server appliances.
    url: "https://{{ miq_ip_addr }}/api/servers/{{ miq_region_id }}/settings"
    user: "{{ miq_app_username }}"
    password: "{{ miq_app_password }}"
    method: GET
    validate_certs: no
  register: miq_active_roles_json
- name: Extracting the roles from the JSON output
  set_fact:
    # The API stores roles as a single comma-separated string.
    miq_active_roles: "{{ miq_active_roles_json.json.server.role.split(',') }}"
- name: Remove roles from the list of active roles
  set_fact:
    miq_active_roles: "{{ miq_active_roles | difference(miq_disabled_roles) }}"
- name: Add extra roles to list of active roles
  set_fact:
    miq_active_roles: "{{ miq_active_roles | union(miq_enabled_roles) }}"
- name: Update list of active roles
  uri:
    url: https://{{ miq_ip_addr }}/api/servers/{{ miq_region_id }}/settings
    user: "{{ miq_app_username }}"
    password: "{{ miq_app_password }}"
    method: PATCH
    validate_certs: no
    body_format: json
    body:
      server:
        # Serialize back to the comma-separated form the API expects.
        role: "{{ miq_active_roles | join(',') }}"

View File

@@ -0,0 +1,68 @@
# Poll the appliance REST API (still using the factory default password)
# until it answers; first boot can take several minutes (max 50 * 20s).
- name: Wait for ManageIQ/CloudForms API
  no_log: "{{ not miq_debug_create }}"
  uri:
    url: "https://{{ miq_ip_addr }}/api/"
    validate_certs: no
    user: "{{ miq_app_username }}"
    password: smartvm
  register: miq_vm
  until: "miq_vm.status == 200"
  retries: 50
  delay: 20
# Change the application admin password away from the default "smartvm".
- name: Set application admin password
  no_log: "{{ not miq_debug_create }}"
  uri:
    # NOTE(review): the admin user is addressed by miq_region_id (the first
    # id in the region) — presumably the built-in admin's user id; confirm.
    url: "https://{{ miq_ip_addr }}/api/users/{{ miq_region_id }}"
    validate_certs: no
    method: POST
    user: "{{ miq_app_username }}"
    password: smartvm
    force_basic_auth: yes
    body_format: json
    body:
      action: "edit"
      resource:
        password: "{{ miq_app_password | string }}"
  register: miq_admin_password
  changed_when: "miq_admin_password.status == 201 or miq_admin_password.status == 200"
  # Fail only when the API returned an explicit error payload.
  failed_when:
    - "miq_admin_password.json is defined and 'error' in miq_admin_password.json"
# Set the company name shown in the ManageIQ/CloudForms UI.
- name: Update ManageIQ company name
  uri:
    url: "https://{{ miq_ip_addr }}/api/servers/{{ miq_region_id }}/settings"
    user: "{{ miq_app_username }}"
    password: "{{ miq_app_password }}"
    method: PATCH
    validate_certs: no
    body_format: json
    body:
      server:
        company: "{{ miq_company }}"
  register: miq_update_company
  changed_when: "miq_update_company.status == 201 or miq_update_company.status == 200"
  # Fail only when the API returned an explicit error payload.
  failed_when:
    - "miq_update_company.json is defined and 'error' in miq_update_company.json"
# Enable/disable server roles as configured, then register this engine.
- include_tasks: manage_appliance_roles.yml
- name: Add oVirt/RHV provider to ManageIQ/CloudForms
  no_log: "{{ not miq_debug_create }}"
  uri:
    url: "https://{{ miq_ip_addr }}/api/providers"
    validate_certs: no
    method: POST
    user: "{{ miq_app_username }}"
    password: "{{ miq_app_password }}"
    # Request body is rendered from the template using engine_* /
    # metrics_* variables.
    body: "{{ lookup('template', 'add_rhv_provider.j2') }}"
    force_basic_auth: yes
    body_format: json
  register: miq_rhv_provider
  changed_when: "miq_rhv_provider.status == 201 or miq_rhv_provider.status == 200"
  # All failed_when items must be true for the task to fail, so a
  # duplicate-name / duplicate-host error message makes the task succeed
  # (provider already registered).
  failed_when:
    - "miq_rhv_provider.json is defined and 'error' in miq_rhv_provider.json"
    - "miq_rhv_provider.json.error.message is defined and 'has already been taken' not in miq_rhv_provider.json.error.message"
    # FIXME: If provider already exists with different name, don't fail, but we should change the name
    # when there will exist any ansible module for managing providers:
    - "miq_rhv_provider.json.error.message is defined and 'Host Name has to be unique per provider type' not in miq_rhv_provider.json.error.message"

View File

@@ -0,0 +1,43 @@
{# Request body for POST /api/providers: registers this oVirt/RHV engine as
   a Redhat InfraManager provider. Endpoint host/port come from engine_fqdn
   when set (optionally "host:port"), otherwise they are parsed out of
   engine_url. Jinja comments render to nothing, so the output stays valid
   JSON. #}
{
  "type": "ManageIQ::Providers::Redhat::InfraManager",
  "name": "{{ miq_rhv_provider_name }}",
  "connection_configurations": [{
    "endpoint": {
      "role": "default",
{% if engine_fqdn is defined %}
      "hostname": "{{ engine_fqdn.split(':')[0] }}",
{% if engine_fqdn.split(':') | length > 1 %}
      "port": "{{ engine_fqdn.split(':')[1] }}",
{% endif %}
{% else %}
      "hostname": "{{ engine_url | urlsplit('hostname') }}",
{% if engine_url | urlsplit('port') != "" %}
      "port": "{{ engine_url | urlsplit('port') }}",
{% endif %}
{% endif %}
      "verify_ssl": {{ engine_cafile != '' }},
{% if engine_cafile != '' %}
      {# to_json quotes and escapes the PEM content into a JSON string. #}
      "certificate_authority": {{ lookup('file', engine_cafile) | to_json }}
{% endif %}
    },
    "authentication": {
      "userid": "{{ engine_user }}",
      "password": "{{ engine_password }}"
    }
  }{% if metrics_fqdn is defined %},{% endif %}
{% if metrics_fqdn is defined %}
  {# Optional metrics (data-warehouse DB) endpoint. #}
  {
    "endpoint": {
      "role": "metrics",
      "path": "{{ metrics_db_name }}",
      "hostname": "{{ metrics_fqdn }}",
      "port": "{{ metrics_port }}"
    },
    "authentication": {
      "userid": "{{ metrics_user }}",
      "password": "{{ metrics_password }}"
    }
  }
{% endif %}
  ]
}

View File

@@ -0,0 +1,11 @@
# Tox configuration for role CI; there is no Python package to build
# (skipsdist) and missing interpreters are tolerated on dev machines.
[tox]
skipsdist=True
envlist =
    py27-{yamllint,ansible_syntax,generate_validation}
skip_missing_interpreters=True
# Factor-conditional commands: each env runs only its matching line(s).
[testenv]
skip_install=True
commands =
    yamllint: yamllint examples/

View File

@@ -0,0 +1,10 @@
---
# Extra appliance disk types that may be attached to the VM; entries of
# miq_vm_disks are matched against this list.
miq_vm_disks_types:
  - database
  - log
  - tmp
# Per-disk-type flag appended to the appliance init command (miq_init_cmd)
# when the corresponding disk is added.
miq_init_cmd_options:
  disks:
    database: "-b"
    log: "-l"
    tmp: "-t"

View File

@@ -183,6 +183,9 @@
name: sos
state: latest
# TODO: set this in Administer -> Settings -> Puppet Puppet out of sync disabled = Yes
# Description: Disable host configuration status turning to out of sync for Puppet after report does not arrive within configured interval
# TODO: Make this work
# For now:
# hammer user-group create --admin yes --name satellite_admins