diff --git a/.gitignore b/.gitignore index fcaeeea..8936dc4 100644 --- a/.gitignore +++ b/.gitignore @@ -108,9 +108,8 @@ venv.bak/ # Ansible *.retry -roles/ .vscode/ keys/ collections/ansible_collections/ -.vscode/ \ No newline at end of file +.vscode/ diff --git a/roles/bertvv.bind/.gitignore b/roles/bertvv.bind/.gitignore new file mode 100644 index 0000000..b433c0a --- /dev/null +++ b/roles/bertvv.bind/.gitignore @@ -0,0 +1,19 @@ +# .gitignore + +# Hidden Vagrant-directory +.vagrant + +# Backup files (e.g. Vim, Gedit, etc.) +*~ + +# Vagrant base boxes (you never know when someone puts one in the repository) +*.box + +# Python artefacts +.ropeproject +*.pyc + +# Ignore test directory +tests/ +vagrant-tests/ +docker-tests/ diff --git a/roles/bertvv.bind/.travis.yml b/roles/bertvv.bind/.travis.yml new file mode 100644 index 0000000..05a9d32 --- /dev/null +++ b/roles/bertvv.bind/.travis.yml @@ -0,0 +1,48 @@ +--- +language: python + +# Use the new container infrastructure +sudo: required + +env: + global: + - ROLE_NAME: bind + matrix: + - MOLECULE_DISTRO: centos7 + - MOLECULE_DISTRO: centos8 + - MOLECULE_DISTRO: debian8 + - MOLECULE_DISTRO: debian9 + - MOLECULE_DISTRO: debian10 + - MOLECULE_DISTRO: ubuntu1604 + - MOLECULE_DISTRO: ubuntu1804 + - MOLECULE_DISTRO: ubuntu2004 + +# Enable docker support +services: + - docker + +install: + - sudo apt-get update + - sudo apt-get install bats curl dnsutils + # Install dependencies for Molecule test + - python3 -m pip install molecule yamllint ansible-lint docker netaddr + # Check ansible and molecule version + - ansible --version + - molecule --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +before_script: + # Renames ansible-role-bind to bertvv.bind to make it match with Ansible + # Galaxy + - cd ../ + - mv ansible-role-$ROLE_NAME bertvv.$ROLE_NAME + - cd bertvv.$ROLE_NAME + +script: + # Run molecule test + - molecule test + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/roles/bertvv.bind/.yamllint b/roles/bertvv.bind/.yamllint new file mode 100644 index 0000000..d19261c --- /dev/null +++ b/roles/bertvv.bind/.yamllint @@ -0,0 +1,56 @@ +--- + +rules: + braces: + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: -1 + max-spaces-inside-empty: -1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: -1 + max-spaces-inside-empty: -1 + colons: + max-spaces-before: 0 + max-spaces-after: 1 + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + comments: + level: warning + require-starting-space: true + min-spaces-from-content: 2 + comments-indentation: + level: warning + document-end: disable + document-start: + level: warning + present: true + empty-lines: + max: 2 + max-start: 0 + max-end: 0 + empty-values: + forbid-in-block-mappings: false + forbid-in-flow-mappings: false + hyphens: + max-spaces-after: 1 + indentation: + spaces: consistent + indent-sequences: true + check-multi-line-strings: false + key-duplicates: enable + key-ordering: disable + line-length: + max: 1000 + level: warning + allow-non-breakable-words: true + allow-non-breakable-inline-mappings: false + new-line-at-end-of-file: enable + new-lines: + type: unix + trailing-spaces: enable + truthy: + level: warning diff --git a/roles/bertvv.bind/CHANGELOG.md b/roles/bertvv.bind/CHANGELOG.md new file mode 100644 index 0000000..2a54ff4 --- /dev/null +++ b/roles/bertvv.bind/CHANGELOG.md @@ -0,0 +1,250 @@ +# Change 
log
+
+This file contains all notable changes to the bind Ansible role.
+
+This file adheres to the guidelines of [http://keepachangelog.com/](http://keepachangelog.com/). Versioning follows [Semantic Versioning](http://semver.org/). "GH-X" refers to the Xth issue/pull request on the GitHub project.
+
+## 4.2.0 - 2020-05-23
+
+An update that's been long overdue. Several PRs with new features were merged!
+
+A special thanks to @blofeldthefish for his willingness to help out with maintaining this role and to @RobinOphalvens for contributing the new testing harness based on Molecule. Thanks to them, further development of this role got out of the deadlock it had been in since the previous version.
+
+### Added
+
+- New supported platforms
+  - CentOS 8 (GH-107, credit: [Paulius Mazeika](https://github.com/pauliusm))
+  - Debian 10 (no changes were needed)
+  - FreeBSD (GH-100, credit: [psa](https://github.com/psa))
+  - Ubuntu 20.04 LTS (no changes were needed)
+- (GH-69) Allow TTLs for individual records (credit: [Stuart Knight](https://github.com/blofeldthefish))
+- (GH-79) Added support for the SSHFP record type (credit: [Romuald](https://github.com/rds13))
+- (GH-81) Added support for the DNAME record type (credit: [B. Verschueren](https://github.com/bverschueren))
+- (GH-82) Added support for the NAPTR record type (credit: [Aido](https://github.com/aido))
+- (GH-83) Added support for the [`$GENERATE` directive](http://www.zytrax.com/books/dns/ch8/generate.html) (credit: [Rayford Johnson](https://github.com/rayfordj))
+- (GH-85) New role variable `bind_other_logs` (credit: [Paulo E. Castro](https://github.com/pecastro))
+- (GH-87) New role variable `bind_dns_keys`, a list of binding keys (credit: [Jérôme Avond](https://github.com/jadjay))
+- (GH-88) New role variable `bind_statistics_channels` (credit: [Stuart Knight](https://github.com/blofeldthefish))
+- (GH-105, GH-113) New role variable `bind_query_log`, with more flexibility w.r.t. query logging (credit: [Romuald](https://github.com/rds13) and [Jascha Sticher](https://github.com/itbane))
+- New keys in `bind_zone_domains`: `create_forward_zones` and `create_reverse_zones`. When present and set to false, they prevent the creation of the forward or reverse zones, respectively, resulting in a reverse-only or forward-only name server for that zone.
+
+### Changed
+
+- Molecule is now used as the testing harness (credit: [Robin Ophalvens](https://github.com/RobinOphalvens)). The previous system was written before any standardised testing tools were available. It became too cumbersome to maintain, which had a serious impact on the further development of this role.
+- (GH-75) Refactored hash gathering to determine if zone files need to be regenerated (credit: [Stuart Knight](https://github.com/blofeldthefish))
+- (GH-89) Add missing `allow-recursion` parameter for bind slaves, allowing them to handle recursion correctly (credit: [Lennart Weller](https://github.com/lhw))
+- (GH-91) Ensure the directory for cached slave zones is created (credit: [Otto Sabart](https://github.com/seberm))
+- (GH-99) Use the `bind_group` variable instead of a hard-coded value (credit: [Boris Momčilović](https://github.com/kornrunner))
+- (GH-114, GH-115) Fix an error in the scenario where a DHCP shared secret key is used to provide dynamic DNS updates.
(credit: [Fabio Rocha](https://github.com/frock81)) + +## Removed + +- (GH-106) Removed DNSSEC Lookaside Validation, this service has been shut down + +## 4.1.0 - 2018-10-05 + +## Added + +- (GH-53) Add variable `bind_zone_dir` and `bind_zone_file_mode` for setting the master zone file path and mode, and `bind_extra_include_files` for including arbitrary configuration files into named.conf. (credit: [Brad Durrow](https://github.com/bdurrow)) +- (GH-64) Add variable `bind_query_log` to enable query logging (credit: [Angel Barrera](https://github.com/angelbarrera92)) + +## Changed + +- (GH-55) Fix issue with non-existing file when grepping domain (credit: [Tom Meinlschmidt](https://github.com/tmeinlschmidt)) +- (GH-57) Fix issue with forwarding in subdomain delegations (credit: [Stuart Knight](https://github.com/blofeldthefish)) +- (GH-66) Fix issue that causes playbook to fail when running in `--check` mode (credit: [Jörg Eichhorn](https://github.com/jeichhorn)) +- (GH-67) Improved documentation with minimal slave configuration (credit: [Christopher Hicks](https://github.com/chicks-net)) +- Add Ubuntu 18.04, Debian 8-9 and Arch Linux to list of supported distros. + +## 4.0.1 - 2018-05-21 + +### Changed + +- (GH-52) Move all zone specific configuration options to `bind_zones` (credit: [Stuart Knight](https://github.com/blofeldthefish)) + +## 4.0.0 - 2018-05-19 + +### Added + +- (GH-50) Add support for multiple zones (credit: [Stuart Knight](https://github.com/blofeldthefish)). **This is a breaking change,** as it changes the syntax for specifying zones. +- Allow out-of-zone name server records + +## 3.9.1 - 2018-04-22 + +## Changed + +- Allow multi-line `ansible_managed` comment (credit: [Fazle Arefin](https://github.com/fazlearefin)) +- Fix the atrocious implementation of (GH-35) +- Updated documentation for specifying hosts with multiple IP addresses +- Create serial as UTC UNIX time (credit: [David J. Haines](https://github.com/dhaines)) +- Fix bugs, linter and deprecation warnings + +## 3.9.0 - 2017-11-21 + +### Added + +- (GH-35) Role variable `bind_check_names`, which adds support for check-names (e.g. `check-names master ignore;`) +- (GH-36) Role variable `bind_allow_recursion`, which adds support for allow-recursion (credit: [Loic Dachary](https://github.com/dachary)) +- (GH-39) Role variable `bind_zone_delegate`, which adds support for zone delegation / NS records (credit: [Loic Dachary](https://github.com/dachary)) +- (GH-40) Role variables `bind_dnssec_enable` and `bind_dnssec_validation`, which makes DNSSEC validation configurable (credit: [Guillaume Darmont](https://github.com/gdarmont)). + +### Changed + +- (GH-38) Only append domain to MX if it does not end with a dot (credit: [Loic Dachary](https://github.com/dachary)) + +## 3.8.0 - 2017-07-12 + +This release adds support for multiple TXT entries and fixes some bugs. 
+ +### Added + +- (GH-31) Support for multiple TXT entries for the same name (credit: [Rafael Bodill](https://github.com/rafi)) + +### Changed + +- (GH-31) Fixed ipv6 reverse zone hash calculation for complete idempotency (credit: [Stuart Knight](https://github.com/blofeldthefish)) +- (GH-32, GH-33) Fix for bug where CNAMEs and Multi-IP entries weren't working (credit: [Greg Cockburn](https://github.com/gergnz)) + +## 3.7.1 - 2017-07-03 + +### Changed + +- (GH-29) Zone files are fully idempotent, so are only changed when actual content changes (credit: [@Stuart Knight](https://github.com/blofeldthefish)) + +## 3.7.0 - 2017-06-01 + +### Added + +- (GH-10) Implement reverse IPv6 lookups +- (GH-28) Add option `bind_forwarders` and `bind_forward_only`, which allows BIND to be set up as a caching name server. + +## 3.6.1 - 2017-06-01 + +### Changed + +- Fixed a bug with generating the reverse zone names. + +## 3.6.0 - 2017-06-01 + +### Changed + +- (GH-25) Allow slave log file to be set with variable `bind_log` instead of a hard coded value (credit @kartone). +- The alignment of columns in the reverse zone file are improved + +### Added + +- (GH-22, 23) Documentation improvements +- (GH-27) Allow dynamic updates (credit: @bverschueren) + +### Removed + +- The custom filter plugins were removed. The functionality has since been added to Ansible's built-in filter plugins. This does require `python-netaddr` to be installed on the management node. + +## 3.5.2 - 2016-09-29 + +### Changed + +* The call to `named-checkconf` was fixed. It had the full path to the binary, which is not the same on all distributions. (GH-20, credit @peterjanes) + +## 3.5.1 - 2016-09-22 + +### Changed + +* The check for master/slave server is improved (GH-19, credit @josetaas) + +## 3.5.0 - 2016-07-28 + +### Added + +* Introduced role variable `bind_log`, the path to the log file. +* Introduced role variable `bind_zone_also_notify`, a list of servers that will receive a notification when the master zone file is reloaded (GH-18, credit: Joanna Delaporte) +* Reverse zone files now handle the case with only a single host (GH-18, credit: Joanna Delaporte) + +## 3.4.0 - 2016-05-26 + +### Added + +* (GH-16) Support for service record (SRV) lookups +* Support for text record (TXT) lookups + +### Changed + +* Fixed Ansible 2.0 deprecation warnings +* Generating a serial is no longer considered a change +* Ensured that all role variables have a default value, e.g. empty list instead of undefined. This simplifies template logic (no `if defined` tests), and is considered [deprecated in playbooks within a *with_* loop](https://docs.ansible.com/ansible/porting_guide_2.0.html#deprecated). + +## 3.3.1 - 2016-04-08 + +### Removed + +* The `version:` field in `meta/main.yml`. This an unofficial field that is used by a third-party tool for managing role dependencies (librarian-ansible). Custom meta fields are no longer accepted in Ansible 2.0. See [ansible/ansible#13496](https://github.com/ansible/ansible/issues/13496) for more info. Unfortunately, this will break support for librarian-ansible. As a workaround, until this issue is resolved upstream, use version 3.3.0 of this role. + +## 3.3.0 - 2016-04-08 + +### Added + +* Added role variable `bind_other_name_servers` for adding NS records for DNS servers outside of the domain. (GH-12) +* Re-added `bind_recursion`, as it is needed in some cases. (GH-14) + +### Removed + +## 3.2.1 - 2015-12-15 + +### Added + +* The domain name can now also point to an IP address, enabling e.g. 
"http://example.com/" (GH-11) + +## 3.2.0 - 2015-12-07 + +### Added + +* Add support for multiple IP addresses per host (GH-9) +* Allow setting `rrset-order` (for DNS round robin) +* Add support for (multiple) IPv6 (AAAA) records (GH-2). For now, only forward lookups are supported. + +### Changed + +* Test code is put into a separate branch. This means that test code is no longer included when installing the role from Ansible Galaxy. + +## 3.1.0 - 2015-12-04 + +### Added + +* Add support for zone transfers (GH-8) +* Check whether `bind_zone_master_server_ip` was set (GH-7) + +### Removed + +* Role variable `bind_recursion` was removed. This role is explicitly only suitable for an authoritative DNS server, and in this case, recursion should be off. + +## 3.0.0 - 2015-06-14 + +### Added + +* You can now set up a master and slave DNS server. +* The variable `bind_zone_master_server_ip` was added. This is a **required** variable, which makes this release not backwards compatible. +* Automated acceptance tests for the test playbook + +## 2.0.0 - 2015-06-10 + +### Added + +* Added EL6 to supported platforms. Thanks to @rilindo for verifying this. + +### Changed + +* Recursion is turned off by default, which fits an authoritative name server. This change is not backwards compatible, as the behaviour of BIND is different from before when you do not set the variable `bind_recursion` explicitly. + +### Removed + +* Firewall settings. This should not be a concern of this role. Configuring the firewall is functionality offered by other roles (e.g. [bertvv.bind](https://github.com/bertvv/ansible-role-el7)) + +## 1.0.0 - 2015-04-22 + +First release! + +### Added + +- Functionality for master DNS server +- Multiple reverse lookup zones + diff --git a/roles/bertvv.bind/LICENSE.md b/roles/bertvv.bind/LICENSE.md new file mode 100644 index 0000000..8411892 --- /dev/null +++ b/roles/bertvv.bind/LICENSE.md @@ -0,0 +1,13 @@ +# BSD License + +Copyright (c) 2014, Bert Van Vreckem, (bert.vanvreckem@gmail.com) + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/roles/bertvv.bind/README.md b/roles/bertvv.bind/README.md new file mode 100644 index 0000000..fd32d43 --- /dev/null +++ b/roles/bertvv.bind/README.md @@ -0,0 +1,317 @@ +# Ansible role `bind` + +[![Build Status](https://travis-ci.org/bertvv/ansible-role-bind.svg?branch=master)](https://travis-ci.org/bertvv/ansible-role-bind) + +An Ansible role for setting up BIND ISC as an **authoritative-only** DNS server for multiple domains. Specifically, the responsibilities of this role are to: + +- install BIND +- set up the main configuration file + - master server + - slave server +- set up forward and reverse lookup zone files + +This role supports multiple forward and reverse zones, including for IPv6. Although enabling recursion is supported (albeit *strongly* discouraged), consider using another role if you want to set up a caching or forwarding name server. + +Configuring the firewall is not a concern of this role, so you should do this using another role (e.g. [bertvv.rh-base](https://galaxy.ansible.com/bertvv/rh-base/)). + +If you like/use this role, please consider giving it a star and rating it on the role's [Ansible Galaxy page](https://galaxy.ansible.com/bertvv/bind). Thanks! + +See the [change log](CHANGELOG.md) for notable changes between versions. + +## Requirements + +- **The package `python-ipaddr` should be installed on the management node** (since v3.7.0) + +## Role Variables + +Variables are not required, unless specified. + +| Variable | Default | Comments (type) | +| :--- | :--- | :--- | +| `bind_acls` | `[]` | A list of ACL definitions, which are dicts with fields `name` and `match_list`. See below for an example. | +| `bind_allow_query` | `['localhost']` | A list of hosts that are allowed to query this DNS server. Set to ['any'] to allow all hosts | +| `bind_allow_recursion` | `['any']` | Similar to bind_allow_query, this option applies to recursive queries. | +| `bind_check_names` | `[]` | Check host names for compliance with RFC 952 and RFC 1123 and take the defined action (e.g. `warn`, `ignore`, `fail`). | +| `bind_dns_keys` | `[]` | A list of binding keys, which are dicts with fields `name` `algorithm` and `secret`. See below for an example. | +| `bind_dnssec_enable` | `true` | Is DNSSEC enabled | +| `bind_dnssec_validation` | `true` | Is DNSSEC validation enabled | +| `bind_extra_include_files` | `[]` | | +| `bind_forward_only` | `false` | If `true`, BIND is set up as a caching name server | +| `bind_forwarders` | `[]` | A list of name servers to forward DNS requests to. | +| `bind_listen_ipv4` | `['127.0.0.1']` | A list of the IPv4 address of the network interface(s) to listen on. Set to ['any'] to listen on all interfaces. | +| `bind_listen_ipv6` | `['::1']` | A list of the IPv6 address of the network interface(s) to listen on | +| `bind_log` | `data/named.run` | Path to the log file | +| `bind_other_logs` | - | A list of logging channels to configure, with a separate dict for each domain, with relevant details | +| `- allow_update` | `['none']` | A list of hosts that are allowed to dynamically update this DNS zone. | +| `- also_notify` | - | A list of servers that will receive a notification when the master zone file is reloaded. | +| `- delegate` | `[]` | Zone delegation. See below this table for examples. | +| `bind_query_log` | - | A dict with fields `file` (e.g. 
`data/query.log`), `versions`, `size`; when defined, this will turn on the query log |
+| `bind_recursion` | `false` | Determines whether requests for which the DNS server is not authoritative should be forwarded†. |
+| `bind_rrset_order` | `random` | Defines the order for DNS round robin (either `random` or `cyclic`) |
+| `bind_statistics_channels` | `false` | If `true`, BIND is configured with a `statistics-channels` clause (currently only supports a single inet) |
+| `bind_zone_dir` | - | When defined, sets a custom absolute path to the server directory (for zone files, etc.) instead of the default. |
+| `bind_zone_domains` | n/a | A list of domains to configure, with a separate dict for each domain, with relevant details |
+| `- allow_update` | `['none']` | A list of hosts that are allowed to dynamically update this DNS zone. |
+| `- also_notify` | - | A list of servers that will receive a notification when the master zone file is reloaded. |
+| `- create_forward_zones` | - | When defined and set to `false`, creation of forward zones will be skipped (resulting in a reverse-only zone) |
+| `- create_reverse_zones` | - | When defined and set to `false`, creation of reverse zones will be skipped (resulting in a forward-only zone) |
+| `- delegate` | `[]` | Zone delegation. See below this table for examples. |
+| `- hostmaster_email` | `hostmaster` | The e-mail address of the system administrator for the zone |
+| `- hosts` | `[]` | Host definitions. See below this table for examples. |
+| `- ipv6_networks` | `[]` | A list of the IPv6 networks that are part of the domain, in CIDR notation (e.g. 2001:db8::/48) |
+| `- mail_servers` | `[]` | A list of dicts (with fields `name` and `preference`) specifying the mail servers for this domain. |
+| `- name_servers` | `[ansible_hostname]` | A list of the DNS servers for this domain. |
+| `- name` | `example.com` | The domain name |
+| `- networks` | `['10.0.2']` | A list of the networks that are part of the domain |
+| `- other_name_servers` | `[]` | A list of the DNS servers outside of this domain. |
+| `- services` | `[]` | A list of services to be advertised by SRV records |
+| `- text` | `[]` | A list of dicts with fields `name` and `text`, specifying TXT records. `text` can be a list or string. |
+| `- naptr` | `[]` | A list of dicts with fields `name`, `order`, `pref`, `flags`, `service`, `regex` and `replacement`, specifying NAPTR records. |
+| `bind_zone_file_mode` | 0640 | The file permissions for the master zone files |
+| `bind_zone_master_server_ip` | - | **(Required)** The IP address of the master DNS server. |
+| `bind_zone_minimum_ttl` | `1D` | Minimum TTL field in the SOA record. |
+| `bind_zone_time_to_expire` | `1W` | Time to expire field in the SOA record. |
+| `bind_zone_time_to_refresh` | `1D` | Time to refresh field in the SOA record. |
+| `bind_zone_time_to_retry` | `1H` | Time to retry field in the SOA record. |
+| `bind_zone_ttl` | `1W` | Time to Live field in the SOA record. |
+
+† Best practice for an authoritative name server is to leave recursion turned off. However, [for some cases](http://www.zytrax.com/books/dns/ch7/queries.html#allow-query-cache) it may be necessary to have recursion turned on.
+
+### Minimal variables for a working zone
+
+Even though only the variable `bind_zone_master_server_ip` is required for the role to run without errors, this is not sufficient to get a working zone.
In order to set up an authoritative name server that is available to clients, you should also at least define the following variables: + +| Variable | Master | Slave | +| :--- | :---: | :---: | +| `bind_zone_domains` | V | V | +| `- name` | V | V | +| `- networks` | V | V | +| `- name_servers` | V | -- | +| `- hosts` | V | -- | +| `bind_listen_ipv4` | V | V | +| `bind_allow_query` | V | V | + +### Domain definitions + +```Yaml +bind_zone_domains: + - name: mydomain.com # Domain name + create_reverse_zones: false # Skip creation of reverse zones + hosts: + - name: pub01 + ip: 192.0.2.1 + ipv6: 2001:db8::1 + aliases: + - ns + - name: '@' # Enables "http://mydomain.com/" + ip: + - 192.0.2.2 # Multiple IP addresses for a single host + - 192.0.2.3 # results in DNS round robin + sshfp: # Secure shell fingerprint + - "3 1 1262006f9a45bb36b1aa14f45f354b694b77d7c3" + - "3 2 e5921564252fe10d2dbafeb243733ed8b1d165b8fa6d5a0e29198e5793f0623b" + ipv6: + - 2001:db8::2 + - 2001:db8::3 + aliases: + - www + - name: priv01 # This IP is in another subnet, will result in + ip: 10.0.0.1 # multiple reverse zones + - name: mydomain.net. + aliases: + - name: sub01 + type: DNAME # Example of a DNAME alias record + networks: + - '192.0.2' + - '10' + - '172.16' + delegate: + - zone: foo + dns: 192.0.2.1 + services: + - name: _ldap._tcp + weight: 100 + port: 88 + target: dc001 + naptr: # Name Authority Pointer record, used for IP + - name: "sip" # telephony + order: 100 + pref: 10 + flags: "S" + service: "SIP+D2T" + regex: "!^.*$!sip:customer-service@example.com!" + replacement: "_sip._tcp.example.com." +``` + +### Minimal slave configuration + +```Yaml + bind_listen_ipv4: ['any'] + bind_allow_query: ['any'] + bind_zone_master_server_ip: 192.168.111.222 + bind_zone_domains: + - name: example.com +``` + +### Hosts + +Host names that this DNS server should resolve can be specified in `hosts` as a list of dicts with fields `name`, `ip`, `aliases` and `sshfp`. Aliases can be CNAME (default) or DNAME records. + +To allow to surf to `http://example.com/`, set the host name of your web server to `'@'` (must be quoted!). In BIND syntax, `@` indicates the domain name itself. + +If you want to specify multiple IP addresses for a host, add entries to `bind_zone_hosts` with the same name (e.g. `priv01` in the code snippet). This results in multiple A/AAAA records for that host and allows [DNS round robin](http://www.zytrax.com/books/dns/ch9/rr.html), a simple load balancing technique. The order in which the IP addresses are returned can be configured with role variable `bind_rrset_order`. + +### Networks + +As you can see, not all hosts are in the same network. This is perfectly acceptable, and supported by this role. All networks should be specified in `networks` (part of bind_zone_domains.name dict), though, or the host will not get a PTR record for reverse lookup: + +Remark that only the network part should be specified here! When specifying a class B IP address (e.g. "172.16") in a variable file, it must be quoted. Otherwise, the Yaml parser will interpret it as a float. + +Based on the idea and examples detailed at for the gdnsd package, the zonefiles are fully idempotent, and thus only get updated if "real" content changes. + +### Zone delgation + +To delegate a zone to a DNS, it is enough to create a `NS` record (under delegate) which is the equivalent of: + +```text +foo IN NS 192.0.2.1 +``` + +### Service records + +Service (SRV) records can be added with the services. 
This should be a list of dicts with mandatory fields `name` (service name), `target` (host providing the service), `port` (TCP/UDP port of the service) and optional fields `priority` (default = 0) and `weight` (default = 0).
+
+### ACLs
+
+ACLs can be defined like this:
+
+```Yaml
+bind_acls:
+  - name: acl1
+    match_list:
+      - 192.0.2.0/24
+      - 10.0.0.0/8
+```
+
+The names of the ACLs will be added to the `allow-transfer` clause in the global options.
+
+### Binding Keys
+
+Binding keys can be defined like this:
+
+```Yaml
+bind_dns_keys:
+  - name: master_key
+    algorithm: hmac-sha256
+    secret: "azertyAZERTY123456"
+bind_extra_include_files:
+  - "{{ bind_auth_file }}"
+```
+
+**Tip**: the extra include file must be set as an Ansible variable, because the file path is OS-dependent.
+
+The keys will be written to the file `{{ bind_auth_file }}` (e.g. `/etc/bind/auth_transfer.conf` on Debian), which has to be added to the list variable `bind_extra_include_files`.
+
+## Dependencies
+
+No dependencies.
+
+## Example Playbook
+
+See the test playbook [converge.yml](molecule/default/converge.yml) for an elaborate example that showcases most features.
+
+## Testing
+
+This role is tested using [Ansible Molecule](https://molecule.readthedocs.io/). Tests are launched automatically on [Travis CI](https://travis-ci.org/bertvv/ansible-role-bind) after each commit and PR.
+
+This Molecule configuration will:
+
+- Run Yamllint and Ansible Lint
+- Create two Docker containers, one primary (`ns1`) and one secondary (`ns2`) DNS server
+- Run a syntax check
+- Apply the role with a [test playbook](molecule/default/converge.yml)
+- Run acceptance tests with [BATS](https://github.com/bats-core/bats-core/)
+
+This process is repeated for the supported Linux distributions.
+
+### Local test environment
+
+If you want to set up a local test environment, you can use this reproducible setup based on Vagrant+VirtualBox: . Steps to install the necessary tools manually:
+
+1. Docker and BATS should be installed on your machine (assumed to run Linux). No Docker containers should be running when you start the test.
+2. As recommended by Molecule, create a Python virtual environment.
+3. Install the software tools: `python3 -m pip install molecule docker netaddr yamllint ansible-lint`
+4. Navigate to the root of the role directory and run `molecule test`
+
+Molecule automatically deletes the containers after a test. If you would like to check out the containers yourself, run `molecule converge` followed by `molecule login --host HOSTNAME`.
+
+The Docker containers are based on images created by [Jeff Geerling](https://hub.docker.com/u/geerlingguy), specifically for Ansible testing (look for images named `geerlingguy/docker-DISTRO-ansible`). You can use any of his images, but only the distributions mentioned in [meta/main.yml](meta/main.yml) are supported.
+
+The default config will start two CentOS 7 containers (the primary supported platform at this time). Choose another distro by setting the `MOLECULE_DISTRO` variable with the command, e.g.:
+
+``` bash
+MOLECULE_DISTRO=debian9 molecule test
+```
+
+or
+
+``` bash
+MOLECULE_DISTRO=debian9 molecule converge
+```
+
+You can run the acceptance tests on both servers with `molecule verify` or manually with
+
+```console
+SUT_IP=172.17.0.2 bats molecule/default/files/dns.bats
+```
+
+You need to initialise the variable `SUT_IP`, the system under test's IP address. The primary server, `ns1`, should have IP address 172.17.0.2 and the secondary server, `ns2`, 172.17.0.3.
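+
+For example, to run the same suite against the secondary server (`ns2`, which should have address 172.17.0.3 on the default Docker bridge network):
+
+```console
+SUT_IP=172.17.0.3 bats molecule/default/files/dns.bats
+```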
+ +## License + +BSD + +## Contributors + +This role could only have been realized thanks to the contributions of many. If you have an idea to improve it even further, don't hesitate to pitch in! + +Issues, feature requests, ideas, suggestions, etc. can be posted in the Issues section. + +Pull requests are also very welcome. Please create a topic branch for your proposed changes. If you don't, this will create conflicts in your fork after the merge. Don't hesitate to add yourself to the contributor list below in your PR! + +Maintainers: + +- [Bert Van Vreckem](https://github.com/bertvv/) +- [Stuart Knight](https://github.com/blofeldthefish) + +Contributors: + +- [Aido](https://github.com/aido) +- [Angel Barrera](https://github.com/angelbarrera92) +- [B. Verschueren](https://github.com/bverschueren) +- [Boris Momčilović](https://github.com/kornrunner) +- [Brad Durrow](https://github.com/bdurrow) +- [Christopher Hicks](http://www.chicks.net/) +- [David J. Haines](https://github.com/dhaines) +- [Fabio Rocha](https://github.com/frock81) +- [Fazle Arefin](https://github.com/fazlearefin) +- [Greg Cockburn](https://github.com/gergnz) +- [Guillaume Darmont](https://github.com/gdarmont) +- [jadjay](https://github.com/jadjay) +- [Jascha Sticher](https://github.com/itbane) +- [Joanna Delaporte](https://github.com/jdelaporte) +- [Jörg Eichhorn](https://github.com/jeichhorn) +- [Jose Taas](https://github.com/josetaas) +- [Lennart Weller](https://github.com/lhw) +- [Loic Dachary](http://dachary.org) +- [Mario Ciccarelli](https://github.com/kartone) +- [Otto Sabart](https://github.com/seberm) +- [Paulius Mazeika](https://github.com/pauliusm) +- [Paulo E. Castro](https://github.com/pecastro) +- [Peter Janes](https://github.com/peterjanes) +- [psa](https://github.com/psa) +- [Rafael Bodill](https://github.com/rafi) +- [Rayford Johnson](https://github.com/rayfordj) +- [Robin Ophalvens](https://github.com/RobinOphalvens) +- [Romuald](https://github.com/rds13) +- [Tom Meinlschmidt](https://github.com/tmeinlschmidt) diff --git a/roles/bertvv.bind/defaults/main.yml b/roles/bertvv.bind/defaults/main.yml new file mode 100644 index 0000000..ac6aafc --- /dev/null +++ b/roles/bertvv.bind/defaults/main.yml @@ -0,0 +1,70 @@ +# roles/bind/defaults/main.yml +--- + +bind_log: "data/named.run" + +bind_zone_domains: + - name: "example.com" + hostmaster_email: "hostmaster" + networks: + - "10.0.2" + +# List of acls. +bind_acls: [] + +# Key binding for slaves +bind_dns_keys: [] +# - name: master_key +# algorithm: hmac-sha256 +# secret: "azertyAZERTY123456" + +# List of IPv4 address of the network interface(s) to listen on. Set to "any" +# to listen on all interfaces +bind_listen_ipv4: + - "127.0.0.1" + +# List of IPv6 address of the network interface(s) to listen on. +bind_listen_ipv6: + - "::1" + +# List of hosts that are allowed to query this DNS server. +bind_allow_query: + - "localhost" + +# Determines whether recursion should be allowed. Typically, an authoritative +# name server should have recursion turned OFF. +bind_recursion: false +bind_allow_recursion: + - "any" + +# Allows BIND to be set up as a caching name server +bind_forward_only: false + +# List of name servers to forward DNS requests to. 
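+# For example, to forward to Google's public resolvers (the values used in the
+# Molecule test playbook):
+# bind_forwarders:
+#   - '8.8.8.8'
+#   - '8.8.4.4'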
+bind_forwarders: [] + +# DNS round robin order (random or cyclic) +bind_rrset_order: "random" + +# statistics channels configuration +bind_statistics_channels: false +bind_statistics_port: 8053 +bind_statistics_host: 127.0.0.1 +bind_statistics_allow: + - "127.0.0.1" + +# DNSSEC configuration +bind_dnssec_enable: true +bind_dnssec_validation: true + +bind_extra_include_files: [] + +# SOA information +bind_zone_ttl: "1W" +bind_zone_time_to_refresh: "1D" +bind_zone_time_to_retry: "1H" +bind_zone_time_to_expire: "1W" +bind_zone_minimum_ttl: "1D" + +# File mode for master zone files (needs to be something like 0660 for dynamic updates) +bind_zone_file_mode: "0640" diff --git a/roles/bertvv.bind/handlers/main.yml b/roles/bertvv.bind/handlers/main.yml new file mode 100644 index 0000000..9acaaad --- /dev/null +++ b/roles/bertvv.bind/handlers/main.yml @@ -0,0 +1,7 @@ +# roles/bind/handlers/main.yml +--- + +- name: reload bind + service: + name: "{{ bind_service }}" + state: reloaded diff --git a/roles/bertvv.bind/meta/.galaxy_install_info b/roles/bertvv.bind/meta/.galaxy_install_info new file mode 100644 index 0000000..58fa52d --- /dev/null +++ b/roles/bertvv.bind/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Sun Jun 28 14:49:10 2020 +version: v4.2.0 diff --git a/roles/bertvv.bind/meta/main.yml b/roles/bertvv.bind/meta/main.yml new file mode 100644 index 0000000..abfa20f --- /dev/null +++ b/roles/bertvv.bind/meta/main.yml @@ -0,0 +1,34 @@ +--- +galaxy_info: + author: Bert Van Vreckem + description: > + Sets up ISC BIND as an authoritative DNS server for one or more domains + (primary and/or secondary). + license: BSD + min_ansible_version: 2.7 + platforms: + - name: ArchLinux + versions: + - any + - name: Debian + versions: + - jessie + - stretch + - buster + - name: FreeBSD + versions: + - any + - name: EL + versions: + - 7 + - 8 + - name: Ubuntu + versions: + - xenial + - bionic + - focal + galaxy_tags: + - dns + - networking + - system +dependencies: [] diff --git a/roles/bertvv.bind/molecule/default/converge.yml b/roles/bertvv.bind/molecule/default/converge.yml new file mode 100644 index 0000000..dcc118a --- /dev/null +++ b/roles/bertvv.bind/molecule/default/converge.yml @@ -0,0 +1,117 @@ +--- +- name: Converge + hosts: all + vars: + bind_zone_dir: /var/local/named-zones + bind_zone_file_mode: '0660' + bind_allow_query: + - any + bind_listen_ipv4: + - any + bind_listen_ipv6: + - any + bind_acls: + - name: acl1 + match_list: + - 172.17.0.0/16 + bind_forwarders: + - '8.8.8.8' + - '8.8.4.4' + bind_recursion: true + bind_query_log: 'data/query.log' + bind_check_names: 'master ignore' + bind_zone_master_server_ip: 172.17.0.2 + bind_zone_minimum_ttl: "2D" + bind_zone_ttl: "2W" + bind_zone_time_to_refresh: "2D" + bind_zone_time_to_retry: "2H" + bind_zone_time_to_expire: "2W" + bind_zone_domains: + - name: 'example.com' + networks: + - '192.0.2' + ipv6_networks: + - '2001:db9::/48' + name_servers: + - ns1.acme-inc.com. + - ns2.acme-inc.com. 
+ hostmaster_email: admin + hosts: + - name: srv001 + ip: 192.0.2.1 + ipv6: '2001:db9::1' + aliases: + - www + - name: srv002 + ip: 192.0.2.2 + ipv6: '2001:db9::2' + - name: mail001 + ip: 192.0.2.10 + ipv6: '2001:db9::3' + mail_servers: + - name: mail001 + preference: 10 + - name: 'acme-inc.com' + networks: + - '172.17' + - '10' + ipv6_networks: + - '2001:db8::/48' + name_servers: + - ns1 + - ns2 + hosts: + - name: ns1 + ip: 172.17.0.2 + - name: ns2 + ip: 172.17.0.3 + - name: srv001 + ip: 172.17.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - name: srv002 + ip: 172.17.1.2 + ipv6: 2001:db8::2 + aliases: + - mysql + - name: mail001 + ip: 172.17.2.1 + ipv6: 2001:db8::d:1 + aliases: + - smtp + - mail-in + - name: mail002 + ip: 172.17.2.2 + ipv6: 2001:db8::d:2 + - name: mail003 + ip: 172.17.2.3 + ipv6: 2001:db8::d:3 + aliases: + - imap + - mail-out + - name: srv010 + ip: 10.0.0.10 + - name: srv011 + ip: 10.0.0.11 + - name: srv012 + ip: 10.0.0.12 + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + services: + - name: _ldap._tcp + weight: 100 + port: 88 + target: srv010 + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' + roles: + - role: bertvv.bind diff --git a/roles/bertvv.bind/molecule/default/files/dns.bats b/roles/bertvv.bind/molecule/default/files/dns.bats new file mode 100644 index 0000000..7c98ff4 --- /dev/null +++ b/roles/bertvv.bind/molecule/default/files/dns.bats @@ -0,0 +1,263 @@ +#! /usr/bin/env bats +# +# Functional tests for a DNS server set up as a test case for Ansible role +# bertvv.bind +# +# The variable SUT_IP, the IP address of the System Under Test must be set +# outside of the script. + +#{{{ Helper functions + +# Usage: assert_forward_lookup NAME DOMAIN IP +# Exits with status 0 if NAME.DOMAIN resolves to IP, a nonzero +# status otherwise +assert_forward_lookup() { + local name="$1" + local domain="$2" + local ip="$3" + + local result + result=$(dig @"${SUT_IP}" "${name}.${domain}" +short) + + echo "Expected: ${ip}" + echo "Actual : ${result}" + [ "${ip}" = "${result}" ] +} + +# Usage: assert_forward_ipv6_lookup NAME DOMAIN IP +assert_forward_ipv6_lookup() { + local name="${1}" + local domain="${2}" + local ip="${3}" + + local result + result=$(dig @"${SUT_IP}" AAAA "${name}.${domain}" +short) + + echo "Expected: ${ip}" + echo "Actual : ${result}" + [ "${ip}" = "${result}" ] +} + +# Usage: assert_reverse_lookup NAME DOMAIN IP +# Exits with status 0 if a reverse lookup on IP resolves to NAME, +# a nonzero status otherwise +assert_reverse_lookup() { + local name="$1" + local domain="$2" + local ip="$3" + + local expected="${name}.${domain}." + local result + result=$(dig @"${SUT_IP}" -x "${ip}" +short) + + echo "Expected: ${expected}" + echo "Actual : ${result}" + [ "${expected}" = "${result}" ] +} + +# Usage: assert_alias_lookup ALIAS NAME DOMAIN IP +# Exits with status 0 if a forward lookup on NAME resolves to the +# host name NAME.DOMAIN and to IP, a nonzero status otherwise +assert_alias_lookup() { + local alias="$1" + local name="$2" + local domain="$3" + local ip="$4" + local result + result=$(dig @"${SUT_IP}" "${alias}.${domain}" +short) + + grep "${name}\\.${domain}\\." <<< "${result}" + grep "${ip}" <<< "${result}" +} + +# Usage: assert_ns_lookup DOMAIN NS_NAME... +# Exits with status 0 if all specified host names occur in the list of +# name servers for the domain. 
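+# e.g. assert_ns_lookup acme-inc.com ns1.acme-inc.com ns2.acme-inc.com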
+assert_ns_lookup() { + local domain="${1}" + shift + local result + result=$(dig @"${SUT_IP}" "${domain}" NS +short) + + [ -n "${result}" ] # the list of name servers should not be empty + while (( "$#" )); do + grep "$1\\." <<< "${result}" + shift + done +} + +# Usage: assert_mx_lookup DOMAIN PREF1 NAME1 PREF2 NAME2... +# e.g. assert_mx_lookup example.com 10 mailsrv1 20 mailsrv2 +# Exits with status 0 if all specified host names occur in the list of +# mail servers for the domain. +assert_mx_lookup() { + local domain="${1}" + shift + local result + result=$(dig @"${SUT_IP}" "${domain}" MX +short) + + [ -n "${result}" ] # the list of name servers should not be empty + while (( "$#" )); do + grep "$1 $2\\.${domain}\\." <<< "${result}" + shift + shift + done +} + +# Usage: assert_srv_lookup DOMAIN SERVICE WEIGHT PORT TARGET +# e.g. assert_srv_lookup example.com _ldap._tcp 0 100 88 ldapsrv +assert_srv_lookup() { + local domain="${1}" + shift + local service="${1}" + shift + local expected="${*}.${domain}." + local result + result=$(dig @"${SUT_IP}" SRV "${service}.${domain}" +short) + + echo "expected: ${expected}" + echo "actual : ${result}" + [ "${result}" = "${expected}" ] +} + +# Perform a TXT record lookup +# Usage: assert_txt_lookup NAME TEXT... +# e.g. assert_txt_lookup _kerberos.example.com KERBEROS.EXAMPLE.COM +assert_txt_lookup() { + local name="$1" + shift + local result + result=$(dig @"${SUT_IP}" TXT "${name}" +short) + + echo "expected: ${*}" + echo "actual : ${result}" + while [ "$#" -ne "0" ]; do + grep "${1}" <<< "${result}" + shift + done +} + + +#}}} + +@test "Forward lookups acme-inc.com" { + # host name domain IP + assert_forward_lookup ns1 acme-inc.com 172.17.0.2 + assert_forward_lookup ns2 acme-inc.com 172.17.0.3 + assert_forward_lookup srv001 acme-inc.com 172.17.1.1 + assert_forward_lookup srv002 acme-inc.com 172.17.1.2 + assert_forward_lookup mail001 acme-inc.com 172.17.2.1 + assert_forward_lookup mail002 acme-inc.com 172.17.2.2 + assert_forward_lookup mail003 acme-inc.com 172.17.2.3 + assert_forward_lookup srv010 acme-inc.com 10.0.0.10 + assert_forward_lookup srv011 acme-inc.com 10.0.0.11 + assert_forward_lookup srv012 acme-inc.com 10.0.0.12 +} + +@test "Reverse lookups acme-inc.com" { + # host name domain IP + assert_reverse_lookup ns1 acme-inc.com 172.17.0.2 + assert_reverse_lookup ns2 acme-inc.com 172.17.0.3 + assert_reverse_lookup srv001 acme-inc.com 172.17.1.1 + assert_reverse_lookup srv002 acme-inc.com 172.17.1.2 + assert_reverse_lookup mail001 acme-inc.com 172.17.2.1 + assert_reverse_lookup mail002 acme-inc.com 172.17.2.2 + assert_reverse_lookup mail003 acme-inc.com 172.17.2.3 + assert_reverse_lookup srv010 acme-inc.com 10.0.0.10 + assert_reverse_lookup srv011 acme-inc.com 10.0.0.11 + assert_reverse_lookup srv012 acme-inc.com 10.0.0.12 +} + +@test "Alias lookups acme-inc.com" { + # alias hostname domain IP + assert_alias_lookup www srv001 acme-inc.com 172.17.1.1 + assert_alias_lookup mysql srv002 acme-inc.com 172.17.1.2 + assert_alias_lookup smtp mail001 acme-inc.com 172.17.2.1 + assert_alias_lookup mail-in mail001 acme-inc.com 172.17.2.1 + assert_alias_lookup imap mail003 acme-inc.com 172.17.2.3 + assert_alias_lookup mail-out mail003 acme-inc.com 172.17.2.3 + +} + +@test "IPv6 forward lookups acme-inc.com" { + # hostname domain IPv6 + assert_forward_ipv6_lookup srv001 acme-inc.com 2001:db8::1 + assert_forward_ipv6_lookup srv002 acme-inc.com 2001:db8::2 + assert_forward_ipv6_lookup mail001 acme-inc.com 2001:db8::d:1 + assert_forward_ipv6_lookup mail002 
acme-inc.com 2001:db8::d:2 + assert_forward_ipv6_lookup mail003 acme-inc.com 2001:db8::d:3 +} + +@test "IPv6 reverse lookups acme-inc.com" { + # hostname domain IPv6 + assert_forward_ipv6_lookup srv001 acme-inc.com 2001:db8::1 + assert_forward_ipv6_lookup srv002 acme-inc.com 2001:db8::2 + assert_forward_ipv6_lookup mail001 acme-inc.com 2001:db8::d:1 + assert_forward_ipv6_lookup mail002 acme-inc.com 2001:db8::d:2 + assert_forward_ipv6_lookup mail003 acme-inc.com 2001:db8::d:3 +} + +@test "NS record lookup acme-inc.com" { + assert_ns_lookup acme-inc.com \ + ns1.acme-inc.com \ + ns2.acme-inc.com +} + +@test "Mail server lookup acme-inc.com" { + assert_mx_lookup acme-inc.com \ + 10 mail001 \ + 20 mail002 +} + +@test "Service record lookup acme-inc.com" { + assert_srv_lookup acme-inc.com _ldap._tcp 0 100 88 srv010 +} + +@test "TXT record lookup acme-inc.com" { + assert_txt_lookup _kerberos.acme-inc.com KERBEROS.ACME-INC.COM + assert_txt_lookup acme-inc.com "some text" "more text" +} + +# Tests for domain example.com + + +@test "Forward lookups example.com" { + # host name domain IP + assert_forward_lookup srv001 example.com 192.0.2.1 + assert_forward_lookup srv002 example.com 192.0.2.2 + assert_forward_lookup mail001 example.com 192.0.2.10 +} + +@test "Reverse lookups example.com" { + # host name domain IP + assert_reverse_lookup srv001 example.com 192.0.2.1 + assert_reverse_lookup srv002 example.com 192.0.2.2 + assert_reverse_lookup mail001 example.com 192.0.2.10 +} + +@test "Alias lookups example.com" { + # alias hostname domain IP + assert_alias_lookup www srv001 example.com 192.0.2.1 +} + +@test "IPv6 forward lookups example.com" { + # hostname domain IPv6 + assert_forward_ipv6_lookup srv001 example.com 2001:db9::1 +} + +@test "IPv6 reverse lookups example.com" { + # hostname domain IPv6 + assert_reverse_lookup srv001 example.com 2001:db9::1 +} + +@test "NS record lookup example.com" { + assert_ns_lookup example.com \ + ns1.acme-inc.com \ + ns2.acme-inc.com +} + +@test "Mail server lookup example.com" { + assert_mx_lookup example.com \ + 10 mail001 +} + diff --git a/roles/bertvv.bind/molecule/default/files/functional-tests.sh b/roles/bertvv.bind/molecule/default/files/functional-tests.sh new file mode 100755 index 0000000..301ee34 --- /dev/null +++ b/roles/bertvv.bind/molecule/default/files/functional-tests.sh @@ -0,0 +1,117 @@ +#! /usr/bin/env bash +# +# Author: Bert Van Vreckem +# +# Run BATS test files in the current directory, and the ones in the subdirectory +# matching the host name. +# +# The script installs BATS if needed. It's best to put ${bats_install_dir} in +# your .gitignore. + +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable + +#{{{ Variables + +test_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +bats_archive="v0.4.0.tar.gz" +bats_url="https://github.com/sstephenson/bats/archive/${bats_archive}" +bats_install_dir="/opt" +bats_default_location="${bats_install_dir}/bats/libexec/bats" +test_file_pattern="*.bats" + +# Color definitions +readonly reset='\e[0m' +readonly black='\e[0;30m' +readonly red='\e[0;31m' +readonly green='\e[0;32m' +readonly yellow='\e[0;33m' +readonly blue='\e[0;34m' +readonly purple='\e[0;35m' +readonly cyan='\e[0;36m' +readonly white='\e[0;37m' +#}}} + +main() { + + bats=$(find_bats_executable) + + if [ -z "${bats}" ]; then + install_bats + bats="${bats_default_location}" + fi + + debug "Using BATS executable at: ${bats}" + + # List all test cases (i.e. 
files in the test dir matching the test file + # pattern) + + # Tests to be run on all hosts + global_tests=$(find_tests "${test_dir}" 1) + + # Tests for individual hosts + host_tests=$(find_tests "${test_dir}/${HOSTNAME}") + + # Loop over test files + for test_case in ${global_tests} ${host_tests}; do + info "Running test ${test_case}" + ${bats} "${test_case}" + done +} + +#{{{ Functions + +# Tries to find BATS executable in the PATH or the place where this script +# installs it. +find_bats_executable() { + if which bats > /dev/null; then + which bats + elif [ -x "${bats_default_location}" ]; then + echo "${bats_default_location}" + else + echo "" + fi +} + +# Usage: install_bats +install_bats() { + pushd "${bats_install_dir}" > /dev/null 2>&1 + curl --location --remote-name "${bats_url}" + tar xzf "${bats_archive}" + mv bats-* bats + rm "${bats_archive}" + popd > /dev/null 2>&1 +} + +# Usage: find_tests DIR [MAX_DEPTH] +# +# Finds BATS test suites in the specified directory +find_tests() { + local max_depth="" + if [ "$#" -eq "2" ]; then + max_depth="-maxdepth $2" + fi + + local tests + tests=$(find "$1" ${max_depth} -type f -name "${test_file_pattern}" -printf '%p\n' 2> /dev/null) + + echo "${tests}" +} + +# Usage: info [ARG]... +# +# Prints all arguments on the standard output stream +info() { + printf "${yellow}### %s${reset}\n" "${*}" +} + +# Usage: debug [ARG]... +# +# Prints all arguments on the standard output stream +debug() { + printf "${cyan}### %s${reset}\n" "${*}" +} +#}}} + +main diff --git a/roles/bertvv.bind/molecule/default/molecule.yml b/roles/bertvv.bind/molecule/default/molecule.yml new file mode 100644 index 0000000..e9f92eb --- /dev/null +++ b/roles/bertvv.bind/molecule/default/molecule.yml @@ -0,0 +1,53 @@ +--- +dependency: + name: galaxy + +driver: + # Specifies the driver that should be used. Podman should also work + name: docker + +# Linting with yamllint and ansible-lint +# verify.yml is skipped because it uses the shell: module, which would trigger +# a linting error. +lint: | + yamllint . + ansible-lint --exclude=molecule/default/verify.yml + +platforms: + # Set name and hostname + - name: ns1 + hostname: ns1 + # Specify which image should be used. Geerlingguys images are Ansible + # compatible and have Systemd installed + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest" + # Command to execute when the container starts + command: ${MOLECULE_DOCKER_COMMAND:-""} + # Volumes to mount within the container. Important to enable systemd + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + # Give extended privileges to the container. Necessary for Systemd to + # operate within the container. DO NOT use extended privileges in a + # production environment! 
+ privileged: true + # Allocate pseudo-TTY + tty: true + environment: + container: docker + + - name: ns2 + hostname: ns2 + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + privileged: true + tty: true + environment: + container: docker + +provisioner: + name: ansible + +# Runs the verify.yml playbook +verifier: + name: ansible diff --git a/roles/bertvv.bind/molecule/default/verify.yml b/roles/bertvv.bind/molecule/default/verify.yml new file mode 100644 index 0000000..317556e --- /dev/null +++ b/roles/bertvv.bind/molecule/default/verify.yml @@ -0,0 +1,9 @@ +--- +- name: Verify + hosts: all + tasks: + # We run the BATS tests from the localhost, since they are black box tests + - name: "Run BATS tests for {{ ansible_hostname }}" + shell: SUT_IP={{ ansible_default_ipv4.address }} bats {{ playbook_dir }}/files/dns.bats + delegate_to: localhost + changed_when: false diff --git a/roles/bertvv.bind/tasks/main.yml b/roles/bertvv.bind/tasks/main.yml new file mode 100644 index 0000000..7d2ffb9 --- /dev/null +++ b/roles/bertvv.bind/tasks/main.yml @@ -0,0 +1,69 @@ +# roles/bind/tasks/main.yml +--- + +# Initialise distribution-specific variables +- name: Source specific variables + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution }}.yml" + - "{{ ansible_os_family }}.yml" + tags: bind,pretask + +- name: Check whether `bind_zone_master_server_ip` was set + assert: + that: bind_zone_master_server_ip is defined + +- name: Install BIND + package: + pkg: "{{ item }}" + state: present + with_items: + - "{{ bind_packages }}" + tags: bind + +- name: Ensure runtime directories referenced in config exist + file: + path: "{{ item }}" + state: directory + owner: root + group: "{{ bind_group }}" + mode: 0770 + with_items: + - "{{ bind_dir }}/dynamic" + - "{{ bind_dir }}/data" + - "{{ bind_zone_dir }}" + tags: bind + +- name: Create serial, based on UTC UNIX time + command: date -u +%s + register: timestamp + changed_when: false + run_once: true + check_mode: false + tags: bind + +# file to set keys for XFR authentication +- name: create extra config file for authenticated XFR request + tags: pretask + template: + src: auth_transfer.j2 + dest: "{{ bind_conf_dir }}/{{ auth_file }}" + mode: 0640 + owner: root + group: "{{ bind_group }}" + when: bind_dns_keys is defined and bind_dns_keys|length > 0 + +- name: Set up the machine as a master DNS server + include_tasks: master.yml + when: bind_zone_master_server_ip in ansible_all_ipv4_addresses + +- name: Set up the machine as a slave DNS server + include_tasks: slave.yml + when: bind_zone_master_server_ip not in ansible_all_ipv4_addresses + +- name: Start BIND service + service: + name: "{{ bind_service }}" + state: started + enabled: true + tags: bind diff --git a/roles/bertvv.bind/tasks/master.yml b/roles/bertvv.bind/tasks/master.yml new file mode 100644 index 0000000..3188a39 --- /dev/null +++ b/roles/bertvv.bind/tasks/master.yml @@ -0,0 +1,140 @@ +# roles/bind/tasks/master.yml +# Set up a BIND master server +--- + +- name: Read forward zone hashes + shell: 'grep -s "^; Hash:" {{ bind_zone_dir }}/{{ item.name }} || true' + changed_when: false + check_mode: false + register: forward_hashes_temp + with_items: + - "{{ bind_zone_domains }}" + run_once: true + loop_control: + label: "{{ item.name }}" + +- name: create dict of forward hashes + set_fact: + forward_hashes: "{{ forward_hashes|default([]) + [ {'hash': item.stdout|default(), 
'name': item.item.name} ] }}" + with_items: + - "{{ forward_hashes_temp.results }}" + run_once: true + loop_control: + label: "{{ item.item.name }}" + +- name: Read reverse ipv4 zone hashes + shell: "grep -s \"^; Hash:\" {{ bind_zone_dir }}/{{ ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1])) }}.in-addr.arpa || true" + changed_when: false + check_mode: false + register: reverse_hashes_temp + with_subelements: + - "{{ bind_zone_domains }}" + - networks + - flags: + skip_missing: true + run_once: true + loop_control: + label: "{{ item.1 }}" + +- name: create dict of reverse hashes + set_fact: + reverse_hashes: "{{ reverse_hashes|default([]) + [ {'hash': item.0.stdout|default(), 'network': item.1} ] }}" + with_subelements: + - "{{ reverse_hashes_temp.results }}" + - item + run_once: true + loop_control: + label: "{{ item.1.name |default(item.0.cmd.split(' ')[4]) }}" + +- name: Read reverse ipv6 zone hashes + shell: "grep -s \"^; Hash:\" {{ bind_zone_dir }}/{{ (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):-1] }} || true" + changed_when: false + check_mode: false + register: reverse_hashes_ipv6_temp + with_subelements: + - "{{ bind_zone_domains }}" + - ipv6_networks + - flags: + skip_missing: true + run_once: true + loop_control: + label: "{{ item.1 }}" + +- name: create dict of reverse ipv6 hashes + set_fact: + reverse_hashes_ipv6: "{{ reverse_hashes_ipv6|default([]) + [ {'hash': item.0.stdout|default(), 'network': item.1} ] }}" + with_subelements: + - "{{ reverse_hashes_ipv6_temp.results }}" + - item + run_once: true + loop_control: + label: "{{ item.1.name |default(item.0.cmd.split(' ')[4]) }}" + +- name: Master | Main BIND config file (master) + template: + src: master_etc_named.conf.j2 + dest: "{{ bind_config }}" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: '0640' + setype: named_conf_t + validate: 'named-checkconf %s' + notify: reload bind + tags: bind + +- name: Master | Create forward lookup zone file + template: + src: bind_zone.j2 + dest: "{{ bind_zone_dir }}/{{ item.name }}" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: "{{ bind_zone_file_mode }}" + setype: named_zone_t + validate: 'named-checkzone -d {{ item.name }} %s' + with_items: + - "{{ bind_zone_domains }}" + loop_control: + label: "{{ item.name }}" + when: item.create_forward_zones is not defined or item.create_forward_zones + notify: reload bind + tags: bind + +- name: Master | Create reverse lookup zone file + template: + src: reverse_zone.j2 + dest: "{{ bind_zone_dir }}/{{ ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1])) }}.in-addr.arpa" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: "{{ bind_zone_file_mode }}" + setype: named_zone_t + validate: "named-checkzone {{ ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1])) }}.in-addr.arpa %s" + with_subelements: + - "{{ bind_zone_domains }}" + - networks + - flags: + skip_missing: true + loop_control: + label: "{{ item.1 }}" + when: item.create_reverse_zones is not defined or item.create_reverse_zones + notify: reload bind + tags: bind + +- name: Master | Create reverse IPv6 lookup zone file + template: + src: reverse_zone_ipv6.j2 + dest: "{{ bind_zone_dir }}/{{ (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):-1] }}" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: "{{ bind_zone_file_mode }}" + setype: named_zone_t + validate: "named-checkzone {{ (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):] }} %s" + 
with_subelements: + - "{{ bind_zone_domains }}" + - ipv6_networks + - flags: + skip_missing: true + loop_control: + label: "{{ item.1 }}" + when: item.create_reverse_zones is not defined or item.create_reverse_zones + notify: reload bind + tags: bind diff --git a/roles/bertvv.bind/tasks/slave.yml b/roles/bertvv.bind/tasks/slave.yml new file mode 100644 index 0000000..c8efa88 --- /dev/null +++ b/roles/bertvv.bind/tasks/slave.yml @@ -0,0 +1,24 @@ +# roles/bind/tasks/master.yml +# Set up a BIND slave server +--- + +- name: Slave | Main BIND config file (slave) + template: + src: slave_etc_named.conf.j2 + dest: "{{ bind_config }}" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: '0640' + setype: named_conf_t + validate: 'named-checkconf %s' + notify: reload bind + tags: bind + +- name: Slave | ensure directory for cached slaves zones + file: + path: "{{ bind_dir }}/slaves" + state: directory + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: '0770' + setype: named_cache_t diff --git a/roles/bertvv.bind/templates/auth_transfer.j2 b/roles/bertvv.bind/templates/auth_transfer.j2 new file mode 100644 index 0000000..95868ca --- /dev/null +++ b/roles/bertvv.bind/templates/auth_transfer.j2 @@ -0,0 +1,12 @@ + +server {{ ansible_default_ipv4.address }} { + keys { {% for mykey in bind_dns_keys %} {{ mykey.name }}; {% endfor %} }; +}; + +{% for mykey in bind_dns_keys %} +key {{ mykey.name }} { + algorithm {{ mykey.algorithm }}; + secret "{{ mykey.secret }}"; +{% endfor %} +}; + diff --git a/roles/bertvv.bind/templates/bind_zone.j2 b/roles/bertvv.bind/templates/bind_zone.j2 new file mode 100644 index 0000000..04584cf --- /dev/null +++ b/roles/bertvv.bind/templates/bind_zone.j2 @@ -0,0 +1,140 @@ +{# + # First create a dict holding the entire zone information and create a hash + # from it, that it can be compared with subsequent task executions. In this + # way the serial will only be updated if there are some content changes. + #} +{% set _zone_data = {} %} +{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %} +{% set _ = _zone_data.update({'domain': item.name}) %} +{% set _ = _zone_data.update({'mname': item.name_servers|default([])}) %} +{% set _ = _zone_data.update({'aname': item.other_name_servers|default([])}) %} +{% set _ = _zone_data.update({'mail': item.mail_servers|default([])}) %} +{% if item.hostmaster_email is defined %} +{% set _ = _zone_data.update({'rname': (( item.hostmaster_email )) + ('' if (item.hostmaster_email is search('\.')) else ('.' + _zone_data['domain']))}) %} +{% else %} +{% set _ = _zone_data.update({'rname': 'hostmaster.' 
+ _zone_data['domain']}) %} +{% endif %} +{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %} +{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %} +{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %} +{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %} +{% set _ = _zone_data.update({'hosts': item.hosts|default([])}) %} +{% set _ = _zone_data.update({'delegate': item.delegate|default([])}) %} +{% set _ = _zone_data.update({'services': item.services|default([])}) %} +{% set _ = _zone_data.update({'text': item.text|default([])}) %} +{% set _ = _zone_data.update({'naptr': item.naptr|default([])}) %} +{# + # Compare the zone file hash with the current zone data hash and set serial + # accordingly + #} +{% set _zone = {'hash': _zone_data | string | hash('md5')} %} +{% for _result in forward_hashes if _result.name == item.name %} +{% set _hash_serial = _result.hash.split(' ')[2:] %} +{% if _hash_serial and _hash_serial[0] == _zone['hash'] %} +{% set _ = _zone.update({'serial': _hash_serial[1]}) %} +{% else %} +{% set _ = _zone.update({'serial': timestamp.stdout}) %} +{% endif %} +{% endfor %} +{# + # Eventually output the zone data + #} +; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }} +; Zone file for {{ _zone_data['domain'] }} +{{ ansible_managed | comment(decoration='; ') }} + +$ORIGIN {{ _zone_data['domain'] }}. +$TTL {{ _zone_data['ttl'] }} + +{% if _zone_data['mname']|length > 0 %} +@ IN SOA {{ _zone_data['mname']|first }}{% if not _zone_data['mname']|first|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} {{ _zone_data['rname'] }}. ( +{% else %} +@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. ( +{% endif %} + {{ _zone['serial'] }} + {{ _zone_data['refresh'] }} + {{ _zone_data['retry'] }} + {{ _zone_data['expire'] }} + {{ _zone_data['minimum'] }} ) + +{% if _zone_data['mname']|length > 0 %} +{% for ns in _zone_data['mname'] %} + IN NS {{ ns }}{% if not ns|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} + +{% endfor %} +{% else %} + IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}. +{% endif %} +{% for ns in _zone_data['aname'] %} + IN NS {{ ns }}. 
+{% endfor %} + +{% for mail in _zone_data['mail'] %} +{% if loop.first %}@{% else %} {% endif %} IN MX {{ mail.preference}} {{ mail.name }}{% if not mail.name.endswith('.') %}.{{ _zone_data['domain'] }}.{% endif %} +{% endfor %} + +{% if _zone_data['delegate']|length > 0 %} +{% for host in _zone_data['delegate'] %} +{{ host.zone.ljust(20) }} IN NS {{ host.dns }} +{% endfor %} +{% endif %} + +{% if _zone_data['hosts']|length > 0 %} +{% for host in _zone_data['hosts'] %} +{% if host.ip is defined %} +{% if host.ip is string %} +{% if "$GENERATE" not in host.name.upper() %} +{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN A {{ host.ip }} +{% endif %} +{% if "$GENERATE" in host.name.upper() %} +{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN A {{ host.ip }} +{% endif %} +{% else %} +{% for ip in host.ip %} +{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN A {{ ip }} +{% endfor %} +{% endif %} +{% endif %} +{% if host.ipv6 is defined %} +{% if host.ipv6 is string %} +{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN AAAA {{ host.ipv6 }} +{% else %} +{% for ip6 in host.ipv6 %} +{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN AAAA {{ ip6 }} +{% endfor %} +{% endif %} +{% endif %} +{% if host.aliases is defined %} +{% for alias in host.aliases %} +{% if "$GENERATE" not in host.name.upper() %} +{{ (alias.name|default(alias)).ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN {{ alias.type|default('cname')|upper}} {{ host.name }} +{% endif %} +{% if "$GENERATE" in host.name.upper() %} +{{ alias.ljust(20) }} IN CNAME {{ host.name.rsplit(None, 1)[1] }} +{% endif %} +{% endfor %} +{% endif %} +{% if host.sshfp is defined %} +{% for sshfp in host.sshfp %} +{{ host.name.ljust(20) }} IN SSHFP {{ sshfp}} +{% endfor %} +{% endif %} +{% endfor %} +{% else %} +{{ ansible_hostname.ljust(26) }} IN A {{ ansible_default_ipv4.address }} +{% endif %} +{% for service in _zone_data['services'] %} +{{ service.name.ljust(20) }}{{ (service.ttl|string).rjust(6) if service.ttl is defined else ''.ljust(6) }} IN SRV {{ service.priority|default('0') }} {{ service.weight|default('0') }} {{ service.port }} {{ service.target }} +{% endfor %} +{% for text in _zone_data['text'] %} +{% if text.text is string %} +{{ text.name.ljust(20) }} IN TXT "{{ text.text }}" +{% else %} +{% for entry in text.text %} +{{ text.name.ljust(20) }} IN TXT "{{ entry }}" +{% endfor %} +{% endif %} +{% endfor %} +{% for naptr in _zone_data['naptr'] %} +{{ naptr.name.ljust(20) }} IN NAPTR {{ naptr.order|default('100') }} {{ naptr.pref|default('10') }} "{{ naptr.flags }}" "{{ naptr.service }}" "{{ naptr.regex }}" {{ naptr.replacement }} +{% endfor %} diff --git a/roles/bertvv.bind/templates/master_etc_named.conf.j2 b/roles/bertvv.bind/templates/master_etc_named.conf.j2 new file mode 100644 index 0000000..9d9a2b6 --- /dev/null +++ b/roles/bertvv.bind/templates/master_etc_named.conf.j2 @@ -0,0 +1,158 @@ +// +// named.conf +// +{{ ansible_managed | comment('c') }} +// +{% for acl in bind_acls %} +acl "{{ acl.name }}" { +{% for match in acl.match_list %} + {{ match }}; +{% endfor %} +}; + +{% endfor %} +options { + listen-on port 53 { {{ bind_listen_ipv4|join('; ') }}; }; + listen-on-v6 port 53 { {{ bind_listen_ipv6|join('; ') }}; }; + directory "{{ bind_dir }}"; 
+ dump-file "{{ bind_dir }}/data/cache_dump.db"; + statistics-file "{{ bind_dir }}/data/named_stats.txt"; + memstatistics-file "{{ bind_dir }}/data/named_mem_stats.txt"; + allow-query { {{ bind_allow_query|join('; ') }}; }; +{% if bind_acls|length != 0 %} + allow-transfer { {% for acl in bind_acls %}"{{ acl.name }}"; {% endfor %}}; +{% endif %} +{% if bind_check_names is defined %} + check-names {{ bind_check_names }}; +{% endif %} + + recursion {% if bind_recursion %}yes{% else %}no{% endif %}; + {% if bind_recursion %}allow-recursion { {{ bind_allow_recursion|join('; ') }}; }; + {% endif %} +{% if bind_forwarders|length > 0 %}forwarders { {{ bind_forwarders|join('; ') }}; };{% endif %} + {% if bind_forward_only %}forward only;{% endif %} + + rrset-order { order {{ bind_rrset_order }}; }; + + dnssec-enable {{ bind_dnssec_enable }}; + dnssec-validation {{ bind_dnssec_validation }}; + + /* Path to ISC DLV key */ + bindkeys-file "{{ bind_bindkeys_file }}"; + + managed-keys-directory "{{ bind_dir }}/dynamic"; + + pid-file "{{ bind_pid_file }}"; + session-keyfile "{{ bind_session_keyfile }}"; +{% if bind_query_log is defined %} + + querylog yes; +{% endif %} +}; + +{% if bind_statistics_channels %} +statistics-channels { + inet {{ bind_statistics_host }} port {{ bind_statistics_port }} allow { {{ bind_statistics_allow|join('; ') }}; }; +}; +{% endif %} + +logging { + channel default_debug { + file "{{ bind_log }}"; + severity dynamic; + print-time yes; + }; +{% if bind_query_log is defined %} + channel querylog { + {% if bind_query_log.file is defined %} + file "{{ bind_query_log.file }}" versions {{ bind_query_log.versions }} size {{ bind_query_log.size }}; + {% else %} + file "{{ bind_query_log }}" versions 600 size 20m; + {% endif %} + severity dynamic; + print-time yes; + }; + category queries { querylog; }; +{% endif %} +{% if bind_other_logs is defined %} + +{% for log in bind_other_logs %} + channel {{ log.name }} { + file "{{ log.file }}" versions {{ log.versions }} size {{ log.size }}; + severity dynamic; + print-time yes; + }; + category "{{ log.name }}" { "{{ log.name }}"; }; +{% endfor %} +{% endif %} +}; + +{% for file in bind_default_zone_files %} +include "{{ file }}"; +{% endfor %} +{% for file in bind_extra_include_files %} +include "{{ file }}"; +{% endfor %} + +{% if bind_zone_domains is defined %} +{% for bind_zone in bind_zone_domains %} +{% if bind_zone.create_forward_zones is not defined or bind_zone.create_forward_zones %} +zone "{{ bind_zone.name }}" IN { + type master; + file "{{ bind_zone_dir }}/{{ bind_zone.name }}"; + notify yes; +{% if bind_zone.also_notify is defined %} + also-notify { {{ bind_zone.also_notify|join('; ') }}; }; +{% endif %} +{% if bind_zone.allow_update is defined %} + allow-update { {{ bind_zone.allow_update|join('; ') }}; }; +{% else %} + allow-update { none; }; +{% endif %} +{% if bind_zone.delegate is defined %} + forwarders {}; +{% endif %} +}; +{% endif %} + +{% if bind_zone.networks is defined %} +{% if bind_zone.create_reverse_zones is not defined or bind_zone.create_reverse_zones %} +{% for network in bind_zone.networks %} +zone "{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa" IN { + type master; + file "{{ bind_zone_dir }}/{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa"; + notify yes; +{% if bind_zone.also_notify is defined %} + also-notify { {{ bind_zone.also_notify|join('; ') }}; }; +{% endif %} +{% if bind_zone.allow_update is defined %} + allow-update { {{ 
bind_zone.allow_update|join('; ') }}; }; +{% else %} + allow-update { none; }; +{% endif %} +}; +{% endfor %} +{% endif %} +{% endif %} + +{% if bind_zone.ipv6_networks is defined %} +{% if bind_zone.create_reverse_zones is not defined or bind_zone.create_reverse_zones %} +{% for network in bind_zone.ipv6_networks %} +zone "{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):] }}" IN { + type master; + file "{{ bind_zone_dir }}/{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):-1] }}"; + notify yes; +{% if bind_zone.also_notify is defined %} + also-notify { {{ bind_zone.also_notify|join('; ') }}; }; +{% endif %} +{% if bind_zone.allow_update is defined %} + allow-update { {{ bind_zone.allow_update|join('; ') }}; }; +{% else %} + allow-update { none; }; +{% endif %} +}; +{% endfor %} +{% endif %} +{% endif %} +{% endfor %} +{% endif %} diff --git a/roles/bertvv.bind/templates/reverse_zone.j2 b/roles/bertvv.bind/templates/reverse_zone.j2 new file mode 100644 index 0000000..d639afa --- /dev/null +++ b/roles/bertvv.bind/templates/reverse_zone.j2 @@ -0,0 +1,101 @@ +{# + # First create a dict holding the entire zone information and create a hash + # from it, that it can be compared with subsequent task executions. In this + # way the serial will only be updated if there are some content changes. + #} +{% set _zone_data = {} %} +{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %} +{% set _ = _zone_data.update({'domain': item.0.name}) %} +{% set _ = _zone_data.update({'mname': item.0.name_servers|default([])}) %} +{% set _ = _zone_data.update({'aname': item.0.other_name_servers|default([])}) %} +{% if item.0.hostmaster_email is defined %} +{% set _ = _zone_data.update({'rname': (( item.0.hostmaster_email )) + ('' if (item.0.hostmaster_email is search('\.')) else ('.' + _zone_data['domain']))}) %} +{% else %} +{% set _ = _zone_data.update({'rname': 'hostmaster.' + _zone_data['domain']}) %} +{% endif %} +{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %} +{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %} +{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %} +{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %} +{% set _ = _zone_data.update({'hosts': item.0.hosts|default([]) | selectattr('ip', 'defined') | selectattr('ip', 'string') | selectattr('ip', 'search', '^'+item.1) | list}) %} +{% set _ = _zone_data.update({'revip': ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1]))}) %} +{# + # Compare the zone file hash with the current zone data hash and set serial + # accordingly + #} +{% set _zone = {'hash': _zone_data | string | hash('md5')} %} +{% for _result in reverse_hashes if _result.network == item.1 %} +{% set _hash_serial = _result.hash.split(' ')[2:] %} +{% if _hash_serial and _hash_serial[0] == _zone['hash'] %} +{% set _ = _zone.update({'serial': _hash_serial[1]}) %} +{% else %} +{% set _ = _zone.update({'serial': timestamp.stdout}) %} +{% endif %} +{% endfor %} +{# + # Eventually output the zone data + #} +; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }} +; Reverse zone file for {{ _zone_data['domain'] }} +{{ ansible_managed | comment(decoration='; ') }} + +$TTL {{ _zone_data['ttl'] }} +$ORIGIN {{ ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1])) }}.in-addr.arpa. 
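+{# The $ORIGIN above is simply the network prefix with its octets reversed
+   under in-addr.arpa, e.g. a network of 192.168.10 becomes
+   10.168.192.in-addr.arpa. #}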
+ +{% if _zone_data['mname']|length > 0 %} +@ IN SOA {{ _zone_data['mname']|first }}{% if not _zone_data['mname']|first|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} {{ _zone_data['rname'] }}. ( +{% else %} +@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. ( +{% endif %} + {{ _zone['serial'] }} + {{ _zone_data['refresh'] }} + {{ _zone_data['retry'] }} + {{ _zone_data['expire'] }} + {{ _zone_data['minimum'] }} ) + +{% if _zone_data['mname']|length > 0 %} +{% for ns in _zone_data['mname'] %} + IN NS {{ ns }}{% if not ns|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} + +{% endfor %} +{% else %} + IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}. +{% endif %} +{% for ns in _zone_data['aname'] %} + IN NS {{ ns }}. +{% endfor %} + +{% if _zone_data['hosts']|length > 0 %} +{% for host in _zone_data['hosts'] %} +{% if host.ip is defined %} +{% if host.ip == item.1 %} +@ IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. +{% else %} +{% if host.ip is string and host.ip.startswith(item.1) %} +{% if host.name == '@' %} +{{ ('.'.join(host.ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ _zone_data['domain'] }}. +{% else %} +{% if "$GENERATE" not in host.name.upper() %} +{{ ('.'.join(host.ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. +{% endif %} +{% if "$GENERATE" in host.name.upper() %} +{{ host.name.rsplit(None, 1)[0] }} {{ ('.'.join(host.ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }} IN PTR {{ host.name.rsplit(None, 1)[1] }}.{{ _zone_data['domain'] }}. +{% endif %} +{% endif %} +{% else %} +{% for ip in host.ip %} +{% if ip.startswith(item.1) %} +{{ ('.'.join(ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ _zone_data['domain'] }}. +{% if host.name == '@' %} +{% else %} +{{ ('.'.join(ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. +{% endif %} +{% endif %} +{% endfor %} +{% endif %} +{% endif %} +{% endif %} +{% endfor %} +{% else %} +{{ ('.'.join(ansible_default_ipv4.address.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ ansible_hostname }}.{{ _zone_data['domain'] }}. +{% endif %} diff --git a/roles/bertvv.bind/templates/reverse_zone_ipv6.j2 b/roles/bertvv.bind/templates/reverse_zone_ipv6.j2 new file mode 100644 index 0000000..2a1be82 --- /dev/null +++ b/roles/bertvv.bind/templates/reverse_zone_ipv6.j2 @@ -0,0 +1,96 @@ +{# + # First create a dict holding the entire zone information and create a hash + # from it, that it can be compared with subsequent task executions. In this + # way the serial will only be updated if there are some content changes. 
+ #} +{% set _zone_data = {} %} +{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %} +{% set _ = _zone_data.update({'domain': item.0.name}) %} +{% set _ = _zone_data.update({'mname': item.0.name_servers|default([])}) %} +{% set _ = _zone_data.update({'aname': item.0.other_name_servers|default([])}) %} +{% if item.0.hostmaster_email is defined %} +{% set _ = _zone_data.update({'rname': (( item.0.hostmaster_email )) + ('' if (item.0.hostmaster_email is search('\.')) else ('.' + _zone_data['domain']))}) %} +{% else %} +{% set _ = _zone_data.update({'rname': 'hostmaster.' + _zone_data['domain']}) %} +{% endif %} +{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %} +{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %} +{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %} +{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %} +{% set _ = _zone_data.update({'hosts': item.0.hosts|default([]) | selectattr('ipv6','defined') | selectattr('ipv6','string') | selectattr('ipv6', 'search', '^'+item.1|regex_replace('/.*$','')) | list }) %} +{% set _ = _zone_data.update({'revip': (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):] }) %} +{# + # Compare the zone file hash with the current zone data hash and set serial + # accordingly + #} +{% set _zone = {'hash': _zone_data | string | hash('md5')} %} +{% for _result in reverse_hashes_ipv6 if _result.network == item.1 %} +{% set _hash_serial = _result.hash.split(' ')[2:] %} +{% if _hash_serial and _hash_serial[0] == _zone['hash'] %} +{% set _ = _zone.update({'serial': _hash_serial[1]}) %} +{% else %} +{% set _ = _zone.update({'serial': timestamp.stdout}) %} +{% endif %} +{% endfor %} +{# + # Eventually output the zone data + #} +; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }} +; Reverse zone file for {{ _zone_data['domain'] }} +{{ ansible_managed | comment(decoration='; ') }} + +$TTL {{ _zone_data['ttl'] }} +$ORIGIN {{ (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):] }} + +{% if _zone_data['mname']|length > 0 %} +@ IN SOA {{ _zone_data['mname']|first }}{% if not _zone_data['mname']|first|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} {{ _zone_data['rname'] }}. ( +{% else %} +@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. ( +{% endif %} + {{ _zone['serial'] }} + {{ _zone_data['refresh'] }} + {{ _zone_data['retry'] }} + {{ _zone_data['expire'] }} + {{ _zone_data['minimum'] }} ) + +{% if _zone_data['mname']|length > 0 %} +{% for ns in _zone_data['mname'] %} + IN NS {{ ns }}{% if not ns|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} + +{% endfor %} +{% else %} + IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}. +{% endif %} +{% for ns in _zone_data['aname'] %} + IN NS {{ ns }}. +{% endfor %} + +{% if _zone_data['hosts']|length > 0 %} +{% for host in _zone_data['hosts'] %} +{% if host.ipv6 is defined %} +{% if host.ipv6 == item.1 %} +@ IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. +{% else %} +{% if host.ipv6 is string and host.ipv6.startswith(item.1|regex_replace('/.*$','')) %} +{% if host.name == '@' %} +{{ host.ipv6 | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ _zone_data['domain'] }}. +{% else %} +{{ host.ipv6 | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. 
+{% endif %} +{% else %} +{% for ip in host.ipv6 %} +{% if ip.startswith(item.1|regex_replace('/.*$','')) %} +{{ ip | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ _zone_data['domain'] }}. +{% if host.name == '@' %} +{% else %} +{{ ip | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. +{% endif %} +{% endif %} +{% endfor %} +{% endif %} +{% endif %} +{% endif %} +{% endfor %} +{% else %} +{{ ansible_default_ipv6.address | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ ansible_hostname }}.{{ _zone_data['domain'] }}. +{% endif %} diff --git a/roles/bertvv.bind/templates/slave_etc_named.conf.j2 b/roles/bertvv.bind/templates/slave_etc_named.conf.j2 new file mode 100644 index 0000000..4386723 --- /dev/null +++ b/roles/bertvv.bind/templates/slave_etc_named.conf.j2 @@ -0,0 +1,120 @@ +// +// named.conf +// +{{ ansible_managed | comment('c') }} +// +{% for acl in bind_acls %} +acl "{{ acl.name }}" { +{% for match in acl.match_list %} + {{ match }}; +{% endfor %} +}; + +{% endfor %} +options { + listen-on port 53 { {{ bind_listen_ipv4|join(';') }}; }; + listen-on-v6 port 53 { {{ bind_listen_ipv6|join(';') }}; }; + directory "{{ bind_dir }}"; + dump-file "{{ bind_dir }}/data/cache_dump.db"; + statistics-file "{{ bind_dir }}/data/named_stats.txt"; + memstatistics-file "{{ bind_dir }}/data/named_mem_stats.txt"; + allow-query { {{ bind_allow_query|join(';') }}; }; +{% if bind_acls|length != 0 %} + allow-transfer { {% for acl in bind_acls %}"{{ acl.name }}"; {% endfor %}}; +{% endif %} + + recursion {% if bind_recursion %}yes{% else %}no{% endif %}; + {% if bind_recursion %}allow-recursion { {{ bind_allow_recursion|join('; ') }}; }; + {% endif %} +{% if bind_forwarders|length > 0 %}forwarders { {{ bind_forwarders|join('; ') }}; };{% endif %} + {% if bind_forward_only %}forward only;{% endif %} + + rrset-order { order {{ bind_rrset_order }}; }; + + dnssec-enable {{ bind_dnssec_enable }}; + dnssec-validation {{ bind_dnssec_validation }}; + + /* Path to ISC DLV key */ + bindkeys-file "{{ bind_bindkeys_file }}"; + + managed-keys-directory "{{ bind_dir }}/dynamic"; + + pid-file "{{ bind_pid_file }}"; + session-keyfile "{{ bind_session_keyfile }}"; + +{% if bind_query_log is defined %} + querylog yes; +{% endif %} +}; + +{% if bind_statistics_channels %} +statistics-channels { + inet {{ bind_statistics_host }} port {{ bind_statistics_port }} allow { {{ bind_statistics_allow|join('; ') }}; }; +}; +{% endif %} + +logging { + channel default_debug { + file "{{ bind_log }}"; + severity dynamic; + print-time yes; + }; +{% if bind_query_log is defined %} + channel querylog { + {% if bind_query_log.file is defined %} + file "{{ bind_query_log.file }}" versions {{ bind_query_log.versions }} size {{ bind_query_log.size }}; + {% else %} + file "{{ bind_query_log }}" versions 600 size 20m; + {% endif %} + severity dynamic; + print-time yes; + }; + category queries { querylog; }; +{% endif %} +}; + +{% for file in bind_default_zone_files %} +include "{{ file }}"; +{% endfor %} +{% for file in bind_extra_include_files %} +include "{{ file }}"; +{% endfor %} + +{% if bind_zone_domains is defined %} +{% for bind_zone in bind_zone_domains %} +{% if bind_zone.create_forward_zones is not defined or bind_zone.create_forward_zones %} +zone "{{ bind_zone.name }}" IN { + type slave; + masters { {{ bind_zone_master_server_ip }}; }; 
+ file "{{ bind_slave_dir }}/{{ bind_zone.name }}"; +{% if bind_zone.delegate is defined %} + forwarders {}; +{% endif %} +}; +{% endif %} + +{% if bind_zone.create_reverse_zones is not defined or bind_zone.create_reverse_zones %} +{% if bind_zone.networks is defined %} +{% for network in bind_zone.networks %} +zone "{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa" IN { + type slave; + masters { {{ bind_zone_master_server_ip }}; }; + file "{{ bind_slave_dir }}/{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa"; +}; +{% endfor %} +{% endif %} +{% endif %} + +{% if bind_zone.create_reverse_zones is not defined or bind_zone.create_reverse_zones %} +{% if bind_zone.ipv6_networks is defined %} +{% for network in bind_zone.ipv6_networks %} +zone "{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):] }}" IN { + type slave; + masters { {{ bind_zone_master_server_ip }}; }; + file "{{ bind_slave_dir }}/{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):-1] }}"; +}; +{% endfor %} +{% endif %} +{% endif %} +{% endfor %} +{% endif %} diff --git a/roles/bertvv.bind/vars/Archlinux.yml b/roles/bertvv.bind/vars/Archlinux.yml new file mode 100644 index 0000000..016fd0d --- /dev/null +++ b/roles/bertvv.bind/vars/Archlinux.yml @@ -0,0 +1,32 @@ +# roles/bind/vars/RedHat.yml +--- + +bind_packages: + - python-netaddr + - bind + - bind-tools + +bind_service: named + +# Main config file +bind_config: /etc/named.conf + +# Zone files included in the installation +bind_default_zone_files: [] + +# Directory with run-time stuff +bind_dir: /var/named +bind_conf_dir: "{{ bind_dir }}" +auth_file: "auth_transfer.conf" +bind_auth_file: "{{ bind_conf_dir }}/{{ auth_file }}" + +bind_owner: root +bind_group: named + +bind_bindkeys_file: "/etc/named.iscdlv.key" +bind_pid_file: "/run/named/named.pid" +bind_session_keyfile: "/run/named/session.key" + +# Custom location for zone files +bind_zone_dir: "{{ bind_dir }}" +bind_slave_dir: "{{ bind_dir }}/slaves" diff --git a/roles/bertvv.bind/vars/Debian.yml b/roles/bertvv.bind/vars/Debian.yml new file mode 100644 index 0000000..066d99c --- /dev/null +++ b/roles/bertvv.bind/vars/Debian.yml @@ -0,0 +1,33 @@ +# roles/bind/vars/Debian.yml +--- + +bind_packages: + - python-netaddr + - bind9 + - bind9utils + +bind_service: bind9 + +# Main config file +bind_config: /etc/bind/named.conf + +# Localhost zone +bind_default_zone_files: + - /etc/bind/named.conf.default-zones + +# Directory with run-time stuff +bind_dir: /var/cache/bind +bind_conf_dir: "/etc/bind" +auth_file: "auth_transfer.conf" +bind_auth_file: "{{ bind_conf_dir }}/{{ auth_file }}" + +bind_owner: root +bind_group: bind + +bind_bindkeys_file: "/etc/named.iscdlv.key" +bind_pid_file: "/run/named/named.pid" +bind_session_keyfile: "/run/named/session.key" + +# Custom location for master zone files +bind_zone_dir: "{{ bind_dir }}" +bind_slave_dir: "{{ bind_dir }}/slaves" diff --git a/roles/bertvv.bind/vars/FreeBSD.yml b/roles/bertvv.bind/vars/FreeBSD.yml new file mode 100644 index 0000000..18c9035 --- /dev/null +++ b/roles/bertvv.bind/vars/FreeBSD.yml @@ -0,0 +1,32 @@ +# roles/bind/vars/Debian.yml +--- + +bind_packages: + - py36-netaddr + - bind911 + +bind_service: named + +# Main config file +bind_config: /usr/local/etc/namedb/named.conf + +# Localhost zone +bind_default_zone_files: + - /usr/local/etc/namedb/named.conf.default-zones + +# Directory with run-time stuff +bind_dir: /var/cache/named +bind_conf_dir: 
"/usr/local/etc/namedb/" +auth_file: "auth_transfer.conf" +bind_auth_file: "{{ bind_conf_dir }}/{{ auth_file }}" + +bind_owner: bind +bind_group: bind + +bind_bindkeys_file: "/usr/local/etc/namedb/bind.keys" +bind_pid_file: "/var/run/named/named.pid" +bind_session_keyfile: "/var/run/named/session.key" + +# Custom location for master zone files +bind_zone_dir: "{{ bind_dir }}/master" +bind_slave_dir: "{{ bind_dir }}/slave" diff --git a/roles/bertvv.bind/vars/RedHat.yml b/roles/bertvv.bind/vars/RedHat.yml new file mode 100644 index 0000000..fb3b56a --- /dev/null +++ b/roles/bertvv.bind/vars/RedHat.yml @@ -0,0 +1,34 @@ +# roles/bind/vars/RedHat.yml +--- + +bind_packages: + - "{{ ( ansible_distribution_major_version == '8' ) | ternary( 'python3-netaddr', 'python-netaddr' ) }}" + - bind + - bind-utils + +bind_service: named + +# Main config file +bind_config: /etc/named.conf + +# Zone files included in the installation +bind_default_zone_files: + - /etc/named.root.key + - /etc/named.rfc1912.zones + +# Directory with run-time stuff +bind_dir: /var/named +bind_conf_dir: "/etc/named" +auth_file: "auth_transfer.conf" +bind_auth_file: "{{ bind_conf_dir }}/{{ auth_file }}" + +bind_owner: root +bind_group: named + +bind_bindkeys_file: "/etc/named.iscdlv.key" +bind_pid_file: "/run/named/named.pid" +bind_session_keyfile: "/run/named/session.key" + +# Custom location for master zone files +bind_zone_dir: "{{ bind_dir }}" +bind_slave_dir: "{{ bind_dir }}/slaves" diff --git a/roles/geerlingguy.gitlab/.github/FUNDING.yml b/roles/geerlingguy.gitlab/.github/FUNDING.yml new file mode 100644 index 0000000..96b4938 --- /dev/null +++ b/roles/geerlingguy.gitlab/.github/FUNDING.yml @@ -0,0 +1,4 @@ +# These are supported funding model platforms +--- +github: geerlingguy +patreon: geerlingguy diff --git a/roles/geerlingguy.gitlab/.github/stale.yml b/roles/geerlingguy.gitlab/.github/stale.yml new file mode 100644 index 0000000..c7ff127 --- /dev/null +++ b/roles/geerlingguy.gitlab/.github/stale.yml @@ -0,0 +1,56 @@ +# Configuration for probot-stale - https://github.com/probot/stale + +# Number of days of inactivity before an Issue or Pull Request becomes stale +daysUntilStale: 90 + +# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. +# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. +daysUntilClose: 30 + +# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) +onlyLabels: [] + +# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable +exemptLabels: + - pinned + - security + - planned + +# Set to true to ignore issues in a project (defaults to false) +exemptProjects: false + +# Set to true to ignore issues in a milestone (defaults to false) +exemptMilestones: false + +# Set to true to ignore issues with an assignee (defaults to false) +exemptAssignees: false + +# Label to use when marking as stale +staleLabel: stale + +# Limit the number of actions per hour, from 1-30. Default is 30 +limitPerRun: 30 + +pulls: + markComment: |- + This pull request has been marked 'stale' due to lack of recent activity. If there is no further activity, the PR will be closed in another 30 days. Thank you for your contribution! + + Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark pull requests as stale. 
+ + unmarkComment: >- + This pull request is no longer marked for closure. + + closeComment: >- + This pull request has been closed due to inactivity. If you feel this is in error, please reopen the pull request or file a new PR with the relevant details. + +issues: + markComment: |- + This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! + + Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale. + + unmarkComment: >- + This issue is no longer marked for closure. + + closeComment: >- + This issue has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details. diff --git a/roles/geerlingguy.gitlab/.gitignore b/roles/geerlingguy.gitlab/.gitignore new file mode 100644 index 0000000..f56f5b5 --- /dev/null +++ b/roles/geerlingguy.gitlab/.gitignore @@ -0,0 +1,3 @@ +*.retry +*/__pycache__ +*.pyc diff --git a/roles/geerlingguy.gitlab/.travis.yml b/roles/geerlingguy.gitlab/.travis.yml new file mode 100644 index 0000000..9409aa5 --- /dev/null +++ b/roles/geerlingguy.gitlab/.travis.yml @@ -0,0 +1,32 @@ +--- +language: python +services: docker + +env: + global: + - ROLE_NAME: gitlab + matrix: + - MOLECULE_DISTRO: centos7 + - MOLECULE_DISTRO: ubuntu1804 + - MOLECULE_DISTRO: debian9 + - MOLECULE_DISTRO: centos7 + MOLECULE_PLAYBOOK: playbook-version.yml + - MOLECULE_DISTRO: ubuntu1804 + MOLECULE_PLAYBOOK: playbook-version.yml + +install: + # Install test dependencies. + - pip install molecule yamllint ansible-lint docker + +before_script: + # Use actual Ansible Galaxy role name for the project directory. + - cd ../ + - mv ansible-role-$ROLE_NAME geerlingguy.$ROLE_NAME + - cd geerlingguy.$ROLE_NAME + +script: + # Run tests. + - molecule test + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/roles/geerlingguy.gitlab/.yamllint b/roles/geerlingguy.gitlab/.yamllint new file mode 100644 index 0000000..d43c306 --- /dev/null +++ b/roles/geerlingguy.gitlab/.yamllint @@ -0,0 +1,6 @@ +--- +extends: default +rules: + line-length: + max: 140 + level: warning diff --git a/roles/geerlingguy.gitlab/LICENSE b/roles/geerlingguy.gitlab/LICENSE new file mode 100644 index 0000000..4275cf3 --- /dev/null +++ b/roles/geerlingguy.gitlab/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2017 Jeff Geerling + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/roles/geerlingguy.gitlab/README.md b/roles/geerlingguy.gitlab/README.md new file mode 100644 index 0000000..324167c --- /dev/null +++ b/roles/geerlingguy.gitlab/README.md @@ -0,0 +1,179 @@ +# Ansible Role: GitLab + +[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-gitlab.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-gitlab) + +Installs GitLab, a Ruby-based front-end to Git, on any RedHat/CentOS or Debian/Ubuntu linux system. + +GitLab's default administrator account details are below; be sure to login immediately after installation and change these credentials! + + root + 5iveL!fe + +## Requirements + +None. + +## Role Variables + +Available variables are listed below, along with default values (see `defaults/main.yml`): + + gitlab_domain: gitlab + gitlab_external_url: "https://{{ gitlab_domain }}/" + +The domain and URL at which the GitLab instance will be accessible. This is set as the `external_url` configuration setting in `gitlab.rb`, and if you want to run GitLab on a different port (besides 80/443), you can specify the port here (e.g. `https://gitlab:8443/` for port 8443). + + gitlab_git_data_dir: "/var/opt/gitlab/git-data" + +The `gitlab_git_data_dir` is the location where all the Git repositories will be stored. You can use a shared drive or any path on the system. + + gitlab_backup_path: "/var/opt/gitlab/backups" + +The `gitlab_backup_path` is the location where Gitlab backups will be stored. + + gitlab_edition: "gitlab-ce" + +The edition of GitLab to install. Usually either `gitlab-ce` (Community Edition) or `gitlab-ee` (Enterprise Edition). + + gitlab_version: '' + +If you'd like to install a specific version, set the version here (e.g. `11.4.0-ce.0` for Debian/Ubuntu, or `11.4.0-ce.0.el7` for RedHat/CentOS). + + gitlab_config_template: "gitlab.rb.j2" + +The `gitlab.rb.j2` template packaged with this role is meant to be very generic and serve a variety of use cases. However, many people would like to have a much more customized version, and so you can override this role's default template with your own, adding any additional customizations you need. To do this: + + - Create a `templates` directory at the same level as your playbook. + - Create a `templates\mygitlab.rb.j2` file (just choose a different name from the default template). + - Set the variable like: `gitlab_config_template: mygitlab.rb.j2` (with the name of your custom template). + +### SSL Configuration. + + gitlab_redirect_http_to_https: "true" + gitlab_ssl_certificate: "/etc/gitlab/ssl/{{ gitlab_domain }}.crt" + gitlab_ssl_certificate_key: "/etc/gitlab/ssl/{{ gitlab_domain }}.key" + +GitLab SSL configuration; tells GitLab to redirect normal http requests to https, and the path to the certificate and key (the default values will work for automatic self-signed certificate creation, if set to `true` in the variable below). + + # SSL Self-signed Certificate Configuration. + gitlab_create_self_signed_cert: "true" + gitlab_self_signed_cert_subj: "/C=US/ST=Missouri/L=Saint Louis/O=IT/CN={{ gitlab_domain }}" + +Whether to create a self-signed certificate for serving GitLab over a secure connection. Set `gitlab_self_signed_cert_subj` according to your locality and organization. + + # LDAP Configuration. 
+ gitlab_ldap_enabled: "false" + gitlab_ldap_host: "example.com" + gitlab_ldap_port: "389" + gitlab_ldap_uid: "sAMAccountName" + gitlab_ldap_method: "plain" + gitlab_ldap_bind_dn: "CN=Username,CN=Users,DC=example,DC=com" + gitlab_ldap_password: "password" + gitlab_ldap_base: "DC=example,DC=com" + +GitLab LDAP configuration; if `gitlab_ldap_enabled` is `true`, the rest of the configuration will tell GitLab how to connect to an LDAP server for centralized authentication. + + gitlab_dependencies: + - openssh-server + - postfix + - curl + - openssl + - tzdata + +Dependencies required by GitLab for certain functionality, like timezone support or email. You may change this list in your own playbook if, for example, you would like to install `exim` instead of `postfix`. + + gitlab_time_zone: "UTC" + +Gitlab timezone. + + gitlab_backup_keep_time: "604800" + +How long to keep local backups (useful if you don't want backups to fill up your drive!). + + gitlab_download_validate_certs: true + +Controls whether to validate certificates when downloading the GitLab installation repository install script. + + # Email configuration. + gitlab_email_enabled: "false" + gitlab_email_from: "gitlab@example.com" + gitlab_email_display_name: "Gitlab" + gitlab_email_reply_to: "gitlab@example.com" + +Gitlab system mail configuration. Disabled by default; set `gitlab_email_enabled` to `true` to enable, and make sure you enter valid from/reply-to values. + + # SMTP Configuration + gitlab_smtp_enable: "false" + gitlab_smtp_address: "smtp.server" + gitlab_smtp_port: "465" + gitlab_smtp_user_name: "smtp user" + gitlab_smtp_password: "smtp password" + gitlab_smtp_domain: "example.com" + gitlab_smtp_authentication: "login" + gitlab_smtp_enable_starttls_auto: "true" + gitlab_smtp_tls: "false" + gitlab_smtp_openssl_verify_mode: "none" + gitlab_smtp_ca_path: "/etc/ssl/certs" + gitlab_smtp_ca_file: "/etc/ssl/certs/ca-certificates.crt" + +Gitlab SMTP configuration; of `gitlab_smtp_enable` is `true`, the rest of the configuration will tell GitLab how to send mails using an smtp server. + + gitlab_nginx_listen_port: 8080 + +If you are running GitLab behind a reverse proxy, you may want to override the listen port to something else. + + gitlab_nginx_listen_https: "false" + +If you are running GitLab behind a reverse proxy, you may wish to terminate SSL at another proxy server or load balancer + + gitlab_nginx_ssl_verify_client: "" + gitlab_nginx_ssl_client_certificate: "" + +If you want to enable [2-way SSL Client Authentication](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-2-way-ssl-client-authentication), set `gitlab_nginx_ssl_verify_client` and add a path to the client certificate in `gitlab_nginx_ssl_client_certificate`. + + gitlab_default_theme: 2 + +GitLab includes a number of themes, and you can set the default for all users with this variable. See [the included GitLab themes to choose a default](https://github.com/gitlabhq/gitlabhq/blob/master/config/gitlab.yml.example#L79-L85). 
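+As an illustration only (the hostname, timezone and theme number below are example values, not defaults), several of the settings above could be combined in a playbook's `vars` or a `group_vars` file like this:
+
+    gitlab_domain: git.example.com
+    gitlab_external_url: "https://{{ gitlab_domain }}/"
+    gitlab_time_zone: "Europe/Brussels"
+    gitlab_default_theme: 4
+    gitlab_email_enabled: "true"
+    gitlab_email_from: "gitlab@git.example.com"
+    gitlab_email_display_name: "GitLab"
+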
+ + gitlab_extra_settings: + - gitlab_rails: + - key: "trusted_proxies" + value: "['foo', 'bar']" + - key: "env" + type: "plain" + value: | + { + "http_proxy" => "https://my_http_proxy.company.com:3128", + "https_proxy" => "https://my_http_proxy.company.com:3128", + "no_proxy" => "localhost, 127.0.0.1, company.com" + } + - unicorn: + - key: "worker_processes" + value: 5 + - key: "pidfile" + value: "/opt/gitlab/var/unicorn/unicorn.pid" + +Gitlab have many other settings ([see official documentation](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-config-template/gitlab.rb.template)), and you can add them with this special variable `gitlab_extra_settings` with the concerned setting and the `key` and `value` keywords. + +## Dependencies + +None. + +## Example Playbook + + - hosts: servers + vars_files: + - vars/main.yml + roles: + - { role: geerlingguy.gitlab } + +*Inside `vars/main.yml`*: + + gitlab_external_url: "https://gitlab.example.com/" + +## License + +MIT / BSD + +## Author Information + +This role was created in 2014 by [Jeff Geerling](http://jeffgeerling.com/), author of [Ansible for DevOps](http://ansiblefordevops.com/). diff --git a/roles/geerlingguy.gitlab/defaults/main.yml b/roles/geerlingguy.gitlab/defaults/main.yml new file mode 100644 index 0000000..0499186 --- /dev/null +++ b/roles/geerlingguy.gitlab/defaults/main.yml @@ -0,0 +1,75 @@ +--- +# General config. +gitlab_domain: gitlab +gitlab_external_url: "https://{{ gitlab_domain }}/" +gitlab_git_data_dir: "/var/opt/gitlab/git-data" +gitlab_edition: "gitlab-ce" +gitlab_version: '' +gitlab_backup_path: "/var/opt/gitlab/backups" +gitlab_config_template: "gitlab.rb.j2" + +# SSL Configuration. +gitlab_redirect_http_to_https: "true" +gitlab_ssl_certificate: "/etc/gitlab/ssl/{{ gitlab_domain }}.crt" +gitlab_ssl_certificate_key: "/etc/gitlab/ssl/{{ gitlab_domain }}.key" + +# SSL Self-signed Certificate Configuration. +gitlab_create_self_signed_cert: "true" +gitlab_self_signed_cert_subj: "/C=US/ST=Missouri/L=Saint Louis/O=IT/CN={{ gitlab_domain }}" + +# LDAP Configuration. +gitlab_ldap_enabled: "false" +gitlab_ldap_host: "example.com" +gitlab_ldap_port: "389" +gitlab_ldap_uid: "sAMAccountName" +gitlab_ldap_method: "plain" +gitlab_ldap_bind_dn: "CN=Username,CN=Users,DC=example,DC=com" +gitlab_ldap_password: "password" +gitlab_ldap_base: "DC=example,DC=com" + +# SMTP Configuration +gitlab_smtp_enable: "false" +gitlab_smtp_address: "smtp.server" +gitlab_smtp_port: "465" +gitlab_smtp_user_name: "smtp user" +gitlab_smtp_password: "smtp password" +gitlab_smtp_domain: "example.com" +gitlab_smtp_authentication: "login" +gitlab_smtp_enable_starttls_auto: "true" +gitlab_smtp_tls: "false" +gitlab_smtp_openssl_verify_mode: "none" +gitlab_smtp_ca_path: "/etc/ssl/certs" +gitlab_smtp_ca_file: "/etc/ssl/certs/ca-certificates.crt" + +# 2-way SSL Client Authentication support. +gitlab_nginx_ssl_verify_client: "" +gitlab_nginx_ssl_client_certificate: "" + +# Probably best to leave this as the default, unless doing testing. +gitlab_restart_handler_failed_when: 'gitlab_restart.rc != 0' + +# Dependencies. +gitlab_dependencies: + - openssh-server + - postfix + - curl + - openssl + - tzdata + +# Optional settings. +gitlab_time_zone: "UTC" +gitlab_backup_keep_time: "604800" +gitlab_download_validate_certs: true +gitlab_default_theme: '2' + +# Email configuration. 
+gitlab_email_enabled: "false" +gitlab_email_from: "gitlab@example.com" +gitlab_email_display_name: "Gitlab" +gitlab_email_reply_to: "gitlab@example.com" + +# Registry configuration. +gitlab_registry_enable: "false" +gitlab_registry_external_url: "https://gitlab.example.com:4567" +gitlab_registry_nginx_ssl_certificate: "/etc/gitlab/ssl/gitlab.crt" +gitlab_registry_nginx_ssl_certificate_key: "/etc/gitlab/ssl/gitlab.key" diff --git a/roles/geerlingguy.gitlab/handlers/main.yml b/roles/geerlingguy.gitlab/handlers/main.yml new file mode 100644 index 0000000..2470b5f --- /dev/null +++ b/roles/geerlingguy.gitlab/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart gitlab + command: gitlab-ctl reconfigure + register: gitlab_restart + failed_when: gitlab_restart_handler_failed_when | bool diff --git a/roles/geerlingguy.gitlab/meta/.galaxy_install_info b/roles/geerlingguy.gitlab/meta/.galaxy_install_info new file mode 100644 index 0000000..6eaf28c --- /dev/null +++ b/roles/geerlingguy.gitlab/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Wed Jun 24 18:44:32 2020 +version: 3.0.0 diff --git a/roles/geerlingguy.gitlab/meta/main.yml b/roles/geerlingguy.gitlab/meta/main.yml new file mode 100644 index 0000000..ef25250 --- /dev/null +++ b/roles/geerlingguy.gitlab/meta/main.yml @@ -0,0 +1,28 @@ +--- +dependencies: [] + +galaxy_info: + author: geerlingguy + description: GitLab Git web interface + company: "Midwestern Mac, LLC" + license: "license (BSD, MIT)" + min_ansible_version: 2.0 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Debian + versions: + - all + - name: Ubuntu + versions: + - all + galaxy_tags: + - development + - web + - gitlab + - git + - repository + - ci + - integration diff --git a/roles/geerlingguy.gitlab/molecule/default/converge.yml b/roles/geerlingguy.gitlab/molecule/default/converge.yml new file mode 100644 index 0000000..8bbf802 --- /dev/null +++ b/roles/geerlingguy.gitlab/molecule/default/converge.yml @@ -0,0 +1,21 @@ +--- +- name: Converge + hosts: all + become: true + + vars: + gitlab_restart_handler_failed_when: false + + pre_tasks: + - name: Update apt cache. + apt: update_cache=true cache_valid_time=600 + when: ansible_os_family == 'Debian' + changed_when: false + + - name: Remove the .dockerenv file so GitLab Omnibus doesn't get confused. + file: + path: /.dockerenv + state: absent + + roles: + - role: geerlingguy.gitlab diff --git a/roles/geerlingguy.gitlab/molecule/default/molecule.yml b/roles/geerlingguy.gitlab/molecule/default/molecule.yml new file mode 100644 index 0000000..2da47dd --- /dev/null +++ b/roles/geerlingguy.gitlab/molecule/default/molecule.yml @@ -0,0 +1,21 @@ +--- +dependency: + name: galaxy +driver: + name: docker +lint: | + set -e + yamllint . + ansible-lint +platforms: + - name: instance + image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest" + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + pre_build_image: true +provisioner: + name: ansible + playbooks: + converge: ${MOLECULE_PLAYBOOK:-converge.yml} diff --git a/roles/geerlingguy.gitlab/molecule/default/playbook-version.yml b/roles/geerlingguy.gitlab/molecule/default/playbook-version.yml new file mode 100644 index 0000000..f7060c9 --- /dev/null +++ b/roles/geerlingguy.gitlab/molecule/default/playbook-version.yml @@ -0,0 +1,31 @@ +--- +- name: Converge + hosts: all + become: true + + vars: + gitlab_restart_handler_failed_when: false + + pre_tasks: + - name: Update apt cache. 
+ apt: update_cache=true cache_valid_time=600 + when: ansible_os_family == 'Debian' + changed_when: false + + - name: Remove the .dockerenv file so GitLab Omnibus doesn't get confused. + file: + path: /.dockerenv + state: absent + + - name: Set the test GitLab version number for Debian. + set_fact: + gitlab_version: '11.4.0-ce.0' + when: ansible_os_family == 'Debian' + + - name: Set the test GitLab version number for RedHat. + set_fact: + gitlab_version: '11.4.0-ce.0.el7' + when: ansible_os_family == 'RedHat' + + roles: + - role: geerlingguy.gitlab diff --git a/roles/geerlingguy.gitlab/tasks/main.yml b/roles/geerlingguy.gitlab/tasks/main.yml new file mode 100644 index 0000000..b978c93 --- /dev/null +++ b/roles/geerlingguy.gitlab/tasks/main.yml @@ -0,0 +1,81 @@ +--- +- name: Include OS-specific variables. + include_vars: "{{ ansible_os_family }}.yml" + +- name: Check if GitLab configuration file already exists. + stat: path=/etc/gitlab/gitlab.rb + register: gitlab_config_file + +- name: Check if GitLab is already installed. + stat: path=/usr/bin/gitlab-ctl + register: gitlab_file + +# Install GitLab and its dependencies. +- name: Install GitLab dependencies. + package: + name: "{{ gitlab_dependencies }}" + state: present + +- name: Install GitLab dependencies (Debian). + apt: + name: gnupg2 + state: present + when: ansible_os_family == 'Debian' + +- name: Download GitLab repository installation script. + get_url: + url: "{{ gitlab_repository_installation_script_url }}" + dest: /tmp/gitlab_install_repository.sh + validate_certs: "{{ gitlab_download_validate_certs }}" + when: not gitlab_file.stat.exists + +- name: Install GitLab repository. + command: bash /tmp/gitlab_install_repository.sh + register: output + when: not gitlab_file.stat.exists + +- name: Define the Gitlab package name. + set_fact: + gitlab_package_name: "{{ gitlab_edition }}{{ gitlab_package_version_separator }}{{ gitlab_version }}" + when: gitlab_version | default(false) + +- name: Install GitLab + package: + name: "{{ gitlab_package_name | default(gitlab_edition) }}" + state: present + async: 300 + poll: 5 + when: not gitlab_file.stat.exists + +# Start and configure GitLab. Sometimes the first run fails, but after that, +# restarts fix problems, so ignore failures on this run. +- name: Reconfigure GitLab (first run). + command: > + gitlab-ctl reconfigure + creates=/var/opt/gitlab/bootstrapped + failed_when: false + +- name: Create GitLab SSL configuration folder. + file: + path: /etc/gitlab/ssl + state: directory + owner: root + group: root + mode: 0700 + when: gitlab_create_self_signed_cert + +- name: Create self-signed certificate. + command: > + openssl req -new -nodes -x509 -subj "{{ gitlab_self_signed_cert_subj }}" + -days 3650 -keyout {{ gitlab_ssl_certificate_key }} -out {{ gitlab_ssl_certificate }} -extensions v3_ca + creates={{ gitlab_ssl_certificate }} + when: gitlab_create_self_signed_cert + +- name: Copy GitLab configuration file. + template: + src: "{{ gitlab_config_template }}" + dest: /etc/gitlab/gitlab.rb + owner: root + group: root + mode: 0600 + notify: restart gitlab diff --git a/roles/geerlingguy.gitlab/templates/gitlab.rb.j2 b/roles/geerlingguy.gitlab/templates/gitlab.rb.j2 new file mode 100644 index 0000000..80088ea --- /dev/null +++ b/roles/geerlingguy.gitlab/templates/gitlab.rb.j2 @@ -0,0 +1,108 @@ +# The URL through which GitLab will be accessed. 
+external_url "{{ gitlab_external_url }}" + +# gitlab.yml configuration +gitlab_rails['time_zone'] = "{{ gitlab_time_zone }}" +gitlab_rails['backup_keep_time'] = {{ gitlab_backup_keep_time }} +gitlab_rails['gitlab_email_enabled'] = {{ gitlab_email_enabled }} +{% if gitlab_email_enabled == "true" %} +gitlab_rails['gitlab_email_from'] = "{{ gitlab_email_from }}" +gitlab_rails['gitlab_email_display_name'] = "{{ gitlab_email_display_name }}" +gitlab_rails['gitlab_email_reply_to'] = "{{ gitlab_email_reply_to }}" +{% endif %} + +# Default Theme +gitlab_rails['gitlab_default_theme'] = "{{ gitlab_default_theme }}" + +# Whether to redirect http to https. +nginx['redirect_http_to_https'] = {{ gitlab_redirect_http_to_https }} +nginx['ssl_certificate'] = "{{ gitlab_ssl_certificate }}" +nginx['ssl_certificate_key'] = "{{ gitlab_ssl_certificate_key }}" + +# The directory where Git repositories will be stored. +git_data_dirs({"default" => {"path" => "{{ gitlab_git_data_dir }}"} }) + +# The directory where Gitlab backups will be stored +gitlab_rails['backup_path'] = "{{ gitlab_backup_path }}" + +# These settings are documented in more detail at +# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/gitlab.yml.example#L118 +gitlab_rails['ldap_enabled'] = {{ gitlab_ldap_enabled }} +{% if gitlab_ldap_enabled == "true" %} +gitlab_rails['ldap_host'] = '{{ gitlab_ldap_host }}' +gitlab_rails['ldap_port'] = {{ gitlab_ldap_port }} +gitlab_rails['ldap_uid'] = '{{ gitlab_ldap_uid }}' +gitlab_rails['ldap_method'] = '{{ gitlab_ldap_method}}' # 'ssl' or 'plain' +gitlab_rails['ldap_bind_dn'] = '{{ gitlab_ldap_bind_dn }}' +gitlab_rails['ldap_password'] = '{{ gitlab_ldap_password }}' +gitlab_rails['ldap_allow_username_or_email_login'] = true +gitlab_rails['ldap_base'] = '{{ gitlab_ldap_base }}' +{% endif %} + +# GitLab Nginx +## See https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/nginx.md +{% if gitlab_nginx_listen_port is defined %} +nginx['listen_port'] = "{{ gitlab_nginx_listen_port }}" +{% endif %} +{% if gitlab_nginx_listen_https is defined %} +nginx['listen_https'] = {{ gitlab_nginx_listen_https }} +{% endif %} + +# Use smtp instead of sendmail/postfix +# More details and example configuration at +# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/smtp.md +gitlab_rails['smtp_enable'] = {{ gitlab_smtp_enable }} +{% if gitlab_smtp_enable == "true" %} +gitlab_rails['smtp_address'] = '{{ gitlab_smtp_address }}' +gitlab_rails['smtp_port'] = {{ gitlab_smtp_port }} +{% if gitlab_smtp_user_name %} +gitlab_rails['smtp_user_name'] = '{{ gitlab_smtp_user_name }}' +{% endif %} +{% if gitlab_smtp_password %} +gitlab_rails['smtp_password'] = '{{ gitlab_smtp_password }}' +{% endif %} +gitlab_rails['smtp_domain'] = '{{ gitlab_smtp_domain }}' +{% if gitlab_smtp_authentication %} +gitlab_rails['smtp_authentication'] = '{{ gitlab_smtp_authentication }}' +{% endif %} +gitlab_rails['smtp_enable_starttls_auto'] = {{ gitlab_smtp_enable_starttls_auto }} +gitlab_rails['smtp_tls'] = {{ gitlab_smtp_tls }} +gitlab_rails['smtp_openssl_verify_mode'] = '{{ gitlab_smtp_openssl_verify_mode }}' +gitlab_rails['smtp_ca_path'] = '{{ gitlab_smtp_ca_path }}' +gitlab_rails['smtp_ca_file'] = '{{ gitlab_smtp_ca_file }}' +{% endif %} + +# 2-way SSL Client Authentication. 
+{% if gitlab_nginx_ssl_verify_client %} +nginx['ssl_verify_client'] = "{{ gitlab_nginx_ssl_verify_client }}" +{% endif %} +{% if gitlab_nginx_ssl_client_certificate %} +nginx['ssl_client_certificate'] = "{{ gitlab_nginx_ssl_client_certificate }}" +{% endif %} + +# GitLab registry. +registry['enable'] = {{ gitlab_registry_enable }} +{% if gitlab_registry_enable %} +registry_external_url "{{ gitlab_registry_external_url }}" +registry_nginx['ssl_certificate'] = "{{ gitlab_registry_nginx_ssl_certificate }}" +registry_nginx['ssl_certificate_key'] = "{{ gitlab_registry_nginx_ssl_certificate_key }}" +{% endif %} + +{% if gitlab_extra_settings is defined %} +# Extra configuration +{% for extra in gitlab_extra_settings %} +{% for setting in extra %} +{% for kv in extra[setting] %} +{% if (kv.type is defined and kv.type == 'plain') or (kv.value is not string) %} +{{ setting }}['{{ kv.key }}'] = {{ kv.value }} +{% else %} +{{ setting }}['{{ kv.key }}'] = '{{ kv.value }}' +{% endif %} +{% endfor %} +{% endfor %} + +{% endfor %} +{% endif %} + +# To change other settings, see: +# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/README.md#changing-gitlab-yml-settings diff --git a/roles/geerlingguy.gitlab/vars/Debian.yml b/roles/geerlingguy.gitlab/vars/Debian.yml new file mode 100644 index 0000000..5da8774 --- /dev/null +++ b/roles/geerlingguy.gitlab/vars/Debian.yml @@ -0,0 +1,3 @@ +--- +gitlab_package_version_separator: '=' +gitlab_repository_installation_script_url: "https://packages.gitlab.com/install/repositories/gitlab/{{ gitlab_edition }}/script.deb.sh" diff --git a/roles/geerlingguy.gitlab/vars/RedHat.yml b/roles/geerlingguy.gitlab/vars/RedHat.yml new file mode 100644 index 0000000..e4c0e94 --- /dev/null +++ b/roles/geerlingguy.gitlab/vars/RedHat.yml @@ -0,0 +1,3 @@ +--- +gitlab_package_version_separator: '-' +gitlab_repository_installation_script_url: "https://packages.gitlab.com/install/repositories/gitlab/{{ gitlab_edition }}/script.rpm.sh" diff --git a/roles/geerlingguy.java/.github/FUNDING.yml b/roles/geerlingguy.java/.github/FUNDING.yml new file mode 100644 index 0000000..96b4938 --- /dev/null +++ b/roles/geerlingguy.java/.github/FUNDING.yml @@ -0,0 +1,4 @@ +# These are supported funding model platforms +--- +github: geerlingguy +patreon: geerlingguy diff --git a/roles/geerlingguy.java/.github/stale.yml b/roles/geerlingguy.java/.github/stale.yml new file mode 100644 index 0000000..c7ff127 --- /dev/null +++ b/roles/geerlingguy.java/.github/stale.yml @@ -0,0 +1,56 @@ +# Configuration for probot-stale - https://github.com/probot/stale + +# Number of days of inactivity before an Issue or Pull Request becomes stale +daysUntilStale: 90 + +# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. +# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. +daysUntilClose: 30 + +# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) +onlyLabels: [] + +# Issues or Pull Requests with these labels will never be considered stale. 
Set to `[]` to disable +exemptLabels: + - pinned + - security + - planned + +# Set to true to ignore issues in a project (defaults to false) +exemptProjects: false + +# Set to true to ignore issues in a milestone (defaults to false) +exemptMilestones: false + +# Set to true to ignore issues with an assignee (defaults to false) +exemptAssignees: false + +# Label to use when marking as stale +staleLabel: stale + +# Limit the number of actions per hour, from 1-30. Default is 30 +limitPerRun: 30 + +pulls: + markComment: |- + This pull request has been marked 'stale' due to lack of recent activity. If there is no further activity, the PR will be closed in another 30 days. Thank you for your contribution! + + Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark pull requests as stale. + + unmarkComment: >- + This pull request is no longer marked for closure. + + closeComment: >- + This pull request has been closed due to inactivity. If you feel this is in error, please reopen the pull request or file a new PR with the relevant details. + +issues: + markComment: |- + This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! + + Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale. + + unmarkComment: >- + This issue is no longer marked for closure. + + closeComment: >- + This issue has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details. diff --git a/roles/geerlingguy.java/.yamllint b/roles/geerlingguy.java/.yamllint new file mode 100644 index 0000000..a3dbc38 --- /dev/null +++ b/roles/geerlingguy.java/.yamllint @@ -0,0 +1,6 @@ +--- +extends: default +rules: + line-length: + max: 120 + level: warning diff --git a/roles/geerlingguy.java/molecule/default/converge.yml b/roles/geerlingguy.java/molecule/default/converge.yml new file mode 100644 index 0000000..c99558d --- /dev/null +++ b/roles/geerlingguy.java/molecule/default/converge.yml @@ -0,0 +1,13 @@ +--- +- name: Converge + hosts: all + become: true + + pre_tasks: + - name: Update apt cache. + apt: update_cache=true cache_valid_time=600 + when: ansible_os_family == 'Debian' + changed_when: false + + roles: + - role: geerlingguy.java diff --git a/roles/geerlingguy.java/vars/Ubuntu-20.yml b/roles/geerlingguy.java/vars/Ubuntu-20.yml new file mode 100644 index 0000000..bd058c2 --- /dev/null +++ b/roles/geerlingguy.java/vars/Ubuntu-20.yml @@ -0,0 +1,6 @@ +--- +# JDK version options include: +# - java +# - openjdk-11-jdk +__java_packages: + - openjdk-11-jdk diff --git a/roles/lightbulb-ansiblered-deck/README.md b/roles/lightbulb-ansiblered-deck/README.md new file mode 100644 index 0000000..d5f0dc7 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/README.md @@ -0,0 +1,88 @@ +[![GoKEV](http://GoKEV.com/GoKEV200.png)](http://GoKEV.com/) + +
+ +# lightbulb-ansiblered-deck + +This project is the "Ansible Red" deck HTML content. Optionally, a PHP daemon will be started if you run the role with the "phpdaemon" tag. + + +## Example Playbooks +Here's an example of how you could launch this role, deploy the PHP daemon to start on port `php_port`, and also place the PHP redirect in the main `{{ workshop_web_path }}` directory: +
+## both of these example tags default to "never" and will not execute
+## unless you explicitly call them at launch time. Therefore, the default
+## nature of this role will ONLY sync content and not start the PHP web
+## service or place the HTML redirect in web root unless run with these tags:
+
+ansible-playbook -i ec2.hosts GoKEV-lab-provision.yml --tags "phpdaemon,phpredirect"
+
+ +Here's an example of how you could launch this role and not start the PHP daemon (only sync the content). +
+ansible-playbook -i ec2.hosts GoKEV-lab-provision.yml 
+
+ + +## Here's an example of the playbook + +
+---
+- name: Deploy the Ansible Red deck and run it as a daemon
+  hosts: myserver
+
+  vars:
+    workshop_web_path: /ansible-php-content/
+    workshop_image: images/ansible-logo.png
+    workshop_name: Ansible Essentials Workshop
+    workshop_presenter: Demo McDemoson
+    workshop_title: Solution Architect, Red Hat
+    workshop_message: my email and contact info
+    php_port: 8000
+
+  roles:
+    - lightbulb-ansiblered-deck
+
+
+ + +## Stuff still needing to be done +* Inside `index.php` there are variables for the github star and download counts... probably should be converted to vars in defaults or `extra_vars` params +* Certain presenters have requested dynamic ways to exclude certain sections (exclude entire dir `010_topic_that_bores_my_audience`) +* The HTML ID tags can be manually (accidentally) set to the same name. This has commonly bitten me when duplicating a slide as a starting point and then forgetting to change the ID. At some point, these should be dynamically generated per slide. Two slides with the same ID cause an issue where advancing forward / backward can navigate you all the way back to the first instance of the slide and really make an awkward presentation. + * `
` or something similar would make a more unique and less likely duplicated tag. + + +## Easter Eggs +* Not implemented, but capable: Each directory in `html_slides` can have a `labs` directory. Slides in this dir will automatically be presented as LABS at the end of each section (numbered directory) and presented with a gray intro slide when the deck advances past the topic section. + * example: `html_slides/123_some_topic/labs/00_lab1.html` + +* Troubleshooting: See what files are being included by running a dry run: + * `http://ansible.red/deck-ansible/?dryrun` + +* Changing other dynamic aspects of the content via URL: + * `person=shadd` (if that person has a preferences file, context will switch to it. This parallels and overrides the variable determined by a FQDN of `shadd.ansible.red` ) + * `labs` :: `http://ansible.red/deck-ansible/?labs` (no value is required - simply passing this empty variable forces labs-only display mode and will not show the deck) + * `nolabs` :: `http://ansible.red/deck-ansible/?nolabs` (no value is required - opposite of `labs`, this variable forces deck-only display mode and will not show the labs) + * without `labs` or `nolabs` the default behavior is to show labs at the end of each section. + * `force` :: `http://ansible.red/deck-ansible/?force` (no value is required). This can be used on its own or in combination with labs, nolabs, person as: `?person=shadd&nolabs&force`. This parameter shows the status on the HTML output to display the mode. Output is something like: "LAB LIMIT 2 = No Labs, only deck" on the very top white line of the deck throughout the entire presentation. + + +## Notes +* index.php includes a lot of stuff as dynamic files from the html_slides directory +* Any file or dir inside `html_slides` can be excluded by starting it with an underscore + * example: `html_slides/_000_skipping_this_section` + * example: `html_slides/001_not_skipping_this_section/_skipping_this_slide.html` + +* Lab slides can also include some variables defined by parsing the directory names. Take a look at this file for a better understanding and see how the `` php tags are used: + * `html_slides/080_Tasks/_labs/00_Tasks_Labs.html` + * all labs (at this point) are committed with an underscore prepended to the directory name and won't publish until that dir is changed to `labs`. + + +## Author + - Adapted by [Kevin Holmes](http://GoKEV.com/) from the original lightbulb workshop deck, split into dynamic individual slides + +## Changelog + - 2018-11-01 This project was first committed November 1, 2018 by [Kevin Holmes](http://GoKEV.com/). 
+ - 2018-11-02 Added an `index.php` file to deploy to web root, redirecting to `/deck-ansible` directory when called via tag `phpredirect` + diff --git a/roles/lightbulb-ansiblered-deck/defaults/main.yml b/roles/lightbulb-ansiblered-deck/defaults/main.yml new file mode 100644 index 0000000..7448e8f --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/defaults/main.yml @@ -0,0 +1,11 @@ +--- +# defaults file for ansiblered-deck-ansible + +workshop_web_path: /ansible-php-content/ +workshop_image: images/ansible-logo.png +workshop_name: Ansible Essentials Workshop +workshop_presenter: ' ' +workshop_title: ' ' +workshop_message: ' ' + +php_port: 8000 diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/.rsync-filter b/roles/lightbulb-ansiblered-deck/files/deck-ansible/.rsync-filter new file mode 100644 index 0000000..863cda3 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/.rsync-filter @@ -0,0 +1 @@ +- prefs/default.prefs.php diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/ansible-essentials.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/ansible-essentials.html new file mode 100644 index 0000000..d2610bc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/ansible-essentials.html @@ -0,0 +1,1139 @@ + + + + + + + Ansible Essentials Workshop + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+
+ +

Ansible Essentials Workshop

+ +
+
+

What You Will Learn

+

Ansible is capable of handling many powerful automation tasks with the flexibility to adapt to many environments and workflows. With Ansible, users can very quickly get up and running to do real work.

+
    +
  • What is Ansible and The Ansible Way
  • +
  • Installing Ansible
  • +
  • How Ansible Works and its Key Components
  • +
  • Ad-Hoc Commands
  • +
  • Playbook Basics
  • +
  • Reuse and Redistribution of Ansible Content with Roles
  • +
+ +
+
+

What is Ansible?

+

It's a simple automation language that can perfectly describe an IT application infrastructure in Ansible Playbooks.

+

It's an automation engine that runs Ansible Playbooks.

+

Ansible Tower is an enterprise framework for controlling, securing and managing your Ansible automation with a UI and RESTful API.

+ +
+
+

Ansible Is...

+ +
+
+

The Ansible Way

+

+

CROSS PLATFORM – Linux, Windows, UNIX
+ Agentless support for all major OS variants, physical, virtual, cloud and network

+

HUMAN READABLE – YAML
+ Perfectly describe and document every aspect of your application environment

+

PERFECT DESCRIPTION OF APPLICATION
+ Every change can be made by playbooks, ensuring everyone is on the same page

+

VERSION CONTROLLED
+ Playbooks are plain-text. Treat them like code in your existing version control.

+

DYNAMIC INVENTORIES
+ Capture all the servers 100% of the time, regardless of infrastructure, location, etc.

+

ORCHESTRATION THAT PLAYS WELL WITH OTHERS – HP SA, Puppet, Jenkins, RHNSS, etc. Homogenize existing environments by leveraging current toolsets and update mechanisms.

+
+ +
+
+

Ansible: The Language of DevOps

+
+ +

COMMUNICATION IS THE KEY TO DEVOPS.

+

Ansible is the first automation language
that can be read and written across IT.

+

Ansible is the only automation engine
that can automate the entire application lifecycle
and continuous delivery pipeline.

+
+ +
+
+

Batteries Included

+

Ansible comes bundled with hundreds of modules for a wide variety of automation tasks

+
+
+
    +
  • cloud
  • +
  • containers
  • +
  • database
  • +
  • files
  • +
  • messaging
  • +
  • monitoring
  • +
  • network
  • +
+
+
+
    +
  • notifications
  • +
  • packaging
  • +
  • source control
  • +
  • system
  • +
  • testing
  • +
  • utilities
  • +
  • web infrastructure
  • +
+
+
+
+
+ +
+
+

Community

+
+

THE MOST POPULAR OPEN-SOURCE AUTOMATION COMMUNITY ON GITHUB

+
+
+
    +
  • 28,000+ stars & 10,000+ forks on GitHub
  • +
  • 3200+ GitHub Contributors
  • +
  • Over 1300 modules shipped with Ansible
  • +
  • New contributors added every day
  • +
  • 1200+ users on IRC channel
  • +
  • Top 10 open source projects in 2017
  • +
  • World-wide meetups taking place every week
  • +
  • Ansible Galaxy: over 18,000 subscribers
  • +
  • 250,000+ downloads a month
  • +
  • AnsibleFest and Ansible Automates events across the globe
  • +
+

http://ansible.com/community

+
+
+ +
+
+
+ +
+
+

Complete Automation

+ + +
+
+

Use Cases

+ + +
+
+

Installing Ansible

+

+# the most common and preferred way of
+# installation
+$ sudo pip install ansible
+
+# you will need the extras repo configured on
+# CentOS, RHEL, or Scientific Linux
+$ sudo yum install ansible
+
+# you will need the PPA repo configured on
+# Debian or Ubuntu
+$ sudo apt-get install ansible
+          
+ +
+
+
+

Demo Time:
Installing Ansible

+ +
+
+

Workshop:
Installing Ansible

+ +
+
+
+

How Ansible Works

+ + +
+
+

Plays & Playbooks

+ + +
+
+

Modules & Tasks

+ + +
+
+

Plugins

+ + +
+
+

Inventory

+ + +
+
+

Inventory

+ + +
+
+

Modules

+

Modules are bits of code transferred to the target system and executed to satisfy the task declaration.

+
+
+
    +
  • apt/yum
  • +
  • copy
  • +
  • file
  • +
  • get_url
  • +
  • git
  • +
  • ping
  • +
  • debug
  • +
+
+
+
    +
  • service
  • +
  • synchronize
  • +
  • template
  • +
  • uri
  • +
  • user
  • +
  • wait_for
  • +
  • assert
  • +
+
+
+
+
+ +
+
+

Modules Documentation

+
+
+

http://docs.ansible.com/

+
+
+ +
+
+ +
+
+

Modules Documentation

+

+# List out all modules installed
+$ ansible-doc -l
+...
+copy
+cron
+...
+
+# Read documentation for installed module
+$ ansible-doc copy
+> COPY
+
+  The [copy] module copies a file on the local box to remote locations. Use the [fetch] module to copy files from remote locations to the local
+  box. If you need variable interpolation in copied files, use the [template] module.
+
+  * note: This module has a corresponding action plugin.
+
+Options (= is mandatory):
+...
+          
+
+ +
+

Modules: Run Commands

+

If Ansible doesn't have a module that suits your needs, there are the “run command” modules:


+
    +
  • command: Takes the command and executes it on the host. The most secure and predictable.
  • +
  • shell: Executes through a shell like /bin/sh so you can use pipes etc. Be careful.
  • +
  • script: Runs a local script on a remote node after transferring it.
  • +
  • raw: Executes a command without going through the Ansible module subsystem.
  • +
+


NOTE: Unlike standard modules, run commands have no concept of desired state and should only be used as a last resort.

+ +
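+
+For illustration only (not part of the original deck source), the difference between the two most common run-command modules could look like this in a play; the commands shown are arbitrary examples:
+
+- name: List files in /tmp (command does not support shell features)
+  command: ls /tmp
+
+- name: Count httpd processes (pipes require the shell module)
+  shell: ps aux | grep httpd | wc -l
+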
+
+

Inventory

+

Inventory is a collection of hosts (nodes) with associated data and groupings that Ansible can connect to and manage.

+
    +
  • Hosts (nodes)
  • +
  • Groups
  • +
  • Inventory-specific data (variables)
  • +
  • Static or dynamic sources
  • +
+ +
+
+

Static Inventory Example

+

+10.42.0.2
+10.42.0.6
+10.42.0.7
+10.42.0.8
+10.42.0.100
+host.example.com
+          
+ +
+
+

Static Inventory Example

+

+[control]
+control ansible_host=10.42.0.2
+
+[web]
+node-[1:3] ansible_host=10.42.0.[6:8]
+
+[haproxy]
+haproxy ansible_host=10.42.0.100
+
+[all:vars]
+ansible_user=vagrant
+ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key
+          
+ +
+
+
+

Ad-Hoc Commands

+

An ad-hoc command is a single Ansible task that you want to perform quickly but don’t want to save for later.

+ +
+
+

Ad-Hoc Commands: Common Options

+
    +
  • -m MODULE_NAME, --module-name=MODULE_NAME
    Module name to execute the ad-hoc command
  • +
  • -a MODULE_ARGS, --args=MODULE_ARGS
    Module arguments for the ad-hoc command
  • +
  • -b, --become
    Run ad-hoc command with elevated rights such as sudo, the default method
  • +
  • -e EXTRA_VARS, --extra-vars=EXTRA_VARS
    Set additional variables as key=value or YAML/JSON
  • +
  • --version
    Display the version of Ansible
  • +
  • --help
    Display the MAN page for the Ansible tool
  • +
+ +
+
+
+

Ad-Hoc Commands

+

+# check all my inventory hosts are ready to be
+# managed by Ansible
+$ ansible all -m ping
+
+# collect and display the discovered facts
+# for the localhost
+$ ansible localhost -m setup
+
+# run the uptime command on all hosts in the
+# web group
+$ ansible web -m command -a "uptime"
+          
+ +
+
+

Sidebar: Discovered Facts

+

Facts are bits of information derived from examining a host system that are stored as variables for later use in a play.

+

+$ ansible localhost -m setup
+localhost | success >> {
+  "ansible_facts": {
+      "ansible_default_ipv4": {
+          "address": "192.168.1.37",
+          "alias": "wlan0",
+          "gateway": "192.168.1.1",
+          "interface": "wlan0",
+          "macaddress": "c4:85:08:3b:a9:16",
+          "mtu": 1500,
+          "netmask": "255.255.255.0",
+          "network": "192.168.1.0",
+          "type": "ether"
+      },
+          
+ +
+
+
+

Demo Time:
Ad-Hoc Commands

+ +
+
+

Workshop:
Ad-Hoc Commands

+ +
+
+
+

Variables

+

Ansible can work with metadata from various sources and manage their context in the form of variables.

+
    +
  • Command line parameters
  • +
  • Plays and tasks
  • +
  • Files
  • +
  • Inventory
  • +
  • Discovered facts
  • +
  • Roles
  • +
+
+ +
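+
+A minimal sketch (not part of the original deck) of two of these sources working together; the variable name apache_test_message is only an example:
+
+# Inventory-supplied variable (e.g. in a [web:vars] section or host_vars file):
+#   apache_test_message="Hello from inventory"
+
+# Using that inventory variable together with a discovered fact in a task:
+- debug:
+    msg: "{{ apache_test_message }} on {{ ansible_hostname }}"
+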
+

Variable Precedence

+

The order in which the same variable, when defined in multiple sources, overrides the others.

+
+
+
    +
  1. extra vars
  2. +
  3. task vars (only for the task)
  4. +
  5. block vars (only for tasks in block)
  6. +
  7. role and include vars
  8. +
  9. play vars_files
  10. +
  11. play vars_prompt
  12. +
  13. play vars
  14. +
  15. set_facts
  16. +
+
+
+
    +
  1. registered vars
  2. +
  3. host facts
  4. +
  5. playbook host_vars
  6. +
  7. playbook group_vars
  8. +
  9. inventory host_vars
  10. +
  11. inventory group_vars
  12. +
  13. inventory vars
  14. +
  15. role defaults
  16. +
+
+
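+
+A small, hypothetical illustration (not in the deck source) of this ordering, assuming a role named apache that ships a default for http_port:
+
+# roles/apache/defaults/main.yml (role default -- near the bottom of the list)
+http_port: 80
+
+# play vars in the playbook override the role default:
+vars:
+  http_port: 8080
+
+# extra vars passed on the command line override both:
+#   ansible-playbook site.yml -e "http_port=8443"
+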
+ +
+
+

Tasks

+

Tasks are the application of a module to perform a specific unit of work.

+
    +
  • file: A directory should exist
  • +
  • yum: A package should be installed
  • +
  • service: A service should be running
  • +
  • template: Render a configuration file from a template
  • +
  • get_url: Fetch an archive file from a URL
  • +
  • git: Clone a source code repository
  • +
+
+
+

Example Tasks in a Play

+

+tasks:
+- name: Ensure httpd package is present
+  yum:
+    name: httpd
+    state: latest
+
+- name: Ensure latest index.html file is present
+  copy:
+    src: files/index.html
+    dest: /var/www/html/
+
+- name: Restart httpd
+  service:
+    name: httpd
+    state: restarted
+          
+ +
+
+

Handler Tasks

+

Handlers are special tasks that run at the end of a play if notified by another task when a change occurs.

+
If a package gets installed or updated, notify a service restart task that it needs to run.
+ +
+
+

Example Handler Task in a Play

+

+tasks:
+- name: Ensure httpd package is present
+  yum:
+    name: httpd
+    state: latest
+  notify: restart httpd
+
+- name: Ensure latest index.html file is present
+  copy:
+    src: files/index.html
+    dest: /var/www/html/
+
+handlers:
+- name: Restart httpd
+  service:
+    name: httpd
+    state: restarted
+          
+ +
+
+

Plays & Playbooks

+

Plays are ordered sets of tasks to execute against host selections from your inventory. A playbook is a file containing one or more plays.

+ +
+
+

Playbook Example

+

+---
+- name: Ensure apache is installed and started
+  hosts: web
+  become: yes
+  vars:
+    http_port: 80
+
+  tasks:
+  - name: Ensure httpd package is present
+    yum:
+      name: httpd
+      state: latest
+
+  - name: Ensure latest index.html file is present
+    copy:
+      src: files/index.html
+      dest: /var/www/html/
+
+  - name: Ensure httpd is started
+    service:
+      name: httpd
+      state: started
+          
+ +
+ +
+

Human-Meaningful Naming

+

+ ---
+ - name: install and start apache
+   hosts: web
+   become: yes
+   vars:
+     http_port: 80
+
+   tasks:
+   - name: httpd package is present
+     yum:
+       name: httpd
+       state: latest
+
+   - name: latest index.html file is present
+     copy:
+       src: files/index.html
+       dest: /var/www/html/
+
+   - name: httpd is started
+     service:
+        name: httpd
+        state: started
+          
+ +
+ +
+

Host Selector

+

+---
+- name: Ensure apache is installed and started
+  hosts: web
+  become: yes
+  vars:
+    http_port: 80
+
+  tasks:
+  - name: Ensure httpd package is present
+    yum:
+      name: httpd
+      state: latest
+
+  - name: Ensure latest index.html file is present
+    copy:
+      src: files/index.html
+      dest: /var/www/html/
+
+  - name: Ensure httpd is started
+    service:
+      name: httpd
+      state: started
+          
+ +
+
+

Privilege Escalation

+

+---
+- name: Ensure apache is installed and started
+  hosts: web
+  become: yes
+  vars:
+    http_port: 80
+
+  tasks:
+  - name: Ensure httpd package is present
+    yum:
+      name: httpd
+      state: latest
+
+  - name: Ensure latest index.html file is present
+    copy:
+      src: files/index.html
+      dest: /var/www/html/
+
+  - name: Ensure httpd is started
+    service:
+      name: httpd
+      state: started
+          
+ +
+
+

Play Variables

+

+---
+- name: Ensure apache is installed and started
+  hosts: web
+  become: yes
+  vars:
+    http_port: 80
+
+  tasks:
+  - name: Ensure httpd package is present
+    yum:
+      name: httpd
+      state: latest
+
+  - name: Ensure latest index.html file is present
+    copy:
+      src: files/index.html
+      dest: /var/www/html/
+
+  - name: Ensure httpd is started
+    service:
+      name: httpd
+      state: started
+          
+ +
+
+

Tasks

+

+---
+- name: Ensure apache is installed and started
+  hosts: web
+  become: yes
+  vars:
+    http_port: 80
+
+  tasks:
+  - name: Ensure latest httpd package is present
+    yum:
+      name: httpd
+      state: latest
+
+  - name: Ensure latest index.html file is present
+    copy:
+      src: files/index.html
+      dest: /var/www/html/
+
+  - name: Ensure httpd is started
+    service:
+      name: httpd
+      state: started
+          
+ +
+
+
+

Demo Time:
A Simple Playbook Run

+ +
+
+

Workshop:
Your First Playbook

+ +
+
+
+
+

Doing More with Playbooks

+

Here are some more essential playbook features that you can apply:

+
    +
  • Templates
  • +
  • Loops
  • +
  • Conditionals
  • +
  • Tags
  • +
  • Blocks
  • +
+ +
+
+

Templates

+

Ansible embeds the Jinja2 templating engine that can be used to dynamically:

+
    +
  • Set and modify play variables
  • +
  • Apply conditional logic
  • +
  • Generate files such as configurations from variables
  • +
+ +
+
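+
+For illustration only (not part of the original deck): the httpd example used throughout the deck could render its configuration from a template, where templates/httpd.conf.j2 contains a line such as "Listen {{ http_port }}":
+
+- name: Render httpd.conf from a Jinja2 template
+  template:
+    src: templates/httpd.conf.j2
+    dest: /etc/httpd/conf/httpd.conf
+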
+

Loops

+

Loops let one task act on multiple items, such as creating many users, installing many packages, or repeating a polling step until a certain result is reached.

+

+- yum:
+    name: "{{ item }}"
+    state: latest
+  with_items:
+  - httpd
+  - mod_wsgi
+            
+ +
+
+

Conditionals

+

Ansible supports the conditional execution of a task based on the run-time evaluation of a variable, fact, or previous task result.

+

+- yum:
+    name: httpd
+    state: latest
+  when: ansible_os_family == "RedHat"
+            
+ +
+
+

Tags

+

Tags are useful for running a subset of a playbook on demand.

+

+- yum:
+    name: "{{ item }}"
+    state: latest
+  with_items:
+  - httpd
+  - mod_wsgi
+  tags:
+     - packages
+
+- template:
+    src: templates/httpd.conf.j2
+    dest: /etc/httpd/conf/httpd.conf
+  tags:
+     - configuration
+            
+ +
+
+

Blocks

+

Blocks cut down on repetitive task directives, allow for logical grouping of tasks, and even enable in-play error handling.

+

+- block:
+  - yum:
+      name: "{{ item }}"
+      state: latest
+    with_items:
+    - httpd
+    - mod_wsgi
+
+  - template:
+      src: templates/httpd.conf.j2
+      dest: /etc/httpd/conf/httpd.conf
+  when: ansible_os_family == "RedHat"
+            
+
+
+
+
+

Demo Time:
A More Practical Playbook

+ +
+
+

Workshop:
Practical Playbook Development

+ +
+
+
+

Roles

+

Roles are packages of closely related Ansible content that can be shared more easily than plays alone.

+
    +
  • Improves readability and maintainability of complex plays
  • +
  • Eases sharing, reuse and standardization of automation processes
  • +
  • Enables Ansible content to exist independently of playbooks, projects -- even organizations
  • +
  • Provides functional conveniences such as file path resolution and default values
  • +
+ +
+
+

Project with Embedded Roles Example

+

+site.yml
+roles/
+   common/
+     files/
+     templates/
+     tasks/
+     handlers/
+     vars/
+     defaults/
+     meta/
+   apache/
+     files/
+     templates/
+     tasks/
+     handlers/
+     vars/
+     defaults/
+     meta/
+          
+ +
+
+

Project with Embedded Roles Example

+

+# site.yml
+---
+- name: Execute common and apache role
+  hosts: web
+  roles:
+     - common
+     - apache
+          
+ +
+
+

Ansible Galaxy

+

http://galaxy.ansible.com

+

Ansible Galaxy is a hub for finding, reusing and sharing Ansible content.

+

Jump-start your automation project with content contributed and reviewed by the Ansible community.

+ +
+
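+
+As a hedged example (not in the original deck), roles from Galaxy are commonly pulled in through a requirements file; the role listed below is one vendored elsewhere in this repository:
+
+# requirements.yml -- install with: ansible-galaxy install -r requirements.yml
+---
+- src: geerlingguy.java
+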
+
+

Demo Time:
A Playbook Using Roles

+ +
+
+

Workshop:
Your First Roles

+ +
+
+
+

Next Steps

+
    +
  • It's easy to get started
    ansible.com/get-started
  • +
  • Join the Ansible community
    ansible.com/community
  • +
  • Would you like to learn a lot more?
    redhat.com/en/services/training/do407-automation-ansible
  • +
+
+
+
+ + + + + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/GoKEV.css b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/GoKEV.css new file mode 100644 index 0000000..5126190 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/GoKEV.css @@ -0,0 +1,48 @@ +/** + * Some GoKEV-specific styling + */ + +div.transbox { + background-color: #000000; + opacity: 0.7; +/* + max-height: 500px; + max-width: 450; + */ + margin: 5%; + text-align: center; + filter: alpha(opacity=60); /* For IE8 and earlier */ + border: 2px solid white; + +} + +div.transbox p { + margin: 5%; + font-weight: bold; + color: #FFFFFF; + text-align: center; +} + +div.transbox h1 { + display: block; + font-size: 2.8em; + text-align: center; + margin-top: .5em; + margin-bottom: .3em; + margin-left: 0; + margin-right: 0; + font-weight: bold; + color: #FFFFFF; +} + +div.transbox h2 { + display: block; + font-size: 2.8em; + text-align: center; + margin-top: -1em; + margin-bottom: .3em; + margin-left: 0; + margin-right: 0; + font-weight: bold; + color: #FFFFFF; +} diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/ansible.css b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/ansible.css new file mode 100644 index 0000000..3cdee0b --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/ansible.css @@ -0,0 +1,600 @@ +/** + * An ANSIBLE theme for reveal.js presentations, similar + * to the simple theme. + */ +@import url(https://fonts.googleapis.com/css?family=News+Cycle:400,700); +@import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic); + +@import url('https://fonts.googleapis.com/css?family=Open+Sans:400,700'); + +/********************************************* + * GLOBAL STYLES + *********************************************/ + +*{ + outline: none; +} + +body { + background: #fff; + background-color: #fff; +} + +mark{ + background-color: yellow; + color: black; +} + +mark span.hljs-number{ + color: #286669; +} + +mark span.hljs-symbol, +mark span.hljs-string{ + color: #af2e2e; +} + +.reveal { + font-family: "Open Sans", sans-serif; + font-size: 28px; + font-weight: normal; + color: #555; + text-align: left; +} + +.reveal .slides{ + text-align: left; +} + +::selection { + color: #fff; + background: rgba(0, 0, 0, 0.99); + text-shadow: none; +} + +.reveal .slides > section, +.reveal .slides > section > section { + line-height: 1.3; + font-weight: inherit; +} + + +.text-center{ + text-align: center; +} + +.text-small{ + font-size: 70% !important; +} + +/********************************************* + * HEADERS + *********************************************/ +.reveal h1, +.reveal h2, +.reveal h3, +.reveal h4, +.reveal h5, +.reveal h6 { + margin: 0 0 20px 0; + color: #cc0000; + font-family: "Open Sans", Impact, sans-serif; + font-weight: bold; + letter-spacing: normal; + text-transform: none; + text-shadow: none; + margin-bottom: 0.6em; + word-wrap: break-word; } + +.reveal h1 { + font-size: 2em; +} + +.cover .reveal h1{ + margin: 1em 0; +} + +.reveal h2 { + font-size: 1.35em; } + +.reveal h3 { + font-size: 1.2em; + color: #555; +} + +.reveal h4 { + font-size: 1em; + color: #555; +} + +.reveal h1 { + text-shadow: none; } + +/********************************************* + * OTHER + *********************************************/ +.reveal p { + margin: 20px 0 0.5em; + line-height: 1.3; } + +/* Ensure certain elements are never larger than the slide itself */ +.reveal img, +.reveal video, +.reveal iframe { + max-width: 
95%; + max-height: 95%; } + +.reveal strong, +.reveal b { + font-weight: bold; } + +.reveal em { + font-style: italic; } + +.reveal ol, +.reveal dl, +.reveal ul { + /*display: inline-block;*/ + text-align: left; + margin: 0 0 0 1em; } + +.reveal li{ + margin: 0 0 0.3em; +} + +.reveal ol { + list-style-type: decimal; } + +.reveal ul { + list-style-type: disc; } + +.reveal ul ul { + list-style-type: square; } + +.reveal ul ul ul { + list-style-type: circle; } + +.reveal ul ul, +.reveal ul ol, +.reveal ol ol, +.reveal ol ul { + display: block; + margin-left: 40px; } + +.reveal dt { + font-weight: bold; } + +.reveal dd { + margin-left: 40px; } + +.reveal q, +.reveal blockquote { + quotes: none; } + +.reveal blockquote { + display: block; + position: relative; + width: 70%; + margin: 20px auto; + padding: 5px; + font-style: italic; + background: rgba(255, 255, 255, 0.05); + /*box-shadow: 0px 0px 2px rgba(0, 0, 0, 0.2);*/ +} + +.reveal blockquote p:first-child, +.reveal blockquote p:last-child { + display: inline-block; } + +.reveal q { + font-style: italic; } + +.reveal pre, +.reveal pre[class*=language-] { + display: block; + position: relative; + margin: 10px auto; + text-align: left; + font-size: 0.55em; + font-family: monospace; + line-height: 1em; + word-wrap: break-word; + box-shadow: 0px 0px 6px rgba(0, 0, 0, 0.3); + padding: 0px; +} + +.reveal code { + font-family: monospace; } + +.reveal pre code { + display: block; + padding: 5px; + overflow: auto; + max-height: 55vh; + word-wrap: normal; +} + +.reveal table { + margin: auto; + border-collapse: collapse; + border-spacing: 0; } + +.reveal table th { + font-weight: bold; } + +.reveal table th, +.reveal table td { + text-align: left; + padding: 0.2em 0.5em 0.2em 0.5em; + border-bottom: 1px solid; } + +.reveal table th[align="center"], +.reveal table td[align="center"] { + text-align: center; } + +.reveal table th[align="right"], +.reveal table td[align="right"] { + text-align: right; } + +.reveal table tbody tr:last-child th, +.reveal table tbody tr:last-child td { + border-bottom: none; } + +.reveal sup { + vertical-align: super; } + +.reveal sub { + vertical-align: sub; } + +.reveal small { + display: inline-block; + font-size: 0.6em; + line-height: 1.2em; + vertical-align: top; } + +.reveal small * { + vertical-align: top; } + +/********************************************* + * LINKS + *********************************************/ +.reveal a { + color: #00008B; + text-decoration: none; + -webkit-transition: color .15s ease; + -moz-transition: color .15s ease; + transition: color .15s ease; } + +.reveal a:hover { + color: #0000f1; + text-shadow: none; + border: none; } + +.reveal .roll span:after { + color: #fff; + background: #00003f; } + +/********************************************* + * IMAGES + *********************************************/ +.reveal section img { + margin: 15px 0px; + background: rgba(255, 255, 255, 0.12); + max-height: 500px; +} + +.reveal section img.plain { + border: 0; + box-shadow: none; } + +.reveal a img { + -webkit-transition: all .15s linear; + -moz-transition: all .15s linear; + transition: all .15s linear; } + +.reveal a:hover img { + background: rgba(255, 255, 255, 0.2); + border-color: #00008B; + box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); } + +/********************************************* + * NAVIGATION CONTROLS + *********************************************/ + +.reveal .controls { + bottom: auto; + top: 10px; +} + + +.reveal .controls .navigate-left, +.reveal .controls .navigate-left.enabled { + 
border-right-color: #00008B; } + +.reveal .controls .navigate-right, +.reveal .controls .navigate-right.enabled { + border-left-color: #00008B; } + +.reveal .controls .navigate-up, +.reveal .controls .navigate-up.enabled { + border-bottom-color: #00008B; } + +.reveal .controls .navigate-down, +.reveal .controls .navigate-down.enabled { + border-top-color: #00008B; } + +.reveal .controls .navigate-left.enabled:hover { + border-right-color: #0000f1; } + +.reveal .controls .navigate-right.enabled:hover { + border-left-color: #0000f1; } + +.reveal .controls .navigate-up.enabled:hover { + border-bottom-color: #0000f1; } + +.reveal .controls .navigate-down.enabled:hover { + border-top-color: #0000f1; } + +/********************************************* + * PROGRESS BAR + *********************************************/ +.reveal .progress { + background: rgba(0, 0, 0, 0.2); +} + +.reveal .progress span { + background: #00008B; + -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); + -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); + transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); +} + + +/********************************************* + * CUSTOM STYLES + * + * + * colors: + * + * Pool - #5bbdbf; + * Mango - #ff5850; + *********************************************/ +.reveal section .ans-logo img{ + box-shadow: none; + border: none; + margin: 0px; + background: none; +} + +.cover div.ans-logo{ + display: block; +} + +div.ans-mark{ + position: absolute; + width: 6vw; /*vertical-width*/ + bottom: 0.5em; + right: 1em; + z-index: 9999; +} + +div.ans-mark .circle{ + fill:#CC0000; +} + +.cover div.ans-mark .circle, +.title.alt div.ans-mark .circle{ + fill:#000; +} + +svg .a-mark{ + fill:#FFFFFF; +} + +.reveal { + box-sizing: border-box; + transition: all 300ms ease-in-out; +} + +.reveal p, +.reveal li{ + width: 80%; +} + +.reveal p.fullwidth, +.reveal .col p, +.reveal .col li, +.reveal aside p{ + width: 100%; +} + +.reveal ul, +.reveal ol{ + margin-left: 2em; +} + +.slide-background.present{ + background-color: #fff; +} + +.cover .slide-background.present, +.title.alt .slide-background.present{ + background-color: #cc0000; + display: block !important; +} + +.lab.alt .slide-background.present{ + background-color: #A9A9A9 ; + display: block !important; +} + + +.cover .present, +.cover .present h1, +.cover .present h2, +.cover .present h3, +.cover .present h4, +.cover .present h5, +.cover .present h6, +.cover .present p, +.cover .present li{ + color: #fff; +} + +.title .reveal h1, +.title .reveal h2, +.title .reveal h3, +.title .reveal h4{ + color: #cc0000; +} + + +.title.alt .reveal h1, +.title.alt .reveal h2, +.title.alt .reveal h3, +.title.alt .reveal h4, +.title.alt .reveal p{ + color: #fff; +} + +.columns{ + display: flex; + justify-content: flex-start; +} + +.reveal .columns > *{ + flex-basis: 31%; + margin-right: 2.333%; + flex-grow: 1; +} + +.reveal .progress span { + background: #ff5850; +} + +.monospace{ + font-family: courier, monospace !important; +} + + +/********************************************* + * CONTROLS + *********************************************/ + +.reveal .controls .navigate-left, .reveal .controls .navigate-left.enabled{ + border-right-color: #ccc; +} +.reveal .controls .navigate-left.enabled:hover { + border-right-color: #999; + opacity: 0.5; +} + +.reveal .controls .navigate-right, .reveal .controls .navigate-right.enabled{ + border-left-color: #ccc; +} +.reveal .controls .navigate-right.enabled:hover { + border-left-color: 
#999; + opacity: 0.5; +} + +.reveal .controls .navigate-down, .reveal .controls .navigate-down.enabled { + border-top-color: #ccc; +} +.reveal .controls .navigate-down.enabled:hover { + border-top-color: #999; + opacity: 0.5; +} + +.reveal .controls .navigate-up, .reveal .controls .navigate-up.enabled { + border-bottom-color: #ccc; +} + +.reveal .controls .navigate-up.enabled:hover { + border-bottom-color: #999; + opacity: 0.5; +} + +.reveal .controls button { + opacity: 0.1; +} + + + +/********************************************* + * PRINT + *********************************************/ + +@media print{ + /*@page {size: landscape}*/ + + .ans-logo{ + padding: 20px !important; + background: #c00; + display: inline-block; + } + + .reveal, + .reveal h1, + .reveal h2, + .reveal h3, + .reveal h4, + .reveal h5, + .reveal h6, + .reveal p, + .reveal ul, + .reveal ol, + .reveal li, + .reveal blockquote{ + font-family: "Open Sans", sans-serif !important; + color: #000 !important; + } + + .reveal blockquote{ + font-size: 20px !important; + border: none !important; + } + + .reveal p, + .reveal li{ + font-size: 20px !important; + } + + .reveal img{ + border: none !important; + } + + .reveal .columns .col { + max-width: 50%; + } + + .reveal pre code{ + max-height: none !important; + } + + .reveal section aside.notes { + display: block; + border-top: 1px solid black; + margin-top: 60px; + padding-top: 20px; + } + + .reveal section aside.notes *{ + font-size: 14px !important; + font-family: sans-serif; + } + + div.ans-mark{ + position: fixed; + width: 6vw; /*vertical-width*/ + z-index: 9999; + } + +} + +/* +@media print and (orientation:landscape) { + .reveal section aside.notes { + display: none !important; + } +} +*/ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/dormant/index.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/dormant/index.php new file mode 100644 index 0000000..82677dc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/dormant/index.php @@ -0,0 +1,3 @@ +code[class*=language-],pre[class*=language-]{background:#f5f2f0}:not(pre)>code[class*=language-]{padding:.1em;border-radius:.3em;white-space:normal}.token.cdata,.token.comment,.token.doctype,.token.prolog{color:#708090}.token.punctuation{color:#999}.namespace{opacity:.7}.token.boolean,.token.constant,.token.deleted,.token.number,.token.property,.token.symbol,.token.tag{color:#905}.token.attr-name,.token.builtin,.token.char,.token.inserted,.token.selector,.token.string{color:#690}.language-css .token.string,.style .token.string,.token.entity,.token.operator,.token.url{color:#a67f59;background:hsla(0,0%,100%,.5)}.token.atrule,.token.attr-value,.token.keyword{color:#07a}.token.function{color:#DD4A68}.token.important,.token.regex,.token.variable{color:#e90}.token.bold,.token.important{font-weight:700}.token.italic{font-style:italic}.token.entity{cursor:help}/*# sourceMappingURL=prism.min.css.map */ \ No newline at end of file diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/dormant/zenburn.css b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/dormant/zenburn.css new file mode 100644 index 0000000..07be502 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/dormant/zenburn.css @@ -0,0 +1,80 @@ +/* + +Zenburn style from voldmar.ru (c) Vladimir Epifanov +based on dark.css by Ivan Sagalaev + +*/ + +.hljs { + display: block; + overflow-x: auto; + padding: 0.5em; + background: #3f3f3f; + color: #dcdcdc; +} + +.hljs-keyword, 
+.hljs-selector-tag, +.hljs-tag { + color: #e3ceab; +} + +.hljs-template-tag { + color: #dcdcdc; +} + +.hljs-number { + color: #8cd0d3; +} + +.hljs-variable, +.hljs-template-variable, +.hljs-attribute { + color: #efdcbc; +} + +.hljs-literal { + color: #efefaf; +} + +.hljs-subst { + color: #8f8f8f; +} + +.hljs-title, +.hljs-name, +.hljs-selector-id, +.hljs-selector-class, +.hljs-section, +.hljs-type { + color: #efef8f; +} + +.hljs-symbol, +.hljs-bullet, +.hljs-link { + color: #dca3a3; +} + +.hljs-deletion, +.hljs-string, +.hljs-built_in, +.hljs-builtin-name { + color: #cc9393; +} + +.hljs-addition, +.hljs-comment, +.hljs-quote, +.hljs-meta { + color: #7f9f7f; +} + + +.hljs-emphasis { + font-style: italic; +} + +.hljs-strong { + font-weight: bold; +} diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/faketerminal.css b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/faketerminal.css new file mode 100644 index 0000000..e0f9165 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/faketerminal.css @@ -0,0 +1,110 @@ +* { margin: 0; padding: 0; } +.terminal { + border-radius: 5px 5px 0 0; + position: relative; +} +.terminal .top { + background: #E8E6E8; + color: black; + padding: 5px; + border-radius: 3px 3px 0 0; +} +.terminal .btns { + position: absolute; + top: 7px; + left: 5px; +} + +.terminal .circle { + width: 12px; + height: 12px; + display: inline-block; + border-radius: 15px; + margin-left: 2px; + border-width: 1px; + border-style: solid; +} + +.red { background: #EC6A5F; border-color: #D04E42; } +.green { background: #64CC57; border-color: #4EA73B; } +.yellow{ background: #F5C04F; border-color: #D6A13D; } +.clear{clear: both;} + +.title{ + text-align: center; + font-size: 8px; + +} + +.terminal .title { + color: #000000; + padding: 0px; + font-family: verdana; + font-size: 12px; +} + +.terminal .bodys { + background: black; + color: #7AFB4C; + padding: 15px; + overflow: auto; + font-family: monospace; + text-align: center; + font-size: 16px; +} + +.terminal .bodysw { + background: black; + color: #FFFFFF; + padding: 15px; + overflow: auto; + font-family: monospace; + font-size: 16px; +} + + +.terminal .bodym { + background: black; + color: #7AFB4C; + padding: 15px; + overflow: auto; + font-family: monospace; + font-size: 24px; +} + +.terminal .bodymw { + background: black; + color: #FFFFFF; + padding: 15px; + overflow: auto; + font-family: monospace; + font-size: 24px; +} + +.terminal .bodyl { + background: black; + color: #7AFB4C; + padding: 15px; + overflow: auto; + font-family: monospace; + font-size: 36px; +} + +.terminal .bodylw { + background: black; + color: #FFFFFF; + padding: 15px; + overflow: auto; + font-family: monospace; + font-size: 36px; +} + +.space { + background: black; + margin: 5px; +} + +.shadow { + box-shadow: 0px 0px 10px rgba(0,0,0,.4) +} + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/index.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/index.php new file mode 100644 index 0000000..82677dc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/index.php @@ -0,0 +1,3 @@ + section { + -ms-perspective: 600px; } + +.reveal .slides > section, +.reveal .slides > section > section { + display: none; + position: absolute; + width: 100%; + padding: 20px 0px; + z-index: 10; + -webkit-transform-style: flat; + transform-style: flat; + -webkit-transition: -webkit-transform-origin 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985), -webkit-transform 800ms cubic-bezier(0.26, 0.86, 
0.44, 0.985), visibility 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985), opacity 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); + transition: transform-origin 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985), transform 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985), visibility 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985), opacity 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); } + +/* Global transition speed settings */ +.reveal[data-transition-speed="fast"] .slides section { + -webkit-transition-duration: 400ms; + transition-duration: 400ms; } + +.reveal[data-transition-speed="slow"] .slides section { + -webkit-transition-duration: 1200ms; + transition-duration: 1200ms; } + +/* Slide-specific transition speed overrides */ +.reveal .slides section[data-transition-speed="fast"] { + -webkit-transition-duration: 400ms; + transition-duration: 400ms; } + +.reveal .slides section[data-transition-speed="slow"] { + -webkit-transition-duration: 1200ms; + transition-duration: 1200ms; } + +.reveal .slides > section.stack { + padding-top: 0; + padding-bottom: 0; } + +.reveal .slides > section.present, +.reveal .slides > section > section.present { + display: block; + z-index: 11; + opacity: 1; } + +.reveal.center, +.reveal.center .slides, +.reveal.center .slides section { + min-height: 0 !important; } + +/* Don't allow interaction with invisible slides */ +.reveal .slides > section.future, +.reveal .slides > section > section.future, +.reveal .slides > section.past, +.reveal .slides > section > section.past { + pointer-events: none; } + +.reveal.overview .slides > section, +.reveal.overview .slides > section > section { + pointer-events: auto; } + +.reveal .slides > section.past, +.reveal .slides > section.future, +.reveal .slides > section > section.past, +.reveal .slides > section > section.future { + opacity: 0; } + +/********************************************* + * Mixins for readability of transitions + *********************************************/ +/********************************************* + * SLIDE TRANSITION + * Aliased 'linear' for backwards compatibility + *********************************************/ +.reveal.slide section { + -webkit-backface-visibility: hidden; + backface-visibility: hidden; } + +.reveal .slides > section[data-transition=slide].past, +.reveal .slides > section[data-transition~=slide-out].past, +.reveal.slide .slides > section:not([data-transition]).past { + -webkit-transform: translate(-150%, 0); + transform: translate(-150%, 0); } + +.reveal .slides > section[data-transition=slide].future, +.reveal .slides > section[data-transition~=slide-in].future, +.reveal.slide .slides > section:not([data-transition]).future { + -webkit-transform: translate(150%, 0); + transform: translate(150%, 0); } + +.reveal .slides > section > section[data-transition=slide].past, +.reveal .slides > section > section[data-transition~=slide-out].past, +.reveal.slide .slides > section > section:not([data-transition]).past { + -webkit-transform: translate(0, -150%); + transform: translate(0, -150%); } + +.reveal .slides > section > section[data-transition=slide].future, +.reveal .slides > section > section[data-transition~=slide-in].future, +.reveal.slide .slides > section > section:not([data-transition]).future { + -webkit-transform: translate(0, 150%); + transform: translate(0, 150%); } + +.reveal.linear section { + -webkit-backface-visibility: hidden; + backface-visibility: hidden; } + +.reveal .slides > section[data-transition=linear].past, +.reveal .slides > section[data-transition~=linear-out].past, 
+.reveal.linear .slides > section:not([data-transition]).past { + -webkit-transform: translate(-150%, 0); + transform: translate(-150%, 0); } + +.reveal .slides > section[data-transition=linear].future, +.reveal .slides > section[data-transition~=linear-in].future, +.reveal.linear .slides > section:not([data-transition]).future { + -webkit-transform: translate(150%, 0); + transform: translate(150%, 0); } + +.reveal .slides > section > section[data-transition=linear].past, +.reveal .slides > section > section[data-transition~=linear-out].past, +.reveal.linear .slides > section > section:not([data-transition]).past { + -webkit-transform: translate(0, -150%); + transform: translate(0, -150%); } + +.reveal .slides > section > section[data-transition=linear].future, +.reveal .slides > section > section[data-transition~=linear-in].future, +.reveal.linear .slides > section > section:not([data-transition]).future { + -webkit-transform: translate(0, 150%); + transform: translate(0, 150%); } + +/********************************************* + * CONVEX TRANSITION + * Aliased 'default' for backwards compatibility + *********************************************/ +.reveal .slides section[data-transition=default].stack, +.reveal.default .slides section.stack { + -webkit-transform-style: preserve-3d; + transform-style: preserve-3d; } + +.reveal .slides > section[data-transition=default].past, +.reveal .slides > section[data-transition~=default-out].past, +.reveal.default .slides > section:not([data-transition]).past { + -webkit-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0); } + +.reveal .slides > section[data-transition=default].future, +.reveal .slides > section[data-transition~=default-in].future, +.reveal.default .slides > section:not([data-transition]).future { + -webkit-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0); } + +.reveal .slides > section > section[data-transition=default].past, +.reveal .slides > section > section[data-transition~=default-out].past, +.reveal.default .slides > section > section:not([data-transition]).past { + -webkit-transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0); + transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0); } + +.reveal .slides > section > section[data-transition=default].future, +.reveal .slides > section > section[data-transition~=default-in].future, +.reveal.default .slides > section > section:not([data-transition]).future { + -webkit-transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0); + transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0); } + +.reveal .slides section[data-transition=convex].stack, +.reveal.convex .slides section.stack { + -webkit-transform-style: preserve-3d; + transform-style: preserve-3d; } + +.reveal .slides > section[data-transition=convex].past, +.reveal .slides > section[data-transition~=convex-out].past, +.reveal.convex .slides > section:not([data-transition]).past { + -webkit-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0); } + +.reveal .slides > section[data-transition=convex].future, +.reveal .slides > section[data-transition~=convex-in].future, +.reveal.convex .slides > section:not([data-transition]).future { 
+ -webkit-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0); } + +.reveal .slides > section > section[data-transition=convex].past, +.reveal .slides > section > section[data-transition~=convex-out].past, +.reveal.convex .slides > section > section:not([data-transition]).past { + -webkit-transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0); + transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0); } + +.reveal .slides > section > section[data-transition=convex].future, +.reveal .slides > section > section[data-transition~=convex-in].future, +.reveal.convex .slides > section > section:not([data-transition]).future { + -webkit-transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0); + transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0); } + +/********************************************* + * CONCAVE TRANSITION + *********************************************/ +.reveal .slides section[data-transition=concave].stack, +.reveal.concave .slides section.stack { + -webkit-transform-style: preserve-3d; + transform-style: preserve-3d; } + +.reveal .slides > section[data-transition=concave].past, +.reveal .slides > section[data-transition~=concave-out].past, +.reveal.concave .slides > section:not([data-transition]).past { + -webkit-transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0); } + +.reveal .slides > section[data-transition=concave].future, +.reveal .slides > section[data-transition~=concave-in].future, +.reveal.concave .slides > section:not([data-transition]).future { + -webkit-transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0); } + +.reveal .slides > section > section[data-transition=concave].past, +.reveal .slides > section > section[data-transition~=concave-out].past, +.reveal.concave .slides > section > section:not([data-transition]).past { + -webkit-transform: translate3d(0, -80%, 0) rotateX(-70deg) translate3d(0, -80%, 0); + transform: translate3d(0, -80%, 0) rotateX(-70deg) translate3d(0, -80%, 0); } + +.reveal .slides > section > section[data-transition=concave].future, +.reveal .slides > section > section[data-transition~=concave-in].future, +.reveal.concave .slides > section > section:not([data-transition]).future { + -webkit-transform: translate3d(0, 80%, 0) rotateX(70deg) translate3d(0, 80%, 0); + transform: translate3d(0, 80%, 0) rotateX(70deg) translate3d(0, 80%, 0); } + +/********************************************* + * ZOOM TRANSITION + *********************************************/ +.reveal .slides section[data-transition=zoom], +.reveal.zoom .slides section:not([data-transition]) { + -webkit-transition-timing-function: ease; + transition-timing-function: ease; } + +.reveal .slides > section[data-transition=zoom].past, +.reveal .slides > section[data-transition~=zoom-out].past, +.reveal.zoom .slides > section:not([data-transition]).past { + visibility: hidden; + -webkit-transform: scale(16); + transform: scale(16); } + +.reveal .slides > section[data-transition=zoom].future, +.reveal .slides > section[data-transition~=zoom-in].future, +.reveal.zoom .slides > section:not([data-transition]).future { + visibility: hidden; + -webkit-transform: scale(0.2); + transform: scale(0.2); } + 
+.reveal .slides > section > section[data-transition=zoom].past, +.reveal .slides > section > section[data-transition~=zoom-out].past, +.reveal.zoom .slides > section > section:not([data-transition]).past { + -webkit-transform: translate(0, -150%); + transform: translate(0, -150%); } + +.reveal .slides > section > section[data-transition=zoom].future, +.reveal .slides > section > section[data-transition~=zoom-in].future, +.reveal.zoom .slides > section > section:not([data-transition]).future { + -webkit-transform: translate(0, 150%); + transform: translate(0, 150%); } + +/********************************************* + * CUBE TRANSITION + *********************************************/ +.reveal.cube .slides { + -webkit-perspective: 1300px; + perspective: 1300px; } + +.reveal.cube .slides section { + padding: 30px; + min-height: 700px; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; + box-sizing: border-box; + -webkit-transform-style: preserve-3d; + transform-style: preserve-3d; } + +.reveal.center.cube .slides section { + min-height: 0; } + +.reveal.cube .slides section:not(.stack):before { + content: ''; + position: absolute; + display: block; + width: 100%; + height: 100%; + left: 0; + top: 0; + background: rgba(0, 0, 0, 0.1); + border-radius: 4px; + -webkit-transform: translateZ(-20px); + transform: translateZ(-20px); } + +.reveal.cube .slides section:not(.stack):after { + content: ''; + position: absolute; + display: block; + width: 90%; + height: 30px; + left: 5%; + bottom: 0; + background: none; + z-index: 1; + border-radius: 4px; + box-shadow: 0px 95px 25px rgba(0, 0, 0, 0.2); + -webkit-transform: translateZ(-90px) rotateX(65deg); + transform: translateZ(-90px) rotateX(65deg); } + +.reveal.cube .slides > section.stack { + padding: 0; + background: none; } + +.reveal.cube .slides > section.past { + -webkit-transform-origin: 100% 0%; + transform-origin: 100% 0%; + -webkit-transform: translate3d(-100%, 0, 0) rotateY(-90deg); + transform: translate3d(-100%, 0, 0) rotateY(-90deg); } + +.reveal.cube .slides > section.future { + -webkit-transform-origin: 0% 0%; + transform-origin: 0% 0%; + -webkit-transform: translate3d(100%, 0, 0) rotateY(90deg); + transform: translate3d(100%, 0, 0) rotateY(90deg); } + +.reveal.cube .slides > section > section.past { + -webkit-transform-origin: 0% 100%; + transform-origin: 0% 100%; + -webkit-transform: translate3d(0, -100%, 0) rotateX(90deg); + transform: translate3d(0, -100%, 0) rotateX(90deg); } + +.reveal.cube .slides > section > section.future { + -webkit-transform-origin: 0% 0%; + transform-origin: 0% 0%; + -webkit-transform: translate3d(0, 100%, 0) rotateX(-90deg); + transform: translate3d(0, 100%, 0) rotateX(-90deg); } + +/********************************************* + * PAGE TRANSITION + *********************************************/ +.reveal.page .slides { + -webkit-perspective-origin: 0% 50%; + perspective-origin: 0% 50%; + -webkit-perspective: 3000px; + perspective: 3000px; } + +.reveal.page .slides section { + padding: 30px; + min-height: 700px; + box-sizing: border-box; + -webkit-transform-style: preserve-3d; + transform-style: preserve-3d; } + +.reveal.page .slides section.past { + z-index: 12; } + +.reveal.page .slides section:not(.stack):before { + content: ''; + position: absolute; + display: block; + width: 100%; + height: 100%; + left: 0; + top: 0; + background: rgba(0, 0, 0, 0.1); + -webkit-transform: translateZ(-20px); + transform: translateZ(-20px); } + +.reveal.page .slides section:not(.stack):after { + 
content: ''; + position: absolute; + display: block; + width: 90%; + height: 30px; + left: 5%; + bottom: 0; + background: none; + z-index: 1; + border-radius: 4px; + box-shadow: 0px 95px 25px rgba(0, 0, 0, 0.2); + -webkit-transform: translateZ(-90px) rotateX(65deg); } + +.reveal.page .slides > section.stack { + padding: 0; + background: none; } + +.reveal.page .slides > section.past { + -webkit-transform-origin: 0% 0%; + transform-origin: 0% 0%; + -webkit-transform: translate3d(-40%, 0, 0) rotateY(-80deg); + transform: translate3d(-40%, 0, 0) rotateY(-80deg); } + +.reveal.page .slides > section.future { + -webkit-transform-origin: 100% 0%; + transform-origin: 100% 0%; + -webkit-transform: translate3d(0, 0, 0); + transform: translate3d(0, 0, 0); } + +.reveal.page .slides > section > section.past { + -webkit-transform-origin: 0% 0%; + transform-origin: 0% 0%; + -webkit-transform: translate3d(0, -40%, 0) rotateX(80deg); + transform: translate3d(0, -40%, 0) rotateX(80deg); } + +.reveal.page .slides > section > section.future { + -webkit-transform-origin: 0% 100%; + transform-origin: 0% 100%; + -webkit-transform: translate3d(0, 0, 0); + transform: translate3d(0, 0, 0); } + +/********************************************* + * FADE TRANSITION + *********************************************/ +.reveal .slides section[data-transition=fade], +.reveal.fade .slides section:not([data-transition]), +.reveal.fade .slides > section > section:not([data-transition]) { + -webkit-transform: none; + transform: none; + -webkit-transition: opacity 0.5s; + transition: opacity 0.5s; } + +.reveal.fade.overview .slides section, +.reveal.fade.overview .slides > section > section { + -webkit-transition: none; + transition: none; } + +/********************************************* + * NO TRANSITION + *********************************************/ +.reveal .slides section[data-transition=none], +.reveal.none .slides section:not([data-transition]) { + -webkit-transform: none; + transform: none; + -webkit-transition: none; + transition: none; } + +/********************************************* + * PAUSED MODE + *********************************************/ +.reveal .pause-overlay { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: black; + visibility: hidden; + opacity: 0; + z-index: 100; + -webkit-transition: all 1s ease; + transition: all 1s ease; } + +.reveal.paused .pause-overlay { + visibility: visible; + opacity: 1; } + +/********************************************* + * FALLBACK + *********************************************/ +.no-transforms { + overflow-y: auto; } + +.no-transforms .reveal .slides { + position: relative; + width: 80%; + height: auto !important; + top: 0; + left: 50%; + margin: 0; + text-align: center; } + +.no-transforms .reveal .controls, +.no-transforms .reveal .progress { + display: none !important; } + +.no-transforms .reveal .slides section { + display: block !important; + opacity: 1 !important; + position: relative !important; + height: auto; + min-height: 0; + top: 0; + left: -50%; + margin: 70px 0; + -webkit-transform: none; + transform: none; } + +.no-transforms .reveal .slides section section { + left: 0; } + +.reveal .no-transition, +.reveal .no-transition * { + -webkit-transition: none !important; + transition: none !important; } + +/********************************************* + * PER-SLIDE BACKGROUNDS + *********************************************/ +.reveal .backgrounds { + position: absolute; + width: 100%; + height: 100%; + top: 0; + left: 0; 
+ -webkit-perspective: 600px; + perspective: 600px; } + +.reveal .slide-background { + display: none; + position: absolute; + width: 100%; + height: 100%; + opacity: 0; + visibility: hidden; + background-color: transparent; + background-position: 50% 50%; + background-repeat: no-repeat; + background-size: cover; + -webkit-transition: all 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); + transition: all 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); } + +.reveal .slide-background.stack { + display: block; } + +.reveal .slide-background.present { + opacity: 1; + visibility: visible; } + +.print-pdf .reveal .slide-background { + opacity: 1 !important; + visibility: visible !important; } + +/* Video backgrounds */ +.reveal .slide-background video { + position: absolute; + width: 100%; + height: 100%; + max-width: none; + max-height: none; + top: 0; + left: 0; } + +/* Immediate transition style */ +.reveal[data-background-transition=none] > .backgrounds .slide-background, +.reveal > .backgrounds .slide-background[data-background-transition=none] { + -webkit-transition: none; + transition: none; } + +/* Slide */ +.reveal[data-background-transition=slide] > .backgrounds .slide-background, +.reveal > .backgrounds .slide-background[data-background-transition=slide] { + opacity: 1; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; } + +.reveal[data-background-transition=slide] > .backgrounds .slide-background.past, +.reveal > .backgrounds .slide-background.past[data-background-transition=slide] { + -webkit-transform: translate(-100%, 0); + transform: translate(-100%, 0); } + +.reveal[data-background-transition=slide] > .backgrounds .slide-background.future, +.reveal > .backgrounds .slide-background.future[data-background-transition=slide] { + -webkit-transform: translate(100%, 0); + transform: translate(100%, 0); } + +.reveal[data-background-transition=slide] > .backgrounds .slide-background > .slide-background.past, +.reveal > .backgrounds .slide-background > .slide-background.past[data-background-transition=slide] { + -webkit-transform: translate(0, -100%); + transform: translate(0, -100%); } + +.reveal[data-background-transition=slide] > .backgrounds .slide-background > .slide-background.future, +.reveal > .backgrounds .slide-background > .slide-background.future[data-background-transition=slide] { + -webkit-transform: translate(0, 100%); + transform: translate(0, 100%); } + +/* Convex */ +.reveal[data-background-transition=convex] > .backgrounds .slide-background.past, +.reveal > .backgrounds .slide-background.past[data-background-transition=convex] { + opacity: 0; + -webkit-transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0); } + +.reveal[data-background-transition=convex] > .backgrounds .slide-background.future, +.reveal > .backgrounds .slide-background.future[data-background-transition=convex] { + opacity: 0; + -webkit-transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0); } + +.reveal[data-background-transition=convex] > .backgrounds .slide-background > .slide-background.past, +.reveal > .backgrounds .slide-background > .slide-background.past[data-background-transition=convex] { + opacity: 0; + -webkit-transform: translate3d(0, -100%, 0) rotateX(90deg) translate3d(0, -100%, 0); + transform: translate3d(0, -100%, 0) rotateX(90deg) translate3d(0, -100%, 0); } + 
+.reveal[data-background-transition=convex] > .backgrounds .slide-background > .slide-background.future, +.reveal > .backgrounds .slide-background > .slide-background.future[data-background-transition=convex] { + opacity: 0; + -webkit-transform: translate3d(0, 100%, 0) rotateX(-90deg) translate3d(0, 100%, 0); + transform: translate3d(0, 100%, 0) rotateX(-90deg) translate3d(0, 100%, 0); } + +/* Concave */ +.reveal[data-background-transition=concave] > .backgrounds .slide-background.past, +.reveal > .backgrounds .slide-background.past[data-background-transition=concave] { + opacity: 0; + -webkit-transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0); + transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0); } + +.reveal[data-background-transition=concave] > .backgrounds .slide-background.future, +.reveal > .backgrounds .slide-background.future[data-background-transition=concave] { + opacity: 0; + -webkit-transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0); + transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0); } + +.reveal[data-background-transition=concave] > .backgrounds .slide-background > .slide-background.past, +.reveal > .backgrounds .slide-background > .slide-background.past[data-background-transition=concave] { + opacity: 0; + -webkit-transform: translate3d(0, -100%, 0) rotateX(-90deg) translate3d(0, -100%, 0); + transform: translate3d(0, -100%, 0) rotateX(-90deg) translate3d(0, -100%, 0); } + +.reveal[data-background-transition=concave] > .backgrounds .slide-background > .slide-background.future, +.reveal > .backgrounds .slide-background > .slide-background.future[data-background-transition=concave] { + opacity: 0; + -webkit-transform: translate3d(0, 100%, 0) rotateX(90deg) translate3d(0, 100%, 0); + transform: translate3d(0, 100%, 0) rotateX(90deg) translate3d(0, 100%, 0); } + +/* Zoom */ +.reveal[data-background-transition=zoom] > .backgrounds .slide-background, +.reveal > .backgrounds .slide-background[data-background-transition=zoom] { + -webkit-transition-timing-function: ease; + transition-timing-function: ease; } + +.reveal[data-background-transition=zoom] > .backgrounds .slide-background.past, +.reveal > .backgrounds .slide-background.past[data-background-transition=zoom] { + opacity: 0; + visibility: hidden; + -webkit-transform: scale(16); + transform: scale(16); } + +.reveal[data-background-transition=zoom] > .backgrounds .slide-background.future, +.reveal > .backgrounds .slide-background.future[data-background-transition=zoom] { + opacity: 0; + visibility: hidden; + -webkit-transform: scale(0.2); + transform: scale(0.2); } + +.reveal[data-background-transition=zoom] > .backgrounds .slide-background > .slide-background.past, +.reveal > .backgrounds .slide-background > .slide-background.past[data-background-transition=zoom] { + opacity: 0; + visibility: hidden; + -webkit-transform: scale(16); + transform: scale(16); } + +.reveal[data-background-transition=zoom] > .backgrounds .slide-background > .slide-background.future, +.reveal > .backgrounds .slide-background > .slide-background.future[data-background-transition=zoom] { + opacity: 0; + visibility: hidden; + -webkit-transform: scale(0.2); + transform: scale(0.2); } + +/* Global transition speed settings */ +.reveal[data-transition-speed="fast"] > .backgrounds .slide-background { + -webkit-transition-duration: 400ms; + transition-duration: 400ms; } + +.reveal[data-transition-speed="slow"] > .backgrounds .slide-background { + 
-webkit-transition-duration: 1200ms; + transition-duration: 1200ms; } + +/********************************************* + * OVERVIEW + *********************************************/ +.reveal.overview { + -webkit-perspective-origin: 50% 50%; + perspective-origin: 50% 50%; + -webkit-perspective: 700px; + perspective: 700px; } + .reveal.overview .slides { + -moz-transform-style: preserve-3d; } + .reveal.overview .slides section { + height: 100%; + top: 0 !important; + opacity: 1 !important; + overflow: hidden; + visibility: visible !important; + cursor: pointer; + box-sizing: border-box; } + .reveal.overview .slides section:hover, + .reveal.overview .slides section.present { + outline: 10px solid rgba(150, 150, 150, 0.4); + outline-offset: 10px; } + .reveal.overview .slides section .fragment { + opacity: 1; + -webkit-transition: none; + transition: none; } + .reveal.overview .slides section:after, + .reveal.overview .slides section:before { + display: none !important; } + .reveal.overview .slides > section.stack { + padding: 0; + top: 0 !important; + background: none; + outline: none; + overflow: visible; } + .reveal.overview .backgrounds { + -webkit-perspective: inherit; + perspective: inherit; + -moz-transform-style: preserve-3d; } + .reveal.overview .backgrounds .slide-background { + opacity: 1; + visibility: visible; + outline: 10px solid rgba(150, 150, 150, 0.1); + outline-offset: 10px; } + +.reveal.overview .slides section, +.reveal.overview-deactivating .slides section { + -webkit-transition: none; + transition: none; } + +.reveal.overview .backgrounds .slide-background, +.reveal.overview-deactivating .backgrounds .slide-background { + -webkit-transition: none; + transition: none; } + +.reveal.overview-animated .slides { + -webkit-transition: -webkit-transform 0.4s ease; + transition: transform 0.4s ease; } + +/********************************************* + * RTL SUPPORT + *********************************************/ +.reveal.rtl .slides, +.reveal.rtl .slides h1, +.reveal.rtl .slides h2, +.reveal.rtl .slides h3, +.reveal.rtl .slides h4, +.reveal.rtl .slides h5, +.reveal.rtl .slides h6 { + direction: rtl; + font-family: sans-serif; } + +.reveal.rtl pre, +.reveal.rtl code { + direction: ltr; } + +.reveal.rtl ol, +.reveal.rtl ul { + text-align: right; } + +.reveal.rtl .progress span { + float: right; } + +/********************************************* + * PARALLAX BACKGROUND + *********************************************/ +.reveal.has-parallax-background .backgrounds { + -webkit-transition: all 0.8s ease; + transition: all 0.8s ease; } + +/* Global transition speed settings */ +.reveal.has-parallax-background[data-transition-speed="fast"] .backgrounds { + -webkit-transition-duration: 400ms; + transition-duration: 400ms; } + +.reveal.has-parallax-background[data-transition-speed="slow"] .backgrounds { + -webkit-transition-duration: 1200ms; + transition-duration: 1200ms; } + +/********************************************* + * LINK PREVIEW OVERLAY + *********************************************/ +.reveal .overlay { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: 1000; + background: rgba(0, 0, 0, 0.9); + opacity: 0; + visibility: hidden; + -webkit-transition: all 0.3s ease; + transition: all 0.3s ease; } + +.reveal .overlay.visible { + opacity: 1; + visibility: visible; } + +.reveal .overlay .spinner { + position: absolute; + display: block; + top: 50%; + left: 50%; + width: 32px; + height: 32px; + margin: -16px 0 0 -16px; + z-index: 10; + 
background-image: url(data:image/gif;base64,R0lGODlhIAAgAPMAAJmZmf%2F%2F%2F6%2Bvr8nJybW1tcDAwOjo6Nvb26ioqKOjo7Ozs%2FLy8vz8%2FAAAAAAAAAAAACH%2FC05FVFNDQVBFMi4wAwEAAAAh%2FhpDcmVhdGVkIHdpdGggYWpheGxvYWQuaW5mbwAh%2BQQJCgAAACwAAAAAIAAgAAAE5xDISWlhperN52JLhSSdRgwVo1ICQZRUsiwHpTJT4iowNS8vyW2icCF6k8HMMBkCEDskxTBDAZwuAkkqIfxIQyhBQBFvAQSDITM5VDW6XNE4KagNh6Bgwe60smQUB3d4Rz1ZBApnFASDd0hihh12BkE9kjAJVlycXIg7CQIFA6SlnJ87paqbSKiKoqusnbMdmDC2tXQlkUhziYtyWTxIfy6BE8WJt5YJvpJivxNaGmLHT0VnOgSYf0dZXS7APdpB309RnHOG5gDqXGLDaC457D1zZ%2FV%2FnmOM82XiHRLYKhKP1oZmADdEAAAh%2BQQJCgAAACwAAAAAIAAgAAAE6hDISWlZpOrNp1lGNRSdRpDUolIGw5RUYhhHukqFu8DsrEyqnWThGvAmhVlteBvojpTDDBUEIFwMFBRAmBkSgOrBFZogCASwBDEY%2FCZSg7GSE0gSCjQBMVG023xWBhklAnoEdhQEfyNqMIcKjhRsjEdnezB%2BA4k8gTwJhFuiW4dokXiloUepBAp5qaKpp6%2BHo7aWW54wl7obvEe0kRuoplCGepwSx2jJvqHEmGt6whJpGpfJCHmOoNHKaHx61WiSR92E4lbFoq%2BB6QDtuetcaBPnW6%2BO7wDHpIiK9SaVK5GgV543tzjgGcghAgAh%2BQQJCgAAACwAAAAAIAAgAAAE7hDISSkxpOrN5zFHNWRdhSiVoVLHspRUMoyUakyEe8PTPCATW9A14E0UvuAKMNAZKYUZCiBMuBakSQKG8G2FzUWox2AUtAQFcBKlVQoLgQReZhQlCIJesQXI5B0CBnUMOxMCenoCfTCEWBsJColTMANldx15BGs8B5wlCZ9Po6OJkwmRpnqkqnuSrayqfKmqpLajoiW5HJq7FL1Gr2mMMcKUMIiJgIemy7xZtJsTmsM4xHiKv5KMCXqfyUCJEonXPN2rAOIAmsfB3uPoAK%2B%2BG%2Bw48edZPK%2BM6hLJpQg484enXIdQFSS1u6UhksENEQAAIfkECQoAAAAsAAAAACAAIAAABOcQyEmpGKLqzWcZRVUQnZYg1aBSh2GUVEIQ2aQOE%2BG%2BcD4ntpWkZQj1JIiZIogDFFyHI0UxQwFugMSOFIPJftfVAEoZLBbcLEFhlQiqGp1Vd140AUklUN3eCA51C1EWMzMCezCBBmkxVIVHBWd3HHl9JQOIJSdSnJ0TDKChCwUJjoWMPaGqDKannasMo6WnM562R5YluZRwur0wpgqZE7NKUm%2BFNRPIhjBJxKZteWuIBMN4zRMIVIhffcgojwCF117i4nlLnY5ztRLsnOk%2BaV%2BoJY7V7m76PdkS4trKcdg0Zc0tTcKkRAAAIfkECQoAAAAsAAAAACAAIAAABO4QyEkpKqjqzScpRaVkXZWQEximw1BSCUEIlDohrft6cpKCk5xid5MNJTaAIkekKGQkWyKHkvhKsR7ARmitkAYDYRIbUQRQjWBwJRzChi9CRlBcY1UN4g0%2FVNB0AlcvcAYHRyZPdEQFYV8ccwR5HWxEJ02YmRMLnJ1xCYp0Y5idpQuhopmmC2KgojKasUQDk5BNAwwMOh2RtRq5uQuPZKGIJQIGwAwGf6I0JXMpC8C7kXWDBINFMxS4DKMAWVWAGYsAdNqW5uaRxkSKJOZKaU3tPOBZ4DuK2LATgJhkPJMgTwKCdFjyPHEnKxFCDhEAACH5BAkKAAAALAAAAAAgACAAAATzEMhJaVKp6s2nIkolIJ2WkBShpkVRWqqQrhLSEu9MZJKK9y1ZrqYK9WiClmvoUaF8gIQSNeF1Er4MNFn4SRSDARWroAIETg1iVwuHjYB1kYc1mwruwXKC9gmsJXliGxc%2BXiUCby9ydh1sOSdMkpMTBpaXBzsfhoc5l58Gm5yToAaZhaOUqjkDgCWNHAULCwOLaTmzswadEqggQwgHuQsHIoZCHQMMQgQGubVEcxOPFAcMDAYUA85eWARmfSRQCdcMe0zeP1AAygwLlJtPNAAL19DARdPzBOWSm1brJBi45soRAWQAAkrQIykShQ9wVhHCwCQCACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiRMDjI0Fd30%2FiI2UA5GSS5UDj2l6NoqgOgN4gksEBgYFf0FDqKgHnyZ9OX8HrgYHdHpcHQULXAS2qKpENRg7eAMLC7kTBaixUYFkKAzWAAnLC7FLVxLWDBLKCwaKTULgEwbLA4hJtOkSBNqITT3xEgfLpBtzE%2FjiuL04RGEBgwWhShRgQExHBAAh%2BQQJCgAAACwAAAAAIAAgAAAE7xDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfZiCqGk5dTESJeaOAlClzsJsqwiJwiqnFrb2nS9kmIcgEsjQydLiIlHehhpejaIjzh9eomSjZR%2BipslWIRLAgMDOR2DOqKogTB9pCUJBagDBXR6XB0EBkIIsaRsGGMMAxoDBgYHTKJiUYEGDAzHC9EACcUGkIgFzgwZ0QsSBcXHiQvOwgDdEwfFs0sDzt4S6BK4xYjkDOzn0unFeBzOBijIm1Dgmg5YFQwsCMjp1oJ8LyIAACH5BAkKAAAALAAAAAAgACAAAATwEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GGl6NoiPOH16iZKNlH6KmyWFOggHhEEvAwwMA0N9GBsEC6amhnVcEwavDAazGwIDaH1ipaYLBUTCGgQDA8NdHz0FpqgTBwsLqAbWAAnIA4FWKdMLGdYGEgraigbT0OITBcg5QwPT4xLrROZL6AuQAPUS7bxLpoWidY0JtxLHKhwwMJBTHgPKdEQAACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GAULDJCRiXo1CpGXDJOUjY%2BYip9DhTo
JA4RBLwMLCwVDfRgbBAaqqoZ1XBMHswsHtxtFaH1iqaoGNgAIxRpbFAgfPQSqpbgGBqUD1wBXeCYp1AYZ19JJOYgH1KwA4UBvQwXUBxPqVD9L3sbp2BNk2xvvFPJd%2BMFCN6HAAIKgNggY0KtEBAAh%2BQQJCgAAACwAAAAAIAAgAAAE6BDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfYIDMaAFdTESJeaEDAIMxYFqrOUaNW4E4ObYcCXaiBVEgULe0NJaxxtYksjh2NLkZISgDgJhHthkpU4mW6blRiYmZOlh4JWkDqILwUGBnE6TYEbCgevr0N1gH4At7gHiRpFaLNrrq8HNgAJA70AWxQIH1%2BvsYMDAzZQPC9VCNkDWUhGkuE5PxJNwiUK4UfLzOlD4WvzAHaoG9nxPi5d%2BjYUqfAhhykOFwJWiAAAIfkECQoAAAAsAAAAACAAIAAABPAQyElpUqnqzaciSoVkXVUMFaFSwlpOCcMYlErAavhOMnNLNo8KsZsMZItJEIDIFSkLGQoQTNhIsFehRww2CQLKF0tYGKYSg%2BygsZIuNqJksKgbfgIGepNo2cIUB3V1B3IvNiBYNQaDSTtfhhx0CwVPI0UJe0%2Bbm4g5VgcGoqOcnjmjqDSdnhgEoamcsZuXO1aWQy8KAwOAuTYYGwi7w5h%2BKr0SJ8MFihpNbx%2B4Erq7BYBuzsdiH1jCAzoSfl0rVirNbRXlBBlLX%2BBP0XJLAPGzTkAuAOqb0WT5AH7OcdCm5B8TgRwSRKIHQtaLCwg1RAAAOwAAAAAAAAAAAA%3D%3D); + visibility: visible; + opacity: 0.6; + -webkit-transition: all 0.3s ease; + transition: all 0.3s ease; } + +.reveal .overlay header { + position: absolute; + left: 0; + top: 0; + width: 100%; + height: 40px; + z-index: 2; + border-bottom: 1px solid #222; } + +.reveal .overlay header a { + display: inline-block; + width: 40px; + height: 40px; + line-height: 36px; + padding: 0 10px; + float: right; + opacity: 0.6; + box-sizing: border-box; } + +.reveal .overlay header a:hover { + opacity: 1; } + +.reveal .overlay header a .icon { + display: inline-block; + width: 20px; + height: 20px; + background-position: 50% 50%; + background-size: 100%; + background-repeat: no-repeat; } + +.reveal .overlay header a.close .icon { + background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAABkklEQVRYR8WX4VHDMAxG6wnoJrABZQPYBCaBTWAD2g1gE5gg6OOsXuxIlr40d81dfrSJ9V4c2VLK7spHuTJ/5wpM07QXuXc5X0opX2tEJcadjHuV80li/FgxTIEK/5QBCICBD6xEhSMGHgQPgBgLiYVAB1dpSqKDawxTohFw4JSEA3clzgIBPCURwE2JucBR7rhPJJv5OpJwDX+SfDjgx1wACQeJG1aChP9K/IMmdZ8DtESV1WyP3Bt4MwM6sj4NMxMYiqUWHQu4KYA/SYkIjOsm3BXYWMKFDwU2khjCQ4ELJUJ4SmClRArOCmSXGuKma0fYD5CbzHxFpCSGAhfAVSSUGDUk2BWZaff2g6GE15BsBQ9nwmpIGDiyHQddwNTMKkbZaf9fajXQca1EX44puJZUsnY0ObGmITE3GVLCbEhQUjGVt146j6oasWN+49Vph2w1pZ5EansNZqKBm1txbU57iRRcZ86RWMDdWtBJUHBHwoQPi1GV+JCbntmvok7iTX4/Up9mgyTc/FJYDTcndgH/AA5A/CHsyEkVAAAAAElFTkSuQmCC); } + +.reveal .overlay header a.external .icon { + background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAcElEQVRYR+2WSQoAIQwEzf8f7XiOMkUQxUPlGkM3hVmiQfQR9GYnH1SsAQlI4DiBqkCMoNb9y2e90IAEJPAcgdznU9+engMaeJ7Azh5Y1U67gAho4DqBqmB1buAf0MB1AlVBek83ZPkmJMGc1wAR+AAqod/B97TRpQAAAABJRU5ErkJggg==); } + +.reveal .overlay .viewport { + position: absolute; + display: -webkit-box; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + top: 40px; + right: 0; + bottom: 0; + left: 0; } + +.reveal .overlay.overlay-preview .viewport iframe { + width: 100%; + height: 100%; + max-width: 100%; + max-height: 100%; + border: 0; + opacity: 0; + visibility: hidden; + -webkit-transition: all 0.3s ease; + transition: all 0.3s ease; } + +.reveal .overlay.overlay-preview.loaded .viewport iframe { + opacity: 1; + visibility: visible; } + +.reveal .overlay.overlay-preview.loaded .viewport-inner { + position: absolute; + z-index: -1; + left: 0; + top: 45%; + width: 100%; + text-align: center; + letter-spacing: normal; } + +.reveal .overlay.overlay-preview .x-frame-error { + opacity: 0; + -webkit-transition: opacity 0.3s ease 0.3s; + transition: opacity 0.3s ease 0.3s; } + +.reveal .overlay.overlay-preview.loaded .x-frame-error { + opacity: 1; } + +.reveal 
.overlay.overlay-preview.loaded .spinner { + opacity: 0; + visibility: hidden; + -webkit-transform: scale(0.2); + transform: scale(0.2); } + +.reveal .overlay.overlay-help .viewport { + overflow: auto; + color: #fff; } + +.reveal .overlay.overlay-help .viewport .viewport-inner { + width: 600px; + margin: auto; + padding: 20px 20px 80px 20px; + text-align: center; + letter-spacing: normal; } + +.reveal .overlay.overlay-help .viewport .viewport-inner .title { + font-size: 20px; } + +.reveal .overlay.overlay-help .viewport .viewport-inner table { + border: 1px solid #fff; + border-collapse: collapse; + font-size: 16px; } + +.reveal .overlay.overlay-help .viewport .viewport-inner table th, +.reveal .overlay.overlay-help .viewport .viewport-inner table td { + width: 200px; + padding: 14px; + border: 1px solid #fff; + vertical-align: middle; } + +.reveal .overlay.overlay-help .viewport .viewport-inner table th { + padding-top: 20px; + padding-bottom: 20px; } + +/********************************************* + * PLAYBACK COMPONENT + *********************************************/ +.reveal .playback { + position: fixed; + left: 15px; + bottom: 20px; + z-index: 30; + cursor: pointer; + -webkit-transition: all 400ms ease; + transition: all 400ms ease; } + +.reveal.overview .playback { + opacity: 0; + visibility: hidden; } + +/********************************************* + * ROLLING LINKS + *********************************************/ +.reveal .roll { + display: inline-block; + line-height: 1.2; + overflow: hidden; + vertical-align: top; + -webkit-perspective: 400px; + perspective: 400px; + -webkit-perspective-origin: 50% 50%; + perspective-origin: 50% 50%; } + +.reveal .roll:hover { + background: none; + text-shadow: none; } + +.reveal .roll span { + display: block; + position: relative; + padding: 0 2px; + pointer-events: none; + -webkit-transition: all 400ms ease; + transition: all 400ms ease; + -webkit-transform-origin: 50% 0%; + transform-origin: 50% 0%; + -webkit-transform-style: preserve-3d; + transform-style: preserve-3d; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; } + +.reveal .roll:hover span { + background: rgba(0, 0, 0, 0.5); + -webkit-transform: translate3d(0px, 0px, -45px) rotateX(90deg); + transform: translate3d(0px, 0px, -45px) rotateX(90deg); } + +.reveal .roll span:after { + content: attr(data-title); + display: block; + position: absolute; + left: 0; + top: 0; + padding: 0 2px; + -webkit-backface-visibility: hidden; + backface-visibility: hidden; + -webkit-transform-origin: 50% 0%; + transform-origin: 50% 0%; + -webkit-transform: translate3d(0px, 110%, 0px) rotateX(-90deg); + transform: translate3d(0px, 110%, 0px) rotateX(-90deg); } + +/********************************************* + * SPEAKER NOTES + *********************************************/ +.reveal aside.notes { + display: none; } + +.reveal .speaker-notes { + display: none; + position: absolute; + width: 70%; + max-height: 15%; + left: 15%; + bottom: 26px; + padding: 10px; + z-index: 1; + font-size: 18px; + line-height: 1.4; + color: #fff; + background-color: rgba(0, 0, 0, 0.5); + overflow: auto; + box-sizing: border-box; + text-align: left; + font-family: Helvetica, sans-serif; + -webkit-overflow-scrolling: touch; } + +.reveal .speaker-notes.visible:not(:empty) { + display: block; } + +@media screen and (max-width: 1024px) { + .reveal .speaker-notes { + font-size: 14px; } } + +@media screen and (max-width: 600px) { + .reveal .speaker-notes { + width: 90%; + left: 5%; } } + 
+/********************************************* + * ZOOM PLUGIN + *********************************************/ +.zoomed .reveal *, +.zoomed .reveal *:before, +.zoomed .reveal *:after { + -webkit-backface-visibility: visible !important; + backface-visibility: visible !important; } + +.zoomed .reveal .progress, +.zoomed .reveal .controls { + opacity: 0; } + +.zoomed .reveal .roll span { + background: none; } + +.zoomed .reveal .roll span:after { + visibility: hidden; } diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/urls.txt b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/urls.txt new file mode 100644 index 0000000..0fb09de --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/css/urls.txt @@ -0,0 +1,12 @@ +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/css/reveal.css +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/css/print/pdf.css +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/css/print/paper.css +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/lib/css/zenburn.css +https://cdnjs.cloudflare.com/ajax/libs/prism/1.6.0/themes/prism.min.css +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/lib/js/head.min.js +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/js/reveal.js +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/plugin/markdown/marked.js +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/plugin/markdown/markdown.js +https://cdnjs.cloudflare.com/ajax/libs/prism/1.6.0/prism.min.js +https://cdnjs.cloudflare.com/ajax/libs/prism/1.6.0/components/prism-yaml.min.js +https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.4.1/plugin/highlight/highlight.js diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/envars.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/envars.php new file mode 100644 index 0000000..5f69117 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/envars.php @@ -0,0 +1,89 @@ +\n\t\t

\$_ENV

\n\t\n"; +foreach( $_ENV as $key => $value){ + $count++; + $linefront = "\t\n\t\t$count\n\t\t"; + $lineend = "\n\t\t$value\n\t\n"; + $outvars .= "$linefront\$_ENV['$key']$lineend"; +} + + +$outvars .= "\t\n\t\t

\$_SERVER

\n\t\n"; +foreach( $_SERVER as $key => $value){ + if ($key == "PHP_AUTH_PW"){ + $value = ereg_replace("[^.*]", "*", $value) . " <--- (this value displays in plaintext but has been obfusticated in this script)"; + } + $count++; + $linefront = "\t\n\t\t$count\n\t\t"; + $lineend = "\n\t\t$value\n\t\n"; + $outvars .= "$linefront\$_SERVER['$key']$lineend"; +} + + +$outvars .= "\t\n\t\t

\$_POST

\n\t\n"; +foreach( $_POST as $key => $value){ + $count++; + $linefront = "\t\n\t\t$count\n\t\t"; + $lineend = "\n\t\t$value\n\t\n"; + $outvars .= "$linefront\$_POST['$key']$lineend"; +} + + +$outvars .= "\t\n\t\t

\$_GET

\n\t\n"; +foreach( $_GET as $key => $value){ + $count++; + $linefront = "\t\n\t\t$count\n\t\t"; + $lineend = "\n\t\t$value\n\t\n"; + $outvars .= "$linefront\$_GET['$key']$lineend"; +} + + +?> + +
+ + + (this script)
+
+
+
+ +
+ +
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/favicon.ico b/roles/lightbulb-ansiblered-deck/files/deck-ansible/favicon.ico new file mode 100644 index 0000000..07cd505 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/favicon.ico differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/010_What_You_Will_Learn.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/010_What_You_Will_Learn.html new file mode 100644 index 0000000..934c864 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/010_What_You_Will_Learn.html @@ -0,0 +1,17 @@ +
+

What You Will Learn

+

Ansible handles a wide range of powerful automation tasks and has the flexibility to adapt to many environments and workflows. With Ansible, users can get up and running quickly and do real work.

+
    +
  • What is Ansible? / "The Ansible Way"
  • +
  • How Ansible Works and its Key Components
  • +
  • Automating with Ad-Hoc Commands
  • +
  • Writing Playbooks / Playbook Basics
  • +
  • Reuse and Redistribution of Ansible Content with Roles
  • +
+ +
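The last bullet above, reuse and redistribution with roles, reduces to a one-line reference in a playbook. A minimal sketch; the role name "common" and the host pattern are placeholders, not names from this material:

---
# Illustrative only: pulling reusable content into a play as a role.
- name: Apply shared baseline configuration
  hosts: all            # assumed host pattern
  roles:
    - common            # placeholder role name; roles are typically installed with ansible-galaxy

Ansible looks for the role under roles/common/ (tasks, handlers, templates, defaults and so on), which is what makes the content easy to share and redistribute.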
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/020_A_What_is_Ansible.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/020_A_What_is_Ansible.html new file mode 100644 index 0000000..5067657 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/020_A_What_is_Ansible.html @@ -0,0 +1,13 @@ +
+

What is Ansible?

+

It's a simple automation language that can perfectly describe an IT application infrastructure in Ansible Playbooks.

+

It's an automation engine that runs Ansible Playbooks.


+

Ansible is an automation platform: +

    +
  • Playbooks make up the automation language
  • +
  • The code base is the automation engine.
  • +
  • Ansible Tower manages existing automation
  • +
+
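The bullets above call playbooks the automation language. Purely as an illustration (the host pattern and the greeting are assumptions, not taken from the deck), a minimal playbook looks like this:

---
# A minimal sketch of a playbook: one play, two tasks, both using bundled modules.
- name: My first play
  hosts: all                  # assumed host pattern; any inventory group works
  tasks:
    - name: Check that the managed hosts respond
      ping:

    - name: Print a greeting from each host
      debug:
        msg: "Hello from {{ inventory_hostname }}"

The engine mentioned above is what runs it, for example ansible-playbook -i inventory hello.yml (the file names are assumptions).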

+ +
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/021_B_What_is_Tower.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/021_B_What_is_Tower.html new file mode 100644 index 0000000..03ed4a9 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/021_B_What_is_Tower.html @@ -0,0 +1,14 @@ +
+

What is Ansible Tower?

+

Ansible Tower is an enterprise framework for controlling, securing and managing your Ansible automation with a UI and RESTful API.

+ + +
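As a hedged sketch of the "UI and RESTful API" point above, the same automation can be driven over Tower's REST API, here with the bundled uri module. The hostname, credentials and job template ID are placeholders, not values from this material.

---
# Illustrative only: launch an assumed job template (ID 42) on an assumed Tower/AWX host.
- name: Drive Ansible Tower through its REST API
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Launch a job template
      uri:
        url: "https://tower.example.com/api/v2/job_templates/42/launch/"
        method: POST
        user: admin                       # placeholder credentials
        password: "{{ tower_password }}"  # supplied at runtime, e.g. via --extra-vars or a vault
        force_basic_auth: true
        status_code: 201                  # the API answers 201 Created when the job is queued
      register: launch

    - name: Confirm the API accepted the launch
      debug:
        var: launch.status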
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/030_WhyAnsible.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/030_WhyAnsible.html new file mode 100644 index 0000000..75bfdcb --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/030_WhyAnsible.html @@ -0,0 +1,38 @@ +
+

Why Ansible?    What Sets Ansible Apart?

+ +
+ +
+

Why Ansible?    What Sets Ansible Apart?

+ +
+ +
+

Why Ansible?    What Sets Ansible Apart?

+ +
+ +
+

Why Ansible?    What Sets Ansible Apart?

+ +
+ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/031_UseCasesIndividual.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/031_UseCasesIndividual.html new file mode 100644 index 0000000..0702c36 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/031_UseCasesIndividual.html @@ -0,0 +1,58 @@ +
+ +
+ +

+

+

+

+

+

+

+

+

+

+

+

+ +
+ + + + + + + + + + + + + + + + + +
+

Ansible Provides End To End Goodness

+
  + + + + + +
  + + + + + +
+ + + +
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/040_The_Ansible_Way.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/040_The_Ansible_Way.html new file mode 100644 index 0000000..c360b4d --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/040_The_Ansible_Way.html @@ -0,0 +1,146 @@ +
+

The Ansible Way

+

+ +

CROSS PLATFORM – Linux, Windows, UNIX
+ Agentless support for all major OS variants, physical, virtual, cloud and network

+

HUMAN READABLE – YAML
+ Perfectly describe and document every aspect of your application environment

+

PERFECT DESCRIPTION OF APPLICATION
+ Every change can be made by playbooks, ensuring everyone is on the same page

+

VERSION CONTROLLED
+ Playbooks are plain-text. Treat them like code in your existing version control.

+

DYNAMIC INVENTORIES
+ Capture all the servers 100% of the time, regardless of infrastructure, location, etc.

+

ORCHESTRATION THAT PLAYS WELL WITH OTHERS – HP SA, Puppet, Jenkins, RHNSS, etc. Homogenize existing environments by leveraging current toolsets and update mechanisms.

+
+ + +
+ +
+

The Ansible Way - Venture Across The Platform

+

+ +

CROSS PLATFORM – Linux, Windows, UNIX
+ Agentless support for all major OS variants, physical, virtual, cloud and network

+

HUMAN READABLE – YAML
+ Perfectly describe and document every aspect of your application environment

+

PERFECT DESCRIPTION OF APPLICATION
+ Every change can be made by playbooks, ensuring everyone is on the same page

+

VERSION CONTROLLED
+ Playbooks are plain-text. Treat them like code in your existing version control.

+

DYNAMIC INVENTORIES
+ Capture all the servers 100% of the time, regardless of infrastructure, location, etc.

+

ORCHESTRATION THAT PLAYS WELL WITH OTHERS – HP SA, Puppet, Jenkins, RHNSS, etc. Homogenize existing environments by leveraging current toolsets and update mechanisms.

+
+ + +
+ +
+

The Ansible Way - Communicate Clearly

+

+ +

CROSS PLATFORM – Linux, Windows, UNIX
+ Agentless support for all major OS variants, physical, virtual, cloud and network

+

HUMAN READABLE – YAML
+ Perfectly describe and document every aspect of your application environment

+

PERFECT DESCRIPTION OF APPLICATION
+ Every change can be made by playbooks, ensuring everyone is on the same page

+

VERSION CONTROLLED
+ Playbooks are plain-text. Treat them like code in your existing version control.

+

DYNAMIC INVENTORIES
+ Capture all the servers 100% of the time, regardless of infrastructure, location, etc.

+

ORCHESTRATION THAT PLAYS WELL WITH OTHERS – HP SA, Puppet, Jenkins, RHNSS, etc. Homogenize existing environments by leveraging current toolsets and update mechanisms.

+
+ + +
+ +
+

The Ansible Way - One Source To Rule Them All

+

+ +

CROSS PLATFORM – Linux, Windows, UNIX
+ Agentless support for all major OS variants, physical, virtual, cloud and network

+

HUMAN READABLE – YAML
+ Perfectly describe and document every aspect of your application environment

+

PERFECT DESCRIPTION OF APPLICATION
+ Every change can be made by playbooks, ensuring everyone is on the same page

+

VERSION CONTROLLED
+ Playbooks are plain-text. Treat them like code in your existing version control.

+

DYNAMIC INVENTORIES
+ Capture all the servers 100% of the time, regardless of infrastructure, location, etc.

+

ORCHESTRATION THAT PLAYS WELL WITH OTHERS – HP SA, Puppet, Jenkins, RHNSS, etc. Homogenize existing environments by leveraging current toolsets and update mechanisms.

+
+ + +
+ +
+

The Ansible Way - Version Controlled Goodness

+

+

CROSS PLATFORM – Linux, Windows, UNIX
+ Agentless support for all major OS variants, physical, virtual, cloud and network

+

HUMAN READABLE – YAML
+ Perfectly describe and document every aspect of your application environment

+

PERFECT DESCRIPTION OF APPLICATION
+ Every change can be made by playbooks, ensuring everyone is on the same page

+

VERSION CONTROLLED
+ Playbooks are plain-text. Treat them like code in your existing version control.

+

DYNAMIC INVENTORIES
+ Capture all the servers 100% of the time, regardless of infrastructure, location, etc.

+

ORCHESTRATION THAT PLAYS WELL WITH OTHERS – HP SA, Puppet, Jenkins, RHNSS, etc. Homogenize existing environments by leveraging current toolsets and update mechanisms.

+
+ + +
+ +
+

The Ansible Way - Dynamically Utilize Inventories

+

+ +

CROSS PLATFORM – Linux, Windows, UNIX
+ Agentless support for all major OS variants, physical, virtual, cloud and network

+

HUMAN READABLE – YAML
+ Perfectly describe and document every aspect of your application environment

+

PERFECT DESCRIPTION OF APPLICATION
+ Every change can be made by playbooks, ensuring everyone is on the same page

+

VERSION CONTROLLED
+ Playbooks are plain-text. Treat them like code in your existing version control.

+

DYNAMIC INVENTORIES
+ Capture all the servers 100% of the time, regardless of infrastructure, location, etc.

+

ORCHESTRATION THAT PLAYS WELL WITH OTHERS – HP SA, Puppet, Jenkins, RHNSS, etc. Homogenize existing environments by leveraging current toolsets and update mechanisms.

+
+ + +
+ + +
+

The Ansible Way - Play Nicely With Others

+

+ +

CROSS PLATFORM – Linux, Windows, UNIX
+ Agentless support for all major OS variants, physical, virtual, cloud and network

+

HUMAN READABLE – YAML
+ Perfectly describe and document every aspect of your application environment

+

PERFECT DESCRIPTION OF APPLICATION
+ Every change can be made by playbooks, ensuring everyone is on the same page

+

VERSION CONTROLLED
+ Playbooks are plain-text. Treat them like code in your existing version control.

+

DYNAMIC INVENTORIES
+ Capture all the servers 100% of the time, regardless of infrastructure, location, etc.

+

ORCHESTRATION THAT PLAYS WELL WITH OTHERS – HP SA, Puppet, Jenkins, RHNSS, etc. Homogenize existing environments by leveraging current toolsets and update mechanisms.

+
+ + +
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/050_Batteries_Included.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/050_Batteries_Included.html new file mode 100644 index 0000000..f00baca --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/050_Batteries_Included.html @@ -0,0 +1,63 @@ +
+

Batteries Included

+

Ansible comes bundled with hundreds of modules for a wide variety of automation tasks

+ +
+

+ +

+
+ + + + + +
+

+ +

+
+ + +
+

+ +

+
+ + + + +
+
+
    +
  • cloud
  • +
  • containers
  • +
  • database
  • +
  • files
  • +
  • messaging
  • +
  • monitoring
  • +
  • network
  • +
+
+
+
    +
  • notifications
  • +
  • packaging
  • +
  • source control
  • +
  • system
  • +
  • testing
  • +
  • utilities
  • +
  • web infrastructure
  • +
+
+
+
+
+ +
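To make the categories listed above concrete, here is a hedged sketch that takes one bundled module from several of them (packaging, files, system, utilities); the package, template and service names are assumptions for a RHEL-family host:

---
# Illustrative only: one task from several of the module categories listed above.
- name: Batteries-included sampler
  hosts: all
  become: true
  tasks:
    - name: Packaging - install the NTP client    # packaging category (yum)
      yum:
        name: chrony
        state: present

    - name: Files - deploy its configuration      # files category (template)
      template:
        src: chrony.conf.j2                       # assumed template shipped with the playbook
        dest: /etc/chrony.conf

    - name: System - start and enable the service  # system category (service)
      service:
        name: chronyd
        state: started
        enabled: true

    - name: Utilities - report the result           # utilities category (debug)
      debug:
        msg: "chrony installed, configured and running"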
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/060_The_Language_of_DevOps.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/060_The_Language_of_DevOps.html new file mode 100644 index 0000000..34b29f6 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/060_The_Language_of_DevOps.html @@ -0,0 +1,14 @@ +
+

Ansible: The Language of DevOps

+
+ +

COMMUNICATION IS THE KEY TO DEVOPS.

+

Ansible is the first automation language
that can be read and written across IT.

+

Ansible is the only automation engine
that can automate the entire application lifecycle
and continuous delivery pipeline.

+
+ +
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/090_A_UseCasesIntro.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/090_A_UseCasesIntro.html new file mode 100644 index 0000000..69b8b4a --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/090_A_UseCasesIntro.html @@ -0,0 +1,19 @@ +
+

Ansible: Endless Use Cases

+

Ansible's versatility makes it a fit for a virtually endless range of use cases.

+
  • ✾  Ansible is NOT just a Config Management Tool.
  • +
  • ☇  Ansible is NOT just an Application Deployment Tool.
  • +
  • ☁  Ansible is NOT just a Provisioning Tool.
  • +
  • ☡  Ansible is NOT just a CI/CD Tool.
  • +
  • ✎  Ansible is NOT just an Audit and Compliance Tool.
  • +
  • ➰   Ansible is NOT just an Orchestration Tool.
  • +

    Ansible is a powerful automation engine...

    +

    with strong use cases for all of the above tasks.

    +

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/_061_TowerAndDevOps.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/_061_TowerAndDevOps.html new file mode 100644 index 0000000..60c85f2 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/000_Intro_and_Preamble/_061_TowerAndDevOps.html @@ -0,0 +1,17 @@ +
    + +
    +
    +

    Practical Workflows

    +

    Deployment and CICD Pipelines

    +
    +
    + + + + + + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/000__RedIntro.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/000__RedIntro.html new file mode 100644 index 0000000..61add93 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/000__RedIntro.html @@ -0,0 +1,11 @@ +
    +
    +

    PEOPLE LOVE ANSIBLE.

    + +

    Ansible is taking the world by storm with unbelievable popularity.

    + +
    +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/065_Meetups.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/065_Meetups.html new file mode 100644 index 0000000..9133518 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/065_Meetups.html @@ -0,0 +1,5 @@ +
    + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/066_AnsibleFest.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/066_AnsibleFest.html new file mode 100644 index 0000000..9a48789 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/066_AnsibleFest.html @@ -0,0 +1,34 @@ +
    + + +
    +
    +

    +

    Stars on GitHub

    +
    +
    + +
    +
    +

    +

    Ansible Modules

    +
    +
    + + +
    +
    +

    +

    Downloads Per Month

    +
    +
    + + + + + + + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/067_GitContributions.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/067_GitContributions.html new file mode 100644 index 0000000..e634945 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/067_GitContributions.html @@ -0,0 +1,5 @@ +
    + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/068_MostSearched.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/068_MostSearched.html new file mode 100644 index 0000000..79bfe60 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/068_MostSearched.html @@ -0,0 +1,5 @@ +
    + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/_090_A_UseCasesIntro.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/_090_A_UseCasesIntro.html new file mode 100644 index 0000000..d86bab7 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/010_Promoting_Ansible/_090_A_UseCasesIntro.html @@ -0,0 +1,19 @@ +
    +

    Ansible: Endless Use Cases

    +

Ansible's versatility makes it a fit for a virtually endless range of use cases.

    +
  • ✾  Ansible is NOT just a Config Management Tool.
  • +
  • ☇  Ansible is NOT just an Application Deployment Tool.
  • +
  • ☁  Ansible is NOT just a Provisioning Tool.
  • +
  • ☡  Ansible is NOT just a CI/CD Tool.
  • +
  • ✎  Ansible is NOT just an Audit and Compliance Tool.
  • +
  • ➰   Ansible is NOT just an Orchestration Tool.
  • +

    Ansible is a powerful automation engine...

    +

    with strong use cases for all of the above tasks.

    +

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/000_RedIntro-InstallingAnsible.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/000_RedIntro-InstallingAnsible.html new file mode 100644 index 0000000..fbce0e7 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/000_RedIntro-InstallingAnsible.html @@ -0,0 +1,12 @@ +
    + +

    INSTALLING ANSIBLE

    + +

    It Could Not Be Simpler.

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/01_InstallingAnsible.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/01_InstallingAnsible.html new file mode 100644 index 0000000..886f02a --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/01_InstallingAnsible.html @@ -0,0 +1,26 @@ +
    +
    +
    +
    Ansible Terminal
    +
    +
    +# RHEL "extras" repo or CentOS EPEL:
    +$ sudo yum install ansible
    +
    +# you will need the PPA repo configured on
    +# Debian or Ubuntu
    +$ sudo apt-get install ansible
    +
    +# from your MacBook:
    +$ brew install ansible
    +
+# for the latest release, install from PyPI:
    +$ sudo pip install ansible
    +    
    +
    + + + + +
    + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/02_InstallingAnsibleVideo.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/02_InstallingAnsibleVideo.html new file mode 100644 index 0000000..c95a0cb --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/011_Installing_Ansible/02_InstallingAnsibleVideo.html @@ -0,0 +1,22 @@ +
    +

    Simple: Installing Ansible

    + + + + + + + + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/000__Red_Slide_HowANsibleWorks.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/000__Red_Slide_HowANsibleWorks.html new file mode 100644 index 0000000..3b597a9 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/000__Red_Slide_HowANsibleWorks.html @@ -0,0 +1,12 @@ +
    + +

    HOW ANSIBLE WORKS

    + +

    Let's Take A Look At The Technology

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/014__How_Ansible_Works.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/014__How_Ansible_Works.html new file mode 100644 index 0000000..7f4b21e --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/014__How_Ansible_Works.html @@ -0,0 +1,10 @@ +
    + +

    How Ansible Works

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/015__Plays_amp_Playbooks.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/015__Plays_amp_Playbooks.html new file mode 100644 index 0000000..18dd970 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/015__Plays_amp_Playbooks.html @@ -0,0 +1,11 @@ +
    + +

    Plays & Playbooks

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/016__Modules_amp_Tasks.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/016__Modules_amp_Tasks.html new file mode 100644 index 0000000..84f6735 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/016__Modules_amp_Tasks.html @@ -0,0 +1,12 @@ +
    + +

    Modules & Tasks

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/017__Plugins.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/017__Plugins.html new file mode 100644 index 0000000..0daa619 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/017__Plugins.html @@ -0,0 +1,19 @@ +
    + +

    Plugins

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/018__Inventory.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/018__Inventory.html new file mode 100644 index 0000000..3fc061c --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/018__Inventory.html @@ -0,0 +1,14 @@ +
    + +

    Inventory

    + + +
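As an illustration only of what an inventory contains (the groups and hosts below are made up), a small static inventory can also be written in YAML:

---
# Illustrative only: a static inventory in YAML format with made-up hosts and groups.
all:
  children:
    web:
      hosts:
        web1.example.com:
        web2.example.com:
    db:
      hosts:
        db1.example.com:
          ansible_user: admin    # per-host variable
  vars:
    ansible_become: true         # variable applied to every host

Pass it with -i inventory.yml; group names such as web can then be used as the hosts: pattern of a play.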
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/019__Inventory.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/019__Inventory.html new file mode 100644 index 0000000..93dc48b --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/020_How_Ansible_Works/019__Inventory.html @@ -0,0 +1,11 @@ +
    + +

    Inventory

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/000_RedIntro-Modules.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/000_RedIntro-Modules.html new file mode 100644 index 0000000..60020c4 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/000_RedIntro-Modules.html @@ -0,0 +1,12 @@ +
    + +

    MODULES

    + +

    We leverage modules within our playbooks to do the heavy lifting.

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/100_What-Are-Modules.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/100_What-Are-Modules.html new file mode 100644 index 0000000..c04eeb6 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/100_What-Are-Modules.html @@ -0,0 +1,39 @@ +
    + +

    Modules

    +

    Modules are bits of code transferred to the target system and executed to satisfy the task declaration.

    +
    +
    +
      +
    • apt/yum
    • +
    • copy
    • +
    • file
    • +
    • get_url
    • +
    • git
    • +
    • ping
    • +
    • debug
    • +
    +
    +
    +
      +
    • service
    • +
    • synchronize
    • +
    • template
    • +
    • uri
    • +
    • user
    • +
    • wait_for
    • +
    • assert
    • +
    +
    +
    +
    +
    + +
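As the slide above puts it, a module is code shipped to the target and executed to satisfy a task declaration. A hedged sketch of two task declarations (an excerpt from a task list; the file path is an assumption) and of looking at what the module reports back:

# Illustrative only: an excerpt from a task list, not a complete playbook.
- name: Ensure a marker file exists     # human-readable task name
  file:                                 # module transferred to and executed on the target
    path: /tmp/ansible_was_here         # module arguments; the path is an assumption
    state: touch
  register: result                      # capture the JSON the module returns

- name: Show what the module reported back
  debug:
    var: result                         # includes keys such as "changed"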
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/210_Modules_ansible-doc-CLI-SCROLL.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/210_Modules_ansible-doc-CLI-SCROLL.html new file mode 100644 index 0000000..644465a --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/210_Modules_ansible-doc-CLI-SCROLL.html @@ -0,0 +1,1876 @@ +
    +

    Modules Documentation

    +

Lists every module available on the system

    + + +
    + +
    + ansible-doc -l
    +Provides a long list of all available modules
    +
    +
    +a10_server Manage A10 Networks AX... +a10_server_axapi3 Manage A10 Networks AX... +a10_service_group Manage A10 Networks AX... +a10_virtual_server Manage A10 Networks AX... +accelerate Enable accelerated mod... +aci_aaa_user Manage AAA users (aaa:... +aci_aaa_user_certificate Manage AAA user certif... +aci_access_port_to_interface_policy_leaf_profile Manage Fabric interfac... +aci_aep Manage attachable Acce... +aci_aep_to_domain Bind AEPs to Physical ... +aci_ap Manage top level Appli... +aci_bd Manage Bridge Domains ... +aci_bd_subnet Manage Subnets (fv:Sub... +aci_bd_to_l3out Bind Bridge Domain to ... +aci_config_rollback Provides rollback and ... +aci_config_snapshot Manage Config Snapshot... +aci_contract Manage contract resour... +aci_contract_subject Manage initial Contrac... +aci_contract_subject_to_filter Bind Contract Subjects... +aci_domain Manage physical, virtu... +aci_domain_to_encap_pool Bind Domain to Encap P... +aci_domain_to_vlan_pool Bind Domain to VLAN Po... +aci_encap_pool Manage encap pools (fv... +aci_encap_pool_range Manage encap ranges as... +aci_epg Manage End Point Group... +aci_epg_monitoring_policy Manage monitoring poli... +aci_epg_to_contract Bind EPGs to Contracts... +aci_epg_to_domain Bind EPGs to Domains (... +aci_fabric_node Manage Fabric Node Mem... +aci_filter Manages top level filt... +aci_filter_entry Manage filter entries ... +aci_firmware_source Manage firmware image ... +aci_interface_policy_fc Manage Fibre Channel i... +aci_interface_policy_l2 Manage Layer 2 interfa... +aci_interface_policy_leaf_policy_group Manage fabric interfac... +aci_interface_policy_leaf_profile Manage fabric interfac... +aci_interface_policy_lldp Manage LLDP interface ... +aci_interface_policy_mcp Manage MCP interface p... +aci_interface_policy_port_channel Manage port channel in... +aci_interface_policy_port_security Manage port security (... +aci_interface_selector_to_switch_policy_leaf_profile Bind interface selecto... +aci_l3out Manage Layer 3 Outside... +aci_l3out_route_tag_policy Manage route tag polic... +aci_rest Direct access to the C... +aci_static_binding_to_epg Bind static paths to E... +aci_switch_leaf_selector Bind leaf selectors to... +aci_switch_policy_leaf_profile Manage switch policy l... +aci_switch_policy_vpc_protection_group Manage switch policy e... +aci_taboo_contract Manage taboo contracts... +aci_tenant Manage tenants (fv:Ten... +aci_tenant_action_rule_profile Manage action rule pro... +aci_tenant_ep_retention_policy Manage End Point (EP) ... +aci_tenant_span_dst_group Manage SPAN destinatio... +aci_tenant_span_src_group Manage SPAN source gro... +aci_tenant_span_src_group_to_dst_group Bind SPAN source group... +aci_vlan_pool Manage VLAN pools (fvn... +aci_vlan_pool_encap_block Manage encap blocks as... +aci_vrf Manage contexts or VRF... +acl Sets and retrieves fil... +acme_account Create, modify or dele... +acme_certificate Create SSL certificate... +add_host add a host (and altern... +airbrake_deployment Notify airbrake about ... +aireos_command Run commands on remote... +aireos_config Manage Cisco WLC confi... +aix_inittab Manages the inittab on... +aix_lvol Configure AIX LVM logi... +alternatives Manages alternative pr... +aos_asn_pool Manage AOS ASN Pool +aos_blueprint Manage AOS blueprint i... +aos_blueprint_param Manage AOS blueprint p... +aos_blueprint_virtnet Manage AOS blueprint p... +aos_device Manage Devices on AOS ... +aos_external_router Manage AOS External Ro... 
+aos_ip_pool Manage AOS IP Pool +aos_logical_device Manage AOS Logical Dev... +aos_logical_device_map Manage AOS Logical Dev... +aos_login Login to AOS server fo... +aos_rack_type Manage AOS Rack Type +aos_template Manage AOS Template +apache2_mod_proxy Set and/or get members... +apache2_module Enables/disables a mod... +apk Manages apk packages +apt Manages apt-packages +apt_key Add or remove an apt k... +apt_repository Add and remove APT rep... +apt_rpm apt_rpm package manage... +archive Creates a compressed a... +aruba_command Run commands on remote... +aruba_config Manage Aruba configura... +asa_acl Manage access-lists on... +asa_command Run arbitrary commands... +asa_config Manage configuration s... +assemble Assembles a configurat... +assert Asserts given expressi... +async_status Obtain status of async... +at Schedule the execution... +atomic_container Manage the containers ... +atomic_host Manage the atomic host... +atomic_image Manage the container i... +authorized_key Adds or removes an SSH... +avi_actiongroupconfig Module for setup of Ac... +avi_alertconfig Module for setup of Al... +avi_alertemailconfig Module for setup of Al... +avi_alertscriptconfig Module for setup of Al... +avi_alertsyslogconfig Module for setup of Al... +avi_analyticsprofile Module for setup of An... +avi_api_session Avi API Module +avi_api_version Avi API Version Module +avi_applicationpersistenceprofile Module for setup of Ap... +avi_applicationprofile Module for setup of Ap... +avi_authprofile Module for setup of Au... +avi_autoscalelaunchconfig Module for setup of Au... +avi_backup Module for setup of Ba... +avi_backupconfiguration Module for setup of Ba... +avi_certificatemanagementprofile Module for setup of Ce... +avi_cloud Module for setup of Cl... +avi_cloudconnectoruser Module for setup of Cl... +avi_cloudproperties Module for setup of Cl... +avi_cluster Module for setup of Cl... +avi_clusterclouddetails Module for setup of Cl... +avi_controllerproperties Module for setup of Co... +avi_customipamdnsprofile Module for setup of Cu... +avi_dnspolicy Module for setup of Dn... +avi_errorpagebody Module for setup of Er... +avi_errorpageprofile Module for setup of Er... +avi_gslb Module for setup of Gs... +avi_gslbapplicationpersistenceprofile Module for setup of Gs... +avi_gslbgeodbprofile Module for setup of Gs... +avi_gslbhealthmonitor Module for setup of Gs... +avi_gslbservice Module for setup of Gs... +avi_gslbservice_patch_member Avi API Module +avi_hardwaresecuritymodulegroup Module for setup of Ha... +avi_healthmonitor Module for setup of He... +avi_httppolicyset Module for setup of HT... +avi_ipaddrgroup Module for setup of Ip... +avi_ipamdnsproviderprofile Module for setup of Ip... +avi_l4policyset Module for setup of L4... +avi_microservicegroup Module for setup of Mi... +avi_network Module for setup of Ne... +avi_networkprofile Module for setup of Ne... +avi_networksecuritypolicy Module for setup of Ne... +avi_pkiprofile Module for setup of PK... +avi_pool Module for setup of Po... +avi_poolgroup Module for setup of Po... +avi_poolgroupdeploymentpolicy Module for setup of Po... +avi_prioritylabels Module for setup of Pr... +avi_role Module for setup of Ro... +avi_scheduler Module for setup of Sc... +avi_seproperties Module for setup of Se... +avi_serverautoscalepolicy Module for setup of Se... +avi_serviceengine Module for setup of Se... +avi_serviceenginegroup Module for setup of Se... +avi_snmptrapprofile Module for setup of Sn... +avi_sslkeyandcertificate Module for setup of SS... 
+avi_sslprofile Module for setup of SS... +avi_stringgroup Module for setup of St... +avi_systemconfiguration Module for setup of Sy... +avi_tenant Module for setup of Te... +avi_trafficcloneprofile Module for setup of Tr... +avi_useraccount Avi UserAccount Module +avi_useraccountprofile Module for setup of Us... +avi_virtualservice Module for setup of Vi... +avi_vrfcontext Module for setup of Vr... +avi_vsdatascriptset Module for setup of VS... +avi_vsvip Module for setup of Vs... +avi_wafpolicy Module for setup of Wa... +avi_wafprofile Module for setup of Wa... +avi_webhook Module for setup of We... +awall Manage awall policies +aws_acm_facts Retrieve certificate f... +aws_api_gateway Manage AWS API Gateway... +aws_application_scaling_policy Manage Application Aut... +aws_az_facts Gather facts about ava... +aws_batch_compute_environment Manage AWS Batch Compu... +aws_batch_job_definition Manage AWS Batch Job D... +aws_batch_job_queue Manage AWS Batch Job Q... +aws_caller_facts Get facts about the us... +aws_config_aggregation_authorization Manage cross-account A... +aws_config_aggregator Manage AWS Config aggr... +aws_config_delivery_channel Manage AWS Config deli... +aws_config_recorder Manage AWS Config Reco... +aws_config_rule Manage AWS Config reso... +aws_direct_connect_connection Creates, deletes, modi... +aws_direct_connect_gateway Manage AWS Direct Conn... +aws_direct_connect_link_aggregation_group Manage Direct Connect ... +aws_direct_connect_virtual_interface Manage Direct Connect ... +aws_elasticbeanstalk_app create, update, and de... +aws_glue_connection Manage an AWS Glue con... +aws_glue_job Manage an AWS Glue job +aws_inspector_target Create, Update and Del... +aws_kms Perform various KMS ma... +aws_kms_facts Gather facts about AWS... +aws_region_facts Gather facts about AWS... +aws_s3 manage objects in S3. +aws_s3_bucket_facts Lists S3 buckets in AW... +aws_s3_cors Manage CORS for S3 buc... +aws_ses_identity Manages SES email and ... +aws_ses_identity_policy Manages SES sending au... +aws_sgw_facts Fetch AWS Storage Gate... +aws_ssm_parameter_store Manage key-value pairs... +aws_waf_condition create and delete WAF ... +aws_waf_facts Retrieve facts for WAF... +aws_waf_rule create and delete WAF ... +aws_waf_web_acl create and delete WAF ... +azure create or terminate a ... +azure_rm_acs Manage an Azure Contai... +azure_rm_aks Manage a managed Azure... +azure_rm_aks_facts Get Azure Kubernetes S... +azure_rm_availabilityset Manage Azure availabil... +azure_rm_availabilityset_facts Get availability set f... +azure_rm_containerinstance Manage an Azure Contai... +azure_rm_containerregistry Manage an Azure Contai... +azure_rm_deployment Create or destroy Azur... +azure_rm_dnsrecordset Create, delete and upd... +azure_rm_dnsrecordset_facts Get DNS Record Set fac... +azure_rm_dnszone Manage Azure DNS zones... +azure_rm_dnszone_facts Get DNS zone facts. +azure_rm_functionapp Manage Azure Function ... +azure_rm_functionapp_facts Get Azure Function App... +azure_rm_image Manage Azure image. +azure_rm_keyvault Manage Key Vault insta... +azure_rm_keyvaultkey Use Azure KeyVault key... +azure_rm_keyvaultsecret Use Azure KeyVault Sec... +azure_rm_loadbalancer Manage Azure load bala... +azure_rm_loadbalancer_facts Get load balancer fact... +azure_rm_managed_disk Manage Azure Manage Di... +azure_rm_managed_disk_facts Get managed disk facts... +azure_rm_mysqldatabase Manage MySQL Database ... +azure_rm_mysqlserver Manage MySQL Server in... +azure_rm_networkinterface Manage Azure network i... 
+azure_rm_networkinterface_facts Get network interface ... +azure_rm_postgresqldatabase Manage PostgreSQL Data... +azure_rm_postgresqlserver Manage PostgreSQL Serv... +azure_rm_publicipaddress Manage Azure Public IP... +azure_rm_publicipaddress_facts Get public IP facts. +azure_rm_resource Create any Azure resou... +azure_rm_resource_facts Generic facts of Azure... +azure_rm_resourcegroup Manage Azure resource ... +azure_rm_resourcegroup_facts Get resource group fac... +azure_rm_securitygroup Manage Azure network s... +azure_rm_securitygroup_facts Get security group fac... +azure_rm_sqldatabase Manage SQL Database in... +azure_rm_sqlserver Manage SQL Server inst... +azure_rm_sqlserver_facts Get SQL Server facts. +azure_rm_storageaccount Manage Azure storage a... +azure_rm_storageaccount_facts Get storage account fa... +azure_rm_storageblob Manage blob containers... +azure_rm_subnet Manage Azure subnets. +azure_rm_virtualmachine Manage Azure virtual m... +azure_rm_virtualmachine_extension Managed Azure Virtual ... +azure_rm_virtualmachine_scaleset Manage Azure virtual m... +azure_rm_virtualmachine_scaleset_facts Get Virtual Machine Sc... +azure_rm_virtualmachineimage_facts Get virtual machine im... +azure_rm_virtualnetwork Manage Azure virtual n... +azure_rm_virtualnetwork_facts Get virtual network fa... +bcf_switch Create and remove a bc... +beadm Manage ZFS boot enviro... +bearychat Send BearyChat notific... +bigip_asm_policy Manage BIG-IP ASM poli... +bigip_command Run arbitrary command ... +bigip_config Manage BIG-IP configur... +bigip_configsync_action Perform different acti... +bigip_data_group Manage data groups on ... +bigip_device_connectivity Manages device IP conf... +bigip_device_dns Manage BIG-IP device D... +bigip_device_group Manage device groups o... +bigip_device_group_member Manages members in a d... +bigip_device_httpd Manage HTTPD related s... +bigip_device_license Manage license install... +bigip_device_ntp Manage NTP servers on ... +bigip_device_sshd Manage the SSHD settin... +bigip_device_trust Manage the trust relat... +bigip_facts Collect facts from F5 ... +bigip_gtm_datacenter Manage Datacenter conf... +bigip_gtm_facts Collect facts from F5 ... +bigip_gtm_global Manages global GTM set... +bigip_gtm_monitor_bigip Manages F5 BIG-IP GTM ... +bigip_gtm_monitor_external Manages external GTM m... +bigip_gtm_monitor_firepass Manages F5 BIG-IP GTM ... +bigip_gtm_monitor_http Manages F5 BIG-IP GTM ... +bigip_gtm_monitor_https Manages F5 BIG-IP GTM ... +bigip_gtm_monitor_tcp Manages F5 BIG-IP GTM ... +bigip_gtm_monitor_tcp_half_open Manages F5 BIG-IP GTM ... +bigip_gtm_pool Manages F5 BIG-IP GTM ... +bigip_gtm_pool_member Manage GTM pool member... +bigip_gtm_server Manages F5 BIG-IP GTM ... +bigip_gtm_virtual_server Manages F5 BIG-IP GTM ... +bigip_gtm_wide_ip Manages F5 BIG-IP GTM ... +bigip_hostname Manage the hostname of... +bigip_iapp_service Manages TCL iApp servi... +bigip_iapp_template Manages TCL iApp templ... +bigip_iapplx_package Manages Javascript iAp... +bigip_irule Manage iRules across d... +bigip_log_destination Manages log destinatio... +bigip_log_publisher Manages log publishers... +bigip_management_route Manage system manageme... +bigip_monitor_external Manages external LTM m... +bigip_monitor_http Manages F5 BIG-IP LTM ... +bigip_monitor_https Manages F5 BIG-IP LTM ... +bigip_monitor_snmp_dca Manages BIG-IP SNMP da... +bigip_monitor_tcp Manages F5 BIG-IP LTM ... +bigip_monitor_tcp_echo Manages F5 BIG-IP LTM ... 
+bigip_monitor_tcp_half_open Manages F5 BIG-IP LTM ... +bigip_monitor_udp Manages F5 BIG-IP LTM ... +bigip_node Manages F5 BIG-IP LTM ... +bigip_partition Manage BIG-IP partitio... +bigip_policy Manage general policy ... +bigip_policy_rule Manage LTM policy rule... +bigip_pool Manages F5 BIG-IP LTM ... +bigip_pool_member Manages F5 BIG-IP LTM ... +bigip_profile_client_ssl Manages client SSL pro... +bigip_profile_dns Manage DNS profiles on... +bigip_profile_tcp Manage TCP profiles on... +bigip_profile_udp Manage UDP profiles on... +bigip_provision Manage BIG-IP module p... +bigip_qkview Manage qkviews on the ... +bigip_remote_syslog Manipulate remote sysl... +bigip_routedomain Manage route domains o... +bigip_security_address_list Manage address lists o... +bigip_security_port_list Manage port lists on B... +bigip_selfip Manage Self-IPs on a B... +bigip_service_policy Manages service polici... +bigip_smtp Manages SMTP settings ... +bigip_snat_pool Manage SNAT pools on a... +bigip_snmp Manipulate general SNM... +bigip_snmp_community Manages SNMP communiti... +bigip_snmp_trap Manipulate SNMP trap i... +bigip_software_update Manage the software up... +bigip_ssl_certificate Import/Delete certific... +bigip_ssl_key Import/Delete SSL keys... +bigip_static_route Manipulate static rout... +bigip_sys_db Manage BIG-IP system d... +bigip_sys_global Manage BIG-IP global s... +bigip_timer_policy Manage timer policies ... +bigip_traffic_group Manages traffic groups... +bigip_trunk Manage trunks on a BIG... +bigip_ucs Manage upload, install... +bigip_ucs_fetch Fetches a UCS file fro... +bigip_user Manage user accounts a... +bigip_vcmp_guest Manages vCMP guests on... +bigip_virtual_address Manage LTM virtual add... +bigip_virtual_server Manage LTM virtual ser... +bigip_vlan Manage VLANs on a BIG-... +bigip_wait Wait for a BIG-IP cond... +bigiq_application_fasthttp Manages BIG-IQ FastHTT... +bigiq_application_fastl4_tcp Manages BIG-IQ FastL4 ... +bigiq_application_fastl4_udp Manages BIG-IQ FastL4 ... +bigiq_application_http Manages BIG-IQ HTTP ap... +bigiq_application_https_offload Manages BIG-IQ HTTPS o... +bigiq_application_https_waf Manages BIG-IQ HTTPS W... +bigiq_regkey_license Manages licenses in a ... +bigiq_regkey_license_assignment Manage regkey license ... +bigiq_regkey_pool Manages registration k... +bigiq_utility_license Manage utility license... +bigmon_chain Create and remove a bi... +bigmon_policy Create and remove a bi... +bigpanda Notify BigPanda about ... +blockinfile Insert/update/remove a... +bower Manage bower packages ... +bundler Manage Ruby Gem depend... +bzr Deploy software (or fi... +campfire Send a message to Camp... +capabilities Manage Linux capabilit... +catapult Send a sms / mms using... +ce_aaa_server Manages AAA server glo... +ce_aaa_server_host Manages AAA server hos... +ce_acl Manages base ACL confi... +ce_acl_advance Manages advanced ACL c... +ce_acl_interface Manages applying ACLs ... +ce_bfd_global Manages BFD global con... +ce_bfd_session Manages BFD session co... +ce_bfd_view Manages BFD session vi... +ce_bgp Manages BGP configurat... +ce_bgp_af Manages BGP Address-fa... +ce_bgp_neighbor Manages BGP peer confi... +ce_bgp_neighbor_af Manages BGP neighbor A... +ce_command Run arbitrary command ... +ce_config Manage Huawei CloudEng... +ce_dldp Manages global DLDP co... +ce_dldp_interface Manages interface DLDP... +ce_eth_trunk Manages Eth-Trunk inte... +ce_evpn_bd_vni Manages EVPN VXLAN Net... +ce_evpn_bgp Manages BGP EVPN confi... 
+ce_evpn_bgp_rr Manages RR for the VXL... +ce_evpn_global Manages global configu... +ce_facts Gets facts about HUAWE... +ce_file_copy Copy a file to a remot... +ce_info_center_debug Manages information ce... +ce_info_center_global Manages outputting log... +ce_info_center_log Manages information ce... +ce_info_center_trap Manages information ce... +ce_interface Manages physical attri... +ce_interface_ospf Manages configuration ... +ce_ip_interface Manages L3 attributes ... +ce_link_status Get interface link sta... +ce_mlag_config Manages MLAG configura... +ce_mlag_interface Manages MLAG interface... +ce_mtu Manages MTU settings o... +ce_netconf Run an arbitrary netco... +ce_netstream_aging Manages timeout mode o... +ce_netstream_export Manages netstream expo... +ce_netstream_global Manages global paramet... +ce_netstream_template Manages NetStream temp... +ce_ntp Manages core NTP confi... +ce_ntp_auth Manages NTP authentica... +ce_ospf Manages configuration ... +ce_ospf_vrf Manages configuration ... +ce_reboot Reboot a HUAWEI CloudE... +ce_rollback Set a checkpoint or ro... +ce_sflow Manages sFlow configur... +ce_snmp_community Manages SNMP community... +ce_snmp_contact Manages SNMP contact c... +ce_snmp_location Manages SNMP location ... +ce_snmp_target_host Manages SNMP target ho... +ce_snmp_traps Manages SNMP traps con... +ce_snmp_user Manages SNMP user conf... +ce_startup Manages a system start... +ce_static_route Manages static route c... +ce_stp Manages STP configurat... +ce_switchport Manages Layer 2 switch... +ce_vlan Manages VLAN resources... +ce_vrf Manages VPN instance o... +ce_vrf_af Manages VPN instance a... +ce_vrf_interface Manages interface spec... +ce_vrrp Manages VRRP interface... +ce_vxlan_arp Manages ARP attributes... +ce_vxlan_gateway Manages gateway for th... +ce_vxlan_global Manages global attribu... +ce_vxlan_tunnel Manages VXLAN tunnel c... +ce_vxlan_vap Manages VXLAN virtual ... +circonus_annotation create an annotation i... +cisco_spark Send a message to a Ci... +cl_bond Configures a bond port... +cl_bridge Configures a bridge po... +cl_img_install Install a different Cu... +cl_interface Configures a front pan... +cl_interface_policy Configure interface en... +cl_license Install licenses for C... +cl_ports Configure Cumulus Swit... +clc_aa_policy Create or Delete Anti ... +clc_alert_policy Create or Delete Alert... +clc_blueprint_package deploys a blue print p... +clc_firewall_policy Create/delete/update f... +clc_group Create/delete Server G... +clc_loadbalancer Create, Delete shared ... +clc_modify_server modify servers in Cent... +clc_publicip Add and Delete public ... +clc_server Create, Delete, Start ... +clc_server_snapshot Create, Delete and Res... +cloud_init_data_facts Retrieve facts of clou... +cloudflare_dns manage Cloudflare DNS ... +cloudformation Create or delete an AW... +cloudformation_facts Obtain facts about an ... +cloudfront_distribution create, update and del... +cloudfront_facts Obtain facts about an ... +cloudfront_invalidation create invalidations f... +cloudfront_origin_access_identity create, update and del... +cloudscale_floating_ip Manages floating IPs o... +cloudscale_server Manages servers on the... +cloudtrail manage CloudTrail crea... +cloudwatchevent_rule Manage CloudWatch Even... +cloudwatchlogs_log_group create or delete log_g... +cloudwatchlogs_log_group_facts get facts about log_gr... +cnos_backup Backup the current run... +cnos_bgp Manage BGP resources a... +cnos_command Run arbitrary commands... 
+cnos_conditional_command Execute a single comma... +cnos_conditional_template Manage switch configur... +cnos_config Manage Lenovo CNOS con... +cnos_factory Reset the switch's sta... +cnos_facts Collect facts from rem... +cnos_image Perform firmware upgra... +cnos_interface Manage interface confi... +cnos_portchannel Manage portchannel (po... +cnos_reload Perform switch restart... +cnos_rollback Roll back the running ... +cnos_save Save the running confi... +cnos_showrun Collect the current ru... +cnos_template Manage switch configur... +cnos_vlag Manage VLAG resources ... +cnos_vlan Manage VLAN resources ... +command Executes a command on ... +composer Dependency Manager for... +consul Add, modify & delete s... +consul_acl Manipulate Consul ACL ... +consul_kv Manipulate entries in ... +consul_session Manipulate consul sess... +copy Copies files to remote... +cpanm Manages Perl library d... +cron Manage cron.d and cron... +cronvar Manage variables in cr... +crypttab Encrypted Linux block ... +cs_account Manages accounts on Ap... +cs_affinitygroup Manages affinity group... +cs_cluster Manages host clusters ... +cs_configuration Manages configuration ... +cs_domain Manages domains on Apa... +cs_facts Gather facts on instan... +cs_firewall Manages firewall rules... +cs_host Manages hosts on Apach... +cs_instance Manages instances and ... +cs_instance_facts Gathering facts from t... +cs_instance_nic Manages NICs of an ins... +cs_instance_nic_secondaryip Manages secondary IPs ... +cs_instancegroup Manages instance group... +cs_ip_address Manages public IP addr... +cs_iso Manages ISO images on ... +cs_loadbalancer_rule Manages load balancer ... +cs_loadbalancer_rule_member Manages load balancer ... +cs_network Manages networks on Ap... +cs_network_acl Manages network access... +cs_network_acl_rule Manages network access... +cs_network_offering Manages network offeri... +cs_nic Manages NICs and secon... +cs_pod Manages pods on Apache... +cs_portforward Manages port forwardin... +cs_project Manages projects on Ap... +cs_region Manages regions on Apa... +cs_resourcelimit Manages resource limit... +cs_role Manages user roles on ... +cs_role_permission Manages role permissio... +cs_router Manages routers on Apa... +cs_securitygroup Manages security group... +cs_securitygroup_rule Manages security group... +cs_service_offering Manages service offeri... +cs_snapshot_policy Manages volume snapsho... +cs_sshkeypair Manages SSH keys on Ap... +cs_staticnat Manages static NATs on... +cs_storage_pool Manages Primary Storag... +cs_template Manages templates on A... +cs_user Manages users on Apach... +cs_vmsnapshot Manages VM snapshots o... +cs_volume Manages volumes on Apa... +cs_vpc Manages VPCs on Apache... +cs_vpc_offering Manages vpc offerings ... +cs_vpn_connection Manages site-to-site V... +cs_vpn_customer_gateway Manages site-to-site V... +cs_vpn_gateway Manages site-to-site V... +cs_zone Manages zones on Apach... +cs_zone_facts Gathering facts of zon... +cv_server_provision Provision server port ... +cyberark_authentication Module for CyberArk Va... +cyberark_user Module for CyberArk Us... +data_pipeline Create and manage AWS ... +datadog_event Posts events to DataDo... +datadog_monitor Manages Datadog monito... +dconf Modify and read dconf ... +debconf Configure a .deb packa... +debug Print statements durin... +dellos10_command Run commands on remote... +dellos10_config Manage Dell EMC Networ... +dellos10_facts Collect facts from rem... +dellos6_command Run commands on remote... 
+dellos6_config Manage Dell EMC Networ... +dellos6_facts Collect facts from rem... +dellos9_command Run commands on remote... +dellos9_config Manage Dell EMC Networ... +dellos9_facts Collect facts from rem... +deploy_helper Manages some of the st... +digital_ocean Create/delete a drople... +digital_ocean_account_facts Gather facts about Dig... +digital_ocean_block_storage Create/destroy or atta... +digital_ocean_certificate Manage certificates in... +digital_ocean_certificate_facts Gather facts about Dig... +digital_ocean_domain Create/delete a DNS re... +digital_ocean_domain_facts Gather facts about Dig... +digital_ocean_floating_ip Manage DigitalOcean Fl... +digital_ocean_floating_ip_facts DigitalOcean Floating ... +digital_ocean_image_facts Gather facts about Dig... +digital_ocean_load_balancer_facts Gather facts about Dig... +digital_ocean_region_facts Gather facts about Dig... +digital_ocean_size_facts Gather facts about Dig... +digital_ocean_snapshot_facts Gather facts about Dig... +digital_ocean_sshkey Manage DigitalOcean SS... +digital_ocean_sshkey_facts DigitalOcean SSH keys ... +digital_ocean_tag Create and remove tag(... +digital_ocean_tag_facts Gather facts about Dig... +digital_ocean_volume_facts Gather facts about Dig... +dimensiondata_network Create, update, and de... +dimensiondata_vlan Manage a VLAN in a Clo... +django_manage Manages a Django appli... +dladm_etherstub Manage etherstubs on S... +dladm_iptun Manage IP tunnel inter... +dladm_linkprop Manage link properties... +dladm_vlan Manage VLAN interfaces... +dladm_vnic Manage VNICs on Solari... +dnf Manages packages with ... +dnsimple Interface with dnsimpl... +dnsmadeeasy Interface with dnsmade... +docker manage docker containe... +docker_container manage docker containe... +docker_image Manage docker images. +docker_image_facts Inspect docker images +docker_login Log into a Docker regi... +docker_network Manage Docker networks +docker_secret Manage docker secrets. +docker_service Manage docker services... +docker_volume Manage Docker volumes +dpkg_selections Dpkg package selection... +dynamodb_table Create, update or dele... +dynamodb_ttl set TTL for a given Dy... +easy_install Installs Python librar... +ec2 create, terminate, sta... +ec2_ami create or destroy an i... +ec2_ami_copy copies AMI between AWS... +ec2_ami_facts Gather facts about ec2... +ec2_ami_find Searches for AMIs to o... +ec2_ami_search Retrieve AWS AMI infor... +ec2_asg Create or delete AWS A... +ec2_asg_facts Gather facts about ec2... +ec2_asg_lifecycle_hook Create, delete or upda... +ec2_customer_gateway Manage an AWS customer... +ec2_customer_gateway_facts Gather facts about cus... +ec2_eip manages EC2 elastic IP... +ec2_eip_facts List EC2 EIP details +ec2_elb De-registers or regist... +ec2_elb_facts Gather facts about EC2... +ec2_elb_lb Creates or destroys Am... +ec2_eni Create and optionally ... +ec2_eni_facts Gather facts about ec2... +ec2_group maintain an ec2 VPC se... +ec2_group_facts Gather facts about ec2... +ec2_instance Create & manage EC2 in... +ec2_instance_facts Gather facts about ec2... +ec2_key create or delete an ec... +ec2_lc Create or delete AWS A... +ec2_lc_facts Gather facts about AWS... +ec2_lc_find Find AWS Autoscaling L... +ec2_metadata_facts Gathers facts (instanc... +ec2_metric_alarm Create/update or delet... +ec2_placement_group Create or delete an EC... +ec2_placement_group_facts List EC2 Placement Gro... +ec2_remote_facts Gather facts about ec2... +ec2_scaling_policy Create or delete AWS s... 
+ec2_snapshot creates a snapshot fro... +ec2_snapshot_copy copies an EC2 snapshot... +ec2_snapshot_facts Gather facts about ec2... +ec2_tag create and remove tag(... +ec2_vol create and attach a vo... +ec2_vol_facts Gather facts about ec2... +ec2_vpc configure AWS virtual ... +ec2_vpc_dhcp_option Manages DHCP Options, ... +ec2_vpc_dhcp_option_facts Gather facts about dhc... +ec2_vpc_egress_igw Manage an AWS VPC Egre... +ec2_vpc_endpoint Create and delete AWS ... +ec2_vpc_endpoint_facts Retrieves AWS VPC endp... +ec2_vpc_igw Manage an AWS VPC Inte... +ec2_vpc_igw_facts Gather facts about int... +ec2_vpc_nacl create and delete Netw... +ec2_vpc_nacl_facts Gather facts about Net... +ec2_vpc_nat_gateway Manage AWS VPC NAT Gat... +ec2_vpc_nat_gateway_facts Retrieves AWS VPC Mana... +ec2_vpc_net Configure AWS virtual ... +ec2_vpc_net_facts Gather facts about ec2... +ec2_vpc_peer create, delete, accept... +ec2_vpc_peering_facts Retrieves AWS VPC Peer... +ec2_vpc_route_table Manage route tables fo... +ec2_vpc_route_table_facts Gather facts about ec2... +ec2_vpc_subnet Manage subnets in AWS ... +ec2_vpc_subnet_facts Gather facts about ec2... +ec2_vpc_vgw Create and delete AWS ... +ec2_vpc_vgw_facts Gather facts about vir... +ec2_vpc_vpn Create, modify, and de... +ec2_vpc_vpn_facts Gather facts about VPN... +ec2_win_password gets the default admin... +ecs_attribute manage ecs attributes +ecs_cluster create or terminate ec... +ecs_ecr Manage Elastic Contain... +ecs_service create, terminate, sta... +ecs_service_facts list or describe servi... +ecs_task run, start or stop a t... +ecs_taskdefinition register a task defini... +ecs_taskdefinition_facts describe a task defini... +edgeos_command Run one or more comman... +edgeos_config Manage EdgeOS configur... +edgeos_facts Collect facts from rem... +efs create and maintain EF... +efs_facts Get information about ... +ejabberd_user Manages users for ejab... +elasticache Manage cache clusters ... +elasticache_facts Retrieve facts for AWS... +elasticache_parameter_group Manage cache security ... +elasticache_snapshot Manage cache snapshots... +elasticache_subnet_group manage Elasticache sub... +elasticsearch_plugin Manage Elasticsearch p... +elb_application_lb Manage an Application ... +elb_application_lb_facts Gather facts about app... +elb_classic_lb Creates or destroys Am... +elb_classic_lb_facts Gather facts about EC2... +elb_instance De-registers or regist... +elb_network_lb Manage a Network Load ... +elb_target Manage a target in a t... +elb_target_group Manage a target group ... +elb_target_group_facts Gather facts about ELB... +enos_command Run arbitrary commands... +enos_config Manage Lenovo ENOS con... +enos_facts Collect facts from rem... +eos_banner Manage multiline banne... +eos_command Run arbitrary commands... +eos_config Manage Arista EOS conf... +eos_eapi Manage and configure A... +eos_facts Collect facts from rem... +eos_interface Manage Interface on Ar... +eos_l2_interface Manage L2 interfaces o... +eos_l3_interface Manage L3 interfaces o... +eos_linkagg Manage link aggregatio... +eos_lldp Manage LLDP configurat... +eos_logging Manage logging on netw... +eos_static_route Manage static IP route... +eos_system Manage the system attr... +eos_user Manage the collection ... +eos_vlan Manage VLANs on Arista... +eos_vrf Manage VRFs on Arista ... +etcd3 Set or delete key valu... +execute_lambda Execute an AWS Lambda ... +exo_dns_domain Manages domain records... +exo_dns_record Manages DNS records on... +exos_command Run commands on remote... 
+expect Executes a command and... +facter Runs the discovery pro... +fail Fail with custom messa... +fetch Fetches a file from re... +file Sets attributes of fil... +filesystem Makes a filesystem +find Return a list of files... +firewalld Manage arbitrary ports... +flatpak Manage flatpaks +flatpak_remote Manage flatpak reposit... +flowadm Manage bandwidth resou... +flowdock Send a message to a fl... +fmgr_script Add/Edit/Delete and ex... +foreman Manage Foreman Resourc... +fortios_address Manage fortios firewal... +fortios_config Manage config on Forti... +fortios_ipv4_policy Manage IPv4 policy obj... +fortios_webfilter Configure webfilter ca... +gc_storage This module manages ob... +gcdns_record Creates or removes res... +gcdns_zone Creates or removes zon... +gce create or terminate GC... +gce_eip Create or Destroy Glob... +gce_img utilize GCE image reso... +gce_instance_template create or destroy inst... +gce_labels Create, Update or Dest... +gce_lb create/destroy GCE loa... +gce_mig Create, Update or Dest... +gce_net create/destroy GCE net... +gce_pd utilize GCE persistent... +gce_snapshot Create or destroy snap... +gce_tag add or remove tag(s) t... +gconftool2 Edit GNOME Configurati... +gcp_backend_service Create or Destroy a Ba... +gcp_compute_address Creates a GCP Address +gcp_compute_backend_bucket Creates a GCP BackendB... +gcp_compute_backend_service Creates a GCP BackendS... +gcp_compute_disk Creates a GCP Disk +gcp_compute_firewall Creates a GCP Firewall +gcp_compute_forwarding_rule Creates a GCP Forwardi... +gcp_compute_global_address Creates a GCP GlobalAd... +gcp_compute_global_forwarding_rule Creates a GCP GlobalFo... +gcp_compute_health_check Creates a GCP HealthCh... +gcp_compute_http_health_check Creates a GCP HttpHeal... +gcp_compute_https_health_check Creates a GCP HttpsHea... +gcp_compute_image Creates a GCP Image +gcp_compute_instance Creates a GCP Instance +gcp_compute_instance_group Creates a GCP Instance... +gcp_compute_instance_group_manager Creates a GCP Instance... +gcp_compute_instance_template Creates a GCP Instance... +gcp_compute_network Creates a GCP Network +gcp_compute_route Creates a GCP Route +gcp_compute_ssl_certificate Creates a GCP SslCerti... +gcp_compute_subnetwork Creates a GCP Subnetwo... +gcp_compute_target_http_proxy Creates a GCP TargetHt... +gcp_compute_target_https_proxy Creates a GCP TargetHt... +gcp_compute_target_pool Creates a GCP TargetPo... +gcp_compute_target_ssl_proxy Creates a GCP TargetSs... +gcp_compute_target_tcp_proxy Creates a GCP TargetTc... +gcp_compute_url_map Creates a GCP UrlMap +gcp_container_cluster Creates a GCP Cluster +gcp_container_node_pool Creates a GCP NodePool +gcp_dns_managed_zone Creates a GCP ManagedZ... +gcp_dns_resource_record_set Creates a GCP Resource... +gcp_forwarding_rule Create, Update or Dest... +gcp_healthcheck Create, Update or Dest... +gcp_pubsub_subscription Creates a GCP Subscrip... +gcp_pubsub_topic Creates a GCP Topic +gcp_storage_bucket Creates a GCP Bucket +gcp_storage_bucket_access_control Creates a GCP BucketAc... +gcp_target_proxy Create, Update or Dest... +gcp_url_map Create, Update or Dest... +gcpubsub Create and Delete Topi... +gcpubsub_facts List Topics/Subscripti... +gcspanner Create and Delete Inst... +gem Manage Ruby gems +get_url Downloads files from H... +getent A wrapper to the unix ... +git Deploy software (or fi... +git_config Read and write git con... +github_deploy_key Manages deploy keys fo... +github_hooks Manages GitHub service... +github_issue View GitHub issue. 
+github_key Manage GitHub access k... +github_release Interact with GitHub R... +gitlab_deploy_key Manages GitLab project... +gitlab_group Creates/updates/delete... +gitlab_hooks Manages GitLab project... +gitlab_project Creates/updates/delete... +gitlab_user Creates/updates/delete... +gluster_peer Attach/Detach peers to... +gluster_volume Manage GlusterFS volum... +grafana_dashboard Manage Grafana dashboa... +grafana_datasource Manage Grafana datasou... +grafana_plugin Manage Grafana plugins... +group Add or remove groups +group_by Create Ansible groups ... +grove Sends a notification t... +gunicorn Run gunicorn with vari... +hall Send notification to H... +haproxy Enable, disable, and s... +helm Manages Kubernetes pac... +heroku_collaborator Add or delete app coll... +hg Manages Mercurial (hg)... +hipchat Send a message to Hipc... +homebrew Package manager for Ho... +homebrew_cask Install/uninstall home... +homebrew_tap Tap a Homebrew reposit... +honeybadger_deployment Notify Honeybadger.io ... +hostname Manage hostname +hpilo_boot Boot system using spec... +hpilo_facts Gather facts through a... +hponcfg Configure HP iLO inter... +htpasswd manage user files for ... +iam Manage IAM users, grou... +iam_cert Manage server certific... +iam_group Manage AWS IAM groups +iam_managed_policy Manage User Managed IA... +iam_mfa_device_facts List the MFA (Multi-Fa... +iam_policy Manage IAM policies fo... +iam_role Manage AWS IAM roles +iam_role_facts Gather information on ... +iam_server_certificate_facts Retrieve the facts of ... +iam_user Manage AWS IAM users +icinga2_feature Manage Icinga2 feature +icinga2_host Manage a host in Icing... +imc_rest Manage Cisco IMC hardw... +imgadm Manage SmartOS images +import_playbook Import a playbook +import_role Import a role into a p... +import_tasks Import a task list +include Include a play or task... +include_role Load and execute a rol... +include_tasks Dynamically include a ... +include_vars Load variables from fi... +infini_export Create, Delete or Modi... +infini_export_client Create, Delete or Modi... +infini_fs Create, Delete or Modi... +infini_host Create, Delete and Mod... +infini_pool Create, Delete and Mod... +infini_vol Create, Delete or Modi... +infinity manage Infinity IPAM u... +influxdb_database Manage InfluxDB databa... +influxdb_query Query data points from... +influxdb_retention_policy Manage InfluxDB retent... +influxdb_user Manage InfluxDB users +influxdb_write Write data points into... +ini_file Tweak settings in INI ... +interfaces_file Tweak settings in /etc... +ios_banner Manage multiline banne... +ios_command Run commands on remote... +ios_config Manage Cisco IOS confi... +ios_facts Collect facts from rem... +ios_interface Manage Interface on Ci... +ios_l2_interface Manage Layer-2 interfa... +ios_l3_interface Manage L3 interfaces o... +ios_linkagg Manage link aggregatio... +ios_lldp Manage LLDP configurat... +ios_logging Manage logging on netw... +ios_ping Tests reachability usi... +ios_static_route Manage static IP route... +ios_system Manage the system attr... +ios_user Manage the aggregate o... +ios_vlan Manage VLANs on IOS ne... +ios_vrf Manage the collection ... +iosxr_banner Manage multiline banne... +iosxr_command Run commands on remote... +iosxr_config Manage Cisco IOS XR co... +iosxr_facts Collect facts from rem... +iosxr_interface Manage Interface on Ci... +iosxr_logging Configuration manageme... +iosxr_netconf Configures NetConf sub... +iosxr_system Manage the system attr... +iosxr_user Manage the aggregate o... 
+ip_netns Manage network namespa... +ipa_dnsrecord Manage FreeIPA DNS rec... +ipa_dnszone Manage FreeIPA DNS Zon... +ipa_group Manage FreeIPA group +ipa_hbacrule Manage FreeIPA HBAC ru... +ipa_host Manage FreeIPA host +ipa_hostgroup Manage FreeIPA host-gr... +ipa_role Manage FreeIPA role +ipa_service Manage FreeIPA service +ipa_subca Manage FreeIPA Lightwe... +ipa_sudocmd Manage FreeIPA sudo co... +ipa_sudocmdgroup Manage FreeIPA sudo co... +ipa_sudorule Manage FreeIPA sudo ru... +ipa_user Manage FreeIPA users +ipadm_addr Manage IP addresses on... +ipadm_addrprop Manage IP address prop... +ipadm_if Manage IP interfaces ... +ipadm_ifprop Manage IP interface pr... +ipadm_prop Manage protocol proper... +ipify_facts Retrieve the public IP... +ipinfoio_facts Retrieve IP geolocatio... +ipmi_boot Management of order of... +ipmi_power Power management for m... +iptables Modify the systems ipt... +irc Send a message to an I... +ironware_command Run arbitrary commands... +ironware_config Manage configuration s... +ironware_facts Collect facts from dev... +iso_extract Extract files from an ... +jabber Send a message to jabb... +java_cert Uses keytool to import... +jboss deploy applications to... +jenkins_job Manage jenkins jobs +jenkins_job_facts Get facts about Jenkin... +jenkins_plugin Add or remove Jenkins ... +jenkins_script Executes a groovy scri... +jira create and modify issu... +junos_banner Manage multiline banne... +junos_command Run arbitrary commands... +junos_config Manage configuration o... +junos_facts Collect facts from rem... +junos_interface Manage Interface on Ju... +junos_l2_interface Manage Layer-2 interfa... +junos_l3_interface Manage L3 interfaces o... +junos_linkagg Manage link aggregatio... +junos_lldp Manage LLDP configurat... +junos_lldp_interface Manage LLDP interfaces... +junos_logging Manage logging on netw... +junos_netconf Configures the Junos N... +junos_package Installs packages on r... +junos_rpc Runs an arbitrary RPC ... +junos_scp Transfer files from or... +junos_static_route Manage static IP route... +junos_system Manage the system attr... +junos_user Manage local user acco... +junos_vlan Manage VLANs on Junipe... +junos_vrf Manage the VRF definit... +k8s Manage Kubernetes (K8s... +k8s_scale Set a new size for a D... +katello Manage Katello Resourc... +kernel_blacklist Blacklist kernel modul... +keycloak_client Allows administration ... +keycloak_clienttemplate Allows administration ... +kibana_plugin Manage Kibana plugins +kinesis_stream Manage a Kinesis Strea... +known_hosts Add or remove a host f... +kubernetes Manage Kubernetes reso... +lambda Manage AWS Lambda func... +lambda_alias Creates, updates or de... +lambda_event Creates, updates or de... +lambda_facts Gathers AWS Lambda fun... +lambda_policy Creates, updates or de... +layman Manage Gentoo overlays +ldap_attr Add or remove LDAP att... +ldap_entry Add or remove LDAP ent... +ldap_passwd Set passwords in LDAP. +librato_annotation create an annotation i... +lightsail Create or delete a vir... +lineinfile Manage lines in text f... +linode Manage instances on th... +lldp get details reported b... +locale_gen Creates or removes loc... +logentries Module for tracking lo... +logentries_msg Send a message to loge... +logicmonitor Manage your LogicMonit... +logicmonitor_facts Collect facts about Lo... +logstash_plugin Manage Logstash plugin... +lvg Configure LVM volume g... +lvol Configure LVM logical ... 
+lxc_container Manage LXC Containers +lxd_container Manage LXD Containers +lxd_profile Manage LXD profiles +macports Package manager for Ma... +mail Send an email +make Run targets in a Makef... +manageiq_alert_profiles Configuration of alert... +manageiq_alerts Configuration of alert... +manageiq_policies Management of resource... +manageiq_provider Management of provider... +manageiq_tags Management of resource... +manageiq_user Management of users in... +mattermost Send Mattermost notifi... +maven_artifact Downloads an Artifact ... +memset_dns_reload Request reload of Mems... +memset_zone Creates and deletes Me... +memset_zone_domain Create and delete doma... +memset_zone_record Create and delete reco... +meraki_admin Manage administrators ... +meraki_network Manage networks in the... +meraki_organization Manage organizations i... +meraki_snmp Manage organizations i... +meta Execute Ansible 'actio... +mksysb Generates AIX mksysb r... +modprobe Load or unload kernel ... +mongodb_parameter Change an administrati... +mongodb_user Adds or removes a user... +monit Manage the state of a ... +mount Control active and con... +mqtt Publish a message on a... +mssql_db Add or remove MSSQL da... +mysql_db Add or remove MySQL da... +mysql_replication Manage MySQL replicati... +mysql_user Adds or removes a user... +mysql_variables Manage MySQL global va... +na_cdot_aggregate Manage NetApp cDOT agg... +na_cdot_license Manage NetApp cDOT pro... +na_cdot_lun Manage NetApp cDOT lu... +na_cdot_qtree Manage qtrees +na_cdot_svm Manage NetApp cDOT svm +na_cdot_user useradmin configuratio... +na_cdot_user_role useradmin configuratio... +na_cdot_volume Manage NetApp cDOT vol... +na_ontap_aggregate Manage NetApp ONTAP ag... +na_ontap_broadcast_domain Manage NetApp ONTAP br... +na_ontap_broadcast_domain_ports Manage NetApp Ontap br... +na_ontap_cifs Manage NetApp cifs-sha... +na_ontap_cifs_acl Manage NetApp cifs-sha... +na_ontap_cifs_server cifs server configurat... +na_ontap_cluster Create/Join ONTAP clus... +na_ontap_cluster_ha Manage HA status for c... +na_ontap_export_policy Manage NetApp ONTAP ex... +na_ontap_export_policy_rule Manage ONTAP Export ru... +na_ontap_igroup ONTAP iSCSI igroup con... +na_ontap_interface ONTAP LIF configuratio... +na_ontap_iscsi Manage NetApp Ontap is... +na_ontap_job_schedule Manage NetApp Ontap Jo... +na_ontap_license Manage NetApp ONTAP pr... +na_ontap_lun Manage NetApp Ontap l... +na_ontap_lun_map Manage NetApp Ontap lu... +na_ontap_net_ifgrp Create, modify, destro... +na_ontap_net_port Manage NetApp Ontap ne... +na_ontap_net_routes Manage NetApp Ontap ne... +na_ontap_net_vlan Manage NetApp Ontap ne... +na_ontap_nfs Manage Ontap NFS statu... +na_ontap_ntp Create/Delete/modify_v... +na_ontap_qtree Manage qtrees +na_ontap_service_processor_network Manage NetApp Ontap se... +na_ontap_snapshot Manage NetApp Sanpshot... +na_ontap_snmp Manage NetApp SNMP com... +na_ontap_svm Manage NetApp Ontap sv... +na_ontap_ucadapter ONTAP UC adapter confi... +na_ontap_user useradmin configuratio... +na_ontap_user_role useradmin configuratio... +na_ontap_volume Manage NetApp ONTAP vo... +na_ontap_volume_clone Manage NetApp Ontap vo... +nagios Perform common tasks i... +nclu Configure network inte... +net_banner Manage multiline banne... +net_get Copy a file from a net... +net_interface Manage Interface on ne... +net_l2_interface Manage Layer-2 interfa... +net_l3_interface Manage L3 interfaces o... +net_linkagg Manage link aggregatio... +net_lldp Manage LLDP service co... 
+net_lldp_interface Manage LLDP interfaces... +net_logging Manage logging on netw... +net_ping Tests reachability usi... +net_put Copy a file from Ansib... +net_static_route Manage static IP route... +net_system Manage the system attr... +net_user Manage the aggregate o... +net_vlan Manage VLANs on networ... +net_vrf Manage VRFs on network... +netact_cm_command Manage network configu... +netapp_e_amg Create, Remove, and Up... +netapp_e_amg_role Update the role of a s... +netapp_e_amg_sync Conduct synchronizatio... +netapp_e_auth Sets or updates the pa... +netapp_e_facts Get facts about NetApp... +netapp_e_flashcache Manage NetApp SSD cach... +netapp_e_host manage eseries hosts +netapp_e_hostgroup Manage NetApp Storage ... +netapp_e_lun_mapping Create or Remove LUN M... +netapp_e_snapshot_group Manage snapshot groups +netapp_e_snapshot_images Create and delete snap... +netapp_e_snapshot_volume Manage E/EF-Series sna... +netapp_e_storage_system Add/remove arrays from... +netapp_e_storagepool Manage disk groups and... +netapp_e_volume Manage storage volumes... +netapp_e_volume_copy Create volume copy pai... +netconf_config netconf device configu... +netconf_get Fetch configuration/st... +netconf_rpc Execute operations on ... +netscaler Manages Citrix NetScal... +netscaler_cs_action Manage content switchi... +netscaler_cs_policy Manage content switchi... +netscaler_cs_vserver Manage content switchi... +netscaler_gslb_service Manage gslb service en... +netscaler_gslb_site Manage gslb site entit... +netscaler_gslb_vserver Configure gslb vserver... +netscaler_lb_monitor Manage load balancing ... +netscaler_lb_vserver Manage load balancing ... +netscaler_nitro_request Issue Nitro API reques... +netscaler_save_config Save Netscaler configu... +netscaler_server Manage server configur... +netscaler_service Manage service configu... +netscaler_servicegroup Manage service group c... +netscaler_ssl_certkey Manage ssl certificate ... +newrelic_deployment Notify newrelic about ... +nexmo Send a SMS via nexmo +nginx_status_facts Retrieve nginx status ... +nios_dns_view Configure Infoblox NIO... +nios_host_record Configure Infoblox NIO... +nios_network Configure Infoblox NIO... +nios_network_view Configure Infoblox NIO... +nios_zone Configure Infoblox NIO... +nmcli Manage Networking +nosh Manage services with n... +npm Manage node.js package... +nso_action Executes Cisco NSO act... +nso_config Manage Cisco NSO confi... +nso_query Query data from Cisco ... +nso_show Displays data from Cis... +nso_verify Verifies Cisco NSO con... +nsupdate Manage DNS records. +nuage_vspk Manage Nuage VSP envir... +nxos_aaa_server Manages AAA server glo... +nxos_aaa_server_host Manages AAA server hos... +nxos_acl Manages access list en... +nxos_acl_interface Manages applying ACLs ... +nxos_banner Manage multiline banne... +nxos_bgp Manages BGP configurat... +nxos_bgp_af Manages BGP Address-fa... +nxos_bgp_neighbor Manages BGP neighbors ... +nxos_bgp_neighbor_af Manages BGP address-fa... +nxos_command Run arbitrary command ... +nxos_config Manage Cisco NXOS conf... +nxos_evpn_global Handles the EVPN contr... +nxos_evpn_vni Manages Cisco EVPN VXL... +nxos_facts Gets facts about NX-OS... +nxos_feature Manage features in NX-... +nxos_file_copy Copy a file to a remot... +nxos_gir Trigger a graceful rem... +nxos_gir_profile_management Create a maintenance-m... +nxos_hsrp Manages HSRP configura... +nxos_igmp Manages IGMP global co... +nxos_igmp_interface Manages IGMP interface... +nxos_igmp_snooping Manages IGMP snooping ...
+nxos_install_os Set boot options like ... +nxos_interface Manages physical attri... +nxos_interface_ospf Manages configuration ... +nxos_ip_interface Manages L3 attributes ... +nxos_l2_interface Manage Layer-2 interfa... +nxos_l3_interface Manage L3 interfaces o... +nxos_linkagg Manage link aggregatio... +nxos_lldp Manage LLDP configurat... +nxos_logging Manage logging on netw... +nxos_mtu Manages MTU settings o... +nxos_ntp Manages core NTP confi... +nxos_ntp_auth Manages NTP authentica... +nxos_ntp_options Manages NTP options. +nxos_nxapi Manage NXAPI configura... +nxos_ospf Manages configuration ... +nxos_ospf_vrf Manages a VRF for an O... +nxos_overlay_global Configures anycast gat... +nxos_pim Manages configuration ... +nxos_pim_interface Manages PIM interface ... +nxos_pim_rp_address Manages configuration ... +nxos_ping Tests reachability usi... +nxos_portchannel Manages port-channel i... +nxos_reboot Reboot a network devic... +nxos_rollback Set a checkpoint or ro... +nxos_smu Perform SMUs on Cisco ... +nxos_snapshot Manage snapshots of th... +nxos_snmp_community Manages SNMP community... +nxos_snmp_contact Manages SNMP contact i... +nxos_snmp_host Manages SNMP host conf... +nxos_snmp_location Manages SNMP location ... +nxos_snmp_traps Manages SNMP traps. +nxos_snmp_user Manages SNMP users for... +nxos_static_route Manages static route c... +nxos_switchport Manages Layer 2 switch... +nxos_system Manage the system attr... +nxos_udld Manages UDLD global co... +nxos_udld_interface Manages UDLD interface... +nxos_user Manage the collection ... +nxos_vlan Manages VLAN resources... +nxos_vpc Manages global VPC con... +nxos_vpc_interface Manages interface VPC ... +nxos_vrf Manages global VRF con... +nxos_vrf_af Manages VRF AF. +nxos_vrf_interface Manages interface spec... +nxos_vrrp Manages VRRP configura... +nxos_vtp_domain Manages VTP domain con... +nxos_vtp_password Manages VTP password c... +nxos_vtp_version Manages VTP version co... +nxos_vxlan_vtep Manages VXLAN Network ... +nxos_vxlan_vtep_vni Creates a Virtual Netw... +oc Manage OpenShift Resou... +office_365_connector_card Use webhooks to create... +ohai Returns inventory data... +omapi_host Setup OMAPI hosts. +one_host Manages OpenNebula Hos... +one_image Manages OpenNebula ima... +one_image_facts Gather facts about Ope... +one_service Deploy and manage Open... +one_vm Creates or terminates ... +oneandone_firewall_policy Configure 1&1 firewall... +oneandone_load_balancer Configure 1&1 load bal... +oneandone_monitoring_policy Configure 1&1 monitori... +oneandone_private_network Configure 1&1 private ... +oneandone_public_ip Configure 1&1 public I... +oneandone_server Create, destroy, start... +oneview_datacenter_facts Retrieve facts about t... +oneview_enclosure_facts Retrieve facts about o... +oneview_ethernet_network Manage OneView Etherne... +oneview_ethernet_network_facts Retrieve the facts abo... +oneview_fc_network Manage OneView Fibre C... +oneview_fc_network_facts Retrieve the facts abo... +oneview_fcoe_network Manage OneView FCoE Ne... +oneview_fcoe_network_facts Retrieve the facts abo... +oneview_logical_interconnect_group Manage OneView Logical... +oneview_logical_interconnect_group_facts Retrieve facts about o... +oneview_network_set Manage HPE OneView Net... +oneview_network_set_facts Retrieve facts about t... +oneview_san_manager Manage OneView SAN Man... +oneview_san_manager_facts Retrieve facts about o... +onyx_bgp Configures BGP on Mell... +onyx_command Run commands on remote... 
+onyx_config Manage Mellanox ONYX c... +onyx_facts Collect facts from Mel... +onyx_interface Manage Interfaces on M... +onyx_l2_interface Manage Layer-2 interfa... +onyx_l3_interface Manage L3 interfaces o... +onyx_linkagg Manage link aggregatio... +onyx_lldp Manage LLDP configurat... +onyx_lldp_interface Manage LLDP interfaces... +onyx_magp Manage MAGP protocol o... +onyx_mlag_ipl Manage IPL (inter-peer... +onyx_mlag_vip Configures MLAG VIP on... +onyx_ospf Manage OSPF protocol o... +onyx_pfc_interface Manage priority flow c... +onyx_protocol Enables/Disables proto... +onyx_vlan Manage VLANs on Mellan... +open_iscsi Manage iscsi targets w... +openbsd_pkg Manage packages on Ope... +opendj_backendprop Will update the backen... +openshift_raw Manage Kubernetes (K8s... +openshift_scale Set a new size for a D... +openssl_certificate Generate and/or check ... +openssl_csr Generate OpenSSL Certi... +openssl_dhparam Generate OpenSSL Diffi... +openssl_privatekey Generate OpenSSL priva... +openssl_publickey Generate an OpenSSL pu... +openvswitch_bridge Manage Open vSwitch br... +openvswitch_db Configure open vswitch... +openvswitch_port Manage Open vSwitch po... +openwrt_init Manage services on Ope... +opkg Package manager for Op... +ordnance_config Manage Ordnance config... +ordnance_facts Collect facts from Ord... +os_auth Retrieve an auth token +os_client_config Get OpenStack Client c... +os_flavor_facts Retrieve facts about o... +os_floating_ip Add/Remove floating IP... +os_group Manage OpenStack Ident... +os_image Add/Delete images from... +os_image_facts Retrieve facts about a... +os_ironic Create/Delete Bare Met... +os_ironic_inspect Explicitly triggers ba... +os_ironic_node Activate/Deactivate Ba... +os_keypair Add/Delete a keypair f... +os_keystone_domain Manage OpenStack Ident... +os_keystone_domain_facts Retrieve facts about o... +os_keystone_endpoint Manage OpenStack Ident... +os_keystone_role Manage OpenStack Ident... +os_keystone_service Manage OpenStack Ident... +os_network Creates/removes networ... +os_networks_facts Retrieve facts about o... +os_nova_flavor Manage OpenStack compu... +os_nova_host_aggregate Manage OpenStack host ... +os_object Create or Delete objec... +os_port Add/Update/Delete port... +os_port_facts Retrieve facts about p... +os_project Manage OpenStack Proje... +os_project_access Manage OpenStack compu... +os_project_facts Retrieve facts about o... +os_quota Manage OpenStack Quota... +os_recordset Manage OpenStack DNS r... +os_router Create or delete route... +os_security_group Add/Delete security gr... +os_security_group_rule Add/Delete rule from a... +os_server Create/Delete Compute ... +os_server_action Perform actions on Com... +os_server_facts Retrieve facts about o... +os_server_group Manage OpenStack serve... +os_server_metadata Add/Update/Delete Meta... +os_server_volume Attach/Detach Volumes ... +os_stack Add/Remove Heat Stack +os_subnet Add/Remove subnet to a... +os_subnets_facts Retrieve facts about o... +os_user Manage OpenStack Ident... +os_user_facts Retrieve facts about o... +os_user_group Associate OpenStack Id... +os_user_role Associate OpenStack Id... +os_volume Create/Delete Cinder V... +os_volume_snapshot Create/Delete Cinder V... +os_zone Manage OpenStack DNS z... +osx_defaults osx_defaults allows us... +ovh_ip_loadbalancing_backend Manage OVH IP LoadBala... +ovirt oVirt/RHEV platform ma... +ovirt_affinity_group Module to manage affin... +ovirt_affinity_label Module to manage affin... +ovirt_affinity_label_facts Retrieve facts about o... 
+ovirt_api_facts Retrieve facts about t... +ovirt_auth Module to manage authe... +ovirt_cluster Module to manage clust... +ovirt_cluster_facts Retrieve facts about o... +ovirt_datacenter Module to manage data ... +ovirt_datacenter_facts Retrieve facts about o... +ovirt_disk Module to manage Virtu... +ovirt_disk_facts Retrieve facts about o... +ovirt_external_provider Module to manage exter... +ovirt_external_provider_facts Retrieve facts about o... +ovirt_group Module to manage group... +ovirt_group_facts Retrieve facts about o... +ovirt_host_networks Module to manage host ... +ovirt_host_pm Module to manage power... +ovirt_host_storage_facts Retrieve facts about o... +ovirt_hosts Module to manage hosts... +ovirt_hosts_facts Retrieve facts about o... +ovirt_mac_pools Module to manage MAC p... +ovirt_networks Module to manage logic... +ovirt_networks_facts Retrieve facts about o... +ovirt_nics Module to manage netwo... +ovirt_nics_facts Retrieve facts about o... +ovirt_permissions Module to manage permi... +ovirt_permissions_facts Retrieve facts about o... +ovirt_quotas Module to manage datac... +ovirt_quotas_facts Retrieve facts about o... +ovirt_scheduling_policies_facts Retrieve facts about o... +ovirt_snapshots Module to manage Virtu... +ovirt_snapshots_facts Retrieve facts about o... +ovirt_storage_connections Module to manage stora... +ovirt_storage_domains Module to manage stora... +ovirt_storage_domains_facts Retrieve facts about o... +ovirt_storage_templates_facts Retrieve facts about o... +ovirt_storage_vms_facts Retrieve facts about o... +ovirt_tags Module to manage tags ... +ovirt_tags_facts Retrieve facts about o... +ovirt_templates Module to manage virtu... +ovirt_templates_facts Retrieve facts about o... +ovirt_users Module to manage users... +ovirt_users_facts Retrieve facts about o... +ovirt_vmpools Module to manage VM po... +ovirt_vmpools_facts Retrieve facts about o... +ovirt_vms Module to manage Virtu... +ovirt_vms_facts Retrieve facts about o... +pacemaker_cluster Manage pacemaker clust... +package Generic OS package man... +package_facts package information as... +packet_device Manage a bare metal se... +packet_sshkey Create/delete an SSH k... +pacman Manage packages with `... +pagerduty Create PagerDuty maint... +pagerduty_alert Trigger, acknowledge o... +pam_limits Modify Linux PAM limit... +pamd Manage PAM Modules +panos_admin Add or modify PAN-OS u... +panos_admpwd change admin password ... +panos_cert_gen_ssh generates a self-signe... +panos_check check if PAN-OS device... +panos_commit commit firewall's cand... +panos_dag create a dynamic addre... +panos_dag_tags Create tags for DAG's ... +panos_import import file on PAN-OS ... +panos_interface configure data-port ne... +panos_lic apply authcode to a de... +panos_loadcfg load configuration on ... +panos_match_rule Test for match against... +panos_mgtconfig configure management s... +panos_nat_policy create a policy NAT ru... +panos_nat_rule create a policy NAT ru... +panos_object create/read/update/del... +panos_op execute arbitrary OP c... +panos_pg create a security prof... +panos_query_rules PANOS module that allo... +panos_restart restart a device +panos_sag Create a static addres... +panos_security_policy Create security rule p... +panos_security_rule Create security rule p... +parted Configure block device... +patch Apply patch files usin... +pause Pause playbook executi... +pear Manage pear/pecl packa... +ping Try to connect to host... +pingdom Pause/unpause Pingdom ... +pip Manages Python library... 
+pkg5 Manages packages with ... +pkg5_publisher Manages Solaris 11 Ima... +pkgin Package manager for Sm... +pkgng Package manager for Fr... +pkgutil Manage CSW-Packages on... +pn_cluster CLI command to create/... +pn_ospf CLI command to add/rem... +pn_ospfarea CLI command to add/rem... +pn_show Run show commands on n... +pn_trunk CLI command to create/... +pn_vlag CLI command to create/... +pn_vlan CLI command to create/... +pn_vrouter CLI command to create/... +pn_vrouterbgp CLI command to add/rem... +pn_vrouterif CLI command to add/rem... +pn_vrouterlbif CLI command to add/rem... +portage Package manager for Ge... +portinstall Installing packages fr... +postgresql_db Add or remove PostgreS... +postgresql_ext Add or remove PostgreS... +postgresql_lang Adds, removes or chang... +postgresql_privs Grant or revoke privil... +postgresql_schema Add or remove PostgreS... +postgresql_user Adds or removes a user... +profitbricks Create, destroy, start... +profitbricks_datacenter Create or destroy a Pr... +profitbricks_nic Create or Remove a NIC... +profitbricks_volume Create or destroy a vo... +profitbricks_volume_attachments Attach or detach a vol... +proxmox management of instance... +proxmox_kvm Management of Qemu(KVM... +proxmox_template management of OS templ... +proxysql_backend_servers Adds or removes mysql ... +proxysql_global_variables Gets or sets the proxy... +proxysql_manage_config Writes the proxysql co... +proxysql_mysql_users Adds or removes mysql ... +proxysql_query_rules Modifies query rules u... +proxysql_replication_hostgroups Manages replication ho... +proxysql_scheduler Adds or removes schedu... +psexec Runs commands on a rem... +pubnub_blocks PubNub blocks manageme... +pulp_repo Add or remove Pulp rep... +puppet Runs puppet +purefa_ds Configure FlashArray D... +purefa_facts Collect facts from Pur... +purefa_hg Manage hostgroups on P... +purefa_host Manage hosts on Pure S... +purefa_pg Manage protection grou... +purefa_pgsnap Manage protection grou... +purefa_snap Manage volume snapshot... +purefa_volume Manage volumes on Pure... +purefb_fs Manage filesystem on Pu... +purefb_snap Manage filesystem snap... +pushbullet Sends notifications to... +pushover Send notifications via... +rabbitmq_binding This module manages ra... +rabbitmq_exchange This module manages ra... +rabbitmq_parameter Adds or removes parame... +rabbitmq_plugin Manage RabbitMQ plugin... +rabbitmq_policy Manage the state of po... +rabbitmq_queue This module manages ra... +rabbitmq_user Adds or removes users ... +rabbitmq_vhost Manage the state of a ... +raw Executes a low-down an... +rax create / delete an ins... +rax_cbs Manipulate Rackspace C... +rax_cbs_attachments Manipulate Rackspace C... +rax_cdb create/delete or resiz... +rax_cdb_database create / delete a data... +rax_cdb_user create / delete a Rack... +rax_clb create / delete a load... +rax_clb_nodes add, modify and remove... +rax_clb_ssl Manage SSL termination... +rax_dns Manage domains on Rack... +rax_dns_record Manage DNS records on ... +rax_facts Gather facts for Racks... +rax_files Manipulate Rackspace C... +rax_files_objects Upload, download, and ... +rax_identity Load Rackspace Cloud I... +rax_keypair Create a keypair for u... +rax_meta Manipulate metadata fo... +rax_mon_alarm Create or delete a Rac... +rax_mon_check Create or delete a Rac... +rax_mon_entity Create or delete a Rac... +rax_mon_notification Create or delete a Rac... +rax_mon_notification_plan Create or delete a Rac... +rax_network create / delete an iso...
+rax_queue create / delete a queu... +rax_scaling_group Manipulate Rackspace C... +rax_scaling_policy Manipulate Rackspace C... +rds create, delete, or mod... +rds_instance_facts obtain facts about one... +rds_param_group manage RDS parameter g... +rds_snapshot_facts obtain facts about one... +rds_subnet_group manage RDS database su... +redhat_subscription Manage registration an... +redis Various redis commands... +redshift create, delete, or mod... +redshift_facts Gather facts about Red... +redshift_subnet_group manage Redshift cluste... +replace Replace all instances ... +rhevm RHEV/oVirt automation +rhn_channel Adds or removes Red Ha... +rhn_register Manage Red Hat Network... +rhsm_repository Manage RHSM repositori... +riak This module handles so... +rocketchat Send notifications to ... +rollbar_deployment Notify Rollbar about a... +route53 add or delete entries ... +route53_facts Retrieves route53 deta... +route53_health_check add or delete health-c... +route53_zone add or delete Route53 ... +rpm_key Adds or removes a gpg ... +rundeck_acl_policy Manage Rundeck ACL pol... +rundeck_project Manage Rundeck project... +runit Manage runit services +s3_bucket Manage S3 buckets in A... +s3_lifecycle Manage s3 bucket lifec... +s3_logging Manage logging facilit... +s3_sync Efficiently upload mul... +s3_website Configure an s3 bucket... +say Makes a computer to sp... +scaleway_compute Scaleway compute manag... +scaleway_sshkey Scaleway SSH keys mana... +script Runs a local script on... +seboolean Toggles SELinux boolea... +sefcontext Manages SELinux file c... +selinux Change policy and stat... +selinux_permissive Change permissive doma... +sendgrid Sends an email with th... +sensu_check Manage Sensu checks +sensu_client Manages Sensu client c... +sensu_handler Manages Sensu handler ... +sensu_silence Manage Sensu silence e... +sensu_subscription Manage Sensu subscript... +seport Manages SELinux networ... +serverless Manages a Serverless F... +service Manage services +service_facts Return service state i... +set_fact Set host facts from a ... +set_stats Set stats for the curr... +setup Gathers facts about re... +sf_account_manager Manage SolidFire accou... +sf_check_connections Check connectivity to ... +sf_snapshot_schedule_manager Manage SolidFire snaps... +sf_volume_access_group_manager Manage SolidFire Volum... +sf_volume_manager Manage SolidFire volum... +shell Execute commands in no... +sl_vm create or cancel a vir... +slack Send Slack notificatio... +slackpkg Package manager for Sl... +slurp Slurps a file from rem... +slxos_command Run commands on remote... +slxos_config Manage Extreme Network... +slxos_facts Collect facts from dev... +slxos_interface Manage Interfaces on E... +slxos_l2_interface Manage Layer-2 interfa... +slxos_l3_interface Manage L3 interfaces o... +slxos_linkagg Manage link aggregatio... +slxos_vlan Manage VLANs on Extrem... +smartos_image_facts Get SmartOS image deta... +snmp_facts Retrieve facts for a d... +snow_record Create/Delete/Update r... +sns Send Amazon Simple Not... +sns_topic Manages AWS SNS topics... +solaris_zone Manage Solaris zones +sorcery Package manager for So... +spectrum_device Creates/deletes device... +spotinst_aws_elastigroup Create, update or dele... +sqs_queue Creates or deletes AWS... +sros_command Run commands on remote... +sros_config Manage Nokia SR OS dev... +sros_rollback Configure Nokia SR OS ... +stackdriver Send code deploy and a... +stacki_host Add or remove host to ... +stat Retrieve file or file ... 
+statusio_maintenance Create maintenance win... +sts_assume_role Assume a role using AW... +sts_session_token Obtain a session token... +subversion Deploys a subversion r... +supervisorctl Manage the state of a ... +svc Manage daemontools ser... +svr4pkg Manage Solaris SVR4 pa... +swdepot Manage packages with s... +swupd Manages updates and bu... +synchronize A wrapper around rsync... +sysctl Manage entries in sysc... +syslogger Log messages in the sy... +systemd Manage services +sysvinit Manage SysV services. +taiga_issue Creates/deletes an iss... +telegram module for sending not... +telnet Executes a low-down an... +tempfile Creates temporary file... +template Templates a file out t... +terraform Manages a Terraform de... +timezone Configure timezone set... +tower_credential create, update, or des... +tower_group create, update, or des... +tower_host create, update, or des... +tower_inventory create, update, or des... +tower_job_cancel Cancel an Ansible Towe... +tower_job_launch Launch an Ansible Job. +tower_job_list List Ansible Tower job... +tower_job_template create, update, or des... +tower_job_wait Wait for Ansible Tower... +tower_label create, update, or des... +tower_organization create, update, or des... +tower_project create, update, or des... +tower_role create, update, or des... +tower_team create, update, or des... +tower_user create, update, or des... +twilio Sends a text message t... +typetalk Send a message to type... +ucs_ip_pool Configures IP address ... +ucs_lan_connectivity Configures LAN Connect... +ucs_mac_pool Configures MAC address... +ucs_san_connectivity Configures SAN Connect... +ucs_vhba_template Configures vHBA templa... +ucs_vlans Configures VLANs on Ci... +ucs_vnic_template Configures vNIC templa... +ucs_vsans Configures VSANs on Ci... +ucs_wwn_pool Configures WWNN or WWP... +udm_dns_record Manage dns entries on ... +udm_dns_zone Manage dns zones on a ... +udm_group Manage of the posix gr... +udm_share Manage samba shares on... +udm_user Manage posix users on ... +ufw Manage firewall with U... +unarchive Unpacks an archive aft... +uptimerobot Pause and start Uptime... +uri Interacts with webserv... +urpmi Urpmi manager +user Manage user accounts +vca_fw add remove firewall ru... +vca_nat add remove nat rules i... +vca_vapp Manages vCloud Air vAp... +vcenter_folder Manage folders on give... +vcenter_license Manage VMware vCenter ... +vdirect_commit Commits pending config... +vdirect_file Uploads a new or updat... +vdirect_runnable Runs templates and wor... +vdo Module to control VDO +vertica_configuration Updates Vertica config... +vertica_facts Gathers Vertica databa... +vertica_role Adds or removes Vertic... +vertica_schema Adds or removes Vertic... +vertica_user Adds or removes Vertic... +virt Manages virtual machin... +virt_net Manage libvirt network... +virt_pool Manage libvirt storage... +vmadm Manage SmartOS virtual... +vmware_cfg_backup Backup / Restore / Res... +vmware_cluster Manage VMware vSphere ... +vmware_cluster_facts Gather facts about clu... +vmware_datacenter Manage VMware vSphere ... +vmware_datastore_cluster Manage VMware vSphere ... +vmware_datastore_facts Gather facts about dat... +vmware_datastore_maintenancemode Place a datastore into... +vmware_dns_config Manage VMware ESXi DNS... +vmware_drs_rule_facts Gathers facts about DR... +vmware_dvs_host Add or remove a host f... +vmware_dvs_portgroup Create or remove a Dis... +vmware_dvswitch Create or remove a dis... +vmware_guest Manages virtual machin... 
+vmware_guest_disk_facts Gather facts about dis... +vmware_guest_facts Gather facts about a s... +vmware_guest_file_operation Files operation in a V... +vmware_guest_find Find the folder path(s... +vmware_guest_powerstate Manages power states o... +vmware_guest_snapshot Manages virtual machin... +vmware_guest_snapshot_facts Gather facts about vir... +vmware_guest_tools_wait Wait for VMware tools ... +vmware_host Add / Remove ESXi host... +vmware_host_acceptance Manage acceptance leve... +vmware_host_capability_facts Gathers facts about an... +vmware_host_config_facts Gathers facts about an... +vmware_host_config_manager Manage advance configu... +vmware_host_datastore Manage a datastore on ... +vmware_host_dns_facts Gathers facts about an... +vmware_host_facts Gathers facts about re... +vmware_host_firewall_facts Gathers facts about an... +vmware_host_firewall_manager Manage firewall config... +vmware_host_lockdown Manage administrator p... +vmware_host_ntp Manage NTP configurati... +vmware_host_package_facts Gathers facts about av... +vmware_host_powerstate Manages power states o... +vmware_host_service_facts Gathers facts about an... +vmware_host_service_manager Manage services on a g... +vmware_host_vmnic_facts Gathers facts about vm... +vmware_local_role_manager Manage local roles on ... +vmware_local_user_facts Gather facts about use... +vmware_local_user_manager Manage local users on ... +vmware_maintenancemode Place a host into main... +vmware_migrate_vmk Migrate a VMK interfac... +vmware_portgroup Create a VMware portgr... +vmware_portgroup_facts Gathers facts about an... +vmware_resource_pool Add/remove resource po... +vmware_resource_pool_facts Gathers facts about re... +vmware_tag Manage VMware tags +vmware_tag_facts Manage VMware tag fact... +vmware_target_canonical_facts Return canonical (NAA)... +vmware_vm_facts Return basic facts per... +vmware_vm_shell Run commands in a VMwa... +vmware_vm_vm_drs_rule Configure VMware DRS A... +vmware_vm_vss_dvs_migrate Migrates a virtual mac... +vmware_vmkernel Manage a VMware VMkern... +vmware_vmkernel_facts Gathers VMKernel facts... +vmware_vmkernel_ip_config Configure the VMkernel... +vmware_vmotion Move a virtual machine... +vmware_vsan_cluster Configure VSAN cluster... +vmware_vswitch Manage a VMware Standa... +vmware_vswitch_facts Gathers facts about an... +vr_account_facts Gather facts about the... +vr_dns_domain Manages DNS domains on... +vr_dns_record Manages DNS records on... +vr_firewall_group Manages firewall group... +vr_firewall_rule Manages firewall rules... +vr_server Manages virtual server... +vr_ssh_key Manages ssh keys on Vu... +vr_startup_script Manages startup script... +vr_user Manages users on Vultr... +vsphere_copy Copy a file to a vCent... +vsphere_guest Create/delete/manage a... +vyos_banner Manage multiline banne... +vyos_command Run one or more comman... +vyos_config Manage VyOS configurat... +vyos_facts Collect facts from rem... +vyos_interface Manage Interface on Vy... +vyos_l3_interface Manage L3 interfaces o... +vyos_linkagg Manage link aggregatio... +vyos_lldp Manage LLDP configurat... +vyos_lldp_interface Manage LLDP interfaces... +vyos_logging Manage logging on netw... +vyos_static_route Manage static IP route... +vyos_system Run `set system` comma... +vyos_user Manage the collection ... +vyos_vlan Manage VLANs on VyOS n... +wait_for Waits for a condition ... +wait_for_connection Waits until remote sys... +wakeonlan Send a magic Wake-on-L... +webfaction_app Add or remove applicat... 
+webfaction_db Add or remove a databa... +webfaction_domain Add or remove domains ... +webfaction_mailbox Add or remove mailboxe... +webfaction_site Add or remove a websit... +win_acl Set file/directory/reg... +win_acl_inheritance Change ACL inheritance +win_audit_policy_system Used to make changes t... +win_audit_rule Adds an audit rule to ... +win_certificate_store Manages the certificat... +win_chocolatey Manage packages using ... +win_command Executes a command on ... +win_copy Copies files to remote... +win_defrag Consolidate fragmented... +win_disk_facts Show the attached disk... +win_disk_image Manage ISO/VHD/VHDX mo... +win_dns_client Configures DNS lookup ... +win_domain Ensures the existence ... +win_domain_computer Manage computers in Ac... +win_domain_controller Manage domain controll... +win_domain_group Creates, modifies or r... +win_domain_membership Manage domain/workgrou... +win_domain_user Manages Windows Active... +win_dotnet_ngen Runs ngen to recompile... +win_dsc Invokes a PowerShell D... +win_environment Modify environment var... +win_eventlog Manage Windows event l... +win_eventlog_entry Write entries to Windo... +win_feature Installs and uninstall... +win_file Creates, touches or re... +win_file_version Get DLL or EXE file bu... +win_find Return a list of files... +win_firewall Enable or disable the ... +win_firewall_rule Windows firewall autom... +win_get_url Downloads file from HT... +win_group Add and remove local g... +win_group_membership Manage Windows local g... +win_hostname Manages local Windows ... +win_hotfix Install and uninstalls... +win_iis_virtualdirectory Configures a virtual d... +win_iis_webapplication Configures IIS web app... +win_iis_webapppool Configure IIS Web Appl... +win_iis_webbinding Configures a IIS Web s... +win_iis_website Configures a IIS Web s... +win_lineinfile Ensure a particular li... +win_mapped_drive Map network drives for... +win_msg Sends a message to log... +win_msi Installs and uninstall... +win_nssm NSSM - the Non-Sucking... +win_owner Set owner +win_package Installs/uninstalls an... +win_pagefile Query or change pagefi... +win_path Manage Windows path en... +win_pester Run Pester tests on Wi... +win_ping A windows version of t... +win_power_plan Changes the power plan... +win_product_facts Provides Windows produ... +win_psexec Runs commands (remotel... +win_psmodule Adds or removes a Powe... +win_rabbitmq_plugin Manage RabbitMQ plugin... +win_reboot Reboot a windows machi... +win_reg_stat Get information about ... +win_regedit Add, change, or remove... +win_region Set the region and for... +win_regmerge Merges the contents of... +win_robocopy Synchronizes the conte... +win_route Add or remove a static... +win_say Text to speech module ... +win_scheduled_task Manage scheduled tasks +win_scheduled_task_stat Get information about ... +win_security_policy Change local security ... +win_service Manage and query Windo... +win_share Manage Windows shares +win_shell Execute shell commands... +win_shortcut Manage shortcuts on Wi... +win_stat Get information about ... +win_tempfile Creates temporary file... +win_template Templates a file out t... +win_timezone Sets Windows machine t... +win_toast Sends Toast windows no... +win_unzip Unzips compressed file... +win_updates Download and install W... +win_uri Interacts with webserv... +win_user Manages local Windows ... +win_user_right Manage Windows User Ri... +win_wait_for Waits for a condition ... +win_wakeonlan Send a magic Wake-on-L... +win_webpicmd Installs packages usin... 
+win_whoami Get information about ... +xattr Manage user defined ex... +xbps Manage packages with X... +xenserver_facts get facts reported on ... +xml Manage bits and pieces... +yarn Manage node.js package... +yum Manages packages with ... +yum_repository Add or remove YUM repo... +zabbix_group Zabbix host groups cre... +zabbix_group_facts Gather facts about Zab... +zabbix_host Zabbix host creates/up... +zabbix_hostmacro Zabbix host macro crea... +zabbix_maintenance Create Zabbix maintena... +zabbix_proxy Zabbix proxy creates/d... +zabbix_screen Zabbix screen creates/... +zabbix_template create/delete/dump zab... +zfs Manage zfs +zfs_facts Gather facts about ZFS... +znode Create, delete, retrie... +zpool_facts Gather facts about ZFS... +zypper Manage packages on SUS... +zypper_repository Add and remove Zypper ... +
    +
    +
    + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/220_Modules_ansible-doc-CLI-Info.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/220_Modules_ansible-doc-CLI-Info.html new file mode 100644 index 0000000..4efa040 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/220_Modules_ansible-doc-CLI-Info.html @@ -0,0 +1,208 @@ +
    +

    Modules Documentation

    +

    Returns a thorough description of the parameters in the module.

    + +
    + +
    + ansible-doc copy 
    +
    +> COPY (/usr/lib/python2.7/site-packages/ansible/modules/files/copy.py) + + The `copy' module copies a file from the local or remote machine to a + location on the remote machine. Use the [fetch] module to copy files + from remote locations to the local box. If you need variable + interpolation in copied files, use the [template] module. For Windows + targets, use the [win_copy] module instead. + + * note: This module has a corresponding action plugin. + +OPTIONS (= is mandatory): + +- attributes + Attributes the file or directory should have. To get supported flags + look at the man page for `chattr' on the target system. This string + should contain the attributes in the same order as the one displayed by + `lsattr'. + (Aliases: attr)[Default: (null)] + version_added: 2.3 + +- backup + Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + [Default: no] + type: bool + version_added: 0.7 + +- checksum + SHA1 checksum of the file being transferred. Used to validate that the + copy of the file was successful. + If this is not provided, ansible will use the local calculated checksum + of the src file. + [Default: (null)] + version_added: 2.5 + +- content + When used instead of `src', sets the contents of a file directly to the + specified value. For anything advanced or with formatting also look at + the template module. + [Default: (null)] + version_added: 1.1 + +- decrypt + This option controls the autodecryption of source files using vault. + [Default: Yes] + type: bool + version_added: 2.4 + += dest + Remote absolute path where the file should be copied to. If `src' is a + directory, this must be a directory too. If `dest' is a nonexistent path + and if either `dest' ends with "/" or `src' is a directory, `dest' is + created. If `src' and `dest' are files, the parent directory of `dest' + isn't created: the task fails if it doesn't already exist. + + +- directory_mode + When doing a recursive copy set the mode for the directories. If this is + not set we will use the system defaults. The mode is only set on + directories which are newly created, and will not affect those that + already existed. + [Default: (null)] + version_added: 1.5 + +- follow + This flag indicates that filesystem links in the destination, if they + exist, should be followed. + [Default: no] + type: bool + version_added: 1.8 + +- force + the default is `yes', which will replace the remote file when contents + are different than the source. If `no', the file will only be + transferred if the destination does not exist. + (Aliases: thirsty)[Default: yes] + type: bool + version_added: 1.1 + +- group + Name of the group that should own the file/directory, as would be fed to + `chown'. + [Default: (null)] + +- local_follow + This flag indicates that filesystem links in the source tree, if they + exist, should be followed. + [Default: yes] + type: bool + version_added: 2.4 + +- mode + Mode the file or directory should be. For those used to `/usr/bin/chmod' + remember that modes are actually octal numbers. You must either specify + the leading zero so that Ansible's YAML parser knows it is an octal + number (like `0644' or `01777') or quote it (like `'644'' or `'0644'' so + Ansible receives a string and can do its own conversion from string into + number. Giving Ansible a number without following one of these rules + will end up with a decimal number which will have unexpected results. 
+ As of version 1.8, the mode may be specified as a symbolic mode (for + example, `u+rwx' or `u=rw,g=r,o=r'). As of version 2.3, the mode may + also be the special string `preserve'. `preserve' means that the file + will be given the same permissions as the source file. + [Default: (null)] + +- owner + Name of the user that should own the file/directory, as would be fed to + `chown'. + [Default: (null)] + +- remote_src + If `no', it will search for `src' at originating/master machine. + If `yes' it will go to the remote/target machine for the `src'. Default + is `no'. + Currently `remote_src' does not support recursive copying. + `remote_src' only works with `mode=preserve' as of version 2.6. + [Default: no] + type: bool + version_added: 2.0 + +- selevel + Level part of the SELinux file context. This is the MLS/MCS attribute, + sometimes known as the `range'. `_default' feature works as for + `seuser'. + [Default: s0] + +- serole + Role part of SELinux file context, `_default' feature works as for + `seuser'. + [Default: (null)] + +- setype + Type part of SELinux file context, `_default' feature works as for + `seuser'. + [Default: (null)] + +- seuser + User part of SELinux file context. Will default to system policy, if + applicable. If set to `_default', it will use the `user' portion of the + policy if available. + [Default: (null)] + +- src + Local path to a file to copy to the remote server; can be absolute or + relative. If path is a directory, it is copied recursively. In this + case, if path ends with "/", only inside contents of that directory are + copied to destination. Otherwise, if it does not end with "/", the + directory itself with all contents is copied. This behavior is similar + to Rsync. + [Default: (null)] + +- unsafe_writes + Normally this module uses atomic operations to prevent data corruption + or inconsistent reads from the target files, sometimes systems are + configured or just broken in ways that prevent this. One example are + docker mounted files, they cannot be updated atomically and can only be + done in an unsafe manner. + This boolean option allows ansible to fall back to unsafe methods of + updating files for those cases in which you do not have any other + choice. Be aware that this is subject to race conditions and can lead to + data corruption. + [Default: no] + type: bool + version_added: 2.2 + +- validate + The validation command to run before copying into place. The path to the + file to validate is passed in via '%s' which must be present as in the + example below. The command is passed securely so shell features like + expansion and pipes won't work. + [Default: (null)] + + +NOTES: + * The [copy] module recursively copy facility does not scale to lots + (>hundreds) of files. For alternative, see [synchronize] module, + which is a wrapper around `rsync'. + * For Windows targets, use the [win_copy] module instead. + +AUTHOR: Ansible Core Team, Michael DeHaan + METADATA: + status: + - stableinterface + supported_by: core + + +
    +
    +
    + + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/230_Modules_ansible-doc-CLI-examples.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/230_Modules_ansible-doc-CLI-examples.html new file mode 100644 index 0000000..42029de --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/230_Modules_ansible-doc-CLI-examples.html @@ -0,0 +1,140 @@ +
    +

    Modules Documentation

    +

Also displays usable copy/paste examples of working, functional tasks for this module.

    + +
    + +
    + ansible-doc copy
    +
    + +EXAMPLES: + +- name: example copying file with owner and permissions + copy: + src: /srv/myfiles/foo.conf + dest: /etc/foo.conf + owner: foo + group: foo + mode: 0644 + +- name: The same example as above, but using a symbolic mode equivalent to 0644 + copy: + src: /srv/myfiles/foo.conf + dest: /etc/foo.conf + owner: foo + group: foo + mode: u=rw,g=r,o=r + +- name: Another symbolic mode example, adding some permissions and removing others + copy: + src: /srv/myfiles/foo.conf + dest: /etc/foo.conf + owner: foo + group: foo + mode: u+rw,g-wx,o-rwx + +- name: Copy a new "ntp.conf file into place, backing up the original if it differs from the... + copy: + src: /mine/ntp.conf + dest: /etc/ntp.conf + owner: root + group: root + mode: 0644 + backup: yes + +- name: Copy a new "sudoers" file into place, after passing validation with visudo + copy: + src: /mine/sudoers + dest: /etc/sudoers + validate: /usr/sbin/visudo -cf %s + +- name: Copy a "sudoers" file on the remote machine for editing + copy: + src: /etc/sudoers + dest: /etc/sudoers.edit + remote_src: yes + validate: /usr/sbin/visudo -cf %s + +- name: Copy using the 'content' for inline data + copy: + content: '# This file was moved to /etc/other.conf' + dest: /etc/mine.conf' + +RETURN VALUES: + + +dest: + description: destination file/path + returned: success + type: string + sample: /path/to/file.txt +src: + description: source file used for the copy on the target machine + returned: changed + type: string + sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source +md5sum: + description: md5 checksum of the file after running copy + returned: when supported + type: string + sample: 2a5aeecc61dc98c4d780b14b330e3282 +checksum: + description: sha1 checksum of the file after running copy + returned: success + type: string + sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827 +backup_file: + description: name of backup file created + returned: changed and if backup=yes + type: string + sample: /path/to/file.txt.2015-02-12@22:09~ +gid: + description: group id of the file, after execution + returned: success + type: int + sample: 100 +group: + description: group of the file, after execution + returned: success + type: string + sample: httpd +owner: + description: owner of the file, after execution + returned: success + type: string + sample: httpd +uid: + description: owner id of the file, after execution + returned: success + type: int + sample: 100 +mode: + description: permissions of the target, after execution + returned: success + type: string + sample: 0644 +size: + description: size of the target, after execution + returned: success + type: int + sample: 1220 +state: + description: state of the target, after execution + returned: success + type: string + sample: file +
    +
    +
    + + + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/240_Modules_ansible-doc-CLI-snippets.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/240_Modules_ansible-doc-CLI-snippets.html new file mode 100644 index 0000000..dec94c1 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/240_Modules_ansible-doc-CLI-snippets.html @@ -0,0 +1,76 @@ +
    +

    Modules Documentation

    +

Starting from the snippet view, you can build your task from a pre-formatted skeleton instead of from scratch.

    + +
    + +
    + ansible-doc copy -s
    +
    + +- name: Copies files to remote locations + copy: + attributes: # Attributes the file or directory should have. To get supported flags look at the man page for `chattr' on the target system. This string + should contain the attributes in the same order as the one displayed by `lsattr'. + backup: # Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. + checksum: # SHA1 checksum of the file being transferred. Used to validate that the copy of the file was successful. If this is not provided, ansible + will use the local calculated checksum of the src file. + content: # When used instead of `src', sets the contents of a file directly to the specified value. For anything advanced or with formatting also + look at the template module. + decrypt: # This option controls the autodecryption of source files using vault. + dest: # (required) Remote absolute path where the file should be copied to. If `src' is a directory, this must be a directory too. If `dest' is a + nonexistent path and if either `dest' ends with "/" or `src' is a directory, `dest' is created. If `src' + and `dest' are files, the parent directory of `dest' isn't created: the task fails if it doesn't already + exist. + directory_mode: # When doing a recursive copy set the mode for the directories. If this is not set we will use the system defaults. The mode is only set on + directories which are newly created, and will not affect those that already existed. + follow: # This flag indicates that filesystem links in the destination, if they exist, should be followed. + force: # the default is `yes', which will replace the remote file when contents are different than the source. If `no', the file will only be + transferred if the destination does not exist. + group: # Name of the group that should own the file/directory, as would be fed to `chown'. + local_follow: # This flag indicates that filesystem links in the source tree, if they exist, should be followed. + mode: # Mode the file or directory should be. For those used to `/usr/bin/chmod' remember that modes are actually octal numbers. You must either + specify the leading zero so that Ansible's YAML parser knows it is an octal number (like `0644' or + `01777') or quote it (like `'644'' or `'0644'' so Ansible receives a string and can do its own conversion + from string into number. Giving Ansible a number without following one of these rules will end up with a + decimal number which will have unexpected results. As of version 1.8, the mode may be specified as a + symbolic mode (for example, `u+rwx' or `u=rw,g=r,o=r'). As of version 2.3, the mode may also be the + special string `preserve'. `preserve' means that the file will be given the same permissions as the + source file. + owner: # Name of the user that should own the file/directory, as would be fed to `chown'. + remote_src: # If `no', it will search for `src' at originating/master machine. If `yes' it will go to the remote/target machine for the `src'. Default + is `no'. Currently `remote_src' does not support recursive copying. `remote_src' only works with + `mode=preserve' as of version 2.6. + selevel: # Level part of the SELinux file context. This is the MLS/MCS attribute, sometimes known as the `range'. `_default' feature works as for + `seuser'. + serole: # Role part of SELinux file context, `_default' feature works as for `seuser'. + setype: # Type part of SELinux file context, `_default' feature works as for `seuser'. 
+ seuser: # User part of SELinux file context. Will default to system policy, if applicable. If set to `_default', it will use the `user' portion of + the policy if available. + src: # Local path to a file to copy to the remote server; can be absolute or relative. If path is a directory, it is copied recursively. In this + case, if path ends with "/", only inside contents of that directory are copied to destination. Otherwise, + if it does not end with "/", the directory itself with all contents is copied. This behavior is similar to + Rsync. + unsafe_writes: # Normally this module uses atomic operations to prevent data corruption or inconsistent reads from the target files, sometimes systems are + configured or just broken in ways that prevent this. One example are docker mounted files, they cannot be + updated atomically and can only be done in an unsafe manner. This boolean option allows ansible to fall + back to unsafe methods of updating files for those cases in which you do not have any other choice. Be + aware that this is subject to race conditions and can lead to data corruption. + validate: # The validation command to run before copying into place. The path to the file to validate is passed in via '%s' which must be present as + in the example below. The command is passed securely so shell features like expansion and pipes won't + work. + +
    +
    +
    + + + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/250_ModulesDocWebSite.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/250_ModulesDocWebSite.html new file mode 100644 index 0000000..180ecb9 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/250_ModulesDocWebSite.html @@ -0,0 +1,9 @@ +
    + + +
    +

    Modules Documentation

    +
    +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/260_Modules_Run_Commands.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/260_Modules_Run_Commands.html new file mode 100644 index 0000000..5b00500 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/260_Modules_Run_Commands.html @@ -0,0 +1,18 @@ +
    + +

    Modules: Run Commands

    +

If Ansible doesn't have a module that suits your needs, there are the “run command” modules (quick examples follow the note below):


    +
      +
    • command: Takes the command and executes it on the host. The most secure and predictable.
    • +
    • shell: Executes through a shell like /bin/sh so you can use pipes etc. Be careful.
    • +
    • raw: Executes a command without going through the Ansible module subsystem.

    • +
    +


    NOTE: Unlike standard modules, run commands have no concept of desired state and should only be used as a last resort.

    + +
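+
+A rough sketch of how each run-command module looks in ad-hoc form (the host group "web" is just an example):
+
+# command: no shell features, the most predictable
+ ansible web -m command -a "uptime"
+
+# shell: runs through /bin/sh, so pipes and redirects work
+ ansible web -m shell -a "ps aux | grep sshd"
+
+# raw: bypasses the module subsystem entirely (useful on hosts without Python)
+ ansible web -m raw -a "cat /etc/os-release"
+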
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/_200_Modules_ansible-doc-CLI-LIST.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/_200_Modules_ansible-doc-CLI-LIST.html new file mode 100644 index 0000000..2e8199c --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/040_Ansible_Modules/_200_Modules_ansible-doc-CLI-LIST.html @@ -0,0 +1,25 @@ +
    +

    Modules Documentation

    +

    Returns a list of literally every module available on the system

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# List out all modules installed -- returns MANY lines
    + ansible-doc -l
    +
    +copy			Copies files to remote...
    +stat			Retrieve file or file ...
    +synchronize		A wrapper around rsync...
    +template		Templates a file out t...
    +vmware_guest		Manages virtual machin...
    +yum			Manages packages with ...
    +
    +Many, many lines have been truncated.  This is a very small sample
    +of the incredibly long list of modules.
    +    
    +
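+
+Because the full listing is so long, it is usually filtered; a small sketch (the search term "vmware" is only an example):
+
+# Find modules whose name or summary mentions vmware
+ ansible-doc -l | grep -i vmware
+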
    + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/000_RedIntro-Inventory.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/000_RedIntro-Inventory.html new file mode 100644 index 0000000..5be3d91 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/000_RedIntro-Inventory.html @@ -0,0 +1,12 @@ +
    + +

    INVENTORY

    + +

    Define hosts on which the playbook will run.

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/024__Inventory.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/024__Inventory.html new file mode 100644 index 0000000..b162ebf --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/024__Inventory.html @@ -0,0 +1,19 @@ +
    + +

    Inventory

    +

Inventory is a collection of hosts (nodes), with associated data and groupings, that Ansible can connect to and manage (a quick way to inspect it is shown after this list).

    +
      +
    • Hosts (nodes)
    • +
    • Groups
    • +
    • Inventory-specific data (variables)
    • +
    • Static or dynamic sources
    • +
    + +
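+
+One way to see how hosts, groups, and variables come together, whether the source is static or dynamic, is to let Ansible dump its resolved view of the inventory; a sketch, assuming an inventory file named "inventory" in the current directory:
+
+# Resolved inventory as JSON (works for static files and dynamic sources)
+ ansible-inventory -i inventory --list
+
+# The same inventory as a tree of groups and hosts
+ ansible-inventory -i inventory --graph
+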
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/025__Static_Inventory_Basic.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/025__Static_Inventory_Basic.html new file mode 100644 index 0000000..18ed1d3 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/025__Static_Inventory_Basic.html @@ -0,0 +1,28 @@ +
    +

    Static Inventory Example

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# Static inventory example:
    +[myservers]
    +10.42.0.2
    +10.42.0.6
    +10.42.0.7
    +10.42.0.8
    +10.42.0.100
    +host.example.com
    +
    +    
    +
    + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/026__Static_Inventory_Advanced.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/026__Static_Inventory_Advanced.html new file mode 100644 index 0000000..a225feb --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/026__Static_Inventory_Advanced.html @@ -0,0 +1,179 @@ +
    + +

    A MORE ADVANCED STATIC INVENTORY EXAMPLE

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# A more advanced static inventory example:
    +[control]
    +mainserver ansible_host=10.42.0.2
    +
    +[web]
    +node-[1:30] ansible_host=10.42.0.[31:60]
    +
    +[haproxy]
    +haproxy ansible_host=10.42.0.100
    +
    +[all:vars]
    +ansible_user=kev
    +ansible_ssh_private_key_file=/home/kev/.ssh/id_rsa
    +    
    +
    + + +
    + + + + + + + +
    + +

    A MORE ADVANCED STATIC INVENTORY EXAMPLE

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# A more advanced static inventory example:
    +[control]
    +mainserver ansible_host=10.42.0.2
    +
    +[web]
    +node-[1:30] ansible_host=10.42.0.[31:60]
    +
    +[haproxy]
    +haproxy ansible_host=10.42.0.100
    +
    +[all:vars]
    +ansible_user=kev
    +ansible_ssh_private_key_file=/home/kev/.ssh/id_rsa
    +    
    +
    + + +
    + + + +
    + +

    A MORE ADVANCED STATIC INVENTORY EXAMPLE

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# A more advanced static inventory example:
    +[control]
    +mainserver ansible_host=10.42.0.2
    +
    +[web]
    +node-[1:30] ansible_host=10.42.0.[31:60]
    +
    +[haproxy]
    +haproxy ansible_host=10.42.0.100
    +
    +[all:vars]
    +ansible_user=kev
    +ansible_ssh_private_key_file=/home/kev/.ssh/id_rsa
    +    
    +
    + + +
    + + + + + + + +
    + +

    A MORE ADVANCED STATIC INVENTORY EXAMPLE

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# A more advanced static inventory example:
    +[control]
    +mainserver ansible_host=10.42.0.2
    +
    +[web]
    +node-[1:30] ansible_host=10.42.0.[31:60]
    +
    +[haproxy]
    +haproxy ansible_host=10.42.0.100
    +
    +[all:vars]
    +ansible_user=kev
    +ansible_ssh_private_key_file=/home/kev/.ssh/id_rsa
    +    
    +
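+
+To confirm how the node-[1:30] range pattern expands, Ansible can list the matched hosts without running anything; a sketch, assuming the inventory above is saved as "inventory":
+
+# Prints node-1 through node-30 without executing any task
+ ansible web -i inventory --list-hosts
+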
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/027__Static_Inventory_Windows.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/027__Static_Inventory_Windows.html new file mode 100644 index 0000000..7b77547 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/027__Static_Inventory_Windows.html @@ -0,0 +1,60 @@ +
    + +

    A WINDOWS STATIC INVENTORY EXAMPLE

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# Windows vars inventory example:
    +[winservers]
    +blueiris	ansible_host=10.10.2.20
    +winfileprd[1:3]	ansible_host=10.13.128.[7:9]
    +winwebsrv01	ansible_host=10.14.27.16
    +
    +[winservers:vars]
+ansible_connection=winrm
+ansible_winrm_transport=credssp
+ansible_port=5986
+ansible_winrm_server_cert_validation=ignore
    +    
    +
    + + +
    + + +
    + +

    A WINDOWS STATIC INVENTORY EXAMPLE

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# Windows vars inventory example:
    +[winservers]
    +blueiris	ansible_host=10.10.2.20
    +winfileprd[1:3]	ansible_host=10.13.128.[7:9]
    +winwebsrv01	ansible_host=10.14.27.16
    +
    +[winservers:vars]
+ansible_connection=winrm
+ansible_winrm_transport=credssp
+ansible_port=5986
+ansible_winrm_server_cert_validation=ignore
    +    
    +
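+
+A quick way to verify the WinRM settings above would be the Windows flavour of the connection check (group name taken from the example; adjust the inventory path as needed):
+
+# Checks WinRM connectivity and authentication, not ICMP
+ ansible winservers -i inventory -m win_ping
+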
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/_labs/00_InventoryLabs.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/_labs/00_InventoryLabs.html new file mode 100644 index 0000000..614994e --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/050_Ansible_Inventory/_labs/00_InventoryLabs.html @@ -0,0 +1,10 @@ +
    +

    + +

    Take a moment to get familiar with your lab inventory. Look at the file:
    /some/path/to/something

    +
    +
    +

    + +

Notice the items in the inventory. This vague statement will be updated soon to be clearer about what this lab involves.

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/000__Red_Slide_Ad-Hoc_Commands.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/000__Red_Slide_Ad-Hoc_Commands.html new file mode 100644 index 0000000..33deedc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/000__Red_Slide_Ad-Hoc_Commands.html @@ -0,0 +1,14 @@ +
    + +

    AD-HOC COMMANDS

    + +

An ad-hoc command is a single Ansible task that you want to perform quickly but don’t want to save for later.

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/029__Ad-Hoc_Commands_Common_Options.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/029__Ad-Hoc_Commands_Common_Options.html new file mode 100644 index 0000000..796cd73 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/029__Ad-Hoc_Commands_Common_Options.html @@ -0,0 +1,17 @@ +
    +

    Ad-Hoc Commands: Common Options

    +
      +
    • -m MODULE_NAME, --module-name=MODULE_NAME
      Module name to execute the ad-hoc command
    • +
    • -a MODULE_ARGS, --args=MODULE_ARGS
      Module arguments for the ad-hoc command
    • +
    • -b, --become
  Run the ad-hoc command with privilege escalation; sudo is the default method (see the combined example after this list)
    • +
    • -e EXTRA_VARS, --extra-vars=EXTRA_VARS
      Set additional variables as key=value or YAML/JSON
    • +
    • --version
      Display the version of Ansible
    • +
    • --help
  Display the help text for the ansible command
    • +
    + +
    +
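+
+A sketch combining several of these options (the group "web", the yum module, and the package name are only examples):
+
+# -m picks the module, -a passes its arguments, -b escalates privileges
+ ansible web -b -m yum -a "name=httpd state=present"
+
+# -e supplies an extra variable that the module arguments can reference
+ ansible web -b -m yum -a "name={{ pkg }} state=present" -e "pkg=httpd"
+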
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/030__Ad-Hoc_Commands.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/030__Ad-Hoc_Commands.html new file mode 100644 index 0000000..a2d0102 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/030__Ad-Hoc_Commands.html @@ -0,0 +1,29 @@ +
    + +

    AD HOC COMMANDS

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# Check connections (submarine ping, not ICMP)
    + ansible all -m ping
    +
    +# Run a command on all the hosts in the web group
    + ansible web -m command -a "uptime"
    +
    +# Collect and display known facts
    + ansible localhost -m setup
    +    
    +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/035__SetupM.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/035__SetupM.html new file mode 100644 index 0000000..05bc4b9 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/035__SetupM.html @@ -0,0 +1,584 @@ +
    +

    Full Fact List

    +

    Returns a list of literally every fact available on the system

    + + + +
    + +
    + ansible myserver.mine -m setup 
+Provides a long list of all facts known about this system
    +
    +
    +localhost | SUCCESS => { + "ansible_facts": { + "ansible_all_ipv4_addresses": [ + "10.0.20.5" + ], + "ansible_all_ipv6_addresses": [ + "fe80::4e98:35fe:3b14:33dc" + ], + "ansible_apparmor": { + "status": "disabled" + }, + "ansible_architecture": "x86_64", + "ansible_bios_date": "04/05/2016", + "ansible_bios_version": "6.00", + "ansible_cmdline": { + "BOOT_IMAGE": "/vmlinuz-3.10.0-693.21.1.el7.x86_64", + "LANG": "en_US.UTF-8", + "crashkernel": "auto", + "quiet": true, + "rhgb": true, + "ro": true, + "root": "UUID=d49a9903-d546-4981-bffb-4e5147e1199b" + }, + "ansible_date_time": { + "date": "2018-08-14", + "day": "14", + "epoch": "1534279494", + "hour": "16", + "iso8601": "2018-08-14T20:44:54Z", + "iso8601_basic": "20180814T164454915629", + "iso8601_basic_short": "20180814T164454", + "iso8601_micro": "2018-08-14T20:44:54.915745Z", + "minute": "44", + "month": "08", + "second": "54", + "time": "16:44:54", + "tz": "EDT", + "tz_offset": "-0400", + "weekday": "Tuesday", + "weekday_number": "2", + "weeknumber": "33", + "year": "2018" + }, + "ansible_default_ipv4": { + "address": "10.0.20.5", + "alias": "ens192", + "broadcast": "10.0.20.255", + "gateway": "10.0.20.1", + "interface": "ens192", + "macaddress": "00:50:56:bb:30:e8", + "mtu": 1500, + "netmask": "255.255.255.0", + "network": "10.0.20.0", + "type": "ether" + }, + "ansible_default_ipv6": {}, + "ansible_device_links": { + "ids": { + "sr0": [ + "ata-VMware_Virtual_SATA_CDRW_Drive_00000000000000000001" + ] + }, + "labels": {}, + "masters": {}, + "uuids": { + "sda1": [ + "619dcc55-451d-44be-b364-8c94278c567c" + ], + "sda2": [ + "d49a9903-d546-4981-bffb-4e5147e1199b" + ] + } + }, + "ansible_devices": { + "sda": { + "holders": [], + "host": "Serial Attached SCSI controller: VMware PVSCSI SCSI Controller (rev 02)", + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [] + }, + "model": "Virtual disk", + "partitions": { + "sda1": { + "holders": [], + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [ + "619dcc55-451d-44be-b364-8c94278c567c" + ] + }, + "sectors": "2097152", + "sectorsize": 512, + "size": "1.00 GB", + "start": "2048", + "uuid": "619dcc55-451d-44be-b364-8c94278c567c" + }, + "sda2": { + "holders": [], + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [ + "d49a9903-d546-4981-bffb-4e5147e1199b" + ] + }, + "sectors": "31455232", + "sectorsize": 512, + "size": "15.00 GB", + "start": "2099200", + "uuid": "d49a9903-d546-4981-bffb-4e5147e1199b" + } + }, + "removable": "0", + "rotational": "1", + "sas_address": null, + "sas_device_handle": null, + "scheduler_mode": "deadline", + "sectors": "41943040", + "sectorsize": "512", + "size": "20.00 GB", + "support_discard": "0", + "vendor": "VMware", + "virtual": 1 + }, + "sr0": { + "holders": [], + "host": "SATA controller: VMware SATA AHCI controller", + "links": { + "ids": [ + "ata-VMware_Virtual_SATA_CDRW_Drive_00000000000000000001" + ], + "labels": [], + "masters": [], + "uuids": [] + }, + "model": "VMware SATA CD00", + "partitions": {}, + "removable": "1", + "rotational": "1", + "sas_address": null, + "sas_device_handle": null, + "scheduler_mode": "cfq", + "sectors": "2097151", + "sectorsize": "512", + "size": "1024.00 MB", + "support_discard": "0", + "vendor": "NECVMWar", + "virtual": 1 + } + }, + "ansible_distribution": "CentOS", + "ansible_distribution_file_parsed": true, + "ansible_distribution_file_path": "/etc/redhat-release", + "ansible_distribution_file_variety": "RedHat", + "ansible_distribution_major_version": "7", + 
"ansible_distribution_release": "Core", + "ansible_distribution_version": "7.4.1708", + "ansible_dns": { + "nameservers": [ + "10.0.20.1" + ], + "search": [ + "kev" + ] + }, + "ansible_domain": "kev", + "ansible_effective_group_id": 0, + "ansible_effective_user_id": 0, + "ansible_ens192": { + "active": true, + "device": "ens192", + "features": { + "busy_poll": "off [fixed]", + "fcoe_mtu": "off [fixed]", + "generic_receive_offload": "on", + "generic_segmentation_offload": "on", + "highdma": "on", + "hw_tc_offload": "off [fixed]", + "l2_fwd_offload": "off [fixed]", + "large_receive_offload": "on", + "loopback": "off [fixed]", + "netns_local": "off [fixed]", + "ntuple_filters": "off [fixed]", + "receive_hashing": "off [fixed]", + "rx_all": "off [fixed]", + "rx_checksumming": "on", + "rx_fcs": "off [fixed]", + "rx_vlan_filter": "on [fixed]", + "rx_vlan_offload": "on", + "rx_vlan_stag_filter": "off [fixed]", + "rx_vlan_stag_hw_parse": "off [fixed]", + "scatter_gather": "on", + "tcp_segmentation_offload": "on", + "tx_checksum_fcoe_crc": "off [fixed]", + "tx_checksum_ip_generic": "on", + "tx_checksum_ipv4": "off [fixed]", + "tx_checksum_ipv6": "off [fixed]", + "tx_checksum_sctp": "off [fixed]", + "tx_checksumming": "on", + "tx_fcoe_segmentation": "off [fixed]", + "tx_gre_csum_segmentation": "off [fixed]", + "tx_gre_segmentation": "off [fixed]", + "tx_gso_partial": "off [fixed]", + "tx_gso_robust": "off [fixed]", + "tx_ipip_segmentation": "off [fixed]", + "tx_lockless": "off [fixed]", + "tx_mpls_segmentation": "off [fixed]", + "tx_nocache_copy": "off", + "tx_scatter_gather": "on", + "tx_scatter_gather_fraglist": "off [fixed]", + "tx_sctp_segmentation": "off [fixed]", + "tx_sit_segmentation": "off [fixed]", + "tx_tcp6_segmentation": "on", + "tx_tcp_ecn_segmentation": "off [fixed]", + "tx_tcp_mangleid_segmentation": "off", + "tx_tcp_segmentation": "on", + "tx_udp_tnl_csum_segmentation": "off [fixed]", + "tx_udp_tnl_segmentation": "off [fixed]", + "tx_vlan_offload": "on", + "tx_vlan_stag_hw_insert": "off [fixed]", + "udp_fragmentation_offload": "off [fixed]", + "vlan_challenged": "off [fixed]" + }, + "hw_timestamp_filters": [], + "ipv4": { + "address": "10.0.20.5", + "broadcast": "10.0.20.255", + "netmask": "255.255.255.0", + "network": "10.0.20.0" + }, + "ipv6": [ + { + "address": "fe80::4e98:35fe:3b14:33dc", + "prefix": "64", + "scope": "link" + } + ], + "macaddress": "00:50:56:bb:30:e8", + "module": "vmxnet3", + "mtu": 1500, + "pciid": "0000:0b:00.0", + "promisc": false, + "speed": 10000, + "timestamping": [ + "rx_software", + "software" + ], + "type": "ether" + }, + "ansible_env": { + "HISTCONTROL": "ignoredups", + "HISTSIZE": "1000", + "HOME": "/root", + "HOSTNAME": "myserver", + "LANG": "en_US.UTF-8", + "LESSOPEN": "||/usr/bin/lesspipe.sh %s", + "LOGNAME": "root", + "LS_COLORS": 
"rs=0:di=38;5;27:ln=38;5;51:mh=44;38;5;15:pi=40;38;5;11:so=38;5;13:do=38;5;5:bd=48;5;232;38;5;11:cd=48;5;232;38;5;3:or=48;5;232;38;5;9:mi=05;48;5;232;38;5;15:su=48;5;196;38;5;15:sg=48;5;11;38;5;16:ca=48;5;196;38;5;226:tw=48;5;10;38;5;16:ow=48;5;10;38;5;21:st=48;5;21;38;5;15:ex=38;5;34:*.tar=38;5;9:*.tgz=38;5;9:*.arc=38;5;9:*.arj=38;5;9:*.taz=38;5;9:*.lha=38;5;9:*.lz4=38;5;9:*.lzh=38;5;9:*.lzma=38;5;9:*.tlz=38;5;9:*.txz=38;5;9:*.tzo=38;5;9:*.t7z=38;5;9:*.zip=38;5;9:*.z=38;5;9:*.Z=38;5;9:*.dz=38;5;9:*.gz=38;5;9:*.lrz=38;5;9:*.lz=38;5;9:*.lzo=38;5;9:*.xz=38;5;9:*.bz2=38;5;9:*.bz=38;5;9:*.tbz=38;5;9:*.tbz2=38;5;9:*.tz=38;5;9:*.deb=38;5;9:*.rpm=38;5;9:*.jar=38;5;9:*.war=38;5;9:*.ear=38;5;9:*.sar=38;5;9:*.rar=38;5;9:*.alz=38;5;9:*.ace=38;5;9:*.zoo=38;5;9:*.cpio=38;5;9:*.7z=38;5;9:*.rz=38;5;9:*.cab=38;5;9:*.jpg=38;5;13:*.jpeg=38;5;13:*.gif=38;5;13:*.bmp=38;5;13:*.pbm=38;5;13:*.pgm=38;5;13:*.ppm=38;5;13:*.tga=38;5;13:*.xbm=38;5;13:*.xpm=38;5;13:*.tif=38;5;13:*.tiff=38;5;13:*.png=38;5;13:*.svg=38;5;13:*.svgz=38;5;13:*.mng=38;5;13:*.pcx=38;5;13:*.mov=38;5;13:*.mpg=38;5;13:*.mpeg=38;5;13:*.m2v=38;5;13:*.mkv=38;5;13:*.webm=38;5;13:*.ogm=38;5;13:*.mp4=38;5;13:*.m4v=38;5;13:*.mp4v=38;5;13:*.vob=38;5;13:*.qt=38;5;13:*.nuv=38;5;13:*.wmv=38;5;13:*.asf=38;5;13:*.rm=38;5;13:*.rmvb=38;5;13:*.flc=38;5;13:*.avi=38;5;13:*.fli=38;5;13:*.flv=38;5;13:*.gl=38;5;13:*.dl=38;5;13:*.xcf=38;5;13:*.xwd=38;5;13:*.yuv=38;5;13:*.cgm=38;5;13:*.emf=38;5;13:*.axv=38;5;13:*.anx=38;5;13:*.ogv=38;5;13:*.ogx=38;5;13:*.aac=38;5;45:*.au=38;5;45:*.flac=38;5;45:*.mid=38;5;45:*.midi=38;5;45:*.mka=38;5;45:*.mp3=38;5;45:*.mpc=38;5;45:*.ogg=38;5;45:*.ra=38;5;45:*.wav=38;5;45:*.axa=38;5;45:*.oga=38;5;45:*.spx=38;5;45:*.xspf=38;5;45:", + "MAIL": "/var/spool/mail/root", + "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin", + "PWD": "/root", + "SHELL": "/bin/bash", + "SHLVL": "3", + "SSH_CLIENT": "10.0.0.74 53446 22", + "SSH_CONNECTION": "10.0.0.74 53446 10.0.20.5 22", + "SSH_TTY": "/dev/pts/0", + "TERM": "xterm-256color", + "USER": "root", + "XDG_RUNTIME_DIR": "/run/user/0", + "XDG_SESSION_ID": "270", + "_": "/usr/bin/python2" + }, + "ansible_fips": false, + "ansible_form_factor": "Other", + "ansible_fqdn": "myserver.mine", + "ansible_hostname": "myserver", + "ansible_interfaces": [ + "lo", + "ens192" + ], + "ansible_is_chroot": false, + "ansible_iscsi_iqn": "", + "ansible_kernel": "3.10.0-693.21.1.el7.x86_64", + "ansible_lo": { + "active": true, + "device": "lo", + "features": { + "busy_poll": "off [fixed]", + "fcoe_mtu": "off [fixed]", + "generic_receive_offload": "on", + "generic_segmentation_offload": "on", + "highdma": "on [fixed]", + "hw_tc_offload": "off [fixed]", + "l2_fwd_offload": "off [fixed]", + "large_receive_offload": "off [fixed]", + "loopback": "on [fixed]", + "netns_local": "on [fixed]", + "ntuple_filters": "off [fixed]", + "receive_hashing": "off [fixed]", + "rx_all": "off [fixed]", + "rx_checksumming": "on [fixed]", + "rx_fcs": "off [fixed]", + "rx_vlan_filter": "off [fixed]", + "rx_vlan_offload": "off [fixed]", + "rx_vlan_stag_filter": "off [fixed]", + "rx_vlan_stag_hw_parse": "off [fixed]", + "scatter_gather": "on", + "tcp_segmentation_offload": "on", + "tx_checksum_fcoe_crc": "off [fixed]", + "tx_checksum_ip_generic": "on [fixed]", + "tx_checksum_ipv4": "off [fixed]", + "tx_checksum_ipv6": "off [fixed]", + "tx_checksum_sctp": "on [fixed]", + "tx_checksumming": "on", + "tx_fcoe_segmentation": "off [fixed]", + "tx_gre_csum_segmentation": "off [fixed]", + "tx_gre_segmentation": "off [fixed]", + 
"tx_gso_partial": "off [fixed]", + "tx_gso_robust": "off [fixed]", + "tx_ipip_segmentation": "off [fixed]", + "tx_lockless": "on [fixed]", + "tx_mpls_segmentation": "off [fixed]", + "tx_nocache_copy": "off [fixed]", + "tx_scatter_gather": "on [fixed]", + "tx_scatter_gather_fraglist": "on [fixed]", + "tx_sctp_segmentation": "on", + "tx_sit_segmentation": "off [fixed]", + "tx_tcp6_segmentation": "on", + "tx_tcp_ecn_segmentation": "on", + "tx_tcp_mangleid_segmentation": "on", + "tx_tcp_segmentation": "on", + "tx_udp_tnl_csum_segmentation": "off [fixed]", + "tx_udp_tnl_segmentation": "off [fixed]", + "tx_vlan_offload": "off [fixed]", + "tx_vlan_stag_hw_insert": "off [fixed]", + "udp_fragmentation_offload": "on", + "vlan_challenged": "on [fixed]" + }, + "hw_timestamp_filters": [], + "ipv4": { + "address": "127.0.0.1", + "broadcast": "host", + "netmask": "255.0.0.0", + "network": "127.0.0.0" + }, + "ipv6": [ + { + "address": "::1", + "prefix": "128", + "scope": "host" + } + ], + "mtu": 65536, + "promisc": false, + "timestamping": [ + "rx_software", + "software" + ], + "type": "loopback" + }, + "ansible_local": { + "is_installed": { + "is_vim_installed": { + "vim": "true" + } + }, + "package_list": { + "all_kev_packages": { + "package1": "acl-2.2.51-12.el7.x86_64", + "package10": "authconfig-6.2.8-30.el7.x86_64", + "package11": "basesystem-10.0-7.el7.centos.noarch", + "package2": "aic94xx-firmware-30-6.el7.noarch", + "package3": "alsa-firmware-1.0.28-2.el7.noarch", + "package4": "alsa-lib-1.1.3-3.el7.x86_64", + "package5": "alsa-tools-firmware-1.1.0-1.el7.x86_64", + "package6": "ansible-2.6.1-1.el7.noarch", + "package7": "ansible-lint-3.4.21-1.el7.noarch", + "package8": "audit-2.7.6-3.el7.x86_64", + "package9": "audit-libs-2.7.6-3.el7.x86_64" + } + }, + "system_owner": { + "system_owner": { + "owner": "kev" + } + } + }, + "ansible_lsb": {}, + "ansible_machine": "x86_64", + "ansible_machine_id": "072bb764550746bd9695d73f43c9cf3b", + "ansible_memfree_mb": 1307, + "ansible_memory_mb": { + "nocache": { + "free": 1579, + "used": 260 + }, + "real": { + "free": 1307, + "total": 1839, + "used": 532 + }, + "swap": { + "cached": 0, + "free": 0, + "total": 0, + "used": 0 + } + }, + "ansible_memtotal_mb": 1839, + "ansible_mounts": [ + { + "block_available": 3300883, + "block_size": 4096, + "block_total": 3929344, + "block_used": 628461, + "device": "/dev/sda2", + "fstype": "xfs", + "inode_available": 7804215, + "inode_total": 7863808, + "inode_used": 59593, + "mount": "/", + "options": "rw,relatime,attr2,inode64,noquota", + "size_available": 13520416768, + "size_total": 16094593024, + "uuid": "d49a9903-d546-4981-bffb-4e5147e1199b" + }, + { + "block_available": 207739, + "block_size": 4096, + "block_total": 259584, + "block_used": 51845, + "device": "/dev/sda1", + "fstype": "xfs", + "inode_available": 523947, + "inode_total": 524288, + "inode_used": 341, + "mount": "/boot", + "options": "rw,relatime,attr2,inode64,noquota", + "size_available": 850898944, + "size_total": 1063256064, + "uuid": "619dcc55-451d-44be-b364-8c94278c567c" + } + ], + "ansible_nodename": "myserver", + "ansible_os_family": "RedHat", + "ansible_pkg_mgr": "yum", + "ansible_processor": [ + "0", + "GenuineIntel", + "Intel(R) Xeon(R) CPU X5660 @ 2.80GHz" + ], + "ansible_processor_cores": 1, + "ansible_processor_count": 1, + "ansible_processor_threads_per_core": 1, + "ansible_processor_vcpus": 1, + "ansible_product_name": "VMware Virtual Platform", + "ansible_product_serial": "VMware-42 3b 24 ce 91 e0 3a be-b1 02 3c 83 0f 86 02 e8", + 
"ansible_product_uuid": "CE243B42-E091-BE3A-B102-3C830F8602E8", + "ansible_product_version": "None", + "ansible_python": { + "executable": "/usr/bin/python2", + "has_sslcontext": true, + "type": "CPython", + "version": { + "major": 2, + "micro": 5, + "minor": 7, + "releaselevel": "final", + "serial": 0 + }, + "version_info": [ + 2, + 7, + 5, + "final", + 0 + ] + }, + "ansible_python_version": "2.7.5", + "ansible_real_group_id": 0, + "ansible_real_user_id": 0, + "ansible_selinux": { + "status": "disabled" + }, + "ansible_selinux_python_present": true, + "ansible_service_mgr": "systemd", + "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHa2kNk4tpZ4/rXxM6969XkR29JQa91M7sSRHheXeQxmZjnRp/5o2ADQjFmXz+PouYA8PMiBU9u5Mx44oEXxDmU=", + "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIE5MKxH1C+uyIHjAz48pwHj+6HdXw/9vCnc2PHRQZVND", + "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDMNVNNOgPb+L9rnrm0D2dQWRVEjtnWAgbgVDtdixE79+jDR5TGfxuUmf74yRXM0flrdvirQtBvZjSwsj3/fUReUas0gt3LVs1b7jsxK6QeGgQCx6CCeopMgUb0JYXsMexbekZxxnpBcWAXh/Bjhko/8FpZaZhIvk0VTzQMCP7+/netcTi1m+CCCF1YhQmy8bGhN+aJuaaP2VMSDSauOFGUQUUaJkw+pjata+qWMwYemDabszkePFp0rBQEDSo6fIMlXvInm75Jf24tyb+X9+kOycGk4Rits/jXseB6j+L3rZRCX6b/F3JMNtqoBc7nybGvD/8Njn9Sl67DQUZenkcX", + "ansible_swapfree_mb": 0, + "ansible_swaptotal_mb": 0, + "ansible_system": "Linux", + "ansible_system_capabilities": [ + "cap_chown", + "cap_dac_override", + "cap_dac_read_search", + "cap_fowner", + "cap_fsetid", + "cap_kill", + "cap_setgid", + "cap_setuid", + "cap_setpcap", + "cap_linux_immutable", + "cap_net_bind_service", + "cap_net_broadcast", + "cap_net_admin", + "cap_net_raw", + "cap_ipc_lock", + "cap_ipc_owner", + "cap_sys_module", + "cap_sys_rawio", + "cap_sys_chroot", + "cap_sys_ptrace", + "cap_sys_pacct", + "cap_sys_admin", + "cap_sys_boot", + "cap_sys_nice", + "cap_sys_resource", + "cap_sys_time", + "cap_sys_tty_config", + "cap_mknod", + "cap_lease", + "cap_audit_write", + "cap_audit_control", + "cap_setfcap", + "cap_mac_override", + "cap_mac_admin", + "cap_syslog", + "35", + "36+ep" + ], + "ansible_system_capabilities_enforced": "True", + "ansible_system_vendor": "VMware, Inc.", + "ansible_uptime_seconds": 137627, + "ansible_user_dir": "/root", + "ansible_user_gecos": "root", + "ansible_user_gid": 0, + "ansible_user_id": "root", + "ansible_user_shell": "/bin/bash", + "ansible_user_uid": 0, + "ansible_userspace_architecture": "x86_64", + "ansible_userspace_bits": "64", + "ansible_virtualization_role": "guest", + "ansible_virtualization_type": "VMware", + "gather_subset": [ + "all" + ], + "module_setup": true + }, + "changed": false +} + + +
    +
    +
    + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/040__SetupM-Filtered.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/040__SetupM-Filtered.html new file mode 100644 index 0000000..40a4320 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/040__SetupM-Filtered.html @@ -0,0 +1,37 @@ +
    + +

    DISCOVERED FACTS

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +# Small filtered subset of discovered facts
    + ansible myserver.mine -m setup -a "filter=ansible_default_ipv4"
    +
    +myserver.mine | success >> {
    +  "ansible_facts": {
    +      "ansible_default_ipv4": {
    +          "address": "10.41.17.37",
    +          "alias": "eth0",
    +          "gateway": "10.41.17.1",
    +          "interface": "eth0",
    +          "macaddress": "00:69:08:3b:a9:16",
    +          "mtu": 1500,
    +          "netmask": "255.255.255.0",
    +          "network": "10.41.17.0",
    +          "type": "ether"
    +      },
    +    
    +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_028__Ad-Hoc_Commands.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_028__Ad-Hoc_Commands.html new file mode 100644 index 0000000..80ae8b0 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_028__Ad-Hoc_Commands.html @@ -0,0 +1,11 @@ +
    +

    Ad-Hoc Commands

    +

    An ad-hoc command is a single Ansible task that you want to run quickly but don't want to save for later.

    + +
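    For instance, a hedged sketch (the "web" host pattern and the package name are hypothetical, shown only to illustrate the form):
    +# One-off task: make sure chrony is present on every host in the web group
    +# -b escalates privileges (become), -m picks the module, -a passes its arguments
    + ansible web -b -m yum -a "name=chrony state=present"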
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/00_Lab_AdHoc_1-2-3.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/00_Lab_AdHoc_1-2-3.html new file mode 100644 index 0000000..c0ae5af --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/00_Lab_AdHoc_1-2-3.html @@ -0,0 +1,17 @@ +
    +

    + +

    Ad-hoc command exercise number one

    +
    + +
    +

    + +

    Ad-hoc command exercise number two

    +
    + +
    +

    + +

    Ad-hoc command exercise number three

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/10_Terminal.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/10_Terminal.html new file mode 100644 index 0000000..8d6552b --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/10_Terminal.html @@ -0,0 +1,29 @@ +
    + +

    AD HOC COMMANDS

    + +
    +
    +
    Ansible Terminal
    +
    +
    +# Check connections (submarine ping, not ICMP)
    + ansible all -m ping
    +
    +# Run a command on all the hosts in the web group
    + ansible web -m command -a "uptime"
    +
    +# Collect and display known facts
    + ansible localhost -m setup
    +    
    +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/20_Lab_AdHoc_4-5-6.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/20_Lab_AdHoc_4-5-6.html new file mode 100644 index 0000000..c59700f --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/_labs/20_Lab_AdHoc_4-5-6.html @@ -0,0 +1,17 @@ +
    +

    + +

    Ad-hoc command exercise number four

    +
    + +
    +

    + +

    Ad-hoc command exercise number five

    +
    + +
    +

    + +

    Ad-hoc command exercise number six

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/setup.json b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/setup.json new file mode 100644 index 0000000..56e3504 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/060_Ad-Hoc_Commands/setup.json @@ -0,0 +1,557 @@ +localhost | SUCCESS => { + "ansible_facts": { + "ansible_all_ipv4_addresses": [ + "10.0.20.5" + ], + "ansible_all_ipv6_addresses": [ + "fe80::4e98:35fe:3b14:33dc" + ], + "ansible_apparmor": { + "status": "disabled" + }, + "ansible_architecture": "x86_64", + "ansible_bios_date": "04/05/2016", + "ansible_bios_version": "6.00", + "ansible_cmdline": { + "BOOT_IMAGE": "/vmlinuz-3.10.0-693.21.1.el7.x86_64", + "LANG": "en_US.UTF-8", + "crashkernel": "auto", + "quiet": true, + "rhgb": true, + "ro": true, + "root": "UUID=d49a9903-d546-4981-bffb-4e5147e1199b" + }, + "ansible_date_time": { + "date": "2018-08-14", + "day": "14", + "epoch": "1534279494", + "hour": "16", + "iso8601": "2018-08-14T20:44:54Z", + "iso8601_basic": "20180814T164454915629", + "iso8601_basic_short": "20180814T164454", + "iso8601_micro": "2018-08-14T20:44:54.915745Z", + "minute": "44", + "month": "08", + "second": "54", + "time": "16:44:54", + "tz": "EDT", + "tz_offset": "-0400", + "weekday": "Tuesday", + "weekday_number": "2", + "weeknumber": "33", + "year": "2018" + }, + "ansible_default_ipv4": { + "address": "10.0.20.5", + "alias": "ens192", + "broadcast": "10.0.20.255", + "gateway": "10.0.20.1", + "interface": "ens192", + "macaddress": "00:50:56:bb:30:e8", + "mtu": 1500, + "netmask": "255.255.255.0", + "network": "10.0.20.0", + "type": "ether" + }, + "ansible_default_ipv6": {}, + "ansible_device_links": { + "ids": { + "sr0": [ + "ata-VMware_Virtual_SATA_CDRW_Drive_00000000000000000001" + ] + }, + "labels": {}, + "masters": {}, + "uuids": { + "sda1": [ + "619dcc55-451d-44be-b364-8c94278c567c" + ], + "sda2": [ + "d49a9903-d546-4981-bffb-4e5147e1199b" + ] + } + }, + "ansible_devices": { + "sda": { + "holders": [], + "host": "Serial Attached SCSI controller: VMware PVSCSI SCSI Controller (rev 02)", + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [] + }, + "model": "Virtual disk", + "partitions": { + "sda1": { + "holders": [], + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [ + "619dcc55-451d-44be-b364-8c94278c567c" + ] + }, + "sectors": "2097152", + "sectorsize": 512, + "size": "1.00 GB", + "start": "2048", + "uuid": "619dcc55-451d-44be-b364-8c94278c567c" + }, + "sda2": { + "holders": [], + "links": { + "ids": [], + "labels": [], + "masters": [], + "uuids": [ + "d49a9903-d546-4981-bffb-4e5147e1199b" + ] + }, + "sectors": "31455232", + "sectorsize": 512, + "size": "15.00 GB", + "start": "2099200", + "uuid": "d49a9903-d546-4981-bffb-4e5147e1199b" + } + }, + "removable": "0", + "rotational": "1", + "sas_address": null, + "sas_device_handle": null, + "scheduler_mode": "deadline", + "sectors": "41943040", + "sectorsize": "512", + "size": "20.00 GB", + "support_discard": "0", + "vendor": "VMware", + "virtual": 1 + }, + "sr0": { + "holders": [], + "host": "SATA controller: VMware SATA AHCI controller", + "links": { + "ids": [ + "ata-VMware_Virtual_SATA_CDRW_Drive_00000000000000000001" + ], + "labels": [], + "masters": [], + "uuids": [] + }, + "model": "VMware SATA CD00", + "partitions": {}, + "removable": "1", + "rotational": "1", + "sas_address": null, + "sas_device_handle": null, + "scheduler_mode": "cfq", + 
"sectors": "2097151", + "sectorsize": "512", + "size": "1024.00 MB", + "support_discard": "0", + "vendor": "NECVMWar", + "virtual": 1 + } + }, + "ansible_distribution": "CentOS", + "ansible_distribution_file_parsed": true, + "ansible_distribution_file_path": "/etc/redhat-release", + "ansible_distribution_file_variety": "RedHat", + "ansible_distribution_major_version": "7", + "ansible_distribution_release": "Core", + "ansible_distribution_version": "7.4.1708", + "ansible_dns": { + "nameservers": [ + "10.0.20.1" + ], + "search": [ + "kev" + ] + }, + "ansible_domain": "kev", + "ansible_effective_group_id": 0, + "ansible_effective_user_id": 0, + "ansible_ens192": { + "active": true, + "device": "ens192", + "features": { + "busy_poll": "off [fixed]", + "fcoe_mtu": "off [fixed]", + "generic_receive_offload": "on", + "generic_segmentation_offload": "on", + "highdma": "on", + "hw_tc_offload": "off [fixed]", + "l2_fwd_offload": "off [fixed]", + "large_receive_offload": "on", + "loopback": "off [fixed]", + "netns_local": "off [fixed]", + "ntuple_filters": "off [fixed]", + "receive_hashing": "off [fixed]", + "rx_all": "off [fixed]", + "rx_checksumming": "on", + "rx_fcs": "off [fixed]", + "rx_vlan_filter": "on [fixed]", + "rx_vlan_offload": "on", + "rx_vlan_stag_filter": "off [fixed]", + "rx_vlan_stag_hw_parse": "off [fixed]", + "scatter_gather": "on", + "tcp_segmentation_offload": "on", + "tx_checksum_fcoe_crc": "off [fixed]", + "tx_checksum_ip_generic": "on", + "tx_checksum_ipv4": "off [fixed]", + "tx_checksum_ipv6": "off [fixed]", + "tx_checksum_sctp": "off [fixed]", + "tx_checksumming": "on", + "tx_fcoe_segmentation": "off [fixed]", + "tx_gre_csum_segmentation": "off [fixed]", + "tx_gre_segmentation": "off [fixed]", + "tx_gso_partial": "off [fixed]", + "tx_gso_robust": "off [fixed]", + "tx_ipip_segmentation": "off [fixed]", + "tx_lockless": "off [fixed]", + "tx_mpls_segmentation": "off [fixed]", + "tx_nocache_copy": "off", + "tx_scatter_gather": "on", + "tx_scatter_gather_fraglist": "off [fixed]", + "tx_sctp_segmentation": "off [fixed]", + "tx_sit_segmentation": "off [fixed]", + "tx_tcp6_segmentation": "on", + "tx_tcp_ecn_segmentation": "off [fixed]", + "tx_tcp_mangleid_segmentation": "off", + "tx_tcp_segmentation": "on", + "tx_udp_tnl_csum_segmentation": "off [fixed]", + "tx_udp_tnl_segmentation": "off [fixed]", + "tx_vlan_offload": "on", + "tx_vlan_stag_hw_insert": "off [fixed]", + "udp_fragmentation_offload": "off [fixed]", + "vlan_challenged": "off [fixed]" + }, + "hw_timestamp_filters": [], + "ipv4": { + "address": "10.0.20.5", + "broadcast": "10.0.20.255", + "netmask": "255.255.255.0", + "network": "10.0.20.0" + }, + "ipv6": [ + { + "address": "fe80::4e98:35fe:3b14:33dc", + "prefix": "64", + "scope": "link" + } + ], + "macaddress": "00:50:56:bb:30:e8", + "module": "vmxnet3", + "mtu": 1500, + "pciid": "0000:0b:00.0", + "promisc": false, + "speed": 10000, + "timestamping": [ + "rx_software", + "software" + ], + "type": "ether" + }, + "ansible_env": { + "HISTCONTROL": "ignoredups", + "HISTSIZE": "1000", + "HOME": "/root", + "HOSTNAME": "kev-ansible", + "LANG": "en_US.UTF-8", + "LESSOPEN": "||/usr/bin/lesspipe.sh %s", + "LOGNAME": "root", + "LS_COLORS": 
"rs=0:di=38;5;27:ln=38;5;51:mh=44;38;5;15:pi=40;38;5;11:so=38;5;13:do=38;5;5:bd=48;5;232;38;5;11:cd=48;5;232;38;5;3:or=48;5;232;38;5;9:mi=05;48;5;232;38;5;15:su=48;5;196;38;5;15:sg=48;5;11;38;5;16:ca=48;5;196;38;5;226:tw=48;5;10;38;5;16:ow=48;5;10;38;5;21:st=48;5;21;38;5;15:ex=38;5;34:*.tar=38;5;9:*.tgz=38;5;9:*.arc=38;5;9:*.arj=38;5;9:*.taz=38;5;9:*.lha=38;5;9:*.lz4=38;5;9:*.lzh=38;5;9:*.lzma=38;5;9:*.tlz=38;5;9:*.txz=38;5;9:*.tzo=38;5;9:*.t7z=38;5;9:*.zip=38;5;9:*.z=38;5;9:*.Z=38;5;9:*.dz=38;5;9:*.gz=38;5;9:*.lrz=38;5;9:*.lz=38;5;9:*.lzo=38;5;9:*.xz=38;5;9:*.bz2=38;5;9:*.bz=38;5;9:*.tbz=38;5;9:*.tbz2=38;5;9:*.tz=38;5;9:*.deb=38;5;9:*.rpm=38;5;9:*.jar=38;5;9:*.war=38;5;9:*.ear=38;5;9:*.sar=38;5;9:*.rar=38;5;9:*.alz=38;5;9:*.ace=38;5;9:*.zoo=38;5;9:*.cpio=38;5;9:*.7z=38;5;9:*.rz=38;5;9:*.cab=38;5;9:*.jpg=38;5;13:*.jpeg=38;5;13:*.gif=38;5;13:*.bmp=38;5;13:*.pbm=38;5;13:*.pgm=38;5;13:*.ppm=38;5;13:*.tga=38;5;13:*.xbm=38;5;13:*.xpm=38;5;13:*.tif=38;5;13:*.tiff=38;5;13:*.png=38;5;13:*.svg=38;5;13:*.svgz=38;5;13:*.mng=38;5;13:*.pcx=38;5;13:*.mov=38;5;13:*.mpg=38;5;13:*.mpeg=38;5;13:*.m2v=38;5;13:*.mkv=38;5;13:*.webm=38;5;13:*.ogm=38;5;13:*.mp4=38;5;13:*.m4v=38;5;13:*.mp4v=38;5;13:*.vob=38;5;13:*.qt=38;5;13:*.nuv=38;5;13:*.wmv=38;5;13:*.asf=38;5;13:*.rm=38;5;13:*.rmvb=38;5;13:*.flc=38;5;13:*.avi=38;5;13:*.fli=38;5;13:*.flv=38;5;13:*.gl=38;5;13:*.dl=38;5;13:*.xcf=38;5;13:*.xwd=38;5;13:*.yuv=38;5;13:*.cgm=38;5;13:*.emf=38;5;13:*.axv=38;5;13:*.anx=38;5;13:*.ogv=38;5;13:*.ogx=38;5;13:*.aac=38;5;45:*.au=38;5;45:*.flac=38;5;45:*.mid=38;5;45:*.midi=38;5;45:*.mka=38;5;45:*.mp3=38;5;45:*.mpc=38;5;45:*.ogg=38;5;45:*.ra=38;5;45:*.wav=38;5;45:*.axa=38;5;45:*.oga=38;5;45:*.spx=38;5;45:*.xspf=38;5;45:", + "MAIL": "/var/spool/mail/root", + "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin", + "PWD": "/root", + "SHELL": "/bin/bash", + "SHLVL": "3", + "SSH_CLIENT": "10.0.0.74 53446 22", + "SSH_CONNECTION": "10.0.0.74 53446 10.0.20.5 22", + "SSH_TTY": "/dev/pts/0", + "TERM": "xterm-256color", + "USER": "root", + "XDG_RUNTIME_DIR": "/run/user/0", + "XDG_SESSION_ID": "270", + "_": "/usr/bin/python2" + }, + "ansible_fips": false, + "ansible_form_factor": "Other", + "ansible_fqdn": "kev-ansible.kev", + "ansible_hostname": "kev-ansible", + "ansible_interfaces": [ + "lo", + "ens192" + ], + "ansible_is_chroot": false, + "ansible_iscsi_iqn": "", + "ansible_kernel": "3.10.0-693.21.1.el7.x86_64", + "ansible_lo": { + "active": true, + "device": "lo", + "features": { + "busy_poll": "off [fixed]", + "fcoe_mtu": "off [fixed]", + "generic_receive_offload": "on", + "generic_segmentation_offload": "on", + "highdma": "on [fixed]", + "hw_tc_offload": "off [fixed]", + "l2_fwd_offload": "off [fixed]", + "large_receive_offload": "off [fixed]", + "loopback": "on [fixed]", + "netns_local": "on [fixed]", + "ntuple_filters": "off [fixed]", + "receive_hashing": "off [fixed]", + "rx_all": "off [fixed]", + "rx_checksumming": "on [fixed]", + "rx_fcs": "off [fixed]", + "rx_vlan_filter": "off [fixed]", + "rx_vlan_offload": "off [fixed]", + "rx_vlan_stag_filter": "off [fixed]", + "rx_vlan_stag_hw_parse": "off [fixed]", + "scatter_gather": "on", + "tcp_segmentation_offload": "on", + "tx_checksum_fcoe_crc": "off [fixed]", + "tx_checksum_ip_generic": "on [fixed]", + "tx_checksum_ipv4": "off [fixed]", + "tx_checksum_ipv6": "off [fixed]", + "tx_checksum_sctp": "on [fixed]", + "tx_checksumming": "on", + "tx_fcoe_segmentation": "off [fixed]", + "tx_gre_csum_segmentation": "off [fixed]", + "tx_gre_segmentation": "off [fixed]", + 
"tx_gso_partial": "off [fixed]", + "tx_gso_robust": "off [fixed]", + "tx_ipip_segmentation": "off [fixed]", + "tx_lockless": "on [fixed]", + "tx_mpls_segmentation": "off [fixed]", + "tx_nocache_copy": "off [fixed]", + "tx_scatter_gather": "on [fixed]", + "tx_scatter_gather_fraglist": "on [fixed]", + "tx_sctp_segmentation": "on", + "tx_sit_segmentation": "off [fixed]", + "tx_tcp6_segmentation": "on", + "tx_tcp_ecn_segmentation": "on", + "tx_tcp_mangleid_segmentation": "on", + "tx_tcp_segmentation": "on", + "tx_udp_tnl_csum_segmentation": "off [fixed]", + "tx_udp_tnl_segmentation": "off [fixed]", + "tx_vlan_offload": "off [fixed]", + "tx_vlan_stag_hw_insert": "off [fixed]", + "udp_fragmentation_offload": "on", + "vlan_challenged": "on [fixed]" + }, + "hw_timestamp_filters": [], + "ipv4": { + "address": "127.0.0.1", + "broadcast": "host", + "netmask": "255.0.0.0", + "network": "127.0.0.0" + }, + "ipv6": [ + { + "address": "::1", + "prefix": "128", + "scope": "host" + } + ], + "mtu": 65536, + "promisc": false, + "timestamping": [ + "rx_software", + "software" + ], + "type": "loopback" + }, + "ansible_local": { + "is_installed": { + "is_vim_installed": { + "vim": "true" + } + }, + "package_list": { + "all_kev_packages": { + "package1": "acl-2.2.51-12.el7.x86_64", + "package10": "authconfig-6.2.8-30.el7.x86_64", + "package11": "basesystem-10.0-7.el7.centos.noarch", + "package2": "aic94xx-firmware-30-6.el7.noarch", + "package3": "alsa-firmware-1.0.28-2.el7.noarch", + "package4": "alsa-lib-1.1.3-3.el7.x86_64", + "package5": "alsa-tools-firmware-1.1.0-1.el7.x86_64", + "package6": "ansible-2.6.1-1.el7.noarch", + "package7": "ansible-lint-3.4.21-1.el7.noarch", + "package8": "audit-2.7.6-3.el7.x86_64", + "package9": "audit-libs-2.7.6-3.el7.x86_64" + } + }, + "system_owner": { + "system_owner": { + "owner": "kev" + } + } + }, + "ansible_lsb": {}, + "ansible_machine": "x86_64", + "ansible_machine_id": "072bb764550746bd9695d73f43c9cf3b", + "ansible_memfree_mb": 1307, + "ansible_memory_mb": { + "nocache": { + "free": 1579, + "used": 260 + }, + "real": { + "free": 1307, + "total": 1839, + "used": 532 + }, + "swap": { + "cached": 0, + "free": 0, + "total": 0, + "used": 0 + } + }, + "ansible_memtotal_mb": 1839, + "ansible_mounts": [ + { + "block_available": 3300883, + "block_size": 4096, + "block_total": 3929344, + "block_used": 628461, + "device": "/dev/sda2", + "fstype": "xfs", + "inode_available": 7804215, + "inode_total": 7863808, + "inode_used": 59593, + "mount": "/", + "options": "rw,relatime,attr2,inode64,noquota", + "size_available": 13520416768, + "size_total": 16094593024, + "uuid": "d49a9903-d546-4981-bffb-4e5147e1199b" + }, + { + "block_available": 207739, + "block_size": 4096, + "block_total": 259584, + "block_used": 51845, + "device": "/dev/sda1", + "fstype": "xfs", + "inode_available": 523947, + "inode_total": 524288, + "inode_used": 341, + "mount": "/boot", + "options": "rw,relatime,attr2,inode64,noquota", + "size_available": 850898944, + "size_total": 1063256064, + "uuid": "619dcc55-451d-44be-b364-8c94278c567c" + } + ], + "ansible_nodename": "kev-ansible", + "ansible_os_family": "RedHat", + "ansible_pkg_mgr": "yum", + "ansible_processor": [ + "0", + "GenuineIntel", + "Intel(R) Xeon(R) CPU X5660 @ 2.80GHz" + ], + "ansible_processor_cores": 1, + "ansible_processor_count": 1, + "ansible_processor_threads_per_core": 1, + "ansible_processor_vcpus": 1, + "ansible_product_name": "VMware Virtual Platform", + "ansible_product_serial": "VMware-42 3b 24 ce 91 e0 3a be-b1 02 3c 83 0f 86 02 e8", + 
"ansible_product_uuid": "CE243B42-E091-BE3A-B102-3C830F8602E8", + "ansible_product_version": "None", + "ansible_python": { + "executable": "/usr/bin/python2", + "has_sslcontext": true, + "type": "CPython", + "version": { + "major": 2, + "micro": 5, + "minor": 7, + "releaselevel": "final", + "serial": 0 + }, + "version_info": [ + 2, + 7, + 5, + "final", + 0 + ] + }, + "ansible_python_version": "2.7.5", + "ansible_real_group_id": 0, + "ansible_real_user_id": 0, + "ansible_selinux": { + "status": "disabled" + }, + "ansible_selinux_python_present": true, + "ansible_service_mgr": "systemd", + "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHa2kNk4tpZ4/rXxM6969XkR29JQa91M7sSRHheXeQxmZjnRp/5o2ADQjFmXz+PouYA8PMiBU9u5Mx44oEXxDmU=", + "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIE5MKxH1C+uyIHjAz48pwHj+6HdXw/9vCnc2PHRQZVND", + "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDMNVNNOgPb+L9rnrm0D2dQWRVEjtnWAgbgVDtdixE79+jDR5TGfxuUmf74yRXM0flrdvirQtBvZjSwsj3/fUReUas0gt3LVs1b7jsxK6QeGgQCx6CCeopMgUb0JYXsMexbekZxxnpBcWAXh/Bjhko/8FpZaZhIvk0VTzQMCP7+/netcTi1m+CCCF1YhQmy8bGhN+aJuaaP2VMSDSauOFGUQUUaJkw+pjata+qWMwYemDabszkePFp0rBQEDSo6fIMlXvInm75Jf24tyb+X9+kOycGk4Rits/jXseB6j+L3rZRCX6b/F3JMNtqoBc7nybGvD/8Njn9Sl67DQUZenkcX", + "ansible_swapfree_mb": 0, + "ansible_swaptotal_mb": 0, + "ansible_system": "Linux", + "ansible_system_capabilities": [ + "cap_chown", + "cap_dac_override", + "cap_dac_read_search", + "cap_fowner", + "cap_fsetid", + "cap_kill", + "cap_setgid", + "cap_setuid", + "cap_setpcap", + "cap_linux_immutable", + "cap_net_bind_service", + "cap_net_broadcast", + "cap_net_admin", + "cap_net_raw", + "cap_ipc_lock", + "cap_ipc_owner", + "cap_sys_module", + "cap_sys_rawio", + "cap_sys_chroot", + "cap_sys_ptrace", + "cap_sys_pacct", + "cap_sys_admin", + "cap_sys_boot", + "cap_sys_nice", + "cap_sys_resource", + "cap_sys_time", + "cap_sys_tty_config", + "cap_mknod", + "cap_lease", + "cap_audit_write", + "cap_audit_control", + "cap_setfcap", + "cap_mac_override", + "cap_mac_admin", + "cap_syslog", + "35", + "36+ep" + ], + "ansible_system_capabilities_enforced": "True", + "ansible_system_vendor": "VMware, Inc.", + "ansible_uptime_seconds": 137627, + "ansible_user_dir": "/root", + "ansible_user_gecos": "root", + "ansible_user_gid": 0, + "ansible_user_id": "root", + "ansible_user_shell": "/bin/bash", + "ansible_user_uid": 0, + "ansible_userspace_architecture": "x86_64", + "ansible_userspace_bits": "64", + "ansible_virtualization_role": "guest", + "ansible_virtualization_type": "VMware", + "gather_subset": [ + "all" + ], + "module_setup": true + }, + "changed": false +} diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/000_Red-Variables.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/000_Red-Variables.html new file mode 100644 index 0000000..3e707d1 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/000_Red-Variables.html @@ -0,0 +1,12 @@ +
    +

    VARIABLES:

    +
  • What are they?
  • How do they work for me?
  • What can I do with them?
  • How do I override or redefine them in flight?
    + + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/010_Variables.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/010_Variables.html new file mode 100644 index 0000000..d61ea42 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/010_Variables.html @@ -0,0 +1,19 @@ +
    + +

    Variables

    +

    Ansible can work with metadata from a variety of sources and manages that context in the form of variables.

    +
      +
    • Command line parameters
    • Plays and tasks
    • Files
    • Inventory
    • Discovered facts
    • Roles
    +
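    As a minimal sketch (hypothetical play and variable name, not part of this deck), a variable defined at the play level can be referenced anywhere in the play using Jinja2 syntax:
    +- hosts: web
    +  vars:
    +    web_pkg: httpd              # hypothetical play variable
    +  tasks:
    +  - name: Install {{ web_pkg }}
    +    yum:
    +      name: "{{ web_pkg }}"     # the variable is expanded at run time
    +      state: present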
    + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/020_Variable_Precedence.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/020_Variable_Precedence.html new file mode 100644 index 0000000..23dd3b0 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/070_Facts_And_Vars/020_Variable_Precedence.html @@ -0,0 +1,39 @@ +
    + +

    Variable Precedence

    +

    When the same variable is defined in more than one source, this order determines which value wins, from highest precedence to lowest (see the sketch after the list).

    +
    +
    +
      +
    1. extra vars
    2. task vars (only for the task)
    3. block vars (only for tasks in block)
    4. role and include vars
    5. play vars_files
    6. play vars_prompt
    7. play vars
    8. set_facts
    9. registered vars
    10. host facts
    11. playbook host_vars
    12. playbook group_vars
    13. inventory host_vars
    14. inventory group_vars
    15. inventory vars
    16. role defaults
    +
    +
    + +
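    A minimal illustration of the top of that ordering, assuming a hypothetical site.yml whose play defines http_port: 80 as a play var; the extra var passed on the command line wins:
    +# extra vars (-e) sit at the top of the precedence list and override the play var
    + ansible-playbook site.yml -e "http_port=8080"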
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/000__RedIntro.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/000__RedIntro.html new file mode 100644 index 0000000..043994a --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/000__RedIntro.html @@ -0,0 +1,11 @@ +
    +
    +

    Tasks in a Play

    + +

    Tasks are the powerful pieces within a playbook that state parameters and instruct the Ansible engine what to do.

    + +
    +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/100__Tasks.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/100__Tasks.html new file mode 100644 index 0000000..8256275 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/100__Tasks.html @@ -0,0 +1,19 @@ +
    + +

    Tasks

    +

    Tasks are the application of a module to perform a specific unit of work.

    +
      +
    • file: A directory should exist
    • yum: A package should be installed
    • service: A service should be running
    • template: Render a configuration file from a template
    • get_url: Fetch an archive file from a URL
    • git: Clone a source code repository
    +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/200__Example_Tasks_in_a_Play.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/200__Example_Tasks_in_a_Play.html new file mode 100644 index 0000000..104c379 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/200__Example_Tasks_in_a_Play.html @@ -0,0 +1,43 @@ +
    + +

    EXAMPLE TASKS IN A PLAY

    + +
    +
    +
    Ansible Terminal
    +
    +
    +---
    +tasks:
    +- name: Ensure httpd package is present
    +  yum:
    +    name: httpd
    +    state: latest
    +
    +- name: Ensure latest index.html file is present
    +  copy:
    +    src: files/index.html
    +    dest: /var/www/html/
    +
    +- name: Restart httpd
    +  service:
    +    name: httpd
    +    state: restarted
    +    
    +
    + + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/300__Handler_Tasks.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/300__Handler_Tasks.html new file mode 100644 index 0000000..2679234 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/300__Handler_Tasks.html @@ -0,0 +1,11 @@ +
    +

    Handler Tasks

    +

    Handlers are special tasks that run at the end of a play if notified by another task when a change occurs.

    +
    For example, a task that installs or updates a package can notify a handler that the service needs to be restarted.
    + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/400__Example_Handler_Task_in_a_Play.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/400__Example_Handler_Task_in_a_Play.html new file mode 100644 index 0000000..5659f31 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/400__Example_Handler_Task_in_a_Play.html @@ -0,0 +1,176 @@ +
    +

    Example Handler Task in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running
    +
    +  - name: Verify correct config file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/index.html
    +
    +  handlers:
    +
    +  - name: httpd_running
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + + + +
    +

    Example Handler Task in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running                                    
    +
    +  - name: Verify correct config file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/index.html
    +
    +  handlers:
    +
    +  - name: httpd_running
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + + + +
    +

    Example Handler Task in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running
    +
    +  - name: Verify correct config file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/index.html
    +
    +  handlers:                                                  
    +
    +  - name: httpd_running
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + + +
    +

    Example Handler Task in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running
    +
    +  - name: Verify correct config file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/index.html
    +
    +  handlers:
    +
    +  - name: httpd_running                                      
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + + +
    +

    Example Handler Task in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running                                    
    +
    +  - name: Verify correct config file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/index.html
    +
    +  handlers:                                                  
    +                                                             
    +  - name: httpd_running                                      
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/500__Example_Multiple_Handler_Task_in_a_Play.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/500__Example_Multiple_Handler_Task_in_a_Play.html new file mode 100644 index 0000000..bd8e665 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/500__Example_Multiple_Handler_Task_in_a_Play.html @@ -0,0 +1,145 @@ +
    +

    Multiple Tasks Calling a Handler in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running
    +
    +  - name: Verify correct config file is present
    +    template:
    +      src: files/httpd.conf.j2
    +      dest: /etc/httpd/conf/httpd.conf
    +    notify: httpd_running
    +
    +  handlers:
    +
    +  - name: httpd_running
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + + + +
    +

    Multiple Tasks Calling a Handler in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running                                    
    +
    +  - name: Verify correct config file is present
    +    template:
    +      src: files/httpd.conf.j2
    +      dest: /etc/httpd/conf/httpd.conf
    +    notify: httpd_running                                    
    +
    +  handlers:
    +
    +  - name: httpd_running
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + + + +
    +

    Multiple Tasks Calling a Handler in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running
    +
    +  - name: Verify correct config file is present
    +    template:
    +      src: files/httpd.conf.j2
    +      dest: /etc/httpd/conf/httpd.conf
    +    notify: httpd_running
    +
    +  handlers:                                                  
    +                                                             
    +  - name: httpd_running                                      
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + + +
    +

    Multiple Tasks Calling a Handler in a Play

    +
    +
    +
    Ansible Terminal
    +
    +
    +  tasks:
    +  
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +    notify: httpd_running                                    
    +
    +  - name: Verify correct config file is present
    +    template:
    +      src: files/httpd.conf.j2
    +      dest: /etc/httpd/conf/httpd.conf
    +    notify: httpd_running                                    
    +
    +  handlers:                                                  
    +                                                             
    +  - name: httpd_running                                      
    +    service:
    +      name: httpd
    +      state: restarted
    +			
    +
    + + +
    + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/_labs/00_Tasks_Labs.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/_labs/00_Tasks_Labs.html new file mode 100644 index 0000000..f82dcc7 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/080_Tasks/_labs/00_Tasks_Labs.html @@ -0,0 +1,10 @@ +
    +

    LAB STUFF 01

    + +

    Let's do some stuff with tasks.

    +
    +
    +

    LAB STUFF 02

    + +

    Let's do some more stuff with tasks.

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/000__RedIntro.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/000__RedIntro.html new file mode 100644 index 0000000..354db99 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/000__RedIntro.html @@ -0,0 +1,11 @@ +
    +
    +

    ANATOMY OF A PLAYBOOK

    + +

    Let's dissect the basics of a playbook and make sure we understand the core components.

    + +
    +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/039__Plays_amp_Playbooks.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/039__Plays_amp_Playbooks.html new file mode 100644 index 0000000..119a5dc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/039__Plays_amp_Playbooks.html @@ -0,0 +1,11 @@ +
    + +

    Plays & Playbooks

    +

    Plays are ordered sets of tasks to execute against host selections from your inventory. A playbook is a file containing one or more plays.

    + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/040__Playbook_Example.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/040__Playbook_Example.html new file mode 100644 index 0000000..aea4b20 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/040__Playbook_Example.html @@ -0,0 +1,40 @@ +
    + +

    PLAYBOOK EXAMPLE

    + +
    +
    +
    Ansible Terminal
    +
    +
    +- name: Ensure apache is installed and started
    +  hosts: web
    +  become: yes
    +
    +  tasks:
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +
    +  - name: Ensure latest index.html file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/index.html
    +
    +  - name: Ensure httpd is started
    +    service:
    +      name: httpd
    +      state: started
    +    
    +
    + + + + +
    + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/041__Human-Meaningful_Naming.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/041__Human-Meaningful_Naming.html new file mode 100644 index 0000000..0745e77 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/041__Human-Meaningful_Naming.html @@ -0,0 +1,39 @@ +
    + +

    HUMAN MEANINGFUL NAMING

    + +
    +
    +
    Ansible Terminal
    +
    +
    +- name: Ensure apache is installed and started
    +  hosts: web
    +  become: yes
    +  
    +  tasks:
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +
    +  - name: Ensure latest index.html file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/
    +
    +  - name: Ensure httpd is started
    +    service:
    +      name: httpd
    +      state: started
    +    
    +
    + + +
    + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/042__Host_Selector.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/042__Host_Selector.html new file mode 100644 index 0000000..352135d --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/042__Host_Selector.html @@ -0,0 +1,38 @@ +
    + +

    HOST SELECTOR

    + +
    +
    +
    Ansible Terminal
    +
    +
    +- name: Ensure apache is installed and started
    +  hosts: web
    +  become: yes
    +
    +  tasks:
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +
    +  - name: Ensure latest index.html file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/
    +
    +  - name: Ensure httpd is started
    +    service:
    +      name: httpd
    +      state: started
    +    
    +
    + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/043__Privilege_Escalation.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/043__Privilege_Escalation.html new file mode 100644 index 0000000..fc1cd8a --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/043__Privilege_Escalation.html @@ -0,0 +1,37 @@ +
    + +

    PRIVILEGE ESCALATION

    + +
    +
    +
    Ansible Terminal
    +
    +
    +- name: Ensure apache is installed and started
    +  hosts: web
    +  become: yes
    +
    +  tasks:
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +
    +  - name: Ensure latest index.html file is present
    +    copy:
    +      src: files/index.html
    +      dest: /var/www/html/
    +
    +  - name: Ensure httpd is started
    +    service:
    +      name: httpd
    +      state: started
    +    
    +
    + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/044__Play_Variables.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/044__Play_Variables.html new file mode 100644 index 0000000..01a7018 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/044__Play_Variables.html @@ -0,0 +1,46 @@ +
    +

    PLAY VARIABLES

    + +
    +
    +
    Ansible Terminal
    +
    +
    +- name: Ensure apache is installed and started
    +  hosts: web
    +  become: yes
    +  vars:                                
    +    http_port: 80                      
    +    http_docroot: /var/www/gokev.com   
    +
    +  tasks:
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +
    +  - name: Verify correct config file is present
    +    template:
    +      src: templates/httpd.conf.j2
    +      dest: /etc/httpd/conf/httpd.conf
    +
    +  - name: Ensure httpd is started
    +    service:
    +      name: httpd
    +      state: started
    +    
    +
    + +
    +

    + +

    +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/045__Tasks.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/045__Tasks.html new file mode 100644 index 0000000..eb010ce --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/045__Tasks.html @@ -0,0 +1,40 @@ +
    +

    PLAYBOOK TASKS

    + +
    +
    +
    Ansible Terminal
    +
    +
    +- name: Ensure apache is installed and started
    +  hosts: web
    +  become: yes
    +  vars:
    +    http_port: 80
    +    http_docroot: /var/www/gokev.com
    +
    +  tasks:
    +  - name: Ensure latest httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +
    +  - name: Verify correct config file is present
    +    template:
    +      src: templates/httpd.conf.j2
    +      dest: /etc/httpd/conf/httpd.conf
    +
    +  - name: Ensure httpd is started
    +    service:
    +      name: httpd
    +      state: started
    +    
    +
    + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/_labs/00_LABS-AdvancedPlaybookPieces.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/_labs/00_LABS-AdvancedPlaybookPieces.html new file mode 100644 index 0000000..a14bb57 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/090_Anatomy_Of_A_Playbook/_labs/00_LABS-AdvancedPlaybookPieces.html @@ -0,0 +1,10 @@ +
    +

    LAB STUFF 01

    + +

    Let's do some stuff with advanced pieces.

    +
    +
    +

    LAB STUFF 02

    + +

    Let's do some more stuff with advanced pieces!

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/000__RedIntro_DoingMore.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/000__RedIntro_DoingMore.html new file mode 100644 index 0000000..fe48656 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/000__RedIntro_DoingMore.html @@ -0,0 +1,11 @@ +
    +
    +

    DO MORE.

    + +

    Let's get a bit more advanced with some rich functionality that Ansible provides.

    + +
    +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/048__Doing_More_with_Playbooks.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/048__Doing_More_with_Playbooks.html new file mode 100644 index 0000000..856e586 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/048__Doing_More_with_Playbooks.html @@ -0,0 +1,17 @@ +
    +

    Doing More with Playbooks

    +

    Here are some more essential playbook features that you can apply:

    +
      +
    • Templates
    • Loops
    • Conditionals
    • Tags
    • Blocks
    + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/049__Templates.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/049__Templates.html new file mode 100644 index 0000000..e343cf7 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/049__Templates.html @@ -0,0 +1,21 @@ +
    +

    Templates

    +

    Ansible embeds the Jinja2 templating engine, which can be used dynamically to (see the template sketch after this list):

    +
      +
    • Set and modify play variables
    • Apply conditional logic
    • Generate files such as configurations from variables
    + +
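    A minimal template sketch, assuming the hypothetical play variables http_port and http_docroot used elsewhere in this deck (ansible_os_family is a gathered fact):
    +# templates/httpd.conf.j2 (hypothetical)
    +Listen {{ http_port }}
    +DocumentRoot "{{ http_docroot }}"
    +{% if ansible_os_family == "RedHat" %}
    +IncludeOptional conf.d/*.conf
    +{% endif %}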
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/050__Loops.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/050__Loops.html new file mode 100644 index 0000000..2c23924 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/050__Loops.html @@ -0,0 +1,33 @@ +
    +

    WORKING WITH LOOPS

    +

    Loops can do one task on multiple things: create a lot of users, install a lot of packages, or repeat a polling step until a certain result is reached.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +- yum:
    +    name: "{{ item }}"
    +    state: latest
    +  with_items:
    +  - httpd
    +  - mysql-server
    +  - php56-mysqlnd
    +  - php56-common
    +  - php56-gd
    +  - php56-soap
    +  - php56-xml
    +    
    +
    + + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/051__Conditionals.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/051__Conditionals.html new file mode 100644 index 0000000..6570266 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/051__Conditionals.html @@ -0,0 +1,26 @@ +
    +

    WORKING WITH CONDITIONALS

    +

    Ansible supports the conditional execution of a task based on the run-time evaluation of a variable, fact, or previous task result.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +- yum:
    +    name: httpd
    +    state: latest
    +  when: ansible_os_family == "RedHat"
    +    
    +
    + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/052__Tags.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/052__Tags.html new file mode 100644 index 0000000..8fde527 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/052__Tags.html @@ -0,0 +1,40 @@ +
    +

    WORKING WITH TAGS

    +

    Tags make it possible to run a subset of a playbook on demand.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +- yum:
    +    name: "{{ item }}"
    +    state: latest
    +  with_items:
    +  - httpd
    +  - php56
    +  tags:
    +     - packages
    +
    +- template:
    +    src: templates/httpd.conf.j2
    +    dest: /etc/httpd/conf/httpd.conf
    +  tags:
    +     - configuration
    +    
    +
    +
    +

    + +

    +
    + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/054__Blocks Conditional.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/054__Blocks Conditional.html new file mode 100644 index 0000000..4eb044d --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/054__Blocks Conditional.html @@ -0,0 +1,34 @@ +
    +

    WORKING WITH BLOCKS

    +

    Blocks cut down on repetitive task directives, allow for logical grouping of tasks, and even enable in-play error handling.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +  block:
    +  - yum:
    +      name: "{{ item }}"
    +      state: latest
    +    with_items:
    +    - httpd
    +    - php56
    +
    +  - template:
    +      src: templates/httpd.conf.j2
    +      dest: /etc/httpd/conf/httpd.conf
    +  when: ansible_os_family == "RedHat"
    +    
    +
    + + +
    + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/_labs/00_LABS-DoMore.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/_labs/00_LABS-DoMore.html new file mode 100644 index 0000000..a14bb57 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/100_Do_More_With_Playbooks/_labs/00_LABS-DoMore.html @@ -0,0 +1,10 @@ +
    +

    LAB STUFF 01

    + +

    Let's do some stuff with advanced pieces.

    +
    +
    +

    LAB STUFF 02

    + +

    Let's do some more stuff with advanced pieces!

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/000__Red_Slide_AnsibleVault.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/000__Red_Slide_AnsibleVault.html new file mode 100644 index 0000000..232afa5 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/000__Red_Slide_AnsibleVault.html @@ -0,0 +1,16 @@ +
    + +
    +

    ANSIBLE VAULT:
    ENCRYPTION

    +

    Using ansible-vault to keep your secrets a secret.


    + + + +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/050_Vault_2Types.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/050_Vault_2Types.html new file mode 100644 index 0000000..7c6ff62 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/050_Vault_2Types.html @@ -0,0 +1,52 @@ +
    +
    +

    ANSIBLE VAULT

    +

    Encrypt an entire file or just a string inside a non-encrypted file.

    +
    +
    +
    +
    Ansible Terminal : Encrypted File
    +
    +
    + cat ronin_secret.yml 
    +
    +$ANSIBLE_VAULT;1.1;AES256
    +61336361663664353737663637366334383030393736353532345
    +66532383233393438653965633333326632363935663132326163
    +306637626335643365630a653066396533313738316266653
    +
    +
    +
    + + + +
    +
    +
    Ansible Terminal : Encrypted Variable
    +
    +
    + cat fonzie_secret.yml
    +
    +fonzie_show: Happy Days
    +fonzie_secret: !vault |
    +          $ANSIBLE_VAULT;1.1;AES256
    +          6632363237353065633237633937353164646564633
    +          6376461326235386539663733343539666234332664
    +          6664616266316339383838373734653364346163633
    +          6663864366534633063303062363238393139
    +          
    +
    +
    + + + +

    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/100__Red_Slide_AnsibleVault_Ronin.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/100__Red_Slide_AnsibleVault_Ronin.html new file mode 100644 index 0000000..06065fc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/100__Red_Slide_AnsibleVault_Ronin.html @@ -0,0 +1,17 @@ +
    + +
    +

    ANSIBLE VAULT:
    ENCRYPTED FILE EXAMPLE

    +

    (Variables In An Encrypted File)


    + + +

    Encrypting Sensitive Variables Inside Your Playbooks With An Entire Encrypted YAML File

    + +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/110_Init_Ronin_Vault.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/110_Init_Ronin_Vault.html new file mode 100644 index 0000000..b3803c4 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/110_Init_Ronin_Vault.html @@ -0,0 +1,124 @@ +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Encrypting an entire file with Ansible Vault.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + ansible-vault create ronin_secret.yml
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +		    
    +
    + + + + +
    + + + +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Encrypting an entire file with Ansible Vault.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + ansible-vault create ronin_secret.yml
    +
    +New Vault password: redhat
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +		    
    +
    + + + + +
    + + + + +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Encrypting an entire file with Ansible Vault.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + ansible-vault create ronin_secret.yml
    +
    +New Vault password: 
    +Confirm New Vault password: 
    +
    + [WARNING]:  does not exist, creating...
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +		    
    +
    + + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/120_Edit_Ronin_Vault.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/120_Edit_Ronin_Vault.html new file mode 100644 index 0000000..02e3fa4 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/120_Edit_Ronin_Vault.html @@ -0,0 +1,123 @@ +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Editing an encrypted file with Ansible Vault.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + ansible-vault edit ronin_secret.yml 
    +
    +Vault password: redhat
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +		    
    +
    + + + + +
    + + +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Editing an encrypted file with Ansible Vault.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +~                                                                               
    +"/tmp/tmpQD4uZF.yml" 0L, 0C
    +
    + + + + +
    + + +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Editing an encrypted file with Ansible Vault.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +movie_title: Ronin
    +movie_phrase: 'Whenever there is any doubt, there is no doubt'
    +movie_actor: Robert De Niro
    +movie_character: Sam
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +~
    +:wq
    +
    + + + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/130_View_Ronin_Vault.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/130_View_Ronin_Vault.html new file mode 100644 index 0000000..b019dbe --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/130_View_Ronin_Vault.html @@ -0,0 +1,84 @@ +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Viewing a file encrypted with Ansible Vault, without the password.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + cat ronin_secret.yml 
    +
    +$ANSIBLE_VAULT;1.1;AES256
    +6133636166366435373766363736633438303039373635353234
    +6465306431313861616533653164373061323463356238323530
    +6363383735336661323964330a30303730383863656633613862
    +3339653531346137643334616664366532383233393438653965
    +633333326632363935663132326163306637626335643365630a
    +6530663965333137383162666538303062386539356366313237
    +633531363239
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    + + + + +
    + + + + +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Viewing an encrypted file with ansible-vault view and the password.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + ansible-vault view ronin_secret.yml 
    +
    +Vault password:  redhat
    +
    +movie_title: Ronin
    +movie_phrase: 'Whenever there is any doubt, there is no doubt'
    +movie_actor: Robert De Niro
    +movie_character: Sam
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/140_Include_Ronin_Vars.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/140_Include_Ronin_Vars.html new file mode 100644 index 0000000..aa96ae0 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/140_Include_Ronin_Vars.html @@ -0,0 +1,86 @@ +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Including vaulted files in your playbook.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +---
    +- hosts: web
    +  become: yes
    +  vars:
    +    country_setting: France
    +    city_opening_scene: Montmartre
    +
    +  vars_files:
    +    - ronin_secret.yml
    +
    +  tasks:
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +
    +  - name: Verify correct index file is present
    +    template:
    +      src: templates/index.html.j2
    +      dest: /var/www/html/index.html
    +
    +
    +
    + + + + +
    + + + +
    +

    ANSIBLE VAULT: COMPLETE ENCRYPTED FILE

    +

    Including vaulted files in your playbook.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +---
    +- hosts: web
    +  become: yes
    +  vars:
    +    country_setting: France
    +    city_opening_scene: Montmartre
    +
    +  vars_files:          
    +    - ronin_secret.yml 
    +
    +  tasks:
    +  - name: Ensure httpd package is present
    +    yum:
    +      name: httpd
    +      state: latest
    +
    +  - name: Verify correct index file is present
    +    template:
    +      src: templates/index.html.j2
    +      dest: /var/www/html/index.html
    +
    +
    +
    + + + + +
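    Running a play that loads a vaulted vars_files entry requires the vault password at runtime. A minimal sketch, assuming the play above is saved as ronin_playbook.yml (that file name is illustrative):

        ansible-playbook --ask-vault-pass ronin_playbook.yml   # prompts for the vault password before the play starts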
    + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/200__Red_Slide_AnsibleVaultFonzie.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/200__Red_Slide_AnsibleVaultFonzie.html new file mode 100644 index 0000000..ead5e2d --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/200__Red_Slide_AnsibleVaultFonzie.html @@ -0,0 +1,17 @@ +
    + +
    +

    ANSIBLE VAULT:
    ENCRYPTED STRING EXAMPLE

    +

    (Encrypted Variables In A Plain File)


    + + +

    Encrypting Sensitive Variables Inside Your Playbooks With An Encrypted String

    + +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/210_Init_Fonzie_Vault.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/210_Init_Fonzie_Vault.html new file mode 100644 index 0000000..ca12661 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/210_Init_Fonzie_Vault.html @@ -0,0 +1,116 @@ +
    +

    ANSIBLE VAULT: ENCRYPTED VARIABLE

    +

    Use ansible-vault to create a new encrypted variable string.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + ansible-vault encrypt_string 'Jumped The Shark' --name 'fonzie_stunt'
    +
    +		    
    +
    + +

    Here are the pieces

    +

    A breakdown of the command:

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +ansible-vault
    +     encrypt_string 
    +         'this phrase will be encrypted' 
    +             --name 'variable_name_of_encrypted_var'
    +
    +		    
    +
    + + + + +
    + + + +
    +

    ANSIBLE VAULT: ENCRYPTED VARIABLE

    +

    Use ansible-vault to create a new encrypted variable string.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + ansible-vault encrypt_string 'Jumped The Shark' --name 'fonzie_stunt'
    +New Vault password: fonzie
    +Confirm New Vault password: fonzie
    +
    +fonzie_stunt: !vault |
    +          $ANSIBLE_VAULT;1.1;AES256
    +          35613061636564623138646464393731633534343261393532343263633763356364633366636633
    +          6430663339613762306235323035326663363665316234650a303434333462343731356632333136
    +          37393966336465386131666635333537636133313864383865303138303262343939326563373730
    +          3063376263303037340a356263333131626239316630653465313931353863623237666464613030
    +          37346537336430386265633239346566313466323764336234346361626666643334
    +
    +Encryption successful
    +
    + 
    +
    +
    +
    +
    +
    +
    +		    
    +
    + + + + +
    + + + + + + +
    +

    ANSIBLE VAULT: ENCRYPTED VARIABLE

    +

    The pipe is YAML's block scalar indicator: it marks the start of the multi-line encrypted value that follows the variable name.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +
    +fonzie_stunt: !vault | 
    +    $ANSIBLE_VAULT;1.1;AES256
    +    35613061636564623138646464393731633534343261393532343263633763356364633366636633
    +    6430663339613762306235323035326663363665316234650a303434333462343731356632333136
    +    37393966336465386131666635333537636133313864383865303138303262343939326563373730
    +    3063376263303037340a356263333131626239316630653465313931353863623237666464613030
    +    37346537336430386265633239346566313466323764336234346361626666643334               
    +
    +
    +		    
    +
    + + + + +
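    A variant worth noting, not shown on the slides: encrypt_string can read the secret from standard input so the plaintext never appears in shell history.

        ansible-vault encrypt_string --stdin-name 'fonzie_stunt'   # type the secret, then press Ctrl-D to finish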
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/220_View_Fonzie_Vault.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/220_View_Fonzie_Vault.html new file mode 100644 index 0000000..1c2cc2c --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/220_View_Fonzie_Vault.html @@ -0,0 +1,108 @@ +
    +

    ANSIBLE VAULT: ENCRYPTED VARIABLE

    +

    Including an encrypted variable in a plaintext playbook.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +---
    +- hosts: localhost
    +  connection: local
    +  vars:
    +    fonzie_show: Happy Days
    +    fonzie_transport: Motorcycle
    +    fonzie_stunt: !vault |
    +        $ANSIBLE_VAULT;1.1;AES256
    +        35613061636564623138646464393731633534343261393532343263633763356364633366636633
    +        6430663339613762306235323035326663363665316234650a303434333462343731356632333136
    +        37393966336465386131666635333537636133313864383865303138303262343939326563373730
    +        3063376263303037340a356263333131626239316630653465313931353863623237666464613030
    +        37346537336430386265633239346566313466323764336234346361626666643334               
    +    fonzie_office: Mens room at Arnold's Diner
    +    fonzie_jacket: Leather
    +    fonzie_phrase: Aaaaaaayyyyyyyy
    +
    +  tasks:
    +  - name: prove that we can read the variables within the encrypted string
    +    debug:
    +      msg: |
    +        Fonzie is a character from the TV show, {{ fonzie_show }}. He rides a {{ fonzie_transport }},
    +        wears a {{ fonzie_jacket }} jacket, and has an office in the {{ fonzie_office }}.
    +
    +        Fonzie is most famous for his stunt where he {{ fonzie_stunt }}, after which,
    +        he said {{ fonzie_phrase }}!
    +
    +
    +
    +
    + + + + +
    + + + + + + + + + + + + + +
    +

    ANSIBLE VAULT: ENCRYPTED VARIABLE

    +

    Including an encrypted variable in a plaintext playbook.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +---
    +- hosts: localhost
    +  connection: local
    +  vars:
    +    fonzie_show: Happy Days
    +    fonzie_transport: Motorcycle
    +    fonzie_stunt: !vault |                                                               
    +        $ANSIBLE_VAULT;1.1;AES256                                                        
    +        35613061636564623138646464393731633534343261393532343263633763356364633366636633 
    +        6430663339613762306235323035326663363665316234650a303434333462343731356632333136 
    +        37393966336465386131666635333537636133313864383865303138303262343939326563373730 
    +        3063376263303037340a356263333131626239316630653465313931353863623237666464613030 
    +        37346537336430386265633239346566313466323764336234346361626666643334                    
    +    fonzie_office: Mens room at Arnold's Diner
    +    fonzie_jacket: Leather
    +    fonzie_phrase: Aaaaaaayyyyyyyy
    +
    +  tasks:
    +  - name: prove that we can read the variables within the encrypted string
    +    debug:
    +      msg: |
    +        Fonzie is a character from the TV show, {{ fonzie_show }}. He rides a {{ fonzie_transport }},
    +        wears a {{ fonzie_jacket }} jacket, and has an office in the {{ fonzie_office }}.
    +
    +        Fonzie is most famous for his stunt where he {{ fonzie_stunt }}, after which,
    +        he said {{ fonzie_phrase }}!
    +
    +
    +
    +
    + + + + +
    + \ No newline at end of file diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/230_View_Fonzie_Vault_Playbook.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/230_View_Fonzie_Vault_Playbook.html new file mode 100644 index 0000000..0868653 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/230_View_Fonzie_Vault_Playbook.html @@ -0,0 +1,46 @@ +
    +

    ANSIBLE VAULT: ENCRYPTED VARIABLE

    +

    Including an encrypted variable in a plaintext playbook.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +
    + ansible-playbook --ask-vault-pass fonzie_playbook.yml 
    +
    +Vault password: 
    +
    +PLAY [localhost] *************************************************************************************************************
    +
    +TASK [Gathering Facts] *******************************************************************************************************
    +ok: [localhost]
    +
    +TASK [prove that we can read the variables within the encrypted string] *******************************************************
    +ok: [localhost] => {
    +    "msg": "Fonzie is a character from the TV show, Happy Days. He rides a Motorcycle,
    +    wears a Leather jacket, and has an office in the Mens room at Arnold's Diner.
    +    
    +    Fonzie is most famous for his stunt where he Jumped The Shark, after which, 
    +    he said Aaaaaaayyyyyyyy!"
    +}
    +
    +PLAY RECAP *******************************************************************************************************************
    +localhost                  : ok=2    changed=0    unreachable=0    failed=0   
    +
    +
    +
    +
    +
    + + + + +
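    For unattended runs, the interactive prompt can be replaced with a password file. A hedged sketch; the ~/.fonzie_vault_pass path is illustrative only, and the password 'fonzie' matches the one used when the string was encrypted:

        echo 'fonzie' > ~/.fonzie_vault_pass
        chmod 600 ~/.fonzie_vault_pass
        ansible-playbook --vault-password-file ~/.fonzie_vault_pass fonzie_playbook.yml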
    + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/_labs/00_Dynamic_Lab_Placeholder.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/_labs/00_Dynamic_Lab_Placeholder.html new file mode 100644 index 0000000..5a00762 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/110_Ansible_Vault/_labs/00_Dynamic_Lab_Placeholder.html @@ -0,0 +1,15 @@ +
    +

    Lab Sample 01

    + +

    This is sample page #1, meant strictly as a lab placeholder to show dynamic content.

    +
    +
    +

    Lab Sample 02

    + +

    This is sample page #2, meant strictly as a lab placeholder to show dynamic content.

    +
    +
    +

    Lab Sample 03

    + +

    This is sample page #3, meant strictly as a lab placeholder to show dynamic content.

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/000_RedIntro-Roles.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/000_RedIntro-Roles.html new file mode 100644 index 0000000..d9e0bbc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/000_RedIntro-Roles.html @@ -0,0 +1,13 @@ +
    +

    USING ROLES:

    + + +
  • Improves readability and maintainability of complex plays
  • +
  • Eases sharing, reuse and standardization of automation processes
  • +
  • Enables Ansible content to exist independently of playbooks, projects -- even organizations
  • +
  • Provides functional conveniences such as file path resolution and default values
  • +
    + +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/200__AnatomyOfRoles.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/200__AnatomyOfRoles.html new file mode 100644 index 0000000..50f9525 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/200__AnatomyOfRoles.html @@ -0,0 +1,363 @@ +
    +

    ROLES ANATOMY

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
    + + +
    +

    MAIN ROLE DIRECTORY

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
    + + + + + + +
    +

    DEFAULTS AND VARIABLES

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
    + + + + + + + +
    +

    FILES DIRECTORY

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
    + + + + + + +
    +

    HANDLERS DIRECTORY

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
    + + + + + + +
    +

    METADATA AND README

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
    + + + + + + + + +
    +

    TASKS DIRECTORY

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
    + + + + + + + + + +
    +

    TEMPLATES DIRECTORY

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
    + + + + + + + + + +
    +

    TESTS DIRECTORY

    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +		
    +
    + + +
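    To make the anatomy concrete, here is a minimal sketch of what two of these files might contain for example_role. The contents are illustrative assumptions; ansible-galaxy init creates these files as near-empty skeletons.

        # example_role/defaults/main.yml -- lowest-precedence variables, intended to be overridden
        ---
        example_role_packages:
          - httpd

        # example_role/tasks/main.yml -- the work the role performs when applied
        ---
        - name: Ensure role packages are present
          yum:
            name: "{{ example_role_packages }}"
            state: present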
    + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/300__InitializingARole.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/300__InitializingARole.html new file mode 100644 index 0000000..6759b27 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/300__InitializingARole.html @@ -0,0 +1,25 @@ +
    +

    INITIALIZING A ROLE

    +

    Use the ansible-galaxy command to initialize a new role:

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + ansible-galaxy init example_role
    +
    +- example_role was created successfully
    +
    + 
    +
    +
    +
    + + +
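    In newer Ansible releases the same generator is also available under the role subcommand; either form produces the skeleton shown on the surrounding slides:

        ansible-galaxy init example_role        # classic form, as on the slide
        ansible-galaxy role init example_role   # equivalent namespaced form in recent releases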
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/400__Roles_Structure.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/400__Roles_Structure.html new file mode 100644 index 0000000..1226bab --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/400__Roles_Structure.html @@ -0,0 +1,41 @@ +
    +

    INTRODUCTION TO ROLES

    +

    Project with Embedded Roles Example:

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    + tree example_role/
    +
    +example_role/
    +├── defaults
    +│   └── main.yml
    +├── files
    +├── handlers
    +│   └── main.yml
    +├── meta
    +│   └── main.yml
    +├── README.md
    +├── tasks
    +│   └── main.yml
    +├── templates
    +├── tests
    +│   ├── inventory
    +│   └── test.yml
    +└── vars
    +    └── main.yml
    +
    +
    +
    +
    +
    + + +
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/500__Project_with_Embedded_Roles_Example.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/500__Project_with_Embedded_Roles_Example.html new file mode 100644 index 0000000..7555a46 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/500__Project_with_Embedded_Roles_Example.html @@ -0,0 +1,79 @@ +
    +

    INTRODUCTION TO ROLES

    +

    Project with Embedded Roles Example:

    +
    + + + + + + +
    +
    +
    +
    Ansible Terminal
    +
    +
    +Some directories omitted for readability
    +├── site.yml
    +└── roles
    +    ├── apache_role
    +    │   ├── defaults
    +    │   │   └── main.yml
    +    │   ├── files
    +    │   │   └── index.html
    +    │   ├── handlers 
    +    │   │   └── main.yml
    +    │   ├── tasks
    +    │   │   └── main.yml
    +    │   ├── templates
    +    │   │   └── http.conf.j2
    +    │   └── vars
    +    │       └── main.yml
    +    └── security_standards
    +        ├── defaults
    +        │   └── main.yml
    +        ├── files
    +        │   ├── sshd_conf
    +        │   └── sudoers
    +        ├── handlers
    +        │   └── main.yml
    +        ├── tasks
    +        │   └── main.yml
    +        ├── templates
    +        │   ├── etc_issue
    +        │   └── etc_motd
    +        └── vars
    +            └── main.yml    
    +
    +
    +
    +
    +
    Some Interesting Files To Consider
    +
    +
    +apache_role
    +    tasks/main.yml
    +    defaults/main.yml
    +    files/index.html
    +    templates/http.conf.j2
    +    vars/main.yml
    + 
    +security_standards
    +    tasks/main.yml
    +    defaults/main.yml
    +    files/sshd_conf
    +    files/sudoers
    +    templates/etc_issue
    +    templates/etc_motd
    +    vars/main.yml
    +    
    +
    +
    + + +
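    As a hedged illustration of how those files fit together (only the file layout comes from the slide; the task contents below are assumptions), apache_role's tasks can reference the role's own files/ and templates/ directories by bare name and notify its own handlers:

        # roles/apache_role/tasks/main.yml
        ---
        - name: Deploy httpd.conf from the role's template
          template:
            src: http.conf.j2                  # resolved from roles/apache_role/templates/
            dest: /etc/httpd/conf/httpd.conf
          notify: restart httpd

        - name: Copy the role's static index page
          copy:
            src: index.html                    # resolved from roles/apache_role/files/
            dest: /var/www/html/index.html

        # roles/apache_role/handlers/main.yml
        ---
        - name: restart httpd
          service:
            name: httpd
            state: restarted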
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/600__Project_with_Embedded_Roles_Example.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/600__Project_with_Embedded_Roles_Example.html new file mode 100644 index 0000000..a3a4c52 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/600__Project_with_Embedded_Roles_Example.html @@ -0,0 +1,28 @@ +
    +

    PLAYBOOKS WITH ROLES

    +

    See how simple a playbook with embedded roles can be.

    +
    + +
    +
    +
    Ansible Terminal
    +
    +
    +# site.yml
    +---
    +- name: Apply some roles to our web servers
    +  hosts: web
    +  roles:
    +     - security_standards
    +     - apache_role
    +
    +    
    +
    + + + +
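    Roles can also be pulled in at the task level with import_role (or include_role for dynamic inclusion). A roughly equivalent sketch of the same play:

        # site.yml, task-level variant
        ---
        - name: Apply some roles to our web servers
          hosts: web
          tasks:
            - import_role:
                name: security_standards
            - import_role:
                name: apache_role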
    + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/_100__Roles.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/_100__Roles.html new file mode 100644 index 0000000..d33fa12 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/_100__Roles.html @@ -0,0 +1,21 @@ +
    +

    USING ROLES:
    A More Practical Playbook

    +

    Roles are packages of closely related Ansible content that can be shared more easily than plays alone.

    +
      +
    • Improves readability and maintainability of complex plays
    • +
    • Eases sharing, reuse and standardization of automation processes
    • +
    • Enables Ansible content to exist independently of playbooks, projects -- even organizations
    • +
    • Provides functional conveniences such as file path resolution and default values
    • +
    + + + + +
    + + + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/_labs/00_Dynamic_Lab_Placeholder.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/_labs/00_Dynamic_Lab_Placeholder.html new file mode 100644 index 0000000..5a00762 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/120_Ansible_Roles/_labs/00_Dynamic_Lab_Placeholder.html @@ -0,0 +1,15 @@ +
    +

    Lab Sample 01

    + +

    This is sample page #1, meant strictly as a lab placeholder to show dynamic content.

    +
    +
    +

    Lab Sample 02

    + +

    This is sample page #2, meant strictly as a lab placeholder to show dynamic content.

    +
    +
    +

    Lab Sample 03

    + +

    This is sample page #3, meant strictly as a lab placeholder to show dynamic content.

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/130_Role_Sharing_And_Distribution/000_RedIntro-Roles.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/130_Role_Sharing_And_Distribution/000_RedIntro-Roles.html new file mode 100644 index 0000000..c607fba --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/130_Role_Sharing_And_Distribution/000_RedIntro-Roles.html @@ -0,0 +1,16 @@ +
    +

    SHARING YOUR CONTENT WITH OTHER TEAMS:

    + + +
  • Publishing a catalog of roles
  • +
  • Creating best practices for use of roles
  • +
  • Roles as collaboration
  • +
  • Style guide
  • +
  • Code review
  • +
  • Galaxy or Git?
  • +
  • requirements.yml and including roles (see the sketch after this list)
  • +
    + +
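    A hedged sketch of the requirements.yml approach mentioned in the list above; the role name and repository URL are placeholders, not real projects:

        # roles/requirements.yml
        ---
        - src: namespace.some_role                                     # from Ansible Galaxy
        - src: https://github.com/example/ansible-role-something.git   # from a Git repository
          scm: git
          version: v1.2.0
          name: something

        # then install everything listed above:
        ansible-galaxy install -r roles/requirements.yml -p roles/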
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/130_Role_Sharing_And_Distribution/700_AnsibleGalaxy.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/130_Role_Sharing_And_Distribution/700_AnsibleGalaxy.html new file mode 100644 index 0000000..eb05928 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/130_Role_Sharing_And_Distribution/700_AnsibleGalaxy.html @@ -0,0 +1,16 @@ +
    + +
    +
    +

    Ansible Galaxy

    +

    http://galaxy.ansible.com

    +

    Ansible Galaxy is a hub for finding, reusing and sharing Ansible content.

    +

    Jump-start your automation project with content contributed and reviewed by the Ansible community.

    +
    +
    + + + +
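    Typical commands for consuming Galaxy content (the role name is a placeholder):

        ansible-galaxy search apache                  # search galaxy.ansible.com for matching roles
        ansible-galaxy install namespace.role_name    # install a role into the configured roles_path
        ansible-galaxy list                           # list roles installed locally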
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/900_Closing/000_Closing_Next_Steps.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/900_Closing/000_Closing_Next_Steps.html new file mode 100644 index 0000000..8c6edff --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/html_slides/900_Closing/000_Closing_Next_Steps.html @@ -0,0 +1,8 @@ +
    +

    Next Steps

    +
      +
    • It's easy to get started
      ansible.com/get-started
    • +
    • Join the Ansible community
      ansible.com/community
    • +
    • Would you like to learn a lot more?
      redhat.com/en/services/training/do407-automation-ansible
    • +
    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Ansible-Fest-Row-Background.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Ansible-Fest-Row-Background.png new file mode 100644 index 0000000..8511569 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Ansible-Fest-Row-Background.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Ansible-Fest-Row-Background_GoKEV.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Ansible-Fest-Row-Background_GoKEV.png new file mode 100644 index 0000000..2e6dfc6 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Ansible-Fest-Row-Background_GoKEV.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleTowerFlow.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleTowerFlow.png new file mode 100644 index 0000000..64023e2 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleTowerFlow.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases.xcf b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases.xcf new file mode 100644 index 0000000..c97efd1 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases.xcf differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases00_Blank.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases00_Blank.png new file mode 100644 index 0000000..14aeb5e Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases00_Blank.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases01_ConfigMgmt.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases01_ConfigMgmt.png new file mode 100644 index 0000000..69be85c Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases01_ConfigMgmt.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases02_AppDeployment.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases02_AppDeployment.png new file mode 100644 index 0000000..873f773 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases02_AppDeployment.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases03_Provisioning.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases03_Provisioning.png new file mode 100644 index 0000000..16ff3d2 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases03_Provisioning.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases04_CICD.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases04_CICD.png new file mode 100644 index 0000000..e118d3f Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases04_CICD.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases05_SecurityCompliance.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases05_SecurityCompliance.png new file mode 100644 index 0000000..ddf1fa0 Binary files /dev/null and 
b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases05_SecurityCompliance.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases06_Orchestration.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases06_Orchestration.png new file mode 100644 index 0000000..b5bed6e Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/AnsibleUseCases06_Orchestration.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/BatteriesNotIncluded.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/BatteriesNotIncluded.png new file mode 100644 index 0000000..8ffb196 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/BatteriesNotIncluded.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/GoKEV_Chrome_Color_Ansible.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/GoKEV_Chrome_Color_Ansible.png new file mode 100644 index 0000000..870171b Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/GoKEV_Chrome_Color_Ansible.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Agentless.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Agentless.png new file mode 100644 index 0000000..458168e Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Agentless.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-All.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-All.png new file mode 100644 index 0000000..fe32140 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-All.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-None.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-None.png new file mode 100644 index 0000000..6f0e713 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-None.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Powerful.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Powerful.png new file mode 100644 index 0000000..d3a2715 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Powerful.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Simple.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Simple.png new file mode 100644 index 0000000..57f93ff Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/SPA-Simple.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Tower-3-Dashboard-1x.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Tower-3-Dashboard-1x.png new file mode 100644 index 0000000..cecda05 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/Tower-3-Dashboard-1x.png differ diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/ansibe-up-and-running-ebook.png b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/ansibe-up-and-running-ebook.png new file mode 100644 index 0000000..9cebdb7 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/ansibe-up-and-running-ebook.png differ diff --git 
a/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/ansible-automation-diagram.svg b/roles/lightbulb-ansiblered-deck/files/deck-ansible/images/ansible-automation-diagram.svg new file mode 100644 index 0000000..5ff16be (inline SVG markup stripped during extraction; omitted)
Further new image assets under roles/lightbulb-ansiblered-deck/files/deck-ansible/images/; binary contents and stripped SVG markup are omitted, and recoverable SVG titles appear in parentheses: ansible-logo.png, ansible-logomark-red.svg, ansible-platform-overview.svg (Ansible-Platform-Overview), ansible-tower-platform-diagram.svg, ansible-tower-rbac.svg (Tower-RBAC), ansible-tower3-monitor-1x.png, ansible-use-case-diagram.svg, ansible-wordmark-white.svg, ansible_tagging.png, devops-language-diagram.svg (DevOps-Language-Diagram), fiero.jpg, fussypants.jpg, galaxy-10996_1280.jpg, how-ansible-works-diagram-01.svg through how-ansible-works-diagram-06.svg (How-Ansible-Works-Diagram), httpd_conf_template.png, index.php, most_meetups.svg, most_searched.svg, passwordkey.png, public-private-cloud.png, redhat-logo-rgb-default.svg, simple-powerful-agentless-diagram.svg (Simple-Powerful-Agentless-Diagram), tower/activity-stream.png, tower/external-logging.png, tower/integrated-notifications.png, tower/job-status-update.png, tower/manage-track-inventory.png, tower/multi-playbook-workflows.png, tower/remote-command-execution.png, tower/schedule-jobs.png, tower/self-service-it.png
diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/index.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/index.php new file mode 100644 index 0000000..ab57767 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/index.php @@ -0,0 +1,152 @@
    +RUNNING IN DRY RUN with LAB LIMIT SET TO 
    +
    + 0 = No Restrictions		/deck-ansible/
    + 1 = Only Labs			/deck-ansible/?labs/
    + 2 = No Labs, only deck		/deck-ansible/?nolabs/
    +
    +######################################################
    +	 $htmldir){
    +	$pretty_htmldir = preg_replace("/^.*\/html_slides/","",$htmldir);
    +	$pretty_htmldir = preg_replace("/^.*\//","",$pretty_htmldir);
    +	$pretty_htmldir = preg_replace("/^[0-9]+_+/","",$pretty_htmldir);
    +	$pretty_htmldir = preg_replace("/^[0-9]+_+/","",$pretty_htmldir);
    +	$pretty_htmldir = preg_replace("/_+/"," ",$pretty_htmldir);
    +
    +	$html_files = explode("\n",shell_exec("find $htmldir -maxdepth 1 -type f -iname \"*html\" | sort"));
    +	$html_files = array_filter($html_files);
    +
    +	if (($pretty_htmldir) and (count($html_files))){
    +		if ($dry_run) print "\n#$key \"$pretty_htmldir\" contains " . count($html_files) . " slides\n\n";
    +		$labid = "LABS-" . preg_replace("/[^0-9a-zA-Z]+/","", $pretty_htmldir);
    +
    +		foreach( $html_files as $key => $htmlinc){
    +			if (($dry_run) and ($lab_limit != 1)) print "INCLUDE $htmlinc\n";
    +			$localdir = str_replace($html_dir . '/',"",$htmlinc);
    +			$localfile = preg_replace("/^.*\//","",$htmlinc);
    +
    +			if ((! $dry_run) and ($lab_limit != 1 )) {
    +				if ( (file_exists($htmlinc)) and (!preg_match("/^_/",$localdir)) and (!preg_match("/^_/",$localfile)) ) include($htmlinc);
    +			}
    +
    +		}
    +
    +		if ($lab_limit != 2){
    +			$lab_files = explode("\n",shell_exec("find $htmldir/labs -maxdepth 1 -type f -iname \"*html\" | sort"));
    +			$lab_files = array_filter($lab_files);
    +
    +			if (count($lab_files)){
    +				if ($dry_run){
    +					print "\nSTART-LAB-INCLUDE for \"$pretty_htmldir\"\n\n";
    +				}else{
    +					?>
    +	
    +
    +

    LABS:

    +

    Click the down arrow to continue into the labs

    + +
    + $labinc){ + if ($dry_run){ + print "INCLUDE-LABS $labinc\n"; + }else{ + $labdir = str_replace($html_dir . '/',"",$labinc); + $labfile = preg_replace("/^.*\//","",$labinc); + + if ( (file_exists($labinc)) and (!preg_match("/^_/",$labdir)) and (!preg_match("/^_/",$labfile)) ) include($labinc); + + // print " $labinc\n"; + } + } + + if ($dry_run){ + print "\nEND-LAB-INCLUDE for \"$pretty_htmldir\"\n"; + }else{ + ?> +
    +

    LABS:
    Complete!

    +

    Click right to continue

    +
    + +
    + \n"; +if (! $dry_run) require_once("page_final.html"); + +?> + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/intro-to-ansible-tower.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/intro-to-ansible-tower.html new file mode 100644 index 0000000..759aefc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/intro-to-ansible-tower.html @@ -0,0 +1,539 @@ + + + + + + + Introduction to Ansible Tower + + + + + + + + + + + + + + + + +
    + + + + + + +
    +
    +
    +
    + +

    Introduction to Ansible Tower

    + +
    +
    +

    What You Will Learn

    +

    Red Hat® Ansible® Tower helps you scale IT automation, manage complex deployments and speed productivity. Centralize and control your IT infrastructure with a visual dashboard, role-based access control, job scheduling and graphical inventory management.


    +
      +
    • What is Ansible Tower
    • +
    • How Ansible Tower Works
    • +
    • Installing Ansible Tower
    • +
    • Key Features
    • +
    + +
    +
    +

    What is Ansible Tower?

    +

    Ansible Tower is an enterprise framework for controlling, securing and managing your Ansible automation – with a UI and RESTful API


    +
      +
    • Role-based access control keeps environments secure, and teams efficient
    • +
    • Non-privileged users can safely deploy entire applications with push-button deployment access
    • +
    • All Ansible automations are centrally logged, ensuring complete auditability and compliance
    • +
    +
    +
    + + +
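    Since the deck leans on that RESTful API throughout, a minimal Python sketch of talking to it may help. The host name and credentials are placeholders, and the paths assume the v2 API (Tower 3.2+/AWX):

    import requests

    TOWER = "https://tower.example.com"   # placeholder Tower host
    AUTH = ("admin", "password")          # placeholder lab credentials

    # Unauthenticated liveness/version check
    print(requests.get(f"{TOWER}/api/v2/ping/", verify=False).json())

    # Anything the UI exposes is also reachable over the API, e.g. "who am I?"
    me = requests.get(f"{TOWER}/api/v2/me/", auth=AUTH, verify=False).json()
    print(me["results"][0]["username"])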
    +
    +

    Platform Overview

    + + +
    +
    +

    Installing Ansible Tower

    +
    
    +# the most common and preferred way of
    +# installation for RHEL (Preferred) or Ubuntu
    +$ wget https://bit.ly/ansibletower
    +
    +# bundled installer can be downloaded for
    +# RHEL (and select derivatives) at
    +$ wget https://bit.ly/ansibletowerbundle
    +
    +# looking for a specific version? navigate to http://releases.ansible.com/ansible-tower
    +# to see all the versions available for download
    +          
    + +
    +
    +

    Server Requirements

    +
      +
    • Red Hat Enterprise Linux (RHEL) 7 (and select derivatives), Ubuntu 14.04 LTS 64-bit, or Ubuntu 16.04 LTS 64-bit; 64-bit support is required for both kernel and runtime.
    • +
    • A currently supported version of Mozilla Firefox or Google Chrome.
    • +
    • 2 GB RAM minimum (4+ GB RAM highly recommended)
    • +
    • 20 GB of dedicated hard disk space
    • +
    + +
    +
    +
    +

    Demo Time:
    Installing Ansible Tower

    + +
    +
    +

    Workshop:
    Installing Ansible Tower

    + +
    +
    +
    +

    Key Features of Ansible Tower

    +
      +
    • Dashboard and User Interface
    • +
    • User Base -- Organizations, Teams & Users
    • +
    • Credentials
    • +
    • Inventories
    • +
    • Projects
    • +
    • Job Templates & Jobs
    • +
    • Role Based Access Control (RBAC)
    • +
    + +
    +
    +

    Dashboard and User Interface

    +
    + +
    +
    +

    User Base


    +

    A user is an account used to access Ansible Tower and its services, subject to the permissions granted to it.

    +

    An organization is a logical collection of users, teams, projects, inventories and more. All entities belong to an organization with the exception of users.

    +

    Teams provide a means to implement role-based access control schemes and delegate responsibilities across organizations.

    + +
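    As a rough illustration of how these objects look through the REST API (names, ids and credentials below are invented, and the endpoints assume the v2 API), an organization, a team and a user could be created like this:

    import requests

    TOWER = "https://tower.example.com"   # placeholder
    AUTH = ("admin", "password")          # placeholder admin credentials

    def create(path, payload):
        r = requests.post(f"{TOWER}{path}", auth=AUTH, json=payload, verify=False)
        r.raise_for_status()
        return r.json()

    org = create("/api/v2/organizations/", {"name": "Engineering"})
    team = create("/api/v2/teams/", {"name": "Web Ops", "organization": org["id"]})
    user = create("/api/v2/users/", {"username": "alice", "password": "changeme"})
    print(org["id"], team["id"], user["id"])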
    +
    +

    Credentials

    +

    Credentials are utilized by Ansible Tower for authentication with various external resources:

    +
      +
    • Connecting to remote machines to run jobs
    • +
    • Syncing with inventory sources
    • +
    • Importing project content from version control systems
    • +
    • Connecting to and managing networking devices +
    +

    Centralized management of various credentials allows end users to leverage a secret without ever exposing that secret to them.

    + +
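    To illustrate that last point, a small Python sketch: a user can list and reference the credentials shared with them, but the API keeps the secret material hidden. The host and login are placeholders, and the exact masking format may vary by Tower version:

    import requests

    TOWER = "https://tower.example.com"   # placeholder
    AUTH = ("alice", "password")          # placeholder non-admin user

    creds = requests.get(f"{TOWER}/api/v2/credentials/", auth=AUTH, verify=False).json()
    for cred in creds["results"]:
        # Only credentials granted to this user are listed, and secret inputs
        # (passwords, keys) come back masked rather than in clear text.
        print(cred["id"], cred["name"], cred["credential_type"])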
    +
    +

    Inventory

    +

    Inventory is a collection of hosts (nodes) with associated data and groupings that Ansible Tower can connect to and manage.

    +
      +
    • Hosts (nodes)
    • +
    • Groups
    • +
    • Inventory-specific data (variables)
    • +
    • Static or dynamic sources
    • +
    + +
    +
    +

    Projects

    +

    A Project is a logical collection of Ansible Playbooks, represented in Ansible Tower.

    +

    You can manage Playbooks and Playbook directories by placing them in a source code management system supported by Ansible Tower, including Git, Subversion, and Mercurial. +

    +
    +
    +

    Job Templates

    +

    A job template is a definition and set of parameters for running an Ansible Playbook.

    Job templates are useful to execute the same job many times and encourage the reuse of Ansible Playbook content and collaboration between teams.

    + +
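    Launching a job template through the REST API is a single POST; a sketch follows, where the template id, host and credentials are placeholders and extra_vars are only accepted if the template allows prompting:

    import requests

    TOWER = "https://tower.example.com"   # placeholder
    AUTH = ("admin", "password")          # placeholder

    resp = requests.post(
        f"{TOWER}/api/v2/job_templates/42/launch/",   # 42: hypothetical template id
        auth=AUTH,
        json={"extra_vars": {"package_state": "latest"}},
        verify=False,   # lab-only: skip TLS verification for self-signed certs
    )
    job = resp.json()
    print(job.get("id"), job.get("status"))   # id of the new job and its initial status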
    +
    +

    Jobs

    +

    A job is an instance of Ansible Tower launching an Ansible Playbook against an inventory of hosts.

    +
      +
    • Job results can be easily viewed
    • +
    • View the standard out for a more in-depth look
    • +
    + +
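    A short sketch of checking on such a job from the API side (job id, host and credentials are placeholders): poll its status, then pull the standard out as plain text.

    import requests, time

    TOWER = "https://tower.example.com"   # placeholder
    AUTH = ("admin", "password")          # placeholder
    JOB_ID = 123                          # placeholder: id returned at launch time

    while True:
        job = requests.get(f"{TOWER}/api/v2/jobs/{JOB_ID}/", auth=AUTH, verify=False).json()
        if job["status"] in ("successful", "failed", "error", "canceled"):
            break
        time.sleep(5)

    stdout = requests.get(f"{TOWER}/api/v2/jobs/{JOB_ID}/stdout/",
                          params={"format": "txt"}, auth=AUTH, verify=False)
    print(job["status"])
    print(stdout.text)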
    +
    +

    Role Based Access Control (RBAC)

    +

    Role-Based Access Controls (RBAC) are built into Ansible Tower and allow administrators to delegate access to server inventories, organizations, and more. These controls allow Ansible Tower to help you increase security and streamline management of your Ansible automation.

    + +
    +
    +

    Role Based Access Control (RBAC)


    + +
    +
    +
    +

    Demo Time:
    Ansible Tower Basic Setup
    & Job Run

    + +
    +
    +

    Workshop:
    Ansible Tower Basic Setup
    & Your First Job Run

    + +
    +
    +
    +
    +

    Dynamic Inventory in Ansible Tower

    +

    Dynamic inventory is a script that queries a service, like a cloud provider API or a management application. This data is formatted in an Ansible-specific JSON data structure and is used in lieu of static inventory files.

    +
      +
    • Groups are generated based on host metadata
    • +
    • Single source of truth saves time, avoids duplication and reduces human error
    • +
    • Dynamic and static inventory sources can be used together
    • +
    + +
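    The JSON structure itself is small. Below is a minimal, self-contained sketch of a dynamic inventory script; the host names and group layout are invented, and a real script would query a cloud API or CMDB instead of returning hard-coded data:

    #!/usr/bin/env python3
    import json
    import sys

    def build_inventory():
        # Groups map to hosts and group vars; _meta.hostvars carries per-host vars.
        return {
            "web": {"hosts": ["web1.example.com", "web2.example.com"],
                    "vars": {"http_port": 80}},
            "db": {"hosts": ["db1.example.com"]},
            "_meta": {"hostvars": {"web1.example.com": {"ansible_host": "10.0.0.11"}}},
        }

    if __name__ == "__main__":
        if len(sys.argv) > 1 and sys.argv[1] == "--host":
            # Per-host data is already supplied via _meta, so return an empty dict.
            print(json.dumps({}))
        else:
            # Ansible (and Tower inventory syncs) call the script with --list.
            print(json.dumps(build_inventory()))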
    +
    +

    Demo:
    Ansible Tower Dynamic Inventory

    + +
    +
    +
    +
    +

    More with Ansible Tower

    +
      +
    • Job Status Updates
    • +
    • Activity Stream
    • +
    • Integrated Notifications
    • +
    • Schedule Jobs
    • +
    • Manage and Track Your Inventory
    • +
    • Self Service IT (User Surveys)
    • +
    • Remote Command Execution
    • +
    • External Logging
    • +
    • Multi-Playbook Workflows
    • +
    + +
    + + +
    +

    Job Status Update

    +
    +
    +

    Heads-up NOC-style automation dashboard displays everything going on in your Ansible environment.

    +
    +
    + +
    +
    + +
    + + +
    +

    Activity Stream

    +
    +
    + +
    +
    +

    Securely stores every Job that runs, and enables you to review each run later or export its details through Ansible Tower’s API.

    +
    +
    + +
    + + +
    +

    Integrated Notifications

    +
    +
    +

    Stay informed of your automation status via integrated notifications. Connect Slack, Hipchat, SMS, email and more.

    +
    +
    + +
    +
    + +
    + + +
    +

    Schedule Jobs

    +
    +
    + +
    +
    +

    Enables you to run any Job now, later, or forever.

    +
    +
    + +
    + + +
    +

    Manage and Track Your Inventory

    +
    +
    +

    Ansible Tower’s inventory syncing and provisioning callbacks allow nodes to request configuration on demand, enabling auto-scaling.

    +
    +
    + +
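    A provisioning callback is just an HTTP POST made by the node itself. A rough sketch, where the Tower host, template id and host config key are placeholders and the job template must have callbacks enabled:

    import requests

    TOWER = "https://tower.example.com"     # placeholder Tower host
    TEMPLATE_ID = 42                        # placeholder: template with callbacks enabled
    HOST_CONFIG_KEY = "abc123"              # placeholder: the template's host config key

    resp = requests.post(
        f"{TOWER}/api/v2/job_templates/{TEMPLATE_ID}/callback/",
        data={"host_config_key": HOST_CONFIG_KEY},
        verify=False,   # lab-only: self-signed certificates
    )
    # A 2xx response means Tower queued a job limited to the calling host.
    print(resp.status_code)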
    +
    + +
    + + +
    +

    Self Service IT

    +
    +
    + +
    +
    +

    Ansible Tower lets you launch Playbooks with just a single click. It can prompt you for variables, let you choose from available secure credentials and monitor the resulting deployments.

    +
    +
    + +
    + + +
    +

    Remote Command Execution

    +
    +
    +

    Run simple tasks on any host with Ansible Tower's remote command execution. Quickly add users or groups, reset passwords, restart a malfunctioning service, or patch a critical security issue.

    +
    +
    + +
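    Remote commands correspond to ad hoc commands in the REST API; a hedged sketch follows, in which the inventory and credential ids, host and login are placeholders:

    import requests

    TOWER = "https://tower.example.com"   # placeholder
    AUTH = ("admin", "password")          # placeholder

    payload = {
        "inventory": 1,                   # placeholder inventory id
        "credential": 2,                  # placeholder machine credential id
        "limit": "web",                   # restrict the run to the "web" group
        "module_name": "service",
        "module_args": "name=httpd state=restarted",
    }
    resp = requests.post(f"{TOWER}/api/v2/ad_hoc_commands/",
                         auth=AUTH, json=payload, verify=False)
    cmd = resp.json()
    print(cmd.get("id"), cmd.get("status"))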
    +
    + +
    + + +
    +

    External Logging

    +
    +
    + +
    +
    +

    Connect Ansible Tower to your external logging and analytics provider to perform analysis of automation and event correlation across your entire environment.

    +
    +
    + +
    + + +
    +

    Multi-Playbook Workflows

    +
    +
    +

    Ansible Tower’s multi-Playbook workflows chain any number of Playbooks together to create a single workflow. Different Jobs can be run depending on the success or failure of the prior Playbook.

    +
    +
    + +
    +
    + +
    + + +
    +
    +

    Next Steps

    +
      +
    • It’s easy to get started
      ansible.com/get-started
    • +
    • Try Ansible Tower for free:
      ansible.com/tower-trial
    • +
    • Would you like to learn a lot more?
      redhat.com/en/services/training/do409-automation-ansible-ii-ansible-tower
    • +
    +
    +
    +
    + + + + + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/GoKEV.js b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/GoKEV.js new file mode 100644 index 0000000..00c87c5 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/GoKEV.js @@ -0,0 +1,42 @@ +/* Scrolling Terminal Window Function + +Originally discovered via google search and available here + +https://www.sitepoint.com/community/t/auto-scrolling-a-div-with-overflow-scroll-auto/2291/2 +*/ + +function scrollDiv_init(scrollingWindow,ScrollRate) { + DivElmnt = document.getElementById(scrollingWindow,ScrollRate); + ReachedMaxScroll = false; + + DivElmnt.scrollTop = 0; + PreviousScrollTop = 0; + + ScrollInterval = setInterval('scrollDiv()', ScrollRate); +} + +function scrollDiv() { + + if (!ReachedMaxScroll) { + DivElmnt.scrollTop = PreviousScrollTop; + PreviousScrollTop++; + + ReachedMaxScroll = DivElmnt.scrollTop >= (DivElmnt.scrollHeight - DivElmnt.offsetHeight); + } + else { + ReachedMaxScroll = (DivElmnt.scrollTop == 0)?false:true; + + DivElmnt.scrollTop = PreviousScrollTop; + PreviousScrollTop--; + } +} + +function pauseDiv() { + clearInterval(ScrollInterval); +} + +function resumeDiv() { + PreviousScrollTop = DivElmnt.scrollTop; + ScrollInterval = setInterval('scrollDiv()', ScrollRate); +} + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/head.min.js b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/head.min.js new file mode 100644 index 0000000..d9f87ba --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/head.min.js @@ -0,0 +1,9 @@ +/*! head.core - v1.0.2 */ +(function(n,t){"use strict";function r(n){a[a.length]=n}function k(n){var t=new RegExp(" ?\\b"+n+"\\b");c.className=c.className.replace(t,"")}function p(n,t){for(var i=0,r=n.length;in?(i.screensCss.gt&&r("gt-"+n),i.screensCss.gte&&r("gte-"+n)):tt);u.feature("landscape",fe?(i.browserCss.gt&&r("gt-"+f+e),i.browserCss.gte&&r("gte-"+f+e)):h2&&this[u+1]!==t)u&&r(this.slice(u,u+1).join("-").toLowerCase()+i.section);else{var f=n||"index",e=f.indexOf(".");e>0&&(f=f.substring(0,e));c.id=f.toLowerCase()+i.page;u||r("root"+i.section)}});u.screen={height:n.screen.height,width:n.screen.width};tt();b=0;n.addEventListener?n.addEventListener("resize",it,!1):n.attachEvent("onresize",it)})(window); +/*! 
head.css3 - v1.0.0 */ +(function(n,t){"use strict";function a(n){for(var r in n)if(i[n[r]]!==t)return!0;return!1}function r(n){var t=n.charAt(0).toUpperCase()+n.substr(1),i=(n+" "+c.join(t+" ")+t).split(" ");return!!a(i)}var h=n.document,o=h.createElement("i"),i=o.style,s=" -o- -moz- -ms- -webkit- -khtml- ".split(" "),c="Webkit Moz O ms Khtml".split(" "),l=n.head_conf&&n.head_conf.head||"head",u=n[l],f={gradient:function(){var n="background-image:";return i.cssText=(n+s.join("gradient(linear,left top,right bottom,from(#9f9),to(#fff));"+n)+s.join("linear-gradient(left top,#eee,#fff);"+n)).slice(0,-n.length),!!i.backgroundImage},rgba:function(){return i.cssText="background-color:rgba(0,0,0,0.5)",!!i.backgroundColor},opacity:function(){return o.style.opacity===""},textshadow:function(){return i.textShadow===""},multiplebgs:function(){i.cssText="background:url(https://),url(https://),red url(https://)";var n=(i.background||"").match(/url/g);return Object.prototype.toString.call(n)==="[object Array]"&&n.length===3},boxshadow:function(){return r("boxShadow")},borderimage:function(){return r("borderImage")},borderradius:function(){return r("borderRadius")},cssreflections:function(){return r("boxReflect")},csstransforms:function(){return r("transform")},csstransitions:function(){return r("transition")},touch:function(){return"ontouchstart"in n},retina:function(){return n.devicePixelRatio>1},fontface:function(){var t=u.browser.name,n=u.browser.version;switch(t){case"ie":return n>=9;case"chrome":return n>=13;case"ff":return n>=6;case"ios":return n>=5;case"android":return!1;case"webkit":return n>=5.1;case"opera":return n>=10;default:return!1}}};for(var e in f)f[e]&&u.feature(e,f[e].call(),!0);u.feature()})(window); +/*! head.load - v1.0.3 */ +(function(n,t){"use strict";function w(){}function u(n,t){if(n){typeof n=="object"&&(n=[].slice.call(n));for(var i=0,r=n.length;i' ); + + var leadingWs = text.match( /^\n?(\s*)/ )[1].length, + leadingTabs = text.match( /^\n?(\t*)/ )[1].length; + + if( leadingTabs > 0 ) { + text = text.replace( new RegExp('\\n?\\t{' + leadingTabs + '}','g'), '\n' ); + } + else if( leadingWs > 1 ) { + text = text.replace( new RegExp('\\n? {' + leadingWs + '}', 'g'), '\n' ); + } + + return text; + + } + + /** + * Given a markdown slide section element, this will + * return all arguments that aren't related to markdown + * parsing. Used to forward any other user-defined arguments + * to the output markdown slide. + */ + function getForwardedAttributes( section ) { + + var attributes = section.attributes; + var result = []; + + for( var i = 0, len = attributes.length; i < len; i++ ) { + var name = attributes[i].name, + value = attributes[i].value; + + // disregard attributes that are used for markdown loading/parsing + if( /data\-(markdown|separator|vertical|notes)/gi.test( name ) ) continue; + + if( value ) { + result.push( name + '="' + value + '"' ); + } + else { + result.push( name ); + } + } + + return result.join( ' ' ); + + } + + /** + * Inspects the given options and fills out default + * values for what's not defined. + */ + function getSlidifyOptions( options ) { + + options = options || {}; + options.separator = options.separator || DEFAULT_SLIDE_SEPARATOR; + options.notesSeparator = options.notesSeparator || DEFAULT_NOTES_SEPARATOR; + options.attributes = options.attributes || ''; + + return options; + + } + + /** + * Helper function for constructing a markdown slide. 
+ */ + function createMarkdownSlide( content, options ) { + + options = getSlidifyOptions( options ); + + var notesMatch = content.split( new RegExp( options.notesSeparator, 'mgi' ) ); + + if( notesMatch.length === 2 ) { + content = notesMatch[0] + ''; + } + + // prevent script end tags in the content from interfering + // with parsing + content = content.replace( /<\/script>/g, SCRIPT_END_PLACEHOLDER ); + + return ''; + + } + + /** + * Parses a data string into multiple slides based + * on the passed in separator arguments. + */ + function slidify( markdown, options ) { + + options = getSlidifyOptions( options ); + + var separatorRegex = new RegExp( options.separator + ( options.verticalSeparator ? '|' + options.verticalSeparator : '' ), 'mg' ), + horizontalSeparatorRegex = new RegExp( options.separator ); + + var matches, + lastIndex = 0, + isHorizontal, + wasHorizontal = true, + content, + sectionStack = []; + + // iterate until all blocks between separators are stacked up + while( matches = separatorRegex.exec( markdown ) ) { + notes = null; + + // determine direction (horizontal by default) + isHorizontal = horizontalSeparatorRegex.test( matches[0] ); + + if( !isHorizontal && wasHorizontal ) { + // create vertical stack + sectionStack.push( [] ); + } + + // pluck slide content from markdown input + content = markdown.substring( lastIndex, matches.index ); + + if( isHorizontal && wasHorizontal ) { + // add to horizontal stack + sectionStack.push( content ); + } + else { + // add to vertical stack + sectionStack[sectionStack.length-1].push( content ); + } + + lastIndex = separatorRegex.lastIndex; + wasHorizontal = isHorizontal; + } + + // add the remaining slide + ( wasHorizontal ? sectionStack : sectionStack[sectionStack.length-1] ).push( markdown.substring( lastIndex ) ); + + var markdownSections = ''; + + // flatten the hierarchical stack, and insert
    tags + for( var i = 0, len = sectionStack.length; i < len; i++ ) { + // vertical + if( sectionStack[i] instanceof Array ) { + markdownSections += '
    '; + + sectionStack[i].forEach( function( child ) { + markdownSections += '
    ' + createMarkdownSlide( child, options ) + '
    '; + } ); + + markdownSections += '
    '; + } + else { + markdownSections += '
    ' + createMarkdownSlide( sectionStack[i], options ) + '
    '; + } + } + + return markdownSections; + + } + + /** + * Parses any current data-markdown slides, splits + * multi-slide markdown into separate sections and + * handles loading of external markdown. + */ + function processSlides() { + + var sections = document.querySelectorAll( '[data-markdown]'), + section; + + for( var i = 0, len = sections.length; i < len; i++ ) { + + section = sections[i]; + + if( section.getAttribute( 'data-markdown' ).length ) { + + var xhr = new XMLHttpRequest(), + url = section.getAttribute( 'data-markdown' ); + + datacharset = section.getAttribute( 'data-charset' ); + + // see https://developer.mozilla.org/en-US/docs/Web/API/element.getAttribute#Notes + if( datacharset != null && datacharset != '' ) { + xhr.overrideMimeType( 'text/html; charset=' + datacharset ); + } + + xhr.onreadystatechange = function() { + if( xhr.readyState === 4 ) { + // file protocol yields status code 0 (useful for local debug, mobile applications etc.) + if ( ( xhr.status >= 200 && xhr.status < 300 ) || xhr.status === 0 ) { + + section.outerHTML = slidify( xhr.responseText, { + separator: section.getAttribute( 'data-separator' ), + verticalSeparator: section.getAttribute( 'data-separator-vertical' ), + notesSeparator: section.getAttribute( 'data-separator-notes' ), + attributes: getForwardedAttributes( section ) + }); + + } + else { + + section.outerHTML = '
    ' + + 'ERROR: The attempt to fetch ' + url + ' failed with HTTP status ' + xhr.status + '.' + + 'Check your browser\'s JavaScript console for more details.' + + '

    Remember that you need to serve the presentation HTML from a HTTP server.

    ' + + '
    '; + + } + } + }; + + xhr.open( 'GET', url, false ); + + try { + xhr.send(); + } + catch ( e ) { + alert( 'Failed to get the Markdown file ' + url + '. Make sure that the presentation and the file are served by a HTTP server and the file can be found there. ' + e ); + } + + } + else if( section.getAttribute( 'data-separator' ) || section.getAttribute( 'data-separator-vertical' ) || section.getAttribute( 'data-separator-notes' ) ) { + + section.outerHTML = slidify( getMarkdownFromSlide( section ), { + separator: section.getAttribute( 'data-separator' ), + verticalSeparator: section.getAttribute( 'data-separator-vertical' ), + notesSeparator: section.getAttribute( 'data-separator-notes' ), + attributes: getForwardedAttributes( section ) + }); + + } + else { + section.innerHTML = createMarkdownSlide( getMarkdownFromSlide( section ) ); + } + } + + } + + /** + * Check if a node value has the attributes pattern. + * If yes, extract it and add that value as one or several attributes + * the the terget element. + * + * You need Cache Killer on Chrome to see the effect on any FOM transformation + * directly on refresh (F5) + * http://stackoverflow.com/questions/5690269/disabling-chrome-cache-for-website-development/7000899#answer-11786277 + */ + function addAttributeInElement( node, elementTarget, separator ) { + + var mardownClassesInElementsRegex = new RegExp( separator, 'mg' ); + var mardownClassRegex = new RegExp( "([^\"= ]+?)=\"([^\"=]+?)\"", 'mg' ); + var nodeValue = node.nodeValue; + if( matches = mardownClassesInElementsRegex.exec( nodeValue ) ) { + + var classes = matches[1]; + nodeValue = nodeValue.substring( 0, matches.index ) + nodeValue.substring( mardownClassesInElementsRegex.lastIndex ); + node.nodeValue = nodeValue; + while( matchesClass = mardownClassRegex.exec( classes ) ) { + elementTarget.setAttribute( matchesClass[1], matchesClass[2] ); + } + return true; + } + return false; + } + + /** + * Add attributes to the parent element of a text node, + * or the element of an attribute node. + */ + function addAttributes( section, element, previousElement, separatorElementAttributes, separatorSectionAttributes ) { + + if ( element != null && element.childNodes != undefined && element.childNodes.length > 0 ) { + previousParentElement = element; + for( var i = 0; i < element.childNodes.length; i++ ) { + childElement = element.childNodes[i]; + if ( i > 0 ) { + j = i - 1; + while ( j >= 0 ) { + aPreviousChildElement = element.childNodes[j]; + if ( typeof aPreviousChildElement.setAttribute == 'function' && aPreviousChildElement.tagName != "BR" ) { + previousParentElement = aPreviousChildElement; + break; + } + j = j - 1; + } + } + parentSection = section; + if( childElement.nodeName == "section" ) { + parentSection = childElement ; + previousParentElement = childElement ; + } + if ( typeof childElement.setAttribute == 'function' || childElement.nodeType == Node.COMMENT_NODE ) { + addAttributes( parentSection, childElement, previousParentElement, separatorElementAttributes, separatorSectionAttributes ); + } + } + } + + if ( element.nodeType == Node.COMMENT_NODE ) { + if ( addAttributeInElement( element, previousElement, separatorElementAttributes ) == false ) { + addAttributeInElement( element, section, separatorSectionAttributes ); + } + } + } + + /** + * Converts any current data-markdown slides in the + * DOM to HTML. 
+ */ + function convertSlides() { + + var sections = document.querySelectorAll( '[data-markdown]'); + + for( var i = 0, len = sections.length; i < len; i++ ) { + + var section = sections[i]; + + // Only parse the same slide once + if( !section.getAttribute( 'data-markdown-parsed' ) ) { + + section.setAttribute( 'data-markdown-parsed', true ) + + var notes = section.querySelector( 'aside.notes' ); + var markdown = getMarkdownFromSlide( section ); + + section.innerHTML = marked( markdown ); + addAttributes( section, section, null, section.getAttribute( 'data-element-attributes' ) || + section.parentNode.getAttribute( 'data-element-attributes' ) || + DEFAULT_ELEMENT_ATTRIBUTES_SEPARATOR, + section.getAttribute( 'data-attributes' ) || + section.parentNode.getAttribute( 'data-attributes' ) || + DEFAULT_SLIDE_ATTRIBUTES_SEPARATOR); + + // If there were notes, we need to re-add them after + // having overwritten the section's HTML + if( notes ) { + section.appendChild( notes ); + } + + } + + } + + } + + // API + return { + + initialize: function() { + if( typeof marked === 'undefined' ) { + throw 'The reveal.js Markdown plugin requires marked to be loaded'; + } + + if( typeof hljs !== 'undefined' ) { + marked.setOptions({ + highlight: function( code, lang ) { + return hljs.highlightAuto( code, [lang] ).value; + } + }); + } + + var options = Reveal.getConfig().markdown; + + if ( options ) { + marked.setOptions( options ); + } + + processSlides(); + convertSlides(); + }, + + // TODO: Do these belong in the API? + processSlides: processSlides, + convertSlides: convertSlides, + slidify: slidify + + }; + +})); diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/marked.js b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/marked.js new file mode 100644 index 0000000..555c1dc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/marked.js @@ -0,0 +1,6 @@ +/** + * marked - a markdown parser + * Copyright (c) 2011-2014, Christopher Jeffrey. (MIT Licensed) + * https://github.com/chjj/marked + */ +(function(){var block={newline:/^\n+/,code:/^( {4}[^\n]+\n*)+/,fences:noop,hr:/^( *[-*_]){3,} *(?:\n+|$)/,heading:/^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)/,nptable:noop,lheading:/^([^\n]+)\n *(=|-){2,} *(?:\n+|$)/,blockquote:/^( *>[^\n]+(\n(?!def)[^\n]+)*\n*)+/,list:/^( *)(bull) [\s\S]+?(?:hr|def|\n{2,}(?! )(?!\1bull )\n*|\s*$)/,html:/^ *(?:comment *(?:\n|\s*$)|closed *(?:\n{2,}|\s*$)|closing *(?:\n{2,}|\s*$))/,def:/^ *\[([^\]]+)\]: *]+)>?(?: +["(]([^\n]+)[")])? 
*(?:\n+|$)/,table:noop,paragraph:/^((?:[^\n]+\n?(?!hr|heading|lheading|blockquote|tag|def))+)\n*/,text:/^[^\n]+/};block.bullet=/(?:[*+-]|\d+\.)/;block.item=/^( *)(bull) [^\n]*(?:\n(?!\1bull )[^\n]*)*/;block.item=replace(block.item,"gm")(/bull/g,block.bullet)();block.list=replace(block.list)(/bull/g,block.bullet)("hr","\\n+(?=\\1?(?:[-*_] *){3,}(?:\\n+|$))")("def","\\n+(?="+block.def.source+")")();block.blockquote=replace(block.blockquote)("def",block.def)();block._tag="(?!(?:"+"a|em|strong|small|s|cite|q|dfn|abbr|data|time|code"+"|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo"+"|span|br|wbr|ins|del|img)\\b)\\w+(?!:/|[^\\w\\s@]*@)\\b";block.html=replace(block.html)("comment",//)("closed",/<(tag)[\s\S]+?<\/\1>/)("closing",/])*?>/)(/tag/g,block._tag)();block.paragraph=replace(block.paragraph)("hr",block.hr)("heading",block.heading)("lheading",block.lheading)("blockquote",block.blockquote)("tag","<"+block._tag)("def",block.def)();block.normal=merge({},block);block.gfm=merge({},block.normal,{fences:/^ *(`{3,}|~{3,})[ \.]*(\S+)? *\n([\s\S]*?)\s*\1 *(?:\n+|$)/,paragraph:/^/,heading:/^ *(#{1,6}) +([^\n]+?) *#* *(?:\n+|$)/});block.gfm.paragraph=replace(block.paragraph)("(?!","(?!"+block.gfm.fences.source.replace("\\1","\\2")+"|"+block.list.source.replace("\\1","\\3")+"|")();block.tables=merge({},block.gfm,{nptable:/^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*/,table:/^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*/});function Lexer(options){this.tokens=[];this.tokens.links={};this.options=options||marked.defaults;this.rules=block.normal;if(this.options.gfm){if(this.options.tables){this.rules=block.tables}else{this.rules=block.gfm}}}Lexer.rules=block;Lexer.lex=function(src,options){var lexer=new Lexer(options);return lexer.lex(src)};Lexer.prototype.lex=function(src){src=src.replace(/\r\n|\r/g,"\n").replace(/\t/g," ").replace(/\u00a0/g," ").replace(/\u2424/g,"\n");return this.token(src,true)};Lexer.prototype.token=function(src,top,bq){var src=src.replace(/^ +$/gm,""),next,loose,cap,bull,b,item,space,i,l;while(src){if(cap=this.rules.newline.exec(src)){src=src.substring(cap[0].length);if(cap[0].length>1){this.tokens.push({type:"space"})}}if(cap=this.rules.code.exec(src)){src=src.substring(cap[0].length);cap=cap[0].replace(/^ {4}/gm,"");this.tokens.push({type:"code",text:!this.options.pedantic?cap.replace(/\n+$/,""):cap});continue}if(cap=this.rules.fences.exec(src)){src=src.substring(cap[0].length);this.tokens.push({type:"code",lang:cap[2],text:cap[3]||""});continue}if(cap=this.rules.heading.exec(src)){src=src.substring(cap[0].length);this.tokens.push({type:"heading",depth:cap[1].length,text:cap[2]});continue}if(top&&(cap=this.rules.nptable.exec(src))){src=src.substring(cap[0].length);item={type:"table",header:cap[1].replace(/^ *| *\| *$/g,"").split(/ *\| */),align:cap[2].replace(/^ *|\| *$/g,"").split(/ *\| */),cells:cap[3].replace(/\n$/,"").split("\n")};for(i=0;i 
?/gm,"");this.token(cap,top,true);this.tokens.push({type:"blockquote_end"});continue}if(cap=this.rules.list.exec(src)){src=src.substring(cap[0].length);bull=cap[2];this.tokens.push({type:"list_start",ordered:bull.length>1});cap=cap[0].match(this.rules.item);next=false;l=cap.length;i=0;for(;i1&&b.length>1)){src=cap.slice(i+1).join("\n")+src;i=l-1}}loose=next||/\n\n(?!\s*$)/.test(item);if(i!==l-1){next=item.charAt(item.length-1)==="\n";if(!loose)loose=next}this.tokens.push({type:loose?"loose_item_start":"list_item_start"});this.token(item,false,bq);this.tokens.push({type:"list_item_end"})}this.tokens.push({type:"list_end"});continue}if(cap=this.rules.html.exec(src)){src=src.substring(cap[0].length);this.tokens.push({type:this.options.sanitize?"paragraph":"html",pre:!this.options.sanitizer&&(cap[1]==="pre"||cap[1]==="script"||cap[1]==="style"),text:cap[0]});continue}if(!bq&&top&&(cap=this.rules.def.exec(src))){src=src.substring(cap[0].length);this.tokens.links[cap[1].toLowerCase()]={href:cap[2],title:cap[3]};continue}if(top&&(cap=this.rules.table.exec(src))){src=src.substring(cap[0].length);item={type:"table",header:cap[1].replace(/^ *| *\| *$/g,"").split(/ *\| */),align:cap[2].replace(/^ *|\| *$/g,"").split(/ *\| */),cells:cap[3].replace(/(?: *\| *)?\n$/,"").split("\n")};for(i=0;i])/,autolink:/^<([^ >]+(@|:\/)[^ >]+)>/,url:noop,tag:/^|^<\/?\w+(?:"[^"]*"|'[^']*'|[^'">])*?>/,link:/^!?\[(inside)\]\(href\)/,reflink:/^!?\[(inside)\]\s*\[([^\]]*)\]/,nolink:/^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]/,strong:/^__([\s\S]+?)__(?!_)|^\*\*([\s\S]+?)\*\*(?!\*)/,em:/^\b_((?:[^_]|__)+?)_\b|^\*((?:\*\*|[\s\S])+?)\*(?!\*)/,code:/^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)/,br:/^ {2,}\n(?!\s*$)/,del:noop,text:/^[\s\S]+?(?=[\\?(?:\s+['"]([\s\S]*?)['"])?\s*/;inline.link=replace(inline.link)("inside",inline._inside)("href",inline._href)();inline.reflink=replace(inline.reflink)("inside",inline._inside)();inline.normal=merge({},inline);inline.pedantic=merge({},inline.normal,{strong:/^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,em:/^_(?=\S)([\s\S]*?\S)_(?!_)|^\*(?=\S)([\s\S]*?\S)\*(?!\*)/});inline.gfm=merge({},inline.normal,{escape:replace(inline.escape)("])","~|])")(),url:/^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])/,del:/^~~(?=\S)([\s\S]*?\S)~~/,text:replace(inline.text)("]|","~]|")("|","|https?://|")()});inline.breaks=merge({},inline.gfm,{br:replace(inline.br)("{2,}","*")(),text:replace(inline.gfm.text)("{2,}","*")()});function InlineLexer(links,options){this.options=options||marked.defaults;this.links=links;this.rules=inline.normal;this.renderer=this.options.renderer||new Renderer;this.renderer.options=this.options;if(!this.links){throw new Error("Tokens array requires a `links` property.")}if(this.options.gfm){if(this.options.breaks){this.rules=inline.breaks}else{this.rules=inline.gfm}}else if(this.options.pedantic){this.rules=inline.pedantic}}InlineLexer.rules=inline;InlineLexer.output=function(src,links,options){var inline=new InlineLexer(links,options);return inline.output(src)};InlineLexer.prototype.output=function(src){var 
out="",link,text,href,cap;while(src){if(cap=this.rules.escape.exec(src)){src=src.substring(cap[0].length);out+=cap[1];continue}if(cap=this.rules.autolink.exec(src)){src=src.substring(cap[0].length);if(cap[2]==="@"){text=cap[1].charAt(6)===":"?this.mangle(cap[1].substring(7)):this.mangle(cap[1]);href=this.mangle("mailto:")+text}else{text=escape(cap[1]);href=text}out+=this.renderer.link(href,null,text);continue}if(!this.inLink&&(cap=this.rules.url.exec(src))){src=src.substring(cap[0].length);text=escape(cap[1]);href=text;out+=this.renderer.link(href,null,text);continue}if(cap=this.rules.tag.exec(src)){if(!this.inLink&&/^/i.test(cap[0])){this.inLink=false}src=src.substring(cap[0].length);out+=this.options.sanitize?this.options.sanitizer?this.options.sanitizer(cap[0]):escape(cap[0]):cap[0];continue}if(cap=this.rules.link.exec(src)){src=src.substring(cap[0].length);this.inLink=true;out+=this.outputLink(cap,{href:cap[2],title:cap[3]});this.inLink=false;continue}if((cap=this.rules.reflink.exec(src))||(cap=this.rules.nolink.exec(src))){src=src.substring(cap[0].length);link=(cap[2]||cap[1]).replace(/\s+/g," ");link=this.links[link.toLowerCase()];if(!link||!link.href){out+=cap[0].charAt(0);src=cap[0].substring(1)+src;continue}this.inLink=true;out+=this.outputLink(cap,link);this.inLink=false;continue}if(cap=this.rules.strong.exec(src)){src=src.substring(cap[0].length);out+=this.renderer.strong(this.output(cap[2]||cap[1]));continue}if(cap=this.rules.em.exec(src)){src=src.substring(cap[0].length);out+=this.renderer.em(this.output(cap[2]||cap[1]));continue}if(cap=this.rules.code.exec(src)){src=src.substring(cap[0].length);out+=this.renderer.codespan(escape(cap[2],true));continue}if(cap=this.rules.br.exec(src)){src=src.substring(cap[0].length);out+=this.renderer.br();continue}if(cap=this.rules.del.exec(src)){src=src.substring(cap[0].length);out+=this.renderer.del(this.output(cap[1]));continue}if(cap=this.rules.text.exec(src)){src=src.substring(cap[0].length);out+=this.renderer.text(escape(this.smartypants(cap[0])));continue}if(src){throw new Error("Infinite loop on byte: "+src.charCodeAt(0))}}return out};InlineLexer.prototype.outputLink=function(cap,link){var href=escape(link.href),title=link.title?escape(link.title):null;return cap[0].charAt(0)!=="!"?this.renderer.link(href,title,this.output(cap[1])):this.renderer.image(href,title,escape(cap[1]))};InlineLexer.prototype.smartypants=function(text){if(!this.options.smartypants)return text;return text.replace(/---/g,"—").replace(/--/g,"–").replace(/(^|[-\u2014/(\[{"\s])'/g,"$1‘").replace(/'/g,"’").replace(/(^|[-\u2014/(\[{\u2018\s])"/g,"$1“").replace(/"/g,"”").replace(/\.{3}/g,"…")};InlineLexer.prototype.mangle=function(text){if(!this.options.mangle)return text;var out="",l=text.length,i=0,ch;for(;i.5){ch="x"+ch.toString(16)}out+="&#"+ch+";"}return out};function Renderer(options){this.options=options||{}}Renderer.prototype.code=function(code,lang,escaped){if(this.options.highlight){var out=this.options.highlight(code,lang);if(out!=null&&out!==code){escaped=true;code=out}}if(!lang){return"
    "+(escaped?code:escape(code,true))+"\n
    "}return'
    '+(escaped?code:escape(code,true))+"\n
    \n"};Renderer.prototype.blockquote=function(quote){return"
    \n"+quote+"
    \n"};Renderer.prototype.html=function(html){return html};Renderer.prototype.heading=function(text,level,raw){return"'+text+"\n"};Renderer.prototype.hr=function(){return this.options.xhtml?"
    \n":"
    \n"};Renderer.prototype.list=function(body,ordered){var type=ordered?"ol":"ul";return"<"+type+">\n"+body+"\n"};Renderer.prototype.listitem=function(text){return"
  • "+text+"
  • \n"};Renderer.prototype.paragraph=function(text){return"

    "+text+"

    \n"};Renderer.prototype.table=function(header,body){return"\n"+"\n"+header+"\n"+"\n"+body+"\n"+"
    \n"};Renderer.prototype.tablerow=function(content){return"\n"+content+"\n"};Renderer.prototype.tablecell=function(content,flags){var type=flags.header?"th":"td";var tag=flags.align?"<"+type+' style="text-align:'+flags.align+'">':"<"+type+">";return tag+content+"\n"};Renderer.prototype.strong=function(text){return""+text+""};Renderer.prototype.em=function(text){return""+text+""};Renderer.prototype.codespan=function(text){return""+text+""};Renderer.prototype.br=function(){return this.options.xhtml?"
    ":"
    "};Renderer.prototype.del=function(text){return""+text+""};Renderer.prototype.link=function(href,title,text){if(this.options.sanitize){try{var prot=decodeURIComponent(unescape(href)).replace(/[^\w:]/g,"").toLowerCase()}catch(e){return""}if(prot.indexOf("javascript:")===0||prot.indexOf("vbscript:")===0){return""}}var out='
    ";return out};Renderer.prototype.image=function(href,title,text){var out=''+text+'":">";return out};Renderer.prototype.text=function(text){return text};function Parser(options){this.tokens=[];this.token=null;this.options=options||marked.defaults;this.options.renderer=this.options.renderer||new Renderer;this.renderer=this.options.renderer;this.renderer.options=this.options}Parser.parse=function(src,options,renderer){var parser=new Parser(options,renderer);return parser.parse(src)};Parser.prototype.parse=function(src){this.inline=new InlineLexer(src.links,this.options,this.renderer);this.tokens=src.reverse();var out="";while(this.next()){out+=this.tok()}return out};Parser.prototype.next=function(){return this.token=this.tokens.pop()};Parser.prototype.peek=function(){return this.tokens[this.tokens.length-1]||0};Parser.prototype.parseText=function(){var body=this.token.text;while(this.peek().type==="text"){body+="\n"+this.next().text}return this.inline.output(body)};Parser.prototype.tok=function(){switch(this.token.type){case"space":{return""}case"hr":{return this.renderer.hr()}case"heading":{return this.renderer.heading(this.inline.output(this.token.text),this.token.depth,this.token.text)}case"code":{return this.renderer.code(this.token.text,this.token.lang,this.token.escaped)}case"table":{var header="",body="",i,row,cell,flags,j;cell="";for(i=0;i/g,">").replace(/"/g,""").replace(/'/g,"'")}function unescape(html){return html.replace(/&([#\w]+);/g,function(_,n){n=n.toLowerCase();if(n==="colon")return":";if(n.charAt(0)==="#"){return n.charAt(1)==="x"?String.fromCharCode(parseInt(n.substring(2),16)):String.fromCharCode(+n.substring(1))}return""})}function replace(regex,opt){regex=regex.source;opt=opt||"";return function self(name,val){if(!name)return new RegExp(regex,opt);val=val.source||val;val=val.replace(/(^|[^\[])\^/g,"$1");regex=regex.replace(name,val);return self}}function noop(){}noop.exec=noop;function merge(obj){var i=1,target,key;for(;iAn error occured:

    "+escape(e.message+"",true)+"
    "}throw e}}marked.options=marked.setOptions=function(opt){merge(marked.defaults,opt);return marked};marked.defaults={gfm:true,tables:true,breaks:false,pedantic:false,sanitize:false,sanitizer:null,mangle:true,smartLists:false,silent:false,highlight:null,langPrefix:"lang-",smartypants:false,headerPrefix:"",renderer:new Renderer,xhtml:false};marked.Parser=Parser;marked.parser=Parser.parse;marked.Renderer=Renderer;marked.Lexer=Lexer;marked.lexer=Lexer.lex;marked.InlineLexer=InlineLexer;marked.inlineLexer=InlineLexer.output;marked.parse=marked;if(typeof module!=="undefined"&&typeof exports==="object"){module.exports=marked}else if(typeof define==="function"&&define.amd){define(function(){return marked})}else{this.marked=marked}}).call(function(){return this||(typeof window!=="undefined"?window:global)}()); \ No newline at end of file diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/notes.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/notes.html new file mode 100644 index 0000000..4fda869 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/notes.html @@ -0,0 +1,609 @@ + + + + + + reveal.js - Slide Notes + + + + + + +
    +
    Upcoming
    +
    +
    +

    Time Click to Reset

    +
    + 0:00 AM +
    +
    + 00:00:00 +
    +
    +
    + + +
    +
    + + +
    + + + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/notes.js b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/notes.js new file mode 100644 index 0000000..46bf5de --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/notes.js @@ -0,0 +1,145 @@ +/** + * Handles opening of and synchronization with the reveal.js + * notes window. + * + * Handshake process: + * 1. This window posts 'connect' to notes window + * - Includes URL of presentation to show + * 2. Notes window responds with 'connected' when it is available + * 3. This window proceeds to send the current presentation state + * to the notes window + */ +var RevealNotes = (function() { + + function openNotes( notesFilePath ) { + + if( !notesFilePath ) { + var jsFileLocation = document.querySelector('script[src$="notes.js"]').src; // this js file path + jsFileLocation = jsFileLocation.replace(/notes\.js(\?.*)?$/, ''); // the js folder path + notesFilePath = jsFileLocation + 'notes.html'; + } + + var notesPopup = window.open( notesFilePath, 'reveal.js - Notes', 'width=1100,height=700' ); + + /** + * Connect to the notes window through a postmessage handshake. + * Using postmessage enables us to work in situations where the + * origins differ, such as a presentation being opened from the + * file system. + */ + function connect() { + // Keep trying to connect until we get a 'connected' message back + var connectInterval = setInterval( function() { + notesPopup.postMessage( JSON.stringify( { + namespace: 'reveal-notes', + type: 'connect', + url: window.location.protocol + '//' + window.location.host + window.location.pathname + window.location.search, + state: Reveal.getState() + } ), '*' ); + }, 500 ); + + window.addEventListener( 'message', function( event ) { + var data = JSON.parse( event.data ); + if( data && data.namespace === 'reveal-notes' && data.type === 'connected' ) { + clearInterval( connectInterval ); + onConnected(); + } + } ); + } + + /** + * Posts the current slide data to the notes window + */ + function post(event) { + + var slideElement = Reveal.getCurrentSlide(), + notesElement = slideElement.querySelector( 'aside.notes' ); + + var messageData = { + namespace: 'reveal-notes', + type: 'state', + notes: '', + markdown: false, + whitespace: 'normal', + state: Reveal.getState() + }; + + // Look for notes defined in a fragment, if it is a fragmentshown event + if (event && event.hasOwnProperty('fragment')) { + var innerNotes = event.fragment.querySelector( 'aside.notes' ); + + if ( innerNotes) { + notesElement = innerNotes; + } + } + + // Look for notes defined in a slide attribute + if( slideElement.hasAttribute( 'data-notes' ) ) { + messageData.notes = slideElement.getAttribute( 'data-notes' ); + messageData.whitespace = 'pre-wrap'; + } + + // Look for notes defined in an aside element + if( notesElement ) { + messageData.notes = notesElement.innerHTML; + messageData.markdown = typeof notesElement.getAttribute( 'data-markdown' ) === 'string'; + } + + notesPopup.postMessage( JSON.stringify( messageData ), '*' ); + + } + + /** + * Called once we have established a connection to the notes + * window. 
+ */ + function onConnected() { + + // Monitor events that trigger a change in state + Reveal.addEventListener( 'slidechanged', post ); + Reveal.addEventListener( 'fragmentshown', post ); + Reveal.addEventListener( 'fragmenthidden', post ); + Reveal.addEventListener( 'overviewhidden', post ); + Reveal.addEventListener( 'overviewshown', post ); + Reveal.addEventListener( 'paused', post ); + Reveal.addEventListener( 'resumed', post ); + + // Post the initial state + post(); + + } + + connect(); + + } + + if( !/receiver/i.test( window.location.search ) ) { + + // If the there's a 'notes' query set, open directly + if( window.location.search.match( /(\?|\&)notes/gi ) !== null ) { + openNotes(); + } + + // Open the notes when the 's' key is hit + document.addEventListener( 'keydown', function( event ) { + // Disregard the event if the target is editable or a + // modifier is present + if ( document.querySelector( ':focus' ) !== null || event.shiftKey || event.altKey || event.ctrlKey || event.metaKey ) return; + + // Disregard the event if keyboard is disabled + if ( Reveal.getConfig().keyboard === false ) return; + + if( event.keyCode === 83 ) { + event.preventDefault(); + openNotes(); + } + }, false ); + + // Show our keyboard shortcut in the reveal.js help overlay + if( window.Reveal ) Reveal.registerKeyboardShortcut( 'S', 'Speaker notes view' ); + + } + + return { open: openNotes }; + +})(); diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/prism-yaml.min.js b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/prism-yaml.min.js new file mode 100644 index 0000000..f5c9e72 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/prism-yaml.min.js @@ -0,0 +1 @@ +Prism.languages.yaml={scalar:{pattern:/([\-:]\s*(![^\s]+)?[ \t]*[|>])[ \t]*(?:((?:\r?\n|\r)[ \t]+)[^\r\n]+(?:\3[^\r\n]+)*)/,lookbehind:!0,alias:"string"},comment:/#.*/,key:{pattern:/(\s*(?:^|[:\-,[{\r\n?])[ \t]*(![^\s]+)?[ \t]*)[^\r\n{[\]},#\s]+?(?=\s*:\s)/,lookbehind:!0,alias:"atrule"},directive:{pattern:/(^[ \t]*)%.+/m,lookbehind:!0,alias:"important"},datetime:{pattern:/([:\-,[{]\s*(![^\s]+)?[ \t]*)(\d{4}-\d\d?-\d\d?([tT]|[ \t]+)\d\d?:\d{2}:\d{2}(\.\d*)?[ \t]*(Z|[-+]\d\d?(:\d{2})?)?|\d{4}-\d{2}-\d{2}|\d\d?:\d{2}(:\d{2}(\.\d*)?)?)(?=[ \t]*($|,|]|}))/m,lookbehind:!0,alias:"number"},"boolean":{pattern:/([:\-,[{]\s*(![^\s]+)?[ \t]*)(true|false)[ \t]*(?=$|,|]|})/im,lookbehind:!0,alias:"important"},"null":{pattern:/([:\-,[{]\s*(![^\s]+)?[ \t]*)(null|~)[ \t]*(?=$|,|]|})/im,lookbehind:!0,alias:"important"},string:{pattern:/([:\-,[{]\s*(![^\s]+)?[ \t]*)("(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*')(?=[ \t]*($|,|]|}))/m,lookbehind:!0},number:{pattern:/([:\-,[{]\s*(![^\s]+)?[ \t]*)[+\-]?(0x[\da-f]+|0o[0-7]+|(\d+\.?\d*|\.?\d+)(e[\+\-]?\d+)?|\.inf|\.nan)[ \t]*(?=$|,|]|})/im,lookbehind:!0},tag:/![^\s]+/,important:/[&*][\w]+/,punctuation:/---|[:[\]{}\-,|>?]|\.\.\./}; \ No newline at end of file diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/prism.min.js b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/prism.min.js new file mode 100644 index 0000000..32cb868 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/prism.min.js @@ -0,0 +1,2 @@ +var _self="undefined"!=typeof window?window:"undefined"!=typeof WorkerGlobalScope&&self instanceof WorkerGlobalScope?self:{},Prism=function(){var e=/\blang(?:uage)?-(\w+)\b/i,t=0,a=_self.Prism={util:{encode:function(e){return e instanceof n?new 
n(e.type,a.util.encode(e.content),e.alias):"Array"===a.util.type(e)?e.map(a.util.encode):e.replace(/&/g,"&").replace(/e.length)break e;if(!(v instanceof r)){g.lastIndex=0;var w=g.exec(v),k=1;if(!w&&d&&y!=i.length-1){if(g.lastIndex=b,w=g.exec(e),!w)break;for(var P=w.index+(p?w[1].length:0),x=w.index+w[0].length,A=y,j=b,_=i.length;A<_&&j=j&&(++y,b=j);if(i[y]instanceof r||i[A-1].greedy)continue;k=A-y,v=e.slice(b,j),w.index-=b}if(w){p&&(m=w[1].length);var P=w.index+m,w=w[0].slice(m),x=P+w.length,C=v.slice(0,P),E=v.slice(x),N=[y,k];C&&N.push(C);var S=new r(l,c?a.tokenize(w,c):w,h,w,d);N.push(S),E&&N.push(E),Array.prototype.splice.apply(i,N)}}}}}return i},hooks:{all:{},add:function(e,t){var n=a.hooks.all;n[e]=n[e]||[],n[e].push(t)},run:function(e,t){var n=a.hooks.all[e];if(n&&n.length)for(var r,i=0;r=n[i++];)r(t)}}},n=a.Token=function(e,t,a,n,r){this.type=e,this.content=t,this.alias=a,this.length=0|(n||"").length,this.greedy=!!r};if(n.stringify=function(e,t,r){if("string"==typeof e)return e;if("Array"===a.util.type(e))return e.map(function(a){return n.stringify(a,t,e)}).join("");var i={type:e.type,content:n.stringify(e.content,t,r),tag:"span",classes:["token",e.type],attributes:{},language:t,parent:r};if("comment"==i.type&&(i.attributes.spellcheck="true"),e.alias){var s="Array"===a.util.type(e.alias)?e.alias:[e.alias];Array.prototype.push.apply(i.classes,s)}a.hooks.run("wrap",i);var l=Object.keys(i.attributes).map(function(e){return e+'="'+(i.attributes[e]||"").replace(/"/g,""")+'"'}).join(" ");return"<"+i.tag+' class="'+i.classes.join(" ")+'"'+(l?" "+l:"")+">"+i.content+""},!_self.document)return _self.addEventListener?(_self.addEventListener("message",function(e){var t=JSON.parse(e.data),n=t.language,r=t.code,i=t.immediateClose;_self.postMessage(a.highlight(r,a.languages[n],n)),i&&_self.close()},!1),_self.Prism):_self.Prism;var r=document.currentScript||[].slice.call(document.getElementsByTagName("script")).pop();return r&&(a.filename=r.src,document.addEventListener&&!r.hasAttribute("data-manual")&&("loading"!==document.readyState?window.requestAnimationFrame?window.requestAnimationFrame(a.highlightAll):window.setTimeout(a.highlightAll,16):document.addEventListener("DOMContentLoaded",a.highlightAll))),_self.Prism}();"undefined"!=typeof module&&module.exports&&(module.exports=Prism),"undefined"!=typeof 
global&&(global.Prism=Prism),Prism.languages.markup={comment://,prolog:/<\?[\w\W]+?\?>/,doctype://i,cdata://i,tag:{pattern:/<\/?(?!\d)[^\s>\/=$<]+(?:\s+[^\s>\/=]+(?:=(?:("|')(?:\\\1|\\?(?!\1)[\w\W])*\1|[^\s'">=]+))?)*\s*\/?>/i,inside:{tag:{pattern:/^<\/?[^\s>\/]+/i,inside:{punctuation:/^<\/?/,namespace:/^[^\s>\/:]+:/}},"attr-value":{pattern:/=(?:('|")[\w\W]*?(\1)|[^\s>]+)/i,inside:{punctuation:/[=>"']/}},punctuation:/\/?>/,"attr-name":{pattern:/[^\s>\/]+/,inside:{namespace:/^[^\s>\/:]+:/}}}},entity:/&#?[\da-z]{1,8};/i},Prism.hooks.add("wrap",function(e){"entity"===e.type&&(e.attributes.title=e.content.replace(/&/,"&"))}),Prism.languages.xml=Prism.languages.markup,Prism.languages.html=Prism.languages.markup,Prism.languages.mathml=Prism.languages.markup,Prism.languages.svg=Prism.languages.markup,Prism.languages.css={comment:/\/\*[\w\W]*?\*\//,atrule:{pattern:/@[\w-]+?.*?(;|(?=\s*\{))/i,inside:{rule:/@[\w-]+/}},url:/url\((?:(["'])(\\(?:\r\n|[\w\W])|(?!\1)[^\\\r\n])*\1|.*?)\)/i,selector:/[^\{\}\s][^\{\};]*?(?=\s*\{)/,string:{pattern:/("|')(\\(?:\r\n|[\w\W])|(?!\1)[^\\\r\n])*\1/,greedy:!0},property:/(\b|\B)[\w-]+(?=\s*:)/i,important:/\B!important\b/i,function:/[-a-z0-9]+(?=\()/i,punctuation:/[(){};:]/},Prism.languages.css.atrule.inside.rest=Prism.util.clone(Prism.languages.css),Prism.languages.markup&&(Prism.languages.insertBefore("markup","tag",{style:{pattern:/()[\w\W]*?(?=<\/style>)/i,lookbehind:!0,inside:Prism.languages.css,alias:"language-css"}}),Prism.languages.insertBefore("inside","attr-value",{"style-attr":{pattern:/\s*style=("|').*?\1/i,inside:{"attr-name":{pattern:/^\s*style/i,inside:Prism.languages.markup.tag.inside},punctuation:/^\s*=\s*['"]|['"]\s*$/,"attr-value":{pattern:/.+/i,inside:Prism.languages.css}},alias:"language-css"}},Prism.languages.markup.tag)),Prism.languages.clike={comment:[{pattern:/(^|[^\\])\/\*[\w\W]*?\*\//,lookbehind:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0}],string:{pattern:/(["'])(\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},"class-name":{pattern:/((?:\b(?:class|interface|extends|implements|trait|instanceof|new)\s+)|(?:catch\s+\())[a-z0-9_\.\\]+/i,lookbehind:!0,inside:{punctuation:/(\.|\\)/}},keyword:/\b(if|else|while|do|for|return|in|instanceof|function|new|try|throw|catch|finally|null|break|continue)\b/,boolean:/\b(true|false)\b/,function:/[a-z0-9_]+(?=\()/i,number:/\b-?(?:0x[\da-f]+|\d*\.?\d+(?:e[+-]?\d+)?)\b/i,operator:/--?|\+\+?|!=?=?|<=?|>=?|==?=?|&&?|\|\|?|\?|\*|\/|~|\^|%/,punctuation:/[{}[\];(),.:]/},Prism.languages.javascript=Prism.languages.extend("clike",{keyword:/\b(as|async|await|break|case|catch|class|const|continue|debugger|default|delete|do|else|enum|export|extends|finally|for|from|function|get|if|implements|import|in|instanceof|interface|let|new|null|of|package|private|protected|public|return|set|static|super|switch|this|throw|try|typeof|var|void|while|with|yield)\b/,number:/\b-?(0x[\dA-Fa-f]+|0b[01]+|0o[0-7]+|\d*\.?\d+([Ee][+-]?\d+)?|NaN|Infinity)\b/,function:/[_$a-zA-Z\xA0-\uFFFF][_$a-zA-Z0-9\xA0-\uFFFF]*(?=\()/i,operator:/--?|\+\+?|!=?=?|<=?|>=?|==?=?|&&?|\|\|?|\?|\*\*?|\/|~|\^|%|\.{3}/}),Prism.languages.insertBefore("javascript","keyword",{regex:{pattern:/(^|[^\/])\/(?!\/)(\[.+?]|\\.|[^\/\\\r\n])+\/[gimyu]{0,5}(?=\s*($|[\r\n,.;})]))/,lookbehind:!0,greedy:!0}}),Prism.languages.insertBefore("javascript","string",{"template-string":{pattern:/`(?:\\\\|\\?[^\\])*?`/,greedy:!0,inside:{interpolation:{pattern:/\$\{[^}]+\}/,inside:{"interpolation-punctuation":{pattern:/^\$\{|\}$/,alias:"punctuation"},rest:Prism.languages.javascript}},strin
g:/[\s\S]+/}}}),Prism.languages.markup&&Prism.languages.insertBefore("markup","tag",{script:{pattern:/()[\w\W]*?(?=<\/script>)/i,lookbehind:!0,inside:Prism.languages.javascript,alias:"language-javascript"}}),Prism.languages.js=Prism.languages.javascript,function(){"undefined"!=typeof self&&self.Prism&&self.document&&document.querySelector&&(self.Prism.fileHighlight=function(){var e={js:"javascript",py:"python",rb:"ruby",ps1:"powershell",psm1:"powershell",sh:"bash",bat:"batch",h:"c",tex:"latex"};Array.prototype.forEach&&Array.prototype.slice.call(document.querySelectorAll("pre[data-src]")).forEach(function(t){for(var a,n=t.getAttribute("data-src"),r=t,i=/\blang(?:uage)?-(?!\*)(\w+)\b/i;r&&!i.test(r.className);)r=r.parentNode;if(r&&(a=(t.className.match(i)||[,""])[1]),!a){var s=(n.match(/\.(\w+)$/)||[,""])[1];a=e[s]||s}var l=document.createElement("code");l.className="language-"+a,t.textContent="",l.textContent="Loading…",t.appendChild(l);var o=new XMLHttpRequest;o.open("GET",n,!0),o.onreadystatechange=function(){4==o.readyState&&(o.status<400&&o.responseText?(l.textContent=o.responseText,Prism.highlightElement(l)):o.status>=400?l.textContent="✖ Error "+o.status+" while fetching file: "+o.statusText:l.textContent="✖ Error: File does not exist or is empty")},o.send(null)})},document.addEventListener("DOMContentLoaded",self.Prism.fileHighlight))}(); +//# sourceMappingURL=prism.min.js.map \ No newline at end of file diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/reveal.js b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/reveal.js new file mode 100644 index 0000000..9251dc0 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/js/reveal.js @@ -0,0 +1,4961 @@ +/*! + * reveal.js + * http://lab.hakim.se/reveal-js + * MIT licensed + * + * Copyright (C) 2016 Hakim El Hattab, http://hakim.se + */ +(function( root, factory ) { + if( typeof define === 'function' && define.amd ) { + // AMD. Register as an anonymous module. + define( function() { + root.Reveal = factory(); + return root.Reveal; + } ); + } else if( typeof exports === 'object' ) { + // Node. Does not work with strict CommonJS. + module.exports = factory(); + } else { + // Browser globals. 
+ root.Reveal = factory(); + } +}( this, function() { + + 'use strict'; + + var Reveal; + + // The reveal.js version + var VERSION = '3.4.1'; + + var SLIDES_SELECTOR = '.slides section', + HORIZONTAL_SLIDES_SELECTOR = '.slides>section', + VERTICAL_SLIDES_SELECTOR = '.slides>section.present>section', + HOME_SLIDE_SELECTOR = '.slides>section:first-of-type', + UA = navigator.userAgent, + + // Configuration defaults, can be overridden at initialization time + config = { + + // The "normal" size of the presentation, aspect ratio will be preserved + // when the presentation is scaled to fit different resolutions + width: 960, + height: 700, + + // Factor of the display size that should remain empty around the content + margin: 0.04, + + // Bounds for smallest/largest possible scale to apply to content + minScale: 0.2, + maxScale: 2.0, + + // Display controls in the bottom right corner + controls: true, + + // Display a presentation progress bar + progress: true, + + // Display the page number of the current slide + slideNumber: false, + + // Push each slide change to the browser history + history: false, + + // Enable keyboard shortcuts for navigation + keyboard: true, + + // Optional function that blocks keyboard events when retuning false + keyboardCondition: null, + + // Enable the slide overview mode + overview: true, + + // Vertical centering of slides + center: true, + + // Enables touch navigation on devices with touch input + touch: true, + + // Loop the presentation + loop: false, + + // Change the presentation direction to be RTL + rtl: false, + + // Randomizes the order of slides each time the presentation loads + shuffle: false, + + // Turns fragments on and off globally + fragments: true, + + // Flags if the presentation is running in an embedded mode, + // i.e. contained within a limited portion of the screen + embedded: false, + + // Flags if we should show a help overlay when the question-mark + // key is pressed + help: true, + + // Flags if it should be possible to pause the presentation (blackout) + pause: true, + + // Flags if speaker notes should be visible to all viewers + showNotes: false, + + // Number of milliseconds between automatically proceeding to the + // next slide, disabled when set to 0, this value can be overwritten + // by using a data-autoslide attribute on your slides + autoSlide: 0, + + // Stop auto-sliding after user input + autoSlideStoppable: true, + + // Use this method for navigation when auto-sliding (defaults to navigateNext) + autoSlideMethod: null, + + // Enable slide navigation via mouse wheel + mouseWheel: false, + + // Apply a 3D roll to links on hover + rollingLinks: false, + + // Hides the address bar on mobile devices + hideAddressBar: true, + + // Opens links in an iframe preview overlay + previewLinks: false, + + // Exposes the reveal.js API through window.postMessage + postMessage: true, + + // Dispatches all reveal.js events to the parent window through postMessage + postMessageEvents: false, + + // Focuses body when page changes visibility to ensure keyboard shortcuts work + focusBodyOnPageVisibilityChange: true, + + // Transition style + transition: 'slide', // none/fade/slide/convex/concave/zoom + + // Transition speed + transitionSpeed: 'default', // default/fast/slow + + // Transition style for full page slide backgrounds + backgroundTransition: 'fade', // none/fade/slide/convex/concave/zoom + + // Parallax background image + parallaxBackgroundImage: '', // CSS syntax, e.g. 
"a.jpg" + + // Parallax background size + parallaxBackgroundSize: '', // CSS syntax, e.g. "3000px 2000px" + + // Amount of pixels to move the parallax background per slide step + parallaxBackgroundHorizontal: null, + parallaxBackgroundVertical: null, + + // The maximum number of pages a single slide can expand onto when printing + // to PDF, unlimited by default + pdfMaxPagesPerSlide: Number.POSITIVE_INFINITY, + + // Number of slides away from the current that are visible + viewDistance: 3, + + // Script dependencies to load + dependencies: [] + + }, + + // Flags if Reveal.initialize() has been called + initialized = false, + + // Flags if reveal.js is loaded (has dispatched the 'ready' event) + loaded = false, + + // Flags if the overview mode is currently active + overview = false, + + // Holds the dimensions of our overview slides, including margins + overviewSlideWidth = null, + overviewSlideHeight = null, + + // The horizontal and vertical index of the currently active slide + indexh, + indexv, + + // The previous and current slide HTML elements + previousSlide, + currentSlide, + + previousBackground, + + // Slides may hold a data-state attribute which we pick up and apply + // as a class to the body. This list contains the combined state of + // all current slides. + state = [], + + // The current scale of the presentation (see width/height config) + scale = 1, + + // CSS transform that is currently applied to the slides container, + // split into two groups + slidesTransform = { layout: '', overview: '' }, + + // Cached references to DOM elements + dom = {}, + + // Features supported by the browser, see #checkCapabilities() + features = {}, + + // Client is a mobile device, see #checkCapabilities() + isMobileDevice, + + // Client is a desktop Chrome, see #checkCapabilities() + isChrome, + + // Throttles mouse wheel navigation + lastMouseWheelStep = 0, + + // Delays updates to the URL due to a Chrome thumbnailer bug + writeURLTimeout = 0, + + // Flags if the interaction event listeners are bound + eventsAreBound = false, + + // The current auto-slide duration + autoSlide = 0, + + // Auto slide properties + autoSlidePlayer, + autoSlideTimeout = 0, + autoSlideStartTime = -1, + autoSlidePaused = false, + + // Holds information about the currently ongoing touch input + touch = { + startX: 0, + startY: 0, + startSpan: 0, + startCount: 0, + captured: false, + threshold: 40 + }, + + // Holds information about the keyboard shortcuts + keyboardShortcuts = { + 'N , SPACE': 'Next slide', + 'P': 'Previous slide', + '← , H': 'Navigate left', + '→ , L': 'Navigate right', + '↑ , K': 'Navigate up', + '↓ , J': 'Navigate down', + 'Home': 'First slide', + 'End': 'Last slide', + 'B , .': 'Pause', + 'F': 'Fullscreen', + 'ESC, O': 'Slide overview' + }; + + /** + * Starts up the presentation if the client is capable. 
+ */ + function initialize( options ) { + + // Make sure we only initialize once + if( initialized === true ) return; + + initialized = true; + + checkCapabilities(); + + if( !features.transforms2d && !features.transforms3d ) { + document.body.setAttribute( 'class', 'no-transforms' ); + + // Since JS won't be running any further, we load all lazy + // loading elements upfront + var images = toArray( document.getElementsByTagName( 'img' ) ), + iframes = toArray( document.getElementsByTagName( 'iframe' ) ); + + var lazyLoadable = images.concat( iframes ); + + for( var i = 0, len = lazyLoadable.length; i < len; i++ ) { + var element = lazyLoadable[i]; + if( element.getAttribute( 'data-src' ) ) { + element.setAttribute( 'src', element.getAttribute( 'data-src' ) ); + element.removeAttribute( 'data-src' ); + } + } + + // If the browser doesn't support core features we won't be + // using JavaScript to control the presentation + return; + } + + // Cache references to key DOM elements + dom.wrapper = document.querySelector( '.reveal' ); + dom.slides = document.querySelector( '.reveal .slides' ); + + // Force a layout when the whole page, incl fonts, has loaded + window.addEventListener( 'load', layout, false ); + + var query = Reveal.getQueryHash(); + + // Do not accept new dependencies via query config to avoid + // the potential of malicious script injection + if( typeof query['dependencies'] !== 'undefined' ) delete query['dependencies']; + + // Copy options over to our config object + extend( config, options ); + extend( config, query ); + + // Hide the address bar in mobile browsers + hideAddressBar(); + + // Loads the dependencies and continues to #start() once done + load(); + + } + + /** + * Inspect the client to see what it's capable of, this + * should only happens once per runtime. + */ + function checkCapabilities() { + + isMobileDevice = /(iphone|ipod|ipad|android)/gi.test( UA ); + isChrome = /chrome/i.test( UA ) && !/edge/i.test( UA ); + + var testElement = document.createElement( 'div' ); + + features.transforms3d = 'WebkitPerspective' in testElement.style || + 'MozPerspective' in testElement.style || + 'msPerspective' in testElement.style || + 'OPerspective' in testElement.style || + 'perspective' in testElement.style; + + features.transforms2d = 'WebkitTransform' in testElement.style || + 'MozTransform' in testElement.style || + 'msTransform' in testElement.style || + 'OTransform' in testElement.style || + 'transform' in testElement.style; + + features.requestAnimationFrameMethod = window.requestAnimationFrame || window.webkitRequestAnimationFrame || window.mozRequestAnimationFrame; + features.requestAnimationFrame = typeof features.requestAnimationFrameMethod === 'function'; + + features.canvas = !!document.createElement( 'canvas' ).getContext; + + // Transitions in the overview are disabled in desktop and + // Safari due to lag + features.overviewTransitions = !/Version\/[\d\.]+.*Safari/.test( UA ); + + // Flags if we should use zoom instead of transform to scale + // up slides. Zoom produces crisper results but has a lot of + // xbrowser quirks so we only use it in whitelsited browsers. + features.zoom = 'zoom' in testElement.style && !isMobileDevice && + ( isChrome || /Version\/[\d\.]+.*Safari/.test( UA ) ); + + } + + /** + * Loads the dependencies of reveal.js. Dependencies are + * defined via the configuration option 'dependencies' + * and will be loaded prior to starting/binding reveal.js. 
+ * Some dependencies may have an 'async' flag, if so they + * will load after reveal.js has been started up. + */ + function load() { + + var scripts = [], + scriptsAsync = [], + scriptsToPreload = 0; + + // Called once synchronous scripts finish loading + function proceed() { + if( scriptsAsync.length ) { + // Load asynchronous scripts + head.js.apply( null, scriptsAsync ); + } + + start(); + } + + function loadScript( s ) { + head.ready( s.src.match( /([\w\d_\-]*)\.?js$|[^\\\/]*$/i )[0], function() { + // Extension may contain callback functions + if( typeof s.callback === 'function' ) { + s.callback.apply( this ); + } + + if( --scriptsToPreload === 0 ) { + proceed(); + } + }); + } + + for( var i = 0, len = config.dependencies.length; i < len; i++ ) { + var s = config.dependencies[i]; + + // Load if there's no condition or the condition is truthy + if( !s.condition || s.condition() ) { + if( s.async ) { + scriptsAsync.push( s.src ); + } + else { + scripts.push( s.src ); + } + + loadScript( s ); + } + } + + if( scripts.length ) { + scriptsToPreload = scripts.length; + + // Load synchronous scripts + head.js.apply( null, scripts ); + } + else { + proceed(); + } + + } + + /** + * Starts up reveal.js by binding input events and navigating + * to the current URL deeplink if there is one. + */ + function start() { + + // Make sure we've got all the DOM elements we need + setupDOM(); + + // Listen to messages posted to this window + setupPostMessage(); + + // Prevent the slides from being scrolled out of view + setupScrollPrevention(); + + // Resets all vertical slides so that only the first is visible + resetVerticalSlides(); + + // Updates the presentation to match the current configuration values + configure(); + + // Read the initial hash + readURL(); + + // Update all backgrounds + updateBackground( true ); + + // Notify listeners that the presentation is ready but use a 1ms + // timeout to ensure it's not fired synchronously after #initialize() + setTimeout( function() { + // Enable transitions now that we're loaded + dom.slides.classList.remove( 'no-transition' ); + + loaded = true; + + dom.wrapper.classList.add( 'ready' ); + + dispatchEvent( 'ready', { + 'indexh': indexh, + 'indexv': indexv, + 'currentSlide': currentSlide + } ); + }, 1 ); + + // Special setup and config is required when printing to PDF + if( isPrintingPDF() ) { + removeEventListeners(); + + // The document needs to have loaded for the PDF layout + // measurements to be accurate + if( document.readyState === 'complete' ) { + setupPDF(); + } + else { + window.addEventListener( 'load', setupPDF ); + } + } + + } + + /** + * Finds and stores references to DOM elements which are + * required by the presentation. If a required element is + * not found, it is created. 
+ */ + function setupDOM() { + + // Prevent transitions while we're loading + dom.slides.classList.add( 'no-transition' ); + + // Background element + dom.background = createSingletonNode( dom.wrapper, 'div', 'backgrounds', null ); + + // Progress bar + dom.progress = createSingletonNode( dom.wrapper, 'div', 'progress', '' ); + dom.progressbar = dom.progress.querySelector( 'span' ); + + // Arrow controls + createSingletonNode( dom.wrapper, 'aside', 'controls', + '' + + '' + + '' + + '' ); + + // Slide number + dom.slideNumber = createSingletonNode( dom.wrapper, 'div', 'slide-number', '' ); + + // Element containing notes that are visible to the audience + dom.speakerNotes = createSingletonNode( dom.wrapper, 'div', 'speaker-notes', null ); + dom.speakerNotes.setAttribute( 'data-prevent-swipe', '' ); + dom.speakerNotes.setAttribute( 'tabindex', '0' ); + + // Overlay graphic which is displayed during the paused mode + createSingletonNode( dom.wrapper, 'div', 'pause-overlay', null ); + + // Cache references to elements + dom.controls = document.querySelector( '.reveal .controls' ); + + dom.wrapper.setAttribute( 'role', 'application' ); + + // There can be multiple instances of controls throughout the page + dom.controlsLeft = toArray( document.querySelectorAll( '.navigate-left' ) ); + dom.controlsRight = toArray( document.querySelectorAll( '.navigate-right' ) ); + dom.controlsUp = toArray( document.querySelectorAll( '.navigate-up' ) ); + dom.controlsDown = toArray( document.querySelectorAll( '.navigate-down' ) ); + dom.controlsPrev = toArray( document.querySelectorAll( '.navigate-prev' ) ); + dom.controlsNext = toArray( document.querySelectorAll( '.navigate-next' ) ); + + dom.statusDiv = createStatusDiv(); + } + + /** + * Creates a hidden div with role aria-live to announce the + * current slide content. Hide the div off-screen to make it + * available only to Assistive Technologies. + * + * @return {HTMLElement} + */ + function createStatusDiv() { + + var statusDiv = document.getElementById( 'aria-status-div' ); + if( !statusDiv ) { + statusDiv = document.createElement( 'div' ); + statusDiv.style.position = 'absolute'; + statusDiv.style.height = '1px'; + statusDiv.style.width = '1px'; + statusDiv.style.overflow = 'hidden'; + statusDiv.style.clip = 'rect( 1px, 1px, 1px, 1px )'; + statusDiv.setAttribute( 'id', 'aria-status-div' ); + statusDiv.setAttribute( 'aria-live', 'polite' ); + statusDiv.setAttribute( 'aria-atomic','true' ); + dom.wrapper.appendChild( statusDiv ); + } + return statusDiv; + + } + + /** + * Converts the given HTML element into a string of text + * that can be announced to a screen reader. Hidden + * elements are excluded. + */ + function getStatusText( node ) { + + var text = ''; + + // Text node + if( node.nodeType === 3 ) { + text += node.textContent; + } + // Element node + else if( node.nodeType === 1 ) { + + var isAriaHidden = node.getAttribute( 'aria-hidden' ); + var isDisplayHidden = window.getComputedStyle( node )['display'] === 'none'; + if( isAriaHidden !== 'true' && !isDisplayHidden ) { + + toArray( node.childNodes ).forEach( function( child ) { + text += getStatusText( child ); + } ); + + } + + } + + return text; + + } + + /** + * Configures the presentation for printing to a static + * PDF. 
+ */ + function setupPDF() { + + var slideSize = getComputedSlideSize( window.innerWidth, window.innerHeight ); + + // Dimensions of the PDF pages + var pageWidth = Math.floor( slideSize.width * ( 1 + config.margin ) ), + pageHeight = Math.floor( slideSize.height * ( 1 + config.margin ) ); + + // Dimensions of slides within the pages + var slideWidth = slideSize.width, + slideHeight = slideSize.height; + + // Let the browser know what page size we want to print + injectStyleSheet( '@page{size:'+ pageWidth +'px '+ pageHeight +'px; margin: 0 0 -1px 0;}' ); + + // Limit the size of certain elements to the dimensions of the slide + injectStyleSheet( '.reveal section>img, .reveal section>video, .reveal section>iframe{max-width: '+ slideWidth +'px; max-height:'+ slideHeight +'px}' ); + + document.body.classList.add( 'print-pdf' ); + document.body.style.width = pageWidth + 'px'; + document.body.style.height = pageHeight + 'px'; + + // Add each slide's index as attributes on itself, we need these + // indices to generate slide numbers below + toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ).forEach( function( hslide, h ) { + hslide.setAttribute( 'data-index-h', h ); + + if( hslide.classList.contains( 'stack' ) ) { + toArray( hslide.querySelectorAll( 'section' ) ).forEach( function( vslide, v ) { + vslide.setAttribute( 'data-index-h', h ); + vslide.setAttribute( 'data-index-v', v ); + } ); + } + } ); + + // Slide and slide background layout + toArray( dom.wrapper.querySelectorAll( SLIDES_SELECTOR ) ).forEach( function( slide ) { + + // Vertical stacks are not centred since their section + // children will be + if( slide.classList.contains( 'stack' ) === false ) { + // Center the slide inside of the page, giving the slide some margin + var left = ( pageWidth - slideWidth ) / 2, + top = ( pageHeight - slideHeight ) / 2; + + var contentHeight = slide.scrollHeight; + var numberOfPages = Math.max( Math.ceil( contentHeight / pageHeight ), 1 ); + + // Adhere to configured pages per slide limit + numberOfPages = Math.min( numberOfPages, config.pdfMaxPagesPerSlide ); + + // Center slides vertically + if( numberOfPages === 1 && config.center || slide.classList.contains( 'center' ) ) { + top = Math.max( ( pageHeight - contentHeight ) / 2, 0 ); + } + + // Wrap the slide in a page element and hide its overflow + // so that no page ever flows onto another + var page = document.createElement( 'div' ); + page.className = 'pdf-page'; + page.style.height = ( pageHeight * numberOfPages ) + 'px'; + slide.parentNode.insertBefore( page, slide ); + page.appendChild( slide ); + + // Position the slide inside of the page + slide.style.left = left + 'px'; + slide.style.top = top + 'px'; + slide.style.width = slideWidth + 'px'; + + if( slide.slideBackgroundElement ) { + page.insertBefore( slide.slideBackgroundElement, slide ); + } + + // Inject notes if `showNotes` is enabled + if( config.showNotes ) { + + // Are there notes for this slide? + var notes = getSlideNotes( slide ); + if( notes ) { + + var notesSpacing = 8; + var notesLayout = typeof config.showNotes === 'string' ? 
config.showNotes : 'inline'; + var notesElement = document.createElement( 'div' ); + notesElement.classList.add( 'speaker-notes' ); + notesElement.classList.add( 'speaker-notes-pdf' ); + notesElement.setAttribute( 'data-layout', notesLayout ); + notesElement.innerHTML = notes; + + if( notesLayout === 'separate-page' ) { + page.parentNode.insertBefore( notesElement, page.nextSibling ); + } + else { + notesElement.style.left = notesSpacing + 'px'; + notesElement.style.bottom = notesSpacing + 'px'; + notesElement.style.width = ( pageWidth - notesSpacing*2 ) + 'px'; + page.appendChild( notesElement ); + } + + } + + } + + // Inject slide numbers if `slideNumbers` are enabled + if( config.slideNumber ) { + var slideNumberH = parseInt( slide.getAttribute( 'data-index-h' ), 10 ) + 1, + slideNumberV = parseInt( slide.getAttribute( 'data-index-v' ), 10 ) + 1; + + var numberElement = document.createElement( 'div' ); + numberElement.classList.add( 'slide-number' ); + numberElement.classList.add( 'slide-number-pdf' ); + numberElement.innerHTML = formatSlideNumber( slideNumberH, '.', slideNumberV ); + page.appendChild( numberElement ); + } + } + + } ); + + // Show all fragments + toArray( dom.wrapper.querySelectorAll( SLIDES_SELECTOR + ' .fragment' ) ).forEach( function( fragment ) { + fragment.classList.add( 'visible' ); + } ); + + // Notify subscribers that the PDF layout is good to go + dispatchEvent( 'pdf-ready' ); + + } + + /** + * This is an unfortunate necessity. Some actions – such as + * an input field being focused in an iframe or using the + * keyboard to expand text selection beyond the bounds of + * a slide – can trigger our content to be pushed out of view. + * This scrolling can not be prevented by hiding overflow in + * CSS (we already do) so we have to resort to repeatedly + * checking if the slides have been offset :( + */ + function setupScrollPrevention() { + + setInterval( function() { + if( dom.wrapper.scrollTop !== 0 || dom.wrapper.scrollLeft !== 0 ) { + dom.wrapper.scrollTop = 0; + dom.wrapper.scrollLeft = 0; + } + }, 1000 ); + + } + + /** + * Creates an HTML element and returns a reference to it. + * If the element already exists the existing instance will + * be returned. + * + * @param {HTMLElement} container + * @param {string} tagname + * @param {string} classname + * @param {string} innerHTML + * + * @return {HTMLElement} + */ + function createSingletonNode( container, tagname, classname, innerHTML ) { + + // Find all nodes matching the description + var nodes = container.querySelectorAll( '.' + classname ); + + // Check all matches to find one which is a direct child of + // the specified container + for( var i = 0; i < nodes.length; i++ ) { + var testNode = nodes[i]; + if( testNode.parentNode === container ) { + return testNode; + } + } + + // If no node was found, create it now + var node = document.createElement( tagname ); + node.classList.add( classname ); + if( typeof innerHTML === 'string' ) { + node.innerHTML = innerHTML; + } + container.appendChild( node ); + + return node; + + } + + /** + * Creates the slide background elements and appends them + * to the background container. One element is created per + * slide no matter if the given slide has visible background. 
+ */ + function createBackgrounds() { + + var printMode = isPrintingPDF(); + + // Clear prior backgrounds + dom.background.innerHTML = ''; + dom.background.classList.add( 'no-transition' ); + + // Iterate over all horizontal slides + toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ).forEach( function( slideh ) { + + var backgroundStack = createBackground( slideh, dom.background ); + + // Iterate over all vertical slides + toArray( slideh.querySelectorAll( 'section' ) ).forEach( function( slidev ) { + + createBackground( slidev, backgroundStack ); + + backgroundStack.classList.add( 'stack' ); + + } ); + + } ); + + // Add parallax background if specified + if( config.parallaxBackgroundImage ) { + + dom.background.style.backgroundImage = 'url("' + config.parallaxBackgroundImage + '")'; + dom.background.style.backgroundSize = config.parallaxBackgroundSize; + + // Make sure the below properties are set on the element - these properties are + // needed for proper transitions to be set on the element via CSS. To remove + // annoying background slide-in effect when the presentation starts, apply + // these properties after short time delay + setTimeout( function() { + dom.wrapper.classList.add( 'has-parallax-background' ); + }, 1 ); + + } + else { + + dom.background.style.backgroundImage = ''; + dom.wrapper.classList.remove( 'has-parallax-background' ); + + } + + } + + /** + * Creates a background for the given slide. + * + * @param {HTMLElement} slide + * @param {HTMLElement} container The element that the background + * should be appended to + * @return {HTMLElement} New background div + */ + function createBackground( slide, container ) { + + var data = { + background: slide.getAttribute( 'data-background' ), + backgroundSize: slide.getAttribute( 'data-background-size' ), + backgroundImage: slide.getAttribute( 'data-background-image' ), + backgroundVideo: slide.getAttribute( 'data-background-video' ), + backgroundIframe: slide.getAttribute( 'data-background-iframe' ), + backgroundColor: slide.getAttribute( 'data-background-color' ), + backgroundRepeat: slide.getAttribute( 'data-background-repeat' ), + backgroundPosition: slide.getAttribute( 'data-background-position' ), + backgroundTransition: slide.getAttribute( 'data-background-transition' ) + }; + + var element = document.createElement( 'div' ); + + // Carry over custom classes from the slide to the background + element.className = 'slide-background ' + slide.className.replace( /present|past|future/, '' ); + + if( data.background ) { + // Auto-wrap image urls in url(...) + if( /^(http|file|\/\/)/gi.test( data.background ) || /\.(svg|png|jpg|jpeg|gif|bmp)$/gi.test( data.background ) ) { + slide.setAttribute( 'data-background-image', data.background ); + } + else { + element.style.background = data.background; + } + } + + // Create a hash for this combination of background settings. + // This is used to determine when two slide backgrounds are + // the same. 
+ if( data.background || data.backgroundColor || data.backgroundImage || data.backgroundVideo || data.backgroundIframe ) { + element.setAttribute( 'data-background-hash', data.background + + data.backgroundSize + + data.backgroundImage + + data.backgroundVideo + + data.backgroundIframe + + data.backgroundColor + + data.backgroundRepeat + + data.backgroundPosition + + data.backgroundTransition ); + } + + // Additional and optional background properties + if( data.backgroundSize ) element.style.backgroundSize = data.backgroundSize; + if( data.backgroundColor ) element.style.backgroundColor = data.backgroundColor; + if( data.backgroundRepeat ) element.style.backgroundRepeat = data.backgroundRepeat; + if( data.backgroundPosition ) element.style.backgroundPosition = data.backgroundPosition; + if( data.backgroundTransition ) element.setAttribute( 'data-background-transition', data.backgroundTransition ); + + container.appendChild( element ); + + // If backgrounds are being recreated, clear old classes + slide.classList.remove( 'has-dark-background' ); + slide.classList.remove( 'has-light-background' ); + + slide.slideBackgroundElement = element; + + // If this slide has a background color, add a class that + // signals if it is light or dark. If the slide has no background + // color, no class will be set + var computedBackgroundStyle = window.getComputedStyle( element ); + if( computedBackgroundStyle && computedBackgroundStyle.backgroundColor ) { + var rgb = colorToRgb( computedBackgroundStyle.backgroundColor ); + + // Ignore fully transparent backgrounds. Some browsers return + // rgba(0,0,0,0) when reading the computed background color of + // an element with no background + if( rgb && rgb.a !== 0 ) { + if( colorBrightness( computedBackgroundStyle.backgroundColor ) < 128 ) { + slide.classList.add( 'has-dark-background' ); + } + else { + slide.classList.add( 'has-light-background' ); + } + } + } + + return element; + + } + + /** + * Registers a listener to postMessage events, this makes it + * possible to call all reveal.js API methods from another + * window. For example: + * + * revealWindow.postMessage( JSON.stringify({ + * method: 'slide', + * args: [ 2 ] + * }), '*' ); + */ + function setupPostMessage() { + + if( config.postMessage ) { + window.addEventListener( 'message', function ( event ) { + var data = event.data; + + // Make sure we're dealing with JSON + if( typeof data === 'string' && data.charAt( 0 ) === '{' && data.charAt( data.length - 1 ) === '}' ) { + data = JSON.parse( data ); + + // Check if the requested method can be found + if( data.method && typeof Reveal[data.method] === 'function' ) { + Reveal[data.method].apply( Reveal, data.args ); + } + } + }, false ); + } + + } + + /** + * Applies the configuration settings from the config + * object. May be called multiple times. 
+ * + * @param {object} options + */ + function configure( options ) { + + var numberOfSlides = dom.wrapper.querySelectorAll( SLIDES_SELECTOR ).length; + + dom.wrapper.classList.remove( config.transition ); + + // New config options may be passed when this method + // is invoked through the API after initialization + if( typeof options === 'object' ) extend( config, options ); + + // Force linear transition based on browser capabilities + if( features.transforms3d === false ) config.transition = 'linear'; + + dom.wrapper.classList.add( config.transition ); + + dom.wrapper.setAttribute( 'data-transition-speed', config.transitionSpeed ); + dom.wrapper.setAttribute( 'data-background-transition', config.backgroundTransition ); + + dom.controls.style.display = config.controls ? 'block' : 'none'; + dom.progress.style.display = config.progress ? 'block' : 'none'; + dom.slideNumber.style.display = config.slideNumber && !isPrintingPDF() ? 'block' : 'none'; + + if( config.shuffle ) { + shuffle(); + } + + if( config.rtl ) { + dom.wrapper.classList.add( 'rtl' ); + } + else { + dom.wrapper.classList.remove( 'rtl' ); + } + + if( config.center ) { + dom.wrapper.classList.add( 'center' ); + } + else { + dom.wrapper.classList.remove( 'center' ); + } + + // Exit the paused mode if it was configured off + if( config.pause === false ) { + resume(); + } + + if( config.showNotes ) { + dom.speakerNotes.classList.add( 'visible' ); + dom.speakerNotes.setAttribute( 'data-layout', typeof config.showNotes === 'string' ? config.showNotes : 'inline' ); + } + else { + dom.speakerNotes.classList.remove( 'visible' ); + } + + if( config.mouseWheel ) { + document.addEventListener( 'DOMMouseScroll', onDocumentMouseScroll, false ); // FF + document.addEventListener( 'mousewheel', onDocumentMouseScroll, false ); + } + else { + document.removeEventListener( 'DOMMouseScroll', onDocumentMouseScroll, false ); // FF + document.removeEventListener( 'mousewheel', onDocumentMouseScroll, false ); + } + + // Rolling 3D links + if( config.rollingLinks ) { + enableRollingLinks(); + } + else { + disableRollingLinks(); + } + + // Iframe link previews + if( config.previewLinks ) { + enablePreviewLinks(); + } + else { + disablePreviewLinks(); + enablePreviewLinks( '[data-preview-link]' ); + } + + // Remove existing auto-slide controls + if( autoSlidePlayer ) { + autoSlidePlayer.destroy(); + autoSlidePlayer = null; + } + + // Generate auto-slide controls if needed + if( numberOfSlides > 1 && config.autoSlide && config.autoSlideStoppable && features.canvas && features.requestAnimationFrame ) { + autoSlidePlayer = new Playback( dom.wrapper, function() { + return Math.min( Math.max( ( Date.now() - autoSlideStartTime ) / autoSlide, 0 ), 1 ); + } ); + + autoSlidePlayer.on( 'click', onAutoSlidePlayerClick ); + autoSlidePaused = false; + } + + // When fragments are turned off they should be visible + if( config.fragments === false ) { + toArray( dom.slides.querySelectorAll( '.fragment' ) ).forEach( function( element ) { + element.classList.add( 'visible' ); + element.classList.remove( 'current-fragment' ); + } ); + } + + sync(); + + } + + /** + * Binds all event listeners. 
+ */ + function addEventListeners() { + + eventsAreBound = true; + + window.addEventListener( 'hashchange', onWindowHashChange, false ); + window.addEventListener( 'resize', onWindowResize, false ); + + if( config.touch ) { + dom.wrapper.addEventListener( 'touchstart', onTouchStart, false ); + dom.wrapper.addEventListener( 'touchmove', onTouchMove, false ); + dom.wrapper.addEventListener( 'touchend', onTouchEnd, false ); + + // Support pointer-style touch interaction as well + if( window.navigator.pointerEnabled ) { + // IE 11 uses un-prefixed version of pointer events + dom.wrapper.addEventListener( 'pointerdown', onPointerDown, false ); + dom.wrapper.addEventListener( 'pointermove', onPointerMove, false ); + dom.wrapper.addEventListener( 'pointerup', onPointerUp, false ); + } + else if( window.navigator.msPointerEnabled ) { + // IE 10 uses prefixed version of pointer events + dom.wrapper.addEventListener( 'MSPointerDown', onPointerDown, false ); + dom.wrapper.addEventListener( 'MSPointerMove', onPointerMove, false ); + dom.wrapper.addEventListener( 'MSPointerUp', onPointerUp, false ); + } + } + + if( config.keyboard ) { + document.addEventListener( 'keydown', onDocumentKeyDown, false ); + document.addEventListener( 'keypress', onDocumentKeyPress, false ); + } + + if( config.progress && dom.progress ) { + dom.progress.addEventListener( 'click', onProgressClicked, false ); + } + + if( config.focusBodyOnPageVisibilityChange ) { + var visibilityChange; + + if( 'hidden' in document ) { + visibilityChange = 'visibilitychange'; + } + else if( 'msHidden' in document ) { + visibilityChange = 'msvisibilitychange'; + } + else if( 'webkitHidden' in document ) { + visibilityChange = 'webkitvisibilitychange'; + } + + if( visibilityChange ) { + document.addEventListener( visibilityChange, onPageVisibilityChange, false ); + } + } + + // Listen to both touch and click events, in case the device + // supports both + var pointerEvents = [ 'touchstart', 'click' ]; + + // Only support touch for Android, fixes double navigations in + // stock browser + if( UA.match( /android/gi ) ) { + pointerEvents = [ 'touchstart' ]; + } + + pointerEvents.forEach( function( eventName ) { + dom.controlsLeft.forEach( function( el ) { el.addEventListener( eventName, onNavigateLeftClicked, false ); } ); + dom.controlsRight.forEach( function( el ) { el.addEventListener( eventName, onNavigateRightClicked, false ); } ); + dom.controlsUp.forEach( function( el ) { el.addEventListener( eventName, onNavigateUpClicked, false ); } ); + dom.controlsDown.forEach( function( el ) { el.addEventListener( eventName, onNavigateDownClicked, false ); } ); + dom.controlsPrev.forEach( function( el ) { el.addEventListener( eventName, onNavigatePrevClicked, false ); } ); + dom.controlsNext.forEach( function( el ) { el.addEventListener( eventName, onNavigateNextClicked, false ); } ); + } ); + + } + + /** + * Unbinds all event listeners. 
+ */ + function removeEventListeners() { + + eventsAreBound = false; + + document.removeEventListener( 'keydown', onDocumentKeyDown, false ); + document.removeEventListener( 'keypress', onDocumentKeyPress, false ); + window.removeEventListener( 'hashchange', onWindowHashChange, false ); + window.removeEventListener( 'resize', onWindowResize, false ); + + dom.wrapper.removeEventListener( 'touchstart', onTouchStart, false ); + dom.wrapper.removeEventListener( 'touchmove', onTouchMove, false ); + dom.wrapper.removeEventListener( 'touchend', onTouchEnd, false ); + + // IE11 + if( window.navigator.pointerEnabled ) { + dom.wrapper.removeEventListener( 'pointerdown', onPointerDown, false ); + dom.wrapper.removeEventListener( 'pointermove', onPointerMove, false ); + dom.wrapper.removeEventListener( 'pointerup', onPointerUp, false ); + } + // IE10 + else if( window.navigator.msPointerEnabled ) { + dom.wrapper.removeEventListener( 'MSPointerDown', onPointerDown, false ); + dom.wrapper.removeEventListener( 'MSPointerMove', onPointerMove, false ); + dom.wrapper.removeEventListener( 'MSPointerUp', onPointerUp, false ); + } + + if ( config.progress && dom.progress ) { + dom.progress.removeEventListener( 'click', onProgressClicked, false ); + } + + [ 'touchstart', 'click' ].forEach( function( eventName ) { + dom.controlsLeft.forEach( function( el ) { el.removeEventListener( eventName, onNavigateLeftClicked, false ); } ); + dom.controlsRight.forEach( function( el ) { el.removeEventListener( eventName, onNavigateRightClicked, false ); } ); + dom.controlsUp.forEach( function( el ) { el.removeEventListener( eventName, onNavigateUpClicked, false ); } ); + dom.controlsDown.forEach( function( el ) { el.removeEventListener( eventName, onNavigateDownClicked, false ); } ); + dom.controlsPrev.forEach( function( el ) { el.removeEventListener( eventName, onNavigatePrevClicked, false ); } ); + dom.controlsNext.forEach( function( el ) { el.removeEventListener( eventName, onNavigateNextClicked, false ); } ); + } ); + + } + + /** + * Extend object a with the properties of object b. + * If there's a conflict, object b takes precedence. + * + * @param {object} a + * @param {object} b + */ + function extend( a, b ) { + + for( var i in b ) { + a[ i ] = b[ i ]; + } + + } + + /** + * Converts the target object to an array. + * + * @param {object} o + * @return {object[]} + */ + function toArray( o ) { + + return Array.prototype.slice.call( o ); + + } + + /** + * Utility for deserializing a value. + * + * @param {*} value + * @return {*} + */ + function deserialize( value ) { + + if( typeof value === 'string' ) { + if( value === 'null' ) return null; + else if( value === 'true' ) return true; + else if( value === 'false' ) return false; + else if( value.match( /^\d+$/ ) ) return parseFloat( value ); + } + + return value; + + } + + /** + * Measures the distance in pixels between point a + * and point b. + * + * @param {object} a point with x/y properties + * @param {object} b point with x/y properties + * + * @return {number} + */ + function distanceBetween( a, b ) { + + var dx = a.x - b.x, + dy = a.y - b.y; + + return Math.sqrt( dx*dx + dy*dy ); + + } + + /** + * Applies a CSS transform to the target element. 
+ * + * @param {HTMLElement} element + * @param {string} transform + */ + function transformElement( element, transform ) { + + element.style.WebkitTransform = transform; + element.style.MozTransform = transform; + element.style.msTransform = transform; + element.style.transform = transform; + + } + + /** + * Applies CSS transforms to the slides container. The container + * is transformed from two separate sources: layout and the overview + * mode. + * + * @param {object} transforms + */ + function transformSlides( transforms ) { + + // Pick up new transforms from arguments + if( typeof transforms.layout === 'string' ) slidesTransform.layout = transforms.layout; + if( typeof transforms.overview === 'string' ) slidesTransform.overview = transforms.overview; + + // Apply the transforms to the slides container + if( slidesTransform.layout ) { + transformElement( dom.slides, slidesTransform.layout + ' ' + slidesTransform.overview ); + } + else { + transformElement( dom.slides, slidesTransform.overview ); + } + + } + + /** + * Injects the given CSS styles into the DOM. + * + * @param {string} value + */ + function injectStyleSheet( value ) { + + var tag = document.createElement( 'style' ); + tag.type = 'text/css'; + if( tag.styleSheet ) { + tag.styleSheet.cssText = value; + } + else { + tag.appendChild( document.createTextNode( value ) ); + } + document.getElementsByTagName( 'head' )[0].appendChild( tag ); + + } + + /** + * Find the closest parent that matches the given + * selector. + * + * @param {HTMLElement} target The child element + * @param {String} selector The CSS selector to match + * the parents against + * + * @return {HTMLElement} The matched parent or null + * if no matching parent was found + */ + function closestParent( target, selector ) { + + var parent = target.parentNode; + + while( parent ) { + + // There's some overhead doing this each time, we don't + // want to rewrite the element prototype but should still + // be enough to feature detect once at startup... + var matchesMethod = parent.matches || parent.matchesSelector || parent.msMatchesSelector; + + // If we find a match, we're all set + if( matchesMethod && matchesMethod.call( parent, selector ) ) { + return parent; + } + + // Keep searching + parent = parent.parentNode; + + } + + return null; + + } + + /** + * Converts various color input formats to an {r:0,g:0,b:0} object. 
+ * + * @param {string} color The string representation of a color + * @example + * colorToRgb('#000'); + * @example + * colorToRgb('#000000'); + * @example + * colorToRgb('rgb(0,0,0)'); + * @example + * colorToRgb('rgba(0,0,0)'); + * + * @return {{r: number, g: number, b: number, [a]: number}|null} + */ + function colorToRgb( color ) { + + var hex3 = color.match( /^#([0-9a-f]{3})$/i ); + if( hex3 && hex3[1] ) { + hex3 = hex3[1]; + return { + r: parseInt( hex3.charAt( 0 ), 16 ) * 0x11, + g: parseInt( hex3.charAt( 1 ), 16 ) * 0x11, + b: parseInt( hex3.charAt( 2 ), 16 ) * 0x11 + }; + } + + var hex6 = color.match( /^#([0-9a-f]{6})$/i ); + if( hex6 && hex6[1] ) { + hex6 = hex6[1]; + return { + r: parseInt( hex6.substr( 0, 2 ), 16 ), + g: parseInt( hex6.substr( 2, 2 ), 16 ), + b: parseInt( hex6.substr( 4, 2 ), 16 ) + }; + } + + var rgb = color.match( /^rgb\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$/i ); + if( rgb ) { + return { + r: parseInt( rgb[1], 10 ), + g: parseInt( rgb[2], 10 ), + b: parseInt( rgb[3], 10 ) + }; + } + + var rgba = color.match( /^rgba\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\,\s*([\d]+|[\d]*.[\d]+)\s*\)$/i ); + if( rgba ) { + return { + r: parseInt( rgba[1], 10 ), + g: parseInt( rgba[2], 10 ), + b: parseInt( rgba[3], 10 ), + a: parseFloat( rgba[4] ) + }; + } + + return null; + + } + + /** + * Calculates brightness on a scale of 0-255. + * + * @param {string} color See colorToRgb for supported formats. + * @see {@link colorToRgb} + */ + function colorBrightness( color ) { + + if( typeof color === 'string' ) color = colorToRgb( color ); + + if( color ) { + return ( color.r * 299 + color.g * 587 + color.b * 114 ) / 1000; + } + + return null; + + } + + /** + * Returns the remaining height within the parent of the + * target element. + * + * remaining height = [ configured parent height ] - [ current parent height ] + * + * @param {HTMLElement} element + * @param {number} [height] + */ + function getRemainingHeight( element, height ) { + + height = height || 0; + + if( element ) { + var newHeight, oldHeight = element.style.height; + + // Change the .stretch element height to 0 in order find the height of all + // the other elements + element.style.height = '0px'; + newHeight = height - element.parentNode.offsetHeight; + + // Restore the old height, just in case + element.style.height = oldHeight + 'px'; + + return newHeight; + } + + return height; + + } + + /** + * Checks if this instance is being used to print a PDF. + */ + function isPrintingPDF() { + + return ( /print-pdf/gi ).test( window.location.search ); + + } + + /** + * Hides the address bar if we're on a mobile device. + */ + function hideAddressBar() { + + if( config.hideAddressBar && isMobileDevice ) { + // Events that should trigger the address bar to hide + window.addEventListener( 'load', removeAddressBar, false ); + window.addEventListener( 'orientationchange', removeAddressBar, false ); + } + + } + + /** + * Causes the address bar to hide on mobile devices, + * more vertical space ftw. + */ + function removeAddressBar() { + + setTimeout( function() { + window.scrollTo( 0, 1 ); + }, 10 ); + + } + + /** + * Dispatches an event of the specified type from the + * reveal DOM element. + */ + function dispatchEvent( type, args ) { + + var event = document.createEvent( 'HTMLEvents', 1, 2 ); + event.initEvent( type, true, true ); + extend( event, args ); + dom.wrapper.dispatchEvent( event ); + + // If we're in an iframe, post each reveal.js event to the + // parent window. 
Used by the notes plugin + if( config.postMessageEvents && window.parent !== window.self ) { + window.parent.postMessage( JSON.stringify({ namespace: 'reveal', eventName: type, state: getState() }), '*' ); + } + + } + + /** + * Wrap all links in 3D goodness. + */ + function enableRollingLinks() { + + if( features.transforms3d && !( 'msPerspective' in document.body.style ) ) { + var anchors = dom.wrapper.querySelectorAll( SLIDES_SELECTOR + ' a' ); + + for( var i = 0, len = anchors.length; i < len; i++ ) { + var anchor = anchors[i]; + + if( anchor.textContent && !anchor.querySelector( '*' ) && ( !anchor.className || !anchor.classList.contains( anchor, 'roll' ) ) ) { + var span = document.createElement('span'); + span.setAttribute('data-title', anchor.text); + span.innerHTML = anchor.innerHTML; + + anchor.classList.add( 'roll' ); + anchor.innerHTML = ''; + anchor.appendChild(span); + } + } + } + + } + + /** + * Unwrap all 3D links. + */ + function disableRollingLinks() { + + var anchors = dom.wrapper.querySelectorAll( SLIDES_SELECTOR + ' a.roll' ); + + for( var i = 0, len = anchors.length; i < len; i++ ) { + var anchor = anchors[i]; + var span = anchor.querySelector( 'span' ); + + if( span ) { + anchor.classList.remove( 'roll' ); + anchor.innerHTML = span.innerHTML; + } + } + + } + + /** + * Bind preview frame links. + * + * @param {string} [selector=a] - selector for anchors + */ + function enablePreviewLinks( selector ) { + + var anchors = toArray( document.querySelectorAll( selector ? selector : 'a' ) ); + + anchors.forEach( function( element ) { + if( /^(http|www)/gi.test( element.getAttribute( 'href' ) ) ) { + element.addEventListener( 'click', onPreviewLinkClicked, false ); + } + } ); + + } + + /** + * Unbind preview frame links. + */ + function disablePreviewLinks() { + + var anchors = toArray( document.querySelectorAll( 'a' ) ); + + anchors.forEach( function( element ) { + if( /^(http|www)/gi.test( element.getAttribute( 'href' ) ) ) { + element.removeEventListener( 'click', onPreviewLinkClicked, false ); + } + } ); + + } + + /** + * Opens a preview window for the target URL. + * + * @param {string} url - url for preview iframe src + */ + function showPreview( url ) { + + closeOverlay(); + + dom.overlay = document.createElement( 'div' ); + dom.overlay.classList.add( 'overlay' ); + dom.overlay.classList.add( 'overlay-preview' ); + dom.wrapper.appendChild( dom.overlay ); + + dom.overlay.innerHTML = [ + '
<header>', + '<a class="close" href="#"><span class="icon"></span></a>', + '<a class="external" href="'+ url +'" target="_blank"><span class="icon"></span></a>', + '</header>', + '<div class="spinner"></div>', + '<div class="viewport">', + '<iframe src="'+ url +'"></iframe>', + '<small class="viewport-inner">', + '<span class="x-frame-error">Unable to load iframe. This is likely due to the site\'s policy (x-frame-options).</span>', + '</small>', + '</div>
    ' + ].join(''); + + dom.overlay.querySelector( 'iframe' ).addEventListener( 'load', function( event ) { + dom.overlay.classList.add( 'loaded' ); + }, false ); + + dom.overlay.querySelector( '.close' ).addEventListener( 'click', function( event ) { + closeOverlay(); + event.preventDefault(); + }, false ); + + dom.overlay.querySelector( '.external' ).addEventListener( 'click', function( event ) { + closeOverlay(); + }, false ); + + setTimeout( function() { + dom.overlay.classList.add( 'visible' ); + }, 1 ); + + } + + /** + * Opens an overlay window with help material. + */ + function showHelp() { + + if( config.help ) { + + closeOverlay(); + + dom.overlay = document.createElement( 'div' ); + dom.overlay.classList.add( 'overlay' ); + dom.overlay.classList.add( 'overlay-help' ); + dom.wrapper.appendChild( dom.overlay ); + + var html = '
<p class="title">Keyboard Shortcuts</p><br/>'; + + html += '<table><th>KEY</th><th>ACTION</th>'; + for( var key in keyboardShortcuts ) { + html += '<tr><td>' + key + '</td><td>' + keyboardShortcuts[ key ] + '</td></tr>'; + } + + html += '</table>'; + + dom.overlay.innerHTML = [ + '<header>', + '<a class="close" href="#"><span class="icon"></span></a>', + '</header>', + '<div class="viewport">', + '<div class="viewport-inner">'+ html +'</div>', + '</div>
    ' + ].join(''); + + dom.overlay.querySelector( '.close' ).addEventListener( 'click', function( event ) { + closeOverlay(); + event.preventDefault(); + }, false ); + + setTimeout( function() { + dom.overlay.classList.add( 'visible' ); + }, 1 ); + + } + + } + + /** + * Closes any currently open overlay. + */ + function closeOverlay() { + + if( dom.overlay ) { + dom.overlay.parentNode.removeChild( dom.overlay ); + dom.overlay = null; + } + + } + + /** + * Applies JavaScript-controlled layout rules to the + * presentation. + */ + function layout() { + + if( dom.wrapper && !isPrintingPDF() ) { + + var size = getComputedSlideSize(); + + // Layout the contents of the slides + layoutSlideContents( config.width, config.height ); + + dom.slides.style.width = size.width + 'px'; + dom.slides.style.height = size.height + 'px'; + + // Determine scale of content to fit within available space + scale = Math.min( size.presentationWidth / size.width, size.presentationHeight / size.height ); + + // Respect max/min scale settings + scale = Math.max( scale, config.minScale ); + scale = Math.min( scale, config.maxScale ); + + // Don't apply any scaling styles if scale is 1 + if( scale === 1 ) { + dom.slides.style.zoom = ''; + dom.slides.style.left = ''; + dom.slides.style.top = ''; + dom.slides.style.bottom = ''; + dom.slides.style.right = ''; + transformSlides( { layout: '' } ); + } + else { + // Prefer zoom for scaling up so that content remains crisp. + // Don't use zoom to scale down since that can lead to shifts + // in text layout/line breaks. + if( scale > 1 && features.zoom ) { + dom.slides.style.zoom = scale; + dom.slides.style.left = ''; + dom.slides.style.top = ''; + dom.slides.style.bottom = ''; + dom.slides.style.right = ''; + transformSlides( { layout: '' } ); + } + // Apply scale transform as a fallback + else { + dom.slides.style.zoom = ''; + dom.slides.style.left = '50%'; + dom.slides.style.top = '50%'; + dom.slides.style.bottom = 'auto'; + dom.slides.style.right = 'auto'; + transformSlides( { layout: 'translate(-50%, -50%) scale('+ scale +')' } ); + } + } + + // Select all slides, vertical and horizontal + var slides = toArray( dom.wrapper.querySelectorAll( SLIDES_SELECTOR ) ); + + for( var i = 0, len = slides.length; i < len; i++ ) { + var slide = slides[ i ]; + + // Don't bother updating invisible slides + if( slide.style.display === 'none' ) { + continue; + } + + if( config.center || slide.classList.contains( 'center' ) ) { + // Vertical stacks are not centred since their section + // children will be + if( slide.classList.contains( 'stack' ) ) { + slide.style.top = 0; + } + else { + slide.style.top = Math.max( ( size.height - slide.scrollHeight ) / 2, 0 ) + 'px'; + } + } + else { + slide.style.top = ''; + } + + } + + updateProgress(); + updateParallax(); + + } + + } + + /** + * Applies layout logic to the contents of all slides in + * the presentation. 
+ * + * @param {string|number} width + * @param {string|number} height + */ + function layoutSlideContents( width, height ) { + + // Handle sizing of elements with the 'stretch' class + toArray( dom.slides.querySelectorAll( 'section > .stretch' ) ).forEach( function( element ) { + + // Determine how much vertical space we can use + var remainingHeight = getRemainingHeight( element, height ); + + // Consider the aspect ratio of media elements + if( /(img|video)/gi.test( element.nodeName ) ) { + var nw = element.naturalWidth || element.videoWidth, + nh = element.naturalHeight || element.videoHeight; + + var es = Math.min( width / nw, remainingHeight / nh ); + + element.style.width = ( nw * es ) + 'px'; + element.style.height = ( nh * es ) + 'px'; + + } + else { + element.style.width = width + 'px'; + element.style.height = remainingHeight + 'px'; + } + + } ); + + } + + /** + * Calculates the computed pixel size of our slides. These + * values are based on the width and height configuration + * options. + * + * @param {number} [presentationWidth=dom.wrapper.offsetWidth] + * @param {number} [presentationHeight=dom.wrapper.offsetHeight] + */ + function getComputedSlideSize( presentationWidth, presentationHeight ) { + + var size = { + // Slide size + width: config.width, + height: config.height, + + // Presentation size + presentationWidth: presentationWidth || dom.wrapper.offsetWidth, + presentationHeight: presentationHeight || dom.wrapper.offsetHeight + }; + + // Reduce available space by margin + size.presentationWidth -= ( size.presentationWidth * config.margin ); + size.presentationHeight -= ( size.presentationHeight * config.margin ); + + // Slide width may be a percentage of available width + if( typeof size.width === 'string' && /%$/.test( size.width ) ) { + size.width = parseInt( size.width, 10 ) / 100 * size.presentationWidth; + } + + // Slide height may be a percentage of available height + if( typeof size.height === 'string' && /%$/.test( size.height ) ) { + size.height = parseInt( size.height, 10 ) / 100 * size.presentationHeight; + } + + return size; + + } + + /** + * Stores the vertical index of a stack so that the same + * vertical slide can be selected when navigating to and + * from the stack. + * + * @param {HTMLElement} stack The vertical stack element + * @param {string|number} [v=0] Index to memorize + */ + function setPreviousVerticalIndex( stack, v ) { + + if( typeof stack === 'object' && typeof stack.setAttribute === 'function' ) { + stack.setAttribute( 'data-previous-indexv', v || 0 ); + } + + } + + /** + * Retrieves the vertical index which was stored using + * #setPreviousVerticalIndex() or 0 if no previous index + * exists. + * + * @param {HTMLElement} stack The vertical stack element + */ + function getPreviousVerticalIndex( stack ) { + + if( typeof stack === 'object' && typeof stack.setAttribute === 'function' && stack.classList.contains( 'stack' ) ) { + // Prefer manually defined start-indexv + var attributeName = stack.hasAttribute( 'data-start-indexv' ) ? 'data-start-indexv' : 'data-previous-indexv'; + + return parseInt( stack.getAttribute( attributeName ) || 0, 10 ); + } + + return 0; + + } + + /** + * Displays the overview of slides (quick nav) by scaling + * down and arranging all slide elements. 
+ */ + function activateOverview() { + + // Only proceed if enabled in config + if( config.overview && !isOverview() ) { + + overview = true; + + dom.wrapper.classList.add( 'overview' ); + dom.wrapper.classList.remove( 'overview-deactivating' ); + + if( features.overviewTransitions ) { + setTimeout( function() { + dom.wrapper.classList.add( 'overview-animated' ); + }, 1 ); + } + + // Don't auto-slide while in overview mode + cancelAutoSlide(); + + // Move the backgrounds element into the slide container to + // that the same scaling is applied + dom.slides.appendChild( dom.background ); + + // Clicking on an overview slide navigates to it + toArray( dom.wrapper.querySelectorAll( SLIDES_SELECTOR ) ).forEach( function( slide ) { + if( !slide.classList.contains( 'stack' ) ) { + slide.addEventListener( 'click', onOverviewSlideClicked, true ); + } + } ); + + // Calculate slide sizes + var margin = 70; + var slideSize = getComputedSlideSize(); + overviewSlideWidth = slideSize.width + margin; + overviewSlideHeight = slideSize.height + margin; + + // Reverse in RTL mode + if( config.rtl ) { + overviewSlideWidth = -overviewSlideWidth; + } + + updateSlidesVisibility(); + layoutOverview(); + updateOverview(); + + layout(); + + // Notify observers of the overview showing + dispatchEvent( 'overviewshown', { + 'indexh': indexh, + 'indexv': indexv, + 'currentSlide': currentSlide + } ); + + } + + } + + /** + * Uses CSS transforms to position all slides in a grid for + * display inside of the overview mode. + */ + function layoutOverview() { + + // Layout slides + toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ).forEach( function( hslide, h ) { + hslide.setAttribute( 'data-index-h', h ); + transformElement( hslide, 'translate3d(' + ( h * overviewSlideWidth ) + 'px, 0, 0)' ); + + if( hslide.classList.contains( 'stack' ) ) { + + toArray( hslide.querySelectorAll( 'section' ) ).forEach( function( vslide, v ) { + vslide.setAttribute( 'data-index-h', h ); + vslide.setAttribute( 'data-index-v', v ); + + transformElement( vslide, 'translate3d(0, ' + ( v * overviewSlideHeight ) + 'px, 0)' ); + } ); + + } + } ); + + // Layout slide backgrounds + toArray( dom.background.childNodes ).forEach( function( hbackground, h ) { + transformElement( hbackground, 'translate3d(' + ( h * overviewSlideWidth ) + 'px, 0, 0)' ); + + toArray( hbackground.querySelectorAll( '.slide-background' ) ).forEach( function( vbackground, v ) { + transformElement( vbackground, 'translate3d(0, ' + ( v * overviewSlideHeight ) + 'px, 0)' ); + } ); + } ); + + } + + /** + * Moves the overview viewport to the current slides. + * Called each time the current slide changes. + */ + function updateOverview() { + + transformSlides( { + overview: [ + 'translateX('+ ( -indexh * overviewSlideWidth ) +'px)', + 'translateY('+ ( -indexv * overviewSlideHeight ) +'px)', + 'translateZ('+ ( window.innerWidth < 400 ? -1000 : -2500 ) +'px)' + ].join( ' ' ) + } ); + + } + + /** + * Exits the slide overview and enters the currently + * active slide. 
+ */ + function deactivateOverview() { + + // Only proceed if enabled in config + if( config.overview ) { + + overview = false; + + dom.wrapper.classList.remove( 'overview' ); + dom.wrapper.classList.remove( 'overview-animated' ); + + // Temporarily add a class so that transitions can do different things + // depending on whether they are exiting/entering overview, or just + // moving from slide to slide + dom.wrapper.classList.add( 'overview-deactivating' ); + + setTimeout( function () { + dom.wrapper.classList.remove( 'overview-deactivating' ); + }, 1 ); + + // Move the background element back out + dom.wrapper.appendChild( dom.background ); + + // Clean up changes made to slides + toArray( dom.wrapper.querySelectorAll( SLIDES_SELECTOR ) ).forEach( function( slide ) { + transformElement( slide, '' ); + + slide.removeEventListener( 'click', onOverviewSlideClicked, true ); + } ); + + // Clean up changes made to backgrounds + toArray( dom.background.querySelectorAll( '.slide-background' ) ).forEach( function( background ) { + transformElement( background, '' ); + } ); + + transformSlides( { overview: '' } ); + + slide( indexh, indexv ); + + layout(); + + cueAutoSlide(); + + // Notify observers of the overview hiding + dispatchEvent( 'overviewhidden', { + 'indexh': indexh, + 'indexv': indexv, + 'currentSlide': currentSlide + } ); + + } + } + + /** + * Toggles the slide overview mode on and off. + * + * @param {Boolean} [override] Flag which overrides the + * toggle logic and forcibly sets the desired state. True means + * overview is open, false means it's closed. + */ + function toggleOverview( override ) { + + if( typeof override === 'boolean' ) { + override ? activateOverview() : deactivateOverview(); + } + else { + isOverview() ? deactivateOverview() : activateOverview(); + } + + } + + /** + * Checks if the overview is currently active. + * + * @return {Boolean} true if the overview is active, + * false otherwise + */ + function isOverview() { + + return overview; + + } + + /** + * Checks if the current or specified slide is vertical + * (nested within another slide). + * + * @param {HTMLElement} [slide=currentSlide] The slide to check + * orientation of + * @return {Boolean} + */ + function isVerticalSlide( slide ) { + + // Prefer slide argument, otherwise use current slide + slide = slide ? slide : currentSlide; + + return slide && slide.parentNode && !!slide.parentNode.nodeName.match( /section/i ); + + } + + /** + * Handling the fullscreen functionality via the fullscreen API + * + * @see http://fullscreen.spec.whatwg.org/ + * @see https://developer.mozilla.org/en-US/docs/DOM/Using_fullscreen_mode + */ + function enterFullscreen() { + + var element = document.documentElement; + + // Check which implementation is available + var requestMethod = element.requestFullscreen || + element.webkitRequestFullscreen || + element.webkitRequestFullScreen || + element.mozRequestFullScreen || + element.msRequestFullscreen; + + if( requestMethod ) { + requestMethod.apply( element ); + } + + } + + /** + * Enters the paused mode which fades everything on screen to + * black. + */ + function pause() { + + if( config.pause ) { + var wasPaused = dom.wrapper.classList.contains( 'paused' ); + + cancelAutoSlide(); + dom.wrapper.classList.add( 'paused' ); + + if( wasPaused === false ) { + dispatchEvent( 'paused' ); + } + } + + } + + /** + * Exits from the paused mode. 
+ */ + function resume() { + + var wasPaused = dom.wrapper.classList.contains( 'paused' ); + dom.wrapper.classList.remove( 'paused' ); + + cueAutoSlide(); + + if( wasPaused ) { + dispatchEvent( 'resumed' ); + } + + } + + /** + * Toggles the paused mode on and off. + */ + function togglePause( override ) { + + if( typeof override === 'boolean' ) { + override ? pause() : resume(); + } + else { + isPaused() ? resume() : pause(); + } + + } + + /** + * Checks if we are currently in the paused mode. + * + * @return {Boolean} + */ + function isPaused() { + + return dom.wrapper.classList.contains( 'paused' ); + + } + + /** + * Toggles the auto slide mode on and off. + * + * @param {Boolean} [override] Flag which sets the desired state. + * True means autoplay starts, false means it stops. + */ + + function toggleAutoSlide( override ) { + + if( typeof override === 'boolean' ) { + override ? resumeAutoSlide() : pauseAutoSlide(); + } + + else { + autoSlidePaused ? resumeAutoSlide() : pauseAutoSlide(); + } + + } + + /** + * Checks if the auto slide mode is currently on. + * + * @return {Boolean} + */ + function isAutoSliding() { + + return !!( autoSlide && !autoSlidePaused ); + + } + + /** + * Steps from the current point in the presentation to the + * slide which matches the specified horizontal and vertical + * indices. + * + * @param {number} [h=indexh] Horizontal index of the target slide + * @param {number} [v=indexv] Vertical index of the target slide + * @param {number} [f] Index of a fragment within the + * target slide to activate + * @param {number} [o] Origin for use in multimaster environments + */ + function slide( h, v, f, o ) { + + // Remember where we were at before + previousSlide = currentSlide; + + // Query all horizontal slides in the deck + var horizontalSlides = dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ); + + // Abort if there are no slides + if( horizontalSlides.length === 0 ) return; + + // If no vertical index is specified and the upcoming slide is a + // stack, resume at its previous vertical index + if( v === undefined && !isOverview() ) { + v = getPreviousVerticalIndex( horizontalSlides[ h ] ); + } + + // If we were on a vertical stack, remember what vertical index + // it was on so we can resume at the same position when returning + if( previousSlide && previousSlide.parentNode && previousSlide.parentNode.classList.contains( 'stack' ) ) { + setPreviousVerticalIndex( previousSlide.parentNode, indexv ); + } + + // Remember the state before this slide + var stateBefore = state.concat(); + + // Reset the state array + state.length = 0; + + var indexhBefore = indexh || 0, + indexvBefore = indexv || 0; + + // Activate and transition to the new slide + indexh = updateSlides( HORIZONTAL_SLIDES_SELECTOR, h === undefined ? indexh : h ); + indexv = updateSlides( VERTICAL_SLIDES_SELECTOR, v === undefined ? indexv : v ); + + // Update the visibility of slides now that the indices have changed + updateSlidesVisibility(); + + layout(); + + // Apply the new state + stateLoop: for( var i = 0, len = state.length; i < len; i++ ) { + // Check if this state existed on the previous slide. 
If it + // did, we will avoid adding it repeatedly + for( var j = 0; j < stateBefore.length; j++ ) { + if( stateBefore[j] === state[i] ) { + stateBefore.splice( j, 1 ); + continue stateLoop; + } + } + + document.documentElement.classList.add( state[i] ); + + // Dispatch custom event matching the state's name + dispatchEvent( state[i] ); + } + + // Clean up the remains of the previous state + while( stateBefore.length ) { + document.documentElement.classList.remove( stateBefore.pop() ); + } + + // Update the overview if it's currently active + if( isOverview() ) { + updateOverview(); + } + + // Find the current horizontal slide and any possible vertical slides + // within it + var currentHorizontalSlide = horizontalSlides[ indexh ], + currentVerticalSlides = currentHorizontalSlide.querySelectorAll( 'section' ); + + // Store references to the previous and current slides + currentSlide = currentVerticalSlides[ indexv ] || currentHorizontalSlide; + + // Show fragment, if specified + if( typeof f !== 'undefined' ) { + navigateFragment( f ); + } + + // Dispatch an event if the slide changed + var slideChanged = ( indexh !== indexhBefore || indexv !== indexvBefore ); + if( slideChanged ) { + dispatchEvent( 'slidechanged', { + 'indexh': indexh, + 'indexv': indexv, + 'previousSlide': previousSlide, + 'currentSlide': currentSlide, + 'origin': o + } ); + } + else { + // Ensure that the previous slide is never the same as the current + previousSlide = null; + } + + // Solves an edge case where the previous slide maintains the + // 'present' class when navigating between adjacent vertical + // stacks + if( previousSlide ) { + previousSlide.classList.remove( 'present' ); + previousSlide.setAttribute( 'aria-hidden', 'true' ); + + // Reset all slides upon navigate to home + // Issue: #285 + if ( dom.wrapper.querySelector( HOME_SLIDE_SELECTOR ).classList.contains( 'present' ) ) { + // Launch async task + setTimeout( function () { + var slides = toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR + '.stack') ), i; + for( i in slides ) { + if( slides[i] ) { + // Reset stack + setPreviousVerticalIndex( slides[i], 0 ); + } + } + }, 0 ); + } + } + + // Handle embedded content + if( slideChanged || !previousSlide ) { + stopEmbeddedContent( previousSlide ); + startEmbeddedContent( currentSlide ); + } + + // Announce the current slide contents, for screen readers + dom.statusDiv.textContent = getStatusText( currentSlide ); + + updateControls(); + updateProgress(); + updateBackground(); + updateParallax(); + updateSlideNumber(); + updateNotes(); + + // Update the URL hash + writeURL(); + + cueAutoSlide(); + + } + + /** + * Syncs the presentation with the current DOM. Useful + * when new slides or control elements are added or when + * the configuration has changed. 
+ */ + function sync() { + + // Subscribe to input + removeEventListeners(); + addEventListeners(); + + // Force a layout to make sure the current config is accounted for + layout(); + + // Reflect the current autoSlide value + autoSlide = config.autoSlide; + + // Start auto-sliding if it's enabled + cueAutoSlide(); + + // Re-create the slide backgrounds + createBackgrounds(); + + // Write the current hash to the URL + writeURL(); + + sortAllFragments(); + + updateControls(); + updateProgress(); + updateBackground( true ); + updateSlideNumber(); + updateSlidesVisibility(); + updateNotes(); + + formatEmbeddedContent(); + startEmbeddedContent( currentSlide ); + + if( isOverview() ) { + layoutOverview(); + } + + } + + /** + * Resets all vertical slides so that only the first + * is visible. + */ + function resetVerticalSlides() { + + var horizontalSlides = toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ); + horizontalSlides.forEach( function( horizontalSlide ) { + + var verticalSlides = toArray( horizontalSlide.querySelectorAll( 'section' ) ); + verticalSlides.forEach( function( verticalSlide, y ) { + + if( y > 0 ) { + verticalSlide.classList.remove( 'present' ); + verticalSlide.classList.remove( 'past' ); + verticalSlide.classList.add( 'future' ); + verticalSlide.setAttribute( 'aria-hidden', 'true' ); + } + + } ); + + } ); + + } + + /** + * Sorts and formats all of fragments in the + * presentation. + */ + function sortAllFragments() { + + var horizontalSlides = toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ); + horizontalSlides.forEach( function( horizontalSlide ) { + + var verticalSlides = toArray( horizontalSlide.querySelectorAll( 'section' ) ); + verticalSlides.forEach( function( verticalSlide, y ) { + + sortFragments( verticalSlide.querySelectorAll( '.fragment' ) ); + + } ); + + if( verticalSlides.length === 0 ) sortFragments( horizontalSlide.querySelectorAll( '.fragment' ) ); + + } ); + + } + + /** + * Randomly shuffles all slides in the deck. + */ + function shuffle() { + + var slides = toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ); + + slides.forEach( function( slide ) { + + // Insert this slide next to another random slide. This may + // cause the slide to insert before itself but that's fine. + dom.slides.insertBefore( slide, slides[ Math.floor( Math.random() * slides.length ) ] ); + + } ); + + } + + /** + * Updates one dimension of slides by showing the slide + * with the specified index. + * + * @param {string} selector A CSS selector that will fetch + * the group of slides we are working with + * @param {number} index The index of the slide that should be + * shown + * + * @return {number} The index of the slide that is now shown, + * might differ from the passed in index if it was out of + * bounds. + */ + function updateSlides( selector, index ) { + + // Select all slides and convert the NodeList result to + // an array + var slides = toArray( dom.wrapper.querySelectorAll( selector ) ), + slidesLength = slides.length; + + var printMode = isPrintingPDF(); + + if( slidesLength ) { + + // Should the index loop? 
+ if( config.loop ) { + index %= slidesLength; + + if( index < 0 ) { + index = slidesLength + index; + } + } + + // Enforce max and minimum index bounds + index = Math.max( Math.min( index, slidesLength - 1 ), 0 ); + + for( var i = 0; i < slidesLength; i++ ) { + var element = slides[i]; + + var reverse = config.rtl && !isVerticalSlide( element ); + + element.classList.remove( 'past' ); + element.classList.remove( 'present' ); + element.classList.remove( 'future' ); + + // http://www.w3.org/html/wg/drafts/html/master/editing.html#the-hidden-attribute + element.setAttribute( 'hidden', '' ); + element.setAttribute( 'aria-hidden', 'true' ); + + // If this element contains vertical slides + if( element.querySelector( 'section' ) ) { + element.classList.add( 'stack' ); + } + + // If we're printing static slides, all slides are "present" + if( printMode ) { + element.classList.add( 'present' ); + continue; + } + + if( i < index ) { + // Any element previous to index is given the 'past' class + element.classList.add( reverse ? 'future' : 'past' ); + + if( config.fragments ) { + var pastFragments = toArray( element.querySelectorAll( '.fragment' ) ); + + // Show all fragments on prior slides + while( pastFragments.length ) { + var pastFragment = pastFragments.pop(); + pastFragment.classList.add( 'visible' ); + pastFragment.classList.remove( 'current-fragment' ); + } + } + } + else if( i > index ) { + // Any element subsequent to index is given the 'future' class + element.classList.add( reverse ? 'past' : 'future' ); + + if( config.fragments ) { + var futureFragments = toArray( element.querySelectorAll( '.fragment.visible' ) ); + + // No fragments in future slides should be visible ahead of time + while( futureFragments.length ) { + var futureFragment = futureFragments.pop(); + futureFragment.classList.remove( 'visible' ); + futureFragment.classList.remove( 'current-fragment' ); + } + } + } + } + + // Mark the current slide as present + slides[index].classList.add( 'present' ); + slides[index].removeAttribute( 'hidden' ); + slides[index].removeAttribute( 'aria-hidden' ); + + // If this slide has a state associated with it, add it + // onto the current state of the deck + var slideState = slides[index].getAttribute( 'data-state' ); + if( slideState ) { + state = state.concat( slideState.split( ' ' ) ); + } + + } + else { + // Since there are no slides we can't be anywhere beyond the + // zeroth index + index = 0; + } + + return index; + + } + + /** + * Optimization method; hide all slides that are far away + * from the present slide. + */ + function updateSlidesVisibility() { + + // Select all slides and convert the NodeList result to + // an array + var horizontalSlides = toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ), + horizontalSlidesLength = horizontalSlides.length, + distanceX, + distanceY; + + if( horizontalSlidesLength && typeof indexh !== 'undefined' ) { + + // The number of steps away from the present slide that will + // be visible + var viewDistance = isOverview() ? 10 : config.viewDistance; + + // Limit view distance on weaker devices + if( isMobileDevice ) { + viewDistance = isOverview() ? 
6 : 2; + } + + // All slides need to be visible when exporting to PDF + if( isPrintingPDF() ) { + viewDistance = Number.MAX_VALUE; + } + + for( var x = 0; x < horizontalSlidesLength; x++ ) { + var horizontalSlide = horizontalSlides[x]; + + var verticalSlides = toArray( horizontalSlide.querySelectorAll( 'section' ) ), + verticalSlidesLength = verticalSlides.length; + + // Determine how far away this slide is from the present + distanceX = Math.abs( ( indexh || 0 ) - x ) || 0; + + // If the presentation is looped, distance should measure + // 1 between the first and last slides + if( config.loop ) { + distanceX = Math.abs( ( ( indexh || 0 ) - x ) % ( horizontalSlidesLength - viewDistance ) ) || 0; + } + + // Show the horizontal slide if it's within the view distance + if( distanceX < viewDistance ) { + showSlide( horizontalSlide ); + } + else { + hideSlide( horizontalSlide ); + } + + if( verticalSlidesLength ) { + + var oy = getPreviousVerticalIndex( horizontalSlide ); + + for( var y = 0; y < verticalSlidesLength; y++ ) { + var verticalSlide = verticalSlides[y]; + + distanceY = x === ( indexh || 0 ) ? Math.abs( ( indexv || 0 ) - y ) : Math.abs( y - oy ); + + if( distanceX + distanceY < viewDistance ) { + showSlide( verticalSlide ); + } + else { + hideSlide( verticalSlide ); + } + } + + } + } + + } + + } + + /** + * Pick up notes from the current slide and display them + * to the viewer. + * + * @see {@link config.showNotes} + */ + function updateNotes() { + + if( config.showNotes && dom.speakerNotes && currentSlide && !isPrintingPDF() ) { + + dom.speakerNotes.innerHTML = getSlideNotes() || ''; + + } + + } + + /** + * Updates the progress bar to reflect the current slide. + */ + function updateProgress() { + + // Update progress if enabled + if( config.progress && dom.progressbar ) { + + dom.progressbar.style.width = getProgress() * dom.wrapper.offsetWidth + 'px'; + + } + + } + + /** + * Updates the slide number div to reflect the current slide. + * + * The following slide number formats are available: + * "h.v": horizontal . vertical slide number (default) + * "h/v": horizontal / vertical slide number + * "c": flattened slide number + * "c/t": flattened slide number / total slides + */ + function updateSlideNumber() { + + // Update slide number if enabled + if( config.slideNumber && dom.slideNumber ) { + + var value = []; + var format = 'h.v'; + + // Check if a custom number format is available + if( typeof config.slideNumber === 'string' ) { + format = config.slideNumber; + } + + switch( format ) { + case 'c': + value.push( getSlidePastCount() + 1 ); + break; + case 'c/t': + value.push( getSlidePastCount() + 1, '/', getTotalSlides() ); + break; + case 'h/v': + value.push( indexh + 1 ); + if( isVerticalSlide() ) value.push( '/', indexv + 1 ); + break; + default: + value.push( indexh + 1 ); + if( isVerticalSlide() ) value.push( '.', indexv + 1 ); + } + + dom.slideNumber.innerHTML = formatSlideNumber( value[0], value[1], value[2] ); + } + + } + + /** + * Applies HTML formatting to a slide number before it's + * written to the DOM. + * + * @param {number} a Current slide + * @param {string} delimiter Character to separate slide numbers + * @param {(number|*)} b Total slides + * @return {string} HTML string fragment + */ + function formatSlideNumber( a, delimiter, b ) { + + if( typeof b === 'number' && !isNaN( b ) ) { + return ''+ a +'' + + ''+ delimiter +'' + + ''+ b +''; + } + else { + return ''+ a +''; + } + + } + + /** + * Updates the state of all control/navigation arrows. 
+ */ + function updateControls() { + + var routes = availableRoutes(); + var fragments = availableFragments(); + + // Remove the 'enabled' class from all directions + dom.controlsLeft.concat( dom.controlsRight ) + .concat( dom.controlsUp ) + .concat( dom.controlsDown ) + .concat( dom.controlsPrev ) + .concat( dom.controlsNext ).forEach( function( node ) { + node.classList.remove( 'enabled' ); + node.classList.remove( 'fragmented' ); + + // Set 'disabled' attribute on all directions + node.setAttribute( 'disabled', 'disabled' ); + } ); + + // Add the 'enabled' class to the available routes; remove 'disabled' attribute to enable buttons + if( routes.left ) dom.controlsLeft.forEach( function( el ) { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } ); + if( routes.right ) dom.controlsRight.forEach( function( el ) { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } ); + if( routes.up ) dom.controlsUp.forEach( function( el ) { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } ); + if( routes.down ) dom.controlsDown.forEach( function( el ) { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } ); + + // Prev/next buttons + if( routes.left || routes.up ) dom.controlsPrev.forEach( function( el ) { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } ); + if( routes.right || routes.down ) dom.controlsNext.forEach( function( el ) { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } ); + + // Highlight fragment directions + if( currentSlide ) { + + // Always apply fragment decorator to prev/next buttons + if( fragments.prev ) dom.controlsPrev.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } ); + if( fragments.next ) dom.controlsNext.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } ); + + // Apply fragment decorators to directional buttons based on + // what slide axis they are in + if( isVerticalSlide( currentSlide ) ) { + if( fragments.prev ) dom.controlsUp.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } ); + if( fragments.next ) dom.controlsDown.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } ); + } + else { + if( fragments.prev ) dom.controlsLeft.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } ); + if( fragments.next ) dom.controlsRight.forEach( function( el ) { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } ); + } + + } + + } + + /** + * Updates the background elements to reflect the current + * slide. + * + * @param {boolean} includeAll If true, the backgrounds of + * all vertical slides (not just the present) will be updated. + */ + function updateBackground( includeAll ) { + + var currentBackground = null; + + // Reverse past/future classes when in RTL mode + var horizontalPast = config.rtl ? 'future' : 'past', + horizontalFuture = config.rtl ? 
'past' : 'future'; + + // Update the classes of all backgrounds to match the + // states of their slides (past/present/future) + toArray( dom.background.childNodes ).forEach( function( backgroundh, h ) { + + backgroundh.classList.remove( 'past' ); + backgroundh.classList.remove( 'present' ); + backgroundh.classList.remove( 'future' ); + + if( h < indexh ) { + backgroundh.classList.add( horizontalPast ); + } + else if ( h > indexh ) { + backgroundh.classList.add( horizontalFuture ); + } + else { + backgroundh.classList.add( 'present' ); + + // Store a reference to the current background element + currentBackground = backgroundh; + } + + if( includeAll || h === indexh ) { + toArray( backgroundh.querySelectorAll( '.slide-background' ) ).forEach( function( backgroundv, v ) { + + backgroundv.classList.remove( 'past' ); + backgroundv.classList.remove( 'present' ); + backgroundv.classList.remove( 'future' ); + + if( v < indexv ) { + backgroundv.classList.add( 'past' ); + } + else if ( v > indexv ) { + backgroundv.classList.add( 'future' ); + } + else { + backgroundv.classList.add( 'present' ); + + // Only if this is the present horizontal and vertical slide + if( h === indexh ) currentBackground = backgroundv; + } + + } ); + } + + } ); + + // Stop any currently playing video background + if( previousBackground ) { + + var previousVideo = previousBackground.querySelector( 'video' ); + if( previousVideo ) previousVideo.pause(); + + } + + if( currentBackground ) { + + // Start video playback + var currentVideo = currentBackground.querySelector( 'video' ); + if( currentVideo ) { + + var startVideo = function() { + currentVideo.currentTime = 0; + currentVideo.play(); + currentVideo.removeEventListener( 'loadeddata', startVideo ); + }; + + if( currentVideo.readyState > 1 ) { + startVideo(); + } + else { + currentVideo.addEventListener( 'loadeddata', startVideo ); + } + + } + + var backgroundImageURL = currentBackground.style.backgroundImage || ''; + + // Restart GIFs (doesn't work in Firefox) + if( /\.gif/i.test( backgroundImageURL ) ) { + currentBackground.style.backgroundImage = ''; + window.getComputedStyle( currentBackground ).opacity; + currentBackground.style.backgroundImage = backgroundImageURL; + } + + // Don't transition between identical backgrounds. This + // prevents unwanted flicker. + var previousBackgroundHash = previousBackground ? previousBackground.getAttribute( 'data-background-hash' ) : null; + var currentBackgroundHash = currentBackground.getAttribute( 'data-background-hash' ); + if( currentBackgroundHash && currentBackgroundHash === previousBackgroundHash && currentBackground !== previousBackground ) { + dom.background.classList.add( 'no-transition' ); + } + + previousBackground = currentBackground; + + } + + // If there's a background brightness flag for this slide, + // bubble it to the .reveal container + if( currentSlide ) { + [ 'has-light-background', 'has-dark-background' ].forEach( function( classToBubble ) { + if( currentSlide.classList.contains( classToBubble ) ) { + dom.wrapper.classList.add( classToBubble ); + } + else { + dom.wrapper.classList.remove( classToBubble ); + } + } ); + } + + // Allow the first background to apply without transition + setTimeout( function() { + dom.background.classList.remove( 'no-transition' ); + }, 1 ); + + } + + /** + * Updates the position of the parallax background based + * on the current slide index. 
+ */ + function updateParallax() { + + if( config.parallaxBackgroundImage ) { + + var horizontalSlides = dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ), + verticalSlides = dom.wrapper.querySelectorAll( VERTICAL_SLIDES_SELECTOR ); + + var backgroundSize = dom.background.style.backgroundSize.split( ' ' ), + backgroundWidth, backgroundHeight; + + if( backgroundSize.length === 1 ) { + backgroundWidth = backgroundHeight = parseInt( backgroundSize[0], 10 ); + } + else { + backgroundWidth = parseInt( backgroundSize[0], 10 ); + backgroundHeight = parseInt( backgroundSize[1], 10 ); + } + + var slideWidth = dom.background.offsetWidth, + horizontalSlideCount = horizontalSlides.length, + horizontalOffsetMultiplier, + horizontalOffset; + + if( typeof config.parallaxBackgroundHorizontal === 'number' ) { + horizontalOffsetMultiplier = config.parallaxBackgroundHorizontal; + } + else { + horizontalOffsetMultiplier = horizontalSlideCount > 1 ? ( backgroundWidth - slideWidth ) / ( horizontalSlideCount-1 ) : 0; + } + + horizontalOffset = horizontalOffsetMultiplier * indexh * -1; + + var slideHeight = dom.background.offsetHeight, + verticalSlideCount = verticalSlides.length, + verticalOffsetMultiplier, + verticalOffset; + + if( typeof config.parallaxBackgroundVertical === 'number' ) { + verticalOffsetMultiplier = config.parallaxBackgroundVertical; + } + else { + verticalOffsetMultiplier = ( backgroundHeight - slideHeight ) / ( verticalSlideCount-1 ); + } + + verticalOffset = verticalSlideCount > 0 ? verticalOffsetMultiplier * indexv : 0; + + dom.background.style.backgroundPosition = horizontalOffset + 'px ' + -verticalOffset + 'px'; + + } + + } + + /** + * Called when the given slide is within the configured view + * distance. Shows the slide element and loads any content + * that is set to load lazily (data-src). 
+ * + * @param {HTMLElement} slide Slide to show + */ + function showSlide( slide ) { + + // Show the slide element + slide.style.display = 'block'; + + // Media elements with data-src attributes + toArray( slide.querySelectorAll( 'img[data-src], video[data-src], audio[data-src]' ) ).forEach( function( element ) { + element.setAttribute( 'src', element.getAttribute( 'data-src' ) ); + element.removeAttribute( 'data-src' ); + } ); + + // Media elements with children + toArray( slide.querySelectorAll( 'video, audio' ) ).forEach( function( media ) { + var sources = 0; + + toArray( media.querySelectorAll( 'source[data-src]' ) ).forEach( function( source ) { + source.setAttribute( 'src', source.getAttribute( 'data-src' ) ); + source.removeAttribute( 'data-src' ); + sources += 1; + } ); + + // If we rewrote sources for this video/audio element, we need + // to manually tell it to load from its new origin + if( sources > 0 ) { + media.load(); + } + } ); + + + // Show the corresponding background element + var indices = getIndices( slide ); + var background = getSlideBackground( indices.h, indices.v ); + if( background ) { + background.style.display = 'block'; + + // If the background contains media, load it + if( background.hasAttribute( 'data-loaded' ) === false ) { + background.setAttribute( 'data-loaded', 'true' ); + + var backgroundImage = slide.getAttribute( 'data-background-image' ), + backgroundVideo = slide.getAttribute( 'data-background-video' ), + backgroundVideoLoop = slide.hasAttribute( 'data-background-video-loop' ), + backgroundVideoMuted = slide.hasAttribute( 'data-background-video-muted' ), + backgroundIframe = slide.getAttribute( 'data-background-iframe' ); + + // Images + if( backgroundImage ) { + background.style.backgroundImage = 'url('+ backgroundImage +')'; + } + // Videos + else if ( backgroundVideo && !isSpeakerNotes() ) { + var video = document.createElement( 'video' ); + + if( backgroundVideoLoop ) { + video.setAttribute( 'loop', '' ); + } + + if( backgroundVideoMuted ) { + video.muted = true; + } + + // Support comma separated lists of video sources + backgroundVideo.split( ',' ).forEach( function( source ) { + video.innerHTML += ''; + } ); + + background.appendChild( video ); + } + // Iframes + else if( backgroundIframe ) { + var iframe = document.createElement( 'iframe' ); + iframe.setAttribute( 'src', backgroundIframe ); + iframe.style.width = '100%'; + iframe.style.height = '100%'; + iframe.style.maxHeight = '100%'; + iframe.style.maxWidth = '100%'; + + background.appendChild( iframe ); + } + } + } + + } + + /** + * Called when the given slide is moved outside of the + * configured view distance. + * + * @param {HTMLElement} slide + */ + function hideSlide( slide ) { + + // Hide the slide element + slide.style.display = 'none'; + + // Hide the corresponding background element + var indices = getIndices( slide ); + var background = getSlideBackground( indices.h, indices.v ); + if( background ) { + background.style.display = 'none'; + } + + } + + /** + * Determine what available routes there are for navigation. 
+ * + * @return {{left: boolean, right: boolean, up: boolean, down: boolean}} + */ + function availableRoutes() { + + var horizontalSlides = dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ), + verticalSlides = dom.wrapper.querySelectorAll( VERTICAL_SLIDES_SELECTOR ); + + var routes = { + left: indexh > 0 || config.loop, + right: indexh < horizontalSlides.length - 1 || config.loop, + up: indexv > 0, + down: indexv < verticalSlides.length - 1 + }; + + // reverse horizontal controls for rtl + if( config.rtl ) { + var left = routes.left; + routes.left = routes.right; + routes.right = left; + } + + return routes; + + } + + /** + * Returns an object describing the available fragment + * directions. + * + * @return {{prev: boolean, next: boolean}} + */ + function availableFragments() { + + if( currentSlide && config.fragments ) { + var fragments = currentSlide.querySelectorAll( '.fragment' ); + var hiddenFragments = currentSlide.querySelectorAll( '.fragment:not(.visible)' ); + + return { + prev: fragments.length - hiddenFragments.length > 0, + next: !!hiddenFragments.length + }; + } + else { + return { prev: false, next: false }; + } + + } + + /** + * Enforces origin-specific format rules for embedded media. + */ + function formatEmbeddedContent() { + + var _appendParamToIframeSource = function( sourceAttribute, sourceURL, param ) { + toArray( dom.slides.querySelectorAll( 'iframe['+ sourceAttribute +'*="'+ sourceURL +'"]' ) ).forEach( function( el ) { + var src = el.getAttribute( sourceAttribute ); + if( src && src.indexOf( param ) === -1 ) { + el.setAttribute( sourceAttribute, src + ( !/\?/.test( src ) ? '?' : '&' ) + param ); + } + }); + }; + + // YouTube frames must include "?enablejsapi=1" + _appendParamToIframeSource( 'src', 'youtube.com/embed/', 'enablejsapi=1' ); + _appendParamToIframeSource( 'data-src', 'youtube.com/embed/', 'enablejsapi=1' ); + + // Vimeo frames must include "?api=1" + _appendParamToIframeSource( 'src', 'player.vimeo.com/', 'api=1' ); + _appendParamToIframeSource( 'data-src', 'player.vimeo.com/', 'api=1' ); + + } + + /** + * Start playback of any embedded content inside of + * the given element. 
+ * + * @param {HTMLElement} slide + */ + function startEmbeddedContent( element ) { + + if( element && !isSpeakerNotes() ) { + // Restart GIFs + toArray( element.querySelectorAll( 'img[src$=".gif"]' ) ).forEach( function( el ) { + // Setting the same unchanged source like this was confirmed + // to work in Chrome, FF & Safari + el.setAttribute( 'src', el.getAttribute( 'src' ) ); + } ); + + // HTML5 media elements + toArray( element.querySelectorAll( 'video, audio' ) ).forEach( function( el ) { + if( closestParent( el, '.fragment' ) && !closestParent( el, '.fragment.visible' ) ) { + return; + } + + if( el.hasAttribute( 'data-autoplay' ) && typeof el.play === 'function' ) { + el.play(); + } + } ); + + // Normal iframes + toArray( element.querySelectorAll( 'iframe[src]' ) ).forEach( function( el ) { + if( closestParent( el, '.fragment' ) && !closestParent( el, '.fragment.visible' ) ) { + return; + } + + startEmbeddedIframe( { target: el } ); + } ); + + // Lazy loading iframes + toArray( element.querySelectorAll( 'iframe[data-src]' ) ).forEach( function( el ) { + if( closestParent( el, '.fragment' ) && !closestParent( el, '.fragment.visible' ) ) { + return; + } + + if( el.getAttribute( 'src' ) !== el.getAttribute( 'data-src' ) ) { + el.removeEventListener( 'load', startEmbeddedIframe ); // remove first to avoid dupes + el.addEventListener( 'load', startEmbeddedIframe ); + el.setAttribute( 'src', el.getAttribute( 'data-src' ) ); + } + } ); + } + + } + + /** + * "Starts" the content of an embedded iframe using the + * postMessage API. + * + * @param {object} event - postMessage API event + */ + function startEmbeddedIframe( event ) { + + var iframe = event.target; + + if( iframe && iframe.contentWindow ) { + + // YouTube postMessage API + if( /youtube\.com\/embed\//.test( iframe.getAttribute( 'src' ) ) && iframe.hasAttribute( 'data-autoplay' ) ) { + iframe.contentWindow.postMessage( '{"event":"command","func":"playVideo","args":""}', '*' ); + } + // Vimeo postMessage API + else if( /player\.vimeo\.com\//.test( iframe.getAttribute( 'src' ) ) && iframe.hasAttribute( 'data-autoplay' ) ) { + iframe.contentWindow.postMessage( '{"method":"play"}', '*' ); + } + // Generic postMessage API + else { + iframe.contentWindow.postMessage( 'slide:start', '*' ); + } + + } + + } + + /** + * Stop playback of any embedded content inside of + * the targeted slide. 
+ * + * @param {HTMLElement} slide + */ + function stopEmbeddedContent( slide ) { + + if( slide && slide.parentNode ) { + // HTML5 media elements + toArray( slide.querySelectorAll( 'video, audio' ) ).forEach( function( el ) { + if( !el.hasAttribute( 'data-ignore' ) && typeof el.pause === 'function' ) { + el.pause(); + } + } ); + + // Generic postMessage API for non-lazy loaded iframes + toArray( slide.querySelectorAll( 'iframe' ) ).forEach( function( el ) { + el.contentWindow.postMessage( 'slide:stop', '*' ); + el.removeEventListener( 'load', startEmbeddedIframe ); + }); + + // YouTube postMessage API + toArray( slide.querySelectorAll( 'iframe[src*="youtube.com/embed/"]' ) ).forEach( function( el ) { + if( !el.hasAttribute( 'data-ignore' ) && typeof el.contentWindow.postMessage === 'function' ) { + el.contentWindow.postMessage( '{"event":"command","func":"pauseVideo","args":""}', '*' ); + } + }); + + // Vimeo postMessage API + toArray( slide.querySelectorAll( 'iframe[src*="player.vimeo.com/"]' ) ).forEach( function( el ) { + if( !el.hasAttribute( 'data-ignore' ) && typeof el.contentWindow.postMessage === 'function' ) { + el.contentWindow.postMessage( '{"method":"pause"}', '*' ); + } + }); + + // Lazy loading iframes + toArray( slide.querySelectorAll( 'iframe[data-src]' ) ).forEach( function( el ) { + // Only removing the src doesn't actually unload the frame + // in all browsers (Firefox) so we set it to blank first + el.setAttribute( 'src', 'about:blank' ); + el.removeAttribute( 'src' ); + } ); + } + + } + + /** + * Returns the number of past slides. This can be used as a global + * flattened index for slides. + * + * @return {number} Past slide count + */ + function getSlidePastCount() { + + var horizontalSlides = toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ); + + // The number of past slides + var pastCount = 0; + + // Step through all slides and count the past ones + mainLoop: for( var i = 0; i < horizontalSlides.length; i++ ) { + + var horizontalSlide = horizontalSlides[i]; + var verticalSlides = toArray( horizontalSlide.querySelectorAll( 'section' ) ); + + for( var j = 0; j < verticalSlides.length; j++ ) { + + // Stop as soon as we arrive at the present + if( verticalSlides[j].classList.contains( 'present' ) ) { + break mainLoop; + } + + pastCount++; + + } + + // Stop as soon as we arrive at the present + if( horizontalSlide.classList.contains( 'present' ) ) { + break; + } + + // Don't count the wrapping section for vertical slides + if( horizontalSlide.classList.contains( 'stack' ) === false ) { + pastCount++; + } + + } + + return pastCount; + + } + + /** + * Returns a value ranging from 0-1 that represents + * how far into the presentation we have navigated. + * + * @return {number} + */ + function getProgress() { + + // The number of past and total slides + var totalCount = getTotalSlides(); + var pastCount = getSlidePastCount(); + + if( currentSlide ) { + + var allFragments = currentSlide.querySelectorAll( '.fragment' ); + + // If there are fragments in the current slide those should be + // accounted for in the progress. 
+ if( allFragments.length > 0 ) { + var visibleFragments = currentSlide.querySelectorAll( '.fragment.visible' ); + + // This value represents how big a portion of the slide progress + // that is made up by its fragments (0-1) + var fragmentWeight = 0.9; + + // Add fragment progress to the past slide count + pastCount += ( visibleFragments.length / allFragments.length ) * fragmentWeight; + } + + } + + return pastCount / ( totalCount - 1 ); + + } + + /** + * Checks if this presentation is running inside of the + * speaker notes window. + * + * @return {boolean} + */ + function isSpeakerNotes() { + + return !!window.location.search.match( /receiver/gi ); + + } + + /** + * Reads the current URL (hash) and navigates accordingly. + */ + function readURL() { + + var hash = window.location.hash; + + // Attempt to parse the hash as either an index or name + var bits = hash.slice( 2 ).split( '/' ), + name = hash.replace( /#|\//gi, '' ); + + // If the first bit is invalid and there is a name we can + // assume that this is a named link + if( isNaN( parseInt( bits[0], 10 ) ) && name.length ) { + var element; + + // Ensure the named link is a valid HTML ID attribute + if( /^[a-zA-Z][\w:.-]*$/.test( name ) ) { + // Find the slide with the specified ID + element = document.getElementById( name ); + } + + if( element ) { + // Find the position of the named slide and navigate to it + var indices = Reveal.getIndices( element ); + slide( indices.h, indices.v ); + } + // If the slide doesn't exist, navigate to the current slide + else { + slide( indexh || 0, indexv || 0 ); + } + } + else { + // Read the index components of the hash + var h = parseInt( bits[0], 10 ) || 0, + v = parseInt( bits[1], 10 ) || 0; + + if( h !== indexh || v !== indexv ) { + slide( h, v ); + } + } + + } + + /** + * Updates the page URL (hash) to reflect the current + * state. + * + * @param {number} delay The time in ms to wait before + * writing the hash + */ + function writeURL( delay ) { + + if( config.history ) { + + // Make sure there's never more than one timeout running + clearTimeout( writeURLTimeout ); + + // If a delay is specified, timeout this call + if( typeof delay === 'number' ) { + writeURLTimeout = setTimeout( writeURL, delay ); + } + else if( currentSlide ) { + var url = '/'; + + // Attempt to create a named link based on the slide's ID + var id = currentSlide.getAttribute( 'id' ); + if( id ) { + id = id.replace( /[^a-zA-Z0-9\-\_\:\.]/g, '' ); + } + + // If the current slide has an ID, use that as a named link + if( typeof id === 'string' && id.length ) { + url = '/' + id; + } + // Otherwise use the /h/v index + else { + if( indexh > 0 || indexv > 0 ) url += indexh; + if( indexv > 0 ) url += '/' + indexv; + } + + window.location.hash = url; + } + } + + } + /** + * Retrieves the h/v location and fragment of the current, + * or specified, slide. + * + * @param {HTMLElement} [slide] If specified, the returned + * index will be for this slide rather than the currently + * active one + * + * @return {{h: number, v: number, f: number}} + */ + function getIndices( slide ) { + + // By default, return the current indices + var h = indexh, + v = indexv, + f; + + // If a slide is specified, return the indices of that slide + if( slide ) { + var isVertical = isVerticalSlide( slide ); + var slideh = isVertical ? 
slide.parentNode : slide; + + // Select all horizontal slides + var horizontalSlides = toArray( dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR ) ); + + // Now that we know which the horizontal slide is, get its index + h = Math.max( horizontalSlides.indexOf( slideh ), 0 ); + + // Assume we're not vertical + v = undefined; + + // If this is a vertical slide, grab the vertical index + if( isVertical ) { + v = Math.max( toArray( slide.parentNode.querySelectorAll( 'section' ) ).indexOf( slide ), 0 ); + } + } + + if( !slide && currentSlide ) { + var hasFragments = currentSlide.querySelectorAll( '.fragment' ).length > 0; + if( hasFragments ) { + var currentFragment = currentSlide.querySelector( '.current-fragment' ); + if( currentFragment && currentFragment.hasAttribute( 'data-fragment-index' ) ) { + f = parseInt( currentFragment.getAttribute( 'data-fragment-index' ), 10 ); + } + else { + f = currentSlide.querySelectorAll( '.fragment.visible' ).length - 1; + } + } + } + + return { h: h, v: v, f: f }; + + } + + /** + * Retrieves the total number of slides in this presentation. + * + * @return {number} + */ + function getTotalSlides() { + + return dom.wrapper.querySelectorAll( SLIDES_SELECTOR + ':not(.stack)' ).length; + + } + + /** + * Returns the slide element matching the specified index. + * + * @return {HTMLElement} + */ + function getSlide( x, y ) { + + var horizontalSlide = dom.wrapper.querySelectorAll( HORIZONTAL_SLIDES_SELECTOR )[ x ]; + var verticalSlides = horizontalSlide && horizontalSlide.querySelectorAll( 'section' ); + + if( verticalSlides && verticalSlides.length && typeof y === 'number' ) { + return verticalSlides ? verticalSlides[ y ] : undefined; + } + + return horizontalSlide; + + } + + /** + * Returns the background element for the given slide. + * All slides, even the ones with no background properties + * defined, have a background element so as long as the + * index is valid an element will be returned. + * + * @param {number} x Horizontal background index + * @param {number} y Vertical background index + * @return {(HTMLElement[]|*)} + */ + function getSlideBackground( x, y ) { + + // When printing to PDF the slide backgrounds are nested + // inside of the slides + if( isPrintingPDF() ) { + var slide = getSlide( x, y ); + if( slide ) { + return slide.slideBackgroundElement; + } + + return undefined; + } + + var horizontalBackground = dom.wrapper.querySelectorAll( '.backgrounds>.slide-background' )[ x ]; + var verticalBackgrounds = horizontalBackground && horizontalBackground.querySelectorAll( '.slide-background' ); + + if( verticalBackgrounds && verticalBackgrounds.length && typeof y === 'number' ) { + return verticalBackgrounds ? verticalBackgrounds[ y ] : undefined; + } + + return horizontalBackground; + + } + + /** + * Retrieves the speaker notes from a slide. Notes can be + * defined in two ways: + * 1. As a data-notes attribute on the slide
    + * 2. As an
    + + + + + + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/page_first.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/page_first.html new file mode 100644 index 0000000..eb54703 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/page_first.html @@ -0,0 +1,55 @@ + + + + + + + Ansible Essentials Workshop + + + + + + + + + + + + + + + + + + + +
    + + + + + + +
    +
    +
    +
    + +

    +

    +

    +

    +
    diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/plugin/index.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/plugin/index.php new file mode 100644 index 0000000..82677dc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/plugin/index.php @@ -0,0 +1,3 @@ + + + + + + reveal.js - Slide Notes + + + + + + +
    +
    Upcoming
    +
    +
    +

    Time Click to Reset

    +
    + 0:00 AM +
    +
    + 00:00:00 +
    +
    +
    + + +
    +
    + + +
    + + + + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/plugin/notes/notes.js b/roles/lightbulb-ansiblered-deck/files/deck-ansible/plugin/notes/notes.js new file mode 100644 index 0000000..46bf5de --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/plugin/notes/notes.js @@ -0,0 +1,145 @@ +/** + * Handles opening of and synchronization with the reveal.js + * notes window. + * + * Handshake process: + * 1. This window posts 'connect' to notes window + * - Includes URL of presentation to show + * 2. Notes window responds with 'connected' when it is available + * 3. This window proceeds to send the current presentation state + * to the notes window + */ +var RevealNotes = (function() { + + function openNotes( notesFilePath ) { + + if( !notesFilePath ) { + var jsFileLocation = document.querySelector('script[src$="notes.js"]').src; // this js file path + jsFileLocation = jsFileLocation.replace(/notes\.js(\?.*)?$/, ''); // the js folder path + notesFilePath = jsFileLocation + 'notes.html'; + } + + var notesPopup = window.open( notesFilePath, 'reveal.js - Notes', 'width=1100,height=700' ); + + /** + * Connect to the notes window through a postmessage handshake. + * Using postmessage enables us to work in situations where the + * origins differ, such as a presentation being opened from the + * file system. + */ + function connect() { + // Keep trying to connect until we get a 'connected' message back + var connectInterval = setInterval( function() { + notesPopup.postMessage( JSON.stringify( { + namespace: 'reveal-notes', + type: 'connect', + url: window.location.protocol + '//' + window.location.host + window.location.pathname + window.location.search, + state: Reveal.getState() + } ), '*' ); + }, 500 ); + + window.addEventListener( 'message', function( event ) { + var data = JSON.parse( event.data ); + if( data && data.namespace === 'reveal-notes' && data.type === 'connected' ) { + clearInterval( connectInterval ); + onConnected(); + } + } ); + } + + /** + * Posts the current slide data to the notes window + */ + function post(event) { + + var slideElement = Reveal.getCurrentSlide(), + notesElement = slideElement.querySelector( 'aside.notes' ); + + var messageData = { + namespace: 'reveal-notes', + type: 'state', + notes: '', + markdown: false, + whitespace: 'normal', + state: Reveal.getState() + }; + + // Look for notes defined in a fragment, if it is a fragmentshown event + if (event && event.hasOwnProperty('fragment')) { + var innerNotes = event.fragment.querySelector( 'aside.notes' ); + + if ( innerNotes) { + notesElement = innerNotes; + } + } + + // Look for notes defined in a slide attribute + if( slideElement.hasAttribute( 'data-notes' ) ) { + messageData.notes = slideElement.getAttribute( 'data-notes' ); + messageData.whitespace = 'pre-wrap'; + } + + // Look for notes defined in an aside element + if( notesElement ) { + messageData.notes = notesElement.innerHTML; + messageData.markdown = typeof notesElement.getAttribute( 'data-markdown' ) === 'string'; + } + + notesPopup.postMessage( JSON.stringify( messageData ), '*' ); + + } + + /** + * Called once we have established a connection to the notes + * window. 
+ */ + function onConnected() { + + // Monitor events that trigger a change in state + Reveal.addEventListener( 'slidechanged', post ); + Reveal.addEventListener( 'fragmentshown', post ); + Reveal.addEventListener( 'fragmenthidden', post ); + Reveal.addEventListener( 'overviewhidden', post ); + Reveal.addEventListener( 'overviewshown', post ); + Reveal.addEventListener( 'paused', post ); + Reveal.addEventListener( 'resumed', post ); + + // Post the initial state + post(); + + } + + connect(); + + } + + if( !/receiver/i.test( window.location.search ) ) { + + // If the there's a 'notes' query set, open directly + if( window.location.search.match( /(\?|\&)notes/gi ) !== null ) { + openNotes(); + } + + // Open the notes when the 's' key is hit + document.addEventListener( 'keydown', function( event ) { + // Disregard the event if the target is editable or a + // modifier is present + if ( document.querySelector( ':focus' ) !== null || event.shiftKey || event.altKey || event.ctrlKey || event.metaKey ) return; + + // Disregard the event if keyboard is disabled + if ( Reveal.getConfig().keyboard === false ) return; + + if( event.keyCode === 83 ) { + event.preventDefault(); + openNotes(); + } + }, false ); + + // Show our keyboard shortcut in the reveal.js help overlay + if( window.Reveal ) Reveal.registerKeyboardShortcut( 'S', 'Speaker notes view' ); + + } + + return { open: openNotes }; + +})(); diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/default.prefs.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/default.prefs.php new file mode 100644 index 0000000..b9ecd8b --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/default.prefs.php @@ -0,0 +1,11 @@ + + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/index.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/index.php new file mode 100644 index 0000000..82677dc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/index.php @@ -0,0 +1,3 @@ + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/lee.prefs.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/lee.prefs.php new file mode 100644 index 0000000..9224844 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/lee.prefs.php @@ -0,0 +1,10 @@ + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/med.prefs.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/med.prefs.php new file mode 100644 index 0000000..7ced860 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/med.prefs.php @@ -0,0 +1,10 @@ + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/msavage.prefs.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/msavage.prefs.php new file mode 100644 index 0000000..a2a7969 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/msavage.prefs.php @@ -0,0 +1,12 @@ + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/mstonge.prefs.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/mstonge.prefs.php new file mode 100644 index 0000000..30a1fa5 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/mstonge.prefs.php @@ -0,0 +1,12 @@ + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/phil.prefs.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/phil.prefs.php new file mode 100644 index 0000000..6b672bc --- /dev/null +++ 
b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/phil.prefs.php @@ -0,0 +1,10 @@ + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/shadd.prefs.php b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/shadd.prefs.php new file mode 100644 index 0000000..eef9c11 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/prefs/shadd.prefs.php @@ -0,0 +1,10 @@ + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/sponsor.html b/roles/lightbulb-ansiblered-deck/files/deck-ansible/sponsor.html new file mode 100644 index 0000000..00052a1 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/files/deck-ansible/sponsor.html @@ -0,0 +1,10 @@ +
    + +

    Why I Love Kevin Holmes

    +

    Without Kev, my life would be boring.

    +

    I would like to take just a few minutes to talk about Kevin and how wonderful of a friend he is.

    +

    GoKEV.com           YouTube/GoKEV

    +
    + diff --git a/roles/lightbulb-ansiblered-deck/files/deck-ansible/video/yuminstallansible.mp4 b/roles/lightbulb-ansiblered-deck/files/deck-ansible/video/yuminstallansible.mp4 new file mode 100644 index 0000000..c297944 Binary files /dev/null and b/roles/lightbulb-ansiblered-deck/files/deck-ansible/video/yuminstallansible.mp4 differ diff --git a/roles/lightbulb-ansiblered-deck/handlers/main.yml b/roles/lightbulb-ansiblered-deck/handlers/main.yml new file mode 100644 index 0000000..2334665 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for ansiblered-deck-ansible \ No newline at end of file diff --git a/roles/lightbulb-ansiblered-deck/meta/.galaxy_install_info b/roles/lightbulb-ansiblered-deck/meta/.galaxy_install_info new file mode 100644 index 0000000..7a615dc --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Wed Jun 24 18:44:30 2020 +version: '' diff --git a/roles/lightbulb-ansiblered-deck/meta/main.yml b/roles/lightbulb-ansiblered-deck/meta/main.yml new file mode 100644 index 0000000..7223799 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/meta/main.yml @@ -0,0 +1,57 @@ +galaxy_info: + author: your name + description: your description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + + min_ansible_version: 1.2 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If Travis integration is configured, only notifications for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + + # + # platforms is a list of platforms, and each platform has a name and a list of versions. + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/roles/lightbulb-ansiblered-deck/tasks/main.yml b/roles/lightbulb-ansiblered-deck/tasks/main.yml new file mode 100644 index 0000000..065c473 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/tasks/main.yml @@ -0,0 +1,80 @@ +--- +# tasks file for ansiblered-deck-ansible--- +- name: Assure PHP is installed for deck and dynamic content + yum: + name: php + state: latest + +- name: Assure our preferences files directory exists + file: + path: "{{ workshop_web_path }}/deck-ansible/prefs/" + state: directory + +- name: deploy the PHP default preferences file template for the slide deck + template: + src: templates/deck-ansible_prefs_default.prefs.php + dest: "{{ workshop_web_path }}/deck-ansible/prefs/default.prefs.php" + mode: 0644 + backup: yes + +- name: synchronize html basics deck + synchronize: + src: files/deck-ansible/ + dest: "{{ workshop_web_path }}/deck-ansible/" + delete: yes + recursive: yes + rsync_opts: + - "--no-motd" + - "--exclude=prefs/default.prefs.php" + +### Ansible SELinux module does not appear to work recursively so we use ugly raw +- name: "Set selinux context recursively httpd_sys_rw_content_t {{ workshop_web_path }}" + shell: "chcon -R -t httpd_sys_rw_content_t {{ workshop_web_path }}" + changed_when: no + failed_when: no + + +### The optional PHP redirect will go in the {{ workshop_web_path }} path and redirect to the PHP deck dir with tag: phpredirect +- name: Copy index.php to root dir + template: + src: templates/index_redirect.php + dest: "{{ workshop_web_path }}/index.php" + owner: root + group: wheel + mode: 0644 + tags: [ 'never', 'phpredirect' ] + +### The optional PHP daemon to run this without apache will only install explicitly with tag: phpdaemon +- name: Copy PHP systemd service file + template: + src: templates/ansible-php-deck.service + dest: /usr/lib/systemd/system/ansible-php-deck.service + owner: root + group: wheel + mode: 0644 + tags: [ 'never', 'phpdaemon' ] + +- name: Copy PHP script file to launch daemon + template: + src: templates/ansible-php-deck.sh + dest: /root/ansible-php-deck.sh + owner: root + group: wheel + mode: 0755 + tags: [ 'never', 'phpdaemon' ] + +- name: Start service ansible-php-deck, if not started + service: + name: ansible-php-deck + state: started + tags: [ 'never', 'phpdaemon' ] + +- name: Enable service ansible-php-deck, if not enabled + service: + name: ansible-php-deck + enabled: yes + tags: [ 'never', 'phpdaemon' ] + + + + diff --git a/roles/lightbulb-ansiblered-deck/templates/ansible-php-deck.service b/roles/lightbulb-ansiblered-deck/templates/ansible-php-deck.service new file mode 100644 index 0000000..5d87c8b --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/templates/ansible-php-deck.service @@ -0,0 +1,9 @@ +[Unit] +Description=PHP daemon to serve the Ansible deck + +[Service] +ExecStart=/root/ansible-php-deck.sh + +[Install] +WantedBy=multi-user.target + diff --git a/roles/lightbulb-ansiblered-deck/templates/ansible-php-deck.sh b/roles/lightbulb-ansiblered-deck/templates/ansible-php-deck.sh new file mode 100644 index 0000000..a38e3b7 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/templates/ansible-php-deck.sh @@ -0,0 +1,3 @@ +#!/bin/sh +sudo /bin/php -S 0.0.0.0:{{ php_port }} -t {{ workshop_web_path }}/deck-ansible/ > /var/log/php_deck.log 2>&1 + diff --git a/roles/lightbulb-ansiblered-deck/templates/deck-ansible_prefs_default.prefs.php b/roles/lightbulb-ansiblered-deck/templates/deck-ansible_prefs_default.prefs.php new file mode 100644 index 0000000..2b20dfb ---
/dev/null +++ b/roles/lightbulb-ansiblered-deck/templates/deck-ansible_prefs_default.prefs.php @@ -0,0 +1,26 @@ + + diff --git a/roles/lightbulb-ansiblered-deck/templates/index_redirect.php b/roles/lightbulb-ansiblered-deck/templates/index_redirect.php new file mode 100644 index 0000000..5f86d52 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/templates/index_redirect.php @@ -0,0 +1,5 @@ + diff --git a/roles/lightbulb-ansiblered-deck/tests/inventory b/roles/lightbulb-ansiblered-deck/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/roles/lightbulb-ansiblered-deck/tests/test.yml b/roles/lightbulb-ansiblered-deck/tests/test.yml new file mode 100644 index 0000000..87233ce --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ansiblered-deck-ansible \ No newline at end of file diff --git a/roles/lightbulb-ansiblered-deck/vars/main.yml b/roles/lightbulb-ansiblered-deck/vars/main.yml new file mode 100644 index 0000000..a227267 --- /dev/null +++ b/roles/lightbulb-ansiblered-deck/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ansiblered-deck-ansible \ No newline at end of file diff --git a/roles/linux-system-roles.network/.gitignore b/roles/linux-system-roles.network/.gitignore new file mode 100644 index 0000000..b54b2fb --- /dev/null +++ b/roles/linux-system-roles.network/.gitignore @@ -0,0 +1,11 @@ +/.cache +/.coverage +*.pyc +/.pytest_cache +/tests/.coverage +/tests/htmlcov* +/tests/__pycache__/ +/tests/remote-coveragedata-* +/tests/tmp_merge_coveragerc +/tests/total-*coveragedata +/.tox diff --git a/roles/linux-system-roles.network/.travis.yml b/roles/linux-system-roles.network/.travis.yml new file mode 100644 index 0000000..8e76d66 --- /dev/null +++ b/roles/linux-system-roles.network/.travis.yml @@ -0,0 +1,27 @@ +--- +dist: xenial +language: python +matrix: + include: + - python: 2.6 + dist: trusty + - python: 2.7 + - python: 3.5 + env: aptpkgs=python3-selinux + - python: 3.6 + - python: 3.7 + - python: 3.7-dev + - python: 3.8-dev + # - python: nightly + +services: + - docker + +before_install: + - if [ -n "${aptpkgs}" ]; then sudo apt-get install -y python3-selinux; fi + +install: + - pip install tox tox-travis + +script: + - tox diff --git a/roles/linux-system-roles.network/LICENSE b/roles/linux-system-roles.network/LICENSE new file mode 100644 index 0000000..6117e71 --- /dev/null +++ b/roles/linux-system-roles.network/LICENSE @@ -0,0 +1,28 @@ +BSD-3-Clause License + +Copyright (c) 2017-2018 Red Hat, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/roles/linux-system-roles.network/README.md b/roles/linux-system-roles.network/README.md
new file mode 100644
index 0000000..2830b4e
--- /dev/null
+++ b/roles/linux-system-roles.network/README.md
@@ -0,0 +1,682 @@
+linux-system-roles/network
+==========================
+[![Coverage Status](https://coveralls.io/repos/github/linux-system-roles/network/badge.svg)](https://coveralls.io/github/linux-system-roles/network)
+[![Travis Build Status](https://travis-ci.org/linux-system-roles/network.svg?branch=master)](https://travis-ci.org/linux-system-roles/network)
+[![Code Style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)
+
+Overview
+--------
+
+The `network` role enables users to configure networking on the target machines.
+This role can be used to configure:
+
+- Ethernet interfaces
+- Bridge interfaces
+- Bonded interfaces
+- VLAN interfaces
+- MacVLAN interfaces
+- Infiniband interfaces
+- IP configuration
+
+Introduction
+------------
+The `network` role supports two providers: `nm` and `initscripts`. `nm` is
+used by default in RHEL7 and `initscripts` in RHEL6. These providers can be
+configured per host via the [`network_provider`](#provider) variable. In the
+absence of explicit configuration, the provider is autodetected based on the
+distribution. Note, however, that neither `nm` nor `initscripts` is tied to a particular
+distribution; the `network` role works everywhere the required API is available.
+For `nm`, this means that NetworkManager's API version 1.2 or later must be available.
+For `initscripts`, the legacy network service, as used in Fedora or RHEL, is required.
+
+For each host, a list of networking profiles can be configured via the
+`network_connections` variable.
+
+- For `initscripts`, profiles correspond to ifcfg files in the `/etc/sysconfig/network-scripts/ifcfg-*` directory.
+
+- For `NetworkManager`, profiles correspond to connection profiles as handled by
+  NetworkManager. Fedora and RHEL use the `ifcfg-rh-plugin` for NetworkManager,
+  which also reads and writes configuration files in `/etc/sysconfig/network-scripts/ifcfg-*`
+  for compatibility.
+
+Note that the `network` role primarily operates on networking profiles (connections) and
+not on devices, but it uses the profile name by default as the interface name.
+It is also possible to create generic profiles, for example a profile with a
+certain IP configuration that is not activated. To apply the configuration to
+the actual networking interface, use the `nmcli` commands on the target system.
+
+**Warning**: The `network` role updates or creates all connection profiles on
+the target system as specified in the `network_connections` variable.
Therefore,
+the `network` role removes options from the specified profiles if the options are
+only present on the system but not in the `network_connections` variable.
+Exceptions are mentioned below.
+
+Variables
+---------
+The `network` role is configured via variables starting with `network_` as the name prefix.
+List of variables:
+
+* `network_provider` - The `network_provider` variable allows you to set a specific
+  provider (`nm` or `initscripts`). Setting it to `{{ network_provider_os_default }}`
+  selects the provider depending on the operating system. This is usually `nm`
+  except for RHEL 6 or CentOS 6 systems.
+
+* `network_connections` - The connection profiles are configured as `network_connections`,
+  which is a list of dictionaries that include specific options.
+
+Examples of Variables
+---------------------
+
+Setting the variables:
+
+```yaml
+network_provider: nm
+network_connections:
+  - name: eth0
+    #...
+```
+
+Options
+-------
+The `network_connections` variable is a list of dictionaries that include the following options.
+List of options:
+
+### `name` (required)
+
+The `name` option identifies the connection profile. It is not the name of the
+networking interface to which the profile applies, though we can associate
+the profile with an interface and give them the same name.
+Note that you can have multiple profiles for the same device, but only
+one profile can be active on the device at a time.
+For NetworkManager, a connection can only be active on one device at a time.
+
+* For `NetworkManager`, the `name` option corresponds to the
+  [`connection.id`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.connection.id)
+  property.
+  Although NetworkManager supports multiple connections with the same `connection.id`,
+  the `network` role cannot handle a duplicate `name`. Specifying a `name` multiple
+  times refers to the same connection profile.
+
+* For `initscripts`, the `name` option determines the ifcfg file name `/etc/sysconfig/network-scripts/ifcfg-$NAME`.
+  Note that the `name` does not specify the `DEVICE` but a filename. As a consequence,
+  `'/'` is not a valid character for the `name`.
+
+You can also use the same connection profile multiple times. Therefore, it is possible to create a profile and activate it separately.
+
+### `state`
+
+The `state` option identifies the runtime state of each connection profile. The `state` option (optional) can be set to the following values:
+
+* `up` - the connection profile is activated
+* `down` - the connection profile is deactivated
+
+#### `state: up`
+
+- For `NetworkManager`, this corresponds to `nmcli connection id {{name}} up`.
+
+- For `initscripts`, this corresponds to `ifup {{name}}`.
+
+When the `state` option is set to `up`, you can also specify the `wait` option (optional):
+
+* `wait: 0` - initiates only the activation, but does not wait until the device is fully connected.
+  The connection will be completed in the background, for example after a DHCP lease was received.
+* `wait: <seconds>` - a timeout that enables you to decide how long you give the device to
+  activate. The default is to use a suitable timeout. Note that the `wait` option is
+  only supported by NetworkManager.
+
+Note that `state: up` always re-activates the profile and possibly changes the
+networking configuration, even if the profile was already active before. As
+a consequence, `state: up` always changes the system.
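
To illustrate the `state: up`/`wait` behavior just described, here is a minimal sketch; the profile names `eth0` and `eth1` are placeholders, and the `wait` value is assumed to be a number of seconds, honored only by the `nm` provider:

```yaml
network_connections:
  # Re-activate the profile and wait for the device to finish
  # activating; the timeout value (assumed seconds) is a placeholder.
  - name: eth0
    state: up
    wait: 30

  # Only kick off the activation and let it complete in the
  # background (e.g. while waiting for a DHCP lease).
  - name: eth1
    state: up
    wait: 0
```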
+
+#### `state: down`
+
+- For `NetworkManager`, it corresponds to `nmcli connection id {{name}} down`.
+
+- For `initscripts`, it corresponds to calling `ifdown {{name}}`.
+
+You can deactivate a connection profile, even if it is currently not active. As a consequence, `state: down` always changes the system.
+
+Note that if the `state` option is unset, the connection profile's runtime state will not be changed.
+
+### `persistent_state`
+
+The `persistent_state` option identifies whether a connection profile is persistent (saved on disk). The `persistent_state` option can be set to the following values:
+
+#### `persistent_state: present` (default)
+
+Note that if `persistent_state` is `present` and the connection profile contains
+the `type` option, the profile will be created or updated. If the connection profile is
+incomplete (no `type` option), the behavior is undefined. Also, the `present` value
+does not directly result in a change in the network configuration. If the `state` option
+is not set to `up`, the profile is only created or modified, not activated.
+
+For NetworkManager, the new connection profile is created with the `autoconnect`
+option enabled by default. Therefore, NetworkManager can activate the new
+profile on a currently disconnected device ([rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515)).
+
+#### `persistent_state: absent`
+
+The `absent` value ensures that the profile is not present on the
+target host. If a profile with the given `name` exists, it will be deleted. In this case:
+
+- `NetworkManager` deletes all connection profiles with the corresponding `connection.id`.
+  Deleting a profile usually does not change the current networking configuration, unless
+  the profile was currently activated on a device. Deleting the currently
+  active connection profile disconnects the device. That makes the device eligible
+  to autoconnect another connection (for more details, see [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515)).
+
+- `initscripts` deletes the ifcfg file, in most cases with no impact on the runtime state of the system, unless some component is watching the sysconfig directory.
+
+**Note**: For profiles that only contain a `state` option, the `network` role only activates
+or deactivates the connection without changing its configuration.
+
+### `type`
+
+The `type` option can be set to the following values:
+
+  - `ethernet`
+  - `bridge`
+  - `bond`
+  - `team`
+  - `vlan`
+  - `macvlan`
+  - `infiniband`
+
+#### `type: ethernet`
+
+If the type is `ethernet`, then there can be an extra `ethernet` dictionary with the following
+items (options): `autoneg`, `speed` and `duplex`, which correspond to the
+settings of the `ethtool` utility with the same name.
+
+* `autoneg`: `yes` (default) or `no` [whether auto-negotiation is enabled or disabled]
+* `speed`: speed in Mbit/s
+* `duplex`: `half` or `full`
+
+Note that the `speed` and `duplex` link settings are required when autonegotiation is disabled (`autoneg: no`).
+
+#### `type: bridge`, `type: bond`, `type: team`
+
+The `bridge`, `bond` and `team` device types work similarly. Note that `team` is not supported in RHEL6 kernels.
+
+For slaves, the `slave_type` and `master` properties must be set. Note that slaves should not have `ip` settings.
+
+The `master` refers to the `name` of a profile in the Ansible
+playbook. It is neither an interface-name nor a connection-id of
+NetworkManager.
+
+- For NetworkManager, `master` will be converted to the `connection.uuid`
+  of the corresponding profile.
+
+- For initscripts, the master is looked up as the `DEVICE` from the corresponding
+  ifcfg file.
+
+As `master` refers to other profiles of the same or another play,
+the order of the `connections` list matters. Also, `--check` ignores
+the value of the `master` and assumes it will be present during a real
+run. That means that, in the presence of an invalid `master`, `--check` may
+signal success while the actual play run fails.
+
+#### `type: vlan`
+
+Similar to `master`, the `parent` references a connection profile in the Ansible
+role.
+
+#### `type: macvlan`
+
+Similar to `master` and `vlan`, the `parent` references a connection profile in the Ansible
+role.
+
+### `autoconnect`
+
+By default, profiles are created with autoconnect enabled.
+
+- For `NetworkManager`, this corresponds to the `connection.autoconnect` property.
+
+- For `initscripts`, this corresponds to the `ONBOOT` property.
+
+### `mac`
+
+The `mac` address is optional and restricts the profile to be usable only on
+devices with the given MAC address. `mac` is only allowed for the `ethernet`
+and `infiniband` types, to match a non-virtual device with the
+profile.
+
+- For `NetworkManager`, `mac` is the permanent MAC address, `ethernet.mac-address`.
+
+- For `initscripts`, `mac` is the currently configured MAC address of the device (`HWADDR`).
+
+### `interface_name`
+
+For the `ethernet` and `infiniband` types, the `interface_name` option restricts the profile to
+the given interface by name. This argument is optional and by default the
+profile name is used, unless a MAC address is specified using the `mac` key.
+Specifying an empty string (`""`) means that the profile is not
+restricted to a network interface.
+
+**Note:** With [persistent interface naming](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/ch-Consistent_Network_Device_Naming.html),
+the interface name is predictable based on the hardware configuration.
+Otherwise, the `mac` address might be an option.
+
+For virtual interface types such as bridges, the `interface_name` is the name of the created
+interface. If `interface_name` is missing, the profile `name` is used.
+
+**Note:** The `name` (the profile name) and the `interface_name` (the device name) may be
+different, or the profile may not be tied to an interface at all.
+
+### `zone`
+
+The `zone` option sets the firewalld zone for the interface.
+
+Slaves to the bridge, bond or team devices cannot specify a zone.
+
+### `ip`
+
+The IP configuration supports the following options:
+
+* `address`
+
+  Manual addressing can be specified via a list of addresses under the `address` option.
+
+* `dhcp4` and `auto6`
+
+  Alternatively, automatic addressing can be enabled by setting `dhcp4` or `auto6`.
+  The `dhcp4` key is for DHCPv4 and `auto6` for StateLess Address Auto Configuration
+  (SLAAC). Note that the `dhcp4` and `auto6` keys can be omitted; their default
+  values depend on the presence of manual addresses.
+
+* `dhcp4_send_hostname`
+
+  If `dhcp4` is enabled, the `dhcp4_send_hostname` option configures whether the
+  DHCPv4 request includes the hostname. Note that `dhcp4_send_hostname`
+  is only supported by the `nm` provider and corresponds to the
+  [`ipv4.dhcp-send-hostname`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.dhcp-send-hostname)
+  property.
+
+* `dns` and `dns_search`
+
+  Manual DNS configuration can be specified via a list of addresses
+  given in the `dns` option and a list of domains to search given in the
+  `dns_search` option.
+
+* `route_metric4` and `route_metric6`
+
+  - For `NetworkManager`, `route_metric4` and `route_metric6` correspond to the
+    [`ipv4.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.route-metric) and
+    [`ipv6.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv6.route-metric)
+    properties, respectively. If specified, they determine the route metric for DHCP-assigned
+    routes and the default route, and thus the priority among multiple interfaces.
+
+* `route`
+
+  Static route configuration can be specified via a list of routes given in the `route`
+  option. The default value is an empty list. Each route is a dictionary with the following
+  entries: `network`, `prefix`, `gateway` and `metric`. `network` and `prefix` specify
+  the destination network.
+  Note that Classless Inter-Domain Routing (CIDR) notation and network mask notation are not supported yet.
+
+* `route_append_only`
+
+  The `route_append_only` option makes it possible to add new routes to the
+  routes that already exist on the system without removing them.
+
+  If the `route_append_only` boolean option is set to `yes`, the specified routes are appended to the existing routes.
+  If `route_append_only` is set to `no` (default), the current routes are replaced.
+  Note that setting `route_append_only` to `yes` without setting `route` has the effect of preserving the current static routes.
+
+* `rule_append_only`
+
+  The `rule_append_only` boolean option preserves the current routing rules.
+  Note that specifying routing rules is not supported yet.
+
+**Note:** When `route_append_only` or `rule_append_only` is not specified, the `network` role deletes the current routes or routing rules.
+
+**Note:** Slaves to the bridge, bond or team devices cannot specify `ip` settings.
+
+### `ethtool`
+
+The ethtool settings allow enabling or disabling various features. The names
+correspond to the names used by the `ethtool` utility. Depending on the actual
+kernel and device, changing some features might not be supported. A usage
+sketch follows the feature list below.
+ +```yaml + ethtool: + features: + esp-hw-offload: yes|no # optional + esp-tx-csum-hw-offload: yes|no # optional + fcoe-mtu: yes|no # optional + gro: yes|no # optional + gso: yes|no # optional + highdma: yes|no # optional + hw-tc-offload: yes|no # optional + l2-fwd-offload: yes|no # optional + loopback: yes|no # optional + lro: yes|no # optional + ntuple: yes|no # optional + rx: yes|no # optional + rx-all: yes|no # optional + rx-fcs: yes|no # optional + rx-gro-hw: yes|no # optional + rx-udp_tunnel-port-offload: yes|no # optional + rx-vlan-filter: yes|no # optional + rx-vlan-stag-filter: yes|no # optional + rx-vlan-stag-hw-parse: yes|no # optional + rxhash: yes|no # optional + rxvlan: yes|no # optional + sg: yes|no # optional + tls-hw-record: yes|no # optional + tls-hw-tx-offload: yes|no # optional + tso: yes|no # optional + tx: yes|no # optional + tx-checksum-fcoe-crc: yes|no # optional + tx-checksum-ip-generic: yes|no # optional + tx-checksum-ipv4: yes|no # optional + tx-checksum-ipv6: yes|no # optional + tx-checksum-sctp: yes|no # optional + tx-esp-segmentation: yes|no # optional + tx-fcoe-segmentation: yes|no # optional + tx-gre-csum-segmentation: yes|no # optional + tx-gre-segmentation: yes|no # optional + tx-gso-partial: yes|no # optional + tx-gso-robust: yes|no # optional + tx-ipxip4-segmentation: yes|no # optional + tx-ipxip6-segmentation: yes|no # optional + tx-nocache-copy: yes|no # optional + tx-scatter-gather: yes|no # optional + tx-scatter-gather-fraglist: yes|no # optional + tx-sctp-segmentation: yes|no # optional + tx-tcp-ecn-segmentation: yes|no # optional + tx-tcp-mangleid-segmentation: yes|no # optional + tx-tcp-segmentation: yes|no # optional + tx-tcp6-segmentation: yes|no # optional + tx-udp-segmentation: yes|no # optional + tx-udp_tnl-csum-segmentation: yes|no # optional + tx-udp_tnl-segmentation: yes|no # optional + tx-vlan-stag-hw-insert: yes|no # optional + txvlan: yes|no # optional +``` + +Examples of Options +------------------- + +Setting the same connection profile multiple times: + +```yaml +network_connections: + - name: Wired0 + type: ethernet + interface_name: eth0 + ip: + dhcp4: yes + + - name: Wired0 + state: up +``` + +Activating a preexisting connection profile: + +```yaml +network_connections: + - name: eth0 + state: up +``` + +Deactivating a preexisting connection profile: + +```yaml +network_connections: + - name: eth0 + state: down +``` + +Creating a persistent connection profile: + +```yaml +network_connections: + - name: eth0 + #persistent_state: present # default + type: ethernet + autoconnect: yes + mac: 00:00:5e:00:53:5d + ip: + dhcp4: yes +``` + +Deleting a connection profile named `eth0` (if it exists): + +```yaml +network_connections: + - name: eth0 + persistent_state: absent +``` + +Configuring the Ethernet link settings: + +```yaml +network_connections: + - name: eth0 + type: ethernet + + ethernet: + autoneg: no + speed: 1000 + duplex: full +``` + +Creating a bridge connection: + +```yaml +network_connections: + - name: br0 + type: bridge + #interface_name: br0 # defaults to the connection name +``` + + +Configuring a bridge connection: + +```yaml +network_connections: + - name: internal-br0 + interface_name: br0 + type: bridge + ip: + dhcp4: no + auto6: no +``` + +Setting `master` and `slave_type`: + +```yaml +network_connections: + - name: br0-bond0 + type: bond + interface_name: bond0 + master: internal-br0 + slave_type: bridge + + - name: br0-bond0-eth1 + type: ethernet + interface_name: eth1 + master: br0-bond0 + slave_type: bond +``` 
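
To connect the feature list above with a concrete profile, here is a minimal sketch of toggling a few features on an Ethernet connection; it follows the same pattern as the `examples/ethtool-features.yml` playbook later in this patch, and the interface name `eth0` is a placeholder:

```yaml
network_connections:
  - name: eth0
    type: ethernet
    state: up
    ip:
      dhcp4: no
      auto6: no
    ethtool:
      features:
        gro: no                    # disable generic receive offload
        gso: yes                   # keep generic segmentation offload on
        tx-sctp-segmentation: no   # disable SCTP segmentation offload
```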
+
+Configuring VLANs:
+
+```yaml
+network_connections:
+  - name: eth1-profile
+    autoconnect: no
+    type: ethernet
+    interface_name: eth1
+    ip:
+      dhcp4: no
+      auto6: no
+
+  - name: eth1.6
+    autoconnect: no
+    type: vlan
+    parent: eth1-profile
+    vlan:
+      id: 6
+    ip:
+      address:
+        - 192.0.2.5/24
+      auto6: no
+```
+
+Configuring MACVLAN:
+
+```yaml
+network_connections:
+  - name: eth0-profile
+    type: ethernet
+    interface_name: eth0
+    ip:
+      address:
+        - 192.168.0.1/24
+
+  - name: veth0
+    type: macvlan
+    parent: eth0-profile
+    macvlan:
+      mode: bridge
+      promiscuous: yes
+      tap: no
+    ip:
+      address:
+        - 192.168.1.1/24
+```
+
+Setting the IP configuration:
+
+```yaml
+network_connections:
+  - name: eth0
+    type: ethernet
+    ip:
+      route_metric4: 100
+      dhcp4: no
+      #dhcp4_send_hostname: no
+      gateway4: 192.0.2.1
+
+      dns:
+        - 192.0.2.2
+        - 198.51.100.5
+      dns_search:
+        - example.com
+        - subdomain.example.com
+
+      route_metric6: -1
+      auto6: no
+      gateway6: 2001:db8::1
+
+      address:
+        - 192.0.2.3/24
+        - 198.51.100.3/26
+        - 2001:db8::80/7
+
+      route:
+        - network: 198.51.100.128
+          prefix: 26
+          gateway: 198.51.100.1
+          metric: 2
+        - network: 198.51.100.64
+          prefix: 26
+          gateway: 198.51.100.6
+          metric: 4
+      route_append_only: no
+      rule_append_only: yes
+```
+
+### Invalid and Wrong Configuration
+
+The `network` role rejects invalid configurations. It is recommended to test the role
+with `--check` first. There is no protection against wrong (but valid) configuration.
+Double-check your configuration before applying it.
+
+Compatibility
+-------------
+
+The `network` role supports the same configuration scheme for both providers (`nm`
+and `initscripts`). That means you can use the same playbook with NetworkManager
+and initscripts. However, note that not every option is handled exactly the same way
+by every provider. Do a test run first with `--check`.
+
+Creating a configuration for one provider and expecting another provider to
+handle it is not supported. For example, creating profiles with the `initscripts` provider
+and later enabling NetworkManager is not guaranteed to work automatically. You may
+have to adjust the configuration so that it can be used by the other provider.
+
+For example, configuring a RHEL6 host with initscripts and upgrading to
+RHEL7 while continuing to use initscripts in RHEL7 is an acceptable scenario. What
+is not guaranteed is to upgrade to RHEL7, disable initscripts and expect NetworkManager
+to take over the configuration automatically.
+
+Depending on NetworkManager's configuration, connections may be stored as ifcfg files
+as well, but it is not guaranteed that plain initscripts can handle these ifcfg files
+after disabling the NetworkManager service.
+
+Limitations
+-----------
+
+As Ansible usually works over the network, for example via SSH, there are some limitations to be considered:
+
+The `network` role does not support bootstrapping networking configuration. One
+option may be [ansible-pull](https://docs.ansible.com/ansible/playbooks_intro.html#ansible-pull).
+Another option may be to initially auto-configure the host during installation
+(ISO based, kickstart, etc.), so that the host is connected to a management LAN
+or VLAN. It strongly depends on your environment.
+
+For the `initscripts` provider, deploying a profile merely means creating the ifcfg
+files. Nothing happens automatically until the play issues `ifup` or `ifdown`
+via the `up` or `down` [states](#state) -- unless there are other
+components that rely on the ifcfg files and react to changes.
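
A common way to work with this behavior is to deploy the profile in one run (which for `initscripts` only writes the ifcfg files) and activate it in a later run; a minimal sketch, with `prod1` and `eth1` as placeholder names:

```yaml
# First run: write the profile only; nothing is activated because
# `state` is not set (persistent_state defaults to `present`).
network_connections:
  - name: prod1
    type: ethernet
    interface_name: eth1
    ip:
      dhcp4: yes
```

```yaml
# Later run: activate the previously deployed profile
# (for the initscripts provider this corresponds to `ifup prod1`).
network_connections:
  - name: prod1
    state: up
```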
+
+The `initscripts` provider requires the different profiles to be in the right
+order when they depend on each other. For example, the bonding master device
+needs to be specified before the slave devices.
+
+When a profile is removed with NetworkManager, the connection is also taken
+down and virtual interfaces are possibly removed. With the `initscripts` provider,
+removing a profile does not change its current runtime state (this is a future
+feature for NetworkManager as well).
+
+For NetworkManager, modifying a connection with autoconnect enabled
+may result in the activation of a new profile on a previously disconnected
+interface. Also, deleting a NetworkManager connection that is currently active
+results in removing the interface. Therefore, the order of the steps matters,
+and careful handling of the [autoconnect](#autoconnect) property may be
+necessary. This should be improved in NetworkManager RFE [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515).
+
+It is difficult to change the networking of the target host in a way that breaks
+Ansible's current SSH connection. If you want to do that, ansible-pull might
+be a solution. Alternatively, a combination of `async`/`poll` with changing
+the `ansible_host` midway through the play may work.
+
+**TODO** The current role does not yet make it easy to split the
+play into a pre-configure step and a second step that activates the new configuration.
+
+In general, to successfully run the play, determine which configuration is
+active in the first place, and then carefully configure a sequence of steps to change to
+the new configuration. The actual solution depends strongly on your environment.
+
+### Handling potential problems
+
+When something goes wrong while configuring networking remotely, you might need
+physical access to the machine to recover.
+
+**TODO** NetworkManager supports a
+[checkpoint/rollback](https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.html#gdbus-method-org-freedesktop-NetworkManager.CheckpointCreate)
+feature. At the beginning of the play we could create a checkpoint, and if we lose
+connectivity due to an error, NetworkManager would automatically roll back after
+a timeout. The limitation is that this would only work with NetworkManager, and
+it is not clear that the rollback would result in a working configuration.
diff --git a/roles/linux-system-roles.network/defaults/main.yml b/roles/linux-system-roles.network/defaults/main.yml new file mode 100644 index 0000000..e5c8c6f --- /dev/null +++ b/roles/linux-system-roles.network/defaults/main.yml @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +network_connections: [] + +# Use initscripts for RHEL/CentOS < 7, nm otherwise +network_provider_os_default: "{{ + 'initscripts' if ansible_distribution in ['RedHat', 'CentOS'] and + ansible_distribution_major_version is version('7', '<') + else 'nm' }}" +# If NetworkManager.service is running, assume that 'nm' is currently in-use, +# otherwise initscripts +network_provider_current: "{{ + 'nm' if 'NetworkManager.service' in ansible_facts.services and + ansible_facts.services['NetworkManager.service']['state'] == 'running' + else 'initscripts' + }}" +# Default to the auto-detected value +network_provider: "{{ network_provider_current }}" + +# The python-gobject-base package depends on the python version and +# distribution: +# - python-gobject-base on RHEL7 (no python2-gobject-base :-/) +# - python-gobject-base or python2-gobject-base on Fedora 27 +# - python3-gobject-base on Fedora 28+ +network_service_name_default_nm: NetworkManager +network_packages_default_nm: + - ethtool + - NetworkManager + - "python{{ ansible_python['version']['major'] | replace('2', '') }}-gobject-base" + +network_service_name_default_initscripts: network + +# initscripts requires bridge-utils to manage bridges, install it when the +# 'bridge' type is used in network_connections +_network_packages_default_initscripts_bridge: ["{% if ['bridge'] in network_connections|json_query('[*][type]') and +( + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('7', '<=')) or + (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('28', '<=')) +) +%}bridge-utils{% endif %}"] +_network_packages_default_initscripts_network_scripts: ["{% +if (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('7', '<=')) or + (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('28', '<=')) +%}initscripts{% else %}network-scripts{% endif %}"] +# convert _network_packages_default_initscripts_bridge to an empty list if it +# contains only the empty string and add it to the default package list +# |select() filters the list to include only values that evaluate to true +# (the empty string is false) +# |list() converts the generator that |select() creates to a list +network_packages_default_initscripts: "{{ ['ethtool'] ++ _network_packages_default_initscripts_bridge|select()|list() ++ _network_packages_default_initscripts_network_scripts|select()|list() +}}" + + +# The user can explicitly set host variables "network_provider", +# "network_service_name" and "network_packages". +# +# Usually, the user only wants to select the "network_provider" +# (or not set it at all and let it be autodetected via the +# internal variable "{{ network_provider_current }}". Hence, +# depending on the "network_provider", a different set of +# service-name and packages is chosen. +# +# That is done via the internal "_network_provider_setup" dictionary. +# If the user doesn't explicitly set "network_service_name" or +# "network_packages" (which he usually wouldn't), then the defaults +# from "network_service_name_default_*" and "network_packages_default_*" +# apply. 
These values are hard-coded in this file, but they also could +# be overwritten as host variables or via vars/*.yml. +_network_provider_setup: + nm: + service_name: "{{ network_service_name_default_nm }}" + packages: "{{ network_packages_default_nm }}" + initscripts: + service_name: "{{ network_service_name_default_initscripts }}" + packages: "{{ network_packages_default_initscripts }}" + +network_packages: "{{ + _network_provider_setup[network_provider]['packages'] }}" +network_service_name: "{{ + _network_provider_setup[network_provider]['service_name'] }}" diff --git a/roles/linux-system-roles.network/examples/bond-with-vlan.yml b/roles/linux-system-roles.network/examples/bond-with-vlan.yml new file mode 100644 index 0000000..2e6be23 --- /dev/null +++ b/roles/linux-system-roles.network/examples/bond-with-vlan.yml @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: network-test + vars: + network_connections: + + # Create a bond profile, which is the parent of VLAN. + - name: prod2 + state: up + type: bond + interface_name: bond2 + ip: + dhcp4: no + auto6: no + bond: + mode: active-backup + miimon: 110 + + # enslave an ethernet to the bond + - name: prod2-slave1 + state: up + type: ethernet + interface_name: "{{ network_interface_name2 }}" + master: prod2 + + # on top of it, create a VLAN with ID 100 and static + # addressing + - name: prod2.100 + state: up + type: vlan + parent: prod2 + vlan_id: 100 + ip: + address: + - "192.0.2.{{ network_iphost }}/24" + + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/bridge-with-vlan.yml b/roles/linux-system-roles.network/examples/bridge-with-vlan.yml new file mode 100644 index 0000000..037ff8e --- /dev/null +++ b/roles/linux-system-roles.network/examples/bridge-with-vlan.yml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: network-test + vars: + network_connections: + + # Create a bridge profile, which is the parent of VLAN. + - name: prod2 + state: up + type: bridge + interface_name: bridge2 + ip: + dhcp4: no + auto6: no + + # enslave an ethernet to the bridge + - name: prod2-slave1 + state: up + type: ethernet + interface_name: "{{ network_interface_name2 }}" + master: prod2 + slave_type: bridge + + # on top of it, create a VLAN with ID 100 and static + # addressing + - name: prod2.100 + state: up + type: vlan + parent: prod2 + vlan_id: 100 + ip: + address: + - "192.0.2.{{ network_iphost }}/24" + + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/down-profile.yml b/roles/linux-system-roles.network/examples/down-profile.yml new file mode 120000 index 0000000..d5d2ed7 --- /dev/null +++ b/roles/linux-system-roles.network/examples/down-profile.yml @@ -0,0 +1 @@ +../tests/down-profile.yml \ No newline at end of file diff --git a/roles/linux-system-roles.network/examples/eth-simple-auto.yml b/roles/linux-system-roles.network/examples/eth-simple-auto.yml new file mode 100644 index 0000000..0ba168a --- /dev/null +++ b/roles/linux-system-roles.network/examples/eth-simple-auto.yml @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: network-test + vars: + network_connections: + + # Create one ethernet profile and activate it. + # The profile uses automatic IP addressing + # and is tied to the interface by MAC address. 
+ - name: prod1 + state: up + type: ethernet + autoconnect: yes + mac: "{{ network_mac1 }}" + mtu: 1450 + + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/eth-with-vlan.yml b/roles/linux-system-roles.network/examples/eth-with-vlan.yml new file mode 100644 index 0000000..69da673 --- /dev/null +++ b/roles/linux-system-roles.network/examples/eth-with-vlan.yml @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: network-test + vars: + network_connections: + + # Create a profile for the underlying device of the VLAN. + - name: prod2 + type: ethernet + autoconnect: no + state: up + interface_name: "{{ network_interface_name2 }}" + ip: + dhcp4: no + auto6: no + + # on top of it, create a VLAN with ID 100 and static + # addressing + - name: prod2.100 + state: up + type: vlan + parent: prod2 + vlan_id: 100 + ip: + address: + - "192.0.2.{{ network_iphost }}/24" + + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/ethtool-features-default.yml b/roles/linux-system-roles.network/examples/ethtool-features-default.yml new file mode 100644 index 0000000..78965e6 --- /dev/null +++ b/roles/linux-system-roles.network/examples/ethtool-features-default.yml @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: all + tasks: + - include_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ network_interface_name1 }}" + state: up + type: ethernet + ip: + dhcp4: "no" + auto6: "no" diff --git a/roles/linux-system-roles.network/examples/ethtool-features.yml b/roles/linux-system-roles.network/examples/ethtool-features.yml new file mode 100644 index 0000000..d8842c2 --- /dev/null +++ b/roles/linux-system-roles.network/examples/ethtool-features.yml @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: all + tasks: + - include_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ network_interface_name1 }}" + state: up + type: ethernet + ip: + dhcp4: "no" + auto6: "no" + ethtool: + features: + gro: "no" + gso: "yes" + tx-sctp-segmentation: "no" diff --git a/roles/linux-system-roles.network/examples/infiniband.yml b/roles/linux-system-roles.network/examples/infiniband.yml new file mode 100644 index 0000000..22603d9 --- /dev/null +++ b/roles/linux-system-roles.network/examples/infiniband.yml @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: network-test + vars: + network_connections: + + - name: ib0 + type: infiniband + interface_name: ib0 + + # Create a simple infiniband profile + - name: ib0-10 + interface_name: ib0.000a + type: infiniband + autoconnect: yes + infiniband_p_key: 10 + parent: ib0 + state: up + ip: + dhcp4: no + auto6: no + address: + - 198.51.100.133/30 + + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/inventory b/roles/linux-system-roles.network/examples/inventory new file mode 100644 index 0000000..52dae27 --- /dev/null +++ b/roles/linux-system-roles.network/examples/inventory @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +# an inventory for the examples. 
+[network-test] +v-rhel6 ansible_user=root network_iphost=196 network_mac1=00:00:5e:00:53:00 network_interface_name1=eth0 network_interface_name2=eth1 +v-rhel7 ansible_user=root network_iphost=97 network_mac1=00:00:5e:00:53:01 network_interface_name1=eth0 network_interface_name2=eth1 diff --git a/roles/linux-system-roles.network/examples/macvlan.yml b/roles/linux-system-roles.network/examples/macvlan.yml new file mode 100644 index 0000000..90cd09d --- /dev/null +++ b/roles/linux-system-roles.network/examples/macvlan.yml @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: network-test + vars: + network_connections: + + - name: eth0 + type: ethernet + state: up + interface_name: eth0 + ip: + address: + - 192.168.0.1/24 + + # Create a virtual ethernet card bound to eth0 + - name: veth0 + type: macvlan + state: up + parent: eth0 + macvlan: + mode: bridge + promiscuous: True + tap: False + ip: + address: + - 192.168.1.1/24 + + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/remove-profile.yml b/roles/linux-system-roles.network/examples/remove-profile.yml new file mode 120000 index 0000000..f2cf478 --- /dev/null +++ b/roles/linux-system-roles.network/examples/remove-profile.yml @@ -0,0 +1 @@ +../tests/remove-profile.yml \ No newline at end of file diff --git a/roles/linux-system-roles.network/examples/roles b/roles/linux-system-roles.network/examples/roles new file mode 120000 index 0000000..a82c5f8 --- /dev/null +++ b/roles/linux-system-roles.network/examples/roles @@ -0,0 +1 @@ +../tests/roles/ \ No newline at end of file diff --git a/roles/linux-system-roles.network/library/network_connections.py b/roles/linux-system-roles.network/library/network_connections.py new file mode 100644 index 0000000..39e81e8 --- /dev/null +++ b/roles/linux-system-roles.network/library/network_connections.py @@ -0,0 +1,2380 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: BSD-3-Clause + +import functools +import os +import socket +import time +import traceback + +# pylint: disable=import-error, no-name-in-module +from ansible.module_utils.network_lsr import MyError + +# pylint: disable=import-error +from ansible.module_utils.network_lsr.argument_validator import ( + ArgUtil, + ArgValidator_ListConnections, + ValidationError, +) + +# pylint: disable=import-error +from ansible.module_utils.network_lsr.utils import Util +from ansible.module_utils.network_lsr import nm_provider + +DOCUMENTATION = """ +--- +module: network_connections +author: "Thomas Haller (thaller@redhat.com)" +short_description: module for network role to manage connection profiles +requirements: for 'nm' provider requires pygobject, dbus and NetworkManager. +version_added: "2.0" +description: Manage networking profiles (connections) for NetworkManager and + initscripts networking providers. +options: Documentation needs to be written. Note that the network_connections + module tightly integrates with the network role and currently it is not + expected to use this module outside the role. Thus, consult README.md for + examples for the role. 
+""" + + +############################################################################### + +DEFAULT_ACTIVATION_TIMEOUT = 90 + + +class CheckMode: + PREPARE = "prepare" + DRY_RUN = "dry-run" + PRE_RUN = "pre-run" + REAL_RUN = "real-run" + DONE = "done" + + +class LogLevel: + ERROR = "error" + WARN = "warn" + INFO = "info" + DEBUG = "debug" + + @staticmethod + def fmt(level): + return "<%-6s" % (str(level) + ">") + + +# cmp() is not available in python 3 anymore +if "cmp" not in dir(__builtins__): + + def cmp(x, y): + """ + Replacement for built-in function cmp that was removed in Python 3 + + Compare the two objects x and y and return an integer according to + the outcome. The return value is negative if x < y, zero if x == y + and strictly positive if x > y. + """ + + return (x > y) - (x < y) + + +class SysUtil: + @staticmethod + def _sysctl_read(filename): + try_count = 0 + while True: + try_count += 1 + try: + with open(filename, "r") as f: + return f.read() + except Exception: + if try_count < 5: + continue + raise + + @staticmethod + def _link_read_ifindex(ifname): + c = SysUtil._sysctl_read("/sys/class/net/" + ifname + "/ifindex") + return int(c.strip()) + + @staticmethod + def _link_read_address(ifname): + c = SysUtil._sysctl_read("/sys/class/net/" + ifname + "/address") + return Util.mac_norm(c.strip()) + + @staticmethod + def _link_read_permaddress(ifname): + try: + out = Util.check_output(["ethtool", "-P", ifname]) + except MyError: + return None + import re + + m = re.match("^Permanent address: ([0-9A-Fa-f:]*)\n$", out) + if not m: + return None + return Util.mac_norm(m.group(1)) + + @staticmethod + def _link_infos_fetch(): + links = {} + for ifname in os.listdir("/sys/class/net/"): + if not os.path.islink("/sys/class/net/" + ifname): + # /sys/class/net may contain certain entries that are not + # interface names, like 'bonding_master'. Skip over files + # that are not links. + continue + links[ifname] = { + "ifindex": SysUtil._link_read_ifindex(ifname), + "ifname": ifname, + "address": SysUtil._link_read_address(ifname), + "perm-address": SysUtil._link_read_permaddress(ifname), + } + return links + + @classmethod + def link_infos(cls, refresh=False): + if refresh: + linkinfos = None + else: + linkinfos = getattr(cls, "_link_infos", None) + if linkinfos is None: + try_count = 0 + b = None + while True: + try_count += 1 + try: + # there is a race in that we lookup properties by ifname + # and interfaces can be renamed. Try to avoid that by + # fetching the info twice and repeat until we get the same + # result. + if b is None: + b = SysUtil._link_infos_fetch() + linkinfos = SysUtil._link_infos_fetch() + if linkinfos != b: + b = linkinfos + raise Exception( + "cannot read stable link-infos. 
They keep changing" + ) + except Exception: + if try_count < 50: + raise + continue + break + cls._link_infos = linkinfos + return linkinfos + + @classmethod + def link_info_find(cls, refresh=False, mac=None, ifname=None): + if mac is not None: + mac = Util.mac_norm(mac) + for li in cls.link_infos(refresh).values(): + if mac is not None and mac not in [ + li.get("perm-address", None), + li.get("address", None), + ]: + continue + if ifname is not None and ifname != li.get("ifname", None): + continue + return li + return None + + +############################################################################### + + +############################################################################### + + +class IfcfgUtil: + + FILE_TYPES = ["ifcfg", "keys", "route", "route6", "rule", "rule6"] + + @classmethod + def _file_types(cls, file_type): + if file_type is None: + return cls.FILE_TYPES + else: + return [file_type] + + @classmethod + def ifcfg_paths(cls, name, file_types=None): + paths = [] + if file_types is None: + file_types = cls.FILE_TYPES + for f in file_types: + paths.append(cls.ifcfg_path(name, f)) + return paths + + @classmethod + def ifcfg_path(cls, name, file_type=None): + n = str(name) + if not name or n == "." or n == ".." or n.find("/") != -1: + raise MyError("invalid ifcfg-name %s" % (name)) + if file_type is None: + file_type = "ifcfg" + if file_type not in cls.FILE_TYPES: + raise MyError("invalid file-type %s" % (file_type)) + return "/etc/sysconfig/network-scripts/" + file_type + "-" + n + + @classmethod + def KeyValid(cls, name): + r = getattr(cls, "_CHECKSTR_VALID_KEY", None) + if r is None: + import re + + r = re.compile("^[a-zA-Z][a-zA-Z0-9_]*$") + cls._CHECKSTR_VALID_KEY = r + return bool(r.match(name)) + + @classmethod + def ValueEscape(cls, value): + + r = getattr(cls, "_re_ValueEscape", None) + if r is None: + import re + + r = re.compile("^[a-zA-Z_0-9-.]*$") + cls._re_ValueEscape = r + + if r.match(value): + return value + + if any([ord(c) < ord(" ") for c in value]): + # needs ansic escaping due to ANSI control caracters (newline) + s = "$'" + for c in value: + if ord(c) < ord(c): + s += "\\" + str(ord(c)) + elif c == "\\" or c == "'": + s += "\\" + c + else: + # non-unicode chars are fine too to take literally + # as utf8 + s += c + s += "'" + else: + # double quoting + s = '"' + for c in value: + if c == '"' or c == "\\" or c == "$" or c == "`": + s += "\\" + c + else: + # non-unicode chars are fine too to take literally + # as utf8 + s += c + s += '"' + return s + + @classmethod + def _ifcfg_route_merge(cls, route, append_only, current): + if not append_only or current is None: + if not route: + return None + return "\n".join(route) + "\n" + + if route: + # the 'route' file is processed line by line by initscripts' + # ifup-route. Hence, the order of the route matters. + # _ifcfg_route_merge() is not sophisticated enough to understand + # pre-existing lines. It will only append lines that don't exist + # yet, which hopefully is correct. It's better to always rewrite + # the entire file with route_append_only=False. 
+ changed = False + c_lines = list(current.split("\n")) + for r in route: + if r not in c_lines: + changed = True + c_lines.append(r) + if changed: + return "\n".join(c_lines) + "\n" + + return current + + @classmethod + def ifcfg_create( + cls, connections, idx, warn_fcn=lambda msg: None, content_current=None + ): + connection = connections[idx] + ip = connection["ip"] + + ifcfg = {} + keys_file = None + route4_file = None + route6_file = None + rule4_file = None + rule6_file = None + + if ip["dhcp4_send_hostname"] is not None: + warn_fcn("ip.dhcp4_send_hostname is not supported by initscripts provider") + if ip["route_metric4"] is not None and ip["route_metric4"] >= 0: + warn_fcn("ip.route_metric4 is not supported by initscripts provider") + if ip["route_metric6"] is not None and ip["route_metric6"] >= 0: + warn_fcn("ip.route_metric6 is not supported by initscripts provider") + + ifcfg["NM_CONTROLLED"] = "no" + + if connection["autoconnect"]: + ifcfg["ONBOOT"] = "yes" + else: + ifcfg["ONBOOT"] = "no" + + ifcfg["DEVICE"] = connection["interface_name"] + + if connection["type"] == "ethernet": + ifcfg["TYPE"] = "Ethernet" + ifcfg["HWADDR"] = connection["mac"] + elif connection["type"] == "infiniband": + ifcfg["TYPE"] = "InfiniBand" + ifcfg["HWADDR"] = connection["mac"] + ifcfg["CONNECTED_MODE"] = ( + "yes" + if (connection["infiniband"]["transport_mode"] == "connected") + else "no" + ) + if connection["infiniband"]["p_key"] != -1: + ifcfg["PKEY"] = "yes" + ifcfg["PKEY_ID"] = str(connection["infiniband"]["p_key"]) + if connection["parent"]: + ifcfg["PHYSDEV"] = ArgUtil.connection_find_master( + connection["parent"], connections, idx + ) + elif connection["type"] == "bridge": + ifcfg["TYPE"] = "Bridge" + elif connection["type"] == "bond": + ifcfg["TYPE"] = "Bond" + ifcfg["BONDING_MASTER"] = "yes" + opts = ["mode=%s" % (connection["bond"]["mode"])] + if connection["bond"]["miimon"] is not None: + opts.append(" miimon=%s" % (connection["bond"]["miimon"])) + ifcfg["BONDING_OPTS"] = " ".join(opts) + elif connection["type"] == "team": + ifcfg["DEVICETYPE"] = "Team" + elif connection["type"] == "vlan": + ifcfg["VLAN"] = "yes" + ifcfg["TYPE"] = "Vlan" + ifcfg["PHYSDEV"] = ArgUtil.connection_find_master( + connection["parent"], connections, idx + ) + ifcfg["VID"] = str(connection["vlan"]["id"]) + else: + raise MyError("unsupported type %s" % (connection["type"])) + + if connection["mtu"]: + ifcfg["MTU"] = str(connection["mtu"]) + + ethtool_options = "" + if "ethernet" in connection: + if connection["ethernet"]["autoneg"] is not None: + if connection["ethernet"]["autoneg"]: + ethtool_options = "autoneg on" + else: + ethtool_options = "autoneg off speed %s duplex %s" % ( + connection["ethernet"]["speed"], + connection["ethernet"]["duplex"], + ) + + ethtool_features = connection["ethtool"]["features"] + configured_features = [] + for feature, setting in ethtool_features.items(): + value = "" + if setting: + value = "on" + elif setting is not None: + value = "off" + + if value: + configured_features.append("%s %s" % (feature, value)) + + if configured_features: + if ethtool_options: + ethtool_options += " ; " + ethtool_options += "-K %s %s" % ( + connection["interface_name"], + " ".join(configured_features), + ) + + if ethtool_options: + ifcfg["ETHTOOL_OPTS"] = ethtool_options + + if connection["master"] is not None: + m = ArgUtil.connection_find_master(connection["master"], connections, idx) + if connection["slave_type"] == "bridge": + ifcfg["BRIDGE"] = m + elif connection["slave_type"] == "bond": + 
ifcfg["MASTER"] = m + ifcfg["SLAVE"] = "yes" + elif connection["slave_type"] == "team": + ifcfg["TEAM_MASTER"] = m + if "TYPE" in ifcfg: + del ifcfg["TYPE"] + if connection["type"] != "team": + ifcfg["DEVICETYPE"] = "TeamPort" + else: + raise MyError("invalid slave_type '%s'" % (connection["slave_type"])) + + if ip["route_append_only"] and content_current: + route4_file = content_current["route"] + route6_file = content_current["route6"] + else: + if connection["zone"]: + ifcfg["ZONE"] = connection["zone"] + + addrs4 = list([a for a in ip["address"] if a["family"] == socket.AF_INET]) + addrs6 = list([a for a in ip["address"] if a["family"] == socket.AF_INET6]) + + if ip["dhcp4"]: + ifcfg["BOOTPROTO"] = "dhcp" + elif addrs4: + ifcfg["BOOTPROTO"] = "static" + else: + ifcfg["BOOTPROTO"] = "none" + for i in range(0, len(addrs4)): + addr = addrs4[i] + ifcfg["IPADDR" + ("" if i == 0 else str(i))] = addr["address"] + ifcfg["PREFIX" + ("" if i == 0 else str(i))] = str(addr["prefix"]) + if ip["gateway4"] is not None: + ifcfg["GATEWAY"] = ip["gateway4"] + + for idx, dns in enumerate(ip["dns"]): + ifcfg["DNS" + str(idx + 1)] = dns["address"] + if ip["dns_search"]: + ifcfg["DOMAIN"] = " ".join(ip["dns_search"]) + + if ip["auto6"]: + ifcfg["IPV6INIT"] = "yes" + ifcfg["IPV6_AUTOCONF"] = "yes" + elif addrs6: + ifcfg["IPV6INIT"] = "yes" + ifcfg["IPV6_AUTOCONF"] = "no" + else: + ifcfg["IPV6INIT"] = "no" + if addrs6: + ifcfg["IPVADDR"] = addrs6[0]["address"] + "/" + str(addrs6[0]["prefix"]) + if len(addrs6) > 1: + ifcfg["IPVADDR_SECONDARIES"] = " ".join( + [a["address"] + "/" + str(a["prefix"]) for a in addrs6[1:]] + ) + if ip["gateway6"] is not None: + ifcfg["IPV6_DEFAULTGW"] = ip["gateway6"] + + route4 = [] + route6 = [] + for r in ip["route"]: + line = r["network"] + "/" + str(r["prefix"]) + if r["gateway"]: + line += " via " + r["gateway"] + if r["metric"] != -1: + line += " metric " + str(r["metric"]) + + if r["family"] == socket.AF_INET: + route4.append(line) + else: + route6.append(line) + + route4_file = cls._ifcfg_route_merge( + route4, + ip["route_append_only"] and content_current, + content_current["route"] if content_current else None, + ) + route6_file = cls._ifcfg_route_merge( + route6, + ip["route_append_only"] and content_current, + content_current["route6"] if content_current else None, + ) + + if ip["rule_append_only"] and content_current: + rule4_file = content_current["rule"] + rule6_file = content_current["rule6"] + + for key in list(ifcfg.keys()): + v = ifcfg[key] + if v is None: + del ifcfg[key] + continue + if isinstance(v, bool): + ifcfg[key] = "yes" if v else "no" + + return { + "ifcfg": ifcfg, + "keys": keys_file, + "route": route4_file, + "route6": route6_file, + "rule": rule4_file, + "rule6": rule6_file, + } + + @classmethod + def ifcfg_parse_line(cls, line): + r1 = getattr(cls, "_re_parse_line1", None) + if r1 is None: + import re + import shlex + + r1 = re.compile("^[ \t]*([a-zA-Z_][a-zA-Z_0-9]*)=(.*)$") + cls._re_parse_line1 = r1 + cls._shlex = shlex + m = r1.match(line) + if not m: + return None + key = m.group(1) + val = m.group(2) + val = val.rstrip() + + # shlex isn't up to the task of parsing shell. Whatever, + # we can only parse shell to a certain degree and this is + # good enough for now. 
+ try: + c = list(cls._shlex.split(val, comments=True, posix=True)) + except Exception: + return None + if len(c) != 1: + return None + return (key, c[0]) + + @classmethod + def ifcfg_parse(cls, content): + if content is None: + return None + ifcfg = {} + for line in content.splitlines(): + val = cls.ifcfg_parse_line(line) + if val: + ifcfg[val[0]] = val[1] + return ifcfg + + @classmethod + def content_from_dict(cls, ifcfg_all, file_type=None, header=None): + content = {} + for file_type in cls._file_types(file_type): + h = ifcfg_all[file_type] + if file_type == "ifcfg": + if header is not None: + s = header + "\n" + else: + s = "" + for key in sorted(h.keys()): + value = h[key] + if not cls.KeyValid(key): + raise MyError("invalid ifcfg key %s" % (key)) + if value is not None: + s += key + "=" + cls.ValueEscape(value) + "\n" + content[file_type] = s + else: + content[file_type] = h + + return content + + @classmethod + def content_to_dict(cls, content, file_type=None): + ifcfg_all = {} + for file_type in cls._file_types(file_type): + ifcfg_all[file_type] = cls.ifcfg_parse(content[file_type]) + return ifcfg_all + + @classmethod + def content_from_file(cls, name, file_type=None): + """ + Return dictionary with all file contents for an initscripts profile + """ + content = {} + for file_type in cls._file_types(file_type): + path = cls.ifcfg_path(name, file_type) + try: + with open(path, "r") as content_file: + i_content = content_file.read() + except Exception: + i_content = None + content[file_type] = i_content + return content + + @classmethod + def content_to_file(cls, name, content, file_type=None): + for file_type in cls._file_types(file_type): + path = cls.ifcfg_path(name, file_type) + h = content[file_type] + if h is None: + try: + os.unlink(path) + except OSError as e: + import errno + + if e.errno != errno.ENOENT: + raise + else: + with open(path, "w") as text_file: + text_file.write(h) + + @classmethod + def connection_seems_active(cls, name): + # we don't know whether a ifcfg file is currently active, + # and we also don't know which. + # + # Do a very basic guess based on whether the interface + # is in operstate "up". + # + # But first we need to find the interface name. Do + # some naive parsing and check for DEVICE setting. 
+ content = cls.content_from_file(name, "ifcfg") + if content["ifcfg"] is not None: + content = cls.ifcfg_parse(content["ifcfg"]) + else: + content = {} + if "DEVICE" not in content: + return None + path = "/sys/class/net/" + content["DEVICE"] + "/operstate" + try: + with open(path, "r") as content_file: + i_content = str(content_file.read()) + except Exception: + return None + + if i_content.strip() != "up": + return False + + return True + + +############################################################################### + + +class NMUtil: + def __init__(self, nmclient=None): + if nmclient is None: + nmclient = Util.NM().Client.new(None) + self.nmclient = nmclient + + def setting_ip_config_get_routes(self, s_ip): + if s_ip is not None: + for i in range(0, s_ip.get_num_routes()): + yield s_ip.get_route(i) + + def connection_ensure_setting(self, connection, setting_type): + setting = connection.get_setting(setting_type) + if not setting: + setting = setting_type.new() + connection.add_setting(setting) + return setting + + def device_is_master_type(self, dev): + if dev: + NM = Util.NM() + GObject = Util.GObject() + if ( + GObject.type_is_a(dev, NM.DeviceBond) + or GObject.type_is_a(dev, NM.DeviceBridge) + or GObject.type_is_a(dev, NM.DeviceTeam) + ): + return True + return False + + def active_connection_list(self, connections=None, black_list=None): + active_cons = self.nmclient.get_active_connections() + if connections: + connections = set(connections) + active_cons = [ + ac for ac in active_cons if ac.get_connection() in connections + ] + if black_list: + active_cons = [ac for ac in active_cons if ac not in black_list] + return list(active_cons) + + def connection_list( + self, + name=None, + uuid=None, + black_list=None, + black_list_names=None, + black_list_uuids=None, + ): + cons = self.nmclient.get_connections() + if name is not None: + cons = [c for c in cons if c.get_id() == name] + if uuid is not None: + cons = [c for c in cons if c.get_uuid() == uuid] + + if black_list: + cons = [c for c in cons if c not in black_list] + if black_list_uuids: + cons = [c for c in cons if c.get_uuid() not in black_list_uuids] + if black_list_names: + cons = [c for c in cons if c.get_id() not in black_list_names] + + cons = list(cons) + + def _cmp(a, b): + s_a = a.get_setting_connection() + s_b = b.get_setting_connection() + if not s_a and not s_b: + return 0 + if not s_a: + return 1 + if not s_b: + return -1 + t_a = s_a.get_timestamp() + t_b = s_b.get_timestamp() + if t_a == t_b: + return 0 + if t_a <= 0: + return 1 + if t_b <= 0: + return -1 + return cmp(t_a, t_b) + + if Util.PY3: + # functools.cmp_to_key does not exist in Python 2.6 + cons.sort(key=functools.cmp_to_key(_cmp)) + else: + cons.sort(cmp=_cmp) + return cons + + def connection_compare( + self, con_a, con_b, normalize_a=False, normalize_b=False, compare_flags=None + ): + NM = Util.NM() + + if normalize_a: + con_a = NM.SimpleConnection.new_clone(con_a) + try: + con_a.normalize() + except Exception: + pass + if normalize_b: + con_b = NM.SimpleConnection.new_clone(con_b) + try: + con_b.normalize() + except Exception: + pass + if compare_flags is None: + compare_flags = NM.SettingCompareFlags.IGNORE_TIMESTAMP + + return not (not (con_a.compare(con_b, compare_flags))) + + def connection_is_active(self, con): + NM = Util.NM() + for ac in self.active_connection_list(connections=[con]): + if ( + ac.get_state() >= NM.ActiveConnectionState.ACTIVATING + and ac.get_state() <= NM.ActiveConnectionState.ACTIVATED + ): + return True + return False + + 
def connection_create(self, connections, idx, connection_current=None): + NM = Util.NM() + + connection = connections[idx] + + con = NM.SimpleConnection.new() + s_con = self.connection_ensure_setting(con, NM.SettingConnection) + + s_con.set_property(NM.SETTING_CONNECTION_ID, connection["name"]) + s_con.set_property(NM.SETTING_CONNECTION_UUID, connection["nm.uuid"]) + s_con.set_property(NM.SETTING_CONNECTION_AUTOCONNECT, connection["autoconnect"]) + s_con.set_property( + NM.SETTING_CONNECTION_INTERFACE_NAME, connection["interface_name"] + ) + + if connection["type"] == "ethernet": + s_con.set_property( + NM.SETTING_CONNECTION_TYPE, NM.SETTING_WIRED_SETTING_NAME + ) + s_wired = self.connection_ensure_setting(con, NM.SettingWired) + s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, connection["mac"]) + elif connection["type"] == "infiniband": + s_con.set_property( + NM.SETTING_CONNECTION_TYPE, NM.SETTING_INFINIBAND_SETTING_NAME + ) + s_infiniband = self.connection_ensure_setting(con, NM.SettingInfiniband) + s_infiniband.set_property( + NM.SETTING_INFINIBAND_MAC_ADDRESS, connection["mac"] + ) + s_infiniband.set_property( + NM.SETTING_INFINIBAND_TRANSPORT_MODE, + connection["infiniband"]["transport_mode"], + ) + if connection["infiniband"]["p_key"] != -1: + s_infiniband.set_property( + NM.SETTING_INFINIBAND_P_KEY, connection["infiniband"]["p_key"] + ) + if connection["parent"]: + s_infiniband.set_property( + NM.SETTING_INFINIBAND_PARENT, + ArgUtil.connection_find_master( + connection["parent"], connections, idx + ), + ) + elif connection["type"] == "bridge": + s_con.set_property( + NM.SETTING_CONNECTION_TYPE, NM.SETTING_BRIDGE_SETTING_NAME + ) + s_bridge = self.connection_ensure_setting(con, NM.SettingBridge) + s_bridge.set_property(NM.SETTING_BRIDGE_STP, False) + elif connection["type"] == "bond": + s_con.set_property(NM.SETTING_CONNECTION_TYPE, NM.SETTING_BOND_SETTING_NAME) + s_bond = self.connection_ensure_setting(con, NM.SettingBond) + s_bond.add_option("mode", connection["bond"]["mode"]) + if connection["bond"]["miimon"] is not None: + s_bond.add_option("miimon", str(connection["bond"]["miimon"])) + elif connection["type"] == "team": + s_con.set_property(NM.SETTING_CONNECTION_TYPE, NM.SETTING_TEAM_SETTING_NAME) + elif connection["type"] == "vlan": + s_con.set_property(NM.SETTING_CONNECTION_TYPE, NM.SETTING_VLAN_SETTING_NAME) + s_vlan = self.connection_ensure_setting(con, NM.SettingVlan) + s_vlan.set_property(NM.SETTING_VLAN_ID, connection["vlan"]["id"]) + s_vlan.set_property( + NM.SETTING_VLAN_PARENT, + ArgUtil.connection_find_master_uuid( + connection["parent"], connections, idx + ), + ) + elif connection["type"] == "macvlan": + s_con.set_property( + NM.SETTING_CONNECTION_TYPE, NM.SETTING_MACVLAN_SETTING_NAME + ) + # convert mode name to a number (which is actually expected by nm) + mode = connection["macvlan"]["mode"] + try: + mode_id = int(getattr(NM.SettingMacvlanMode, mode.upper())) + except AttributeError: + raise MyError("Macvlan mode '%s' is not recognized" % (mode)) + s_macvlan = self.connection_ensure_setting(con, NM.SettingMacvlan) + s_macvlan.set_property(NM.SETTING_MACVLAN_MODE, mode_id) + s_macvlan.set_property( + NM.SETTING_MACVLAN_PROMISCUOUS, connection["macvlan"]["promiscuous"] + ) + s_macvlan.set_property(NM.SETTING_MACVLAN_TAP, connection["macvlan"]["tap"]) + s_macvlan.set_property( + NM.SETTING_MACVLAN_PARENT, + ArgUtil.connection_find_master(connection["parent"], connections, idx), + ) + else: + raise MyError("unsupported type %s" % (connection["type"])) + + if 
"ethernet" in connection: + if connection["ethernet"]["autoneg"] is not None: + s_wired = self.connection_ensure_setting(con, NM.SettingWired) + s_wired.set_property( + NM.SETTING_WIRED_AUTO_NEGOTIATE, connection["ethernet"]["autoneg"] + ) + s_wired.set_property( + NM.SETTING_WIRED_DUPLEX, connection["ethernet"]["duplex"] + ) + s_wired.set_property( + NM.SETTING_WIRED_SPEED, connection["ethernet"]["speed"] + ) + + if hasattr(NM, "SettingEthtool"): + s_ethtool = self.connection_ensure_setting(con, NM.SettingEthtool) + + for feature, setting in connection["ethtool"]["features"].items(): + nm_feature = nm_provider.get_nm_ethtool_feature(feature) + + if setting is None: + if nm_feature: + s_ethtool.set_feature(nm_feature, NM.Ternary.DEFAULT) + elif setting: + s_ethtool.set_feature(nm_feature, NM.Ternary.TRUE) + else: + s_ethtool.set_feature(nm_feature, NM.Ternary.FALSE) + + if connection["mtu"]: + if connection["type"] == "infiniband": + s_infiniband = self.connection_ensure_setting(con, NM.SettingInfiniband) + s_infiniband.set_property(NM.SETTING_INFINIBAND_MTU, connection["mtu"]) + else: + s_wired = self.connection_ensure_setting(con, NM.SettingWired) + s_wired.set_property(NM.SETTING_WIRED_MTU, connection["mtu"]) + + if connection["master"] is not None: + s_con.set_property( + NM.SETTING_CONNECTION_SLAVE_TYPE, connection["slave_type"] + ) + s_con.set_property( + NM.SETTING_CONNECTION_MASTER, + ArgUtil.connection_find_master_uuid( + connection["master"], connections, idx + ), + ) + else: + if connection["zone"]: + s_con.set_property(NM.SETTING_CONNECTION_ZONE, connection["zone"]) + + ip = connection["ip"] + + s_ip4 = self.connection_ensure_setting(con, NM.SettingIP4Config) + s_ip6 = self.connection_ensure_setting(con, NM.SettingIP6Config) + + s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto") + s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto") + + addrs4 = list([a for a in ip["address"] if a["family"] == socket.AF_INET]) + addrs6 = list([a for a in ip["address"] if a["family"] == socket.AF_INET6]) + + if ip["dhcp4"]: + s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto") + s_ip4.set_property( + NM.SETTING_IP_CONFIG_DHCP_SEND_HOSTNAME, + ip["dhcp4_send_hostname"] is not False, + ) + elif addrs4: + s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, "manual") + else: + s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, "disabled") + for a in addrs4: + s_ip4.add_address( + NM.IPAddress.new(a["family"], a["address"], a["prefix"]) + ) + if ip["gateway4"] is not None: + s_ip4.set_property(NM.SETTING_IP_CONFIG_GATEWAY, ip["gateway4"]) + if ip["route_metric4"] is not None and ip["route_metric4"] >= 0: + s_ip4.set_property( + NM.SETTING_IP_CONFIG_ROUTE_METRIC, ip["route_metric4"] + ) + for d in ip["dns"]: + if d["family"] == socket.AF_INET: + s_ip4.add_dns(d["address"]) + for s in ip["dns_search"]: + s_ip4.add_dns_search(s) + + if ip["auto6"]: + s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto") + elif addrs6: + s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "manual") + else: + s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "ignore") + for a in addrs6: + s_ip6.add_address( + NM.IPAddress.new(a["family"], a["address"], a["prefix"]) + ) + if ip["gateway6"] is not None: + s_ip6.set_property(NM.SETTING_IP_CONFIG_GATEWAY, ip["gateway6"]) + if ip["route_metric6"] is not None and ip["route_metric6"] >= 0: + s_ip6.set_property( + NM.SETTING_IP_CONFIG_ROUTE_METRIC, ip["route_metric6"] + ) + for d in ip["dns"]: + if d["family"] == socket.AF_INET6: + s_ip6.add_dns(d["address"]) + + if 
ip["route_append_only"] and connection_current: + for r in self.setting_ip_config_get_routes( + connection_current.get_setting(NM.SettingIP4Config) + ): + s_ip4.add_route(r) + for r in self.setting_ip_config_get_routes( + connection_current.get_setting(NM.SettingIP6Config) + ): + s_ip6.add_route(r) + for r in ip["route"]: + rr = NM.IPRoute.new( + r["family"], r["network"], r["prefix"], r["gateway"], r["metric"] + ) + if r["family"] == socket.AF_INET: + s_ip4.add_route(rr) + else: + s_ip6.add_route(rr) + + try: + con.normalize() + except Exception as e: + raise MyError("created connection failed to normalize: %s" % (e)) + return con + + def connection_add(self, con, timeout=10): + def add_cb(client, result, cb_args): + con = None + try: + con = client.add_connection_finish(result) + except Exception as e: + if Util.error_is_cancelled(e): + return + cb_args["error"] = str(e) + cb_args["con"] = con + Util.GMainLoop().quit() + + cancellable = Util.create_cancellable() + cb_args = {} + self.nmclient.add_connection_async(con, True, cancellable, add_cb, cb_args) + if not Util.GMainLoop_run(timeout): + cancellable.cancel() + raise MyError("failure to add connection: %s" % ("timeout")) + if not cb_args.get("con", None): + raise MyError( + "failure to add connection: %s" + % (cb_args.get("error", "unknown error")) + ) + return cb_args["con"] + + def connection_update(self, con, con_new, timeout=10): + con.replace_settings_from_connection(con_new) + + def update_cb(connection, result, cb_args): + success = False + try: + success = connection.commit_changes_finish(result) + except Exception as e: + if Util.error_is_cancelled(e): + return + cb_args["error"] = str(e) + cb_args["success"] = success + Util.GMainLoop().quit() + + cancellable = Util.create_cancellable() + cb_args = {} + con.commit_changes_async(True, cancellable, update_cb, cb_args) + if not Util.GMainLoop_run(timeout): + cancellable.cancel() + raise MyError("failure to update connection: %s" % ("timeout")) + if not cb_args.get("success", False): + raise MyError( + "failure to update connection: %s" + % (cb_args.get("error", "unknown error")) + ) + return True + + def connection_delete(self, connection, timeout=10): + + # Do nothing, if the connection is already gone + if connection not in self.connection_list(): + return + + if "update2" in dir(connection): + return self.volatilize_connection(connection, timeout) + + delete_cb = Util.create_callback("delete_finish") + + cancellable = Util.create_cancellable() + cb_args = {} + connection.delete_async(cancellable, delete_cb, cb_args) + if not Util.GMainLoop_run(timeout): + cancellable.cancel() + raise MyError("failure to delete connection: %s" % ("timeout")) + if not cb_args.get("success", False): + raise MyError( + "failure to delete connection: %s" + % (cb_args.get("error", "unknown error")) + ) + + # workaround libnm oddity. The connection may not yet be gone if the + # connection was active and is deactivating. Wait. 
+ c_uuid = connection.get_uuid() + gone = self.wait_till_connection_is_gone(c_uuid) + if not gone: + raise MyError( + "connection %s was supposedly deleted successfully, but it's still here" + % (c_uuid) + ) + + def volatilize_connection(self, connection, timeout=10): + update2_cb = Util.create_callback("update2_finish") + + cancellable = Util.create_cancellable() + cb_args = {} + + connection.update2( + None, # settings + Util.NM().SettingsUpdate2Flags.IN_MEMORY_ONLY + | Util.NM().SettingsUpdate2Flags.VOLATILE, # flags + None, # args + cancellable, + update2_cb, + cb_args, + ) + + if not Util.GMainLoop_run(timeout): + cancellable.cancel() + raise MyError("failure to volatilize connection: %s" % ("timeout")) + + Util.GMainLoop_iterate_all() + + # Do not check of success if the connection does not exist anymore This + # can happen if the connection was already volatile and set to down + # during the module call + if connection not in self.connection_list(): + return + + # update2_finish returns None on failure and a GLib.Variant of type + # a{sv} with the result otherwise (which can be empty) + if cb_args.get("success", None) is None: + raise MyError( + "failure to volatilize connection: %s: %r" + % (cb_args.get("error", "unknown error"), cb_args) + ) + + def create_checkpoint(self, timeout): + """ Create a new checkpoint """ + checkpoint = Util.call_async_method( + self.nmclient, + "checkpoint_create", + [ + [], # devices, empty list is all devices + timeout, + Util.NM().CheckpointCreateFlags.DELETE_NEW_CONNECTIONS + | Util.NM().CheckpointCreateFlags.DISCONNECT_NEW_DEVICES, + ], + ) + + if checkpoint: + return checkpoint.get_path() + return None + + def destroy_checkpoint(self, path): + """ Destroy the specified checkpoint """ + Util.call_async_method(self.nmclient, "checkpoint_destroy", [path]) + + def rollback_checkpoint(self, path): + """ Rollback the specified checkpoint """ + Util.call_async_method( + self.nmclient, + "checkpoint_rollback", + [path], + mainloop_timeout=DEFAULT_ACTIVATION_TIMEOUT, + ) + + def wait_till_connection_is_gone(self, uuid, timeout=10): + """ + Wait until a connection is gone or until the timeout elapsed + + :param uuid: UUID of the connection that to wait for to be gone + :param timeout: Timeout in seconds to wait for + :returns: True when connection is gone, False when timeout elapsed + :rtype: bool + """ + + def _poll_timeout_cb(unused): + if not self.connection_list(uuid=uuid): + Util.GMainLoop().quit() + + poll_timeout_id = Util.GLib().timeout_add(100, _poll_timeout_cb, None) + gone = Util.GMainLoop_run(timeout) + Util.GLib().source_remove(poll_timeout_id) + return gone + + def connection_activate(self, connection, timeout=15, wait_time=None): + + already_retried = False + + while True: + + def activate_cb(client, result, cb_args): + active_connection = None + try: + active_connection = client.activate_connection_finish(result) + except Exception as e: + if Util.error_is_cancelled(e): + return + cb_args["error"] = str(e) + cb_args["active_connection"] = active_connection + Util.GMainLoop().quit() + + cancellable = Util.create_cancellable() + cb_args = {} + self.nmclient.activate_connection_async( + connection, None, None, cancellable, activate_cb, cb_args + ) + if not Util.GMainLoop_run(timeout): + cancellable.cancel() + raise MyError("failure to activate connection: %s" % ("timeout")) + + if cb_args.get("active_connection", None): + ac = cb_args["active_connection"] + self.connection_activate_wait(ac, wait_time) + return ac + + # there is a bug in 
NetworkManager, that the connection + # might already be in the process of activating. In that + # case, NM would reject the activation request with + # "Connection '$PROFILE' is not available on the device $DEV + # at this time." + # + # Try to work around it by waiting a bit and retrying. + if already_retried: + raise MyError( + "failure to activate connection: %s" + % (cb_args.get("error", "unknown error")) + ) + + already_retried = True + + time.sleep(1) + + def connection_activate_wait(self, ac, wait_time): + + if not wait_time: + return + + NM = Util.NM() + + state = ac.get_state() + if state == NM.ActiveConnectionState.ACTIVATED: + return + if state != NM.ActiveConnectionState.ACTIVATING: + raise MyError("activation is in unexpected state '%s'" % (state)) + + def check_activated(ac, dev): + ac_state = ac.get_state() + + # the state reason was for active-connection was introduced + # in NM 1.8 API. Work around for older library version. + try: + ac_reason = ac.get_state_reason() + except AttributeError: + ac_reason = None + + if dev: + dev_state = dev.get_state() + + if ac_state == NM.ActiveConnectionState.ACTIVATING: + if ( + self.device_is_master_type(dev) + and dev_state >= NM.DeviceState.IP_CONFIG + and dev_state <= NM.DeviceState.ACTIVATED + ): + # master connections qualify as activated once they + # reach IP-Config state. That is because they may + # wait for slave devices to attach + return True, None + # fall through + elif ac_state == NM.ActiveConnectionState.ACTIVATED: + return True, None + elif ac_state == NM.ActiveConnectionState.DEACTIVATED: + if ( + not dev + or ( + ac_reason is not None + and ac_reason + != NM.ActiveConnectionStateReason.DEVICE_DISCONNECTED + ) + or dev.get_active_connection() is not ac + ): + return ( + True, + ( + (ac_reason.value_nick if ac_reason else None) + or "unknown reason" + ), + ) + # the state of the active connection is not very helpful. + # see if the device-state is better. + if ( + dev_state <= NM.DeviceState.DISCONNECTED + or dev_state > NM.DeviceState.DEACTIVATING + ): + return ( + True, + ( + dev.get_state_reason().value_nick + or (ac_reason.value_nick if ac_reason else None) + or "unknown reason" + ), + ) + # fall through, wait longer for a better state reason. + + # wait longer. + return False, None + + dev = Util.first(ac.get_devices()) + + complete, failure_reason = check_activated(ac, dev) + + if not complete: + + cb_out = [] + + def check_activated_cb(): + complete, failure_reason = check_activated(ac, dev) + if complete: + cb_out.append(failure_reason) + Util.GMainLoop().quit() + + try: + # 'state-changed' signal is 1.8 API. 
Workaround for + # older libnm API version + ac_id = ac.connect( + "state-changed", lambda source, state, reason: check_activated_cb() + ) + except Exception: + ac_id = None + if dev: + dev_id = dev.connect( + "notify::state", lambda source, pspec: check_activated_cb() + ) + + try: + if not Util.GMainLoop_run(wait_time): + raise MyError("connection not fully activated after timeout") + finally: + if dev: + dev.handler_disconnect(dev_id) + if ac_id is not None: + ac.handler_disconnect(ac_id) + + failure_reason = cb_out[0] + + if failure_reason: + raise MyError("connection not activated: %s" % (failure_reason)) + + def active_connection_deactivate(self, ac, timeout=10, wait_time=None): + def deactivate_cb(client, result, cb_args): + success = False + try: + success = client.deactivate_connection_finish(result) + except Exception as e: + if Util.error_is_cancelled(e): + return + cb_args["error"] = str(e) + cb_args["success"] = success + Util.GMainLoop().quit() + + cancellable = Util.create_cancellable() + cb_args = {} + self.nmclient.deactivate_connection_async( + ac, cancellable, deactivate_cb, cb_args + ) + if not Util.GMainLoop_run(timeout): + cancellable.cancel() + raise MyError("failure to deactivate connection: %s" % (timeout)) + if not cb_args.get("success", False): + raise MyError( + "failure to deactivate connection: %s" + % (cb_args.get("error", "unknown error")) + ) + + self.active_connection_deactivate_wait(ac, wait_time) + return True + + def active_connection_deactivate_wait(self, ac, wait_time): + + if not wait_time: + return + + NM = Util.NM() + + def check_deactivated(ac): + return ac.get_state() >= NM.ActiveConnectionState.DEACTIVATED + + if not check_deactivated(ac): + + def check_deactivated_cb(): + if check_deactivated(ac): + Util.GMainLoop().quit() + + ac_id = ac.connect( + "notify::state", lambda source, pspec: check_deactivated_cb() + ) + + try: + if not Util.GMainLoop_run(wait_time): + raise MyError("connection not fully deactivated after timeout") + finally: + ac.handler_disconnect(ac_id) + + +############################################################################### + + +class RunEnvironment: + def __init__(self): + self._check_mode = None + + @property + def ifcfg_header(self): + return None + + def log( + self, + connections, + idx, + severity, + msg, + is_changed=False, + ignore_errors=False, + warn_traceback=False, + force_fail=False, + ): + raise NotImplementedError() + + def run_command(self, argv, encoding=None): + raise NotImplementedError() + + def _check_mode_changed(self, old_check_mode, new_check_mode, connections): + raise NotImplementedError() + + def check_mode_set(self, check_mode, connections=None): + c = self._check_mode + self._check_mode = check_mode + assert ( + (c is None and check_mode in [CheckMode.PREPARE]) + or ( + c == CheckMode.PREPARE + and check_mode in [CheckMode.PRE_RUN, CheckMode.DRY_RUN] + ) + or (c == CheckMode.PRE_RUN and check_mode in [CheckMode.REAL_RUN]) + or (c == CheckMode.REAL_RUN and check_mode in [CheckMode.DONE]) + or (c == CheckMode.DRY_RUN and check_mode in [CheckMode.DONE]) + ) + self._check_mode_changed(c, check_mode, connections) + + +class RunEnvironmentAnsible(RunEnvironment): + + ARGS = { + "ignore_errors": {"required": False, "default": False, "type": "bool"}, + "force_state_change": {"required": False, "default": False, "type": "bool"}, + "provider": {"required": True, "default": None, "type": "str"}, + "connections": {"required": False, "default": None, "type": "list"}, + } + + def __init__(self): + 
RunEnvironment.__init__(self) + self._run_results = [] + self._log_idx = 0 + + from ansible.module_utils.basic import AnsibleModule + + module = AnsibleModule(argument_spec=self.ARGS, supports_check_mode=True) + self.module = module + + @property + def ifcfg_header(self): + return "# this file was created by ansible" + + def run_command(self, argv, encoding=None): + return self.module.run_command(argv, encoding=encoding) + + def _run_results_push(self, n_connections): + c = [] + for cc in range(0, n_connections + 1): + c.append({"log": []}) + self._run_results.append(c) + + @property + def run_results(self): + return self._run_results[-1] + + def _check_mode_changed(self, old_check_mode, new_check_mode, connections): + if old_check_mode is None: + self._run_results_push(len(connections)) + elif old_check_mode == CheckMode.PREPARE: + self._run_results_push(len(self.run_results) - 1) + elif old_check_mode == CheckMode.PRE_RUN: + # when switching from RRE_RUN to REAL_RUN, we drop the run-results + # we just collected and reset to empty. The PRE_RUN succeeded. + n_connections = len(self.run_results) - 1 + del self._run_results[-1] + self._run_results_push(n_connections) + + def log( + self, + connections, + idx, + severity, + msg, + is_changed=False, + ignore_errors=False, + warn_traceback=False, + force_fail=False, + ): + assert idx >= -1 + self._log_idx += 1 + self.run_results[idx]["log"].append((severity, msg, self._log_idx)) + if severity == LogLevel.ERROR: + if force_fail or not ignore_errors: + self.fail_json( + connections, + "error: %s" % (msg), + changed=is_changed, + warn_traceback=warn_traceback, + ) + + def _complete_kwargs_loglines(self, rr, connections, idx): + if idx == len(connections): + prefix = "#" + else: + c = connections[idx] + prefix = "#%s, state:%s persistent_state:%s" % ( + idx, + c["state"], + c["persistent_state"], + ) + prefix = prefix + (", '%s'" % (c["name"])) + for r in rr["log"]: + yield (r[2], "[%03d] %s %s: %s" % (r[2], LogLevel.fmt(r[0]), prefix, r[1])) + + def _complete_kwargs(self, connections, kwargs, traceback_msg=None): + if "warnings" in kwargs: + logs = list(kwargs["warnings"]) + else: + logs = [] + + loglines = [] + for res in self._run_results: + for idx, rr in enumerate(res): + loglines.extend(self._complete_kwargs_loglines(rr, connections, idx)) + loglines.sort(key=lambda x: x[0]) + logs.extend([x[1] for x in loglines]) + if traceback_msg is not None: + logs.append(traceback_msg) + kwargs["warnings"] = logs + return kwargs + + def exit_json(self, connections, changed=False, **kwargs): + kwargs["changed"] = changed + self.module.exit_json(**self._complete_kwargs(connections, kwargs)) + + def fail_json( + self, connections, msg, changed=False, warn_traceback=False, **kwargs + ): + traceback_msg = None + if warn_traceback: + traceback_msg = "exception: %s" % (traceback.format_exc()) + kwargs["msg"] = msg + kwargs["changed"] = changed + self.module.fail_json( + **self._complete_kwargs(connections, kwargs, traceback_msg) + ) + + +############################################################################### + + +class Cmd: + def __init__( + self, + run_env, + connections_unvalidated, + connection_validator, + is_check_mode=False, + ignore_errors=False, + force_state_change=False, + ): + self.run_env = run_env + self.validate_one_type = None + self._connections_unvalidated = connections_unvalidated + self._connection_validator = connection_validator + self._is_check_mode = is_check_mode + self._ignore_errors = Util.boolean(ignore_errors) + 
self._force_state_change = Util.boolean(force_state_change) + + self._connections = None + self._connections_data = None + self._check_mode = CheckMode.PREPARE + self._is_changed_modified_system = False + + def run_command(self, argv, encoding=None): + return self.run_env.run_command(argv, encoding=encoding) + + @property + def is_changed_modified_system(self): + return self._is_changed_modified_system + + @property + def connections(self): + c = self._connections + if c is None: + try: + c = self._connection_validator.validate(self._connections_unvalidated) + except ValidationError as e: + raise MyError("configuration error: %s" % (e)) + self._connections = c + return c + + @property + def connections_data(self): + c = self._connections_data + if c is None: + assert self.check_mode in [ + CheckMode.DRY_RUN, + CheckMode.PRE_RUN, + CheckMode.REAL_RUN, + ] + c = [] + for _ in range(0, len(self.connections)): + c.append({"changed": False}) + self._connections_data = c + return c + + def connections_data_reset(self): + for c in self.connections_data: + c["changed"] = False + + def connections_data_set_changed(self, idx, changed=True): + assert self._check_mode in [ + CheckMode.PRE_RUN, + CheckMode.DRY_RUN, + CheckMode.REAL_RUN, + ] + if not changed: + return + self.connections_data[idx]["changed"] = changed + if changed and self._check_mode in [CheckMode.DRY_RUN, CheckMode.REAL_RUN]: + # we only do actual modifications during the REAL_RUN step. + # And as a special exception, during the DRY_RUN step, which + # is like REAL_RUN, except not not actually changing anything. + self._is_changed_modified_system = True + + def log_debug(self, idx, msg): + self.log(idx, LogLevel.DEBUG, msg) + + def log_info(self, idx, msg): + self.log(idx, LogLevel.INFO, msg) + + def log_warn(self, idx, msg): + self.log(idx, LogLevel.WARN, msg) + + def log_error(self, idx, msg, warn_traceback=False, force_fail=False): + self.log( + idx, + LogLevel.ERROR, + msg, + warn_traceback=warn_traceback, + force_fail=force_fail, + ) + + def log_fatal(self, idx, msg, warn_traceback=False): + self.log( + idx, LogLevel.ERROR, msg, warn_traceback=warn_traceback, force_fail=True + ) + + def log(self, idx, severity, msg, warn_traceback=False, force_fail=False): + self.run_env.log( + self.connections, + idx, + severity, + msg, + is_changed=self.is_changed_modified_system, + ignore_errors=self.connection_ignore_errors(self.connections[idx]), + warn_traceback=warn_traceback, + force_fail=force_fail, + ) + + @staticmethod + def create(provider, **kwargs): + if provider == "nm": + return Cmd_nm(**kwargs) + elif provider == "initscripts": + return Cmd_initscripts(**kwargs) + raise MyError("unsupported provider %s" % (provider)) + + def connection_force_state_change(self, connection): + v = connection["force_state_change"] + if v is not None: + return v + return self._force_state_change + + def connection_ignore_errors(self, connection): + v = connection["ignore_errors"] + if v is not None: + return v + return self._ignore_errors + + def connection_modified_earlier(self, idx): + # for index @idx, check if any of the previous profiles [0..idx[ + # modify the connection. 
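+ # An earlier profile with the same name counts as modifying when it was
+ # taken 'down', marked 'absent', or configured via 'up'/'present' and
+ # actually resulted in a change.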
+ + con = self.connections[idx] + assert con["state"] in ["up", "down"] + + # also check, if the current profile is 'up' with a 'type' (which + # possibly modifies the connection as well) + if ( + con["state"] == "up" + and "type" in con + and self.connections_data[idx]["changed"] + ): + return True + + for i in reversed(range(idx)): + c = self.connections[i] + if "name" not in c: + continue + if c["name"] != con["name"]: + continue + + c_state = c["state"] + c_pstate = c["persistent_state"] + if c_state == "up" and "type" not in c: + pass + elif c_state == "down": + return True + elif c_pstate == "absent": + return True + elif c_state == "up" or c_pstate == "present": + if self.connections_data[idx]["changed"]: + return True + + return False + + @property + def check_mode(self): + return self._check_mode + + def check_mode_next(self): + if self._check_mode == CheckMode.PREPARE: + if self._is_check_mode: + c = CheckMode.DRY_RUN + else: + c = CheckMode.PRE_RUN + elif self.check_mode == CheckMode.PRE_RUN: + self.connections_data_reset() + c = CheckMode.REAL_RUN + elif self._check_mode != CheckMode.DONE: + c = CheckMode.DONE + else: + assert False + self._check_mode = c + self.run_env.check_mode_set(c) + return c + + def run(self): + self.run_env.check_mode_set(CheckMode.PREPARE, self.connections) + for idx, connection in enumerate(self.connections): + try: + self._connection_validator.validate_connection_one( + self.validate_one_type, self.connections, idx + ) + except ValidationError as e: + self.log_fatal(idx, str(e)) + self.run_prepare() + while self.check_mode_next() != CheckMode.DONE: + if self.check_mode == CheckMode.REAL_RUN: + self.start_transaction() + + for idx, connection in enumerate(self.connections): + try: + for action in connection["actions"]: + if action == "absent": + self.run_action_absent(idx) + elif action == "present": + self.run_action_present(idx) + elif action == "up": + self.run_action_up(idx) + elif action == "down": + self.run_action_down(idx) + else: + assert False + except Exception as error: + if self.check_mode == CheckMode.REAL_RUN: + self.rollback_transaction(idx, action, error) + raise + + if self.check_mode == CheckMode.REAL_RUN: + self.finish_transaction() + + def run_prepare(self): + for idx, connection in enumerate(self.connections): + if "type" in connection and connection["check_iface_exists"]: + # when the profile is tied to a certain interface via + # 'interface_name' or 'mac', check that such an interface + # exists. + # + # This check has many flaws, as we don't check whether the + # existing interface has the right device type. Also, there is + # some ambiguity between the current MAC address and the + # permanent MAC address. 
+ li_mac = None + li_ifname = None + if connection["mac"]: + li_mac = SysUtil.link_info_find(mac=connection["mac"]) + if not li_mac: + self.log_fatal( + idx, + "profile specifies mac '%s' but no such interface exists" + % (connection["mac"]), + ) + if connection["interface_name"]: + li_ifname = SysUtil.link_info_find( + ifname=connection["interface_name"] + ) + if not li_ifname: + if connection["type"] == "ethernet": + self.log_fatal( + idx, + "profile specifies interface_name '%s' but no such " + "interface exists" % (connection["interface_name"]), + ) + elif connection["type"] == "infiniband": + if connection["infiniband"]["p_key"] != -1: + self.log_fatal( + idx, + "profile specifies interface_name '%s' but no such " + "infiniband interface exists" + % (connection["interface_name"]), + ) + if li_mac and li_ifname and li_mac != li_ifname: + self.log_fatal( + idx, + "profile specifies interface_name '%s' and mac '%s' but no " + "such interface exists" + % (connection["interface_name"], connection["mac"]), + ) + + def start_transaction(self): + """ Hook before making changes """ + + def finish_transaction(self): + """ Hook for after all changes where made successfuly """ + + def rollback_transaction(self, idx, action, error): + """ Hook if configuring a profile results in an error + + :param idx: Index of the connection that triggered the error + :param action: Action that triggered the error + :param error: The error + + :type idx: int + :type action: str + :type error: Exception + + """ + self.log_warn( + idx, "failure: %s (%s) [[%s]]" % (error, action, traceback.format_exc()) + ) + + def run_action_absent(self, idx): + raise NotImplementedError() + + def run_action_present(self, idx): + raise NotImplementedError() + + def run_action_down(self, idx): + raise NotImplementedError() + + def run_action_up(self, idx): + raise NotImplementedError() + + +############################################################################### + + +class Cmd_nm(Cmd): + def __init__(self, **kwargs): + Cmd.__init__(self, **kwargs) + self._nmutil = None + self.validate_one_type = ArgValidator_ListConnections.VALIDATE_ONE_MODE_NM + self._checkpoint = None + + @property + def nmutil(self): + if self._nmutil is None: + try: + nmclient = Util.NM().Client.new(None) + except Exception as e: + raise MyError("failure loading libnm library: %s" % (e)) + self._nmutil = NMUtil(nmclient) + return self._nmutil + + def run_prepare(self): + Cmd.run_prepare(self) + + names = {} + for idx, connection in enumerate(self.connections): + self._check_ethtool_setting_support(idx, connection) + + name = connection["name"] + if not name: + assert connection["persistent_state"] == "absent" + continue + if name in names: + exists = names[name]["nm.exists"] + uuid = names[name]["nm.uuid"] + else: + c = Util.first(self.nmutil.connection_list(name=name)) + + exists = c is not None + if c is not None: + uuid = c.get_uuid() + else: + uuid = Util.create_uuid() + names[name] = {"nm.exists": exists, "nm.uuid": uuid} + connection["nm.exists"] = exists + connection["nm.uuid"] = uuid + + def start_transaction(self): + Cmd.start_transaction(self) + self._checkpoint = self.nmutil.create_checkpoint( + len(self.connections) * DEFAULT_ACTIVATION_TIMEOUT + ) + + def rollback_transaction(self, idx, action, error): + Cmd.rollback_transaction(self, idx, action, error) + if self._checkpoint: + try: + self.nmutil.rollback_checkpoint(self._checkpoint) + finally: + self._checkpoint = None + + def finish_transaction(self): + Cmd.finish_transaction(self) + if 
self._checkpoint: + try: + self.nmutil.destroy_checkpoint(self._checkpoint) + finally: + self._checkpoint = None + + def _check_ethtool_setting_support(self, idx, connection): + """ Check if SettingEthtool support is needed and available + + If any feature is specified, the SettingEthtool setting needs to be + available. Also NM needs to know about each specified setting. Do not + check if NM knows about any defaults. + + """ + NM = Util.NM() + + # If the profile is not completely specified, for example if only the + # runtime change is specified, the ethtool subtree might be missing. + # Then no checks are required. + if "ethtool" not in connection: + return + + ethtool_features = connection["ethtool"]["features"] + specified_features = dict( + [(k, v) for k, v in ethtool_features.items() if v is not None] + ) + + if specified_features and not hasattr(NM, "SettingEthtool"): + self.log_fatal(idx, "ethtool.features specified but not supported by NM") + + for feature, setting in specified_features.items(): + nm_feature = nm_provider.get_nm_ethtool_feature(feature) + if not nm_feature: + self.log_fatal( + idx, "ethtool feature %s specified but not support by NM" % feature + ) + + def run_action_absent(self, idx): + seen = set() + name = self.connections[idx]["name"] + black_list_names = None + if not name: + name = None + black_list_names = ArgUtil.connection_get_non_absent_names(self.connections) + while True: + connections = self.nmutil.connection_list( + name=name, black_list_names=black_list_names, black_list=seen + ) + if not connections: + break + c = connections[-1] + seen.add(c) + self.log_info(idx, "delete connection %s, %s" % (c.get_id(), c.get_uuid())) + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + try: + self.nmutil.connection_delete(c) + except MyError as e: + self.log_error(idx, "delete connection failed: %s" % (e)) + if not seen: + self.log_info(idx, "no connection '%s'" % (name)) + + def run_action_present(self, idx): + connection = self.connections[idx] + con_cur = Util.first( + self.nmutil.connection_list( + name=connection["name"], uuid=connection["nm.uuid"] + ) + ) + + if not connection.get("type"): + # if the type is not specified, just check that the connection was + # found + if not con_cur: + self.log_error( + idx, "Connection not found on system and 'type' not specified" + ) + return + + con_new = self.nmutil.connection_create(self.connections, idx, con_cur) + if con_cur is None: + self.log_info( + idx, + "add connection %s, %s" % (connection["name"], connection["nm.uuid"]), + ) + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + try: + con_cur = self.nmutil.connection_add(con_new) + except MyError as e: + self.log_error(idx, "adding connection failed: %s" % (e)) + elif not self.nmutil.connection_compare(con_cur, con_new, normalize_a=True): + self.log_info( + idx, "update connection %s, %s" % (con_cur.get_id(), con_cur.get_uuid()) + ) + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + try: + self.nmutil.connection_update(con_cur, con_new) + except MyError as e: + self.log_error(idx, "updating connection failed: %s" % (e)) + else: + self.log_info( + idx, + "connection %s, %s already up to date" + % (con_cur.get_id(), con_cur.get_uuid()), + ) + + seen = set() + if con_cur is not None: + seen.add(con_cur) + + while True: + connections = self.nmutil.connection_list( + name=connection["name"], + black_list=seen, + black_list_uuids=[connection["nm.uuid"]], + ) + if 
not connections: + break + c = connections[-1] + self.log_info( + idx, "delete duplicate connection %s, %s" % (c.get_id(), c.get_uuid()) + ) + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + try: + self.nmutil.connection_delete(c) + except MyError as e: + self.log_error(idx, "delete duplicate connection failed: %s" % (e)) + seen.add(c) + + def run_action_up(self, idx): + connection = self.connections[idx] + + con = Util.first( + self.nmutil.connection_list( + name=connection["name"], uuid=connection["nm.uuid"] + ) + ) + if not con: + if self.check_mode == CheckMode.REAL_RUN: + self.log_error( + idx, + "up connection %s, %s failed: no connection" + % (connection["name"], connection["nm.uuid"]), + ) + else: + self.log_info( + idx, + "up connection %s, %s" + % (connection["name"], connection["nm.uuid"]), + ) + return + + is_active = self.nmutil.connection_is_active(con) + is_modified = self.connection_modified_earlier(idx) + force_state_change = self.connection_force_state_change(connection) + + if is_active and not force_state_change and not is_modified: + self.log_info( + idx, + "up connection %s, %s skipped because already active" + % (con.get_id(), con.get_uuid()), + ) + return + + self.log_info( + idx, + "up connection %s, %s (%s)" + % ( + con.get_id(), + con.get_uuid(), + "not-active" + if not is_active + else "is-modified" + if is_modified + else "force-state-change", + ), + ) + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + try: + ac = self.nmutil.connection_activate(con) + except MyError as e: + self.log_error(idx, "up connection failed: %s" % (e)) + + wait_time = connection["wait"] + if wait_time is None: + wait_time = DEFAULT_ACTIVATION_TIMEOUT + + try: + self.nmutil.connection_activate_wait(ac, wait_time) + except MyError as e: + self.log_error(idx, "up connection failed while waiting: %s" % (e)) + + def run_action_down(self, idx): + connection = self.connections[idx] + + cons = self.nmutil.connection_list(name=connection["name"]) + changed = False + if cons: + seen = set() + while True: + ac = Util.first( + self.nmutil.active_connection_list( + connections=cons, black_list=seen + ) + ) + if ac is None: + break + seen.add(ac) + self.log_info( + idx, "down connection %s: %s" % (connection["name"], ac.get_path()) + ) + changed = True + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + try: + self.nmutil.active_connection_deactivate(ac) + except MyError as e: + self.log_error(idx, "down connection failed: %s" % (e)) + + wait_time = connection["wait"] + if wait_time is None: + wait_time = 10 + + try: + self.nmutil.active_connection_deactivate_wait(ac, wait_time) + except MyError as e: + self.log_error( + idx, "down connection failed while waiting: %s" % (e) + ) + + cons = self.nmutil.connection_list(name=connection["name"]) + if not changed: + self.log_error( + idx, + "down connection %s failed: connection not found" + % (connection["name"]), + ) + + +############################################################################### + + +class Cmd_initscripts(Cmd): + def __init__(self, **kwargs): + Cmd.__init__(self, **kwargs) + self.validate_one_type = ( + ArgValidator_ListConnections.VALIDATE_ONE_MODE_INITSCRIPTS + ) + + def run_prepare(self): + Cmd.run_prepare(self) + for idx, connection in enumerate(self.connections): + if connection.get("type") in ["macvlan"]: + self.log_fatal( + idx, + "unsupported type %s for initscripts provider" + % (connection["type"]), + ) + + def 
check_name(self, idx, name=None): + if name is None: + name = self.connections[idx]["name"] + try: + f = IfcfgUtil.ifcfg_path(name) + except MyError: + self.log_error(idx, "invalid name %s for connection" % (name)) + return None + return f + + def run_action_absent(self, idx): + n = self.connections[idx]["name"] + name = n + if not name: + names = [] + black_list_names = ArgUtil.connection_get_non_absent_names(self.connections) + for f in os.listdir("/etc/sysconfig/network-scripts"): + if not f.startswith("ifcfg-"): + continue + name = f[6:] + if name in black_list_names: + continue + if name == "lo": + continue + names.append(name) + else: + if not self.check_name(idx): + return + names = [name] + + changed = False + for name in names: + for path in IfcfgUtil.ifcfg_paths(name): + if not os.path.isfile(path): + continue + changed = True + self.log_info(idx, "delete ifcfg-rh file '%s'" % (path)) + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + try: + os.unlink(path) + except Exception as e: + self.log_error( + idx, "delete ifcfg-rh file '%s' failed: %s" % (path, e) + ) + + if not changed: + self.log_info( + idx, + "delete ifcfg-rh files for %s (no files present)" + % ("'" + n + "'" if n else "*"), + ) + + def run_action_present(self, idx): + if not self.check_name(idx): + return + + connection = self.connections[idx] + name = connection["name"] + + old_content = IfcfgUtil.content_from_file(name) + + if not connection.get("type"): + # if the type is not specified, just check that the connection was + # found + if not old_content.get("ifcfg"): + self.log_error( + idx, "Connection not found on system and 'type' not present" + ) + return + + ifcfg_all = IfcfgUtil.ifcfg_create( + self.connections, idx, lambda msg: self.log_warn(idx, msg), old_content + ) + + new_content = IfcfgUtil.content_from_dict( + ifcfg_all, header=self.run_env.ifcfg_header + ) + + if old_content == new_content: + self.log_info(idx, "ifcfg-rh profile '%s' already up to date" % (name)) + return + + op = "add" if (old_content["ifcfg"] is None) else "update" + + self.log_info(idx, "%s ifcfg-rh profile '%s'" % (op, name)) + + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + try: + IfcfgUtil.content_to_file(name, new_content) + except MyError as e: + self.log_error( + idx, "%s ifcfg-rh profile '%s' failed: %s" % (op, name, e) + ) + + def _run_action_updown(self, idx, do_up): + if not self.check_name(idx): + return + + connection = self.connections[idx] + name = connection["name"] + + if connection["wait"] is not None: + # initscripts don't support wait, they always block until the ifup/ifdown + # command completes. Silently ignore the argument. 
+ pass + + path = IfcfgUtil.ifcfg_path(name) + if not os.path.isfile(path): + if self.check_mode == CheckMode.REAL_RUN: + self.log_error(idx, "ifcfg file '%s' does not exist" % (path)) + else: + self.log_info( + idx, "ifcfg file '%s' does not exist in check mode" % (path) + ) + return + + is_active = IfcfgUtil.connection_seems_active(name) + is_modified = self.connection_modified_earlier(idx) + force_state_change = self.connection_force_state_change(connection) + + if do_up: + if is_active is True and not force_state_change and not is_modified: + self.log_info( + idx, "up connection %s skipped because already active" % (name) + ) + return + + self.log_info( + idx, + "up connection %s (%s)" + % ( + name, + "not-active" + if is_active is not True + else "is-modified" + if is_modified + else "force-state-change", + ), + ) + cmd = "ifup" + else: + if is_active is False and not force_state_change: + self.log_info( + idx, "down connection %s skipped because not active" % (name) + ) + return + + self.log_info( + idx, + "up connection %s (%s)" + % (name, "active" if is_active is not False else "force-state-change"), + ) + cmd = "ifdown" + + self.connections_data_set_changed(idx) + if self.check_mode == CheckMode.REAL_RUN: + rc, out, err = self.run_env.run_command([cmd, name]) + self.log_info( + idx, + "call '%s %s': rc=%d, out='%s', err='%s'" % (cmd, name, rc, out, err), + ) + if rc != 0: + self.log_error( + idx, "call '%s %s' failed with exit status %d" % (cmd, name, rc) + ) + + def run_action_up(self, idx): + self._run_action_updown(idx, True) + + def run_action_down(self, idx): + self._run_action_updown(idx, False) + + +############################################################################### + + +def main(): + connections = None + cmd = None + run_env_ansible = RunEnvironmentAnsible() + try: + params = run_env_ansible.module.params + cmd = Cmd.create( + params["provider"], + run_env=run_env_ansible, + connections_unvalidated=params["connections"], + connection_validator=ArgValidator_ListConnections(), + is_check_mode=run_env_ansible.module.check_mode, + ignore_errors=params["ignore_errors"], + force_state_change=params["force_state_change"], + ) + connections = cmd.connections + cmd.run() + except Exception as e: + run_env_ansible.fail_json( + connections, + "fatal error: %s" % (e), + changed=(cmd is not None and cmd.is_changed_modified_system), + warn_traceback=not isinstance(e, MyError), + ) + run_env_ansible.exit_json( + connections, changed=(cmd is not None and cmd.is_changed_modified_system) + ) + + +if __name__ == "__main__": + main() diff --git a/roles/linux-system-roles.network/meta/.galaxy_install_info b/roles/linux-system-roles.network/meta/.galaxy_install_info new file mode 100644 index 0000000..d30e58b --- /dev/null +++ b/roles/linux-system-roles.network/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Wed Jul 1 18:41:54 2020 +version: 1.1.0 diff --git a/roles/linux-system-roles.network/meta/main.yml b/roles/linux-system-roles.network/meta/main.yml new file mode 100644 index 0000000..b350d57 --- /dev/null +++ b/roles/linux-system-roles.network/meta/main.yml @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +galaxy_info: + author: Thomas Haller , Till Maas + description: Configure networking + company: Red Hat, Inc. 
+ license: BSD-3-Clause + min_ansible_version: 2.5 + galaxy_tags: + - centos + - fedora + - network + - networking + - redhat + - rhel + - system + platforms: + - name: Fedora + versions: + - 28 + - 29 + - 30 + - name: EL + versions: + - 6 + - 7 + - 8 diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/__init__.py b/roles/linux-system-roles.network/module_utils/network_lsr/__init__.py new file mode 100644 index 0000000..22c717c --- /dev/null +++ b/roles/linux-system-roles.network/module_utils/network_lsr/__init__.py @@ -0,0 +1,7 @@ +#!/usr/bin/python3 -tt +# vim: fileencoding=utf8 +# SPDX-License-Identifier: BSD-3-Clause + + +class MyError(Exception): + pass diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py b/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py new file mode 100644 index 0000000..b58ec67 --- /dev/null +++ b/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py @@ -0,0 +1,1210 @@ +#!/usr/bin/python3 -tt +# vim: fileencoding=utf8 +# SPDX-License-Identifier: BSD-3-Clause + +import socket + +# pylint: disable=import-error, no-name-in-module +from ansible.module_utils.network_lsr import MyError +from ansible.module_utils.network_lsr.utils import Util + + +class ArgUtil: + @staticmethod + def connection_find_by_name(name, connections, n_connections=None): + if not name: + raise ValueError("missing name argument") + c = None + for idx, connection in enumerate(connections): + if n_connections is not None and idx >= n_connections: + break + if "name" not in connection or name != connection["name"]: + continue + + if connection["persistent_state"] == "absent": + c = None + elif connection["persistent_state"] == "present": + c = connection + return c + + @staticmethod + def connection_find_master(name, connections, n_connections=None): + c = ArgUtil.connection_find_by_name(name, connections, n_connections) + if not c: + raise MyError("invalid master/parent '%s'" % (name)) + if c["interface_name"] is None: + raise MyError( + "invalid master/parent '%s' which needs an 'interface_name'" % (name) + ) + if not Util.ifname_valid(c["interface_name"]): + raise MyError( + "invalid master/parent '%s' with invalid 'interface_name' ('%s')" + % (name, c["interface_name"]) + ) + return c["interface_name"] + + @staticmethod + def connection_find_master_uuid(name, connections, n_connections=None): + c = ArgUtil.connection_find_by_name(name, connections, n_connections) + if not c: + raise MyError("invalid master/parent '%s'" % (name)) + return c["nm.uuid"] + + @staticmethod + def connection_get_non_absent_names(connections): + # @idx is the index with state['absent']. This will + # return the names of all explicitly mentioned profiles. + # That is, the names of profiles that should not be deleted. 
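+ # Every profile that carries a non-empty 'name' ends up in the result,
+ # regardless of its own state, so a wildcard 'absent' run never deletes
+ # profiles that the same run still names explicitly.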
+ result = set() + for connection in connections: + if "name" not in connection: + continue + if not connection["name"]: + continue + result.add(connection["name"]) + return result + + +class ValidationError(MyError): + def __init__(self, name, message): + Exception.__init__(self, name + ": " + message) + self.error_message = message + self.name = name + + @staticmethod + def from_connection(idx, message): + return ValidationError("connection[" + str(idx) + "]", message) + + +class ArgValidator: + MISSING = object() + DEFAULT_SENTINEL = object() + + def __init__(self, name=None, required=False, default_value=None): + self.name = name + self.required = required + self.default_value = default_value + + def get_default_value(self): + try: + return self.default_value() + except Exception: # pylint: disable=broad-except + return self.default_value + + def validate(self, value): + return self._validate(value, self.name) + + def _validate(self, value, name): + validated = self._validate_impl(value, name) + return self._validate_post(value, name, validated) + + def _validate_impl(self, value, name): + raise NotImplementedError() + + # pylint: disable=unused-argument,no-self-use + def _validate_post(self, value, name, result): + return result + + +class ArgValidatorStr(ArgValidator): + def __init__( # pylint: disable=too-many-arguments + self, + name, + required=False, + default_value=None, + enum_values=None, + allow_empty=False, + ): + ArgValidator.__init__(self, name, required, default_value) + self.enum_values = enum_values + self.allow_empty = allow_empty + + def _validate_impl(self, value, name): + if not isinstance(value, Util.STRING_TYPE): + raise ValidationError(name, "must be a string but is '%s'" % (value)) + value = str(value) + if self.enum_values is not None and value not in self.enum_values: + raise ValidationError( + name, + "is '%s' but must be one of '%s'" + % (value, "' '".join(sorted(self.enum_values))), + ) + if not self.allow_empty and not value: + raise ValidationError(name, "cannot be empty") + return value + + +class ArgValidatorNum(ArgValidator): + def __init__( # pylint: disable=too-many-arguments + self, + name, + required=False, + val_min=None, + val_max=None, + default_value=ArgValidator.DEFAULT_SENTINEL, + numeric_type=int, + ): + ArgValidator.__init__( + self, + name, + required, + numeric_type(0) + if default_value is ArgValidator.DEFAULT_SENTINEL + else default_value, + ) + self.val_min = val_min + self.val_max = val_max + self.numeric_type = numeric_type + + def _validate_impl(self, value, name): + v = None + try: + if isinstance(value, self.numeric_type): + v = value + else: + v2 = self.numeric_type(value) + if isinstance(value, Util.STRING_TYPE) or v2 == value: + v = v2 + except Exception: + pass + if v is None: + raise ValidationError( + name, "must be an integer number but is '%s'" % (value) + ) + if self.val_min is not None and v < self.val_min: + raise ValidationError( + name, "value is %s but cannot be less then %s" % (value, self.val_min) + ) + if self.val_max is not None and v > self.val_max: + raise ValidationError( + name, + "value is %s but cannot be greater then %s" % (value, self.val_max), + ) + return v + + +class ArgValidatorBool(ArgValidator): + def __init__(self, name, required=False, default_value=False): + ArgValidator.__init__(self, name, required, default_value) + + def _validate_impl(self, value, name): + try: + if isinstance(value, bool): + return value + if isinstance(value, Util.STRING_TYPE) or isinstance(value, int): + return 
Util.boolean(value) + except Exception: + pass + raise ValidationError(name, "must be an boolean but is '%s'" % (value)) + + +class ArgValidatorDict(ArgValidator): + def __init__( + self, + name=None, + required=False, + nested=None, + default_value=None, + all_missing_during_validate=False, + ): + ArgValidator.__init__(self, name, required, default_value) + if nested is not None: + self.nested = dict([(v.name, v) for v in nested]) + else: + self.nested = {} + self.all_missing_during_validate = all_missing_during_validate + + def _validate_impl(self, value, name): + result = {} + seen_keys = set() + try: + items = list(value.items()) + except AttributeError: + raise ValidationError(name, "invalid content is not a dictionary") + for (k, v) in items: + if k in seen_keys: + raise ValidationError(name, "duplicate key '%s'" % (k)) + seen_keys.add(k) + validator = self.nested.get(k, None) + if validator is None: + raise ValidationError(name, "invalid key '%s'" % (k)) + try: + vv = validator._validate(v, name + "." + k) + except ValidationError as e: + raise ValidationError(e.name, e.error_message) + result[k] = vv + for (k, v) in self.nested.items(): + if k in seen_keys: + continue + if v.required: + raise ValidationError(name, "missing required key '%s'" % (k)) + vv = v.get_default_value() + if not self.all_missing_during_validate and vv is not ArgValidator.MISSING: + result[k] = vv + return result + + +class ArgValidatorList(ArgValidator): + def __init__(self, name, nested, default_value=None): + ArgValidator.__init__(self, name, required=False, default_value=default_value) + self.nested = nested + + def _validate_impl(self, value, name): + + if isinstance(value, Util.STRING_TYPE): + # we expect a list. However, for convenience allow to + # specify a string, separated by space. Escaping is + # not supported. If you need that, define a proper list. 
+ value = [s for s in value.split(" ") if s] + + result = [] + for (idx, v) in enumerate(value): + try: + vv = self.nested._validate(v, name + "[" + str(idx) + "]") + except ValidationError as e: + raise ValidationError(e.name, e.error_message) + result.append(vv) + return result + + +class ArgValidatorIP(ArgValidatorStr): + def __init__( + self, name, family=None, required=False, default_value=None, plain_address=True + ): + ArgValidatorStr.__init__(self, name, required, default_value, None) + self.family = family + self.plain_address = plain_address + + def _validate_impl(self, value, name): + v = ArgValidatorStr._validate_impl(self, value, name) + try: + addr, family = Util.parse_ip(v, self.family) + except Exception: + raise ValidationError( + name, + "value '%s' is not a valid IP%s address" + % (value, Util.addr_family_to_v(self.family)), + ) + if self.plain_address: + return addr + return {"family": family, "address": addr} + + +class ArgValidatorMac(ArgValidatorStr): + def __init__(self, name, force_len=None, required=False, default_value=None): + ArgValidatorStr.__init__(self, name, required, default_value, None) + self.force_len = force_len + + def _validate_impl(self, value, name): + v = ArgValidatorStr._validate_impl(self, value, name) + try: + addr = Util.mac_aton(v, self.force_len) + except MyError: + raise ValidationError( + name, "value '%s' is not a valid MAC address" % (value) + ) + if not addr: + raise ValidationError( + name, "value '%s' is not a valid MAC address" % (value) + ) + return Util.mac_ntoa(addr) + + +class ArgValidatorIPAddr(ArgValidatorDict): + def __init__(self, name, family=None, required=False, default_value=None): + ArgValidatorDict.__init__( + self, + name, + required, + nested=[ + ArgValidatorIP( + "address", family=family, required=True, plain_address=False + ), + ArgValidatorNum("prefix", default_value=None, val_min=0), + ], + ) + self.family = family + + def _validate_impl(self, value, name): + if isinstance(value, Util.STRING_TYPE): + v = str(value) + if not v: + raise ValidationError(name, "cannot be empty") + try: + return Util.parse_address(v, self.family) + except Exception: + raise ValidationError( + name, + "value '%s' is not a valid IP%s address with prefix length" + % (value, Util.addr_family_to_v(self.family)), + ) + v = ArgValidatorDict._validate_impl(self, value, name) + return { + "address": v["address"]["address"], + "family": v["address"]["family"], + "prefix": v["prefix"], + } + + def _validate_post(self, value, name, result): + family = result["family"] + prefix = result["prefix"] + if prefix is None: + prefix = Util.addr_family_default_prefix(family) + result["prefix"] = prefix + elif not Util.addr_family_valid_prefix(family, prefix): + raise ValidationError(name, "invalid prefix %s in '%s'" % (prefix, value)) + return result + + +class ArgValidatorIPRoute(ArgValidatorDict): + def __init__(self, name, family=None, required=False, default_value=None): + ArgValidatorDict.__init__( + self, + name, + required, + nested=[ + ArgValidatorIP( + "network", family=family, required=True, plain_address=False + ), + ArgValidatorNum("prefix", default_value=None, val_min=0), + ArgValidatorIP( + "gateway", family=family, default_value=None, plain_address=False + ), + ArgValidatorNum( + "metric", default_value=-1, val_min=-1, val_max=0xFFFFFFFF + ), + ], + ) + self.family = family + + def _validate_post(self, value, name, result): + network = result["network"] + + family = network["family"] + result["network"] = network["address"] + 
result["family"] = family + + gateway = result["gateway"] + if gateway is not None: + if family != gateway["family"]: + raise ValidationError( + name, + "conflicting address family between network and gateway '%s'" + % (gateway["address"]), + ) + result["gateway"] = gateway["address"] + + prefix = result["prefix"] + if prefix is None: + prefix = Util.addr_family_default_prefix(family) + result["prefix"] = prefix + elif not Util.addr_family_valid_prefix(family, prefix): + raise ValidationError(name, "invalid prefix %s in '%s'" % (prefix, value)) + + return result + + +class ArgValidator_DictIP(ArgValidatorDict): + def __init__(self): + ArgValidatorDict.__init__( + self, + name="ip", + nested=[ + ArgValidatorBool("dhcp4", default_value=None), + ArgValidatorBool("dhcp4_send_hostname", default_value=None), + ArgValidatorIP("gateway4", family=socket.AF_INET), + ArgValidatorNum( + "route_metric4", val_min=-1, val_max=0xFFFFFFFF, default_value=None + ), + ArgValidatorBool("auto6", default_value=None), + ArgValidatorIP("gateway6", family=socket.AF_INET6), + ArgValidatorNum( + "route_metric6", val_min=-1, val_max=0xFFFFFFFF, default_value=None + ), + ArgValidatorList( + "address", + nested=ArgValidatorIPAddr("address[?]"), + default_value=list, + ), + ArgValidatorList( + "route", nested=ArgValidatorIPRoute("route[?]"), default_value=list + ), + ArgValidatorBool("route_append_only"), + ArgValidatorBool("rule_append_only"), + ArgValidatorList( + "dns", + nested=ArgValidatorIP("dns[?]", plain_address=False), + default_value=list, + ), + ArgValidatorList( + "dns_search", + nested=ArgValidatorStr("dns_search[?]"), + default_value=list, + ), + ], + default_value=lambda: { + "dhcp4": True, + "dhcp4_send_hostname": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "gateway6": None, + "route_metric6": None, + "address": [], + "route": [], + "route_append_only": False, + "rule_append_only": False, + "dns": [], + "dns_search": [], + }, + ) + + def _validate_post(self, value, name, result): + if result["dhcp4"] is None: + result["dhcp4"] = result["dhcp4_send_hostname"] is not None or not any( + [a for a in result["address"] if a["family"] == socket.AF_INET] + ) + if result["auto6"] is None: + result["auto6"] = not any( + [a for a in result["address"] if a["family"] == socket.AF_INET6] + ) + if result["dhcp4_send_hostname"] is not None: + if not result["dhcp4"]: + raise ValidationError( + name, "'dhcp4_send_hostname' is only valid if 'dhcp4' is enabled" + ) + return result + + +class ArgValidator_DictEthernet(ArgValidatorDict): + def __init__(self): + ArgValidatorDict.__init__( + self, + name="ethernet", + nested=[ + ArgValidatorBool("autoneg", default_value=None), + ArgValidatorNum( + "speed", val_min=0, val_max=0xFFFFFFFF, default_value=0 + ), + ArgValidatorStr( + "duplex", enum_values=["half", "full"], default_value=None + ), + ], + default_value=ArgValidator.MISSING, + ) + + def get_default_ethernet(self): + return dict([(k, v.default_value) for k, v in self.nested.items()]) + + def _validate_post(self, value, name, result): + has_speed_or_duplex = result["speed"] != 0 or result["duplex"] is not None + if result["autoneg"] is None: + if has_speed_or_duplex: + result["autoneg"] = False + elif result["autoneg"]: + if has_speed_or_duplex: + raise ValidationError( + name, + "cannot specify '%s' with 'autoneg' enabled" + % ("duplex" if result["duplex"] is not None else "speed"), + ) + else: + if not has_speed_or_duplex: + raise ValidationError( + name, "need to specify 'duplex' and 'speed' 
with 'autoneg' enabled" + ) + if has_speed_or_duplex and (result["speed"] == 0 or result["duplex"] is None): + raise ValidationError( + name, + "need to specify both 'speed' and 'duplex' with 'autoneg' disabled", + ) + return result + + +class ArgValidator_DictEthtool(ArgValidatorDict): + def __init__(self): + ArgValidatorDict.__init__( + self, + name="ethtool", + nested=[ArgValidator_DictEthtoolFeatures()], + default_value=ArgValidator.MISSING, + ) + + self.default_value = dict( + [(k, v.default_value) for k, v in self.nested.items()] + ) + + +class ArgValidator_DictEthtoolFeatures(ArgValidatorDict): + # List of features created with: + # nmcli connection modify "virbr0" ethtool.feature- on |& \ + # sed -e 's_[,:]_\n_g' | \ # split output in newlines + # grep ^\ f | \ # select only lines starting with " f" + # tr -d " ." | \ # remove spaces and fullstops + # sed -e 's,feature-,ArgValidatorBool(",' \ # add Python code + # -e 's/$/", default_value=None)],/' + def __init__(self): + ArgValidatorDict.__init__( + self, + name="features", + nested=[ + ArgValidatorBool("esp-hw-offload", default_value=None), + ArgValidatorBool("esp-tx-csum-hw-offload", default_value=None), + ArgValidatorBool("fcoe-mtu", default_value=None), + ArgValidatorBool("gro", default_value=None), + ArgValidatorBool("gso", default_value=None), + ArgValidatorBool("highdma", default_value=None), + ArgValidatorBool("hw-tc-offload", default_value=None), + ArgValidatorBool("l2-fwd-offload", default_value=None), + ArgValidatorBool("loopback", default_value=None), + ArgValidatorBool("lro", default_value=None), + ArgValidatorBool("ntuple", default_value=None), + ArgValidatorBool("rx", default_value=None), + ArgValidatorBool("rxhash", default_value=None), + ArgValidatorBool("rxvlan", default_value=None), + ArgValidatorBool("rx-all", default_value=None), + ArgValidatorBool("rx-fcs", default_value=None), + ArgValidatorBool("rx-gro-hw", default_value=None), + ArgValidatorBool("rx-udp_tunnel-port-offload", default_value=None), + ArgValidatorBool("rx-vlan-filter", default_value=None), + ArgValidatorBool("rx-vlan-stag-filter", default_value=None), + ArgValidatorBool("rx-vlan-stag-hw-parse", default_value=None), + ArgValidatorBool("sg", default_value=None), + ArgValidatorBool("tls-hw-record", default_value=None), + ArgValidatorBool("tls-hw-tx-offload", default_value=None), + ArgValidatorBool("tso", default_value=None), + ArgValidatorBool("tx", default_value=None), + ArgValidatorBool("txvlan", default_value=None), + ArgValidatorBool("tx-checksum-fcoe-crc", default_value=None), + ArgValidatorBool("tx-checksum-ipv4", default_value=None), + ArgValidatorBool("tx-checksum-ipv6", default_value=None), + ArgValidatorBool("tx-checksum-ip-generic", default_value=None), + ArgValidatorBool("tx-checksum-sctp", default_value=None), + ArgValidatorBool("tx-esp-segmentation", default_value=None), + ArgValidatorBool("tx-fcoe-segmentation", default_value=None), + ArgValidatorBool("tx-gre-csum-segmentation", default_value=None), + ArgValidatorBool("tx-gre-segmentation", default_value=None), + ArgValidatorBool("tx-gso-partial", default_value=None), + ArgValidatorBool("tx-gso-robust", default_value=None), + ArgValidatorBool("tx-ipxip4-segmentation", default_value=None), + ArgValidatorBool("tx-ipxip6-segmentation", default_value=None), + ArgValidatorBool("tx-nocache-copy", default_value=None), + ArgValidatorBool("tx-scatter-gather", default_value=None), + ArgValidatorBool("tx-scatter-gather-fraglist", default_value=None), + ArgValidatorBool("tx-sctp-segmentation", 
default_value=None), + ArgValidatorBool("tx-tcp6-segmentation", default_value=None), + ArgValidatorBool("tx-tcp-ecn-segmentation", default_value=None), + ArgValidatorBool("tx-tcp-mangleid-segmentation", default_value=None), + ArgValidatorBool("tx-tcp-segmentation", default_value=None), + ArgValidatorBool("tx-udp-segmentation", default_value=None), + ArgValidatorBool("tx-udp_tnl-csum-segmentation", default_value=None), + ArgValidatorBool("tx-udp_tnl-segmentation", default_value=None), + ArgValidatorBool("tx-vlan-stag-hw-insert", default_value=None), + ], + ) + self.default_value = dict( + [(k, v.default_value) for k, v in self.nested.items()] + ) + + +class ArgValidator_DictBond(ArgValidatorDict): + + VALID_MODES = [ + "balance-rr", + "active-backup", + "balance-xor", + "broadcast", + "802.3ad", + "balance-tlb", + "balance-alb", + ] + + def __init__(self): + ArgValidatorDict.__init__( + self, + name="bond", + nested=[ + ArgValidatorStr("mode", enum_values=ArgValidator_DictBond.VALID_MODES), + ArgValidatorNum( + "miimon", val_min=0, val_max=1000000, default_value=None + ), + ], + default_value=ArgValidator.MISSING, + ) + + def get_default_bond(self): + return {"mode": ArgValidator_DictBond.VALID_MODES[0], "miimon": None} + + +class ArgValidator_DictInfiniband(ArgValidatorDict): + def __init__(self): + ArgValidatorDict.__init__( + self, + name="infiniband", + nested=[ + ArgValidatorStr( + "transport_mode", enum_values=["datagram", "connected"] + ), + ArgValidatorNum("p_key", val_min=-1, val_max=0xFFFF, default_value=-1), + ], + default_value=ArgValidator.MISSING, + ) + + def get_default_infiniband(self): + return {"transport_mode": "datagram", "p_key": -1} + + +class ArgValidator_DictVlan(ArgValidatorDict): + def __init__(self): + ArgValidatorDict.__init__( + self, + name="vlan", + nested=[ArgValidatorNum("id", val_min=0, val_max=4094, required=True)], + default_value=ArgValidator.MISSING, + ) + + def get_default_vlan(self): + return {"id": None} + + +class ArgValidator_DictMacvlan(ArgValidatorDict): + + VALID_MODES = ["vepa", "bridge", "private", "passthru", "source"] + + def __init__(self): + ArgValidatorDict.__init__( + self, + name="macvlan", + nested=[ + ArgValidatorStr( + "mode", + enum_values=ArgValidator_DictMacvlan.VALID_MODES, + default_value="bridge", + ), + ArgValidatorBool("promiscuous", default_value=True), + ArgValidatorBool("tap", default_value=False), + ], + default_value=ArgValidator.MISSING, + ) + + def get_default_macvlan(self): + return {"mode": "bridge", "promiscuous": True, "tap": False} + + def _validate_post(self, value, name, result): + if result["promiscuous"] is False and result["mode"] != "passthru": + raise ValidationError( + name, "non promiscuous operation is allowed only in passthru mode" + ) + return result + + +class ArgValidator_DictConnection(ArgValidatorDict): + + VALID_PERSISTENT_STATES = ["absent", "present"] + VALID_STATES = VALID_PERSISTENT_STATES + ["up", "down"] + VALID_TYPES = [ + "ethernet", + "infiniband", + "bridge", + "team", + "bond", + "vlan", + "macvlan", + ] + VALID_SLAVE_TYPES = ["bridge", "bond", "team"] + + def __init__(self): + ArgValidatorDict.__init__( + self, + name="connection", + nested=[ + ArgValidatorStr("name"), + ArgValidatorStr( + "state", enum_values=ArgValidator_DictConnection.VALID_STATES + ), + ArgValidatorStr( + "persistent_state", + enum_values=ArgValidator_DictConnection.VALID_PERSISTENT_STATES, + ), + ArgValidatorBool("force_state_change", default_value=None), + ArgValidatorNum( + "wait", + val_min=0, + val_max=3600, + 
numeric_type=float, + default_value=None, + ), + ArgValidatorStr( + "type", enum_values=ArgValidator_DictConnection.VALID_TYPES + ), + ArgValidatorBool("autoconnect", default_value=True), + ArgValidatorStr( + "slave_type", + enum_values=ArgValidator_DictConnection.VALID_SLAVE_TYPES, + ), + ArgValidatorStr("master"), + ArgValidatorStr("interface_name", allow_empty=True), + ArgValidatorMac("mac"), + ArgValidatorNum( + "mtu", val_min=0, val_max=0xFFFFFFFF, default_value=None + ), + ArgValidatorStr("zone"), + ArgValidatorBool("check_iface_exists", default_value=True), + ArgValidatorStr("parent"), + ArgValidatorBool("ignore_errors", default_value=None), + ArgValidator_DictIP(), + ArgValidator_DictEthernet(), + ArgValidator_DictEthtool(), + ArgValidator_DictBond(), + ArgValidator_DictInfiniband(), + ArgValidator_DictVlan(), + ArgValidator_DictMacvlan(), + # deprecated options: + ArgValidatorStr( + "infiniband_transport_mode", + enum_values=["datagram", "connected"], + default_value=ArgValidator.MISSING, + ), + ArgValidatorNum( + "infiniband_p_key", + val_min=-1, + val_max=0xFFFF, + default_value=ArgValidator.MISSING, + ), + ArgValidatorNum( + "vlan_id", + val_min=0, + val_max=4094, + default_value=ArgValidator.MISSING, + ), + ], + default_value=dict, + all_missing_during_validate=True, + ) + + # valid field based on specified state, used to set defaults and reject + # bad values + self.VALID_FIELDS = [] + + def _validate_post_state(self, value, name, result): + """ + Validate state definitions and create a corresponding list of actions. + """ + actions = [] + state = result.get("state") + if state in self.VALID_PERSISTENT_STATES: + del result["state"] + persistent_state_default = state + state = None + else: + persistent_state_default = None + + persistent_state = result.get("persistent_state", persistent_state_default) + + # default persistent_state to present (not done via default_value in the + # ArgValidatorStr, the value will only be set at the end of + # _validate_post() + if not persistent_state: + persistent_state = "present" + + # If the profile is present, it should be ensured first + if persistent_state == "present": + actions.append(persistent_state) + + # If the profile should be absent at the end, it needs to be present in + # the meantime to allow to (de)activate it + if persistent_state == "absent" and state: + actions.append("present") + + # Change the runtime state if necessary + if state: + actions.append(state) + + # Remove the profile in the end if requested + if persistent_state == "absent": + actions.append(persistent_state) + + result["state"] = state + result["persistent_state"] = persistent_state + result["actions"] = actions + + return result + + def _validate_post_fields(self, value, name, result): + """ + Validate the allowed fields (settings depending on the requested state). + FIXME: Maybe it should check whether "up"/"down" is present in the + actions instead of checking the runtime state from "state" to switch + from state to actions after the state parsing is done. 
+ """ + state = result.get("state") + persistent_state = result.get("persistent_state") + + # minimal settings not related to runtime changes + valid_fields = ["actions", "ignore_errors", "name", "persistent_state", "state"] + + # when type is present, a profile is completely specified (using + # defaults or other settings) + if "type" in result: + valid_fields += list(self.nested.keys()) + + # If there are no runtime changes, "wait" and "force_state_change" do + # not make sense + # FIXME: Maybe this restriction can be removed. Need to make sure that + # defaults for wait or force_state_change do not interfer + if not state: + while "wait" in valid_fields: + valid_fields.remove("wait") + while "force_state_change" in valid_fields: + valid_fields.remove("force_state_change") + else: + valid_fields += ["force_state_change", "wait"] + + # FIXME: Maybe just accept all values, even if they are not + # needed/meaningful in the respective context + valid_fields = set(valid_fields) + for k in result: + if k not in valid_fields: + raise ValidationError( + name + "." + k, + "property is not allowed for state '%s' and persistent_state '%s'" + % (state, persistent_state), + ) + + if "name" not in result: + if persistent_state == "absent": + result["name"] = "" # set to empty string to mean *absent all others* + else: + raise ValidationError(name, "missing 'name'") + + # FIXME: Seems to be a duplicate check since "wait" will be removed from + # valid_keys when state is considered to be not True + if "wait" in result and not state: + raise ValidationError( + name + ".wait", + "'wait' is not allowed for state '%s'" % (result["state"]), + ) + + result["state"] = state + result["persistent_state"] = persistent_state + + self.VALID_FIELDS = valid_fields + return result + + def _validate_post(self, value, name, result): + result = self._validate_post_state(value, name, result) + result = self._validate_post_fields(value, name, result) + + if "type" in result: + + if "master" in result: + if "slave_type" not in result: + result["slave_type"] = None + if result["master"] == result["name"]: + raise ValidationError( + name + ".master", '"master" cannot refer to itself' + ) + else: + if "slave_type" in result: + raise ValidationError( + name + ".slave_type", + "'slave_type' requires a 'master' property", + ) + + if "ip" in result: + if "master" in result: + raise ValidationError( + name + ".ip", 'a slave cannot have an "ip" property' + ) + else: + if "master" not in result: + result["ip"] = self.nested["ip"].get_default_value() + + if "zone" in result: + if "master" in result: + raise ValidationError( + name + ".zone", '"zone" cannot be configured for slave types' + ) + else: + result["zone"] = None + + if "mac" in result: + if result["type"] not in ["ethernet", "infiniband"]: + raise ValidationError( + name + ".mac", + "a 'mac' address is only allowed for type 'ethernet' " + "or 'infiniband'", + ) + maclen = len(Util.mac_aton(result["mac"])) + if result["type"] == "ethernet" and maclen != 6: + raise ValidationError( + name + ".mac", + "a 'mac' address for type ethernet requires 6 octets " + "but is '%s'" % result["mac"], + ) + if result["type"] == "infiniband" and maclen != 20: + raise ValidationError( + name + ".mac", + "a 'mac' address for type ethernet requires 20 octets " + "but is '%s'" % result["mac"], + ) + + if result["type"] == "infiniband": + if "infiniband" not in result: + result["infiniband"] = self.nested[ + "infiniband" + ].get_default_infiniband() + if "infiniband_transport_mode" in result: + 
result["infiniband"]["transport_mode"] = result[ + "infiniband_transport_mode" + ] + del result["infiniband_transport_mode"] + if "infiniband_p_key" in result: + result["infiniband"]["p_key"] = result["infiniband_p_key"] + del result["infiniband_p_key"] + else: + if "infiniband_transport_mode" in result: + raise ValidationError( + name + ".infiniband_transport_mode", + "cannot mix deprecated 'infiniband_transport_mode' " + "property with 'infiniband' settings", + ) + if "infiniband_p_key" in result: + raise ValidationError( + name + ".infiniband_p_key", + "cannot mix deprecated 'infiniband_p_key' property " + "with 'infiniband' settings", + ) + if result["infiniband"]["transport_mode"] is None: + result["infiniband"]["transport_mode"] = "datagram" + if result["infiniband"]["p_key"] != -1: + if "mac" not in result and "parent" not in result: + raise ValidationError( + name + ".infiniband.p_key", + "a infiniband device with 'infiniband.p_key' " + "property also needs 'mac' or 'parent' property", + ) + else: + if "infiniband" in result: + raise ValidationError( + name + ".infiniband", + "'infiniband' settings are only allowed for type 'infiniband'", + ) + if "infiniband_transport_mode" in result: + raise ValidationError( + name + ".infiniband_transport_mode", + "a 'infiniband_transport_mode' property is only " + "allowed for type 'infiniband'", + ) + if "infiniband_p_key" in result: + raise ValidationError( + name + ".infiniband_p_key", + "a 'infiniband_p_key' property is only allowed for " + "type 'infiniband'", + ) + + if "interface_name" in result: + # Ignore empty interface_name + if result["interface_name"] == "": + del result["interface_name"] + elif not Util.ifname_valid(result["interface_name"]): + raise ValidationError( + name + ".interface_name", + "invalid 'interface_name' '%s'" % (result["interface_name"]), + ) + else: + if not result.get("mac"): + if not Util.ifname_valid(result["name"]): + raise ValidationError( + name + ".interface_name", + "'interface_name' as 'name' '%s' is not valid" + % (result["name"]), + ) + result["interface_name"] = result["name"] + + if "interface_name" not in result and result["type"] in [ + "bond", + "bridge", + "macvlan", + "team", + "vlan", + ]: + raise ValidationError( + name + ".interface_name", + "type '%s' requires 'interface_name'" % (result["type"]), + ) + + if result["type"] == "vlan": + if "vlan" not in result: + if "vlan_id" not in result: + raise ValidationError( + name + ".vlan", 'missing "vlan" settings for "type" "vlan"' + ) + result["vlan"] = self.nested["vlan"].get_default_vlan() + result["vlan"]["id"] = result["vlan_id"] + del result["vlan_id"] + else: + if "vlan_id" in result: + raise ValidationError( + name + ".vlan_id", + "don't use the deprecated 'vlan_id' together with the " + "'vlan' settings'", + ) + if "parent" not in result: + raise ValidationError( + name + ".parent", 'missing "parent" for "type" "vlan"' + ) + else: + if "vlan" in result: + raise ValidationError( + name + ".vlan", '"vlan" is only allowed for "type" "vlan"' + ) + if "vlan_id" in result: + raise ValidationError( + name + ".vlan_id", '"vlan_id" is only allowed for "type" "vlan"' + ) + + if "parent" in result: + if result["type"] not in ["vlan", "macvlan", "infiniband"]: + raise ValidationError( + name + ".parent", + "'parent' is only allowed for type 'vlan', 'macvlan' or " + "'infiniband'", + ) + if result["parent"] == result["name"]: + raise ValidationError( + name + ".parent", '"parent" cannot refer to itself' + ) + + if result["type"] == "bond": + if 
"bond" not in result: + result["bond"] = self.nested["bond"].get_default_bond() + else: + if "bond" in result: + raise ValidationError( + name + ".bond", + "'bond' settings are not allowed for 'type' '%s'" + % (result["type"]), + ) + + if result["type"] in ["ethernet", "vlan", "bridge", "bond", "team"]: + if "ethernet" not in result: + result["ethernet"] = self.nested["ethernet"].get_default_ethernet() + else: + if "ethernet" in result: + raise ValidationError( + name + ".ethernet", + "'ethernet' settings are not allowed for 'type' '%s'" + % (result["type"]), + ) + + if result["type"] == "macvlan": + if "macvlan" not in result: + result["macvlan"] = self.nested["macvlan"].get_default_macvlan() + else: + if "macvlan" in result: + raise ValidationError( + name + ".macvlan", + "'macvlan' settings are not allowed for 'type' '%s'" + % (result["type"]), + ) + + for k in self.VALID_FIELDS: + if k in result: + continue + v = self.nested[k] + vv = v.get_default_value() + if vv is not ArgValidator.MISSING: + result[k] = vv + + return result + + +class ArgValidator_ListConnections(ArgValidatorList): + def __init__(self): + ArgValidatorList.__init__( + self, + name="connections", + nested=ArgValidator_DictConnection(), + default_value=list, + ) + + def _validate_post(self, value, name, result): + for idx, connection in enumerate(result): + if "type" in connection: + if connection["master"]: + c = ArgUtil.connection_find_by_name( + connection["master"], result, idx + ) + if not c: + raise ValidationError( + name + "[" + str(idx) + "].master", + "references non-existing 'master' connection '%s'" + % (connection["master"]), + ) + if c["type"] not in ArgValidator_DictConnection.VALID_SLAVE_TYPES: + raise ValidationError( + name + "[" + str(idx) + "].master", + "references 'master' connection '%s' which is not a master " + "type by '%s'" % (connection["master"], c["type"]), + ) + if connection["slave_type"] is None: + connection["slave_type"] = c["type"] + elif connection["slave_type"] != c["type"]: + raise ValidationError( + name + "[" + str(idx) + "].master", + "references 'master' connection '%s' which is of type '%s' " + "instead of slave_type '%s'" + % ( + connection["master"], + c["type"], + connection["slave_type"], + ), + ) + if connection["parent"]: + if not ArgUtil.connection_find_by_name( + connection["parent"], result, idx + ): + raise ValidationError( + name + "[" + str(idx) + "].parent", + "references non-existing 'parent' connection '%s'" + % (connection["parent"]), + ) + return result + + VALIDATE_ONE_MODE_NM = "nm" + VALIDATE_ONE_MODE_INITSCRIPTS = "initscripts" + + def validate_connection_one(self, mode, connections, idx): + connection = connections[idx] + if "type" not in connection: + return + + if (connection["parent"]) and ( + ( + (mode == self.VALIDATE_ONE_MODE_INITSCRIPTS) + and (connection["type"] == "vlan") + ) + or ( + (connection["type"] == "infiniband") + and (connection["infiniband"]["p_key"] != -1) + ) + ): + try: + ArgUtil.connection_find_master(connection["parent"], connections, idx) + except MyError: + raise ValidationError.from_connection( + idx, + "profile references a parent '%s' which has 'interface_name' " + "missing" % (connection["parent"]), + ) + + if (connection["master"]) and (mode == self.VALIDATE_ONE_MODE_INITSCRIPTS): + try: + ArgUtil.connection_find_master(connection["master"], connections, idx) + except MyError: + raise ValidationError.from_connection( + idx, + "profile references a master '%s' which has 'interface_name' " + "missing" % 
(connection["master"]), + ) diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py new file mode 100644 index 0000000..9f9b028 --- /dev/null +++ b/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: BSD-3-Clause +""" Support for NetworkManager aka the NM provider """ + +# pylint: disable=import-error, no-name-in-module +from ansible.module_utils.network_lsr.utils import Util + +ETHTOOL_FEATURE_PREFIX = "ETHTOOL_OPTNAME_FEATURE_" + + +def get_nm_ethtool_feature(name): + """ + Translate ethtool feature into Network Manager name + + :param name: Name of the feature + :type name: str + :returns: Name of the feature to be used by `NM.SettingEthtool.set_feature()` + :rtype: str + """ + + name = ETHTOOL_FEATURE_PREFIX + name.upper().replace("-", "_") + + feature = getattr(Util.NM(), name, None) + return feature diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/utils.py b/roles/linux-system-roles.network/module_utils/network_lsr/utils.py new file mode 100644 index 0000000..bd1887d --- /dev/null +++ b/roles/linux-system-roles.network/module_utils/network_lsr/utils.py @@ -0,0 +1,338 @@ +#!/usr/bin/python3 -tt +# SPDX-License-Identifier: BSD-3-Clause +# vim: fileencoding=utf8 + +import os +import socket +import sys +import uuid + +# pylint: disable=import-error, no-name-in-module +from ansible.module_utils.network_lsr import MyError + + +class Util: + + PY3 = sys.version_info[0] == 3 + + STRING_TYPE = str if PY3 else basestring # noqa:F821 + + @staticmethod + def first(iterable, default=None, pred=None): + for v in iterable: + if pred is None or pred(v): + return v + return default + + @staticmethod + def check_output(argv): + # subprocess.check_output is python 2.7. + with open("/dev/null", "wb") as DEVNULL: + import subprocess + + env = os.environ.copy() + env["LANG"] = "C" + p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=DEVNULL, env=env) + # FIXME: Can we assume this to always be UTF-8? 
+ out = p.communicate()[0].decode("UTF-8") + if p.returncode != 0: + raise MyError("failure calling %s: exit with %s" % (argv, p.returncode)) + return out + + @classmethod + def create_uuid(cls): + return str(uuid.uuid4()) + + @classmethod + def NM(cls): + n = getattr(cls, "_NM", None) + if n is None: + # Installing pygobject in a tox virtualenv does not work out of the + # box + # pylint: disable=import-error + import gi + + gi.require_version("NM", "1.0") + from gi.repository import NM, GLib, Gio, GObject + + cls._NM = NM + cls._GLib = GLib + cls._Gio = Gio + cls._GObject = GObject + n = NM + return n + + @classmethod + def GLib(cls): + cls.NM() + return cls._GLib + + @classmethod + def Gio(cls): + cls.NM() + return cls._Gio + + @classmethod + def GObject(cls): + cls.NM() + return cls._GObject + + @classmethod + def Timestamp(cls): + return cls.GLib().get_monotonic_time() + + @classmethod + def GMainLoop(cls): + gmainloop = getattr(cls, "_GMainLoop", None) + if gmainloop is None: + gmainloop = cls.GLib().MainLoop() + cls._GMainLoop = gmainloop + return gmainloop + + @classmethod + def GMainLoop_run(cls, timeout=None): + if timeout is None: + cls.GMainLoop().run() + return True + + GLib = cls.GLib() + timeout_reached = [] + loop = cls.GMainLoop() + + def _timeout_cb(unused): + timeout_reached.append(1) + loop.quit() + return False + + timeout_id = GLib.timeout_add(int(timeout * 1000), _timeout_cb, None) + loop.run() + if not timeout_reached: + GLib.source_remove(timeout_id) + return not timeout_reached + + @classmethod + def GMainLoop_iterate(cls, may_block=False): + return cls.GMainLoop().get_context().iteration(may_block) + + @classmethod + def GMainLoop_iterate_all(cls): + c = 0 + while cls.GMainLoop_iterate(): + c += 1 + return c + + @classmethod + def call_async_method(cls, object_, action, args, mainloop_timeout=10): + """ Asynchronously call a NetworkManager method """ + cancellable = cls.create_cancellable() + async_action = action + "_async" + # NM does not use a uniform naming for the async methods, + # for checkpoints it is: + # NMClient.checkpoint_create() and NMClient.checkpoint_create_finish(), + # but for reapply it is: + # NMDevice.reapply_async() and NMDevice.reapply_finish() + # NMDevice.reapply() is a synchronous version + # Therefore check if there is a method if an `async` suffix and use the + # one without the suffix otherwise + if not hasattr(object_, async_action): + async_action = action + finish = action + "_finish" + user_data = {} + + fullargs = [] + fullargs += args + fullargs += (cancellable, cls.create_callback(finish), user_data) + + getattr(object_, async_action)(*fullargs) + + if not cls.GMainLoop_run(mainloop_timeout): + cancellable.cancel() + raise MyError("failure to call %s.%s(): timeout" % object_, async_action) + + success = user_data.get("success", None) + if success is not None: + return success + + raise MyError( + "failure to %s checkpoint: %s: %r" + % (action, user_data.get("error", "unknown error"), user_data) + ) + + @classmethod + def create_cancellable(cls): + return cls.Gio().Cancellable.new() + + @classmethod + def create_callback(cls, finish_method): + """ + Create a callback that will return the result of the finish method and + quit the GMainLoop + + :param finish_method str: Name of the finish method to call from the + source object in the callback + """ + + def callback(source_object, res, user_data): + success = None + try: + success = getattr(source_object, finish_method)(res) + except Exception as e: + if 
cls.error_is_cancelled(e): + return + user_data["error"] = str(e) + user_data["success"] = success + cls.GMainLoop().quit() + + return callback + + @classmethod + def error_is_cancelled(cls, e): + GLib = cls.GLib() + if isinstance(e, GLib.GError): + if ( + e.domain == "g-io-error-quark" + and e.code == cls.Gio().IOErrorEnum.CANCELLED + ): + return True + return False + + @staticmethod + def ifname_valid(ifname): + # see dev_valid_name() in kernel's net/core/dev.c + if not ifname: + return False + if ifname in [".", ".."]: + return False + if len(ifname) >= 16: + return False + if any([c == "/" or c == ":" or c.isspace() for c in ifname]): + return False + # FIXME: encoding issues regarding python unicode string + return True + + @staticmethod + def mac_aton(mac_str, force_len=None): + # we also accept None and '' for convenience. + # - None yiels None + # - '' yields [] + if mac_str is None: + return mac_str + i = 0 + b = [] + for c in mac_str: + if i == 2: + if c != ":": + raise MyError("not a valid MAC address: '%s'" % (mac_str)) + i = 0 + continue + try: + if i == 0: + n = int(c, 16) * 16 + i = 1 + else: + assert i == 1 + n = n + int(c, 16) + i = 2 + b.append(n) + except Exception: + raise MyError("not a valid MAC address: '%s'" % (mac_str)) + if i == 1: + raise MyError("not a valid MAC address: '%s'" % (mac_str)) + if force_len is not None: + if force_len != len(b): + raise MyError( + "not a valid MAC address of length %s: '%s'" % (force_len, mac_str) + ) + return b + + @staticmethod + def mac_ntoa(mac): + if mac is None: + return None + return ":".join(["%02x" % c for c in mac]) + + @staticmethod + def mac_norm(mac_str, force_len=None): + return Util.mac_ntoa(Util.mac_aton(mac_str, force_len)) + + @staticmethod + def boolean(arg): + if arg is None or isinstance(arg, bool): + return arg + arg0 = arg + if isinstance(arg, Util.STRING_TYPE): + arg = arg.lower() + + if arg in ["y", "yes", "on", "1", "true", 1, True]: + return True + if arg in ["n", "no", "off", "0", "false", 0, False]: + return False + + raise MyError("value '%s' is not a boolean" % (arg0)) + + @staticmethod + def parse_ip(addr, family=None): + if addr is None: + return (None, None) + if family is not None: + Util.addr_family_check(family) + a = socket.inet_pton(family, addr) + else: + a = None + family = None + try: + a = socket.inet_pton(socket.AF_INET, addr) + family = socket.AF_INET + except Exception: + a = socket.inet_pton(socket.AF_INET6, addr) + family = socket.AF_INET6 + return (socket.inet_ntop(family, a), family) + + @staticmethod + def addr_family_check(family): + if family != socket.AF_INET and family != socket.AF_INET6: + raise MyError("invalid address family %s" % (family)) + + @staticmethod + def addr_family_to_v(family): + if family is None: + return "" + if family == socket.AF_INET: + return "v4" + if family == socket.AF_INET6: + return "v6" + raise MyError("invalid address family '%s'" % (family)) + + @staticmethod + def addr_family_default_prefix(family): + Util.addr_family_check(family) + if family == socket.AF_INET: + return 24 + else: + return 64 + + @staticmethod + def addr_family_valid_prefix(family, prefix): + Util.addr_family_check(family) + if family == socket.AF_INET: + m = 32 + else: + m = 128 + return prefix >= 0 and prefix <= m + + @staticmethod + def parse_address(address, family=None): + try: + parts = address.split() + addr_parts = parts[0].split("/") + if len(addr_parts) != 2: + raise MyError("expect two addr-parts: ADDR/PLEN") + a, family = Util.parse_ip(addr_parts[0], family) + prefix 
= int(addr_parts[1]) + if not Util.addr_family_valid_prefix(family, prefix): + raise MyError("invalid prefix %s" % (prefix)) + if len(parts) > 1: + raise MyError("too many parts") + return {"address": a, "family": family, "prefix": prefix} + except Exception: + raise MyError("invalid address '%s'" % (address)) diff --git a/roles/linux-system-roles.network/molecule/default/Dockerfile.j2 b/roles/linux-system-roles.network/molecule/default/Dockerfile.j2 new file mode 100644 index 0000000..0a60553 --- /dev/null +++ b/roles/linux-system-roles.network/molecule/default/Dockerfile.j2 @@ -0,0 +1,14 @@ +# Molecule managed + +{% if item.registry is defined %} +FROM {{ item.registry.url }}/{{ item.image }} +{% else %} +FROM {{ item.image }} +{% endif %} + +RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \ + elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python2-dnf bash && dnf clean all; \ + elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ + elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \ + elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ + elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi diff --git a/roles/linux-system-roles.network/molecule/default/molecule.yml b/roles/linux-system-roles.network/molecule/default/molecule.yml new file mode 100644 index 0000000..066964a --- /dev/null +++ b/roles/linux-system-roles.network/molecule/default/molecule.yml @@ -0,0 +1,38 @@ +--- +dependency: + name: galaxy +driver: + name: docker +lint: + name: yamllint + options: + config-file: molecule/default/yamllint.yml +platforms: + - name: centos-6 + image: linuxsystemroles/centos-6 + privileged: true + - name: centos-7 + image: linuxsystemroles/centos-7 + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true +provisioner: + name: ansible + log: true + lint: + name: ansible-lint + playbooks: + converge: ../../tests/tests_default.yml +scenario: + name: default + test_sequence: + - destroy + - create + - converge + - idempotence + - check + - destroy +verifier: + name: testinfra + lint: + name: flake8 diff --git a/roles/linux-system-roles.network/molecule/default/yamllint.yml b/roles/linux-system-roles.network/molecule/default/yamllint.yml new file mode 100644 index 0000000..e00a5a9 --- /dev/null +++ b/roles/linux-system-roles.network/molecule/default/yamllint.yml @@ -0,0 +1,12 @@ +--- +extends: default +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable + truthy: disable + document-start: disable diff --git a/roles/linux-system-roles.network/pylintrc b/roles/linux-system-roles.network/pylintrc new file mode 100644 index 0000000..2f07798 --- /dev/null +++ b/roles/linux-system-roles.network/pylintrc @@ -0,0 +1,545 @@ +# This file was generated using `pylint --generate-rcfile > pylintrc` command. +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Add files or directories to the blacklist. 
They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()) + '/library'); sys.path.append(os.path.dirname(find_pylintrc()) + '/module_utils'); sys.path.append(os.path.dirname(find_pylintrc()) + '/tests')" + + +# Use multiple processes to speed up Pylint. +jobs=1 + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. +#rcfile= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". 
If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable= +#disable=print-statement, +# parameter-unpacking, +# unpacking-in-except, +# old-raise-syntax, +# backtick, +# long-suffix, +# old-ne-operator, +# old-octal-literal, +# import-star-module-level, +# non-ascii-bytes-literal, +# raw-checker-failed, +# bad-inline-option, +# locally-disabled, +# locally-enabled, +# file-ignored, +# suppressed-message, +# useless-suppression, +# deprecated-pragma, +# apply-builtin, +# basestring-builtin, +# buffer-builtin, +# cmp-builtin, +# coerce-builtin, +# execfile-builtin, +# file-builtin, +# long-builtin, +# raw_input-builtin, +# reduce-builtin, +# standarderror-builtin, +# unicode-builtin, +# xrange-builtin, +# coerce-method, +# delslice-method, +# getslice-method, +# setslice-method, +# no-absolute-import, +# old-division, +# dict-iter-method, +# dict-view-method, +# next-method-called, +# metaclass-assignment, +# indexing-exception, +# raising-string, +# reload-builtin, +# oct-method, +# hex-method, +# nonzero-method, +# cmp-method, +# input-builtin, +# round-builtin, +# intern-builtin, +# unichr-builtin, +# map-builtin-not-iterating, +# zip-builtin-not-iterating, +# range-builtin-not-iterating, +# filter-builtin-not-iterating, +# using-cmp-argument, +# eq-without-hash, +# div-method, +# idiv-method, +# rdiv-method, +# exception-message-attribute, +# invalid-str-codec, +# sys-max-int, +# bad-python3-import, +# deprecated-string-function, +# deprecated-str-translate-call, +# deprecated-itertools-function, +# deprecated-types-field, +# next-method-defined, +# dict-items-not-iterating, +# dict-keys-not-iterating, +# dict-values-not-iterating + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio).You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. 
+never-returning-functions=optparse.Values,sys.exit + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module +max-module-lines=1000 + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check=trailing-comma, + dict-separator + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. 
+single-line-if-stmt=no + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins + + +[BASIC] + +# Naming style matching correct argument names +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style +#argument-rgx= + +# Naming style matching correct attribute names +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Naming style matching correct class attribute names +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style +#class-attribute-rgx= + +# Naming style matching correct class names +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming-style +#class-rgx= + +# Naming style matching correct constant names +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names +function-naming-style=snake_case + +# Regular expression matching correct function names. 
Overrides function- +# naming-style +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma +good-names=i, + j, + k, + ex, + Run, + _ + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# Naming style matching correct inline iteration names +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style +#inlinevar-rgx= + +# Naming style matching correct method names +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style +#method-rgx= + +# Naming style matching correct module names +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style +#variable-rgx= + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + + +[IMPORTS] + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub, + TERMIOS, + Bastion, + rexec + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of statements in function / method body +max-statements=50 + +# Minimum number of public methods for a class (see R0903). 
+min-public-methods=2 + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/roles/linux-system-roles.network/tasks/main.yml b/roles/linux-system-roles.network/tasks/main.yml new file mode 100644 index 0000000..f7f041f --- /dev/null +++ b/roles/linux-system-roles.network/tasks/main.yml @@ -0,0 +1,58 @@ +# SPDX-License-Identifier: BSD-3-Clause +# get service facts, used in defaults/main.yml +--- +- name: Check which services are running + service_facts: + no_log: true + +# needed for ansible_facts.packages +- name: Check which packages are installed + package_facts: + no_log: true + +- name: Print network provider + debug: + msg: "Using network provider: {{ network_provider }}" + +# Depending on the plugins, checking installed packages might be slow +# for example subscription manager might slow this down +# Therefore install packages only when rpm does not find them +- name: Install packages + package: + name: "{{ network_packages }}" + state: present + when: + - not network_packages is subset(ansible_facts.packages.keys()) + +- name: Enable and start NetworkManager + service: + name: "{{ network_service_name }}" + state: started + enabled: true + when: + - network_provider == "nm" + +- name: Enable network service + service: + name: "{{ network_service_name }}" + enabled: true + when: + - network_provider == "initscripts" + +- name: Ensure initscripts network file dependency is present + copy: + dest: /etc/sysconfig/network + content: "# Created by network system role" + force: false + when: + - network_provider == "initscripts" + +- name: Configure networking connection profiles + network_connections: + provider: "{{ network_provider | mandatory }}" + ignore_errors: "{{ network_ignore_errors | default(omit) }}" + force_state_change: "{{ network_force_state_change | default(omit) }}" + connections: "{{ network_connections | default([]) }}" + +- name: Re-test connectivity + ping: diff --git a/roles/linux-system-roles.network/tests/.gitignore b/roles/linux-system-roles.network/tests/.gitignore new file mode 100644 index 0000000..cb7340c --- /dev/null +++ b/roles/linux-system-roles.network/tests/.gitignore @@ -0,0 +1,2 @@ +/*.retry +/inventory diff --git a/roles/linux-system-roles.network/tests/ansible_module_network_connections.py b/roles/linux-system-roles.network/tests/ansible_module_network_connections.py new file mode 120000 index 0000000..b30a744 --- /dev/null +++ b/roles/linux-system-roles.network/tests/ansible_module_network_connections.py @@ -0,0 +1 @@ +roles/linux-system-roles.network/library/network_connections.py \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/covstats b/roles/linux-system-roles.network/tests/covstats new file mode 100755 index 0000000..310327d --- /dev/null +++ b/roles/linux-system-roles.network/tests/covstats @@ -0,0 +1,16 @@ +#! 
/bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +if [ "$#" -lt 1 ] +then + echo "USAGE: ${0} coverage_data_file..." + echo "Show Statistics for each coverage data file" + exit 1 +fi + +for coverage_file in "${@}" +do + echo "coverage statistics for: ${coverage_file}:" + COVERAGE_FILE="${coverage_file}" coverage report + echo +done diff --git a/roles/linux-system-roles.network/tests/down-profile.yml b/roles/linux-system-roles.network/tests/down-profile.yml new file mode 100644 index 0000000..5087240 --- /dev/null +++ b/roles/linux-system-roles.network/tests/down-profile.yml @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: Set {{ profile }} down + hosts: all + vars: + network_connections: + - name: "{{ profile }}" + state: down + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/tests/ensure_non_running_provider.py b/roles/linux-system-roles.network/tests/ensure_non_running_provider.py new file mode 100755 index 0000000..9048c90 --- /dev/null +++ b/roles/linux-system-roles.network/tests/ensure_non_running_provider.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: BSD-3-Clause +""" Check that there is a playbook to run all role tests with the non-default +provider as well """ +# vim: fileencoding=utf8 + +import glob +import os +import sys + + +import yaml + +OTHER_PROVIDER_SUFFIX = "_other_provider.yml" + +IGNORE = [ + "tests_helpers-and-asserts.yml", + "tests_states.yml", + "tests_unit.yml", + "tests_vlan_mtu_initscripts.yml", + "tests_vlan_mtu_nm.yml", + "tests_ethtool_features_initscripts.yml", + "tests_ethtool_features_nm.yml", +] + +OTHER_PLAYBOOK = """ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: Run playbook '{tests_playbook}' with non-default provider + hosts: all + vars: + network_provider_current: + tasks: + # required for the code to set network_provider_current + - name: Get service facts + service_facts: + - name: Set network provider + set_fact: + network_provider: '{{{{ "initscripts" if network_provider_current == "nm" + else "nm" }}}}' + +- import_playbook: "{tests_playbook}" + when: + - ansible_distribution_major_version != '6' +""" # noqa: E501 # ignore that the line is too long + + +def get_current_provider_code(): + with open("../defaults/main.yml") as defaults: + yaml_defaults = yaml.safe_load(defaults) + current_provider = yaml_defaults["network_provider_current"] + return current_provider + + +def generate_nominal_other_playbook(tests_playbook): + nominal_other_testfile_data = OTHER_PLAYBOOK.format(tests_playbook=tests_playbook) + nominal = yaml.safe_load(nominal_other_testfile_data) + nominal[0]["vars"]["network_provider_current"] = get_current_provider_code() + return yaml.dump(nominal, default_flow_style=False, explicit_start=True, width=80) + + +def main(): + testsfiles = glob.glob("tests_*.yml") + missing = [] + returncode = 0 + + # Generate files when specified + generate = bool(len(sys.argv) > 1 and sys.argv[1] == "generate") + + if not testsfiles: + print("ERROR: No tests found") + returncode = 1 + + for filename in testsfiles: + if filename.endswith(OTHER_PROVIDER_SUFFIX): + continue + + if filename in IGNORE: + continue + + fileroot = os.path.splitext(filename)[0] + other_testfile = fileroot + OTHER_PROVIDER_SUFFIX + nominal_other_testfile_data = generate_nominal_other_playbook(filename) + + if generate: + with open(other_testfile, "w") as ofile: + ofile.write(nominal_other_testfile_data) + + if other_testfile not in testsfiles and not generate: + 
missing.append(filename) + else: + with open(other_testfile) as ifile: + testdata = ifile.read() + if testdata != nominal_other_testfile_data: + print( + "ERROR: Playbook does not match nominal value " + other_testfile + ) + returncode = 1 + + if missing: + print("ERROR: No tests for other provider found for:\n" + ", \n".join(missing)) + print("Try to generate them with '{} generate'".format(sys.argv[0])) + returncode = 1 + + return returncode + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/roles/linux-system-roles.network/tests/get-coverage.sh b/roles/linux-system-roles.network/tests/get-coverage.sh new file mode 100755 index 0000000..7b6cd21 --- /dev/null +++ b/roles/linux-system-roles.network/tests/get-coverage.sh @@ -0,0 +1,68 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +if [ -n "${DEBUG}" ] +then + set -x +fi +set -e + +if [ "$#" -lt 2 ] +then + echo "USAGE: ${0} host playbook" + echo "Get coverage info from host for playbook" + exit 1 +fi + +host="${1}" +shift +playbook="${1}" + +coverage_data="remote-coveragedata-${host}-${playbook%.yml}" +coverage="/root/.local/bin/coverage" + +echo "Getting coverage for ${playbook} on ${host}" >&2 + +call_ansible() { + local module="${1}" + shift + local args="${1}" + shift + ansible -m "${module}" -i "${host}", -a "${args}" all "${@}" +} + +remote_coverage_dir="$(mktemp -d /tmp/remote_coverage-XXXXXX)" +trap "rm -rf '${remote_coverage_dir}'" EXIT +ansible-playbook -i "${host}", get-coverage.yml -e "test_playbook=${playbook} destdir=${remote_coverage_dir}" + +#COVERAGE_FILE=remote-coverage coverage combine remote-coverage/tests_*/*/root/.coverage +./merge-coverage.sh coverage "${coverage_data}"-tmp $(find "${remote_coverage_dir}" -type f | tr , _) + +# When https://github.com/nedbat/coveragepy/pull/49 is merged, this can be simplified: +if false +then +cat > tmp_merge_coveragerc < tmp_merge_coveragerc <> tmp_merge_coveragerc +done +fi + +COVERAGE_FILE="${coverage_data}" coverage combine --rcfile tmp_merge_coveragerc "${coverage_data}"-tmp +rm tmp_merge_coveragerc + +COVERAGE_FILE="${coverage_data}" coverage report ||: +COVERAGE_FILE="${coverage_data}" coverage html --directory "htmlcov-${coverage_data}" ||: + +echo "Coverage collected in: ${coverage_data}" diff --git a/roles/linux-system-roles.network/tests/get-coverage.yml b/roles/linux-system-roles.network/tests/get-coverage.yml new file mode 100644 index 0000000..4845c62 --- /dev/null +++ b/roles/linux-system-roles.network/tests/get-coverage.yml @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +# This expects the variable test_playbook to be set from the outside +- name: Prepare for coverage extraction + hosts: all + tasks: + # Use set_fact to set variables to make them available in all plays + # 'vars:' Would only set variables for the current play + - name: set facts + set_fact: + coverage_module: network_connections + coverage: /root/.local/bin/coverage + destdir: "remote_coverage/{{ test_playbook }}" + + # This uses variables from the other set_fact task, therefore it needs to + # be its own task + - name: set more facts + set_fact: + coverage_file: ansible-coverage-{{ coverage_module }}-{{ test_playbook|replace('.yml', '') }} + + - name: debug info + debug: + msg: Getting coverage for '{{ coverage_module }}' with '{{ test_playbook }}' + + # combine data in case old data is left there + - command: "{{ coverage }} combine" + environment: + COVERAGE_FILE: "{{ coverage_file }}" + ignore_errors: yes + + - name: remove old data + file: + state: 
absent + path: "{{ coverage_file }}" + + - name: remove old data + shell: rm -f .coverage.* + + - name: copy coveragerc + copy: + content: "[run]\ndisable_warnings = no-data-collected\n" + dest: .coveragerc + + - name: install latest pip + pip: + name: coverage + extra_args: --user --upgrade + +- import_playbook: "{{ test_playbook }}" + vars: + ansible_python_interpreter: "{{ coverage }} run -p --include *ansible_module_{{ coverage_module }}.py" + +- name: Gather coverage data + hosts: all + tasks: + - shell: "{{ coverage }} combine .coverage.*" + environment: + COVERAGE_FILE: "{{ coverage_file }}" + +- name: Get coverage data + hosts: all + tasks: + - fetch: + src: "{{ coverage_file }}" + dest: "{{ destdir }}" + flat: no diff --git a/roles/linux-system-roles.network/tests/get-total-coverage.sh b/roles/linux-system-roles.network/tests/get-total-coverage.sh new file mode 100755 index 0000000..c3dacfe --- /dev/null +++ b/roles/linux-system-roles.network/tests/get-total-coverage.sh @@ -0,0 +1,34 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +set -e +coverage_data=total-coveragedata +testhost="${1}" + +if [ "$#" -lt 1 ] +then + echo "USAGE: ${0} host" + echo "Get local and all remote coverage data for host" + exit 1 +fi + +rm -f remote-coveragedata* "${coveragedata}" + + +# collect pytest coverage +tox -e py26,py27,py36,py37 -- --cov-append + +for test_playbook in tests_*.yml +do + ./get-coverage.sh "${testhost}" "${test_playbook}" +done + +./merge-coverage.sh coverage "total-remote-coveragedata" remote-coveragedata-* +./covstats .coverage remote-coveragedata-* "total-remote-coveragedata" + +./merge-coverage.sh coverage "${coverage_data}" .coverage remote-coveragedata-* +echo "Total coverage:" +COVERAGE_FILE="${coverage_data}" coverage report ||: +COVERAGE_FILE="${coverage_data}" coverage html --directory "htmlcov-${coverage_data}" ||: +echo "Open HTML report with:" +echo "xdg-open htmlcov-${coverage_data}/index.html" diff --git a/roles/linux-system-roles.network/tests/merge-coverage.sh b/roles/linux-system-roles.network/tests/merge-coverage.sh new file mode 100755 index 0000000..a33e94d --- /dev/null +++ b/roles/linux-system-roles.network/tests/merge-coverage.sh @@ -0,0 +1,35 @@ +#! /bin/bash +# SPDX-License-Identifier: BSD-3-Clause + +if [ -n "${DEBUG}" ] +then + set -x +fi +set -e + +if [ "$#" -lt 3 ] +then + echo "USAGE: ${0} path_to_coverage_binary output_file input_files..." 
+ echo "Merges all input_files into output file without removing input_files" + exit 1 +fi + +# path to coverage binary +coverage="${1}" +shift + +# read by coverage binary +export COVERAGE_FILE="${1}" +shift + +tempdir="$(mktemp -d /tmp/coverage_merge-XXXXXX)" +trap "rm -rf '${tempdir}'" EXIT + +cp --backup=numbered -- "${@}" "${tempdir}" +# FIXME: Would not work if coverage files are not hidden but they are by +# default +shopt -s dotglob +"${coverage}" combine "${tempdir}/"* + +echo "Merged data into ${COVERAGE_FILE}" +./covstats "${COVERAGE_FILE}" diff --git a/roles/linux-system-roles.network/tests/playbooks/roles b/roles/linux-system-roles.network/tests/playbooks/roles new file mode 120000 index 0000000..7b9ade8 --- /dev/null +++ b/roles/linux-system-roles.network/tests/playbooks/roles @@ -0,0 +1 @@ +../roles/ \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/playbooks/tasks b/roles/linux-system-roles.network/tests/playbooks/tasks new file mode 120000 index 0000000..93c76d6 --- /dev/null +++ b/roles/linux-system-roles.network/tests/playbooks/tasks @@ -0,0 +1 @@ +../tasks/ \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml b/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml new file mode 100644 index 0000000..ba0c6c3 --- /dev/null +++ b/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml @@ -0,0 +1,111 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: all + vars: + interface: lsrfeat1 + type: veth + tasks: + - name: "INIT: Ethtool feeatures tests" + debug: + msg: "##################################################" + - include_tasks: tasks/show-interfaces.yml + - include_tasks: tasks/manage-test-interface.yml + vars: + state: present + - include_tasks: tasks/assert-device_present.yml + - name: Install ethtool (test dependency) + package: + name: ethtool + state: present + - block: + - name: "TEST: I can create a profile without changing the ethtool features." + debug: + msg: "##################################################" + - name: Get current device features + command: "ethtool --show-features {{ interface }}" + register: original_ethtool_features + - import_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ interface }}" + state: up + type: ethernet + ip: + dhcp4: "no" + auto6: "no" + - name: Get current device features + command: "ethtool --show-features {{ interface }}" + register: ethtool_features + - name: "ASSERT: The profile does not change the ethtool features" + assert: + that: + - original_ethtool_features.stdout == ethtool_features.stdout + - name: "TEST: I can disable gro and tx-tcp-segmentation and enable gso." 
+ debug: + msg: "##################################################" + - import_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ interface }}" + state: up + type: ethernet + ip: + dhcp4: "no" + auto6: "no" + ethtool: + features: + gro: "no" + gso: "yes" + tx-tcp-segmentation: "no" + - name: Get current device features + command: "ethtool --show-features {{ interface }}" + register: ethtool_features + - name: + debug: + var: ethtool_features.stdout_lines + - name: Assert device features + assert: + that: + - "'generic-receive-offload: off' in ethtool_features.stdout_lines" + - "'generic-segmentation-offload: on' in ethtool_features.stdout_lines" + - "'tx-tcp-segmentation: off' in ethtool_features.stdout_lines | map('trim')" + - name: "TEST: I can reset features to their original value." + debug: + msg: "##################################################" + - import_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ interface }}" + state: up + type: ethernet + ip: + dhcp4: "no" + auto6: "no" + - name: Get current device features + command: "ethtool --show-features {{ interface }}" + register: ethtool_features + # Resetting the ethtools only works with NetworkManager + - name: "ASSERT: The profile does not change the ethtool features" + assert: + that: + - original_ethtool_features.stdout == ethtool_features.stdout + when: + network_provider == 'nm' + always: + - block: + - import_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ interface }}" + persistent_state: absent + state: down + ignore_errors: true + - include_tasks: tasks/manage-test-interface.yml + vars: + state: absent + tags: + - "tests::cleanup" diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_states.yml b/roles/linux-system-roles.network/tests/playbooks/tests_states.yml new file mode 100644 index 0000000..7a1e207 --- /dev/null +++ b/roles/linux-system-roles.network/tests/playbooks/tests_states.yml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: all + vars: + interface: statebr + profile: "{{ interface }}" + network_provider: nm + tasks: + - debug: + msg: Inside states tests + - include_tasks: tasks/show-interfaces.yml + - include_tasks: tasks/assert-device_absent.yml + + # create test profile + - include_role: + name: linux-system-roles.network + vars: + network_connections: + - name: statebr + state: up + type: bridge + ip: + dhcp4: false + auto6: false + - include_tasks: tasks/assert-device_present.yml + - include_tasks: tasks/assert-profile_present.yml + + # test case (remove profile but keep it up) + # I can remove a profile but keep the configuration active. + - include_role: + name: linux-system-roles.network + vars: + network_connections: + - name: statebr + persistent_state: absent + - include_tasks: tasks/assert-device_present.yml + - include_tasks: tasks/assert-profile_absent.yml + + # test case + # I can set a profile down that is up and absent. 
+ - name: Set down + include_role: + name: linux-system-roles.network + vars: + network_connections: + - name: statebr + state: down + - include_tasks: tasks/assert-device_absent.yml + - include_tasks: tasks/assert-profile_absent.yml diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml b/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml new file mode 100644 index 0000000..ae0322e --- /dev/null +++ b/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: all + vars: + type: veth + interface: lsr101 + vlan_interface: lsr101.90 + tasks: + - include_tasks: tasks/show-interfaces.yml + - include_tasks: tasks/manage-test-interface.yml + vars: + state: present + - include_tasks: tasks/assert-device_present.yml + - name: "TEST: I can configure the MTU for a vlan interface without autoconnect." + debug: + msg: "##################################################" + - import_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ interface }}" + type: ethernet + state: up + mtu: 1492 + autoconnect: false + ip: + dhcp4: false + auto6: false + + - name: "{{ vlan_interface }}" + parent: "{{ interface }}" + type: vlan + vlan_id: 90 + mtu: 1280 + state: up + autoconnect: false + ip: + dhcp4: false + auto6: false + - include_tasks: tasks/assert-device_present.yml + vars: + interface: "{{ vlan_interface }}" + - include_tasks: tasks/assert-profile_present.yml + vars: + profile: "{{ item }}" + loop: + - "{{ interface }}" + - "{{ vlan_interface }}" + + - name: "TEARDOWN: remove profiles." + debug: + msg: "##################################################" + - import_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ interface }}" + persistent_state: absent + state: down + - name: "{{ vlan_interface }}" + persistent_state: absent + state: down + ignore_errors: true + - include_tasks: tasks/manage-test-interface.yml + vars: + state: absent diff --git a/roles/linux-system-roles.network/tests/remove-profile.yml b/roles/linux-system-roles.network/tests/remove-profile.yml new file mode 100644 index 0000000..a50e848 --- /dev/null +++ b/roles/linux-system-roles.network/tests/remove-profile.yml @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: Remove {{ profile }} + hosts: all + vars: + network_connections: + - name: "{{ profile }}" + persistent_state: absent + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/defaults b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/defaults new file mode 120000 index 0000000..feb6623 --- /dev/null +++ b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/defaults @@ -0,0 +1 @@ +../../../defaults/ \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/library b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/library new file mode 120000 index 0000000..d0b7393 --- /dev/null +++ b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/library @@ -0,0 +1 @@ +../../../library/ \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/meta b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/meta new file mode 120000 index 0000000..a8df40c --- /dev/null +++ 
b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/meta @@ -0,0 +1 @@ +../../../meta/ \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/module_utils b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/module_utils new file mode 120000 index 0000000..ad35115 --- /dev/null +++ b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/module_utils @@ -0,0 +1 @@ +../../../module_utils/ \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/tasks b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/tasks new file mode 120000 index 0000000..f5bbba4 --- /dev/null +++ b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/tasks @@ -0,0 +1 @@ +../../../tasks/ \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/run-tasks.yml b/roles/linux-system-roles.network/tests/run-tasks.yml new file mode 100644 index 0000000..ea56720 --- /dev/null +++ b/roles/linux-system-roles.network/tests/run-tasks.yml @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: Run the tasklist {{ task }} + hosts: all + tasks: + - include_tasks: "{{ task }}" diff --git a/roles/linux-system-roles.network/tests/tasks/assert-device_absent.yml b/roles/linux-system-roles.network/tests/tasks/assert-device_absent.yml new file mode 100644 index 0000000..67b83ad --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/assert-device_absent.yml @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- include: get-interface_stat.yml +- name: "assert that interface {{ interface }} is absent" + assert: + that: not interface_stat.stat.exists + msg: "{{ interface }} exists" diff --git a/roles/linux-system-roles.network/tests/tasks/assert-device_present.yml b/roles/linux-system-roles.network/tests/tasks/assert-device_present.yml new file mode 100644 index 0000000..e0d4097 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/assert-device_present.yml @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- include: get-interface_stat.yml +- name: "assert that interface {{ interface }} is present" + assert: + that: interface_stat.stat.exists + msg: "{{ interface }} does not exist" diff --git a/roles/linux-system-roles.network/tests/tasks/assert-profile_absent.yml b/roles/linux-system-roles.network/tests/tasks/assert-profile_absent.yml new file mode 100644 index 0000000..e7a6fde --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/assert-profile_absent.yml @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- include: get-profile_stat.yml +- name: "assert that profile '{{ profile }}' is absent" + assert: + that: not profile_stat.stat.exists + msg: "profile {{ profile_path }} does exist" diff --git a/roles/linux-system-roles.network/tests/tasks/assert-profile_present.yml b/roles/linux-system-roles.network/tests/tasks/assert-profile_present.yml new file mode 100644 index 0000000..c84c080 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/assert-profile_present.yml @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- include: get-profile_stat.yml +- name: "assert that profile '{{ profile }}' is present" + assert: + that: profile_stat.stat.exists + msg: "profile {{ profile_path }} does not exist" diff --git a/roles/linux-system-roles.network/tests/tasks/create-and-remove-interface.yml 
b/roles/linux-system-roles.network/tests/tasks/create-and-remove-interface.yml new file mode 100644 index 0000000..9bebf6e --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/create-and-remove-interface.yml @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- include_tasks: show-interfaces.yml +- include_tasks: manage-test-interface.yml + vars: + state: absent +- include_tasks: show-interfaces.yml +- include_tasks: assert-device_absent.yml + +- include_tasks: manage-test-interface.yml + vars: + state: present +- include_tasks: show-interfaces.yml +- include_tasks: assert-device_present.yml + +- include_tasks: manage-test-interface.yml + vars: + state: absent +- include_tasks: show-interfaces.yml +- include_tasks: assert-device_absent.yml diff --git a/roles/linux-system-roles.network/tests/tasks/get-current_interfaces.yml b/roles/linux-system-roles.network/tests/tasks/get-current_interfaces.yml new file mode 100644 index 0000000..33a4a76 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/get-current_interfaces.yml @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- command: ls -1 + args: + chdir: /sys/class/net + register: _current_interfaces +- set_fact: + current_interfaces: "{{ _current_interfaces.stdout_lines }}" diff --git a/roles/linux-system-roles.network/tests/tasks/get-interface_stat.yml b/roles/linux-system-roles.network/tests/tasks/get-interface_stat.yml new file mode 100644 index 0000000..a8b8e5b --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/get-interface_stat.yml @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: "Get stat for interface {{ interface }}" + stat: + get_attributes: false + get_checksum: false + get_mime: false + path: "/sys/class/net/{{ interface }}" + register: interface_stat diff --git a/roles/linux-system-roles.network/tests/tasks/get-profile_stat.yml b/roles/linux-system-roles.network/tests/tasks/get-profile_stat.yml new file mode 100644 index 0000000..bd33a32 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/get-profile_stat.yml @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: "Get stat for network-scripts" + stat: + get_attributes: false + get_checksum: false + get_mime: false + path: "/etc/sysconfig/network-scripts" + register: network_scripts_stat +- name: Set profile path (network-scripts) + set_fact: + profile_path: /etc/sysconfig/network-scripts/ifcfg-{{ profile }} + when: + - network_scripts_stat.stat.exists +- name: Set profile path (NetworkManager system-connections) + set_fact: + profile_path: /etc/NetworkManager/system-connections/{{ profile }} + when: + - not network_scripts_stat.stat.exists +- name: stat profile file + stat: + get_attributes: false + get_checksum: false + get_mime: false + path: "{{ profile_path }}" + register: profile_stat diff --git a/roles/linux-system-roles.network/tests/tasks/manage-test-interface.yml b/roles/linux-system-roles.network/tests/tasks/manage-test-interface.yml new file mode 100644 index 0000000..e7b40f0 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/manage-test-interface.yml @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- fail: + msg: "state needs to be present or absent, not '{{ state }}'" + when: state not in ["present", "absent"] + +- fail: + msg: "type needs to be dummy, tap or veth, not '{{ type }}'" + when: type not in ["dummy", "tap", "veth"] + +# - include: get-current_interfaces.yml +- include: show-interfaces.yml + +- name: Install iproute 
+ package: + name: iproute + state: present + +# veth +- name: Create veth interface {{ interface }} + shell: ip link add {{ interface }} type veth peer name peer{{ interface }} + when: "type == 'veth' and state == 'present' and + interface not in current_interfaces" + +- name: Delete veth interface {{ interface }} + shell: ip link del {{ interface }} type veth + when: "type == 'veth' and state == 'absent' and + interface in current_interfaces" + +# dummy +- name: Create dummy interface {{ interface }} + shell: ip link add "{{ interface }}" type dummy + when: "type == 'dummy' and state == 'present' and + interface not in current_interfaces" + +- name: Delete dummy interface {{ interface }} + shell: ip link del "{{ interface }}" type dummy + when: "type == 'dummy' and state == 'absent' and + interface in current_interfaces" + +# tap +- name: Create tap interface {{ interface }} + shell: ip tuntap add dev {{ interface }} mode tap + when: "type == 'tap' and state == 'present' + and interface not in current_interfaces" + +- name: Delete tap interface {{ interface }} + shell: ip tuntap del dev {{ interface }} mode tap + when: "type == 'tap' and state == 'absent' and + interface in current_interfaces" diff --git a/roles/linux-system-roles.network/tests/tasks/show-interfaces.yml b/roles/linux-system-roles.network/tests/tasks/show-interfaces.yml new file mode 100644 index 0000000..704e8c5 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tasks/show-interfaces.yml @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- include: get-current_interfaces.yml +- debug: + msg: "current_interfaces: {{ current_interfaces }}" diff --git a/roles/linux-system-roles.network/tests/tests_bridge.yml b/roles/linux-system-roles.network/tests/tests_bridge.yml new file mode 100644 index 0000000..9ead308 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_bridge.yml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: Test configuring bridges + hosts: all + vars: + interface: LSR-TST-br31 + + tasks: + - name: "set interface={{ interface }}" + set_fact: + interface: "{{ interface }}" + - include_tasks: tasks/show-interfaces.yml + - include_tasks: tasks/assert-device_absent.yml + +- name: Add test bridge + hosts: all + vars: + network_connections: + - name: "{{ interface }}" + interface_name: "{{ interface }}" + state: up + type: bridge + ip: + dhcp4: no + auto6: yes + roles: + - linux-system-roles.network + +- import_playbook: run-tasks.yml + vars: + task: tasks/assert-device_present.yml + +- import_playbook: run-tasks.yml + vars: + profile: "{{ interface }}" + task: tasks/assert-profile_present.yml + +- import_playbook: down-profile.yml + vars: + profile: "{{ interface }}" +# FIXME: assert profile/device down + +- import_playbook: remove-profile.yml + vars: + profile: "{{ interface }}" + +- import_playbook: run-tasks.yml + vars: + profile: "{{ interface }}" + task: tasks/assert-profile_absent.yml + +# FIXME: Devices might still be left when profile is absent +#- import_playbook: run-tasks.yml +# vars: +# task: tasks/assert-device_absent.yml diff --git a/roles/linux-system-roles.network/tests/tests_bridge_other_provider.yml b/roles/linux-system-roles.network/tests/tests_bridge_other_provider.yml new file mode 100644 index 0000000..e5a4ad7 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_bridge_other_provider.yml @@ -0,0 +1,17 @@ +--- +- hosts: all + name: Run playbook 'tests_bridge.yml' with non-default provider + tasks: + - name: Get service facts + 
service_facts: null + - name: Set network provider + set_fact: + network_provider: '{{ "initscripts" if network_provider_current == "nm" else + "nm" }}' + vars: + network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services + and ansible_facts.services[''NetworkManager.service''][''state''] == ''running'' + else ''initscripts'' }}' +- import_playbook: tests_bridge.yml + when: + - ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/tests_default.yml b/roles/linux-system-roles.network/tests/tests_default.yml new file mode 100644 index 0000000..fda6ed5 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_default.yml @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: Test executing the role with default parameters + hosts: all + roles: + - linux-system-roles.network diff --git a/roles/linux-system-roles.network/tests/tests_default_other_provider.yml b/roles/linux-system-roles.network/tests/tests_default_other_provider.yml new file mode 100644 index 0000000..697bc57 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_default_other_provider.yml @@ -0,0 +1,17 @@ +--- +- hosts: all + name: Run playbook 'tests_default.yml' with non-default provider + tasks: + - name: Get service facts + service_facts: null + - name: Set network provider + set_fact: + network_provider: '{{ "initscripts" if network_provider_current == "nm" else + "nm" }}' + vars: + network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services + and ansible_facts.services[''NetworkManager.service''][''state''] == ''running'' + else ''initscripts'' }}' +- import_playbook: tests_default.yml + when: + - ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/tests_ethernet.yml b/roles/linux-system-roles.network/tests/tests_ethernet.yml new file mode 100644 index 0000000..25f117d --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_ethernet.yml @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: all + tasks: + - debug: + msg: Inside ethernet tests + - debug: + var: network_provider + +- name: Test configuring ethernet devices + hosts: all + vars: + type: veth + interface: lsr27 + + tasks: + - name: "set type={{ type }} and interface={{ interface }}" + set_fact: + type: "{{ type }}" + interface: "{{ interface }}" + - include_tasks: tasks/show-interfaces.yml + - include_tasks: tasks/manage-test-interface.yml + vars: + state: present + - include_tasks: tasks/assert-device_present.yml + +- name: Test static interface up + hosts: all + vars: + network_connections: + - name: "{{ interface }}" + interface_name: "{{ interface }}" + state: up + type: ethernet + autoconnect: yes + ip: + address: 192.0.2.1/24 + roles: + - linux-system-roles.network + +- hosts: all + tasks: + - debug: + var: network_provider + +# FIXME: assert profile present +# FIXME: assert profile/device up + IP address +- import_playbook: down-profile.yml + vars: + profile: "{{ interface }}" +# FIXME: assert profile/device down +- import_playbook: remove-profile.yml + vars: + profile: "{{ interface }}" +# FIXME: assert profile away +- name: Remove interfaces + hosts: all + tasks: + - include_tasks: tasks/manage-test-interface.yml + vars: + state: absent + - include_tasks: tasks/assert-device_absent.yml diff --git a/roles/linux-system-roles.network/tests/tests_ethernet_other_provider.yml b/roles/linux-system-roles.network/tests/tests_ethernet_other_provider.yml 
new file mode 100644 index 0000000..456b052 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_ethernet_other_provider.yml @@ -0,0 +1,17 @@ +--- +- hosts: all + name: Run playbook 'tests_ethernet.yml' with non-default provider + tasks: + - name: Get service facts + service_facts: null + - name: Set network provider + set_fact: + network_provider: '{{ "initscripts" if network_provider_current == "nm" else + "nm" }}' + vars: + network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services + and ansible_facts.services[''NetworkManager.service''][''state''] == ''running'' + else ''initscripts'' }}' +- import_playbook: tests_ethernet.yml + when: + - ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml b/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml new file mode 100644 index 0000000..6aea73b --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml @@ -0,0 +1,13 @@ +--- +# set network provider and gather facts +- hosts: all + tasks: + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts + +# workaround for: https://github.com/ansible/ansible/issues/27973 +# There is no way in Ansible to abort a playbook hosts with specific OS +# releases Therefore we include the playbook with the tests only if the hosts +# would support it. +- import_playbook: playbooks/tests_ethtool_features.yml diff --git a/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml b/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml new file mode 100644 index 0000000..12e5042 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml @@ -0,0 +1,28 @@ +--- +# set network provider and gather facts +- hosts: all + tasks: + - name: Set network provider to 'nm' + set_fact: + network_provider: nm + - name: Install NetworkManager + package: + name: NetworkManager + state: present + - name: Get NetworkManager version + command: rpm -q --qf "%{version}" NetworkManager + args: + warn: "no" + when: true + register: NetworkManager_version + +# workaround for: https://github.com/ansible/ansible/issues/27973 +# There is no way in Ansible to abort a playbook hosts with specific OS +# releases Therefore we include the playbook with the tests only if the hosts +# would support it. +# The test should run with NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6. 
+- import_playbook: playbooks/tests_ethtool_features.yml + when: + - ansible_distribution_major_version != '6' + # NetworkManager 1.20.0 introduced ethtool settings support + - NetworkManager_version.stdout is version('1.20.0', '>=') diff --git a/roles/linux-system-roles.network/tests/tests_helpers-and-asserts.yml b/roles/linux-system-roles.network/tests/tests_helpers-and-asserts.yml new file mode 100644 index 0000000..36f02c2 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_helpers-and-asserts.yml @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- name: Check that creating and removing test devices and assertions work + hosts: all + tasks: + - name: test veth interface management + include_tasks: tasks/create-and-remove-interface.yml + vars: + type: veth + interface: veth1298 + + - name: test veth interface management + include_tasks: tasks/create-and-remove-interface.yml + vars: + type: dummy + interface: dummy1298 + +# FIXME: when: does not seem to work with include_tasks, therefore this cannot be safely tested for now +# - name: test tap interfaces +# include_tasks: tasks/create-and-remove-interface.yml +# vars: +# - type: tap +# - interface: tap1298 +# when: ansible_distribution_major_version > 6 +# # ip tuntap does not exist on RHEL6 +# # FIXME: Maybe use some other tool to manage devices, openvpn can do this, +# # but it is in EPEL diff --git a/roles/linux-system-roles.network/tests/tests_states.yml b/roles/linux-system-roles.network/tests/tests_states.yml new file mode 100644 index 0000000..eff3436 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_states.yml @@ -0,0 +1,11 @@ +--- +# empty playbook to gather facts for import_playbook when clause +- hosts: all + +# workaround for: https://github.com/ansible/ansible/issues/27973 +# There is no way in Ansible to abort a playbook hosts with specific OS +# releases Therefore we include the playbook with the tests only if the hosts +# would support it. +# The test requires NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6. 
+- import_playbook: playbooks/tests_states.yml + when: ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/tests_unit.yml b/roles/linux-system-roles.network/tests/tests_unit.yml new file mode 100644 index 0000000..c6ea4ef --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_unit.yml @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: BSD-3-Clause +--- +- hosts: all + name: Setup for test running + tasks: + - name: Install EPEL on enterprise Linux for python2-mock + command: yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm + args: + warn: false + creates: /etc/yum.repos.d/epel.repo + when: + - ansible_distribution in ['RedHat', 'CentOS'] + - ansible_distribution_major_version in ['6', '7'] + + - name: Install dependencies + package: + name: "{{ item }}" + state: present + # Ignore error because some package names might not be available + ignore_errors: true + loop: + - NetworkManager-libnm + - python2-gobject-base + - python3-gobject-base + - python-gobject-base + - python2-mock + +- hosts: all + name: execute python unit tests + tasks: + - name: Copy python modules + copy: + src: "{{ item }}" + dest: /tmp/test-unit-1/ + local_follow: false + loop: + - ../library/network_connections.py + - unit/test_network_connections.py + - ../module_utils/network_lsr + + - name: Create helpers directory + file: + state: directory + dest: /tmp/test-unit-1/helpers + + - name: Copy helpers + copy: + src: "{{ item }}" + dest: /tmp/test-unit-1/helpers + mode: 0755 + with_fileglob: + - unit/helpers/* + + - name: Check if python2 is available + command: python2 --version + ignore_errors: true + register: python2_available + when: true + + - name: Run python2 unit tests + command: python2 /tmp/test-unit-1/test_network_connections.py --verbose + when: python2_available is succeeded + register: python2_result + + - name: Check if python3 is available + command: python3 --version + ignore_errors: true + register: python3_available + when: true + + - name: Run python3 unit tests + command: python3 /tmp/test-unit-1/test_network_connections.py --verbose + when: python3_available is succeeded + register: python3_result + + - name: Show python2 unit test results + debug: + var: python2_result.stderr_lines + when: python2_result is succeeded + + - name: Show python3 unit test results + debug: + var: python3_result.stderr_lines + when: python3_result is succeeded + + - name: Ensure that at least one python unit test ran + fail: + msg: Tests did not run with python2 or python3 + when: not (python2_available is succeeded or python3_available is succeeded) diff --git a/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml b/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml new file mode 100644 index 0000000..a57db4b --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml @@ -0,0 +1,13 @@ +--- +# set network provider and gather facts +- hosts: all + tasks: + - name: Set network provider to 'initscripts' + set_fact: + network_provider: initscripts + +# workaround for: https://github.com/ansible/ansible/issues/27973 +# There is no way in Ansible to abort a playbook hosts with specific OS +# releases Therefore we include the playbook with the tests only if the hosts +# would support it. 
+- import_playbook: playbooks/tests_vlan_mtu.yml diff --git a/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml b/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml new file mode 100644 index 0000000..d830817 --- /dev/null +++ b/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml @@ -0,0 +1,15 @@ +--- +# set network provider and gather facts +- hosts: all + tasks: + - name: Set network provider to 'nm' + set_fact: + network_provider: nm + +# workaround for: https://github.com/ansible/ansible/issues/27973 +# There is no way in Ansible to abort a playbook hosts with specific OS +# releases Therefore we include the playbook with the tests only if the hosts +# would support it. +# The test requires NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6. +- import_playbook: playbooks/tests_vlan_mtu.yml + when: ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/unit/helpers/ethtool b/roles/linux-system-roles.network/tests/unit/helpers/ethtool new file mode 100755 index 0000000..874561f --- /dev/null +++ b/roles/linux-system-roles.network/tests/unit/helpers/ethtool @@ -0,0 +1,6 @@ +#! /bin/bash + +if [ "${1}" == "-P" ] && [ "${2}" != "" ] +then + echo "Permanent address: 23:00:00:00:00:00" +fi diff --git a/roles/linux-system-roles.network/tests/unit/test_network_connections.py b/roles/linux-system-roles.network/tests/unit/test_network_connections.py new file mode 100755 index 0000000..9c2f0ed --- /dev/null +++ b/roles/linux-system-roles.network/tests/unit/test_network_connections.py @@ -0,0 +1,2236 @@ +#!/usr/bin/env python +""" Tests for network_connections Ansible module """ +# SPDX-License-Identifier: BSD-3-Clause + +import itertools +import os +import pprint as pprint_ +import socket +import sys +import unittest + +TESTS_BASEDIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(1, os.path.join(TESTS_BASEDIR, "../..", "library")) +sys.path.insert(1, os.path.join(TESTS_BASEDIR, "../..", "module_utils")) + +try: + from unittest import mock +except ImportError: # py2 + import mock + +sys.modules["ansible"] = mock.Mock() +sys.modules["ansible.module_utils.basic"] = mock.Mock() +sys.modules["ansible.module_utils"] = mock.Mock() +sys.modules["ansible.module_utils.network_lsr"] = __import__("network_lsr") + +# pylint: disable=import-error, wrong-import-position +import network_lsr +import network_connections as n + +from network_connections import SysUtil +from network_connections import Util + + +try: + my_test_skipIf = unittest.skipIf +except AttributeError: + # python 2.6 workaround + def my_test_skipIf(condition, reason): + if condition: + return lambda x: None + else: + return lambda x: x + + +try: + nmutil = n.NMUtil() + assert nmutil +except Exception: + # NMUtil is not supported, for example on RHEL 6 or without + # pygobject. 
+ nmutil = None + +if nmutil: + NM = n.Util.NM() + GObject = n.Util.GObject() + + +def pprint(msg, obj): + print("PRINT: %s\n" % (msg)) + + p = pprint_.PrettyPrinter(indent=4) + p.pprint(obj) + if nmutil is not None and isinstance(obj, NM.Connection): + obj.dump() + + +ARGS_CONNECTIONS = network_lsr.argument_validator.ArgValidator_ListConnections() +VALIDATE_ONE_MODE_INITSCRIPTS = ARGS_CONNECTIONS.VALIDATE_ONE_MODE_INITSCRIPTS + +ETHTOOL_FEATURES_DEFAULTS = { + "esp-hw-offload": None, + "esp-tx-csum-hw-offload": None, + "fcoe-mtu": None, + "gro": None, + "gso": None, + "highdma": None, + "hw-tc-offload": None, + "l2-fwd-offload": None, + "loopback": None, + "lro": None, + "ntuple": None, + "rx": None, + "rx-all": None, + "rx-fcs": None, + "rx-gro-hw": None, + "rx-udp_tunnel-port-offload": None, + "rx-vlan-filter": None, + "rx-vlan-stag-filter": None, + "rx-vlan-stag-hw-parse": None, + "rxhash": None, + "rxvlan": None, + "sg": None, + "tls-hw-record": None, + "tls-hw-tx-offload": None, + "tso": None, + "tx": None, + "tx-checksum-fcoe-crc": None, + "tx-checksum-ip-generic": None, + "tx-checksum-ipv4": None, + "tx-checksum-ipv6": None, + "tx-checksum-sctp": None, + "tx-esp-segmentation": None, + "tx-fcoe-segmentation": None, + "tx-gre-csum-segmentation": None, + "tx-gre-segmentation": None, + "tx-gso-partial": None, + "tx-gso-robust": None, + "tx-ipxip4-segmentation": None, + "tx-ipxip6-segmentation": None, + "tx-nocache-copy": None, + "tx-scatter-gather": None, + "tx-scatter-gather-fraglist": None, + "tx-sctp-segmentation": None, + "tx-tcp-ecn-segmentation": None, + "tx-tcp-mangleid-segmentation": None, + "tx-tcp-segmentation": None, + "tx-tcp6-segmentation": None, + "tx-udp-segmentation": None, + "tx-udp_tnl-csum-segmentation": None, + "tx-udp_tnl-segmentation": None, + "tx-vlan-stag-hw-insert": None, + "txvlan": None, +} + +ETHTOOL_DEFAULTS = {"features": ETHTOOL_FEATURES_DEFAULTS} + +ETHERNET_DEFAULTS = {"autoneg": None, "duplex": None, "speed": 0} + + +class TestValidator(unittest.TestCase): + def setUp(self): + # default values when "type" is specified and state is not + self.default_connection_settings = { + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "ignore_errors": None, + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + "dns": [], + "dns_search": [], + }, + "mac": None, + "master": None, + "mtu": None, + "name": "5", + "parent": None, + "slave_type": None, + "zone": None, + } + + def assertValidationError(self, v, value): + self.assertRaises(n.ValidationError, v.validate, value) + + def assert_nm_connection_routes_expected(self, connection, route_list_expected): + parser = network_lsr.argument_validator.ArgValidatorIPRoute("route[?]") + route_list_exp = [parser.validate(r) for r in route_list_expected] + route_list_new = itertools.chain( + nmutil.setting_ip_config_get_routes( + connection.get_setting(NM.SettingIP4Config) + ), + nmutil.setting_ip_config_get_routes( + connection.get_setting(NM.SettingIP6Config) + ), + ) + route_list_new = [ + { + "family": r.get_family(), + "network": r.get_dest(), + "prefix": int(r.get_prefix()), + "gateway": r.get_next_hop(), + "metric": int(r.get_metric()), + } + for r in route_list_new + ] + self.assertEqual(route_list_exp, route_list_new) + + def 
do_connections_check_invalid(self, input_connections): + self.assertValidationError(ARGS_CONNECTIONS, input_connections) + + def do_connections_validate_nm(self, input_connections, **kwargs): + if not nmutil: + return + connections = ARGS_CONNECTIONS.validate(input_connections) + for connection in connections: + if "type" in connection: + connection["nm.exists"] = False + connection["nm.uuid"] = n.Util.create_uuid() + mode = VALIDATE_ONE_MODE_INITSCRIPTS + for idx, connection in enumerate(connections): + try: + ARGS_CONNECTIONS.validate_connection_one(mode, connections, idx) + except n.ValidationError: + continue + if "type" in connection: + con_new = nmutil.connection_create(connections, idx) + self.assertTrue(con_new) + self.assertTrue(con_new.verify()) + if "nm_route_list_current" in kwargs: + parser = network_lsr.argument_validator.ArgValidatorIPRoute( + "route[?]" + ) + s4 = con_new.get_setting(NM.SettingIP4Config) + s6 = con_new.get_setting(NM.SettingIP6Config) + s4.clear_routes() + s6.clear_routes() + for r in kwargs["nm_route_list_current"][idx]: + r = parser.validate(r) + r = NM.IPRoute.new( + r["family"], + r["network"], + r["prefix"], + r["gateway"], + r["metric"], + ) + if r.get_family() == socket.AF_INET: + s4.add_route(r) + else: + s6.add_route(r) + con_new = nmutil.connection_create( + connections, idx, connection_current=con_new + ) + self.assertTrue(con_new) + self.assertTrue(con_new.verify()) + if "nm_route_list_expected" in kwargs: + self.assert_nm_connection_routes_expected( + con_new, kwargs["nm_route_list_expected"][idx] + ) + + def do_connections_validate_ifcfg(self, input_connections, **kwargs): + mode = VALIDATE_ONE_MODE_INITSCRIPTS + connections = ARGS_CONNECTIONS.validate(input_connections) + for idx, connection in enumerate(connections): + try: + ARGS_CONNECTIONS.validate_connection_one(mode, connections, idx) + except n.ValidationError: + continue + if "type" not in connection: + continue + if connection["type"] in ["macvlan"]: + # initscripts do not support this type. Skip the test. 
+ continue + content_current = kwargs.get("initscripts_content_current", None) + if content_current: + content_current = content_current[idx] + c = n.IfcfgUtil.ifcfg_create( + connections, idx, content_current=content_current + ) + # pprint("con[%s] = \"%s\"" % (idx, connections[idx]['name']), c) + exp = kwargs.get("initscripts_dict_expected", None) + if exp is not None: + self.assertEqual(exp[idx], c) + + def do_connections_validate( + self, expected_connections, input_connections, **kwargs + ): + connections = ARGS_CONNECTIONS.validate(input_connections) + self.assertEqual(expected_connections, connections) + self.do_connections_validate_nm(input_connections, **kwargs) + self.do_connections_validate_ifcfg(input_connections, **kwargs) + + def test_validate_str(self): + + v = network_lsr.argument_validator.ArgValidatorStr("state") + self.assertEqual("a", v.validate("a")) + self.assertValidationError(v, 1) + self.assertValidationError(v, None) + + v = network_lsr.argument_validator.ArgValidatorStr("state", required=True) + self.assertValidationError(v, None) + + def test_validate_int(self): + + v = network_lsr.argument_validator.ArgValidatorNum( + "state", default_value=None, numeric_type=float + ) + self.assertEqual(1, v.validate(1)) + self.assertEqual(1.5, v.validate(1.5)) + self.assertEqual(1.5, v.validate("1.5")) + self.assertValidationError(v, None) + self.assertValidationError(v, "1a") + + v = network_lsr.argument_validator.ArgValidatorNum("state", default_value=None) + self.assertEqual(1, v.validate(1)) + self.assertEqual(1, v.validate(1.0)) + self.assertEqual(1, v.validate("1")) + self.assertValidationError(v, None) + self.assertValidationError(v, None) + self.assertValidationError(v, 1.5) + self.assertValidationError(v, "1.5") + + v = network_lsr.argument_validator.ArgValidatorNum("state", required=True) + self.assertValidationError(v, None) + + def test_validate_bool(self): + + v = network_lsr.argument_validator.ArgValidatorBool("state") + self.assertEqual(True, v.validate("yes")) + self.assertEqual(True, v.validate("yeS")) + self.assertEqual(True, v.validate("Y")) + self.assertEqual(True, v.validate(True)) + self.assertEqual(True, v.validate("True")) + self.assertEqual(True, v.validate("1")) + self.assertEqual(True, v.validate(1)) + + self.assertEqual(False, v.validate("no")) + self.assertEqual(False, v.validate("nO")) + self.assertEqual(False, v.validate("N")) + self.assertEqual(False, v.validate(False)) + self.assertEqual(False, v.validate("False")) + self.assertEqual(False, v.validate("0")) + self.assertEqual(False, v.validate(0)) + + self.assertValidationError(v, 2) + self.assertValidationError(v, -1) + self.assertValidationError(v, "Ye") + self.assertValidationError(v, "") + self.assertValidationError(v, None) + v = network_lsr.argument_validator.ArgValidatorBool("state", required=True) + self.assertValidationError(v, None) + + def test_validate_dict(self): + + v = network_lsr.argument_validator.ArgValidatorDict( + "dict", + nested=[ + network_lsr.argument_validator.ArgValidatorNum("i", required=True), + network_lsr.argument_validator.ArgValidatorStr( + "s", required=False, default_value="s_default" + ), + network_lsr.argument_validator.ArgValidatorStr( + "l", + required=False, + default_value=network_lsr.argument_validator.ArgValidator.MISSING, + ), + ], + ) + + self.assertEqual({"i": 5, "s": "s_default"}, v.validate({"i": "5"})) + self.assertEqual( + {"i": 5, "s": "s_default", "l": "6"}, v.validate({"i": "5", "l": "6"}) + ) + self.assertValidationError(v, {"k": 1}) + + def 
test_validate_list(self): + + v = network_lsr.argument_validator.ArgValidatorList( + "list", nested=network_lsr.argument_validator.ArgValidatorNum("i") + ) + self.assertEqual([1, 5], v.validate(["1", 5])) + self.assertValidationError(v, [1, "s"]) + + def test_empty(self): + self.maxDiff = None + self.do_connections_validate([], []) + + def test_ethernet_two_defaults(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "ignore_errors": None, + "interface_name": "5", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + "dns": [], + "dns_search": [], + }, + "mac": None, + "master": None, + "mtu": None, + "name": "5", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": None, + "type": "ethernet", + "zone": None, + }, + { + "actions": ["present"], + "ignore_errors": None, + "name": "5", + "persistent_state": "present", + "state": None, + }, + ], + [{"name": "5", "type": "ethernet"}, {"name": "5"}], + ) + + def test_up_ethernet(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "5", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "master": None, + "mtu": None, + "name": "5", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [{"name": "5", "state": "up", "type": "ethernet"}], + ) + + def test_up_ethernet_no_autoconnect(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": False, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "5", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "master": None, + "mtu": None, + "name": "5", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [{"name": "5", "state": "up", "type": "ethernet", "autoconnect": "no"}], + initscripts_dict_expected=[ + { + "ifcfg": { + "BOOTPROTO": "dhcp", + "IPV6INIT": "yes", + "IPV6_AUTOCONF": "yes", + "NM_CONTROLLED": "no", + "ONBOOT": "no", + "TYPE": "Ethernet", + "DEVICE": "5", + }, + "keys": None, + "route": None, + "route6": None, + "rule": None, + "rule6": None, + } + ], + ) + + def test_invalid_autoconnect(self): + self.maxDiff = None + self.do_connections_check_invalid([{"name": "a", "autoconnect": 
True}]) + + def test_absent(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["absent"], + "ignore_errors": None, + "name": "5", + "persistent_state": "absent", + "state": None, + } + ], + [{"name": "5", "persistent_state": "absent"}], + ) + + def test_up_ethernet_mac_mtu_static_ip(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": None, + "ip": { + "dhcp4": False, + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "auto6": True, + "dns": [], + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.174.5", + } + ], + "route_append_only": False, + "rule_append_only": False, + "route": [], + }, + "mac": "52:54:00:44:9f:ba", + "master": None, + "mtu": 1450, + "name": "prod1", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "prod1", + "state": "up", + "type": "ethernet", + "autoconnect": "yes", + "mac": "52:54:00:44:9f:ba", + "mtu": 1450, + "ip": {"address": "192.168.174.5/24"}, + } + ], + ) + + def test_up_single_v4_dns(self): + self.maxDiff = None + # set single IPv4 DNS server + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "prod1", + "ip": { + "dhcp4": False, + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "auto6": True, + "dns": [{"address": "192.168.174.1", "family": socket.AF_INET}], + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.174.5", + } + ], + "route_append_only": False, + "rule_append_only": False, + "route": [], + }, + "mac": None, + "master": None, + "mtu": None, + "name": "prod1", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "prod1", + "state": "up", + "type": "ethernet", + "autoconnect": "yes", + "ip": {"address": "192.168.174.5/24", "dns": "192.168.174.1"}, + } + ], + ) + + def test_routes(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": None, + "ip": { + "dhcp4": False, + "auto6": True, + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.176.5", + }, + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.177.5", + }, + ], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "dns": [], + }, + "mac": "52:54:00:44:9f:ba", + "master": None, + "mtu": 1450, + "name": "prod1", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": 
"up", + "type": "ethernet", + "wait": None, + "zone": None, + }, + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "prod.100", + "ip": { + "dhcp4": False, + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "auto6": False, + "dns": [], + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.174.5", + }, + { + "prefix": 65, + "family": socket.AF_INET6, + "address": "a:b:c::6", + }, + ], + "route_append_only": False, + "rule_append_only": False, + "route": [ + { + "family": socket.AF_INET, + "network": "192.168.5.0", + "prefix": 24, + "gateway": None, + "metric": -1, + } + ], + }, + "mac": None, + "master": None, + "mtu": None, + "name": "prod.100", + "parent": "prod1", + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "vlan", + "vlan": {"id": 100}, + "wait": None, + "zone": None, + }, + ], + [ + { + "name": "prod1", + "state": "up", + "type": "ethernet", + "autoconnect": "yes", + "mac": "52:54:00:44:9f:ba", + "mtu": 1450, + "ip": {"address": "192.168.176.5/24 192.168.177.5/24"}, + }, + { + "name": "prod.100", + "state": "up", + "type": "vlan", + "parent": "prod1", + "vlan": {"id": "100"}, + "ip": { + "address": [ + "192.168.174.5/24", + {"address": "a:b:c::6", "prefix": 65}, + ], + "route_append_only": False, + "rule_append_only": False, + "route": [{"network": "192.168.5.0"}], + }, + }, + ], + ) + + def test_vlan(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": None, + "ip": { + "dhcp4": False, + "auto6": True, + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.176.5", + }, + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.177.5", + }, + ], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "dns": [], + }, + "mac": "52:54:00:44:9f:ba", + "master": None, + "mtu": 1450, + "name": "prod1", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + }, + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "prod.100", + "ip": { + "dhcp4": False, + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "auto6": False, + "dns": [], + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.174.5", + }, + { + "prefix": 65, + "family": socket.AF_INET6, + "address": "a:b:c::6", + }, + ], + "route_append_only": False, + "rule_append_only": False, + "route": [ + { + "family": socket.AF_INET, + "network": "192.168.5.0", + "prefix": 24, + "gateway": None, + "metric": -1, + } + ], + }, + "mac": None, + "master": None, + "mtu": None, + "name": "prod.100", + 
"parent": "prod1", + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "vlan", + "vlan": {"id": 101}, + "wait": None, + "zone": None, + }, + ], + [ + { + "name": "prod1", + "state": "up", + "type": "ethernet", + "autoconnect": "yes", + "mac": "52:54:00:44:9f:ba", + "mtu": 1450, + "ip": {"address": "192.168.176.5/24 192.168.177.5/24"}, + }, + { + "name": "prod.100", + "state": "up", + "type": "vlan", + "parent": "prod1", + "vlan_id": 101, + "ip": { + "address": [ + "192.168.174.5/24", + {"address": "a:b:c::6", "prefix": 65}, + ], + "route_append_only": False, + "rule_append_only": False, + "route": [{"network": "192.168.5.0"}], + }, + }, + ], + ) + + def test_macvlan(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "eth0", + "ip": { + "dhcp4": False, + "auto6": False, + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.122.3", + } + ], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "dns": [], + }, + "mac": "33:24:10:24:2f:b9", + "master": None, + "mtu": 1450, + "name": "eth0-parent", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + }, + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "veth0", + "ip": { + "dhcp4": False, + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "auto6": False, + "dns": [], + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.244.1", + } + ], + "route_append_only": False, + "rule_append_only": False, + "route": [ + { + "family": socket.AF_INET, + "network": "192.168.244.0", + "prefix": 24, + "gateway": None, + "metric": -1, + } + ], + }, + "mac": None, + "macvlan": {"mode": "bridge", "promiscuous": True, "tap": False}, + "master": None, + "mtu": None, + "name": "veth0.0", + "parent": "eth0-parent", + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "macvlan", + "wait": None, + "zone": None, + }, + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "veth1", + "ip": { + "dhcp4": False, + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "auto6": False, + "dns": [], + "address": [ + { + "prefix": 24, + "family": socket.AF_INET, + "address": "192.168.245.7", + } + ], + "route_append_only": False, + "rule_append_only": False, + "route": [ + { + "family": socket.AF_INET, + "network": "192.168.245.0", + "prefix": 24, + "gateway": None, + "metric": -1, + } + ], + }, + "mac": None, + "macvlan": {"mode": "passthru", "promiscuous": False, "tap": True}, + "master": None, + "mtu": None, + "name": "veth0.1", + "parent": "eth0-parent", + "persistent_state": "present", + "slave_type": None, + 
"state": "up", + "type": "macvlan", + "wait": None, + "zone": None, + }, + ], + [ + { + "name": "eth0-parent", + "state": "up", + "type": "ethernet", + "autoconnect": "yes", + "interface_name": "eth0", + "mac": "33:24:10:24:2f:b9", + "mtu": 1450, + "ip": {"address": "192.168.122.3/24", "auto6": False}, + }, + { + "name": "veth0.0", + "state": "up", + "type": "macvlan", + "parent": "eth0-parent", + "interface_name": "veth0", + "macvlan": {"mode": "bridge", "promiscuous": True, "tap": False}, + "ip": { + "address": "192.168.244.1/24", + "auto6": False, + "route_append_only": False, + "rule_append_only": False, + "route": [{"network": "192.168.244.0"}], + }, + }, + { + "name": "veth0.1", + "state": "up", + "type": "macvlan", + "parent": "eth0-parent", + "interface_name": "veth1", + "macvlan": {"mode": "passthru", "promiscuous": False, "tap": True}, + "ip": { + "address": "192.168.245.7/24", + "auto6": False, + "route_append_only": False, + "rule_append_only": False, + "route": [{"network": "192.168.245.0"}], + }, + }, + ], + ) + + def test_bridge_no_dhcp4_auto6(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "bridge2", + "ip": { + "address": [], + "auto6": False, + "dhcp4": False, + "dhcp4_send_hostname": None, + "dns": [], + "dns_search": [], + "gateway4": None, + "gateway6": None, + "route": [], + "route_append_only": False, + "route_metric4": None, + "route_metric6": None, + "rule_append_only": False, + }, + "mac": None, + "master": None, + "mtu": None, + "name": "prod2", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "bridge", + "wait": None, + "zone": None, + }, + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "eth1", + "ip": { + "address": [], + "auto6": True, + "dhcp4": True, + "dhcp4_send_hostname": None, + "dns": [], + "dns_search": [], + "gateway4": None, + "gateway6": None, + "route": [], + "route_append_only": False, + "route_metric4": None, + "route_metric6": None, + "rule_append_only": False, + }, + "mac": None, + "master": "prod2", + "mtu": None, + "name": "prod2-slave1", + "parent": None, + "persistent_state": "present", + "slave_type": "bridge", + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + }, + ], + [ + { + "name": "prod2", + "state": "up", + "type": "bridge", + "interface_name": "bridge2", + "ip": {"dhcp4": False, "auto6": False}, + }, + { + "name": "prod2-slave1", + "state": "up", + "type": "ethernet", + "interface_name": "eth1", + "master": "prod2", + }, + ], + ) + + def test_bond(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "bond": {"mode": "balance-rr", "miimon": None}, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "bond1", + "ip": { + "dhcp4": True, + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "auto6": True, + "dns": [], + "address": [], + "route_append_only": False, + 
"rule_append_only": False, + "route": [], + }, + "mac": None, + "master": None, + "mtu": None, + "name": "bond1", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "bond", + "wait": None, + "zone": None, + } + ], + [{"name": "bond1", "state": "up", "type": "bond"}], + ) + + def test_bond_active_backup(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "bond": {"mode": "active-backup", "miimon": None}, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "bond1", + "ip": { + "dhcp4": True, + "route_metric6": None, + "route_metric4": None, + "dns_search": [], + "dhcp4_send_hostname": None, + "gateway6": None, + "gateway4": None, + "auto6": True, + "dns": [], + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + }, + "mac": None, + "master": None, + "mtu": None, + "name": "bond1", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "bond", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "bond1", + "state": "up", + "type": "bond", + "bond": {"mode": "active-backup"}, + } + ], + ) + + def test_invalid_values(self): + self.maxDiff = None + self.do_connections_check_invalid([{}]) + self.do_connections_check_invalid([{"name": "b", "xxx": 5}]) + + def test_ethernet_mac_address(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "ignore_errors": None, + "interface_name": None, + "ip": { + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "auto6": True, + "dhcp4": True, + "dhcp4_send_hostname": None, + "gateway4": None, + "gateway6": None, + "route_metric4": None, + "route_metric6": None, + "dns": [], + "dns_search": [], + }, + "mac": "aa:bb:cc:dd:ee:ff", + "master": None, + "mtu": None, + "name": "5", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": None, + "type": "ethernet", + "zone": None, + } + ], + [{"name": "5", "type": "ethernet", "mac": "AA:bb:cC:DD:ee:FF"}], + ) + + def test_ethernet_speed_settings(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": {"autoneg": False, "duplex": "half", "speed": 400}, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "5", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "master": None, + "mtu": None, + "name": "5", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "5", + "state": "up", + "type": "ethernet", + "ip": {}, + "ethernet": {"duplex": "half", "speed": 400}, + } + ], + initscripts_dict_expected=[ + { + "ifcfg": { + "BOOTPROTO": "dhcp", + "ETHTOOL_OPTS": "autoneg off speed 400 duplex half", + "IPV6INIT": "yes", + "IPV6_AUTOCONF": "yes", + 
"NM_CONTROLLED": "no", + "ONBOOT": "yes", + "TYPE": "Ethernet", + "DEVICE": "5", + }, + "keys": None, + "route": None, + "route6": None, + "rule": None, + "rule6": None, + } + ], + ) + + def test_bridge2(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "6643-master", + "ip": { + "address": [], + "auto6": True, + "dhcp4": True, + "dhcp4_send_hostname": None, + "dns": [], + "dns_search": [], + "gateway4": None, + "gateway6": None, + "route": [], + "route_append_only": False, + "route_metric4": None, + "route_metric6": None, + "rule_append_only": False, + }, + "mac": None, + "master": None, + "mtu": None, + "name": "6643-master", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "bridge", + "wait": None, + "zone": None, + }, + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "6643", + "ip": { + "address": [], + "auto6": True, + "dhcp4_send_hostname": None, + "dhcp4": True, + "dns": [], + "dns_search": [], + "gateway4": None, + "gateway6": None, + "route": [], + "route_append_only": False, + "route_metric4": None, + "route_metric6": None, + "rule_append_only": False, + }, + "mac": None, + "master": "6643-master", + "mtu": None, + "name": "6643", + "parent": None, + "persistent_state": "present", + "slave_type": "bridge", + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + }, + ], + [ + {"name": "6643-master", "state": "up", "type": "bridge"}, + { + "name": "6643", + "state": "up", + "type": "ethernet", + "master": "6643-master", + }, + ], + ) + + def test_infiniband(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "infiniband": {"p_key": -1, "transport_mode": "datagram"}, + "interface_name": None, + "ip": { + "address": [], + "auto6": True, + "dhcp4": True, + "dhcp4_send_hostname": None, + "dns": [], + "dns_search": [], + "gateway4": None, + "gateway6": None, + "route": [], + "route_append_only": False, + "route_metric4": None, + "route_metric6": None, + "rule_append_only": False, + }, + "mac": None, + "master": None, + "mtu": None, + "name": "infiniband.1", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "infiniband", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "infiniband.1", + "interface_name": "", + "state": "up", + "type": "infiniband", + } + ], + initscripts_dict_expected=[ + { + "ifcfg": { + "BOOTPROTO": "dhcp", + "CONNECTED_MODE": "no", + "IPV6INIT": "yes", + "IPV6_AUTOCONF": "yes", + "NM_CONTROLLED": "no", + "ONBOOT": "yes", + "TYPE": "InfiniBand", + }, + "keys": None, + "route": None, + "route6": None, + "rule": None, + "rule6": None, + } + ], + ) + + def test_infiniband2(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "infiniband": {"p_key": 5, 
"transport_mode": "datagram"}, + "interface_name": None, + "ip": { + "address": [], + "auto6": True, + "dhcp4": True, + "dhcp4_send_hostname": None, + "dns": [], + "dns_search": [], + "gateway4": None, + "gateway6": None, + "route": [], + "route_append_only": False, + "route_metric4": None, + "route_metric6": None, + "rule_append_only": False, + }, + "mac": "11:22:33:44:55:66:77:88:99:00:" + "11:22:33:44:55:66:77:88:99:00", + "master": None, + "mtu": None, + "name": "infiniband.2", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "infiniband", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "infiniband.2", + "state": "up", + "type": "infiniband", + "mac": "11:22:33:44:55:66:77:88:99:00:" + "11:22:33:44:55:66:77:88:99:00", + "infiniband_p_key": 5, + } + ], + initscripts_dict_expected=[ + { + "ifcfg": { + "BOOTPROTO": "dhcp", + "CONNECTED_MODE": "no", + "HWADDR": "11:22:33:44:55:66:77:88:99:00:" + "11:22:33:44:55:66:77:88:99:00", + "IPV6INIT": "yes", + "IPV6_AUTOCONF": "yes", + "NM_CONTROLLED": "no", + "ONBOOT": "yes", + "PKEY": "yes", + "PKEY_ID": "5", + "TYPE": "InfiniBand", + }, + "keys": None, + "route": None, + "route6": None, + "rule": None, + "rule6": None, + } + ], + ) + + def test_route_metric_prefix(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "555", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [ + { + "family": socket.AF_INET, + "network": "192.168.45.0", + "prefix": 24, + "gateway": None, + "metric": 545, + }, + { + "family": socket.AF_INET, + "network": "192.168.46.0", + "prefix": 30, + "gateway": None, + "metric": -1, + }, + ], + "dns": [], + "dns_search": ["aa", "bb"], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "master": None, + "mtu": None, + "name": "555", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "555", + "state": "up", + "type": "ethernet", + "ip": { + "dns_search": ["aa", "bb"], + "route": [ + {"network": "192.168.45.0", "metric": 545}, + {"network": "192.168.46.0", "prefix": 30}, + ], + }, + } + ], + initscripts_dict_expected=[ + { + "ifcfg": { + "BOOTPROTO": "dhcp", + "DOMAIN": "aa bb", + "IPV6INIT": "yes", + "IPV6_AUTOCONF": "yes", + "NM_CONTROLLED": "no", + "ONBOOT": "yes", + "TYPE": "Ethernet", + "DEVICE": "555", + }, + "keys": None, + "route": "192.168.45.0/24 metric 545\n192.168.46.0/30\n", + "route6": None, + "rule": None, + "rule6": None, + } + ], + ) + + def test_route_v6(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "e556", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "dhcp4": True, + "address": [], + "route_append_only": True, + "rule_append_only": False, + "route": [ + { + "family": socket.AF_INET, + "network": "192.168.45.0", + "prefix": 24, + "gateway": 
None, + "metric": 545, + }, + { + "family": socket.AF_INET, + "network": "192.168.46.0", + "prefix": 30, + "gateway": None, + "metric": -1, + }, + { + "family": socket.AF_INET6, + "network": "a:b:c:d::", + "prefix": 64, + "gateway": None, + "metric": -1, + }, + ], + "dns": [], + "dns_search": ["aa", "bb"], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "master": None, + "mtu": None, + "name": "e556", + "parent": None, + "persistent_state": "present", + "slave_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": "external", + } + ], + [ + { + "name": "e556", + "state": "up", + "type": "ethernet", + "zone": "external", + "ip": { + "dns_search": ["aa", "bb"], + "route_append_only": True, + "rule_append_only": False, + "route": [ + {"network": "192.168.45.0", "metric": 545}, + {"network": "192.168.46.0", "prefix": 30}, + {"network": "a:b:c:d::"}, + ], + }, + } + ], + nm_route_list_current=[ + [ + {"network": "192.168.40.0", "prefix": 24, "metric": 545}, + {"network": "192.168.46.0", "prefix": 30}, + {"network": "a:b:c:f::"}, + ] + ], + nm_route_list_expected=[ + [ + {"network": "192.168.40.0", "prefix": 24, "metric": 545}, + {"network": "192.168.46.0", "prefix": 30}, + {"network": "192.168.45.0", "prefix": 24, "metric": 545}, + {"network": "a:b:c:f::"}, + {"network": "a:b:c:d::"}, + ] + ], + initscripts_content_current=[ + { + "ifcfg": "", + "keys": None, + "route": "192.168.40.0/24 metric 545\n192.168.46.0/30", + "route6": "a:b:c:f::/64", + "rule": None, + "rule6": None, + } + ], + initscripts_dict_expected=[ + { + "ifcfg": { + "BOOTPROTO": "dhcp", + "DOMAIN": "aa bb", + "IPV6INIT": "yes", + "IPV6_AUTOCONF": "yes", + "NM_CONTROLLED": "no", + "ONBOOT": "yes", + "TYPE": "Ethernet", + "ZONE": "external", + "DEVICE": "e556", + }, + "keys": None, + "route": "192.168.40.0/24 metric 545\n192.168.46.0/30\n" + "192.168.45.0/24 metric 545\n", + "route6": "a:b:c:f::/64\na:b:c:d::/64\n", + "rule": None, + "rule6": None, + } + ], + ) + + def test_invalid_mac(self): + self.maxDiff = None + self.do_connections_check_invalid( + [{"name": "b", "type": "ethernet", "mac": "aa:b"}] + ) + + def test_interface_name_ethernet_default(self): + """ Use the profile name as interface_name for ethernet profiles """ + cons_without_interface_name = [{"name": "eth0", "type": "ethernet"}] + connections = ARGS_CONNECTIONS.validate(cons_without_interface_name) + self.assertTrue(connections[0]["interface_name"] == "eth0") + + def test_interface_name_ethernet_mac(self): + """ Do not set interface_name when mac is specified """ + cons_without_interface_name = [ + {"name": "eth0", "type": "ethernet", "mac": "3b:0b:88:16:6d:1a"} + ] + connections = ARGS_CONNECTIONS.validate(cons_without_interface_name) + self.assertTrue(connections[0]["interface_name"] is None) + + def test_interface_name_ethernet_empty(self): + """ Allow not to restrict the profile to an interface """ + network_connections = [ + {"name": "internal_network", "type": "ethernet", "interface_name": ""} + ] + connections = ARGS_CONNECTIONS.validate(network_connections) + + self.assertTrue(connections[0]["interface_name"] is None) + + def test_interface_name_ethernet_None(self): + """ Check that interface_name cannot be None """ + network_connections = [ + {"name": "internal_network", "type": "ethernet", "interface_name": None} + ] + self.assertRaises( + n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ) + + def test_interface_name_ethernet_explicit(self): + """ Use the explicitly provided 
interface name """ + network_connections = [ + {"name": "internal", "type": "ethernet", "interface_name": "eth0"} + ] + connections = ARGS_CONNECTIONS.validate(network_connections) + self.assertEqual(connections[0]["interface_name"], "eth0") + + def test_interface_name_ethernet_invalid_profile(self): + """ Require explicit interface_name when the profile name is not a + valid interface_name """ + network_connections = [{"name": "internal:main", "type": "ethernet"}] + self.assertRaises( + n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ) + network_connections = [ + {"name": "internal:main", "type": "ethernet", "interface_name": "eth0"} + ] + connections = ARGS_CONNECTIONS.validate(network_connections) + self.assertTrue(connections[0]["interface_name"] == "eth0") + + def test_interface_name_ethernet_invalid_interface_name(self): + network_connections = [ + {"name": "internal", "type": "ethernet", "interface_name": "invalid:name"} + ] + self.assertRaises( + n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ) + + def test_interface_name_bond_empty_interface_name(self): + network_connections = [ + {"name": "internal", "type": "bond", "interface_name": "invalid:name"} + ] + self.assertRaises( + n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ) + + def test_interface_name_bond_profile_as_interface_name(self): + network_connections = [{"name": "internal", "type": "bond"}] + connections = ARGS_CONNECTIONS.validate(network_connections) + self.assertEqual(connections[0]["interface_name"], "internal") + + def check_connection(self, connection, expected): + reduced_connection = {} + for setting in expected: + reduced_connection[setting] = connection[setting] + self.assertEqual(reduced_connection, expected) + + def check_partial_connection_zero(self, network_config, expected): + connections = ARGS_CONNECTIONS.validate([network_config]) + self.check_connection(connections[0], expected) + + def check_one_connection_with_defaults( + self, network_config, expected_changed_settings + ): + self.maxDiff = None + expected = self.default_connection_settings + expected.update(expected_changed_settings) + + self.do_connections_validate([expected], [network_config]) + + def test_default_states(self): + self.check_partial_connection_zero( + {"name": "eth0"}, + {"actions": ["present"], "persistent_state": "present", "state": None}, + ) + + def test_invalid_persistent_state_up(self): + network_connections = [{"name": "internal", "persistent_state": "up"}] + self.assertRaises( + n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ) + + def test_invalid_persistent_state_down(self): + network_connections = [{"name": "internal", "persistent_state": "down"}] + self.assertRaises( + n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ) + + def test_invalid_state_test(self): + network_connections = [{"name": "internal", "state": "test"}] + self.assertRaises( + n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ) + + def test_default_states_type(self): + self.check_partial_connection_zero( + {"name": "eth0", "type": "ethernet"}, + {"actions": ["present"], "persistent_state": "present", "state": None}, + ) + + def test_persistent_state_present(self): + self.check_partial_connection_zero( + {"name": "eth0", "persistent_state": "present", "type": "ethernet"}, + {"actions": ["present"], "persistent_state": "present", "state": None}, + ) + + def test_state_present(self): + self.check_partial_connection_zero( + {"name": "eth0", 
"state": "present", "type": "ethernet"}, + {"actions": ["present"], "persistent_state": "present", "state": None}, + ) + + def test_state_absent(self): + self.check_partial_connection_zero( + {"name": "eth0", "state": "absent"}, + {"actions": ["absent"], "persistent_state": "absent", "state": None}, + ) + + def test_persistent_state_absent(self): + self.check_partial_connection_zero( + {"name": "eth0", "persistent_state": "absent"}, + {"actions": ["absent"], "persistent_state": "absent", "state": None}, + ) + + def test_state_present_up(self): + self.check_partial_connection_zero( + { + "name": "eth0", + "persistent_state": "present", + "state": "up", + "type": "ethernet", + }, + { + "actions": ["present", "up"], + "persistent_state": "present", + "state": "up", + }, + ) + + def test_state_present_down(self): + self.check_partial_connection_zero( + { + "name": "eth0", + "persistent_state": "present", + "state": "down", + "type": "ethernet", + }, + { + "actions": ["present", "down"], + "persistent_state": "present", + "state": "down", + }, + ) + + def test_state_absent_up_no_type(self): + self.check_partial_connection_zero( + {"name": "eth0", "persistent_state": "absent", "state": "up"}, + { + "actions": ["present", "up", "absent"], + "persistent_state": "absent", + "state": "up", + }, + ) + + def test_state_absent_up_type(self): + # if type is specified, present should happen, too + self.check_partial_connection_zero( + { + "name": "eth0", + "persistent_state": "absent", + "state": "up", + "type": "ethernet", + }, + { + "actions": ["present", "up", "absent"], + "persistent_state": "absent", + "state": "up", + }, + ) + + def test_state_absent_down(self): + # if type is specified, present should happen, too + self.check_partial_connection_zero( + {"name": "eth0", "persistent_state": "absent", "state": "down"}, + { + "actions": ["present", "down", "absent"], + "persistent_state": "absent", + "state": "down", + }, + ) + + def test_state_up_no_type(self): + self.check_partial_connection_zero( + {"name": "eth0", "state": "up"}, + { + "actions": ["present", "up"], + "persistent_state": "present", + "state": "up", + }, + ) + + def test_state_up_type(self): + self.check_partial_connection_zero( + {"name": "eth0", "state": "up", "type": "ethernet"}, + { + "actions": ["present", "up"], + "persistent_state": "present", + "state": "up", + }, + ) + + def test_state_down_no_type(self): + self.check_partial_connection_zero( + {"name": "eth0", "state": "down"}, + { + "actions": ["present", "down"], + "persistent_state": "present", + "state": "down", + }, + ) + + def test_full_state_present_no_type(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present"], + "ignore_errors": None, + "name": "eth0", + "state": None, + "persistent_state": "present", + } + ], + [{"name": "eth0", "persistent_state": "present"}], + ) + + def test_full_state_present_type_defaults(self): + self.check_one_connection_with_defaults( + {"name": "eth0", "type": "ethernet", "persistent_state": "present"}, + { + "actions": ["present"], + "interface_name": "eth0", + "name": "eth0", + "persistent_state": "present", + "state": None, + "type": "ethernet", + }, + ) + + def test_full_state_absent_no_type(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["absent"], + "ignore_errors": None, + "name": "eth0", + "state": None, + "persistent_state": "absent", + } + ], + [{"name": "eth0", "persistent_state": "absent"}], + ) + + def test_full_state_absent_defaults(self): + 
self.maxDiff = None + self.check_one_connection_with_defaults( + {"name": "eth0", "persistent_state": "absent", "type": "ethernet"}, + { + "actions": ["absent"], + "ignore_errors": None, + "name": "eth0", + "state": None, + "persistent_state": "absent", + "type": "ethernet", + "interface_name": "eth0", + }, + ) + + +@my_test_skipIf(nmutil is None, "no support for NM (libnm via pygobject)") +class TestNM(unittest.TestCase): + def test_connection_ensure_setting(self): + con = NM.SimpleConnection.new() + self.assertIsNotNone(con) + self.assertTrue(GObject.type_is_a(con, NM.Connection)) + + s = nmutil.connection_ensure_setting(con, NM.SettingWired) + self.assertIsNotNone(s) + self.assertTrue(GObject.type_is_a(s, NM.SettingWired)) + + s2 = nmutil.connection_ensure_setting(con, NM.SettingWired) + self.assertIsNotNone(s2) + self.assertIs(s, s2) + self.assertTrue(GObject.type_is_a(s, NM.SettingWired)) + + def test_connection_list(self): + connections = nmutil.connection_list() + self.assertIsNotNone(connections) + + +class TestUtils(unittest.TestCase): + def test_check_output(self): + res = Util.check_output(["echo", "test"]) + self.assertEqual(res, "test\n") + self.assertRaises(n.MyError, Util.check_output, ["false"]) + + +class TestSysUtils(unittest.TestCase): + def test_link_read_permaddress(self): + # Manipulate PATH to use ethtool mock script to avoid hard dependency on + # ethtool + os.environ["PATH"] = TESTS_BASEDIR + "/helpers:" + os.environ["PATH"] + self.assertEqual(SysUtil._link_read_permaddress("lo"), "23:00:00:00:00:00") + + +if __name__ == "__main__": + unittest.main() diff --git a/roles/linux-system-roles.network/tests/unit/test_nm_provider.py b/roles/linux-system-roles.network/tests/unit/test_nm_provider.py new file mode 100644 index 0000000..0a2679a --- /dev/null +++ b/roles/linux-system-roles.network/tests/unit/test_nm_provider.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +""" Tests for network_connections Ansible module """ +# SPDX-License-Identifier: BSD-3-Clause + +import os +import sys + +TESTS_BASEDIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(1, os.path.join(TESTS_BASEDIR, "../..", "library")) +sys.path.insert(1, os.path.join(TESTS_BASEDIR, "../..", "module_utils")) + +try: + from unittest import mock +except ImportError: # py2 + import mock + +sys.modules["ansible"] = mock.Mock() +sys.modules["ansible.module_utils.basic"] = mock.Mock() +sys.modules["ansible.module_utils"] = mock.Mock() +sys.modules["ansible.module_utils.network_lsr"] = __import__("network_lsr") + +with mock.patch.dict("sys.modules", {"gi": mock.Mock(), "gi.repository": mock.Mock()}): + # pylint: disable=import-error, wrong-import-position + from network_lsr import nm_provider + + +def test_get_nm_ethtool_feature(): + """ Test get_nm_ethtool_feature() """ + with mock.patch.object(nm_provider.Util, "NM") as nm_mock: + nm_feature = nm_provider.get_nm_ethtool_feature("esp-hw-offload") + assert nm_feature == nm_mock.return_value.ETHTOOL_OPTNAME_FEATURE_ESP_HW_OFFLOAD diff --git a/roles/linux-system-roles.network/tox.ini b/roles/linux-system-roles.network/tox.ini new file mode 100644 index 0000000..604c295 --- /dev/null +++ b/roles/linux-system-roles.network/tox.ini @@ -0,0 +1,181 @@ +[tox] +envlist = black, flake8, pylint, py{26,27,36,37}, ensure_non_running_provider +skipsdist = true +skip_missing_interpreters = True + +[testenv] +basepython = python3 +deps = + py{26,27,36,37,38}: pytest-cov + py{27,36,37,38}: pytest>=3.5.1 + py{26,27}: mock + py26: pytest + molecule_{lint,syntax,test}: docker 
+ molecule_{lint,syntax,test}: jmespath + molecule_{lint,syntax,test}: molecule + # The selinux pypi shim does not work with Ubuntu (as used by Travis), yet. + # Therefore use a fork with Ubuntu support. This can be changed once the + # update is available on PyPi. + # molecule_{lint,syntax,test}: selinux + molecule_{lint,syntax,test}: git+https://github.com/tyll/selinux-pypi-shim@fulllocation + +[base] +passenv = * +setenv = + PYTHONPATH = {toxinidir}/library:{toxinidir}/module_utils + LC_ALL = C +changedir = {toxinidir}/tests +covtarget = {toxinidir}/library --cov {toxinidir}/module_utils +pytesttarget = . + +[testenv:black] +deps = black + +commands = black --check --diff --include "^[^.].*\.py$" . + +[testenv:py26] +install_command = pip install {opts} {packages} +list_dependencies_command = pip freeze +basepython = python2.6 +passenv = {[base]passenv} +setenv = + {[base]setenv} +changedir = {[base]changedir} +commands = + pytest \ + --durations=5 \ + --cov={[base]covtarget} \ + --cov-report=html:htmlcov-py26 --cov-report=term \ + {posargs} \ + {[base]pytesttarget} + +[testenv:py27] +basepython = python2.7 +passenv = {[base]passenv} +setenv = + {[base]setenv} +changedir = {[base]changedir} +commands = + pytest \ + --durations=5 \ + --cov={[base]covtarget} \ + --cov-report=html:htmlcov-py27 --cov-report=term \ + {posargs} \ + {[base]pytesttarget} + +[testenv:py36] +basepython = python3.6 +passenv = {[base]passenv} +setenv = + {[base]setenv} +changedir = {[base]changedir} +commands = + pytest \ + --durations=5 \ + --cov={[base]covtarget} \ + --cov-report=html:htmlcov-py36 --cov-report=term \ + {posargs} \ + {[base]pytesttarget} + +[testenv:py37] +basepython = python3.7 +passenv = {[base]passenv} +setenv = + {[base]setenv} +changedir = {[base]changedir} +commands = + pytest \ + --durations=5 \ + --cov={[base]covtarget} \ + --cov-report=html:htmlcov-py37 --cov-report=term \ + {posargs} \ + {[base]pytesttarget} + +[testenv:py38] +passenv = {[base]passenv} +setenv = + {[base]setenv} +changedir = {[base]changedir} +basepython = python3.8 +commands = + pytest \ + --durations=5 \ + --cov={[base]covtarget} \ + --cov-report=html:htmlcov-py38 --cov-report=term \ + {posargs} \ + {[base]pytesttarget} + +[testenv:pylint] +basepython = python2.7 +setenv = + {[base]setenv} +deps = + pylint>=1.8.4 + ansible +commands = + pylint \ + --errors-only \ + {posargs} \ + library/network_connections.py \ + module_utils/network_lsr \ + tests/unit/test_network_connections.py + +[testenv:flake8] +basepython = python2.7 +deps = + flake8>=3.5 +whitelist_externals = flake8 +commands= + flake8 --statistics {posargs} \ + . 
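+# The sections that follow cover coverage upload (coveralls), the ensure_non_running_provider check,
+# the molecule lint/syntax/test runs, and the per-tool configuration (pytest, flake8, pylint, pycodestyle, travis).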
+ +[testenv:coveralls] +basepython = python2.7 +passenv = TRAVIS TRAVIS_* +deps = + coveralls +changedir = {[base]changedir} +commands = + coveralls + +[testenv:ensure_non_running_provider] +deps = + PyYAML +changedir = {toxinidir}/tests +commands = {toxinidir}/tests/ensure_non_running_provider.py + +[testenv:molecule_lint] +commands_pre = + molecule --version + ansible --version +commands = molecule {posargs} lint + +[testenv:molecule_syntax] +commands = molecule {posargs} syntax + +[testenv:molecule_test] +commands = molecule {posargs} test + +[pytest] +addopts = -rxs + +[flake8] +show_source = True +max-line-length = 88 +ignore = E402,W503 + +[pylint] +max-line-length = 88 +disable = wrong-import-position + +[pycodestyle] +max-line-length = 88 + +[travis] +python = + 2.6: py26 + 2.7: py27,coveralls,flake8,pylint + 3.5: molecule_lint,molecule_syntax,molecule_test + 3.6: py36,black,ensure_non_running_provider + 3.7: py37 + 3.8: py38 diff --git a/roles/oatakan.windows_ovirt_guest_agent/.travis.yml b/roles/oatakan.windows_ovirt_guest_agent/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_guest_agent/LICENSE b/roles/oatakan.windows_ovirt_guest_agent/LICENSE new file mode 100644 index 0000000..dc1b6e7 --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Orcun Atakan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/roles/oatakan.windows_ovirt_guest_agent/README.md b/roles/oatakan.windows_ovirt_guest_agent/README.md new file mode 100644 index 0000000..70effcd --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/README.md @@ -0,0 +1,41 @@ +# ansible-role-windows_ovirt_guest_agent +This repo contains an Ansible role that installs ovirt agent for Windows images. 
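The role name and its `oatakan.windows_virtio` dependency (listed under Dependencies below) follow Ansible Galaxy naming; assuming both roles are published on Galaxy under those names, a minimal `requirements.yml` to pull them in could look like this sketch (install with `ansible-galaxy install -r requirements.yml -p roles/`):

    # requirements.yml -- hypothetical example; role names taken from this repository's roles/ directory
    - src: oatakan.windows_virtio
    - src: oatakan.windows_ovirt_guest_agent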
+ +Requirements +------------ + +Windows targets are managed over WinRM (see tests/test.yml for an example connection setup), and the oVirt guest tools ISO referenced by ovirt_win_iso_url must be downloadable; override the variable to point at an internal mirror in disconnected environments. A Linux task file is also included for installing the ovirt-guest-agent package. + +Role Variables +-------------- + +The settable variables and their defaults are listed in defaults/main.yml: ovirt_package and ovirt_guest_agent_service_name (package and service names), ovirt_win_iso_url and ovirt_win_iso_name (guest tools ISO), virtio_role (the virtio role that is imported), and windows_service_status_code (the map used to report the Windows service state). + +Dependencies +------------ + +A list of roles that this role utilizes: + +- oatakan.windows_virtio + +Example Playbook +---------------- + +An example of how to use the role: + + - hosts: servers + roles: + - oatakan.windows_ovirt_guest_agent + +License +------- + +MIT + +Author Information +------------------ + +Orcun Atakan + diff --git a/roles/oatakan.windows_ovirt_guest_agent/defaults/main.yml b/roles/oatakan.windows_ovirt_guest_agent/defaults/main.yml new file mode 100644 index 0000000..38d5370 --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/defaults/main.yml @@ -0,0 +1,13 @@ +--- + +ovirt_package: ovirt-guest-agent +ovirt_guest_agent_service_name: ovirt-guest-agent +ovirt_win_iso_url: https://resources.ovirt.org/pub/ovirt-4.3/iso/oVirt-toolsSetup/4.3-3/el7/oVirt-toolsSetup-4.3-3.el7.iso +ovirt_win_iso_name: oVirt-toolsSetup.iso + +virtio_role: oatakan.windows_virtio + +windows_service_status_code: + 1: Stopped + 4: Running + 9: Not Installed \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_guest_agent/handlers/main.yml b/roles/oatakan.windows_ovirt_guest_agent/handlers/main.yml new file mode 100644 index 0000000..66dcf2f --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/handlers/main.yml @@ -0,0 +1,7 @@ +--- + +- name: restart Ovirt Guest Agent + service: name={{ ovirt_guest_agent_service_name }} state=restarted + +- name: enabled Ovirt Guest Agent + service: name={{ ovirt_guest_agent_service_name }} enabled=yes diff --git a/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info b/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info new file mode 100644 index 0000000..872777a --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Wed Jun 24 18:44:35 2020 +version: master diff --git a/roles/oatakan.windows_ovirt_guest_agent/meta/main.yml b/roles/oatakan.windows_ovirt_guest_agent/meta/main.yml new file mode 100644 index 0000000..c1cd130 --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/meta/main.yml @@ -0,0 +1,28 @@ +--- +galaxy_info: + author: Orcun Atakan + description: Ansible Galaxy role for installing the oVirt guest agent on Windows images. 
+ role_name: windows_ovirt_guest_agent + company: Red Hat + + license: MIT + + min_ansible_version: 2.5 + + platforms: + - name: Windows + versions: + - all + + cloud_platforms: + - ovirt + + galaxy_tags: + - windows + - ovirt + - rhev + - rhv + - cloud + - multicloud + +dependencies: [] diff --git a/roles/oatakan.windows_ovirt_guest_agent/tasks/Linux.yml b/roles/oatakan.windows_ovirt_guest_agent/tasks/Linux.yml new file mode 100644 index 0000000..df364b1 --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/tasks/Linux.yml @@ -0,0 +1,10 @@ +--- + +- name: install Ovirt Guest Agent + become: true + package: + name: "{{ item }}" + with_items: "{{ ovirt_package }}" + notify: + - enabled Ovirt Guest Agent + - restart Ovirt Guest Agent \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml b/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml new file mode 100644 index 0000000..1fdc179 --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml @@ -0,0 +1,61 @@ +--- + +- name: "{{ ansible_distribution | lower }} | import virtio role" + import_role: + name: "{{ virtio_role }}" + vars: + virtio_win_iso_url: "{{ ovirt_win_iso_url }}" + virtio_win_iso_name: "{{ ovirt_win_iso_name }}" + virtio_win_ovirt: true + virtio_win_iso_path: '' + +- debug: + var: virtio_win_iso_path + +#- name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent" +# win_dsc: +# resource_name: Package +# Path: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe' +# ProductId: '{9B265631-958D-415B-9925-53DEEC43E31D}' +# Name: QEMU guest agent +# Arguments: > +# /S + +- name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent" + win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S' + args: + executable: cmd + creates: "{{ ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools" + async: 1 + poll: 0 + ignore_errors: yes + +- name: "{{ ansible_distribution | lower }} | wait for system to be online" + wait_for_connection: + connect_timeout: 10 + sleep: 5 + delay: 90 + timeout: 300 + +- name: "{{ ansible_distribution | lower }} | get service information" + win_shell: Get-Service OVirtGuestService | ConvertTo-Json + register: register_service_info + ignore_errors: yes + +- name: "{{ ansible_distribution | lower }} | set fact from service info" + set_fact: + ovirt_guest_agent_service_status: "{{ register_service_info.stdout | default('DEFAULT') | from_json }}" + when: register_service_info is success + ignore_errors: yes + +- name: "{{ ansible_distribution | lower }} | set fact as not installed" + set_fact: + ovirt_guest_agent_service_status: + DisplayName: 'ovirt-guest-agent' + Status: 9 + when: register_service_info is undefined + +- name: "{{ ansible_distribution | lower }} | service display status" + debug: + msg: "{{ ovirt_guest_agent_service_status['DisplayName'] }} is {{ windows_service_status_code[ovirt_guest_agent_service_status['Status']] | lower }}" + when: ovirt_guest_agent_service_status is defined \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_guest_agent/tasks/main.yml b/roles/oatakan.windows_ovirt_guest_agent/tasks/main.yml new file mode 100644 index 0000000..569774b --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/tasks/main.yml @@ -0,0 +1,7 @@ +--- + +- name: include distribution task + include_tasks: "{{ ansible_os_family }}.yml" + +- name: force all notified handlers to run here + meta: flush_handlers \ No newline at end of file diff --git 
a/roles/oatakan.windows_ovirt_guest_agent/tests/inventory b/roles/oatakan.windows_ovirt_guest_agent/tests/inventory new file mode 100644 index 0000000..d18580b --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/tests/inventory @@ -0,0 +1 @@ +localhost \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_guest_agent/tests/test.yml b/roles/oatakan.windows_ovirt_guest_agent/tests/test.yml new file mode 100644 index 0000000..d17df2e --- /dev/null +++ b/roles/oatakan.windows_ovirt_guest_agent/tests/test.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + remote_user: Administrator + vars: + ansible_port: 5986 + ansible_connection: winrm + ansible_winrm_transport: credssp + ansible_winrm_server_cert_validation: ignore + roles: + - oatakan.windows_ovirt_guest_agent \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/.travis.yml b/roles/oatakan.windows_ovirt_template/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/LICENSE b/roles/oatakan.windows_ovirt_template/LICENSE new file mode 100644 index 0000000..8a6444e --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Orcun Atakan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/README.md b/roles/oatakan.windows_ovirt_template/README.md new file mode 100644 index 0000000..f6b2110 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/README.md @@ -0,0 +1,95 @@ +# windows_ovirt_template +This repo contains an Ansible role that builds a Windows VM template from an ISO file on Ovirt/RHV. +You can run this role as a part of CI/CD pipeline for building Windows templates on Ovirt/RHV from an ISO file. + +> **_Note:_** This role is provided as an example only. 
Do not use this in production. You can fork/clone and add/remove steps for your environment based on your organization's security and operational requirements. + +Requirements +------------ + +You need to have the following packages installed on your Ansible control machine: + +- mkisofs + +You also need to enable the qemu_cmdline hook on your RHV/oVirt environment; it is required to attach multiple ISO files. Follow the instructions documented here: + +https://www.ovirt.org/develop/developer-guide/vdsm/hook/qemucmdline.html + +Before you can use this role, make sure the Windows install media ISO file has been uploaded to an ISO domain on your RHV/oVirt environment. + +Role Variables +-------------- + +The settable variables and their defaults are listed in defaults/main.yml; the most important ones (template name and sizing, guest OS id, ISO file, network settings, and the oVirt datacenter/cluster/storage domain names) are shown in the example playbook below. + +Dependencies +------------ + +A list of roles that this role utilizes; declare them in a requirements.yml file under the roles directory or download them manually: + +- oatakan.windows_template_build + +Example Playbook +---------------- + +An example of how to use the role: + + - name: create an ovirt windows template + hosts: all + gather_facts: False + connection: local + become: no + vars: + template_force: yes # overwrite existing template with the same name + export_ovf: no # export the template to the export domain upon creation + local_account_password: '' + local_administrator_password: '' + windows_distro_name: 2019_standard # this needs to be one of the standard values, see 'os_short_names' var + template_vm_name: win2019_template + template_vm_root_disk_size: 30 + template_vm_guest_id: windows_2019x64 + template_vm_memory: 4096 + template_vm_efi: false # you need to install an EFI file to use this; false should be fine in most cases + iso_file_name: '' # name of the iso file + iso_image_index: '' # put the index number here from the order inside the iso, for example 1 - standard, 2 - core, etc. + iso_product_key: '' + vm_ansible_port: 5986 + vm_ansible_winrm_transport: credssp + vm_upgrade_powershell: false # only needed for 2008 R2 + install_updates: false # it will take longer to build with the updates; set to true if you want the updates + + ovirt_datacenter: '' # name of the datacenter + ovirt_cluster: '' # name of the cluster + ovirt_data_domain: '' # name of the data domain + ovirt_export_domain: '' # name of the export domain + ovirt_iso_domain: '' # deprecated as of 4.3; you can omit if not used + + template_vm_network_name: ovirtmgmt + template_vm_ip_address: 192.168.10.95 # static ip is required + template_vm_netmask: 255.255.255.0 + template_vm_gateway: 192.168.10.254 + template_vm_domain: example.com + template_vm_dns_servers: + - 8.8.4.4 + - 8.8.8.8 + + roles: + - oatakan.windows_ovirt_template + +For disconnected environments, you can override this variable to point to a local copy of the script that enables WinRM: + +**winrm_enable_script_url:** https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1 + +You can also localize virtio-win and update the virtio_iso_url variable to point to your local URL: + +**virtio_iso_url:** 
https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.173-2/virtio-win.iso + +License +------- + +MIT + +Author Information +------------------ + +Orcun Atakan diff --git a/roles/oatakan.windows_ovirt_template/defaults/main.yml b/roles/oatakan.windows_ovirt_template/defaults/main.yml new file mode 100644 index 0000000..fa97544 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/defaults/main.yml @@ -0,0 +1,68 @@ +--- + +instance_wait_retry_limit: 300 +instance_wait_connection_timeout: 400 + +# this will remove existing template with the same name +template_force: no +template_found: no + +export_ovf: no + +enable_auto_logon: yes + +remove_vm_on_error: yes +vm_failed: no + +virtio_iso_url: https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.173-2/virtio-win.iso +winrm_enable_script_url: https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1 + +windows_build_role: oatakan.windows_template_build + +local_administrator_password: Chang3MyP@ssw0rd21 +local_account_username: ansible +local_account_password: Chang3MyP@ssw0rd21 + +windows_distro_name: 2019_standard_core +iso_file_name: 17763.253.190108-0006.rs5_release_svc_refresh_SERVER_EVAL_x64FRE_en-us.iso +windows_sysprep_template_folder: windows_server + +vm_ansible_port: 5986 +vm_ansible_winrm_transport: credssp +vm_upgrade_powershell: no + +template_vm_name: windows-2019-standard-core-auto +template_vm_root_disk_size: 30 +template_vm_root_disk_format: cow +template_vm_root_disk_interface: virtio +template_vm_memory: 4096 +template_vm_cpu: 2 +template_vm_guest_id: windows_2019x64 +template_vm_efi: no +template_vm_network_name: ovirtmgmt +template_vm_ip_address: 192.168.10.95 +template_vm_netmask: 255.255.255.0 +template_vm_gateway: 192.168.10.254 +template_vm_domain: home.ad +template_vm_dns_servers: + - 192.168.1.254 + - 8.8.8.8 + +template_convert_timeout: 900 +template_convert_seal: no +template_timezone: 'GMT Standard Time' + +ovirt_datacenter: mydatacenter +ovirt_cluster: production +ovirt_data_domain: data_domain +ovirt_export_domain: export_domain +ovirt_iso_domain: iso_domain + +os_short_names: + 2008_r2_standard: 2k8R2 + 2012_r2_standard: 2k12R2 + 2012_r2_datacenter: 2k12R2 + 2016_standard: 2k16 + 2016_standard_core: 2k16 + 2019_standard: 2k19 + 2019_standard_core: 2k19 \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info b/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info new file mode 100644 index 0000000..e85bc2e --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Wed Jun 24 18:44:33 2020 +version: master diff --git a/roles/oatakan.windows_ovirt_template/meta/main.yml b/roles/oatakan.windows_ovirt_template/meta/main.yml new file mode 100644 index 0000000..a62662a --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/meta/main.yml @@ -0,0 +1,28 @@ +--- +galaxy_info: + author: Orcun Atakan + description: Ansible galaxy role for building a Windows VM templates from an ISO file on Ovirt/RHV. 
+ role_name: windows_ovirt_template + company: Red Hat + + license: MIT + + min_ansible_version: 2.5 + + platforms: + - name: Windows + versions: + - all + + cloud_platforms: + - oVirt + + galaxy_tags: + - windows + - ovirt + - rhv + - cloud + - multicloud + - template + +dependencies: [] diff --git a/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml b/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml new file mode 100644 index 0000000..3881ce5 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml @@ -0,0 +1,10 @@ +--- +- name: convert to template + ovirt_template: + auth: "{{ ovirt_auth }}" + name: "{{ template.name }}" + vm: "{{ template.name }}" + cluster: "{{ providers.ovirt.cluster }}" + timeout: "{{ template_convert_timeout }}" + seal: "{{ template_convert_seal }}" + when: template is defined \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml b/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml new file mode 100644 index 0000000..d22c708 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml @@ -0,0 +1,22 @@ +--- + +- block: + - name: remove iso file from data_domain + ovirt_disk: + auth: "{{ ovirt_auth }}" + name: "{{ iso_file }}" + storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}" + state: absent + rescue: + - include_tasks: wait_iso_disk_unlock_pre29.yml + when: ansible_version.full is version('2.9', '<') + + - include_tasks: wait_iso_disk_unlock.yml + when: ansible_version.full is version('2.9', '>=') + + - name: remove iso file from data_domain + ovirt_disk: + auth: "{{ ovirt_auth }}" + name: "{{ iso_file }}" + storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}" + state: absent \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml b/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml new file mode 100644 index 0000000..b69b2ab --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml @@ -0,0 +1,22 @@ +--- + +- name: upload iso file to data_domain + ovirt_disk: + auth: "{{ ovirt_auth }}" + name: "{{ iso_file }}" + upload_image_path: "{{ playbook_dir }}/{{ temp_directory }}/windows_{{ windows_distro_name }}_autounattend_autogen.iso" + storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}" + size: 20MiB + wait: true + bootable: true + format: raw + content_type: iso + force: yes + register: disk_iso_file + +- name: set iso file disk id + set_fact: + ks_iso_file_disk_id: "{{ disk_iso_file.disk.id }}" + ks_iso_file_image_id: "{{ disk_iso_file.disk.image_id }}" + ovirt_datacenter_id: "{{ disk_iso_file.disk.quota.href | regex_replace('^/ovirt-engine/api/datacenters/(.*)/quotas.*$', '\\1') }}" + ovirt_datastore_id: "{{ disk_iso_file.disk.storage_domains[0].id }}" \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml b/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml new file mode 100644 index 0000000..3f3f5b2 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml @@ -0,0 +1,20 @@ +--- + +- name: export template to export domain + ovirt_template: + auth: "{{ ovirt_auth }}" + state: exported + name: "{{ template.name }}" + export_domain: "{{ providers.ovirt.export_domain }}" + cluster: "{{ providers.ovirt.cluster }}" + async: 7200 + poll: 0 + register: export_ovf_file + +- name: 
wait for export to complete + async_status: + jid: "{{ export_ovf_file.ansible_job_id }}" + register: ovf + until: ovf.finished + retries: "{{ instance_wait_retry_limit }}" + delay: 10 \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/main.yml b/roles/oatakan.windows_ovirt_template/tasks/main.yml new file mode 100644 index 0000000..6d54d0b --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/main.yml @@ -0,0 +1,119 @@ +--- + +- name: obtain SSO token with using username/password credentials + ovirt_auth: + url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}" + username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}" + password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}" + insecure: yes + +- include_tasks: preflight_check_pre29.yml + when: ansible_version.full is version('2.9', '<') + +- include_tasks: preflight_check.yml + when: ansible_version.full is version('2.9', '>=') + +# remove existing template +- block: + + - include_tasks: remove_template.yml + + when: + - template_force|bool + - template_found|bool + +- block: + - include_tasks: make_iso.yml + + - include_tasks: provision_vm.yml + + - name: refresh inventory + meta: refresh_inventory + + - name: clear gathered facts + meta: clear_facts + + - name: clear any host errors + meta: clear_host_errors + + - name: add host + add_host: + hostname: template_host + ansible_host: "{{ template_vm_ip_address }}" + ansible_user: "{{ unattend.local_accounts[0].name }}" + ansible_password: "{{ unattend.local_accounts[0].password }}" + ansible_port: "{{ vm_ansible_port | default('5986') }}" + ansible_connection: winrm + ansible_winrm_transport: "{{ vm_ansible_winrm_transport | default('credssp') }}" + ansible_winrm_server_cert_validation: ignore + ansible_winrm_operation_timeout_sec: 250 + ansible_winrm_read_timeout_sec: 280 + ansible_win_async_startup_timeout: 60 + + - include_role: + name: "{{ windows_build_role }}" + apply: + vars: + target_ovirt: yes + install_updates: yes + remove_apps: yes + clean_up_components: yes + upgrade_powershell: "{{ vm_upgrade_powershell | default('no') }}" + delegate_to: template_host + + - name: refresh SSO credentials + ovirt_auth: + url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}" + username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}" + password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}" + insecure: yes + + - include_tasks: stop_vm.yml + + - include_tasks: convert_to_template.yml + + - include_tasks: export_ovf.yml + when: export_ovf|bool + + rescue: + - name: refresh SSO credentials + ovirt_auth: + url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}" + username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}" + password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}" + insecure: yes + + - include_tasks: remove_template.yml + when: remove_vm_on_error|bool + + - name: set vm_failed variable + set_fact: + vm_failed: yes + + always: + - name: refresh SSO credentials + ovirt_auth: + url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}" + username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}" + password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}" + insecure: yes + + - include_tasks: remove_vm.yml + when: remove_vm_on_error|bool or (not remove_vm_on_error|bool and not vm_failed|bool) + + - include_tasks: datastore_iso_remove.yml + + - name: remove temporary directory + file: + path: "{{ 
temp_directory }}" + state: absent + + - name: logout from oVirt + ovirt_auth: + state: absent + ovirt_auth: "{{ ovirt_auth }}" + +- name: fail if needed + fail: + msg: "fail to create a template." + when: vm_failed|bool \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml b/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml new file mode 100644 index 0000000..f486e9a --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml @@ -0,0 +1,30 @@ +--- +- block: + - name: create temporary directory + file: + path: "{{ temp_directory }}/ks_iso" + state: directory + + - name: create Autounattend.xml file + template: + src: "{{ windows_sysprep_template_folder }}/Autounattend.xml.j2" + dest: "{{ temp_directory }}/ks_iso/Autounattend.xml" + + - name: include virtio drivers + include_tasks: virtio_drivers.yml + + - name: create iso + command: mkisofs -V ADDISO -r -iso-level 4 -o {{ playbook_dir }}/{{ temp_directory }}/windows_{{ windows_distro_name }}_autounattend_autogen.iso . + args: + chdir: "{{ playbook_dir }}/{{ temp_directory }}/ks_iso" + + - include_tasks: datastore_upload.yml + always: + - name: remove temporary files + file: + path: "{{ temp_directory }}/{{ item }}" + state: absent + loop: + - windows_{{ windows_distro_name }}_autounattend_autogen.iso + - virtio_win.iso + - ks_iso/ \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml b/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml new file mode 100644 index 0000000..d9a12be --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml @@ -0,0 +1,73 @@ +--- + +- name: get the datacenter name + ovirt_datacenter_info: + auth: "{{ ovirt_auth }}" + pattern: "Clusters.name = {{ providers.ovirt.cluster }}" + register: datacenter_info + +- name: get storage information + ovirt_storage_domain_info: + auth: "{{ ovirt_auth }}" + pattern: "datacenter={{ datacenter_info.ovirt_datacenters[0].name }}" + register: storage_info + when: + - template_disk_storage is undefined + +- name: get data domain + set_fact: + disk_storage_domain: "{{ storage_info.ovirt_storage_domains|json_query(the_query)|list|first|default(None) }}" + when: + - template_disk_storage is undefined + vars: + the_query: "[?type=='data']" + +- name: get iso domain (deprecated as of oVirt/RHV 4.3) + set_fact: + iso_domain: "{{ storage_info.ovirt_storage_domains|json_query(the_query)|list|first|default(None) }}" + vars: + the_query: "[?type=='iso']" + +- name: check if template already exists + ovirt_template_info: + auth: "{{ ovirt_auth }}" + pattern: "name={{ template.name }} and datacenter={{ datacenter_info.ovirt_datacenters[0].name }}" + register: template_info + +- block: + - name: set template_found to yes + set_fact: + template_found: yes + + - name: fail with message + fail: + msg: "Existing template found on ovirt/rhv: {{ template.name }}" + when: not template_force|bool + when: + - template_info.ovirt_templates is defined + - template_info.ovirt_templates | length > 0 + +- name: check iso file on data domain + ovirt_disk_info: + auth: "{{ ovirt_auth }}" + pattern: "name={{ iso_file_name }}" + register: ovirt_disk_main_iso + when: iso_file_name is defined + +- debug: + msg: "{{ ovirt_disk_main_iso }}" + +- name: set file id of the iso file + set_fact: + iso_file_id: "{{ ovirt_disk_main_iso.ovirt_disks[0].id }}" + when: + - ovirt_disk_main_iso.ovirt_disks | length > 0 + - ovirt_disk_main_iso.ovirt_disks[0].id is defined + - 
ovirt_disk_main_iso.ovirt_disks[0].content_type == 'iso' + +- name: fail with message + fail: + msg: "iso file ({{ iso_file_name }}) could not be found on the data domain and iso domain does not exists" + when: + - iso_file_id is undefined + - iso_domain is undefined or iso_domain|length == 0 \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml new file mode 100644 index 0000000..f7a3c17 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml @@ -0,0 +1,69 @@ +--- + +- name: get the datacenter name (<2.9) + ovirt_datacenter_facts: + auth: "{{ ovirt_auth }}" + pattern: "Clusters.name = {{ providers.ovirt.cluster }}" + +- name: get storage information (<2.9) + ovirt_storage_domain_facts: + auth: "{{ ovirt_auth }}" + pattern: "datacenter={{ ovirt_datacenters[0].name }}" + when: + - template_disk_storage is undefined + +- name: get data domain (<2.9) + set_fact: + disk_storage_domain: "{{ ovirt_storage_domains|json_query(the_query)|list|first }}" + when: + - template_disk_storage is undefined + vars: + the_query: "[?type=='data']" + +- name: get iso domain (deprecated as of oVirt/RHV 4.3) (<2.9) + set_fact: + iso_domain: "{{ ovirt_storage_domains|json_query(the_query)|list|first }}" + vars: + the_query: "[?type=='iso']" + +- name: check if template already exists (<2.9) + ovirt_template_facts: + auth: "{{ ovirt_auth }}" + pattern: "name={{ template.name }} and datacenter={{ ovirt_datacenters[0].name }}" + +- block: + - name: set template_found to yes + set_fact: + template_found: yes + + - name: fail with message + fail: + msg: "Existing template found on ovirt/rhv: {{ template.name }}" + when: not template_force|bool + when: + - ovirt_templates is defined + - ovirt_templates | length > 0 + +- name: check iso file on data domain + ovirt_disk_facts: + auth: "{{ ovirt_auth }}" + pattern: "name={{ iso_file_name }}" + when: iso_file_name is defined + +- debug: + msg: "{{ ovirt_disks }}" + +- name: set file id of the iso file + set_fact: + iso_file_id: "{{ ovirt_disks[0].id }}" + when: + - ovirt_disks | length > 0 + - ovirt_disks[0].id is defined + - ovirt_disks[0].content_type == 'iso' + +- name: fail with message + fail: + msg: "iso file ({{ iso_file_name }}) could not be found on the data domain and iso domain does not exists" + when: + - iso_file_id is undefined + - iso_domain is undefined or iso_domain|length == 0 \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml b/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml new file mode 100644 index 0000000..4923002 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml @@ -0,0 +1,123 @@ +--- + +- name: provision a new vm + ovirt_vm: + auth: "{{ ovirt_auth }}" + name: "{{ template.name }}" + cluster: "{{ providers.ovirt.cluster|default('Default') }}" + state: present + wait: yes + memory: "{{ template.memory }}MiB" + cpu_sockets: "{{ template.cpu }}" + boot_devices: + - hd + - cdrom + cd_iso: "{{ template.cd_iso }}" + type: server + high_availability: true + nics: + - name: nic1 + profile_name: "{{ template.networks[0].name }}" + network: "{{ template.networks[0].name }}" + custom_properties: "{{ custom_properties | default(omit) }}" + operating_system: "{{ template_vm_guest_id | default(omit) }}" + timezone: "{{ template_timezone | default(omit) }}" + async: 7200 + poll: 0 + register: deploy + +- name: wait 
for instance creation to complete + async_status: jid="{{ deploy.ansible_job_id }}" + register: instance + until: instance.finished + retries: "{{ instance_wait_retry_limit }}" + delay: 10 + +- name: create a disk + ovirt_disk: + auth: "{{ ovirt_auth }}" + name: "{% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}" + vm_name: "{{ template.name }}" + size: "{{ item.size | default(omit) }}" + format: "{{ item.format | default(omit) }}" + interface: "{{ item.interface | default(omit) }}" + bootable: "{{ item.bootable | default(omit) }}" + storage_domain: "{{ item.storage_domain | default(omit) }}" + activate: yes + state: present + wait: yes + async: 7200 + poll: 0 + register: create_disks + loop: "{{ template.disks }}" + when: + - template is defined + - template.disks is defined + +- name: wait for disk creation to complete + async_status: + jid: "{{ item.ansible_job_id }}" + register: disks_creation + until: disks_creation.finished + retries: "{{ instance_wait_retry_limit }}" + delay: 10 + loop: "{{ create_disks.results }}" + when: + - template is defined + - create_disks.results is defined + - item.ansible_job_id is defined + +- include_tasks: wait_disk_unlock_pre29.yml + when: + - ansible_version.full is version('2.9', '<') + - template is defined + - template.disks is defined + - disks_creation.results is defined + +- include_tasks: wait_disk_unlock.yml + when: + - ansible_version.full is version('2.9', '>=') + - template is defined + - template.disks is defined + - disks_creation.results is defined + +- name: assign tags to provisioned vms + ovirt_tag: + name: "{{ item }}_{{ instance.item.item[item] }}" + vms: ["{{ instance.item.item.name }}"] + state: attached + loop: + - app_name + - role + when: + - template is defined + - instance is defined + - instance.vm is defined + - instance.item.item[item] is defined + +- name: start vm + ovirt_vm: + auth: "{{ ovirt_auth }}" + name: "{{ template.name }}" + cluster: "{{ providers.ovirt.cluster|default('Default') }}" + state: running + async: 7200 + poll: 0 + register: start + +- name: wait for instance creation to complete + async_status: jid="{{ start.ansible_job_id }}" + register: instance + until: instance.finished + retries: "{{ instance_wait_retry_limit }}" + delay: 10 + +- name: waiting for server to come online + wait_for: + host: "{{ template.networks[0].ip }}" + port: "{{ template.ansible_port | default(vm_ansible_port) | default(ansible_port) | default('5986') }}" + timeout: "{{ instance_wait_connection_timeout }}" + when: + - instance is changed + - template is defined + ignore_errors: yes \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml b/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml new file mode 100644 index 0000000..36e4eae --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml @@ -0,0 +1,20 @@ +--- + +- name: remove template + ovirt_template: + auth: "{{ ovirt_auth }}" + cluster: "{{ providers.ovirt.cluster }}" + name: "{{ template.name }}" + state: absent + async: 7200 + poll: 0 + register: undeploy + when: template is defined + +- name: wait for template deletion to complete + async_status: + jid: "{{ undeploy.ansible_job_id }}" + register: instance + until: instance.finished + retries: "{{ instance_wait_retry_limit }}" + delay: 10 \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml b/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml new 
file mode 100644 index 0000000..d13838d --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml @@ -0,0 +1,20 @@ +--- + +- name: remove vm + ovirt_vm: + auth: "{{ ovirt_auth }}" + cluster: "{{ providers.ovirt.cluster }}" + name: "{{ template.name }}" + state: absent + async: 7200 + poll: 0 + register: undeploy + when: template is defined + +- name: wait for template deletion to complete + async_status: + jid: "{{ undeploy.ansible_job_id }}" + register: instance + until: instance.finished + retries: "{{ instance_wait_retry_limit }}" + delay: 10 \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml b/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml new file mode 100644 index 0000000..eaa5835 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml @@ -0,0 +1,35 @@ +--- + +- block: + + - name: wait for server to stop responding + wait_for: + host: "{{ template_vm_ip_address }}" + port: "{{ vm_ansible_port | default('5986') }}" + timeout: 120 + state: stopped + ignore_errors: yes + + - include_tasks: wait_vm_poweredoff_pre29.yml + when: ansible_version.full is version('2.9', '<') + + - include_tasks: wait_vm_poweredoff.yml + when: ansible_version.full is version('2.9', '>=') + + rescue: + - name: ignoring any error + debug: + msg: "ignoring error..." + +- name: reconfigure vm + ovirt_vm: + auth: "{{ ovirt_auth }}" + cluster: "{{ providers.ovirt.cluster }}" + name: "{{ template.name }}" + boot_devices: + - hd + cd_iso: "" + custom_properties: "{{ custom_properties_efi if template_vm_efi|bool else '' }}" + force: yes + state: present + when: template is defined \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/virtio_drivers.yml b/roles/oatakan.windows_ovirt_template/tasks/virtio_drivers.yml new file mode 100644 index 0000000..62f5085 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/virtio_drivers.yml @@ -0,0 +1,40 @@ +--- + +- name: download virtio win iso file + get_url: + url: "{{ virtio_iso_url }}" + dest: "{{ temp_directory }}/virtio_win.iso" + register: download_virtio_iso + until: download_virtio_iso is success + delay: 3 + retries: 5 + +- name: set list of directories to copy + set_fact: + virtio_iso_list_of_directories_to_extract: + - /viostor/{{ os_short_names[windows_distro_name] | default('2k16') }}/amd64 + - /NetKVM/{{ os_short_names[windows_distro_name] | default('2k16') }}/amd64 + +- name: get a list of files from template iso + shell: > + set -o pipefail && + isoinfo -f -R -i {{ playbook_dir }}/{{ temp_directory }}/virtio_win.iso | + grep -E "^{{ virtio_iso_list_of_directories_to_extract | join('|^') }}" + changed_when: False + register: virtio_iso_list_of_files + +- name: copy files from virtio iso to target + shell: | + set -o pipefail && + isoinfo -f -R -i {{ playbook_dir }}/{{ temp_directory }}/virtio_win.iso |\ + grep -E "^{{ virtio_iso_list_of_directories_to_extract | join('|^') }}" | while read line; do + d=$(dirname $line) + od=".${d}" + [ -f $od ] && rm -f $od + [ -d $od ] || mkdir -p $od + [ -d ".${line}" ] || isoinfo -R -i \ + {{ playbook_dir }}/{{ temp_directory }}/virtio_win.iso -x $line > ".${line}" + done + changed_when: True + args: + chdir: "{{ playbook_dir }}/{{ temp_directory }}/ks_iso" \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml new file mode 100644 index 0000000..96bc7de --- /dev/null +++ 
b/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml @@ -0,0 +1,11 @@ +--- + +- name: wait until the image is unlocked by the oVirt engine + ovirt_disk_info: + auth: "{{ ovirt_auth }}" + pattern: "name={% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}" + register: ovirt_disk_info + until: (ovirt_disk_info.ovirt_disks is defined) and (ovirt_disk_info.ovirt_disks | length > 0) and (ovirt_disk_info.ovirt_disks[0].status != "locked") + retries: 10 + delay: 3 + loop: "{{ template.disks }}" \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock_pre29.yml new file mode 100644 index 0000000..693d74c --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock_pre29.yml @@ -0,0 +1,10 @@ +--- + +- name: wait until the image is unlocked by the oVirt engine (<2.9) + ovirt_disk_facts: + auth: "{{ ovirt_auth }}" + pattern: "name={% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}" + until: (ovirt_disks is defined) and (ovirt_disks | length > 0) and (ovirt_disks[0].status != "locked") + retries: 10 + delay: 3 + loop: "{{ template.disks }}" \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml new file mode 100644 index 0000000..c85807d --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml @@ -0,0 +1,11 @@ +--- + +- name: wait until the disk is unlocked by the oVirt engine + ovirt_disk_info: + auth: "{{ ovirt_auth }}" + pattern: "name={{ iso_file }}" + register: ovirt_disk_info + until: (ovirt_disk_info.ovirt_disks is defined) and (ovirt_disk_info.ovirt_disks | length > 0) and (ovirt_disk_info.ovirt_disks[0].status != "locked") + retries: 10 + delay: 3 + when: iso_file is defined \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock_pre29.yml new file mode 100644 index 0000000..ff7a35f --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock_pre29.yml @@ -0,0 +1,10 @@ +--- + +- name: wait until the disk is unlocked by the oVirt engine (<2.9) + ovirt_disk_facts: + auth: "{{ ovirt_auth }}" + pattern: "name={{ iso_file }}" + until: (ovirt_disks is defined) and (ovirt_disks | length > 0) and (ovirt_disks[0].status != "locked") + retries: 10 + delay: 3 + when: iso_file is defined \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml new file mode 100644 index 0000000..eaba261 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml @@ -0,0 +1,13 @@ +--- + +- name: wait for vm status to be poweredoff + ovirt_vm_info: + auth: "{{ ovirt_auth }}" + pattern: name={{ template.name }} and cluster={{ providers.ovirt.cluster }} + register: ovirt_vm_info_result + until: + - ovirt_vm_info_result.ovirt_vms is defined + - ovirt_vm_info_result.ovirt_vms|length > 0 + - ovirt_vm_info_result.ovirt_vms[0].status == 'down' + delay: 5 + retries: 30 \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff_pre29.yml new file mode 100644 
index 0000000..264fef9 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff_pre29.yml @@ -0,0 +1,12 @@ +--- + +- name: wait for vm status to be poweredoff + ovirt_vm_facts: + auth: "{{ ovirt_auth }}" + pattern: name={{ template.name }} and cluster={{ providers.ovirt.cluster }} + until: + - ovirt_vms is defined + - ovirt_vms|length > 0 + - ovirt_vms[0].status == 'down' + delay: 5 + retries: 30 \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2 b/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2 new file mode 100644 index 0000000..5d19485 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2 @@ -0,0 +1,404 @@ + + + + + + + + E:\ + + + + + + + en-US + + en-US + en-US + en-US + en-US + en-US + + + + +{% if template_vm_efi is undefined or not template_vm_efi|bool %} + + + true + NTFS + + 1 + 1 + + + NTFS + + C + 2 + 2 + + + 0 + true + + + Primary + 1 + 350 + + + 2 + Primary + true + + +{% else %} + + + 1 + NTFS + 1 + + + + 2 + FAT32 + 2 + + + + 3 + 3 + + + NTFS + C + 4 + 4 + + + + 0 + true + + + 1 + Primary + 300 + + + 2 + EFI + 100 + + + 3 + MSR + 128 + + + 4 + Primary + true + + +{% endif %} + + + + + + + /IMAGE/INDEX + {{ iso_image_index }} + + + + 0 +{% if template_vm_efi is undefined or not template_vm_efi|bool %} + 2 +{% else %} + 4 +{% endif %} + + + + + true + Ansible + Your Org. + +{% if unattend.product_key is defined and unattend.product_key|length %} + {{ unattend.product_key }} +{% endif %} + OnError + + + + + + + 1 + + + false + false + + + + + en-US + en-US + en-US + en-US + + + + true +{% if not '2008' in windows_distro_name %} + true + true + true +{% endif %} + true + Home + 1 + + {{ settings.time_zone | default('UTC') }} + +{% if unattend.administrator_password is defined %} + + {{ unattend.administrator_password }} + true</PlainText> + </AdministratorPassword> +{% endif %} +{% if unattend.local_accounts is defined %} + <LocalAccounts> +{% for local_account in unattend.local_accounts %} + <LocalAccount wcm:action="add"> +{% if local_account.password is defined %} + <Password> + <Value>{{ local_account.password }}</Value> + <PlainText>true</PlainText> + </Password> +{% endif %} +{% if local_account.description is defined %} + <Description>{{ local_account.description }}</Description> +{% endif %} +{% if local_account.display_name is defined %} + <DisplayName>{{ local_account.display_name }}</DisplayName> +{% endif %} +{% if local_account.group is defined %} + <Group>{{ local_account.group }}</Group> +{% endif %} +{% if local_account.name is defined %} + <Name>{{ local_account.name }}</Name> +{% endif %} + </LocalAccount> +{% endfor %} + </LocalAccounts> +{% endif %} + </UserAccounts> +{% if enable_auto_logon and unattend.local_accounts and unattend.local_accounts[0].name and unattend.local_accounts[0].password %} + <AutoLogon> + <Password> + <Value>{{ unattend.local_accounts[0].password }}</Value> + <PlainText>true</PlainText> + </Password> + <Username>{{ unattend.local_accounts[0].name }}</Username> + <Enabled>true</Enabled> + </AutoLogon> +{% endif %} + <FirstLogonCommands> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "Set-NetConnectionProfile -NetworkCategory Private"</CommandLine> + <Description>Set network connection profile to private</Description> + <Order>1</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + 
<SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Force"</CommandLine> + <Description>Set Execution Policy 64 Bit</Description> + <Order>2</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> +{% if '2008' in windows_distro_name %} + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c winrm quickconfig -q</CommandLine> + <Description>winrm quickconfig -q</Description> + <Order>4</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c winrm quickconfig -transport:http</CommandLine> + <Description>winrm quickconfig -transport:http</Description> + <Order>5</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c winrm set winrm/config @{MaxTimeoutms="1800000"}</CommandLine> + <Description>Win RM MaxTimoutms</Description> + <Order>6</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c winrm set winrm/config/winrs @{MaxMemoryPerShellMB="800"}</CommandLine> + <Description>Win RM MaxMemoryPerShellMB</Description> + <Order>7</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c winrm set winrm/config/service @{AllowUnencrypted="true"}</CommandLine> + <Description>Win RM AllowUnencrypted</Description> + <Order>8</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c winrm set winrm/config/service/auth @{Basic="true"}</CommandLine> + <Description>Win RM auth Basic</Description> + <Order>9</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c winrm set winrm/config/client/auth @{Basic="true"}</CommandLine> + <Description>Win RM client auth Basic</Description> + <Order>10</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c winrm set winrm/config/listener?Address=*+Transport=HTTP @{Port="5985"} </CommandLine> + <Description>Win RM listener Address/Port</Description> + <Order>11</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c netsh firewall add portopening TCP 5985 "Port 5985"</CommandLine> + <Description>Win RM port open</Description> + <Order>12</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c net stop winrm</CommandLine> + <Description>Stop Win RM Service </Description> + <Order>13</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c sc config winrm start= auto</CommandLine> + <Description>Win RM Autostart</Description> + <Order>14</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c net start winrm</CommandLine> + <Description>Start Win RM Service</Description> + <Order>15</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> +{% endif %} + <SynchronousCommand 
wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString('{{ winrm_enable_script_url }}'))) -ForceNewSSLCert -EnableCredSSP"</CommandLine> + <Description>Enable winrm</Description> + <Order>20</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "Enable-WSManCredSSP -Role Server -Force"</CommandLine> + <Description>Enable winrm server role</Description> + <Order>21</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "Set-Item -Path 'WSMan:\localhost\Service\Auth\CredSSP' -Value $true"</CommandLine> + <Description>Enable credssp authentication</Description> + <Order>22</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> +{% if template.networks is defined and template.networks[0].ip is defined and template.networks[0].gateway is defined and template.networks[0].netmask is defined %} +{% if not '2008' in windows_distro_name %} + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "New-NetIPAddress –IPAddress {{ template.networks[0].ip }} -DefaultGateway {{ template.networks[0].gateway }} -PrefixLength {{ (template.networks[0].ip + '/' + template.networks[0].netmask) | ipaddr('prefix') }} -InterfaceIndex (Get-NetAdapter).InterfaceIndex"</CommandLine> + <Description>Set static ip</Description> + <Order>50</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> +{% else %} + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c netsh int ipv4 set address "Local Area connection" static {{ template.networks[0].ip }} {{ template.networks[0].netmask }} {{ template.networks[0].gateway }}</CommandLine> + <Description>Set static ip</Description> + <Order>50</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> +{% endif %} +{% if template.networks[0].dns_servers is defined %} +{% if not '2008' in windows_distro_name %} + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "Set-DNSClientServerAddress –InterfaceIndex (Get-NetAdapter).InterfaceIndex –ServerAddresses {{ template.networks[0].dns_servers|join(',') }}"</CommandLine> + <Description>Set static ip</Description> + <Order>51</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> +{% else %} + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c netsh int ipv4 set dns "Local Area connection" static {{ template.networks[0].dns_servers[0] }}</CommandLine> + <Description>Set static ip</Description> + <Order>51</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> +{% endif %} +{% endif %} +{% endif %} + </FirstLogonCommands> + </component> + </settings> + <settings pass="specialize"> + <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-ServerManager-SvrMgrNc" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> + <DoNotOpenServerManagerAtLogon>true</DoNotOpenServerManagerAtLogon> + </component> + <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-IE-ESC" processorArchitecture="amd64" 
publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> + <IEHardenAdmin>false</IEHardenAdmin> + <IEHardenUser>false</IEHardenUser> + </component> + <component name="Microsoft-Windows-IE-InternetExplorer" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + <SearchScopes> + <Scope wcm:action="add"> + <ScopeDefault>true</ScopeDefault> + <ScopeDisplayName>Google</ScopeDisplayName> + <ScopeKey>Google</ScopeKey> + <ScopeUrl>http://www.google.com/search?q={searchTerms}</ScopeUrl> + </Scope> + </SearchScopes> + <DisableAccelerators>true</DisableAccelerators> + <DisableFirstRunWizard>true</DisableFirstRunWizard> + <Home_Page>about:blank</Home_Page> + </component> + <component name="Microsoft-Windows-TerminalServices-LocalSessionManager" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + <fDenyTSConnections>false</fDenyTSConnections> + </component> + <component name="Microsoft-Windows-TerminalServices-RDP-WinStationExtensions" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + <UserAuthentication>0</UserAuthentication> + </component> + <component name="Networking-MPSSVC-Svc" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS" xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + <FirewallGroups> + <FirewallGroup wcm:action="add" wcm:keyValue="RemoteDesktop"> + <Active>true</Active> + <Group>Remote Desktop</Group> + <Profile>all</Profile> + </FirewallGroup> + </FirewallGroups> + </component> + <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-OutOfBoxExperience" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> + <DoNotOpenInitialConfigurationTasksAtLogon>true</DoNotOpenInitialConfigurationTasksAtLogon> + </component> + <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Security-SPP-UX" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> + <SkipAutoActivation>{{ settings.skip_auto_activation | default('true') }}</SkipAutoActivation> + </component> + </settings> + <settings pass="offlineServicing"> + <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-LUA-Settings" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> + <EnableLUA>false</EnableLUA> + </component> + </settings> +</unattend> \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tests/inventory b/roles/oatakan.windows_ovirt_template/tests/inventory new file mode 100644 index 0000000..d18580b --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tests/inventory @@ -0,0 +1 @@ +localhost \ No newline at end of file 
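
A pattern worth noting in the task files above (provision_vm.yml, export_ovf.yml, remove_template.yml, remove_vm.yml): long-running oVirt calls are started with async: 7200 / poll: 0 and then polled with async_status until the background job finishes, so a slow engine operation never holds the API or WinRM connection open. A minimal sketch of that pattern, using command: sleep as a stand-in for the oVirt module call and a literal retry count in place of the role's instance_wait_retry_limit variable:

- name: kick off a long-running operation without blocking
  command: sleep 300             # stand-in for an ovirt_vm/ovirt_template/ovirt_disk task
  async: 7200                    # allow the background job up to two hours
  poll: 0                        # return immediately with a job id
  register: job

- name: wait for the background job to finish
  async_status:
    jid: "{{ job.ansible_job_id }}"
  register: result
  until: result.finished
  retries: 720                   # 720 retries x 10s delay covers the 2-hour async window
  delay: 10

The role applies this same polling loop to VM provisioning, disk creation, template export, and template/VM removal.
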
diff --git a/roles/oatakan.windows_ovirt_template/tests/test.yml b/roles/oatakan.windows_ovirt_template/tests/test.yml new file mode 100644 index 0000000..2c0141f --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/tests/test.yml @@ -0,0 +1,7 @@ +--- +- hosts: localhost + gather_facts: False + connection: local + become: no + roles: + - ../. \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/vars/main.yml b/roles/oatakan.windows_ovirt_template/vars/main.yml new file mode 100644 index 0000000..e50e6a2 --- /dev/null +++ b/roles/oatakan.windows_ovirt_template/vars/main.yml @@ -0,0 +1,71 @@ +--- + +temp_directory: tmp{{ awx_job_id | default('') }} + +iso_file: "windows_{{ windows_distro_name }}_autounattend{{ awx_job_id | default('') }}.iso" + +export_dir: "{{ playbook_dir }}/{{ temp_directory }}" + +unattend: + administrator_password: "{{ local_administrator_password }}" + local_accounts: + - name: "{{ local_account_username }}" + display_name: "{{ local_account_username }}" + description: "{{ local_account_username }} user" + group: Administrators + password: "{{ local_account_password }}" + settings: + computer_name: wintemp + time_zone: UTC + skip_auto_activation: true + product_key: "{{ iso_product_key | default('') }}" + +providers: + ovirt: + datacenter: "{{ ovirt_datacenter }}" + cluster: "{{ ovirt_cluster }}" + data_domain: "{{ ovirt_data_domain }}" + export_domain: "{{ ovirt_export_domain }}" + iso_domain: "{{ ovirt_iso_domain }}" + +template: + name: "{{ template_vm_name }}" + role: windows_template + app_name: windows_template_generate + domain: "{{ template_vm_domain }}" + disks: + - name: "{{ template_vm_name }}" + size: "{{ template_vm_root_disk_size }}GiB" + format: "{{ template_vm_root_disk_format }}" + interface: "{{ template_vm_root_disk_interface | default('virtio') }}" + bootable: yes + storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}" + memory: "{{ template_vm_memory }}" + cpu: "{{ template_vm_cpu }}" + networks: + - name: "{{ template_vm_network_name }}" + ip: "{{ template_vm_ip_address }}" + netmask: "{{ template_vm_netmask }}" + gateway: "{{ template_vm_gateway }}" + domain: "{{ template_vm_domain }}" + device_type: e1000 + dns_servers: "{{ template_vm_dns_servers }}" + cd_iso: "{{ iso_file_id | default(iso_file_name) }}" # if using data domain, file name does not work, need to use id + +qemu_cmdline_second_iso: + - -device + - ide-cd,bus=ide.3,unit=0,drive=drive-ua-0001,id=ua-0001,bootindex=3 + - -drive + - format=raw,if=none,id=drive-ua-0001,werror=report,rerror=report,readonly=on,file=/rhev/data-center/{{ ovirt_datacenter_id }}/{{ ovirt_datastore_id }}/images/{{ ks_iso_file_disk_id }}/{{ ks_iso_file_image_id }} + +qemu_cmdline_efi: + - -drive + - if=pflash,format=raw,readonly,file=/usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd + +custom_properties: + - name: qemu_cmdline + value: "{{ ((qemu_cmdline_second_iso + qemu_cmdline_efi) | to_json) if template_vm_efi|bool else (qemu_cmdline_second_iso | to_json) }}" + +custom_properties_efi: + - name: qemu_cmdline + value: "{{ qemu_cmdline_efi | to_json }}" \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/.travis.yml b/roles/oatakan.windows_template_build/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/roles/oatakan.windows_template_build/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: 
+ - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/LICENSE b/roles/oatakan.windows_template_build/LICENSE new file mode 100644 index 0000000..dc1b6e7 --- /dev/null +++ b/roles/oatakan.windows_template_build/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Orcun Atakan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/roles/oatakan.windows_template_build/README.md b/roles/oatakan.windows_template_build/README.md new file mode 100644 index 0000000..e4c3094 --- /dev/null +++ b/roles/oatakan.windows_template_build/README.md @@ -0,0 +1,46 @@ +# windows_template_build +This repo contains an Ansible role that builds a Windows template on any cloud platform (ovirt/rhev, VMware, EC2, Azure, etc.). +You can run this role as part of a VMware template build role or a Packer role in a CI/CD pipeline for building Windows templates. + +> **_Note:_** This role is provided as an example only. Do not use this in production. You can fork/clone and add/remove steps for your environment based on your organization's security and operational requirements. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
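
The role's behavior is driven by a handful of boolean toggles declared in defaults/main.yml further down in this diff (install_updates, remove_apps, clean_up_components, upgrade_powershell). A minimal sketch of overriding them from a play, assuming a windows host group that is already reachable over WinRM:

    - hosts: windows
      roles:
        - role: oatakan.windows_template_build
          vars:
            install_updates: yes
            remove_apps: yes
            clean_up_components: yes
            upgrade_powershell: no

These are the same toggles that oatakan.windows_ovirt_template passes when it applies this role to the temporary build VM.
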
+ +Dependencies +------------ + +A list of roles that this role utilizes: + +- oatakan.windows_ec2_ena_driver +- oatakan.windows_ovirt_guest_agent +- oatakan.windows_virtio +- oatakan.windows_vmware_tools +- oatakan.windows_virtualbox_guest_additions + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - oatakan.windows_template_build + +License +------- + +MIT + +Author Information +------------------ + +Orcun Atakan + diff --git a/roles/oatakan.windows_template_build/defaults/main.yml b/roles/oatakan.windows_template_build/defaults/main.yml new file mode 100644 index 0000000..17c9136 --- /dev/null +++ b/roles/oatakan.windows_template_build/defaults/main.yml @@ -0,0 +1,83 @@ +--- + +install_updates: yes +remove_apps: no +clean_up_components: yes +upgrade_powershell: no +powershell_target_version: 4.0 +temp_directory: "{{ ansible_env.TEMP }}" +update_retry_limit: 10 + +powershell_script_url: https://raw.githubusercontent.com/jborean93/ansible-windows/master/scripts/Upgrade-PowerShell.ps1 +powershell_upgrade_script_file: 'C:\Upgrade-PowerShell.ps1' + +enable_tlsv12_hotfix_download_location: "{{ ansible_env.TEMP }}" +enable_tlsv12_hotfix: + kb: KB3080079 + file: Windows6.1-KB3080079-x64.msu + url: https://download.microsoft.com/download/F/4/1/F4154AD2-2119-48B4-BF99-CC15F68E110D/Windows6.1-KB3080079-x64.msu + +#sdelete_download_url: http://web.archive.org/web/20140902022253/http://download.sysinternals.com/files/SDelete.zip +bleachbit_download_url: https://download.bleachbit.org/BleachBit-2.2-portable.zip +sdelete_download_url: https://download.sysinternals.com/files/SDelete.zip +#ultradefrag_download_url: http://downloads.sourceforge.net/project/ultradefrag/stable-release/6.1.0/ultradefrag-portable-6.1.0.bin.amd64.zip +ultradefrag_download_url: https://astuteinternet.dl.sourceforge.net/project/ultradefrag/stable-release/7.1.3/ultradefrag-portable-7.1.3.bin.amd64.zip + +enable_auto_logon: yes + +target_ovirt: no +target_qemu: no +target_ec2: no +target_vagrant: no + +bleachbit_clean: yes +bleachbit_free_disk_space: yes + +ec2_ena_driver_role: oatakan.windows_ec2_ena_driver +ovirt_guest_agent_role: oatakan.windows_ovirt_guest_agent +virtio_role: oatakan.windows_virtio +vmware_tools_role: oatakan.windows_vmware_tools +virtualbox_guest_additions_role: oatakan.windows_virtualbox_guest_additions + +policy: + allow_unauthenticated_guest_access: no + +local_administrator_password: Chang3MyP@ssw0rd21 +local_account_username: ansible +local_account_password: Chang3MyP@ssw0rd21 + +shutdown_instance: yes + +winsxs_cleanmgr_file: + 2008r2: '{{ ansible_env.windir }}\winsxs\amd64_microsoft-windows-cleanmgr_31bf3856ad364e35_6.1.7600.16385_none_c9392808773cd7da\cleanmgr.exe' + 2012: '{{ ansible_env.windir }}\WinSxS\amd64_microsoft-windows-cleanmgr_31bf3856ad364e35_6.2.9200.16384_none_c60dddc5e750072a\cleanmgr.exe' +winsxs_cleanmgr_mui_file: + 2008r2: '{{ ansible_env.windir }}\winsxs\amd64_microsoft-windows-cleanmgr.resources_31bf3856ad364e35_6.1.7600.16385_en-us_b9cb6194b257cc63\cleanmgr.exe.mui' + 2012: '{{ ansible_env.windir }}\WinSxS\amd64_microsoft-windows-cleanmgr.resources_31bf3856ad364e35_6.2.9200.16384_en-us_b6a01752226afbb3\cleanmgr.exe.mui' + +cleanup_registry_keys: + - Active Setup Temp Folders + - BranchCache + - Downloaded Program Files + - Internet Cache Files + - Memory Dump Files + - Old ChkDsk Files + - Previous Installations + - Recycle Bin + 
- Service Pack Cleanup + - Setup Log Files + - System error memory dump files + - System error minidump files + - Temporary Files + - Temporary Setup Files + - Thumbnail Cache + - Update Cleanup + - Upgrade Discarded Files + - User file versions + - Windows Defender + - Windows Error Reporting Archive Files + - Windows Error Reporting Queue Files + - Windows Error Reporting System Archive Files + - Windows Error Reporting System Queue Files + - Windows ESD installation files + - Windows Upgrade Log Files \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/files/RemoveUserApps.ps1 b/roles/oatakan.windows_template_build/files/RemoveUserApps.ps1 new file mode 100755 index 0000000..8ab3572 --- /dev/null +++ b/roles/oatakan.windows_template_build/files/RemoveUserApps.ps1 @@ -0,0 +1,49 @@ + +function Get-LogDir +{ + try + { + $ts = New-Object -ComObject Microsoft.SMS.TSEnvironment -ErrorAction Stop + if ($ts.Value("LogPath") -ne "") + { + $logDir = $ts.Value("LogPath") + } + else + { + $logDir = $ts.Value("_SMSTSLogPath") + } + } + catch + { + $logDir = $env:TEMP + } + return $logDir +} + + +$logDir = Get-LogDir +Start-Transcript "$logDir\RemoveUserApps.log" + +# Get the list of provisioned packages +$provisioned = Get-AppxProvisionedPackage -online + +# Check each installed app +$count = 0 + +for ($i=1; $i -ile 2; $i++) { + # Check each app (two loops just in case there are dependencies that can't be removed until the + # main app is removed) + Get-AppxPackage | ? {$_.SignatureKind -ne 'System'} | ForEach-Object { + $current = $_ + $found = $provisioned | ? {$_.DisplayName -eq $current.Name -and $_.Version -eq $current.Version} + if ($found.Count -eq 0) + { + Write-Host "$($current.Name) version $($current.Version) is not provisioned, removing." 
+ Remove-AppxPackage -Package $current.PackageFullName + $count++ + } + } +} +Write-Host "Number of apps removed: $count" + +Stop-Transcript diff --git a/roles/oatakan.windows_template_build/files/win-updates.ps1 b/roles/oatakan.windows_template_build/files/win-updates.ps1 new file mode 100644 index 0000000..9fbdb15 --- /dev/null +++ b/roles/oatakan.windows_template_build/files/win-updates.ps1 @@ -0,0 +1,229 @@ +param($global:RestartRequired=0, + $global:MoreUpdates=0, + $global:MaxCycles=5, + $MaxUpdatesPerCycle=500) + +$Logfile = "C:\Windows\Temp\win-updates.log" + +function LogWrite { + Param ([string]$logstring) + $now = Get-Date -format s + Add-Content $Logfile -value "$now $logstring" + Write-Host $logstring +} + +function Check-ContinueRestartOrEnd() { + $RegistryKey = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update" + $RegistryEntry = "CustomRebootRequired" + switch ($global:RestartRequired) { + 0 { + $prop = (Get-ItemProperty $RegistryKey).$RegistryEntry + if ($prop) { + LogWrite "Restart Registry Entry Exists - Removing It" + Remove-ItemProperty -Path $RegistryKey -Name $RegistryEntry -ErrorAction SilentlyContinue + } + + LogWrite "No Restart Required" + Check-WindowsUpdates + + if (($global:MoreUpdates -eq 1) -and ($script:Cycles -le $global:MaxCycles)) { + Install-WindowsUpdates + } elseif ($script:Cycles -gt $global:MaxCycles) { + LogWrite "Exceeded Cycle Count - Stopping" + } else { + LogWrite "Done Installing Windows Updates" + } + } + 1 { + $prop = (Get-ItemProperty $RegistryKey).$RegistryEntry + if (-not $prop) { + LogWrite "Restart Registry Entry Does Not Exist - Creating It" + Set-ItemProperty -Path $RegistryKey -Name $RegistryEntry -Value "1" + } else { + LogWrite "Restart Registry Entry Exists Already" + } + + #LogWrite "Restart Required - Restarting..." + #Restart-Computer + } + default { + LogWrite "Unsure If A Restart Is Required" + break + } + } +} + +function Install-WindowsUpdates() { + $script:Cycles++ + LogWrite "Evaluating Available Updates with limit of $($MaxUpdatesPerCycle):" + $UpdatesToDownload = New-Object -ComObject 'Microsoft.Update.UpdateColl' + $script:i = 0; + $CurrentUpdates = $SearchResult.Updates + while($script:i -lt $CurrentUpdates.Count -and $script:CycleUpdateCount -lt $MaxUpdatesPerCycle) { + $Update = $CurrentUpdates.Item($script:i) + if (($Update -ne $null) -and (!$Update.IsDownloaded)) { + [bool]$addThisUpdate = $false + if ($Update.InstallationBehavior.CanRequestUserInput) { + LogWrite "> Skipping: $($Update.Title) because it requires user input" + } else { + if (!($Update.EulaAccepted)) { + LogWrite "> Note: $($Update.Title) has a license agreement that must be accepted. Accepting the license." + $Update.AcceptEula() + [bool]$addThisUpdate = $true + $script:CycleUpdateCount++ + } else { + [bool]$addThisUpdate = $true + $script:CycleUpdateCount++ + } + } + + if ([bool]$addThisUpdate) { + LogWrite "Adding: $($Update.Title)" + $UpdatesToDownload.Add($Update) |Out-Null + } + } + $script:i++ + } + + if ($UpdatesToDownload.Count -eq 0) { + LogWrite "No Updates To Download..." + } else { + LogWrite 'Downloading Updates...' + $ok = 0; + while (! $ok) { + try { + $Downloader = $UpdateSession.CreateUpdateDownloader() + $Downloader.Updates = $UpdatesToDownload + $Downloader.Download() + $ok = 1; + } catch { + LogWrite $_.Exception | Format-List -force + LogWrite "Error downloading updates. Retrying in 30s." 
+ $script:attempts = $script:attempts + 1 + Start-Sleep -s 30 + } + } + } + + $UpdatesToInstall = New-Object -ComObject 'Microsoft.Update.UpdateColl' + [bool]$rebootMayBeRequired = $false + LogWrite 'The following updates are downloaded and ready to be installed:' + foreach ($Update in $SearchResult.Updates) { + if (($Update.IsDownloaded)) { + LogWrite "> $($Update.Title)" + $UpdatesToInstall.Add($Update) |Out-Null + + if ($Update.InstallationBehavior.RebootBehavior -gt 0){ + [bool]$rebootMayBeRequired = $true + } + } + } + + if ($UpdatesToInstall.Count -eq 0) { + LogWrite 'No updates available to install...' + $global:MoreUpdates=0 + $global:RestartRequired=0 + break + } + + if ($rebootMayBeRequired) { + LogWrite 'These updates may require a reboot' + $global:RestartRequired=1 + } + + LogWrite 'Installing updates...' + + $Installer = $script:UpdateSession.CreateUpdateInstaller() + $Installer.Updates = $UpdatesToInstall + $InstallationResult = $Installer.Install() + + LogWrite "Installation Result: $($InstallationResult.ResultCode)" + LogWrite "Reboot Required: $($InstallationResult.RebootRequired)" + LogWrite 'Listing of updates installed and individual installation results:' + if ($InstallationResult.RebootRequired) { + $global:RestartRequired=1 + } else { + $global:RestartRequired=0 + } + + for($i=0; $i -lt $UpdatesToInstall.Count; $i++) { + New-Object -TypeName PSObject -Property @{ + Title = $UpdatesToInstall.Item($i).Title + Result = $InstallationResult.GetUpdateResult($i).ResultCode + } + LogWrite "Item: " $UpdatesToInstall.Item($i).Title + LogWrite "Result: " $InstallationResult.GetUpdateResult($i).ResultCode; + } + + Check-ContinueRestartOrEnd +} + +function Check-WindowsUpdates() { + LogWrite "Checking For Windows Updates" + $Username = $env:USERDOMAIN + "\" + $env:USERNAME + + New-EventLog -Source $ScriptName -LogName 'Windows Powershell' -ErrorAction SilentlyContinue + + $Message = "Script: " + $ScriptPath + "`nScript User: " + $Username + "`nStarted: " + (Get-Date).toString() + + Write-EventLog -LogName 'Windows Powershell' -Source $ScriptName -EventID "104" -EntryType "Information" -Message $Message + LogWrite $Message + + $script:UpdateSearcher = $script:UpdateSession.CreateUpdateSearcher() + $script:successful = $FALSE + $script:attempts = 0 + $script:maxAttempts = 12 + while(-not $script:successful -and $script:attempts -lt $script:maxAttempts) { + try { + $script:SearchResult = $script:UpdateSearcher.Search("IsInstalled=0 and Type='Software' and IsHidden=0") + $script:successful = $TRUE + } catch { + LogWrite $_.Exception | Format-List -force + LogWrite "Search call to UpdateSearcher was unsuccessful. Retrying in 10s." + $script:attempts = $script:attempts + 1 + Start-Sleep -s 10 + } + } + + if ($SearchResult.Updates.Count -ne 0) { + $Message = "There are " + $SearchResult.Updates.Count + " more updates." + LogWrite $Message + try { + for($i=0; $i -lt $script:SearchResult.Updates.Count; $i++) { + LogWrite $script:SearchResult.Updates.Item($i).Title + LogWrite $script:SearchResult.Updates.Item($i).Description + LogWrite $script:SearchResult.Updates.Item($i).RebootRequired + LogWrite $script:SearchResult.Updates.Item($i).EulaAccepted + } + $global:MoreUpdates=1 + } catch { + LogWrite $_.Exception | Format-List -force + LogWrite "Showing SearchResult was unsuccessful. Rebooting." + $global:RestartRequired=1 + $global:MoreUpdates=0 + Check-ContinueRestartOrEnd + LogWrite "Show never happen to see this text!" 
+ Restart-Computer + } + } else { + LogWrite 'There are no applicable updates' + $global:RestartRequired=0 + $global:MoreUpdates=0 + } +} + +$script:ScriptName = $MyInvocation.MyCommand.ToString() +$script:ScriptPath = $MyInvocation.MyCommand.Path +$script:UpdateSession = New-Object -ComObject 'Microsoft.Update.Session' +$script:UpdateSession.ClientApplicationID = 'Packer Windows Update Installer' +$script:UpdateSearcher = $script:UpdateSession.CreateUpdateSearcher() +$script:SearchResult = New-Object -ComObject 'Microsoft.Update.UpdateColl' +$script:Cycles = 0 +$script:CycleUpdateCount = 0 + +Check-WindowsUpdates +if ($global:MoreUpdates -eq 1) { + Install-WindowsUpdates +} else { + Check-ContinueRestartOrEnd +} \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/meta/.galaxy_install_info b/roles/oatakan.windows_template_build/meta/.galaxy_install_info new file mode 100644 index 0000000..b2324ea --- /dev/null +++ b/roles/oatakan.windows_template_build/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Wed Jun 24 18:44:34 2020 +version: master diff --git a/roles/oatakan.windows_template_build/meta/main.yml b/roles/oatakan.windows_template_build/meta/main.yml new file mode 100644 index 0000000..64b84ad --- /dev/null +++ b/roles/oatakan.windows_template_build/meta/main.yml @@ -0,0 +1,39 @@ +--- +galaxy_info: + author: Orcun Atakan + description: Ansible galaxy role for building a Windows template on any cloud platform(ovirt/rhev, VMware, EC2, Azure etc.) + role_name: windows_template_build + company: Red Hat + + license: MIT + + min_ansible_version: 2.5 + + platforms: + - name: Windows + versions: + - all + + cloud_platforms: + - amazon + - google + - azure + - azure + - vmware + - ovirt + + galaxy_tags: + - windows + - ec2 + - vmware + - azure + - microsoft + - rhev + - rhv + - ovirt + - aws + - cloud + - multicloud + - template + +dependencies: [] diff --git a/roles/oatakan.windows_template_build/tasks/clean-up-components.yml b/roles/oatakan.windows_template_build/tasks/clean-up-components.yml new file mode 100644 index 0000000..38d22bb --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/clean-up-components.yml @@ -0,0 +1,14 @@ +--- + +- name: clean up components and update files + win_shell: Dism.exe /online /Cleanup-Image /StartComponentCleanup /ResetBase + when: "'Windows Server 2008' not in ansible_distribution" + ignore_errors: yes + +- include_tasks: clean-up-with-cleanmgr.yml + when: "'Windows Server 2008' in ansible_distribution" + +- name: clean up components and update files + win_shell: Dism.exe /online /Cleanup-Image /SpSuperseded + when: "'Windows Server 2008' in ansible_distribution" + ignore_errors: yes \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/clean-up-with-cleanmgr.yml b/roles/oatakan.windows_template_build/tasks/clean-up-with-cleanmgr.yml new file mode 100644 index 0000000..14358f1 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/clean-up-with-cleanmgr.yml @@ -0,0 +1,64 @@ +--- + +- block: + + - name: check for cleanmgr executable + win_stat: + path: '{{ ansible_env.windir }}\System32\cleanmgr.exe' + register: check_cleanmgr_file + + - include_tasks: copy_cleanmgr.yml + vars: + os_short_name: 2008r2 + when: + - not check_cleanmgr_file.stat.exists + - ('Windows Server 2008 R2' in ansible_distribution) + + - include_tasks: copy_cleanmgr.yml + vars: + os_short_name: 2012 + when: + - not check_cleanmgr_file.stat.exists + - ('Windows Server 2012' in ansible_distribution) + - (not 
'Windows Server 2012 R2' in ansible_distribution) + + - name: get free space + win_shell: Get-PSDrive C | Select-Object Free | ConvertTo-Json + register: free_space_before_cleanup + + - name: ensure cleanup registry paths exist + win_regedit: + path: HKLM:\Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\{{ item }} + loop: "{{ cleanup_registry_keys }}" + + - name: set cleanup registry keys + win_regedit: + path: HKLM:\Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\{{ item }} + name: StateFlags0012 + data: 2 + type: dword + loop: "{{ cleanup_registry_keys }}" + + - name: run cleanmgr + win_shell: cleanmgr /sagerun:12 + + - name: wait for cleanmgr to finish + win_shell: (get-wmiobject win32_process | where-object {$_.processname -eq 'cleanmgr.exe'} | measure).count + register: check_cleanmgr_process + until: check_cleanmgr_process.stdout is defined and check_cleanmgr_process.stdout|int == 0 + delay: 5 + retries: 300 + + - name: get free space + win_shell: Get-PSDrive C | Select-Object Free | ConvertTo-Json + register: free_space_after_cleanup + + - debug: + msg: + - "Free space before cleanup: {{ ((free_space_before_cleanup.stdout | from_json)['Free']|int / (1024*1024*1024)) | round(2, 'floor') }} GB" + - "Free space after cleanup: {{ ((free_space_after_cleanup.stdout | from_json)['Free']|int / (1024*1024*1024)) | round(2, 'floor') }} GB" + + rescue: + - name: ignore any errors + debug: + msg: "ignoring any error with clean up with cleanmgr" \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/clean-up.yml b/roles/oatakan.windows_template_build/tasks/clean-up.yml new file mode 100644 index 0000000..1af40d1 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/clean-up.yml @@ -0,0 +1,41 @@ +--- + +- name: remove page file + win_regedit: + path: HKLM:\System\CurrentControlSet\Control\Session Manager\Memory Management + name: PagingFiles + data: "" + state: present + register: cleanup_pagefile_removal + +- name: reboot server after clearing page file + win_reboot: + when: cleanup_pagefile_removal is changed + +- name: cleanup the temp folders + win_file: + path: '{{ item }}' + state: absent + ignore_errors: yes + loop: + - C:\Temp + - C:\Windows\Panther + - C:\Windows\Temp + +- name: cleanup the C:\Recovery folder + win_shell: Remove-Item -Path C:\Recovery -Force -Recurse + ignore_errors: yes + +- name: check to see if WinSXS ManifestCache folder exist + win_stat: + path: '{{ ansible_env.windir }}\winsxs\ManifestCache' + register: winsxs_dir + +- name: clear out the WinSXS ManifestCache folder + win_shell: | + &cmd.exe /c Takeown /f %windir%\winsxs\ManifestCache\* + &cmd.exe /c Icacls %windir%\winsxs\ManifestCache\* /GRANT administrators:F + &cmd.exe /c Del /q %windir%\winsxs\ManifestCache\* + when: + - winsxs_dir.stat is defined + - winsxs_dir.stat.exists \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/cloudbase-init.yml b/roles/oatakan.windows_template_build/tasks/cloudbase-init.yml new file mode 100644 index 0000000..3ec21d2 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/cloudbase-init.yml @@ -0,0 +1,9 @@ +--- + +- name: install cloudbase init package + win_package: + path: https://cloudbase.it/downloads/CloudbaseInitSetup_x64.msi + product_id: '{ED85F19F-057A-4EE6-BC8D-F576DEACE78D}' + arguments: + - /qn + state: present diff --git a/roles/oatakan.windows_template_build/tasks/compact-alt.yml b/roles/oatakan.windows_template_build/tasks/compact-alt.yml new file mode 
100644 index 0000000..a9a1d5c --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/compact-alt.yml @@ -0,0 +1,36 @@ +--- + +- name: see if Optimize-Volume cmdlet is available + win_command: powershell.exe "Get-Command -Name Optimize-Volume" + register: cleanup_defrag_cmdlet + failed_when: False + +- name: defrag C with PS cmdlet + win_command: powershell.exe "Optimize-Volume -DriveLetter C" + when: cleanup_defrag_cmdlet.rc == 0 + +- name: defrag C with legacy exe + win_command: 'Defrag.exe C:' + when: cleanup_defrag_cmdlet.rc != 0 + +- name: 0 out empty space for later compression + win_shell: | + $path = "C:\zero" + $volume = Get-WmiObject -Class Win32_LogicalDisk -Filter "DeviceID='C:'" + $block_size = 64kb + $leftover_size = $volume.Size * 0.05 + $file_size = $volume.FreeSpace - $leftover_size + $data_array = New-Object -TypeName byte[]($block_size) + $stream = [System.IO.File]::OpenWrite($path) + try { + $current_file_size = 0 + while ($current_file_size -lt $file_size) { + $stream.Write($data_array, 0, $data_array.Length) + $current_file_size += $data_array.Length + } + } finally { + if ($stream) { + $stream.Close() + } + } + Remove-Item -Path $path -Force | Out-Null \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/compact.yml b/roles/oatakan.windows_template_build/tasks/compact.yml new file mode 100644 index 0000000..574d5ca --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/compact.yml @@ -0,0 +1,164 @@ +--- + +- name: ensure temp directory exists + win_file: + path: '{{ temp_directory }}' + state: directory + +- name: download bleachbit + win_get_url: + url: '{{ bleachbit_download_url }}' + dest: '{{ temp_directory }}\BleachBit-portable.zip' + register: download_bleachbit + until: download_bleachbit is success + delay: 3 + retries: 5 + when: bleachbit_download_url is defined + +- name: unzip bleachbit + win_unzip: + src: '{{ temp_directory }}\BleachBit-portable.zip' + dest: '{{ ansible_user_dir }}' + when: download_bleachbit is success + +# This is needed on 2008 for bleachbit to work +- name: install the visual C libraries + win_package: + path: https://download.microsoft.com/download/5/D/8/5D8C65CB-C849-4025-8E95-C3966CAFD8AE/vcredist_x86.exe + product_id: '{9BE518E6-ECC6-35A9-88E4-87755C07200F}' + arguments: '/qb!' 
+ when: "'Windows Server 2008' in ansible_distribution" + +- name: stop windows update service + win_service: + name: wuauserv + state: stopped + ignore_errors: yes + +- name: delete update directory + win_file: + path: C:\Windows\SoftwareDistribution\Download + state: absent + ignore_errors: yes + +- name: remove windows update settings + win_regedit: + path: HKLM:\Software\Microsoft\Windows\CurrentVersion\WindowsUpdate + name: "{{ item }}" + state: absent + loop: + - SusClientId + - PingID + - AccountDomainSid + +- name: start windows update service + win_service: + name: wuauserv + state: started + ignore_errors: yes + +- name: create update directory + win_file: + path: C:\Windows\SoftwareDistribution\Download + state: directory + ignore_errors: yes + +- name: reset windows update + win_shell: wuauclt /resetauthorization /detectnow + ignore_errors: yes + +- name: clean with bleachbit + win_shell: > + '{{ ansible_user_dir }}\BleachBit-Portable\bleachbit_console.exe --clean deepscan.backup + deepscan.ds_store deepscan.thumbs_db deepscan.tmp internet_explorer.cookies internet_explorer.forms + internet_explorer.history internet_explorer.temporary_files system.clipboard system.custom system.logs + system.memory_dump system.muicache system.prefetch system.recycle_bin system.tmp system.updates + windows_defender.history windows_explorer.mru windows_explorer.recent_documents windows_explorer.run + windows_explorer.search_history windows_explorer.thumbnails > NUL' + args: + executable: cmd + when: + - bleachbit_clean|bool + - download_bleachbit is success + ignore_errors: yes + +- name: create temp directory + win_file: + path: '{{ temp_directory }}\win_build' + state: directory + +- name: download ultradefrag + win_get_url: + url: '{{ ultradefrag_download_url }}' + dest: '{{ temp_directory }}\win_build\ultradefrag.zip' + register: download_ultradefrag + until: download_ultradefrag is success + delay: 3 + retries: 5 + +- name: unzip ultradefrag + win_unzip: + src: '{{ temp_directory }}\win_build\ultradefrag.zip' + dest: '{{ temp_directory }}\win_build' + +- name: set udefrag extract directory + set_fact: + udefrag_dir: '{{ temp_directory }}\win_build\ultradefrag-portable-7.1.3.amd64' + +- name: defrag with ultradefrag + win_shell: '{{ udefrag_dir }}\udefrag.exe --optimize --repeat C:' + args: + executable: cmd + +- name: download sdelete + win_get_url: + url: '{{ sdelete_download_url }}' + dest: '{{ temp_directory }}\win_build\SDelete.zip' + register: download_sdelete + until: download_sdelete is success + delay: 3 + retries: 5 + when: sdelete_download_url is defined + +- name: copy sdelete + win_copy: + src: SDelete.zip + dest: '{{ temp_directory }}\win_build\SDelete.zip' + when: sdelete_download_url is undefined + +- name: unzip sdelete + win_unzip: + src: '{{ temp_directory }}\win_build\SDelete.zip' + dest: '{{ temp_directory }}\win_build' + +- name: accept sdelete eula + win_regedit: + path: HKCU:\Software\Sysinternals\SDelete + name: EulaAccepted + data: 1 + type: dword + +- name: compact with sdelete + win_shell: '{{ temp_directory }}\win_build\sdelete.exe -q -z C:' + args: + executable: cmd + +- name: remove temp files + win_file: + path: '{{ temp_directory }}\win_build' + state: absent + +- name: free disk space with bleachbit + win_shell: '{{ ansible_user_dir }}\BleachBit-Portable\bleachbit_console.exe --clean system.free_disk_space' + args: + executable: cmd + when: + - bleachbit_free_disk_space|bool + - download_bleachbit is success + ignore_errors: yes + +- name: remove bleachbit 
files + win_file: + path: '{{ ansible_user_dir }}\BleachBit-Portable' + state: absent + when: download_bleachbit is success \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/copy_cleanmgr.yml b/roles/oatakan.windows_template_build/tasks/copy_cleanmgr.yml new file mode 100644 index 0000000..e4d79df --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/copy_cleanmgr.yml @@ -0,0 +1,29 @@ +--- + +- name: check winsxs cleanmgr file + win_stat: + path: "{{ winsxs_cleanmgr_file[os_short_name] }}" + register: check_winsxs_cleanmgr_file + +- name: check winsxs cleanmgr mui file + win_stat: + path: "{{ winsxs_cleanmgr_mui_file[os_short_name] }}" + register: check_winsxs_cleanmgr_mui_file + +- name: copy cleanmgr file from winsxs folder + win_copy: + src: "{{ winsxs_cleanmgr_file[os_short_name] }}" + dest: '{{ ansible_env.windir }}\System32\cleanmgr.exe' + remote_src: yes + when: + - check_winsxs_cleanmgr_file.stat.exists + - check_winsxs_cleanmgr_mui_file.stat.exists + +- name: copy cleanmgr mui file from winsxs folder + win_copy: + src: "{{ winsxs_cleanmgr_mui_file[os_short_name] }}" + dest: '{{ ansible_env.windir }}\System32\en-US\cleanmgr.exe.mui' + remote_src: yes + when: + - check_winsxs_cleanmgr_file.stat.exists + - check_winsxs_cleanmgr_mui_file.stat.exists \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/disable-auto-logon.yml b/roles/oatakan.windows_template_build/tasks/disable-auto-logon.yml new file mode 100644 index 0000000..952846e --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/disable-auto-logon.yml @@ -0,0 +1,8 @@ +--- + +- name: disable auto login + win_regedit: + path: HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon + name: "{{ item.name }}" + state: absent + loop: "{{ autologin_registry }}" \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/enable-rdp.yml b/roles/oatakan.windows_template_build/tasks/enable-rdp.yml new file mode 100644 index 0000000..922bec9 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/enable-rdp.yml @@ -0,0 +1,18 @@ +--- + +- name: enable RDP port + win_firewall_rule: + name: Remote Desktop + localport: 3389 + action: allow + direction: in + protocol: tcp + state: present + enabled: yes + +- name: enable RDP + win_regedit: + path: HKLM:\System\CurrentControlSet\Control\Terminal Server + name: fDenyTSConnections + data: 0 + type: dword diff --git a/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml b/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml new file mode 100644 index 0000000..d9a7ef4 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml @@ -0,0 +1,69 @@ +--- + +- name: ensure Windows ADK with DISM is installed + win_chocolatey: + name: windows-adk-deploy + state: present + version: 10.0.17134.0 + register: install_windows_adk_deploy + +- name: ensure PATH contains Windows ADK + win_path: + scope: machine + state: present + elements: "C:\\Program Files (x86)\\Windows Kits\\10\\Assessment and Deployment Kit\\Deployment Tools\\amd64\\DISM" + +- name: download hotfix + win_get_url: + url: '{{ enable_tlsv12_hotfix.url }}' + dest: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }}' + +- name: install hotfix + win_hotfix: + source: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }}' + state: present + register: hotfix_install + +- name: debug hotfix installation result + debug: + var: hotfix_install + +- name: 
ensure hotfix file is removed + win_file: + path: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }}' + state: absent + +- name: reboot if needed + win_reboot: + when: hotfix_install.reboot_required + +- name: enable TLSv1.2 support + win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Control\SecurityProviders\SCHANNEL\Protocols\TLS 1.2\{{ item.type }} + name: '{{ item.property }}' + data: '{{ item.value }}' + type: dword + state: present + register: enable_tls12 + loop: + - type: Server + property: Enabled + value: 1 + - type: Server + property: DisabledByDefault + value: 0 + - type: Client + property: Enabled + value: 1 + - type: Client + property: DisabledByDefault + value: 0 + +- name: ensure Windows ADK with DISM is removed + win_chocolatey: + name: windows-adk-deploy + state: absent + +- name: reboot if TLS config was applied + win_reboot: + when: enable_tls12 is changed \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/main.yml b/roles/oatakan.windows_template_build/tasks/main.yml new file mode 100644 index 0000000..708ce16 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/main.yml @@ -0,0 +1,69 @@ +--- + +- include_tasks: powershell-upgrade.yml + when: upgrade_powershell | bool + +- name: run setup module + setup: + +- include_tasks: enable-tlsv12.yml + when: upgrade_powershell | bool + +- include_tasks: disable-auto-logon.yml + +- include_tasks: updates.yml + when: install_updates | bool + +- include_role: + name: "{{ ovirt_guest_agent_role }}" + when: target_ovirt | bool and not target_qemu | bool + +- include_role: + name: "{{ virtio_role }}" + when: target_qemu | bool or ('KubeVirt' in ansible_system_vendor | default('')) + +- include_role: + name: "{{ virtualbox_guest_additions_role }}" + when: "'VirtualBox' in ansible_product_name" + +- include_role: + name: "{{ vmware_tools_role }}" + when: "'VMware' in ansible_product_name" + +- include_tasks: startup.yml + +- include_tasks: policy.yml + +- include_tasks: power.yml + when: "'Windows 10' in ansible_distribution" + +- include_tasks: enable-rdp.yml + +- include_tasks: cloudbase-init.yml + when: + - "'VMware' not in ansible_product_name" + - "'VirtualBox' not in ansible_product_name" + - ('KubeVirt' not in ansible_system_vendor | default(False)) + - not target_ovirt | bool + - not target_vagrant | bool + +- include_tasks: remove-apps-alt-2.yml + when: + - remove_apps | bool + - "'Windows 10' in ansible_distribution" + +- include_role: + name: "{{ ec2_ena_driver_role }}" + when: target_ec2 | bool + +- include_tasks: clean-up-components.yml + when: clean_up_components | bool + +- include_tasks: clean-up.yml + +- include_tasks: sysprep.yml + +- include_tasks: compact.yml + +- include_tasks: shutdown.yml + when: shutdown_instance | bool \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/policy.yml b/roles/oatakan.windows_template_build/tasks/policy.yml new file mode 100644 index 0000000..3c00090 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/policy.yml @@ -0,0 +1,20 @@ +--- + +# do not enable this by default +- name: allow unauthenticated guest access + win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters + name: AllowInsecureGuestAuth + data: 1 + type: dword + when: policy.allow_unauthenticated_guest_access|bool + +- name: set connection profile to private + win_shell: Set-NetConnectionProfile -NetworkCategory Private + when: + - "'Windows 10' in ansible_distribution" + +- 
name: Ensure local account password doesn't expire + win_user: + name: "{{ ansible_user }}" + password_never_expires: yes \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/power.yml b/roles/oatakan.windows_template_build/tasks/power.yml new file mode 100644 index 0000000..9a01f68 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/power.yml @@ -0,0 +1,6 @@ +--- + +- name: change power plan to high performance + win_power_plan: + name: high performance + ignore_errors: yes \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/powershell-upgrade.yml b/roles/oatakan.windows_template_build/tasks/powershell-upgrade.yml new file mode 100644 index 0000000..3b065be --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/powershell-upgrade.yml @@ -0,0 +1,70 @@ +--- + +- name: download script + raw: '(New-Object -TypeName System.Net.WebClient).DownloadFile("{{ powershell_script_url }}", "{{ powershell_upgrade_script_file }}")' + changed_when: False + check_mode: no + register: download_script + +- name: set execution policy + raw: 'Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Force' + changed_when: False + check_mode: no + ignore_errors: yes + +- name: delete scheduled task if it exists + raw: 'SCHTASKS /Delete /TN upgrade' + args: + executable: cmd.exe + changed_when: False + check_mode: no + ignore_errors: yes + +- name: create a scheduled task to run powershell script + raw: > + SCHTASKS /Create /SC MONTHLY /MO first /D SUN /TN upgrade /TR "powershell.exe -Command + '& {{ powershell_upgrade_script_file }} -Version {{ powershell_target_version }} + -Username {{ ansible_user }} -Password {{ ansible_password }}'" + args: + executable: cmd.exe + changed_when: False + check_mode: no + +- name: run scheduled task + raw: 'SCHTASKS /Run /TN upgrade' + args: + executable: cmd.exe + changed_when: False + check_mode: no + +- name: wait for system to reboot after upgrade + wait_for_connection: + delay: 300 + sleep: 30 + timeout: 300 + +- name: delete scheduled task + win_scheduled_task: + name: upgrade + state: absent + +- name: delete script + win_file: + path: "{{ powershell_upgrade_script_file }}" + state: absent + +- name: ensure auto login is disabled + win_regedit: + path: HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon + name: AutoAdminLogon + data: 0 + type: string + +- name: ensure auto login creds are removed + win_regedit: + path: HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon + name: "{{ item }}" + state: absent + loop: + - DefaultUserName + - DefaultPassword \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/remove-apps-alt-2.yml b/roles/oatakan.windows_template_build/tasks/remove-apps-alt-2.yml new file mode 100644 index 0000000..d425737 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/remove-apps-alt-2.yml @@ -0,0 +1,96 @@ +--- + +- name: remove default apps + win_shell: | + $ErrorActionPreference = "Stop" + $apps = @( + "Microsoft.3DBuilder", + "Microsoft.Appconnector", + "Microsoft.BingFinance", + "Microsoft.BingNews", + "Microsoft.BingSports", + "Microsoft.BingWeather", + "Microsoft.FreshPaint", + "Microsoft.Getstarted", + "Microsoft.MicrosoftOfficeHub", + "Microsoft.MicrosoftSolitaireCollection", + "Microsoft.MicrosoftStickyNotes", + "Microsoft.Office.OneNote", + "Microsoft.OneConnect", + "Microsoft.People", + "Microsoft.SkypeApp", + "Microsoft.Windows.Photos", + "Microsoft.WindowsAlarms", + "Microsoft.WindowsCalculator", + 
"Microsoft.WindowsCamera", + "Microsoft.WindowsMaps", + "Microsoft.WindowsPhone", + "Microsoft.WindowsSoundRecorder", + "Microsoft.XboxApp", + "Microsoft.ZuneMusic", + "Microsoft.ZuneVideo", + "Microsoft.WindowsCommunicationsApps", + "Microsoft.MinecraftUWP", + "Microsoft.MicrosoftPowerBIForWindows", + "Microsoft.NetworkSpeedTest", + "Microsoft.CommsPhone", + "Microsoft.ConnectivityStore", + "Microsoft.Messaging", + "Microsoft.Office.Sway", + "Microsoft.OneConnect", + "Microsoft.WindowsFeedbackHub", + "Microsoft.BingFoodAndDrink", + "Microsoft.BingTravel", + "Microsoft.BingHealthAndFitness", + "Microsoft.WindowsReadingList", + "Microsoft.MSPaint", + "Microsoft.Microsoft3DViewer", + "Microsoft.Print3D", + "9E2F88E3.Twitter", + "PandoraMediaInc.29680B314EFC2", + "Flipboard.Flipboard", + "ShazamEntertainmentLtd.Shazam", + "king.com.CandyCrushSaga", + "king.com.CandyCrushSodaSaga", + "king.com.*", + "ClearChannelRadioDigital.iHeartRadio", + "4DF9E0F8.Netflix", + "6Wunderkinder.Wunderlist", + "Drawboard.DrawboardPDF", + "2FE3CB00.PicsArt-PhotoStudio", + "D52A8D61.FarmVille2CountryEscape", + "TuneIn.TuneInRadio", + "GAMELOFTSA.Asphalt8Airborne", + "TheNewYorkTimes.NYTCrossword", + "DB6EA5DB.CyberLinkMediaSuiteEssentials", + "Facebook.Facebook", + "flaregamesGmbH.RoyalRevolt2", + "Playtika.CaesarsSlotsFreeCasino", + "A278AB0D.MarchofEmpires", + "KeeperSecurityInc.Keeper", + "ThumbmunkeysLtd.PhototasticCollage", + "XINGAG.XING", + "89006A2E.AutodeskSketchBook", + "D5EA27B7.Duolingo-LearnLanguagesforFree", + "46928bounde.EclipseManager", + "ActiproSoftwareLLC.562882FEEB491" + ) + foreach ($app in $apps) { + Get-AppxPackage -Name $app -AllUsers | Remove-AppxPackage -AllUsers + Get-AppxProvisionedPackage -Online | Where-Object { $_.DisplayName -like $app } | Remove-AppxProvisionedPackage -Online + } + register: cleanup_win10_remove + until: cleanup_win10_remove is successful + retries: 5 + delay: 1 + ignore_errors: yes + +- name: prevent suggested applications from returning + win_regedit: + path: HKLM:\SOFTWARE\Policies\Microsoft\Windows\Cloud Content + name: DisableWindowsConsumerFeatures + data: 1 + datatype: dword + +- name: reboot to effect pending changes + win_reboot: \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/remove-apps-alt.yml b/roles/oatakan.windows_template_build/tasks/remove-apps-alt.yml new file mode 100644 index 0000000..f891088 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/remove-apps-alt.yml @@ -0,0 +1,30 @@ +--- + +- name: remove user apps + script: RemoveUserApps.ps1 + register: cleanup_win10_remove + until: cleanup_win10_remove is successful + retries: 3 + delay: 1 + ignore_errors: yes + +#- name: disable windows store +# win_regedit: +# path: HKLM:\Software\Policies\Microsoft\WindowsStore +# name: AutoDownload +# data: 00000002 +# type: dword +# +#- name: disable content delivery manager +# win_regedit: +# path: HKCU:\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager +# name: SilentInstalledAppsEnabled +# data: 00000000 +# type: dword +# +#- name: disable windows store +# win_regedit: +# path: HKLM:\Software\Policies\Microsoft\Windows\CloudContent +# name: DisableWindowsConsumerFeatures +# data: 00000001 +# type: dword \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/remove-apps.yml b/roles/oatakan.windows_template_build/tasks/remove-apps.yml new file mode 100644 index 0000000..3857d13 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/remove-apps.yml @@ -0,0 
+1,97 @@ +--- + +- name: Setup the DSCR_AppxPackage module + win_psmodule: + name: DSCR_AppxPackage + state: present + +- name: remove packages + win_dsc: + resource_name: cAppxProvisionedPackageSet + Ensure: Absent + PackageName: + - Microsoft.3DBuilder + - Microsoft.Appconnector + - Microsoft.BingFinance + - Microsoft.BingNews + - Microsoft.BingSports + - Microsoft.BingWeather + - Microsoft.FreshPaint + - Microsoft.Getstarted + - Microsoft.MicrosoftOfficeHub + - Microsoft.MicrosoftSolitaireCollection + - Microsoft.MicrosoftStickyNotes + - Microsoft.Office.OneNote + - Microsoft.OneConnect + - Microsoft.People + - Microsoft.SkypeApp + - Microsoft.Windows.Photos + - Microsoft.WindowsAlarms + - Microsoft.WindowsCalculator + - Microsoft.WindowsCamera + - Microsoft.WindowsMaps + - Microsoft.WindowsPhone + - Microsoft.WindowsSoundRecorder + - Microsoft.XboxApp + - Microsoft.ZuneMusic + - Microsoft.ZuneVideo + - Microsoft.WindowsCommunicationsApps + - Microsoft.MinecraftUWP + - Microsoft.MicrosoftPowerBIForWindows + - Microsoft.NetworkSpeedTest + - Microsoft.CommsPhone + - Microsoft.ConnectivityStore + - Microsoft.Messaging + - Microsoft.Office.Sway + - Microsoft.OneConnect + - Microsoft.WindowsFeedbackHub + - Microsoft.BingFoodAndDrink + - Microsoft.BingTravel + - Microsoft.BingHealthAndFitness + - Microsoft.WindowsReadingList + - Microsoft.MSPaint + - Microsoft.Microsoft3DViewer + - Microsoft.Print3D + - 9E2F88E3.Twitter + - PandoraMediaInc.29680B314EFC2 + - Flipboard.Flipboard + - ShazamEntertainmentLtd.Shazam + - king.com.CandyCrushSaga + - king.com.CandyCrushSodaSaga + - king.com.* + - ClearChannelRadioDigital.iHeartRadio + - 4DF9E0F8.Netflix + - 6Wunderkinder.Wunderlist + - Drawboard.DrawboardPDF + - 2FE3CB00.PicsArt-PhotoStudio + - D52A8D61.FarmVille2CountryEscape + - TuneIn.TuneInRadio + - GAMELOFTSA.Asphalt8Airborne + - TheNewYorkTimes.NYTCrossword + - DB6EA5DB.CyberLinkMediaSuiteEssentials + - Facebook.Facebook + - flaregamesGmbH.RoyalRevolt2 + - Playtika.CaesarsSlotsFreeCasino + - A278AB0D.MarchofEmpires + - KeeperSecurityInc.Keeper + - ThumbmunkeysLtd.PhototasticCollage + - XINGAG.XING + - 89006A2E.AutodeskSketchBook + - D5EA27B7.Duolingo-LearnLanguagesforFree + - 46928bounde.EclipseManager + - ActiproSoftwareLLC.562882FEEB491 + register: cleanup_win10_remove + until: cleanup_win10_remove is successful + retries: 3 + delay: 1 + ignore_errors: yes + +- name: prevent suggested applications from returning + win_regedit: + path: HKLM:\SOFTWARE\Policies\Microsoft\Windows\Cloud Content + name: DisableWindowsConsumerFeatures + data: 1 + datatype: dword + +- name: reboot to effect pending changes + win_reboot: \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/remove-onedrive.yml b/roles/oatakan.windows_template_build/tasks/remove-onedrive.yml new file mode 100644 index 0000000..e4dfa8e --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/remove-onedrive.yml @@ -0,0 +1,29 @@ +--- + +- name: kill onedrive process + win_shell: Stop-Process -Name OneDrive + ignore_errors: yes + +- name: uninstall onedrive + win_shell: '{{ ansible_env.SystemRoot }}\SysWOW64\OneDriveSetup.exe /uninstall' + ignore_errors: yes + +- name: remove onedrive directories + win_file: + path: '{{ item }}' + state: absent + ignore_errors: yes + loop: + - '{{ ansible_env.USERPROFILE }}\OneDrive' + - '{{ ansible_env.LOCALAPPDATA }}\Microsoft\OneDrive' + - '{{ ansible_env.ProgramData }}\Microsoft OneDrive' + - C:\OneDriveTemp + +- name: delete registry keys + win_regedit: + path: 
'{{ item }}' + state: absent + delete_key: yes + loop: + - HKCR:\CLSID\{018D5C66-4533-4307-9B53-224DE2ED1FE6} + - HKCR:\Wow6432Node\CLSID\{018D5C66-4533-4307-9B53-224DE2ED1FE6} \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/shutdown.yml b/roles/oatakan.windows_template_build/tasks/shutdown.yml new file mode 100644 index 0000000..ba0da79 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/shutdown.yml @@ -0,0 +1,5 @@ +--- + +- name: run sysprep-shutdown scheduled task + win_shell: schtasks.exe /Run /TN "sysprep-shutdown" + ignore_errors: yes diff --git a/roles/oatakan.windows_template_build/tasks/startup.yml b/roles/oatakan.windows_template_build/tasks/startup.yml new file mode 100644 index 0000000..0369032 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/startup.yml @@ -0,0 +1,7 @@ +--- + +- name: remove essentials setup wizard from start up + win_regedit: + path: HKLM:\Software\Microsoft\Windows\CurrentVersion\Run + name: EssentialsRoleConfigWizard + state: absent \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/sysprep.yml b/roles/oatakan.windows_template_build/tasks/sysprep.yml new file mode 100644 index 0000000..a199c71 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/sysprep.yml @@ -0,0 +1,76 @@ +--- + +- name: recompile .NET assemblies + win_dotnet_ngen: + +#- name: enable custom answer file +# win_regedit: +# path: HKLM:\System\Setup +# name: UnattendFile +# data: C:\Windows\system32\sysprep\unattend.xml +# type: string + +- name: ensure Panther directory exists + win_file: + path: c:\Windows\Panther + state: directory + +- name: enable winrm + win_shell: '& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString("https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1"))) -ForceNewSSLCert -EnableCredSSP' + ignore_errors: yes + when: "'Windows Server 2008' in ansible_distribution" + +- name: copy unattend.xml + win_template: + src: unattend.xml.j2 + dest: C:\Windows\system32\sysprep\unattend.xml + when: + - ('VMware' not in ansible_product_name) or ('VMware' in ansible_product_name and target_vagrant | bool) + - not target_ovirt | bool + - not ('KubeVirt' in ansible_system_vendor | default('')) + +#- name: run sysprep +# win_shell: C:\Windows\system32\sysprep\sysprep.exe /generalize /shutdown /oobe /quiet +# args: +# executable: cmd +# async: 1200 +# poll: 0 + +- name: create scheduled task to delete WinRM listeners and shutdown + win_scheduled_task: + name: sysprep-shutdown + username: SYSTEM + disallow_start_if_on_batteries: no + stop_if_going_on_batteries: no + actions: + - path: powershell.exe + arguments: Remove-Item -Path WSMan:\localhost\Listener\* -Recurse -Force + - path: C:\windows\system32\sysprep\sysprep.exe + arguments: /generalize /oobe /quiet /shutdown + when: + - ('VMware' not in ansible_product_name) or ('VMware' in ansible_product_name and target_vagrant | bool) or (target_ovirt | bool) or ('KubeVirt' in ansible_system_vendor | default('')) + +- name: create scheduled task to delete WinRM listeners and shutdown + win_scheduled_task: + name: sysprep-shutdown + username: SYSTEM + disallow_start_if_on_batteries: no + stop_if_going_on_batteries: no + actions: + - path: powershell.exe + arguments: Remove-Item -Path WSMan:\localhost\Listener\* -Recurse -Force + - path: shutdown.exe + arguments: /s /t 10 /f /d p:4:1 /c "Ansible Shutdown" + when: + - "'VMware' in ansible_product_name" + - not 
target_vagrant | bool + - not target_ovirt | bool + - not ('KubeVirt' in ansible_system_vendor | default('')) + +- name: set flag to recreate pagefile after next sysprep + win_shell: | + $system = Get-WmiObject -Class Win32_ComputerSystem -EnableAllPrivileges + if ($system -ne $null) { + $system.AutomaticManagedPagefile = $true + $system.Put() + } \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates-all.yml b/roles/oatakan.windows_template_build/tasks/updates-all.yml new file mode 100644 index 0000000..c43c19a --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/updates-all.yml @@ -0,0 +1,43 @@ +--- + +- name: check for available updates + win_updates: + category_names: "{{ win_update_category_names }}" + blacklist: "{{ win_update_blacklist | default(omit) }}" + state: searched + register: available_updates + +- debug: + msg: | + {{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available. + {% for key, value in available_updates.updates.items() %} + - {{ value.title }} + {% endfor %} + when: available_updates.updates is defined + +- include_tasks: updates-with-retry.yml + when: + - available_updates.updates is defined + - available_updates.found_update_count > 0 + +- name: check for missing updates. + win_updates: + state: searched + register: available_updates + +- name: list missing updates + debug: + var: available_updates + +- name: check to see if update is finished + win_shell: gwmi -Class win32_computersystem -ComputerName 127.0.0.1 | select -ExpandProperty username -ErrorAction Stop + register: logon_status + until: logon_status is success + delay: 10 + retries: 100 + ignore_errors: yes + when: "'Windows 10' in ansible_distribution" + +- name: reboot windows + win_reboot: + when: "'Windows 10' in ansible_distribution" \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates-powershell.yml b/roles/oatakan.windows_template_build/tasks/updates-powershell.yml new file mode 100644 index 0000000..2f1a45c --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/updates-powershell.yml @@ -0,0 +1,98 @@ +--- + +- name: update over multiple reboots + block: + - name: check for available updates + win_updates: + category_names: + - CriticalUpdates + - DefinitionUpdates + - SecurityUpdates + - UpdateRollups + - Updates + state: searched + register: available_updates + + - debug: + msg: | + {{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available. 
+ {% for key, value in available_updates.updates.items() %} + - {{ value.title }} + {% endfor %} + when: available_updates.updates is defined + + - block: + - name: install windows updates using powershell script + script: win-updates.ps1 + become: yes + become_method: runas + become_user: SYSTEM + when: + - available_updates.updates is defined + - available_updates.found_update_count > 0 + + rescue: + - name: reboot the system to recover from a failed update + win_reboot: + reboot_timeout: 7200 + + - name: wait for system to be responsive after update + wait_for_connection: + delay: 60 + sleep: 10 + timeout: 600 + + - name: check to see if reboot is required + win_reg_stat: + path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update + name: CustomRebootRequired + register: update_reboot_required_key + + - name: reboot the system to continue with the update + win_reboot: + reboot_timeout: 7200 + when: update_reboot_required_key.exists + + - name: check for missing updates + win_updates: + category_names: + - CriticalUpdates + - DefinitionUpdates + - SecurityUpdates + - UpdateRollups + - Updates + state: searched + register: missing_updates + + - debug: + msg: | + {{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing. + {% for key, value in missing_updates.updates.items() %} + - {{ value.title }} + {% endfor %} + when: missing_updates.updates is defined + + - block: + - name: set update count + set_fact: + update_retry_count: '{{ update_retry_count | default(0) | int + 1 }}' + + - name: still more updates - need to retry + fail: + msg: > + '{{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing. + {{ (update_retry_limit | int) - (update_retry_count | int) }} more retries left' + when: ((update_retry_limit | int) - (update_retry_count | int) > 0) + when: missing_updates.found_update_count > 0 + + - name: ensure the CustomRebootRequired key doesn't exist + win_regedit: + path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update + name: CustomRebootRequired + state: absent + + rescue: + - debug: + msg: "Still more updates remaining - retrying..." 
+ + - include_tasks: updates-powershell.yml \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates-win2008r2.yml b/roles/oatakan.windows_template_build/tasks/updates-win2008r2.yml new file mode 100644 index 0000000..e66b516 --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/updates-win2008r2.yml @@ -0,0 +1,74 @@ +--- + +- name: ensure Windows ADK with DISM is installed + win_chocolatey: + name: windows-adk-deploy + state: present + version: 10.0.17134.0 + register: install_windows_adk_deploy + +- name: ensure PATH contains Windows ADK + win_path: + scope: machine + state: present + elements: "C:\\Program Files (x86)\\Windows Kits\\10\\Assessment and Deployment Kit\\Deployment Tools\\amd64\\DISM" + +- name: download hotfix group 1 + win_get_url: + url: '{{ item.url }}' + dest: '{{ hotfix_download_location }}\{{ item.file }}' + loop: "{{ hotfixes_group_1 }}" + +- name: install hotfix group 1 + win_hotfix: + source: '{{ hotfix_download_location }}\{{ item.file }}' + state: present + register: hotfix_install_group_1 + loop: "{{ hotfixes_group_1 }}" + +- name: debug hotfix installation result + debug: + var: hotfix_install_group_1 + +- name: ensure hotfix file is removed (group 1) + win_file: + path: '{{ hotfix_download_location }}\{{ item.file }}' + state: absent + loop: "{{ hotfixes_group_1 }}" + +- name: reboot from starting update + win_reboot: + +- name: check for available updates + win_updates: + category_names: "{{ win_update_category_names }}" + blacklist: "{{ win_update_blacklist | default(omit) }}" + state: searched + register: available_updates + +- debug: + msg: | + {{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available. + {% for key, value in available_updates.updates.items() %} + - {{ value.title }} + {% endfor %} + when: available_updates.updates is defined + +- include_tasks: updates-with-retry.yml + when: + - available_updates.updates is defined + - available_updates.found_update_count > 0 + +- name: check for missing updates. 
+ win_updates: + state: searched + register: available_updates + +- name: list missing updates + debug: + var: available_updates + +- name: make sure Windows ADK with DISM for Server 2008 R2 is not installed + win_chocolatey: + name: windows-adk-deploy + state: absent \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates-with-retry.yml b/roles/oatakan.windows_template_build/tasks/updates-with-retry.yml new file mode 100644 index 0000000..facf18d --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/updates-with-retry.yml @@ -0,0 +1,84 @@ +--- + +- name: update over multiple reboots + block: + - block: + - name: install all windows updates + win_updates: + category_names: "{{ win_update_category_names }}" + blacklist: "{{ (win_update_blacklist | default([])) + (failed_kb | default([])) }}" + whitelist: "{{ win_update_whitelist | default(omit) }}" + reboot: yes + register: installed_updates + + rescue: + - name: reboot the system to recover from a failed update + win_reboot: + reboot_timeout: 7200 + + - name: set failed KB to skip + set_fact: + failed_kb: "{{ failed_kb|default([]) + [installed_updates.msg | regex_replace('^.*\\((KB.*)\\).*','\\1')] }}" + when: + - installed_updates.msg is defined + - ('Failed' in installed_updates.msg) + - ('KB' in installed_updates.msg) + + - name: fail to retry + fail: + msg: "There are failed updates: {{ failed_kb | join(' ') }}" + when: + - failed_kb is defined + - failed_kb | length > 0 + + - name: wait for system to be responsive after update + wait_for_connection: + delay: 60 + sleep: 10 + timeout: 600 + + - name: work on any skipped KB + win_updates: + category_names: "{{ win_update_category_names }}" + blacklist: "{{ win_update_blacklist | default(omit) }}" + whitelist: "{{ failed_kb | default([]) }}" + reboot: yes + register: installed_updates_retry_skipped + when: + - failed_kb is defined + - failed_kb | length > 0 + + - name: check for missing updates + win_updates: + category_names: "{{ win_update_category_names }}" + blacklist: "{{ win_update_blacklist | default(omit) }}" + state: searched + register: missing_updates + + - debug: + msg: | + {{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing. + {% for key, value in missing_updates.updates.items() %} + - {{ value.title }} + {% endfor %} + when: missing_updates.updates is defined + + - name: still more updates - need to retry + fail: + msg: > + '{{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing. + {{ (update_retry_limit | int) - (update_retry_count | int) }} more retries left' + when: + - missing_updates.found_update_count > 0 + - ((update_retry_limit | int) - (update_retry_count | int) >= 0) + + rescue: + - name: set update count + set_fact: + update_retry_count: '{{ update_retry_count | default(0) | int + 1 }}' + + - debug: + msg: "Still more updates remaining - retrying... 
({{ update_retry_count }}/{{ update_retry_limit }})" + + - include_tasks: updates-with-retry.yml + when: ((update_retry_limit | int) - (update_retry_count | int) >= 0) \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates.yml b/roles/oatakan.windows_template_build/tasks/updates.yml new file mode 100644 index 0000000..94ea25f --- /dev/null +++ b/roles/oatakan.windows_template_build/tasks/updates.yml @@ -0,0 +1,89 @@ +--- + +- name: disable firewall for Domain, Public and Private profiles + win_shell: Set-NetFirewallProfile -Profile Domain,Public,Private -Enabled False + when: "'Windows Server 2012' in ansible_distribution" + +- name: disable firewall for Domain, Public and Private profiles + win_shell: netsh advfirewall set allprofiles state off + when: "'Windows Server 2008' in ansible_distribution" + +- name: get used space before update + win_shell: Get-PSDrive C | Select-Object Used | ConvertTo-Json + register: used_space_before_update + ignore_errors: yes + +- name: update Windows Update Agent on 2008 + win_package: + path: http://download.windowsupdate.com/windowsupdate/redist/standalone/7.6.7600.320/windowsupdateagent-7.6-x64.exe + arguments: + - /quiet + - /norestart + - /wuforce + creates_path: C:\Windows\System32\wuaueng.dll + creates_version: 7.6.7600.320 + when: "'Windows Server 2008' in ansible_distribution" + +- include_tasks: updates-all.yml + vars: + win_update_category_names: + - CriticalUpdates + - DefinitionUpdates + - SecurityUpdates + - UpdateRollups + - Updates + when: + - install_updates | bool + - "'Windows Server 2008' not in ansible_distribution" + +#- include_tasks: updates-powershell.yml +# when: +# - install_updates | bool +# - "'Windows Server 2008' in ansible_distribution" + +- include_tasks: updates-win2008r2.yml + vars: + win_update_category_names: + - CriticalUpdates + - DefinitionUpdates + - SecurityUpdates + - UpdateRollups + - Updates + hotfix_download_location: "{{ ansible_env.TEMP }}" + hotfixes_group_1: + - kb: KB3020369 + file: Windows6.1-KB3020369-x64.msu + url: https://download.microsoft.com/download/F/D/3/FD3728D5-0D2F-44A6-B7DA-1215CC0C9B75/Windows6.1-KB3020369-x64.msu + - kb: KB3125574 + file: windows6.1-kb3125574-v4-x64_2dafb1d203c8964239af3048b5dd4b1264cd93b9.msu + url: http://download.windowsupdate.com/d/msdownload/update/software/updt/2016/05/windows6.1-kb3125574-v4-x64_2dafb1d203c8964239af3048b5dd4b1264cd93b9.msu + - kb: KB4474419 + file: windows6.1-kb4474419-v3-x64_b5614c6cea5cb4e198717789633dca16308ef79c.msu + url: http://download.windowsupdate.com/c/msdownload/update/software/secu/2019/09/windows6.1-kb4474419-v3-x64_b5614c6cea5cb4e198717789633dca16308ef79c.msu + - kb: KB4490628 + file: windows6.1-kb4490628-x64_d3de52d6987f7c8bdc2c015dca69eac96047c76e.msu + url: http://download.windowsupdate.com/c/msdownload/update/software/secu/2019/03/windows6.1-kb4490628-x64_d3de52d6987f7c8bdc2c015dca69eac96047c76e.msu + when: + - install_updates | bool + - "'Windows Server 2008' in ansible_distribution" + +- name: get used space after update + win_shell: Get-PSDrive C | Select-Object Used | ConvertTo-Json + register: used_space_after_update + ignore_errors: yes + +- debug: + msg: + - "Used space before update: {{ ((used_space_before_update.stdout | from_json)['Used']|int / (1024*1024*1024)) | round(2, 'floor') }} GB" + - "Used space after update: {{ ((used_space_after_update.stdout | from_json)['Used']|int / (1024*1024*1024)) | round(2, 'floor') }} GB" + when: + - used_space_before_update.stdout is defined + - 
used_space_after_update.stdout is defined + +- name: enabled firewall for Domain, Public and Private profiles + win_shell: Set-NetFirewallProfile -Profile Domain,Public,Private -Enabled True + when: "'Windows Server 2012' in ansible_distribution" + +- name: enable firewall for Domain, Public and Private profiles + win_shell: netsh advfirewall set allprofiles state on + when: "'Windows Server 2008' in ansible_distribution" \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/templates/unattend.xml.j2 b/roles/oatakan.windows_template_build/templates/unattend.xml.j2 new file mode 100644 index 0000000..f259546 --- /dev/null +++ b/roles/oatakan.windows_template_build/templates/unattend.xml.j2 @@ -0,0 +1,98 @@ +<?xml version="1.0" encoding="utf-8"?> +<unattend xmlns="urn:schemas-microsoft-com:unattend"> + <settings pass="oobeSystem"> + <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> + <UserAccounts> +{% if unattend.administrator_password is defined %} + <AdministratorPassword> + <Value>{{ unattend.administrator_password }}</Value> + <PlainText>true</PlainText> + </AdministratorPassword> +{% endif %} +{% if unattend.local_accounts is defined %} + <LocalAccounts> +{% for local_account in unattend.local_accounts %} + <LocalAccount wcm:action="add"> +{% if local_account.password is defined %} + <Password> + <Value>{{ local_account.password }}</Value> + <PlainText>true</PlainText> + </Password> +{% endif %} +{% if local_account.description is defined %} + <Description>{{ local_account.description }}</Description> +{% endif %} +{% if local_account.display_name is defined %} + <DisplayName>{{ local_account.display_name }}</DisplayName> +{% endif %} +{% if local_account.group is defined %} + <Group>{{ local_account.group }}</Group> +{% endif %} +{% if local_account.name is defined %} + <Name>{{ local_account.name }}</Name> +{% endif %} + </LocalAccount> +{% endfor %} + </LocalAccounts> +{% endif %} + </UserAccounts> + <OOBE> + <HideEULAPage>true</HideEULAPage> + <HideWirelessSetupInOOBE>true</HideWirelessSetupInOOBE> + <NetworkLocation>Home</NetworkLocation> + <ProtectYourPC>1</ProtectYourPC> + <HideLocalAccountScreen>true</HideLocalAccountScreen> + <HideOEMRegistrationScreen>true</HideOEMRegistrationScreen> + <HideOnlineAccountScreens>true</HideOnlineAccountScreens> + <SkipMachineOOBE>true</SkipMachineOOBE> + <SkipUserOOBE>true</SkipUserOOBE> + </OOBE> +{% if enable_auto_logon and unattend.local_accounts and unattend.local_accounts[0].name and unattend.local_accounts[0].password %} + <AutoLogon> + <Password> + <Value>{{ unattend.local_accounts[0].password }}</Value> + <PlainText>true</PlainText> + </Password> + <Username>{{ unattend.local_accounts[0].name }}</Username> + <Enabled>true</Enabled> + </AutoLogon> +{% endif %} + <FirstLogonCommands> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'))) -ForceNewSSLCert -EnableCredSSP"</CommandLine> + <Description>Enable winrm</Description> + <Order>1</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell 
-Command "Enable-WSManCredSSP -Role Server -Force"</CommandLine> + <Description>Enable winrm server role</Description> + <Order>2</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + <SynchronousCommand wcm:action="add"> + <CommandLine>cmd.exe /c powershell -Command "Set-Item -Path 'WSMan:\localhost\Service\Auth\CredSSP' -Value $true"</CommandLine> + <Description>Enable credssp authentication</Description> + <Order>3</Order> + <RequiresUserInput>true</RequiresUserInput> + </SynchronousCommand> + </FirstLogonCommands> + <ShowWindowsLive>false</ShowWindowsLive> + </component> + </settings> + <settings pass="specialize"> + <component name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> + <OEMInformation> + <HelpCustomized>false</HelpCustomized> + </OEMInformation> + <!-- Rename computer here. --> + <ComputerName>{{ settings.computer_name | default('windows') }}</ComputerName> + <TimeZone>{{ settings.time_zone | default('Central Standard Time') }}</TimeZone> + <RegisteredOwner/> + </component> + <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Security-SPP-UX" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> + <SkipAutoActivation>{{ settings.skip_auto_activation | default('true') }}</SkipAutoActivation> + </component> + </settings> + <cpi:offlineImage xmlns:cpi="urn:schemas-microsoft-com:cpi" cpi:source="catalog:d:/sources/install_windows 7 ENTERPRISE.clg"/> +</unattend> \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tests/inventory b/roles/oatakan.windows_template_build/tests/inventory new file mode 100644 index 0000000..d18580b --- /dev/null +++ b/roles/oatakan.windows_template_build/tests/inventory @@ -0,0 +1 @@ +localhost \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tests/test.yml b/roles/oatakan.windows_template_build/tests/test.yml new file mode 100644 index 0000000..80b7ee9 --- /dev/null +++ b/roles/oatakan.windows_template_build/tests/test.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + remote_user: Administrator + vars: + ansible_port: 5986 + ansible_connection: winrm + ansible_winrm_transport: credssp + ansible_winrm_server_cert_validation: ignore + roles: + - ansible-role-windows_template_build \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/vars/main.yml b/roles/oatakan.windows_template_build/vars/main.yml new file mode 100644 index 0000000..a571171 --- /dev/null +++ b/roles/oatakan.windows_template_build/vars/main.yml @@ -0,0 +1,22 @@ +--- + +unattend: + administrator_password: "{{ local_administrator_password }}" + local_accounts: + - name: "{{ local_account_username }}" + display_name: "{{ local_account_username }}" + description: "{{ local_account_username }} user" + group: Administrators + password: "{{ local_account_password }}" + settings: + computer_name: wintemp + time_zone: Central Standard Time + skip_auto_activation: true + +autologin_registry: + - name: AutoAdminLogon + data: 1 + - name: DefaultUserName + data: "{{ unattend.local_accounts[0].name }}" + - name: DefaultPassword + data: "{{ unattend.local_accounts[0].password }}" \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/.ansible-lint b/roles/oatakan.windows_virtio/.ansible-lint new file mode 100644 index 0000000..3cf9b56 --- 
/dev/null +++ b/roles/oatakan.windows_virtio/.ansible-lint @@ -0,0 +1,2 @@ +skip_list: + - '204' \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/.travis.yml b/roles/oatakan.windows_virtio/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/roles/oatakan.windows_virtio/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/LICENSE b/roles/oatakan.windows_virtio/LICENSE new file mode 100644 index 0000000..dc1b6e7 --- /dev/null +++ b/roles/oatakan.windows_virtio/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Orcun Atakan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/roles/oatakan.windows_virtio/README.md b/roles/oatakan.windows_virtio/README.md new file mode 100644 index 0000000..484f20f --- /dev/null +++ b/roles/oatakan.windows_virtio/README.md @@ -0,0 +1,37 @@ +# windows_virtio +This repo contains an Ansible role that installs virtio drivers on Windows images. +Role Name + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
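As a purely illustrative sketch (not something prescribed by the role), a requirements.yml entry for pulling this role from Ansible Galaxy could look like the following; the roles/ install path is an assumption:

    # requirements.yml -- hypothetical entry for this role
    - name: oatakan.windows_virtio

    # e.g. installed with: ansible-galaxy install -r requirements.yml -p roles/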
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - oatakan.windows_virtio + +License +------- + +MIT + +Author Information +------------------ + +Orcun Atakan diff --git a/roles/oatakan.windows_virtio/defaults/main.yml b/roles/oatakan.windows_virtio/defaults/main.yml new file mode 100644 index 0000000..221828e --- /dev/null +++ b/roles/oatakan.windows_virtio/defaults/main.yml @@ -0,0 +1,26 @@ +--- + +# this takes precedence, if a mounted drive is provided, we'll install from there +virtio_iso_mount_drive: '' + +virtio_changelog_url: https://fedorapeople.org/groups/virt/virtio-win/CHANGELOG +virtio_changelog_query: '(\d+).(\d+).(\d+)-(\d+)' +virtio_win_iso_url: "https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-{{ virtio_version }}/virtio-win.iso" +virtio_win_iso_name: virtio-win.iso +virtio_win_ovirt: false +virtio_win_facts: "{{ ansible_env.SystemDrive }}\\{{ source_of_supply_name | default('Support') }}\\facts.d" + +virtio_driver_directory: >- + {% if 'Windows Server 2019' in ansible_distribution -%} + {% set virt_dir = '2k19' %} + {% elif 'Windows Server 2016' in ansible_distribution -%} + {% set virt_dir = '2k16' %} + {% elif 'Windows Server 2012 R2' in ansible_distribution -%} + {% set virt_dir = '2k12R2' %} + {% elif 'Windows Server 2008 R2' in ansible_distribution -%} + {% set virt_dir = '2k8R2' %} + {% elif 'Windows 10' in ansible_distribution -%} + {% set virt_dir = 'w10' %} + {% else -%} + {% set virt_dir = 'w10' %} + {%- endif %}{{ virt_dir }} diff --git a/roles/oatakan.windows_virtio/handlers/main.yml b/roles/oatakan.windows_virtio/handlers/main.yml new file mode 100644 index 0000000..d1c8713 --- /dev/null +++ b/roles/oatakan.windows_virtio/handlers/main.yml @@ -0,0 +1,18 @@ +--- + +- name: Unmount + win_disk_image: + image_path: "{{ ansible_env.TEMP }}\\{{ virtio_win_iso_name }}" + state: absent + when: win_disk_image.mount_path is defined + +- name: Delete downloaded + win_file: + path: "{{ item }}" + state: absent + when: virtio_iso_mount_drive | length == 0 + with_items: + - "{{ ansible_env.TEMP }}\\redhat_balloon.cer" + - "{{ ansible_env.TEMP }}\\redhat_qxldod.cer" + - "{{ ansible_env.TEMP }}\\{{ virtio_win_iso_name }}" + - "{{ ansible_env.TEMP }}\\virtio_iso_extract" \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/meta/.galaxy_install_info b/roles/oatakan.windows_virtio/meta/.galaxy_install_info new file mode 100644 index 0000000..a867d99 --- /dev/null +++ b/roles/oatakan.windows_virtio/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Wed Jun 24 18:44:36 2020 +version: master diff --git a/roles/oatakan.windows_virtio/meta/main.yml b/roles/oatakan.windows_virtio/meta/main.yml new file mode 100644 index 0000000..0132af4 --- /dev/null +++ b/roles/oatakan.windows_virtio/meta/main.yml @@ -0,0 +1,29 @@ +--- +galaxy_info: + author: Orcun Atakan + description: Ansible galaxy role for installing virtio drivers on Windows images. 
+ role_name: windows_virtio + company: Red Hat + + license: MIT + + min_ansible_version: 2.5 + + platforms: + - name: Windows + versions: + - all + + cloud_platforms: + - ovirt + + galaxy_tags: + - windows + - ovirt + - rhev + - rhv + - kvm + - cloud + - multicloud + +dependencies: [] diff --git a/roles/oatakan.windows_virtio/tasks/download.yml b/roles/oatakan.windows_virtio/tasks/download.yml new file mode 100644 index 0000000..a9a0187 --- /dev/null +++ b/roles/oatakan.windows_virtio/tasks/download.yml @@ -0,0 +1,23 @@ +--- + +- name: Download {{ virtio_win_iso_name }} + win_get_url: + url: '{{ virtio_win_iso_url }}' + force: false + dest: '{{ ansible_env.TEMP }}\{{ virtio_win_iso_name }}' + notify: + - Delete downloaded + +- name: Mount {{ virtio_win_iso_name }} + win_disk_image: + image_path: '{{ ansible_env.TEMP }}\{{ virtio_win_iso_name }}' + register: win_disk_image + until: win_disk_image is success + delay: 3 + retries: 5 + notify: + - Unmount + when: ('Windows Server 2008' not in ansible_distribution) + +- include_tasks: extract_iso.yml + when: ('Windows Server 2008' in ansible_distribution) \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/tasks/extract_iso.yml b/roles/oatakan.windows_virtio/tasks/extract_iso.yml new file mode 100644 index 0000000..5a6822b --- /dev/null +++ b/roles/oatakan.windows_virtio/tasks/extract_iso.yml @@ -0,0 +1,20 @@ +--- + +- name: Install 7-zip + win_chocolatey: + name: 7zip + state: present + +- name: Ensure temp directory exists for iso + win_file: + path: '{{ ansible_env.TEMP }}\virtio_iso_extract' + state: directory + +- name: Extract iso + win_shell: > + 7z.exe x -y '{{ ansible_env.TEMP }}\{{ virtio_win_iso_name }}' -o'{{ ansible_env.TEMP }}\virtio_iso_extract' + +- name: Remove 7-zip + win_chocolatey: + name: 7zip + state: absent \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/tasks/install.yml b/roles/oatakan.windows_virtio/tasks/install.yml new file mode 100644 index 0000000..273ef7f --- /dev/null +++ b/roles/oatakan.windows_virtio/tasks/install.yml @@ -0,0 +1,33 @@ +--- + +- name: Set the virtio_win_iso_path and virtio_win_virtio_path + set_fact: + virtio_win_iso_path: '{{ win_disk_image.mount_path | default(virtio_iso_mount_drive) }}' + virtio_win_virtio_path: "{{ (win_disk_image.mount_path | default(virtio_iso_mount_drive)) + '\\virtio' if virtio_win_ovirt else (win_disk_image.mount_path | default(virtio_iso_mount_drive)) }}" + virtio_win_iso_name: "{{ virtio_win_iso_name }}" + when: + - virtio_iso_mount_drive | length > 0 or ('Windows Server 2008' not in ansible_distribution) + +- name: Set the virtio_win_iso_path and virtio_win_virtio_path + set_fact: + virtio_win_iso_path: '{{ ansible_env.TEMP }}\virtio_iso_extract' + virtio_win_virtio_path: "{{ ansible_env.TEMP + '\\virtio_iso_extract\\virtio' if virtio_win_ovirt else ansible_env.TEMP + '\\virtio_iso_extract' }}" + virtio_win_iso_name: "{{ virtio_win_iso_name }}" + when: + - virtio_iso_mount_drive | length == 0 + - ('Windows Server 2008' in ansible_distribution) + +- name: Get list of all drivers + win_command: driverquery /V + changed_when: false + register: driver_list + +- name: Check if Red Hat certificate is not already installed + win_shell: 'Get-ChildItem -Path Cert:\LocalMachine\TrustedPublisher' + changed_when: false + register: cert_check + +- include_tasks: install_cert.yml + when: cert_check.stdout is not search("Red Hat") + +- include_tasks: install_drivers.yml \ No newline at end of file diff --git 
a/roles/oatakan.windows_virtio/tasks/install_cert.yml b/roles/oatakan.windows_virtio/tasks/install_cert.yml new file mode 100644 index 0000000..d6fdde1 --- /dev/null +++ b/roles/oatakan.windows_virtio/tasks/install_cert.yml @@ -0,0 +1,22 @@ +--- + +- name: Export Cert from qxldod + win_shell: '$cert = (Get-AuthenticodeSignature "{{ virtio_win_virtio_path }}\qxldod\{{ virtio_driver_directory }}\amd64\qxldod.cat").SignerCertificate; [System.IO.File]::WriteAllBytes("{{ ansible_env.TEMP }}\redhat_qxldod.cer", $cert.Export([System.Security.Cryptography.X509Certificates.X509ContentType]::Cert));' + when: virtio_driver_directory != '2k8R2' + +- name: Export Cert from qxl + win_shell: '$cert = (Get-AuthenticodeSignature "{{ virtio_win_virtio_path }}\qxl\{{ virtio_driver_directory }}\amd64\qxl.cat").SignerCertificate; [System.IO.File]::WriteAllBytes("{{ ansible_env.TEMP }}\redhat_qxldod.cer", $cert.Export([System.Security.Cryptography.X509Certificates.X509ContentType]::Cert));' + when: virtio_driver_directory == '2k8R2' + +- name: Export Cert from balloon + win_shell: '$cert = (Get-AuthenticodeSignature "{{ virtio_win_virtio_path }}\Balloon\{{ virtio_driver_directory }}\amd64\blnsvr.exe").SignerCertificate; [System.IO.File]::WriteAllBytes("{{ ansible_env.TEMP }}\redhat_balloon.cer", $cert.Export([System.Security.Cryptography.X509Certificates.X509ContentType]::Cert));' + +- name: Install RH certificate (qxldod) to TrustedPublisher certificate store + win_command: 'certutil.exe -f -addstore "TrustedPublisher" {{ ansible_env.TEMP }}\redhat_qxldod.cer' + notify: + - Delete downloaded + +- name: Install RH certificate (Balloon) to TrustedPublisher certificate store + win_command: 'certutil.exe -f -addstore "TrustedPublisher" {{ ansible_env.TEMP }}\redhat_balloon.cer' + notify: + - Delete downloaded \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/tasks/install_drivers.yml b/roles/oatakan.windows_virtio/tasks/install_drivers.yml new file mode 100644 index 0000000..d5894ee --- /dev/null +++ b/roles/oatakan.windows_virtio/tasks/install_drivers.yml @@ -0,0 +1,45 @@ +--- + +- name: Install the Virtio Network Driver (netkvm) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\NetKVM\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search("netkvm") + +- name: Install the Virtio Block Driver (viostor) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\viostor\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search("viostor") + +- name: Install the QXL Graphics Driver (qxldod) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\qxldod\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: + - driver_list.stdout is not search("qxldod") + - virtio_driver_directory != '2k8R2' + +- name: Install the QXL Graphics Driver (qxl) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\qxl\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: + - driver_list.stdout is not search("qxl") + - virtio_driver_directory == '2k8R2' + +- name: Install the Balloon Driver (Balloon) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\Balloon\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search("balloon") + +- name: Install Virtio RNG driver (viorng) + 
win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\viorng\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search("viorng") + +- name: Install Virtio serial driver (vioserial) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\vioserial\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search("vioser") + +- name: Install Virtio Input driver (vioinput) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\vioinput\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search("vioinput") + +- name: Install Virtio SCSI Passthrough driver (vioscsi) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\vioscsi\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search("vioscsi") + +- name: Install pvpanic device driver (pvpanic) + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\pvpanic\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search("pvpanic") \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/tasks/main.yml b/roles/oatakan.windows_virtio/tasks/main.yml new file mode 100644 index 0000000..4045c9e --- /dev/null +++ b/roles/oatakan.windows_virtio/tasks/main.yml @@ -0,0 +1,25 @@ +--- + +- name: check new version of virtio + uri: + url: "{{ virtio_changelog_url }}" + return_content: true + validate_certs: no + register: register_virtio_changelog + ignore_errors: yes + delegate_to: localhost + become: no + vars: + ansible_connection: local + +- name: set virtio version facts + set_fact: + virtio_version: "{{ register_virtio_changelog.content | regex_search(virtio_changelog_query) | default('0.0') }}" + +- include_tasks: download.yml + when: + - virtio_iso_mount_drive | length == 0 + - (ansible_virtio_version | default()) != virtio_version + +- include_tasks: install.yml + when: virtio_iso_mount_drive | length > 0 or (ansible_virtio_version | default()) != virtio_version \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/tests/inventory b/roles/oatakan.windows_virtio/tests/inventory new file mode 100644 index 0000000..d18580b --- /dev/null +++ b/roles/oatakan.windows_virtio/tests/inventory @@ -0,0 +1 @@ +localhost \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/tests/test.yml b/roles/oatakan.windows_virtio/tests/test.yml new file mode 100644 index 0000000..5fcaf5a --- /dev/null +++ b/roles/oatakan.windows_virtio/tests/test.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + remote_user: Administrator + vars: + ansible_port: 5986 + ansible_connection: winrm + ansible_winrm_transport: credssp + ansible_winrm_server_cert_validation: ignore + roles: + - oatakan.windows_virtio \ No newline at end of file diff --git a/roles/requirements.yml b/roles/requirements.yml index 322dcb8..bb13029 100644 --- a/roles/requirements.yml +++ b/roles/requirements.yml @@ -1,19 +1,19 @@ # Java -- name: geerlingguy.java +#- name: geerlingguy.java # Node.js (Using this repo temporarily, as it fixes a package naming bug (See #95)) # - src: https://github.com/halkeye/ansible-role-nodejs # version: halkeye-patch-1 # Gitlab -- name: geerlingguy.gitlab -# Windows Ovirt Template -- name: oatakan.windows_ovirt_template -- name: oatakan.windows_template_build 
-- name: oatakan.windows_ovirt_guest_agent -- name: oatakan.windows_virtio -- name: ikke_t.podman_container_systemd -- name: ikke_t.container_image_cleanup +#- name: geerlingguy.gitlab +## Windows Ovirt Template +#- name: oatakan.windows_ovirt_template +#- name: oatakan.windows_template_build +#- name: oatakan.windows_ovirt_guest_agent +#- name: oatakan.windows_virtio +#- name: ikke_t.podman_container_systemd +#- name: ikke_t.container_image_cleanup # Infra -- name: bertvv.bind -- name: bertvv.dhcp -- name: linux-system-roles.network +#- name: bertvv.bind +#- name: bertvv.dhcp +#- name: linux-system-roles.network diff --git a/roles/sage905.dhcp/.gitignore b/roles/sage905.dhcp/.gitignore new file mode 100644 index 0000000..0fb91c3 --- /dev/null +++ b/roles/sage905.dhcp/.gitignore @@ -0,0 +1,13 @@ +# .gitignore + +# Hidden Vagrant-directory +.vagrant + +# Backup files (e.g. Vim, Gedit, etc.) +*~ + +# Vagrant base boxes (you never know when someone puts one in the repository) +*.box + +# Ignore test code (it's a separate branch worktree) +*tests/ diff --git a/roles/sage905.dhcp/.yamllint b/roles/sage905.dhcp/.yamllint new file mode 100644 index 0000000..d3f556e --- /dev/null +++ b/roles/sage905.dhcp/.yamllint @@ -0,0 +1,21 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: {max-spaces-inside: 1, level: error} + brackets: {max-spaces-inside: 1, level: error} + colons: {max-spaces-after: -1, level: error} + commas: {max-spaces-after: -1, level: error} + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: {max: 3, level: error} + hyphens: {level: error} + indentation: disable + key-duplicates: enable + line-length: disable + new-line-at-end-of-file: disable + new-lines: {type: unix} + trailing-spaces: disable + truthy: disable \ No newline at end of file diff --git a/roles/sage905.dhcp/README.md b/roles/sage905.dhcp/README.md new file mode 100644 index 0000000..b6cde07 --- /dev/null +++ b/roles/sage905.dhcp/README.md @@ -0,0 +1,233 @@ +# Ansible role `dhcp` + +Ansible role for setting up ISC DHCPD. The responsibilities of this role are to install packages and manage the configuration ([dhcpd.conf(5)](http://linux.die.net/man/5/dhcpd.conf)). Managing the firewall configuration is NOT a concern of this role. You can do this in your local playbook, or use another role (e.g. [bertvv.rh-base](https://galaxy.ansible.com/bertvv/rh-base). + +Refer to the [change log](CHANGELOG.md) for notable changes in each release. + +Do you use/like this role? Please consider giving it a star. If you [rate this role](https://galaxy.ansible.com/bertvv/dhcp) on Ansible Galaxy and find it lacking in some respect, please consider opening an Issue with actionable feedback or a PR so we can improve it. Thank you! + +## Requirements + +No specific requirements + +## Role Variables + +This role is able to set global options, and to specify subnet declarations. + +See the [test playbook](https://github.com/bertvv/ansible-role-dhcp/blob/vagrant-tests/test.yml) for a working example of a DHCP server in a test environment based on Vagrant and VirtualBox. This section is a reference of all supported options. + +### Global options + +The following variables, when set, will be added to the global section of the DHCP configuration file. If there is no default value specified, the corresponding setting will be left out of `dhcpd.conf(5)`. + +See the [dhcp-options(5)](http://linux.die.net/man/5/dhcp-options) man page for more information about these options. 
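
Before the reference table that follows, here is a minimal sketch of a few of these global settings expressed as role variables; the variable names are taken from the table below, while the values are purely illustrative:

```Yaml
# Illustrative values only -- adjust to your own network
dhcp_global_authoritative: authoritative
dhcp_global_default_lease_time: 600
dhcp_global_max_lease_time: 7200
dhcp_global_domain_name: example.com
dhcp_global_domain_name_servers:
  - 10.0.2.3
  - 10.0.2.4
dhcp_global_routers: 192.168.222.1
```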
+ +| Variable | Comments | +| :--- | :--- | +| `dhcp_global_authoritative` | Global authoritative statement (`authoritative`, `not authoritative`) | +| `dhcp_global_booting` | Global booting (`allow`, `deny`, `ignore`) | +| `dhcp_global_bootp` | Global bootp (`allow`, `deny`, `ignore`) | +| `dhcp_global_broadcast_address` | Global broadcast address | +| `dhcp_global_classes` | Class definitions with a match statement(1) | +| `dhcp_global_default_lease_time` | Default lease time in seconds | +| `dhcp_global_domain_name_servers` | A list of IP addresses of DNS servers(2) | +| `dhcp_global_domain_name` | The domain name the client should use when resolving host names | +| `dhcp_global_domain_search` | A list of domain names to be used by the client to locate non-FQDNs(1) | +| `dhcp_global_failover` | Failover peer settings (3) | +| `dhcp_global_failover_peer` | Name for the failover peer (e.g. `foo`) | +| `dhcp_global_filename` | Filename to request for boot | +| `dhcp_global_includes_missing` | Boolean. Continue if `includes` file(s) missing from role's files/ | +| `dhcp_global_includes` | List of config files to be included (from `dhcp_config_dir`) | +| `dhcp_global_log_facility` | Global log facility (e.g. `daemon`, `syslog`, `user`, ...) | +| `dhcp_global_max_lease_time` | Maximum lease time in seconds | +| `dhcp_global_next_server` | IP for PXEboot server | +| `dhcp_global_ntp_servers` | List of IP addresses of NTP servers | +| `dhcp_global_omapi_port` | OMAPI port | +| `dhcp_global_omapi_secret` | OMAPI secret | +| `dhcp_global_other_options` | Array of arbitrary additional global options | +| `dhcp_global_routers` | IP address of the router | +| `dhcp_global_server_name` | Server name sent to the client | +| `dhcp_global_server_state` | Service state (started, stopped) | +| `dhcp_global_subnet_mask` | Global subnet mask | + +**Remarks** + +(1) This role supports the definition of classes with a match statement, e.g.: + +```Yaml +# Class for VirtualBox VMs +dhcp_global_classes: + - name: vbox + match: 'match if binary-to-ascii(16,8,":",substring(hardware, 1, 3)) = "8:0:27"' +``` + +Class names can be used in the definition of address pools (see below). + +(2) The role variable `dhcp_global_domain_name_servers` may be written either as a list (when you have more than one item), or as a string (when you have only one). The following snippet shows an example of both: + +```Yaml +# A single DNS server +dhcp_global_domain_name_servers: 8.8.8.8 + +# A list of DNS servers +dhcp_global_domain_name_servers: + - 8.8.8.8 + - 8.8.4.4 +``` + +(3) This role also supports the definition of a failover peer, e.g.: + +```Yaml +# Failover peer definition +dhcp_global_failover_peer: failover-group +dhcp_global_failover: + role: primary # | secondary + address: 192.168.222.2 + port: 647 + peer_address: 192.168.222.3 + peer_port: 647 + max_response_delay: 15 + max_unacked_updates: 10 + load_balance_max_seconds: 5 + split: 255 + mclt: 3600 +``` + +The variable `dhcp_global_failover_peer` contains a name for the configured peer, to be used on a per pool basis. 
The failover declaration options are specified with the variable `dhcp_global_failover`, a dictionary that may contain the following options: + +| Option | Required | Comment | +| :--- | :---: | :-- | +| `address` | no | This server's IP address | +| `hba` | no | colon-separated-hex-list | +| `load_balance_max_seconds` | no | Cutoff after which load balance is disabled (3 to 5 recommended) | +| `max-balance` | no | Failover pool balance statement | +| `max-lease-misbalance` | no | Failover pool balance statement | +| `max-lease-ownership` | no | Failover pool balance statement | +| `max_response_delay` | no | Maximum seconds without contact before engaging failover | +| `max_unacked_updates` | no | Maximum BNDUPD it can send before receiving a BNDACK (10 recommended) | +| `mclt` | no | Maximum Client Lead Time | +| `min-balance` | no | Failover pool balance statement | +| `peer_address` | no | Failover peer's IP address | +| `peer_port` | no | Failover peer's port (generally 519/520 or 647/847) | +| `port` | no | This server's port (generally 519/520 or 647/847) | +| `role` | no | primary, secondary | +| `split` | no | Load balance split (0-255) | + +The failover peer directive has to be in the definition of address pools (see below). + +### Subnet declarations + +The role variable `dhcp_subnets` contains a list of dicts, specifying the subnet declarations to be added to the DHCP configuration file. Every subnet declaration should have an `ip` and `netmask`, other options are not mandatory. We start this section with an example; a complete overview of supported options follows. + +```Yaml +dhcp_subnets: + - ip: 192.168.222.0 + netmask: 255.255.255.128 + domain_name_servers: + - 10.0.2.3 + - 10.0.2.4 + range_begin: 192.168.222.50 + range_end: 192.168.222.127 + - ip: 192.168.222.128 + default_lease_time: 3600 + max_lease_time: 7200 + netmask: 255.255.255.128 + domain_name_servers: 10.0.2.3 + routers: 192.168.222.129 +``` + +An alphabetical list of supported options in a subnet declaration: + +| Option | Required | Comment | +| :--- | :---: | :-- | +| `booting` | no | allow,deny,ignore | +| `bootp` | no | allow,deny,ignore | +| `default_lease_time` | no | Default lease time for this subnet (in seconds) | +| `domain_name_servers` | no | List of domain name servers for this subnet(1) | +| `domain_search` | no | List of domain names for resolution of non-FQDNs(1) | +| `filename` | no | filename to retrieve from boot server | +| `hosts` | no | List of fixed IP address hosts for each subnet, similar to dhcp_hosts | +| `ip` | yes | **Required.** IP address of the subnet | +| `max_lease_time` | no | Maximum lease time for this subnet (in seconds) | +| `netmask` | yes | **Required.** Network mask of the subnet (in dotted decimal notation) | +| `next_server` | no | IP address of the boot server | +| `range_begin` | no | Lowest address in the range of dynamic IP addresses to be assigned | +| `range_end` | no | Highest address in the range of dynamic IP addresses to be assigned | +| `ranges` | no | If multiple ranges are needed, they can be specified as a list (2) | +| `routers` | no | IP address of the gateway for this subnet | +| `server_name` | no | Server name sent to the client | +| `subnet_mask` | no | Overrides the `netmask` of the subnet declaration | + +You can specify address pools within a subnet by setting the `pools` option. This allows you to specify a pool of addresses that will be treated differently than another pool of addresses, even on the same network segment or subnet.
It is a list of dicts with the following keys, all of which are optional: + +| Option | Comment | +| :--- | :--- | +| `allow` | Specifies which hosts are allowed in this pool(1) | +| `default_lease_time` | The default lease time for this pool | +| `deny` | Specifies which hosts are not allowed in this pool | +| `domain_name_servers` | The domain name servers to be used for this pool(1) | +| `max_lease_time` | The maximum lease time for this pool | +| `min_lease_time` | The minimum lease time for this pool | +| `range_begin` | The lowest address in this pool | +| `range_end` | The highest address in this pool | + +(1) For the `allow` and `deny` fields, the options are enumerated in [dhcpd.conf(5)](http://linux.die.net/man/5/dhcpd.conf), but include: + +- `booting` +- `bootp` +- `client-updates` +- `known-clients` +- `members of "CLASS"` +- `unknown-clients` + +(2) Multiple ranges can be specified as a list, for example: + +```Yaml +ranges: + - { begin: 192.168.222.50, end: 192.168.222.99 } + - { begin: 192.168.222.110, end: 192.168.222.127 } +``` + +### Host declarations + +You can specify hosts that should get a fixed IP address based on their MAC address by setting the `dhcp_hosts` option. This is a list of dicts with the following three keys, of which `name` and `mac` are mandatory: + +| Option | Comment | +| :--- | :--- | +| `name` | The name of the host | +| `mac` | The MAC address of the host | +| `ip` | The IP address to be assigned to the host | + +```Yaml +dhcp_hosts: + - name: cl1 + mac: '00:11:22:33:44:55' + ip: 192.168.222.150 + - name: cl2 + mac: '00:de:ad:be:ef:00' + ip: 192.168.222.151 +``` + +### Specify PXEBoot server + +Setting the variable `dhcp_pxeboot_server` redirects PXE clients to the specified PXEBoot server so that they can boot over the network. The specified server should have boot images in the expected locations. Use e.g. [bertvv.pxeserver](https://galaxy.ansible.com/bertvv/pxeserver) to configure it. + +## Dependencies + +No dependencies. + +## Example Playbook + +See the [test playbook](https://github.com/bertvv/ansible-role-dhcp/blob/vagrant-tests/test.yml). + +## Testing + +Tests for this role are provided in the form of a Vagrant environment that is kept in a separate branch, `vagrant-tests`. For more information about setting up the test environment and running the tests, refer to the [README](https://github.com/bertvv/ansible-role-dhcp/blob/vagrant-tests/README.md) of the test branch. + +## License + +BSD + +## Contributing + +Issues, feature requests and ideas are appreciated and can be posted in the Issues section. Pull requests are also very welcome. Preferably, create a topic branch and when submitting, squash your commits into one (with a descriptive message).
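
As a complement to the reference tables above, here is a hedged sketch of how the `pools` and `dhcp_pxeboot_server` options documented in this README might be combined in a subnet declaration. All addresses are placeholders; the class name `vbox` and the peer name `failover-group` simply reuse the illustrative names from the earlier examples, and placing `failover_peer` inside a pool follows the failover note above:

```Yaml
dhcp_subnets:
  - ip: 192.168.222.0
    netmask: 255.255.255.0
    routers: 192.168.222.1
    pools:
      # Pool reserved for members of the "vbox" class (see dhcp_global_classes above)
      - allow: members of "vbox"
        range_begin: 192.168.222.50
        range_end: 192.168.222.99
      # Catch-all pool with a short lease that participates in failover
      - failover_peer: failover-group
        default_lease_time: 600
        range_begin: 192.168.222.100
        range_end: 192.168.222.199

# Redirect PXE clients to this boot server (illustrative address)
dhcp_pxeboot_server: 192.168.222.10
```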
diff --git a/roles/sage905.dhcp/defaults/main.yml b/roles/sage905.dhcp/defaults/main.yml new file mode 100644 index 0000000..eb07550 --- /dev/null +++ b/roles/sage905.dhcp/defaults/main.yml @@ -0,0 +1,5 @@ +# roles/dhcp/defaults/main.yml +--- +dhcp_global_includes_missing: false +dhcp_packages_state: "present" +dhcp_subnets: [] diff --git a/roles/sage905.dhcp/handlers/main.yml b/roles/sage905.dhcp/handlers/main.yml new file mode 100644 index 0000000..6938353 --- /dev/null +++ b/roles/sage905.dhcp/handlers/main.yml @@ -0,0 +1,7 @@ +# roles/dhcp/handlers/main.yml +--- + +- name: restart dhcp + service: + name: "{{ dhcp_service }}" + state: "{{ (dhcp_global_server_state | default('started') == 'started') | ternary('restarted', 'stopped') }}" diff --git a/roles/sage905.dhcp/meta/.galaxy_install_info b/roles/sage905.dhcp/meta/.galaxy_install_info new file mode 100644 index 0000000..3676d72 --- /dev/null +++ b/roles/sage905.dhcp/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Sun Jun 28 14:49:12 2020 +version: v3.0.1 diff --git a/roles/sage905.dhcp/meta/main.yml b/roles/sage905.dhcp/meta/main.yml new file mode 100644 index 0000000..c204e8a --- /dev/null +++ b/roles/sage905.dhcp/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: + author: + description: Ansible role for setting up ISC DHCPD. + license: BSD + min_ansible_version: 2.9 + platforms: + - name: EL + versions: + - 7 + - name: Fedora + versions: + - 29 + galaxy_tags: + - system + - networking +dependencies: [] diff --git a/roles/sage905.dhcp/tasks/main.yml b/roles/sage905.dhcp/tasks/main.yml new file mode 100644 index 0000000..2aadce5 --- /dev/null +++ b/roles/sage905.dhcp/tasks/main.yml @@ -0,0 +1,50 @@ +# roles/dhcp/tasks/main.yml +--- + +- name: Load distro-specific variables + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution }}.yml" + - "{{ ansible_os_family }}.yml" + - "{{ default }}.yml" + tags: dhcp + +- name: Install packages + package: + name: "{{ dhcp_packages }}" + state: "{{ dhcp_packages_state }}" + tags: dhcp + +- name: Install includes + copy: + src: "{{ item }}" + dest: "{{ dhcp_config_dir }}/{{ item | basename }}" + with_items: "{{ dhcp_global_includes }}" + when: dhcp_global_includes is defined + ignore_errors: "{{ dhcp_global_includes_missing }}" + tags: dhcp + +- name: Set config directory perms + file: + path: "{{ dhcp_config | dirname }}" + state: directory + mode: 0755 + tags: dhcp + +- name: Install config file + template: + src: etc_dhcp_dhcpd.conf.j2 + dest: "{{ dhcp_config }}" + owner: root + group: root + mode: 0644 + validate: 'dhcpd -t -cf %s' + notify: restart dhcp + tags: dhcp + +- name: "Ensure service is {{ dhcp_global_server_state | default('started') }}" + service: + name: "{{ dhcp_service }}" + state: "{{ dhcp_global_server_state | default('started') }}" + enabled: true + tags: dhcp diff --git a/roles/sage905.dhcp/templates/etc_dhcp_dhcpd.conf.j2 b/roles/sage905.dhcp/templates/etc_dhcp_dhcpd.conf.j2 new file mode 100644 index 0000000..c0760ad --- /dev/null +++ b/roles/sage905.dhcp/templates/etc_dhcp_dhcpd.conf.j2 @@ -0,0 +1,296 @@ +# ISC DHCPD configuration -- don't edit manually! 
+# +# {{ ansible_managed }} + +# +# Global options +# +{% if dhcp_global_omapi_port is defined %} +omapi-port {{ dhcp_global_omapi_port }}; +{% endif %} +{% if dhcp_global_omapi_secret is defined %} +key omapi_key { + algorithm HMAC-MD5; + secret "{{ dhcp_global_omapi_secret }}"; +}; +{% endif %} +{% if dhcp_global_authoritative is defined %} +{{ dhcp_global_authoritative }}; +{% endif %} +{% if dhcp_global_log_facility is defined %} +log-facility {{ dhcp_global_log_facility }}; +{% endif %} +{% if dhcp_global_bootp is defined %} +{{ dhcp_global_bootp }} bootp; +{% endif %} +{% if dhcp_global_booting is defined %} +{{ dhcp_global_booting }} booting; +{% endif %} +{% if dhcp_global_next_server is defined %} +next-server {{ dhcp_global_next_server}}; +{% endif %} +{% if dhcp_global_filename is defined %} +filename "{{ dhcp_global_filename }}"; +{% endif %} +{% if dhcp_global_default_lease_time is defined %} +default-lease-time {{ dhcp_global_default_lease_time }}; +{% endif %} +{% if dhcp_global_max_lease_time is defined %} +max-lease-time {{ dhcp_global_max_lease_time }}; +{% endif %} +{% if dhcp_global_subnet_mask is defined %} +option subnet-mask {{ dhcp_global_subnet_mask }}; +{% endif %} +{% if dhcp_global_broadcast_address is defined %} +option broadcast-address {{ dhcp_global_broadcast_address }}; +{% endif %} +{% if dhcp_global_routers is defined %} +option routers {{ dhcp_global_routers }}; +{% endif %} +{% if dhcp_global_domain_name is defined %} +option domain-name "{{ dhcp_global_domain_name }}"; +{% endif %} +{% if dhcp_global_ntp_servers is defined %} +{% if dhcp_global_ntp_servers is string %} +option ntp-servers {{ dhcp_global_ntp_servers }}; +{% else %} +option ntp-servers {{ dhcp_global_ntp_servers|join(', ') }}; +{% endif %} +{% endif %} +{% if dhcp_global_domain_name_servers is defined %} +{% if dhcp_global_domain_name_servers is string %} +option domain-name-servers {{ dhcp_global_domain_name_servers }}; +{% else %} +option domain-name-servers {{ dhcp_global_domain_name_servers|join(', ') }}; +{% endif %} +{% endif %} +{% if dhcp_global_domain_search is defined %} +{% if dhcp_global_domain_search is string %} +option domain-search "{{ dhcp_global_domain_search }}"; +{% else %} +option domain-search "{{ dhcp_global_domain_search|join('", "') }}"; +{% endif %} +{% endif %} +{% if dhcp_global_server_name is defined %} +option server-name "{{ dhcp_global_server_name }}"; +{% endif %} +{% if dhcp_global_other_options is defined %} +{% for option in dhcp_global_other_options %} +option {{ option }}; +{% endfor %} +{% endif %} +{% if dhcp_global_failover_peer is defined %} + +# +# DHCP Failover config +# +# Notes: In the past couple years, TCP ports 647 (primary) and 847 (peer) have +# emerged as the standard bindings for DHCP dhcp_global_failover It is worth noting that as +# recently as 2005, the dhcpd.conf(5) man page used ports 519 and 520 in its +# failover example, but 647 and 847 look like good choices as of 2008. However, +# the dhcpd.conf(5) man page says that the primary port and the peer port may be +# the same number. 
+ +failover peer "{{ dhcp_global_failover_peer }}" { +{% if dhcp_global_failover.role is defined %} + # [ primary | secondary ]; + {{ dhcp_global_failover.role }}; +{% endif %} +{% if dhcp_global_failover.address is defined %} + address {{ dhcp_global_failover.address }}; +{% endif %} +{% if dhcp_global_failover.port is defined %} + port {{ dhcp_global_failover.port }}; +{% endif %} +{% if dhcp_global_failover.peer_address is defined %} + peer address {{ dhcp_global_failover.peer_address }}; +{% endif %} +{% if dhcp_global_failover.peer_port is defined %} + peer port {{ dhcp_global_failover.peer_port }}; +{% endif %} +{% if dhcp_global_failover.max_response_delay is defined %} + max-response-delay {{ dhcp_global_failover.max_response_delay }}; +{% endif %} +{% if dhcp_global_failover.max_unacked_updates is defined %} + max-unacked-updates {{ dhcp_global_failover.max_unacked_updates }}; +{% endif %} +{% if dhcp_global_failover.split is defined %} + split {{ dhcp_global_failover.split }}; +{% endif %} +{% if dhcp_global_failover.hba is defined %} + hba {{ dhcp_global_failover.hba }}; +{% endif %} +{% if dhcp_global_failover.mclt is defined %} + mclt {{ dhcp_global_failover.mclt }}; +{% endif %} +{% if dhcp_global_failover.load_balance_max_seconds is defined %} + load balance max seconds {{ dhcp_global_failover.load_balance_max_seconds }}; +{% endif %} +{% if dhcp_global_failover.max_lease_misbalance is defined %} + max-lease-misbalance {{ dhcp_global_failover.max_lease_misbalance }}; +{% endif %} +{% if dhcp_global_failover.max_lease_ownership is defined %} + max-lease-ownership {{ dhcp_global_failover.max_lease_ownership }}; +{% endif %} +{% if dhcp_global_failover.min_balance is defined %} + min-balance {{ dhcp_global_failover.min_balance }}; +{% endif %} +{% if dhcp_global_failover.max_balance is defined %} + max-balance {{ dhcp_global_failover.max_balance }}; +{% endif %} +} +{% endif %} +{% if dhcp_global_includes is defined %} +# +# Includes +# +{% for include in dhcp_global_includes %} +include "{{ dhcp_config_dir }}/{{ include | basename }}"; +{% endfor %} +{% endif %} + +{% if dhcp_global_classes is defined %} +# +# Classes +# +{% for class in dhcp_global_classes %} +class "{{ class.name }}" { +{% if class.match is defined %} + {{ class.match }}; +{% endif %} +} +{% endfor %} +{% endif %} +# +# Subnet declarations +# +{% for subnet in dhcp_subnets %} +subnet {{ subnet.ip }} netmask {{ subnet.netmask }} { +{% if subnet.default_lease_time is defined %} + default-lease-time {{ subnet.default_lease_time }}; +{% endif %} +{% if subnet.max_lease_time is defined %} + max-lease-time {{ subnet.max_lease_time }}; +{% endif %} +{% if subnet.routers is defined %} + option routers {{ subnet.routers }}; +{% endif %} +{% if subnet.subnet_mask is defined %} + option subnet-mask {{ subnet.subnet_mask }}; +{% endif %} +{% if subnet.domain_search is defined %} +{% if subnet.domain_search is string %} + option domain-search "{{ subnet.domain_search }}"; +{% else %} + option domain-search "{{ subnet.domain_search|join('", "') }}"; +{% endif %} +{% endif %} +{% if subnet.domain_name_servers is defined %} +{% if subnet.domain_name_servers is string %} + option domain-name-servers {{ subnet.domain_name_servers }}; +{% else %} + option domain-name-servers {{ subnet.domain_name_servers|join(', ') }}; +{% endif %} +{% endif %} +{% if subnet.range_begin is defined and subnet.range_end is defined %} + range {{ subnet.range_begin }} {{ subnet.range_end }}; +{% endif %} +{% if subnet.ranges is defined %} +{% for 
range in subnet.ranges %} + range {{ range.begin }} {{ range.end }}; +{% endfor %} +{% endif %} +{% if subnet.server_name is defined %} + server-name {{ subnet.server_name }}; +{% endif %} +{% if subnet.next_server is defined %} + next-server {{ subnet.next_server }}; +{% endif %} +{% if subnet.filename is defined %} + filename "{{ subnet.filename }}"; +{% endif %} +{% if subnet.bootp is defined %} +{{ subnet.bootp }} bootp; +{% endif %} +{% if subnet.booting is defined %} +{{ subnet.booting }} booting; +{% endif %} +{% if subnet.hosts is defined %} +{% for host in subnet.hosts %} + host {{ host.name }} { + hardware ethernet {{ host.mac }}; + fixed-address {{ host.ip }}; + } +{% endfor %} +{% endif %} +{% if subnet.pools is defined %} + # Address pool(s) +{% for pool in subnet.pools %} + pool { +{% if pool.failover_peer is defined %} +# This pool has failover, see above for server details +failover peer "{{ pool.failover_peer }}"; +{% endif %} +{% if pool.domain_name_servers is defined %} +{% if pool.domain_name_servers is string %} + option domain-name-servers {{ pool.domain_name_servers }}; +{% else %} + option domain-name-servers {{ pool.domain_name_servers|join(', ') }}; +{% endif %} +{% endif %} +{% if pool.default_lease_time is defined %} + default-lease-time {{ pool.default_lease_time }}; +{% endif %} +{% if pool.min_lease_time is defined %} + min-lease-time {{ pool.min_lease_time }}; +{% endif %} +{% if pool.max_lease_time is defined %} + max-lease-time {{ pool.max_lease_time }}; +{% endif %} +{% if pool.range_begin is defined and pool.range_end is defined %} + range {{ pool.range_begin }} {{ pool.range_end }}; +{% endif %} +{% if pool.allow is defined %} + allow {{ pool.allow }}; +{% endif %} +{% if pool.deny is defined %} + deny {{ pool.deny }}; +{% endif %} + } +{% endfor %} +{% endif %} +} +{% endfor %} +{% if dhcp_hosts is defined %} + +# +# Host declarations +# +{% for host in dhcp_hosts %} +host {{ host.name | replace (" ","_") | replace ("'","_") | replace (":","_") }} { + hardware ethernet {{ host.mac }}; +{% if host.ip is defined %} + fixed-address {{ host.ip }}; +{% endif %} +} +{% endfor %} +{% endif %} +{% if dhcp_pxeboot_server is defined %} + +# +# PXEBoot server settings +# +option arch code 93 = unsigned integer 16; # RFC4578 + +class "pxeclients" { + match if substring (option vendor-class-identifier, 0, 9) = "PXEClient"; + next-server {{ dhcp_pxeboot_server }}; + + if option arch = 00:07 { + filename "pxelinux/bootx64.efi"; + } else { + filename "pxelinux/pxelinux.0"; + } +} + +{% endif %} diff --git a/roles/sage905.dhcp/vars/RedHat.yml b/roles/sage905.dhcp/vars/RedHat.yml new file mode 100644 index 0000000..0a4a8ae --- /dev/null +++ b/roles/sage905.dhcp/vars/RedHat.yml @@ -0,0 +1,11 @@ +# roles/dhcp/vars/RedHat.yml +--- + +dhcp_packages: + - dhcp + +dhcp_config_dir: /etc/dhcp + +dhcp_config: /etc/dhcp/dhcpd.conf + +dhcp_service: dhcpd diff --git a/roles/sage905.mineos/molecule/default/INSTALL.rst b/roles/sage905.mineos/molecule/default/INSTALL.rst new file mode 100644 index 0000000..d926ca2 --- /dev/null +++ b/roles/sage905.mineos/molecule/default/INSTALL.rst @@ -0,0 +1,22 @@ +******* +Docker driver installation guide +******* + +Requirements +============ + +* Docker Engine + +Install +======= + +Please refer to the `Virtual environment`_ documentation for installation best +practices. If not using a virtual environment, please consider passing the +widely recommended `'--user' flag`_ when invoking ``pip``. + +.. 
_Virtual environment: https://virtualenv.pypa.io/en/latest/ +.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site + +.. code-block:: bash + + $ python3 -m pip install 'molecule[docker]' diff --git a/roles/sage905.mineos/molecule/default/converge.yml b/roles/sage905.mineos/molecule/default/converge.yml new file mode 100644 index 0000000..2765193 --- /dev/null +++ b/roles/sage905.mineos/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: Converge + hosts: all + tasks: + - name: "Include sage905.mineos" + include_role: + name: "sage905.mineos" diff --git a/roles/sage905.mineos/molecule/default/molecule.yml b/roles/sage905.mineos/molecule/default/molecule.yml new file mode 100644 index 0000000..0778b64 --- /dev/null +++ b/roles/sage905.mineos/molecule/default/molecule.yml @@ -0,0 +1,13 @@ +--- +dependency: + name: galaxy +driver: + name: podman +platforms: + - name: instance + image: docker.io/pycontribs/centos:7 + pre_build_image: true +provisioner: + name: ansible +verifier: + name: ansible diff --git a/roles/sage905.mineos/molecule/default/verify.yml b/roles/sage905.mineos/molecule/default/verify.yml new file mode 100644 index 0000000..a82dd6f --- /dev/null +++ b/roles/sage905.mineos/molecule/default/verify.yml @@ -0,0 +1,9 @@ +--- +# This is an example playbook to execute Ansible tests. + +- name: Verify + hosts: all + tasks: + - name: Example assertion + assert: + that: true
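
The Molecule verify playbook above only asserts `true`. A slightly more meaningful check could stat an artefact that the converged role is expected to create and assert on the result; the following is a sketch using standard Ansible modules, and the path is an assumption rather than something defined by the sage905.mineos role in this diff:

```Yaml
---
- name: Verify
  hosts: all
  tasks:
    - name: Stat a path the converged role is expected to have created
      stat:
        path: /var/games/minecraft  # assumption -- replace with a path the role actually manages
      register: role_artifact

    - name: Assert that the expected path exists
      assert:
        that:
          - role_artifact.stat.exists
```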