Compare commits: rhc...creds-exis
33 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 170c24af24 |  |
|  | c6c3231234 |  |
|  | f554bc0ee1 |  |
|  | 88b171bb48 |  |
|  | 16553210bd |  |
|  | 4f0df3c8db |  |
|  | e990f39c60 |  |
|  | 9cd49892c6 |  |
|  | 3468d1c443 |  |
|  | 10f0bb4641 |  |
|  | 018c006e3b |  |
|  | 1af584b4ea |  |
|  | d60e0c7ca6 |  |
|  | c198780d72 |  |
|  | 1832bb6199 |  |
|  | 2447d0d511 |  |
|  | c0cd993c69 |  |
|  | d5093fa544 |  |
|  | dd1de852b6 |  |
|  | e958164cb6 |  |
|  | 98416fcc3c |  |
|  | 5f8bd8929e |  |
|  | 2ee334f6b3 |  |
|  | d7e9ad637b |  |
|  | a5aa9564f5 |  |
|  | 44585bf1b9 |  |
|  | 2cd3ec6f72 |  |
|  | 7e4399eac2 |  |
|  | a78e74e782 |  |
|  | ddb4c09157 |  |
|  | f7f95f2593 |  |
|  | 81f35e8d67 |  |
|  | c0d4493326 |  |
@@ -1,4 +1,12 @@
---
profile: production
offline: false

skip_list:
  - "galaxy[no-changelog]"

exclude_paths:
  # would be better to move the roles here to the top-level roles directory
  - collections/ansible_collections/demo/compliance/roles/
  - roles/redhatofficial.*
  - .github/

13  .devfile.yaml  Normal file
@@ -0,0 +1,13 @@
---
schemaVersion: 2.2.0
metadata:
  name: product-demos
components:
  - name: product-demos-ee
    container:
      image: quay.io/mloriedo/ansible-creator-ee:latest # workaround for https://github.com/eclipse/che/issues/21778
      memoryRequest: 256M
      memoryLimit: 5Gi
      cpuRequest: 250m
      cpuLimit: 2000m
      args: ['tail', '-f', '/dev/null']

BIN  .github/images/project-architecture.png  vendored  Normal file
Binary file not shown. (Size: 111 KiB)

25  .github/workflows/ansible-lint.yml  vendored
@@ -1,25 +0,0 @@
---
name: Ansible Lint
on:
  - push
  - pull_request

env:
  ANSIBLE_GALAXY_SERVER_LIST: ah,galaxy
  ANSIBLE_GALAXY_SERVER_AH_URL: https://console.redhat.com/api/automation-hub/
  ANSIBLE_GALAXY_SERVER_AH_AUTH_URL: https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token
  ANSIBLE_GALAXY_SERVER_AH_TOKEN: ${{ secrets.ANSIBLE_GALAXY_SERVER_AH_TOKEN }}
  ANSIBLE_GALAXY_SERVER_GALAXY_URL: https://galaxy.ansible.com/

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      # Important: This sets up your GITHUB_WORKSPACE environment variable
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0 # needed for progressive mode to work

      - name: Run ansible-lint
        uses: ansible/ansible-lint-action@v6.11.0

50  .github/workflows/linter.yml.old  vendored
@@ -1,50 +0,0 @@
---
###########################
###########################
## Linter GitHub Actions ##
###########################
###########################
name: Lint Code Base

#
# Documentation:
# https://help.github.com/en/articles/workflow-syntax-for-github-actions
#

#############################
# Start the job on all push #
#############################
on: [push, pull_request]

###############
# Set the Job #
###############
jobs:
  build:
    # Name the Job
    name: Lint Code Base
    # Set the agent to run on
    runs-on: ubuntu-latest

    ##################
    # Load all steps #
    ##################
    steps:
      ##########################
      # Checkout the code base #
      ##########################
      - name: Checkout Code
        uses: actions/checkout@v2
        with:
          # Full git history is needed to get a proper list of changed files within `super-linter`
          fetch-depth: 0

      ################################
      # Run Linter against code base #
      ################################
      - name: Lint Code Base
        uses: github/super-linter@v4
        env:
          VALIDATE_ALL_CODEBASE: false
          DEFAULT_BRANCH: main
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

17  .github/workflows/pre-commit.yml  vendored  Normal file
@@ -0,0 +1,17 @@
---
name: pre-commit
on:
  - push
  - pull_request_target

env:
  ANSIBLE_GALAXY_SERVER_AH_TOKEN: ${{ secrets.ANSIBLE_GALAXY_SERVER_AH_TOKEN }}

jobs:
  pre-commit:
    name: pre-commit
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
      - uses: pre-commit/action@v3.0.0

41  .github/workflows/release.yml  vendored  Normal file
@@ -0,0 +1,41 @@
---
name: release

on:
  push:
    branches:
      - main
    tags:
      - "v*.*.*"

  workflow_run:
    workflows: ["pre-commit"]
    types:
      - completed

jobs:
  release:
    name: Release Job
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/v')
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Install go (required for Changelog parsing)
        uses: actions/setup-go@v4

      - name: Parse CHANGELOG.md
        run: |
          GO111MODULE=on go install github.com/rcmachado/changelog@0.7.0
          changelog show "$GITHUB_REF_NAME" > ${{ github.workspace }}-CHANGELOG.txt
          echo "Release note for $GITHUB_REF_NAME :"
          cat ${{ github.workspace }}-CHANGELOG.txt

      - name: Release
        uses: softprops/action-gh-release@v1
        with:
          body_path: ${{ github.workspace }}-CHANGELOG.txt
          files: |
            LICENSE
            CHANGELOG.md

7  .gitignore  vendored
@@ -1,4 +1,4 @@

ansible-navigator.log
sean_login_info.yml
.DS_Store
choose_demo.yml
@@ -6,4 +6,7 @@ choose_demo_example_azure.yml
choose_demo_example_aws.yml
.ansible.cfg
*.gz

*artifact*.json
**/roles/*
!**/roles/requirements.yml
.deployment_id

29  .pre-commit-config.yaml  Normal file
@@ -0,0 +1,29 @@
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: end-of-file-fixer
      - id: trailing-whitespace

      - id: check-yaml
        exclude: \.j2.(yaml|yml)$|\.(yaml|yml).j2$
        args: [--unsafe] # see https://github.com/pre-commit/pre-commit-hooks/issues/273

      - id: check-toml
      - id: check-json
      - id: check-symlinks

  - repo: https://github.com/ansible/ansible-lint.git
    # get latest release tag from https://github.com/ansible/ansible-lint/releases/
    rev: v6.20.3
    hooks:
      - id: ansible-lint
        additional_dependencies:
          - jmespath

  - repo: https://github.com/psf/black-pre-commit-mirror
    rev: 23.11.0
    hooks:
      - id: black
...

7  .vscode/extensions.json  vendored  Normal file
@@ -0,0 +1,7 @@
{
  "recommendations": [
    "redhat.vscode-yaml",
    "redhat.ansible",
    "ms-python.black-formatter"
  ]
}

3  .vscode/settings.json  vendored  Normal file
@@ -0,0 +1,3 @@
{
  "editor.renderWhitespace": "all"
}

19  .yamllint  Normal file
@@ -0,0 +1,19 @@
---
extends: default

rules:
  line-length: disable
  trailing-spaces: enable
  colons:
    max-spaces-before: 0
    max-spaces-after: -1
  indentation:
    level: error
    indent-sequences: true # consistent with ansible-lint
  truthy:
    level: error
    allowed-values:
      - 'true'
      - 'false'

...

12  CHANGELOG.md  Normal file
@@ -0,0 +1,12 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [v-0.0.1](https://github.com/ansible/product-demos/-/tree/v-0.0.1) - 2024-01-12

### Added

- Initial release ([1af584b4ea6d77812bfcb2f6474fee6ee1b13666](https://github.com/ansible/product-demos/-/commit/1af584b4ea6d77812bfcb2f6474fee6ee1b13666))

109  CONTRIBUTING.md
@@ -1,12 +1,18 @@
# Contribution Guidelines
This document aims to outline the requirements for the various forms of contribution for this project.

**ALL** contributions are subject to review via pull request
## Project Architecture


## Pull Requests

**ALL** contributions are subject to review via pull request

### Pull Requests
1) Ensure the "base repository" is set to "ansible/product-demos".

### Pull Request Guidelines
#### Pull Request Guidelines
- PRs should include the playbook/demo and required entry in corresponding `<demo>/setup.yml`.
- PRs should include documentation in corresponding `<demo>/README.md`.
- PRs should be rebased against the `main` branch to avoid conflicts.
@@ -19,13 +25,15 @@ This document aims to outline the requirements for the various forms of contribu
3) Make any changes needed to match the existing standards in the directory.
   1) Ex: Parameterized hosts
      ```ansible
      hosts: "{{ HOSTS | default('windows') }}"
      hosts: "{{ _hosts | default('windows') }}"
      ```
4) Create an entry for your playbook in your subdirectory's `setup.yml`
   1) You can copy and paste an existing one and edit it.
   2) Ensure you edit the name, playbook path, survey etc.
5) Add any needed roles/collections to the [requirements.yml](/collections/requirements.yml)
6) Test via RHPDS, specify your branch name within the project configuration.
6) Test via [demo.redhat.com](https://demo.redhat.com/catalog?item=babylon-catalog-prod/sandboxes-gpte.aap-product-demos.prod&utm_source=webapp&utm_medium=share-link), specify your branch name within the project configuration.

> NOTE: demo.redhat.com is available to Red Hat Associates and Partners with a valid account.

## New Demo Section/Category
1) Create a new subdirectory with no spaces
@@ -44,3 +52,96 @@ This document aims to outline the requirements for the various forms of contribu
   - `controller_components` can be any of the roles defined [here](https://github.com/redhat-cop/controller_configuration/tree/devel/roles)
   - Add variables for each component listed
3) Include a README.md in the subdirectory
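A new demo section's `setup.yml` follows the same `controller_components` / `controller_*` variable layout used elsewhere in this repository (see `cloud/setup.yml` further down in this diff). A minimal sketch of such an entry is shown below; the template name and playbook path are placeholders, not part of this change:

```yaml
---
controller_components:
  - job_templates

controller_templates:
  - name: "MYDEMO / Example Job"      # placeholder template name
    organization: Default
    project: "Ansible official demo project"
    playbook: "mydemo/example.yml"    # placeholder playbook path
    inventory: "Demo Inventory"
    credentials:
      - "Demo Credential"
    survey_enabled: true
    survey:
      name: ''
      description: ''
      spec:
        - question_name: Server Name or Pattern
          type: text
          variable: _hosts
          required: true
```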

## Testing

We utilize pre-commit to handle Git hooks, initiating a pre-commit check with each commit, both locally and on CI.

To install pre-commit, use the following commands:
```bash
pip install pre-commit
pre-commit install
```

For further details, refer to the [pre-commit installation documentation](https://pre-commit.com/#installation).

To execute ansible-lint (whether within pre-commit or independently), you must configure an environment variable for the token required to connect to Automation Hub. Obtain the token [here](https://console.redhat.com/ansible/automation-hub/token).

Copy the token value and execute the following command:

```bash
export ANSIBLE_GALAXY_SERVER_AH_TOKEN=<token>
```

## Release Process

We follow a structured release process for this project. Here are the steps involved:

1. **Create a Release Branch:**
   - Start by creating a new release branch from the `main` branch.

   ```bash
   git checkout -b release/v-<version>
   ```

2. **Update Changelog:**
   - Open the `CHANGELOG.md` file to manually add your change to the appropriate section.
   - Our changelog follows the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format and includes the following categories of changes:

     - `Added` for new features.
     - `Changed` for changes in existing functionality.
     - `Deprecated` for features that will be removed in upcoming releases.
     - `Fixed` for bug fixes.
     - `Removed` for deprecated features that were removed.
     - `Security` for security-related changes.

   - Add a new entry under the relevant category. Include a brief summary of the change and the merge request commit tag.

   ```markdown
   ## [Unreleased]

   ### Added

   - New feature or enhancement ([Merge Request Commit](https://github.com/ansible/product-demos/-/commit/<commit-hash>))
   ```

   - Replace `<commit-hash>` with the actual commit hash from the merge request.

3. **Commit Changes:**
   - Commit the changes made to the `CHANGELOG.md` file.

   ```bash
   git add CHANGELOG.md
   git commit -m "Update CHANGELOG for release <version>"
   ```

4. **Create a Pull Request:**
   - Open a pull request from the release branch to the `main` branch.

5. **Review and Merge:**
   - Review the pull request and merge it into the `main` branch.

6. **Tag the Release:**
   - Once the pull request is merged, tag the release with the version number.

   ```bash
   git tag -a v-<version> -m "Release <version>"
   git push origin v-<version>
   ```

7. **Publish the Release:**
   - After the successful completion of the pull request and merging into the `main` branch, an automatic GitHub Action will be triggered to publish the release.

   The GitHub Action will perform the following steps:
   - Parse the `CHANGELOG.md` file.
   - Generate a release note based on the changes.
   - Attach relevant files (such as `LICENSE`, `CHANGELOG.md`, and the generated `CHANGELOG.txt`) to the GitHub Release.

   No manual intervention is required for this step; the GitHub Action will handle the release process automatically.

8. **Cleanup:**
   - Delete the release branch.

   ```bash
   git branch -d release/v-<version>
   ```

44  README.md
@@ -1,6 +1,9 @@
[](https://red.ht/aap-product-demos)
[](https://workspaces.openshift.com/f?url=https://github.com/ansible/product-demos)

# Official Ansible Product Demos

This is a centralized location for all Ansible Product Demos going forward.
This is a centralized location for Ansible Product Demos. This project is a collection of use cases implemented with Ansible for use with the Ansible Automation Platform.

| Demo Name | Description |
|-----------|-------------|
@@ -8,6 +11,7 @@ This is a centralized location for all Ansible Product Demos going forward.
| [Windows](windows/README.md) | Repository of demos for Windows Server automation |
| [Cloud](cloud/README.md) | Demo for infrastructure and cloud provisioning automation |
| [Network](network/README.md) | Ansible Network automation demos |
| [Satellite](satellite/README.md) | Demos of automation with Red Hat Satellite Server |

## Contributions

@@ -15,25 +19,49 @@ If you would like to contribute to this project please refer to [contribution gu

## Using this project

> This project is tested for compatibility with AAP2 Linux Automation Workshop available to Red Hat Employees and Partners. To use with other Ansible Controller installations, review the [pre-requisite documentation](https://github.com/RedHatGov/ansible-tower-samples/tree/product-demos).
This project is tested for compatibility with the [demo.redhat.com Product Demos Sandbox]([red.ht/aap-product-demos](https://demo.redhat.com/catalog?item=babylon-catalog-prod/sandboxes-gpte.aap-product-demos.prod&utm_source=webapp&utm_medium=share-link)) lab environment. To use with other Ansible Controller installations, review the [prerequisite documentation](https://github.com/RedHatGov/ansible-tower-samples).

> NOTE: demo.redhat.com is available to Red Hat Associates and Partners with a valid account.

1. First you must create a credential for [Automation Hub](https://console.redhat.com/ansible/automation-hub/) to successfully sync collections used by this project.

   1. In the Credentials section of the Controller UI, add a new Credential called `Automation Hub` with the type `Ansible Galaxy/Automation Hub API Token`
   2. You can obtain a token [here](https://console.redhat.com/ansible/automation-hub/token). This page will also provide the Server URL and Auth Server URL.
   3. Next, click on Organizations and edit the `Default` organization. Add your `Automation Hub` credential to the `Galaxy Credentials` section. Don't forget to click Save!!
   3. Next, click on Organizations and edit the `Default` organization. Add your `Automation Hub` credential to the `Galaxy Credentials` section. Don't forget to click **Save**!!

2. If it has not been created for you, add a Project called `Ansible official demo project` with this repo as a source. NOTE: if you are using a fork, be sure that you have the correct URL. Update the project.
3. Finally, Create a Job Template called `Setup` with the following configuration:
   > You can also use an execution environment for disconnected environments. To do this, you must disable collection downloads in the Controller. This can be done in `Settings` > `Job Settings`. This setting prevents the controller from downloading collections listed in the [collections/requirements.yml](collections/requirements.yml) file.

2. If it is not already created for you, add an Execution Environment called `product-demos`

   - Name: product-demos
   - Image: quay.io/acme_corp/product-demos-ee:latest
   - Pull: Only pull the image if not present before running

3. If it is not already created for you, create a Project called `Ansible official demo project` with this repo as a source. NOTE: if you are using a fork, be sure that you have the correct URL. Update the project.

4. Finally, Create a Job Template called `Setup` with the following configuration:

   - Name: Setup
   - Inventory: Workshop Inventory
   - Exec Env: Control Plane EE
   - Inventory: Demo Inventory
   - Exec Env: product-demos
   - Playbook: setup_demo.yml
   - Credentials:

     - Type: Red Hat Ansible Automation Platform
     - Name: Controller Credential
   - Extra vars:

         demo: <linux or windows or cloud or network>

## Bring Your Own Demo

Can't find what you're looking for? Customize this repo to make it your own.

1. Create a fork of this repo.
2. Update the URL of the `Ansible official demo project` in the Controller.
3. Make changes as needed and run the **Setup** job

See the [contribution guide](CONTRIBUTING.md) for more details on how to customize the project.

---

[Privacy statement](https://www.redhat.com/en/about/privacy-policy) | [Terms of use](https://www.redhat.com/en/about/terms-use) | [Security disclosure](https://www.ansible.com/security?hsLang=en-us) | [All policies and guidelines](https://www.redhat.com/en/about/all-policies-guidelines)

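The `Setup` job template described above can also be expressed in the configuration-as-code style this project uses for its `<demo>/setup.yml` files. The sketch below simply restates the bulleted configuration, assuming the `controller_templates` variable layout seen in `cloud/setup.yml`; it is an illustration, not a file from this change:

```yaml
---
controller_templates:
  - name: Setup
    organization: Default
    project: "Ansible official demo project"
    playbook: setup_demo.yml
    inventory: "Demo Inventory"
    execution_environment: product-demos
    credentials:
      - "Controller Credential"   # type: Red Hat Ansible Automation Platform
    extra_vars:
      demo: linux                 # or windows, cloud, network
```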
15  ansible.cfg
@@ -1,3 +1,16 @@
[defaults]
collections_paths=./collections
collections_path=./collections
roles_path=./roles

[galaxy]
server_list = ah,galaxy

[galaxy_server.ah]
# Grab a token at https://console.redhat.com/ansible/automation-hub/token
# Then define it using ANSIBLE_GALAXY_SERVER_AH_TOKEN=""

url=https://console.redhat.com/api/automation-hub/content/published/
auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token

[galaxy_server.galaxy]
url=https://galaxy.ansible.com/

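This server list is what `ansible-galaxy` consults when installing the collections pinned in `collections/requirements.yml`. As an illustration only (the project's actual requirements file is not part of this diff), such a file takes the following shape; `amazon.aws` is used by the playbooks in this change, while the version pin is an assumption:

```yaml
---
collections:
  - name: amazon.aws        # modules such as amazon.aws.ec2_instance used below
    version: ">=6.0.0"      # assumed constraint, for illustration
```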
@@ -10,7 +10,7 @@
- [Configure Credentials](#configure-credentials)
- [Add Workshop Credential Password](#add-workshop-credential-password)
- [Remove Inventory Variables](#remove-inventory-variables)
- [Getting your Puiblic Key for Create Infra Job](#getting-your-puiblic-key-for-create-infra-job)
- [Getting your Puiblic Key for Create Keypair Job](#getting-your-puiblic-key-for-create-keypair-job)
- [Suggested Usage](#suggested-usage)
- [Known Issues](#known-issues)

@@ -20,8 +20,11 @@ This category of demos shows examples of multi-cloud provisioning and management
### Jobs

- [**Cloud / Create Infra**](create_infra.yml) - Creates a VPC with required routing and firewall rules for provisioning VMs
- [**Cloud / Create Keypair**](aws_key.yml) - Creates a keypair for connecting to EC2 instances
- [**Cloud / Create VM**](create_vm.yml) - Create a VM based on a [blueprint](blueprints/) in the selected cloud provider
- [**Cloud / Destroy VM**](destroy_vm.yml) - Destroy a VM that has been created in a cloud provider. VM must be imported into dynamic inventory to be deleted.
- [**Cloud / Snapshot EC2**](snapshot_ec2.yml) - Snapshot a VM that has been created in a cloud provider. VM must be imported into dynamic inventory to be snapshot.
- [**Cloud / Restore EC2 from Snapshot**](snapshot_ec2.yml) - Restore a VM that has been created in a cloud provider. By default, volumes will be restored from their latest snapshot. VM must be imported into dynamic inventory to be patched.

### Inventory

@@ -40,13 +43,13 @@ After running the setup job template, there are a few steps required to make the

### Add Workshop Credential Password

1) Add the password used to login to Controller. This allows you to connect to Windows Servers provisioned with Create VM job. Required until [RFE](https://github.com/ansible/workshops/issues/1597) is complete
1) Add a password that meets the [default complexity requirements](https://learn.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements#reference). This allows you to connect to Windows Servers provisioned with Create VM job. Required until [RFE](https://github.com/ansible/workshops/issues/1597) is complete

### Remove Inventory Variables

1) Remove Workshop Inventory variables on the Details page of the inventory. Required until [RFE](https://github.com/ansible/workshops/issues/1597) is complete

### Getting your Puiblic Key for Create Infra Job
### Getting your Puiblic Key for Create Keypair Job

1) Connect to the command line of your Controller server. This is easiest to do by opening the VS Code Web Editor from the landing page where you found the Controller login details.
2) Open a Terminal Window in the VS Code Web Editor.
@@ -56,9 +59,11 @@ After running the setup job template, there are a few steps required to make the

## Suggested Usage

**Cloud / Create Infra** - The Create Infra job builds cloud infrastructure based on the provider definition in the included `demo.cloud` collection.
**Cloud / Create Keypair** - The Create Keypair job creates an EC2 keypair which can be used when creating EC2 instances to enable SSH access.

**Cloud / Create VM** - The Create VM job builds a VM in the given provider based on the included `demo.cloud` collection. VM [blueprints](blueprints/) define variables for each provider that override the defaults in the collection. When creating VMs it is recommended to follow naming conventions that can be used as host patterns. (eg. VM names: `win1`, `win2`, `win3`. Host Pattern: `win*` )

**Cloud / AWS / Patch EC2 Workflow** - Create a VPC and one or more linux VM(s) in AWS using the `Cloud / Create VPC` and `Cloud / Create VM` templates. Run the workflow and observe the instance snapshots followed by patching operation. Optionally, use the survey to force a patch failure in order to demonstrate the restore path. At this time, the workflow does not support patching Windows instances.

## Known Issues
Azure does not work without a custom execution environment that includes the Azure dependencies.

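The naming/host-pattern convention recommended above pairs with the `_hosts` survey variable that the playbooks in this change use. A short sketch of the pattern (the group name and pattern values are examples only):

```yaml
---
- name: Act on a demo VM naming pattern
  hosts: "{{ _hosts | default('win*') }}"   # e.g. win1, win2, win3 all match win*
  gather_facts: false
  tasks:
    - name: Show which hosts matched the pattern
      ansible.builtin.debug:
        msg: "Selected {{ inventory_hostname }}"
```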
@@ -10,7 +10,7 @@
    ansible.builtin.assert:
      that:
        - aws_key_name is defined
        - aws_region is defined
        - create_vm_aws_region is defined
        - aws_public_key is defined
        - aws_keypair_owner is defined
      fail_msg: "Required variables not set"
@@ -18,7 +18,7 @@
  - name: Create AWS keypair
    amazon.aws.ec2_key:
      name: "{{ aws_key_name }}"
      region: "{{ aws_region }}"
      region: "{{ create_vm_aws_region }}"
      key_material: "{{ aws_public_key }}"
      state: present
      tags:

6  cloud/blueprints/al2023.yml  Normal file
@@ -0,0 +1,6 @@
---
vm_providers:
  - aws
aws_instance_size: t3.micro
aws_image_architecture: x86_64
aws_image_filter: 'al2023-ami-2023*'
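Blueprints are plain variable files consumed by the Create VM job, and the `rhel9` blueprint referenced later in this diff would follow the same shape. The values below are illustrative assumptions, not the repository's actual file:

```yaml
---
vm_providers:
  - aws
aws_instance_size: t3.micro    # assumed instance size
aws_image_architecture: x86_64
aws_image_filter: 'RHEL-9*'    # assumed AMI name filter
```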
@@ -1,12 +0,0 @@
---
- name: Create Cloud Infra
  hosts: localhost
  gather_facts: false
  vars:
    infra_provider: undef
    aws_public_key: undef
  tasks:
    - name: Include provider role
      ansible.builtin.include_role:
        name: "demo.cloud.{{ infra_provider }}"
        tasks_from: create_infra

@@ -1,25 +0,0 @@
---
- name: Create Cloud Infra
  hosts: localhost
  gather_facts: false
  vars:
    vm_name: undef
    vm_owner: undef
    vm_provider: undef
    vm_blueprint: undef

  tasks:
    - name: "Importing {{ vm_blueprint | upper }}"
      ansible.builtin.include_vars:
        file: "blueprints/{{ vm_blueprint }}.yml"

    - name: "Check Provider Compatibility"
      ansible.builtin.assert:
        that: "'{{ vm_provider }}' in {{ vm_blueprint_providers }}"
        fail_msg: "{{ vm_blueprint | upper }} is not available for {{ vm_provider | upper }}"
      when: "vm_blueprint_providers is defined"

    - name: "Building {{ vm_blueprint | upper }}"
      ansible.builtin.include_role:
        name: "demo.cloud.{{ vm_provider }}"
        tasks_from: create_vm

@@ -9,7 +9,6 @@
    aws_tenancy: default
    aws_vpc_cidr_block: 10.0.0.0/16
    aws_subnet_cidr: 10.0.1.0/24
    aws_region: us-east-1
    aws_sg_name: aws-test-sg
    aws_subnet_name: aws-test-subnet
    aws_rt_name: aws-test-rt
@@ -21,7 +20,7 @@
        name: "{{ aws_vpc_name }}"
        cidr_block: "{{ aws_vpc_cidr_block }}"
        tenancy: "{{ aws_tenancy }}"
        region: "{{ aws_region }}"
        region: "{{ create_vm_aws_region }}"
        tags:
          owner: "{{ aws_owner_tag }}"
          purpose: "{{ aws_purpose_tag }}"
@@ -31,7 +30,7 @@
      amazon.aws.ec2_vpc_igw:
        state: present
        vpc_id: "{{ aws_vpc.vpc.id }}"
        region: "{{ aws_region }}"
        region: "{{ create_vm_aws_region }}"
        tags:
          Name: "{{ aws_vpc_name }}"
          owner: "{{ aws_owner_tag }}"
@@ -42,7 +41,7 @@
      amazon.aws.ec2_security_group:
        state: present
        name: "{{ aws_sg_name }}"
        region: "{{ aws_region }}"
        region: "{{ create_vm_aws_region }}"
        description: Inbound WinRM and RDP, http for demo servers and internal AD ports
        rules:
          - proto: tcp
@@ -101,7 +100,7 @@
        state: present
        vpc_id: "{{ aws_vpc.vpc.id }}"
        cidr: "{{ aws_subnet_cidr }}"
        region: "{{ aws_region }}"
        region: "{{ create_vm_aws_region }}"
        map_public: true
        tags:
          Name: "{{ aws_subnet_name }}"
@@ -113,7 +112,7 @@
      amazon.aws.ec2_vpc_route_table:
        state: present
        vpc_id: "{{ aws_vpc.vpc.id }}"
        region: "{{ aws_region }}"
        region: "{{ create_vm_aws_region }}"
        subnets:
          - "{{ aws_subnet.subnet.id }}"
        routes:

10  cloud/restore_ec2.yml  Normal file
@@ -0,0 +1,10 @@
---
- name: Restore ec2 instance from snapshot
  hosts: "{{ _hosts | default(omit) }}"
  gather_facts: false

  tasks:
    - name: Include restore from snapshot role
      ansible.builtin.include_role:
        name: "demo.cloud.aws"
        tasks_from: restore_vm

310  cloud/setup.yml
@@ -1,14 +1,7 @@
---
user_message:
_deployment_id: "{{ lookup('file', playbook_dir + '/.deployment_id') }}"

controller_components:
  - execution_environments
  - projects
  - credentials
  - inventory_sources
  - groups
  - job_templates
  - workflow_job_templates
user_message:

controller_execution_environments:
  - name: Cloud Services Execution Environment
@@ -19,8 +12,7 @@ controller_projects:
    organization: Default
    scm_type: git
    wait: true
    # scm_url: https://github.com/ansible-content-lab/aws.infrastructure_config_demos.git
    scm_url: https://github.com/willtome/aws.infrastructure_config_demos.git
    scm_url: https://github.com/ansible-content-lab/aws.infrastructure_config_demos.git
    default_environment: Cloud Services Execution Environment

controller_credentials:
@@ -28,6 +20,7 @@ controller_credentials:
    credential_type: Amazon Web Services
    organization: Default
    update_secrets: false
    state: exists
    inputs:
      username: REPLACEME
      password: REPLACEME
@@ -43,7 +36,7 @@ controller_inventory_sources:
  - name: AWS Inventory
    organization: Default
    source: ec2
    inventory: Workshop Inventory
    inventory: Demo Inventory
    credential: AWS
    overwrite: true
    source_vars:
@@ -66,7 +59,7 @@ controller_inventory_sources:
  # - name: Azure Inventory
  #   organization: Default
  #   source: azure_rm
  #   inventory: Workshop Inventory
  #   inventory: Demo Inventory
  #   credential: Azure
  #   execution_environment: Ansible Engine 2.9 execution environment
  #   overwrite: true
@@ -82,7 +75,7 @@ controller_inventory_sources:

controller_groups:
  - name: cloud_aws
    inventory: Workshop Inventory
    inventory: Demo Inventory
    variables:
      ansible_user: ec2-user

@@ -93,8 +86,8 @@ controller_templates:
    credentials:
      - AWS
    project: Ansible Cloud Content Lab - AWS
    playbook: playbook_create_peer_network.yml
    inventory: Workshop Inventory
    playbook: playbooks/create_peer_network.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
@@ -109,8 +102,8 @@ controller_templates:
    credentials:
      - AWS
    project: Ansible Cloud Content Lab - AWS
    playbook: playbook_delete_peer_network.yml
    inventory: Workshop Inventory
    playbook: playbooks/delete_peer_network.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
@@ -123,8 +116,8 @@ controller_templates:
    credentials:
      - AWS
    project: Ansible Cloud Content Lab - AWS
    playbook: playbook_create_transit_network.yml
    inventory: Workshop Inventory
    playbook: playbooks/create_transit_network.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
@@ -139,8 +132,8 @@ controller_templates:
    credentials:
      - AWS
    project: Ansible Cloud Content Lab - AWS
    playbook: playbook_delete_transit_network.yml
    inventory: Workshop Inventory
    playbook: playbooks/delete_transit_network.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
@@ -154,7 +147,7 @@ controller_templates:
      - AWS
    project: Ansible official demo project
    playbook: cloud/create_vpc.yml
    inventory: Workshop Inventory
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
@@ -165,7 +158,7 @@ controller_templates:
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: aws_region
          variable: create_vm_aws_region
          required: true
          choices:
            - us-east-1
@@ -182,25 +175,22 @@ controller_templates:
    organization: Default
    credentials:
      - AWS
      - Workshop Credential
      - Demo Credential
    project: Ansible Cloud Content Lab - AWS
    playbook: playbook_create_vm.yml
    inventory: Workshop Inventory
    playbook: playbooks/create_vm.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    survey_enabled: true
    allow_simultaneous: true
    extra_vars:
      aws_region: us-east-1
      aws_keypair_name: aws-test-key
    survey:
      name: ''
      description: ''
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: aws_region
          variable: create_vm_aws_region
          required: true
          choices:
            - us-east-1
@@ -209,19 +199,19 @@ controller_templates:
            - us-west-2
        - question_name: Name
          type: text
          variable: vm_name
          variable: create_vm_vm_name
          required: true
        - question_name: Owner
          type: text
          variable: vm_owner
          variable: create_vm_vm_owner
          required: true
        - question_name: Deployment
          type: text
          variable: vm_deployment
          variable: create_vm_vm_deployment
          required: true
        - question_name: Environment
          type: multiplechoice
          variable: vm_environment
          variable: create_vm_vm_environment
          required: true
          choices:
            - Dev
@@ -237,32 +227,44 @@ controller_templates:
            - rhel9
            - rhel8
            - rhel7
            - al2023
        - question_name: Subnet
          type: text
          variable: aws_vpc_subnet_name
          variable: create_vm_aws_vpc_subnet_name
          required: true
          default: aws-test-subnet
        - question_name: Security Group
          type: text
          variable: aws_securitygroup_name
          variable: create_vm_aws_securitygroup_name
          required: true
          default: aws-test-sg
        - question_name: SSH Keypair
          type: text
          variable: create_vm_aws_keypair_name
          required: true
          default: aws-test-key
        - question_name: AWS Instance Type (defaults to blueprint value)
          type: text
          variable: create_vm_aws_instance_size
          required: false
        - question_name: AWS Image Filter (defaults to blueprint value)
          type: text
          variable: create_vm_aws_image_filter
          required: false

  - name: Cloud / AWS / Delete VM
    job_type: run
    organization: Default
    credentials:
      - AWS
      - Workshop Credential
      - Demo Credential
    project: Ansible Cloud Content Lab - AWS
    playbook: playbook_delete_inventory_vm.yml
    inventory: Workshop Inventory
    playbook: playbooks/delete_inventory_vm.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    survey_enabled: true
    extra_vars:
      aws_region: us-east-1
    survey:
      name: ''
      description: ''
@@ -278,28 +280,14 @@ controller_templates:
    credentials:
      - AWS
    project: Ansible Cloud Content Lab - AWS
    playbook: playbook_create_reports.yml
    inventory: Workshop Inventory
    playbook: playbooks/create_reports.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    extra_vars:
      aws_region: us-east-1
      aws_report: vpc

  - name: Cloud / AWS / Tags Report
    job_type: run
    organization: Default
    credentials:
      - AWS
    project: Ansible Cloud Content Lab - AWS
    playbook: playbook_create_reports.yml
    inventory: Workshop Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    extra_vars:
      aws_report: tags
      reports_aws_bucket_name: reports-pd-{{ _deployment_id }}
    survey_enabled: true
    survey:
      name: ''
@@ -307,7 +295,36 @@ controller_templates:
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: aws_region
          variable: create_vm_aws_region
          required: true
          choices:
            - us-east-1
            - us-east-2
            - us-west-1
            - us-west-2

  - name: Cloud / AWS / Tags Report
    job_type: run
    organization: Default
    credentials:
      - AWS
    project: Ansible Cloud Content Lab - AWS
    playbook: playbooks/create_reports.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    extra_vars:
      aws_report: tags
      reports_aws_bucket_name: reports-pd-{{ _deployment_id }}
    survey_enabled: true
    survey:
      name: ''
      description: ''
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: create_vm_aws_region
          required: true
          choices:
            - us-east-1
@@ -322,7 +339,7 @@ controller_templates:
      - AWS
    project: Ansible official demo project
    playbook: cloud/aws_key.yml
    inventory: Workshop Inventory
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
@@ -333,7 +350,7 @@ controller_templates:
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: aws_region
          variable: create_vm_aws_region
          required: true
          choices:
            - us-east-1
@@ -354,6 +371,91 @@ controller_templates:
          variable: aws_keypair_owner
          required: true

  - name: Cloud / AWS / Snapshot EC2
    job_type: run
    organization: Default
    credentials:
      - AWS
    project: Ansible official demo project
    playbook: cloud/snapshot_ec2.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    survey_enabled: true
    survey:
      name: ''
      description: ''
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: aws_region
          required: true
          default: us-east-1
          choices:
            - us-east-1
            - us-east-2
            - us-west-1
            - us-west-2
        - question_name: Specify target hosts
          type: text
          variable: _hosts
          required: false

  - name: Cloud / AWS / Restore EC2 from Snapshot
    job_type: run
    organization: Default
    credentials:
      - AWS
    project: Ansible official demo project
    playbook: cloud/restore_ec2.yml
    inventory: Demo Inventory
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    survey_enabled: true
    survey:
      name: ''
      description: ''
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: aws_region
          required: true
          default: us-east-1
          choices:
            - us-east-1
            - us-east-2
            - us-west-1
            - us-west-2
        - question_name: Specify target hosts
          type: text
          variable: _hosts
          required: false

  - name: "LINUX / Patching"
    job_type: check
    inventory: "Demo Inventory"
    project: "Ansible official demo project"
    playbook: "linux/patching.yml"
    execution_environment: Default execution environment
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    use_fact_cache: true
    ask_job_type_on_launch: true
    credentials:
      - "Demo Credential"
    survey_enabled: true
    survey:
      name: ''
      description: ''
      spec:
        - question_name: Server Name or Pattern
          type: text
          variable: _hosts
          required: true

controller_workflows:
  - name: Deploy Cloud Stack in AWS
    description: A workflow to deploy a cloud stack
@@ -370,7 +472,7 @@ controller_workflows:
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: aws_region
          variable: create_vm_aws_region
          required: true
          choices:
            - us-east-1
@@ -379,7 +481,7 @@ controller_workflows:
            - us-west-2
        - question_name: Owner
          type: text
          variable: aws_owner_tag
          variable: create_vm_aws_owner_tag
          required: true
        - question_name: Environment
          type: multiplechoice
@@ -400,8 +502,6 @@ controller_workflows:
    simplified_workflow_nodes:
      - identifier: Create Keypair
        unified_job_template: Cloud / AWS / Create Keypair
        extra_data:
          aws_keypair_owner: !unsafe "{{ aws_owner_tag }}"
        success_nodes:
          - VPC Report
        failure_nodes:
@@ -419,16 +519,25 @@ controller_workflows:
      - identifier: VPC Report
        unified_job_template: Cloud / AWS / VPC Report
        all_parents_must_converge: true
        success_nodes:
          - Deploy Windows Blueprint
        always_nodes:
          - Deploy Windows GUI Blueprint
          - Deploy RHEL8 Blueprint
          - Deploy RHEL9 Blueprint
      - identifier: Deploy Windows Blueprint
          - Deploy Windows Core Blueprint
      - identifier: Deploy Windows GUI Blueprint
        unified_job_template: Cloud / AWS / Create VM
        extra_data:
          vm_name: aws_win
          create_vm_vm_name: aws_dc
          vm_blueprint: windows_full
          vm_owner: !unsafe "{{ aws_owner_tag }}"
        success_nodes:
          - Update Inventory
        failure_nodes:
          - Ticket - Instance Failed
      - identifier: Deploy Windows Core Blueprint
        unified_job_template: Cloud / AWS / Create VM
        extra_data:
          create_vm_vm_name: aws_win1
          vm_blueprint: windows_core
        success_nodes:
          - Update Inventory
        failure_nodes:
@@ -436,9 +545,8 @@ controller_workflows:
      - identifier: Deploy RHEL8 Blueprint
        unified_job_template: Cloud / AWS / Create VM
        extra_data:
          vm_name: aws_rhel8
          create_vm_vm_name: aws_rhel8
          vm_blueprint: rhel8
          vm_owner: !unsafe "{{ aws_owner_tag }}"
        success_nodes:
          - Update Inventory
        failure_nodes:
@@ -446,9 +554,8 @@ controller_workflows:
      - identifier: Deploy RHEL9 Blueprint
        unified_job_template: Cloud / AWS / Create VM
        extra_data:
          vm_name: aws_rhel9
          create_vm_vm_name: aws_rhel9
          vm_blueprint: rhel9
          vm_owner: !unsafe "{{ aws_owner_tag }}"
        success_nodes:
          - Update Inventory
        failure_nodes:
@@ -467,3 +574,56 @@ controller_workflows:
          feedback: Failed to create AWS instance
      - identifier: Tag Report
        unified_job_template: Cloud / AWS / Tags Report

  - name: Cloud / AWS / Patch EC2 Workflow
    description: A workflow to patch ec2 instances with snapshot and restore on failure.
    organization: Default
    notification_templates_started: Telemetry
    notification_templates_success: Telemetry
    notification_templates_error: Telemetry
    survey_enabled: true
    survey:
      name: ''
      description: ''
      spec:
        - question_name: AWS Region
          type: multiplechoice
          variable: aws_region
          required: true
          default: us-east-1
          choices:
            - us-east-1
            - us-east-2
            - us-west-1
            - us-west-2
        - question_name: Specify target hosts
          type: text
          variable: _hosts
          required: true
          default: os_linux
    simplified_workflow_nodes:
      - identifier: Project Sync
        unified_job_template: Ansible official demo project
        success_nodes:
          - Take Snapshot
      - identifier: Inventory Sync
        unified_job_template: AWS Inventory
        success_nodes:
          - Take Snapshot
      - identifier: Take Snapshot
        unified_job_template: Cloud / AWS / Snapshot EC2
        success_nodes:
          - Patch Instance
      - identifier: Patch Instance
        unified_job_template: LINUX / Patching
        job_type: run
        failure_nodes:
          - Restore from Snapshot
      - identifier: Restore from Snapshot
        unified_job_template: Cloud / AWS / Restore EC2 from Snapshot
        failure_nodes:
          - Ticket - Restore Failed
      - identifier: Ticket - Restore Failed
        unified_job_template: 'SUBMIT FEEDBACK'
        extra_data:
          feedback: Cloud / AWS / Patch EC2 Workflow | Failed to restore ec2 from snapshot

10  cloud/snapshot_ec2.yml  Normal file
@@ -0,0 +1,10 @@
---
- name: Snapshot ec2 instance
  hosts: "{{ _hosts | default(omit) }}"
  gather_facts: false

  tasks:
    - name: Include snapshot role
      ansible.builtin.include_role:
        name: "demo.cloud.aws"
        tasks_from: snapshot_vm

@@ -21,3 +21,4 @@ aws_env_tag: prod
aws_purpose_tag: ansible_demo
aws_ansiblegroup_tag: cloud
aws_ec2_wait: true
aws_snapshots: {}

@@ -0,0 +1,62 @@
---
- name: AWS | RESTORE VM
  delegate_to: localhost
  block:
    - name: AWS | RESTORE VM | stop vm
      amazon.aws.ec2_instance:
        region: "{{ aws_region }}"
        instance_ids: "{{ instance_id }}"
        state: stopped
        wait: true

    - name: AWS | RESTORE VM | get volumes
      register: r_vol_info
      amazon.aws.ec2_vol_info:
        region: "{{ aws_region }}"
        filters:
          attachment.instance-id: "{{ instance_id }}"

    - name: AWS | RESTORE VM | detach volumes
      loop: "{{ r_vol_info.volumes }}"
      loop_control:
        loop_var: volume
        label: "{{ volume.id }}"
      amazon.aws.ec2_vol:
        region: "{{ aws_region }}"
        id: "{{ volume.id }}"
        instance: None

    - name: AWS | RESTORE VM | attach snapshots from stat
      when: inventory_hostname in aws_snapshots
      loop: "{{ aws_snapshots[inventory_hostname] }}"
      loop_control:
        loop_var: snap
        label: "{{ snap.snapshot_id }}"
      amazon.aws.ec2_vol:
        region: "{{ aws_region }}"
        instance: "{{ instance_id }}"
        snapshot: "{{ snap.snapshot_id }}"
        device_name: "{{ snap.device }}"

    - name: AWS | RESTORE VM | get all snapshots
      when: inventory_hostname not in aws_snapshots
      register: r_snapshots
      amazon.aws.ec2_snapshot_info:
        region: "{{ aws_region }}"
        filters:
          "tag:Name": "{{ inventory_hostname }}"

    - name: AWS | RESTORE VM | create volume from latest snapshot
      when: inventory_hostname not in aws_snapshots
      amazon.aws.ec2_vol:
        region: "{{ aws_region }}"
        instance: "{{ instance_id }}"
        snapshot: "{{ r_snapshots.snapshots[0].snapshot_id }}"
        device_name: "/dev/sda1"

    - name: AWS | RESTORE VM | start vm
      amazon.aws.ec2_instance:
        region: "{{ aws_region }}"
        instance_ids: "{{ instance_id }}"
        state: started
        wait: true
@@ -0,0 +1,42 @@
---
- name: AWS | SNAPSHOT VM
  delegate_to: localhost
  block:
    - name: AWS | SNAPSHOT VM | assert id
      ansible.builtin.assert:
        that: instance_id is defined
        fail_msg: "instance_id is required for snapshot operations"

    - name: AWS | SNAPSHOT VM | include vars
      ansible.builtin.include_vars:
        file: snapshot_vm.yml

    - name: AWS | SNAPSHOT VM | get volumes
      register: r_vol_info
      amazon.aws.ec2_vol_info:
        region: "{{ aws_region }}"
        filters:
          attachment.instance-id: "{{ instance_id }}"

    - name: AWS | SNAPSHOT VM | take snapshots
      loop: "{{ r_vol_info.volumes }}"
      loop_control:
        loop_var: volume
        label: "{{ volume.id }}"
      register: r_snapshots
      amazon.aws.ec2_snapshot:
        region: "{{ aws_region }}"
        volume_id: "{{ volume.id }}"
        description: "Snapshot taken by Red Hat Product demos"
        snapshot_tags: "{{ tags }}"

    - name: AWS | SNAPSHOT VM | format snapshot stat
      ansible.builtin.set_fact:
        snapshot_stat:
          - key: "{{ inventory_hostname }}"
            value: "{{ r_snapshots.results | json_query(aws_ec2_snapshot_query) }}"

    - name: AWS | SNAPSHOT VM | record snapshot with host key
      ansible.builtin.set_stats:
        data:
          aws_snapshots: "{{ snapshot_stat | items2dict }}"
@@ -0,0 +1,11 @@
---
# Set stat_snapshots with model:
# [
#   {
#     "snapshot_id": "snap-0e981f05704e19ffd",
#     "vol_id": "vol-0bd55f313bb7bcdd8",
#     "device": "/dev/sda1"
#   },
#   ...
# ]
aws_ec2_snapshot_query: "[].{snapshot_id: snapshot_id, vol_id: volume.id, device: volume.attachment_set[?instance_id=='{{ instance_id }}'].device | [0]}"
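Combined with the `set_stats` task above, the snapshot data ends up keyed by host, which is exactly what the restore tasks read back via `aws_snapshots[inventory_hostname]`. An illustrative shape of the resulting variable, using the example IDs from the comment above and an assumed hostname:

```yaml
aws_snapshots:
  node1.example.com:              # inventory_hostname becomes the key (assumed host)
    - snapshot_id: snap-0e981f05704e19ffd
      vol_id: vol-0bd55f313bb7bcdd8
      device: /dev/sda1
```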
@@ -1,4 +1,5 @@
from __future__ import (absolute_import, division, print_function)
from __future__ import absolute_import, division, print_function

__metaclass__ = type

from ansible.plugins.callback import CallbackBase
@@ -14,61 +15,65 @@ import xml.dom.minidom

role = "iosxeSTIG"


class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'xml'
    CALLBACK_NAME = 'stig_xml'
    CALLBACK_TYPE = "xml"
    CALLBACK_NAME = "stig_xml"

    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()
        self.rules = {}
        self.stig_path = os.environ.get('STIG_PATH')
        self.XML_path = os.environ.get('XML_PATH')
        self.stig_path = os.environ.get("STIG_PATH")
        self.XML_path = os.environ.get("XML_PATH")
        if self.stig_path is None:
            self.stig_path = os.path.join(os.getcwd(), "roles", role, "files")
            self._display.display('Using STIG_PATH: {}'.format(self.stig_path))
            self._display.display("Using STIG_PATH: {}".format(self.stig_path))
        if self.XML_path is None:
            self.XML_path = os.getcwd()
            self._display.display('Using XML_PATH: {}'.format(self.XML_path))
            self._display.display("Using XML_PATH: {}".format(self.XML_path))

        print("Writing: {}".format(self.XML_path))
        STIG_name = os.path.basename(self.stig_path)
        ET.register_namespace('cdf', 'http://checklists.nist.gov/xccdf/1.2')
        self.tr = ET.Element('{http://checklists.nist.gov/xccdf/1.2}TestResult')
        self.tr.set('id', 'xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}'.format(STIG_name))
        ET.register_namespace("cdf", "http://checklists.nist.gov/xccdf/1.2")
        self.tr = ET.Element("{http://checklists.nist.gov/xccdf/1.2}TestResult")
        self.tr.set(
            "id",
            "xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}".format(STIG_name),
        )
        endtime = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
        self.tr.set('end-time', endtime)
        tg = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}target')
        self.tr.set("end-time", endtime)
        tg = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}target")
        tg.text = platform.node()

    def __get_rev(self, nid):
        rev = '0'
        rev = "0"
        # Check all files for the rule number.
        for file in os.listdir(self.stig_path):
            with open(os.path.join(self.stig_path, file), 'r') as f:
                r = 'SV-{}r(?P<rev>\d)_rule'.format(nid)
            with open(os.path.join(self.stig_path, file), "r") as f:
                r = "SV-{}r(?P<rev>\d)_rule".format(nid)
                m = re.search(r, f.read())
                if m:
                    rev = m.group('rev')
                    rev = m.group("rev")
                    break
        return rev

    def v2_runner_on_ok(self, result):
        name = result._task.get_name()
        m = re.search('stigrule_(?P<id>\d+)', name)
        m = re.search("stigrule_(?P<id>\d+)", name)
        if m:
            nid = m.group('id')
            nid = m.group("id")
        else:
            return
        rev = self.__get_rev(nid)
        key = "{}r{}".format(nid, rev)
        if self.rules.get(key, 'Unknown') != False:
        if self.rules.get(key, "Unknown") != False:
            self.rules[key] = result.is_changed()

    def __set_duplicates(self):
        with open(os.path.join(self.stig_path, 'duplicates.json')) as f:
        with open(os.path.join(self.stig_path, "duplicates.json")) as f:
            dups = json.load(f)
        for d in dups:
            dup_of = str(dups[d][0])
@@ -82,17 +87,19 @@ class CallbackModule(CallbackBase):
    def v2_playbook_on_stats(self, stats):
        self.__set_duplicates()
        for rule, changed in self.rules.items():
            state = 'fail' if changed else 'pass'
            rr = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}rule-result')
            rr.set('idref', 'xccdf_mil.disa.stig_rule_SV-{}_rule'.format(rule))
            rs = ET.SubElement(rr, '{http://checklists.nist.gov/xccdf/1.2}result')
            state = "fail" if changed else "pass"
            rr = ET.SubElement(
                self.tr, "{http://checklists.nist.gov/xccdf/1.2}rule-result"
            )
            rr.set("idref", "xccdf_mil.disa.stig_rule_SV-{}_rule".format(rule))
            rs = ET.SubElement(rr, "{http://checklists.nist.gov/xccdf/1.2}result")
            rs.text = state
        passing = len(self.rules) - sum(self.rules.values())
        sc = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}score')
        sc.set('maximum', str(len(self.rules)))
        sc.set('system', 'urn:xccdf:scoring:flat-unweighted')
        sc = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}score")
        sc.set("maximum", str(len(self.rules)))
        sc.set("system", "urn:xccdf:scoring:flat-unweighted")
        sc.text = str(passing)
        with open(os.path.join(self.XML_path, "xccdf-results.xml"), 'w') as f:
        with open(os.path.join(self.XML_path, "xccdf-results.xml"), "w") as f:
            out = ET.tostring(self.tr)
            pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding='utf-8')
            pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding="utf-8")
            f.write(pretty)

@@ -1,4 +1,5 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
@@ -11,76 +12,82 @@ import os
|
||||
import xml.etree.ElementTree as ET
|
||||
import xml.dom.minidom
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'xml'
|
||||
CALLBACK_NAME = 'stig_xml'
|
||||
CALLBACK_TYPE = "xml"
|
||||
CALLBACK_NAME = "stig_xml"
|
||||
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def _get_STIG_path(self):
|
||||
cwd = os.path.abspath('.')
|
||||
cwd = os.path.abspath(".")
|
||||
for dirpath, dirs, files in os.walk(cwd):
|
||||
if os.path.sep + 'files' in dirpath and '.xml' in files[0]:
|
||||
if os.path.sep + "files" in dirpath and ".xml" in files[0]:
|
||||
return os.path.join(cwd, dirpath, files[0])
|
||||
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
self.rules = {}
|
||||
self.stig_path = os.environ.get('STIG_PATH')
|
||||
self.XML_path = os.environ.get('XML_PATH')
|
||||
self.stig_path = os.environ.get("STIG_PATH")
|
||||
self.XML_path = os.environ.get("XML_PATH")
|
||||
if self.stig_path is None:
|
||||
self.stig_path = self._get_STIG_path()
|
||||
self._display.display('Using STIG_PATH: {}'.format(self.stig_path))
|
||||
self._display.display("Using STIG_PATH: {}".format(self.stig_path))
|
||||
if self.XML_path is None:
|
||||
self.XML_path = tempfile.mkdtemp() + "/xccdf-results.xml"
|
||||
self._display.display('Using XML_PATH: {}'.format(self.XML_path))
|
||||
self._display.display("Using XML_PATH: {}".format(self.XML_path))
|
||||
|
||||
print("Writing: {}".format(self.XML_path))
|
||||
STIG_name = os.path.basename(self.stig_path)
|
||||
ET.register_namespace('cdf', 'http://checklists.nist.gov/xccdf/1.2')
|
||||
self.tr = ET.Element('{http://checklists.nist.gov/xccdf/1.2}TestResult')
|
||||
self.tr.set('id', 'xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}'.format(STIG_name))
|
||||
ET.register_namespace("cdf", "http://checklists.nist.gov/xccdf/1.2")
|
||||
self.tr = ET.Element("{http://checklists.nist.gov/xccdf/1.2}TestResult")
|
||||
self.tr.set(
|
||||
"id",
|
||||
"xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}".format(STIG_name),
|
||||
)
|
||||
endtime = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
|
||||
self.tr.set('end-time', endtime)
|
||||
tg = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}target')
|
||||
self.tr.set("end-time", endtime)
|
||||
tg = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}target")
|
||||
tg.text = platform.node()
|
||||
|
||||
def _get_rev(self, nid):
|
||||
with open(self.stig_path, 'r') as f:
|
||||
r = 'SV-{}r(?P<rev>\d+)_rule'.format(nid)
|
||||
with open(self.stig_path, "r") as f:
|
||||
r = "SV-{}r(?P<rev>\d+)_rule".format(nid)
|
||||
m = re.search(r, f.read())
|
||||
if m:
|
||||
rev = m.group('rev')
|
||||
rev = m.group("rev")
|
||||
else:
|
||||
rev = '0'
|
||||
rev = "0"
|
||||
return rev
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
name = result._task.get_name()
|
||||
m = re.search('stigrule_(?P<id>\d+)', name)
|
||||
m = re.search("stigrule_(?P<id>\d+)", name)
|
||||
if m:
|
||||
nid = m.group('id')
|
||||
nid = m.group("id")
|
||||
else:
|
||||
return
|
||||
rev = self._get_rev(nid)
|
||||
key = "{}r{}".format(nid, rev)
|
||||
if self.rules.get(key, 'Unknown') != False:
|
||||
if self.rules.get(key, "Unknown") != False:
|
||||
self.rules[key] = result.is_changed()
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
for rule, changed in self.rules.items():
|
||||
state = 'fail' if changed else 'pass'
|
||||
rr = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}rule-result')
|
||||
rr.set('idref', 'xccdf_mil.disa.stig_rule_SV-{}_rule'.format(rule))
|
||||
rs = ET.SubElement(rr, '{http://checklists.nist.gov/xccdf/1.2}result')
|
||||
state = "fail" if changed else "pass"
|
||||
rr = ET.SubElement(
|
||||
self.tr, "{http://checklists.nist.gov/xccdf/1.2}rule-result"
|
||||
)
|
||||
rr.set("idref", "xccdf_mil.disa.stig_rule_SV-{}_rule".format(rule))
|
||||
rs = ET.SubElement(rr, "{http://checklists.nist.gov/xccdf/1.2}result")
|
||||
rs.text = state
|
||||
passing = len(self.rules) - sum(self.rules.values())
|
||||
sc = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}score')
|
||||
sc.set('maximum', str(len(self.rules)))
|
||||
sc.set('system', 'urn:xccdf:scoring:flat-unweighted')
|
||||
sc = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}score")
|
||||
sc.set("maximum", str(len(self.rules)))
|
||||
sc.set("system", "urn:xccdf:scoring:flat-unweighted")
|
||||
sc.text = str(passing)
|
||||
with open(self.XML_path, 'wb') as f:
|
||||
with open(self.XML_path, "wb") as f:
|
||||
out = ET.tostring(self.tr)
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding='utf-8')
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding="utf-8")
|
||||
f.write(pretty)
|
||||
@@ -2,15 +2,15 @@
|
||||
|
||||
from ansible.module_utils.basic import * # noqa
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: scan_packages
|
||||
short_description: Return installed packages information as fact data
|
||||
description:
|
||||
- Return information about installed packages as fact data
|
||||
'''
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
EXAMPLES = """
|
||||
# Example fact output:
|
||||
# host | success >> {
|
||||
# "ansible_facts": {
|
||||
@@ -34,21 +34,23 @@ EXAMPLES = '''
|
||||
# "name": "gcc-4.8-base"
|
||||
# }
|
||||
# ]
|
||||
'''
|
||||
"""
|
||||
|
||||
|
||||
def rpm_package_list():
|
||||
import rpm
|
||||
|
||||
trans_set = rpm.TransactionSet()
|
||||
installed_packages = []
|
||||
for package in trans_set.dbMatch():
|
||||
package_details = {
|
||||
'name':package[rpm.RPMTAG_NAME],
|
||||
'version':package[rpm.RPMTAG_VERSION],
|
||||
'release':package[rpm.RPMTAG_RELEASE],
|
||||
'epoch':package[rpm.RPMTAG_EPOCH],
|
||||
'arch':package[rpm.RPMTAG_ARCH],
|
||||
'source':'rpm' }
|
||||
"name": package[rpm.RPMTAG_NAME],
|
||||
"version": package[rpm.RPMTAG_VERSION],
|
||||
"release": package[rpm.RPMTAG_RELEASE],
|
||||
"epoch": package[rpm.RPMTAG_EPOCH],
|
||||
"arch": package[rpm.RPMTAG_ARCH],
|
||||
"source": "rpm",
|
||||
}
|
||||
if installed_packages == []:
|
||||
installed_packages = [package_details]
|
||||
else:
|
||||
@@ -58,16 +60,20 @@ def rpm_package_list():
|
||||
|
||||
def deb_package_list():
|
||||
import apt
|
||||
|
||||
apt_cache = apt.Cache()
|
||||
installed_packages = []
|
||||
apt_installed_packages = [pk for pk in apt_cache.keys() if apt_cache[pk].is_installed]
|
||||
apt_installed_packages = [
|
||||
pk for pk in apt_cache.keys() if apt_cache[pk].is_installed
|
||||
]
|
||||
for package in apt_installed_packages:
|
||||
ac_pkg = apt_cache[package].installed
|
||||
package_details = {
|
||||
'name':package,
|
||||
'version':ac_pkg.version,
|
||||
'arch':ac_pkg.architecture,
|
||||
'source':'apt'}
|
||||
"name": package,
|
||||
"version": ac_pkg.version,
|
||||
"arch": ac_pkg.architecture,
|
||||
"source": "apt",
|
||||
}
|
||||
if installed_packages == []:
|
||||
installed_packages = [package_details]
|
||||
else:
|
||||
@@ -76,13 +82,11 @@ def deb_package_list():
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(os_family=dict(required=True))
|
||||
)
|
||||
ans_os = module.params['os_family']
|
||||
if ans_os in ('RedHat', 'Suse', 'openSUSE Leap'):
|
||||
module = AnsibleModule(argument_spec=dict(os_family=dict(required=True)))
|
||||
ans_os = module.params["os_family"]
|
||||
if ans_os in ("RedHat", "Suse", "openSUSE Leap"):
|
||||
packages = rpm_package_list()
|
||||
elif ans_os == 'Debian':
|
||||
elif ans_os == "Debian":
|
||||
packages = deb_package_list()
|
||||
else:
|
||||
packages = None
|
||||
|
||||
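The scan_packages module above gathers installed-package details and returns them as ansible_facts. A minimal usage sketch, not part of the diff, assuming the module is reachable from the project's library path; the play name and host pattern are illustrative:

# Illustrative only: calling the scan_packages module shown above.
# The module needs the target's os_family to choose between rpm and apt.
- name: Scan installed packages into the fact cache
  hosts: all
  become: true
  tasks:
    - name: Collect installed package facts
      scan_packages:
        os_family: "{{ ansible_facts['os_family'] }}"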
@@ -3,16 +3,18 @@
|
||||
import re
|
||||
from ansible.module_utils.basic import * # noqa
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: scan_services
|
||||
short_description: Return service state information as fact data
|
||||
description:
|
||||
- Return service state information as fact data for various service management utilities
|
||||
'''
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
EXAMPLES = """
|
||||
---
|
||||
- monit: scan_services
|
||||
|
||||
# Example fact output:
|
||||
# host | success >> {
|
||||
# "ansible_facts": {
|
||||
@@ -29,18 +31,17 @@ EXAMPLES = '''
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
'''
|
||||
# }
|
||||
"""
|
||||
|
||||
|
||||
class BaseService(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.incomplete_warning = False
|
||||
|
||||
|
||||
class ServiceScanService(BaseService):
|
||||
|
||||
def gather_services(self):
|
||||
services = {}
|
||||
service_path = self.module.get_bin_path("service")
|
||||
@@ -51,7 +52,10 @@ class ServiceScanService(BaseService):
|
||||
|
||||
# sysvinit
|
||||
if service_path is not None and chkconfig_path is None:
|
||||
rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
'%s --status-all 2>&1 | grep -E "\\[ (\\+|\\-) \\]"' % service_path,
|
||||
use_unsafe_shell=True,
|
||||
)
|
||||
for line in stdout.split("\n"):
|
||||
line_data = line.split()
|
||||
if len(line_data) < 4:
|
||||
@@ -61,84 +65,112 @@ class ServiceScanService(BaseService):
|
||||
service_state = "running"
|
||||
else:
|
||||
service_state = "stopped"
|
||||
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
|
||||
services[service_name] = {
|
||||
"name": service_name,
|
||||
"state": service_state,
|
||||
"source": "sysv",
|
||||
}
|
||||
|
||||
# Upstart
|
||||
if initctl_path is not None and chkconfig_path is None:
|
||||
p = re.compile('^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
|
||||
p = re.compile(
|
||||
"^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$"
|
||||
)
|
||||
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
|
||||
real_stdout = stdout.replace("\r", "")
|
||||
for line in real_stdout.split("\n"):
|
||||
m = p.match(line)
|
||||
if not m:
|
||||
continue
|
||||
service_name = m.group('name')
|
||||
service_goal = m.group('goal')
|
||||
service_state = m.group('state')
|
||||
if m.group('pid'):
|
||||
pid = m.group('pid')
|
||||
service_name = m.group("name")
|
||||
service_goal = m.group("goal")
|
||||
service_state = m.group("state")
|
||||
if m.group("pid"):
|
||||
pid = m.group("pid")
|
||||
else:
|
||||
pid = None # NOQA
|
||||
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
|
||||
payload = {
|
||||
"name": service_name,
|
||||
"state": service_state,
|
||||
"goal": service_goal,
|
||||
"source": "upstart",
|
||||
}
|
||||
services[service_name] = payload
|
||||
|
||||
# RH sysvinit
|
||||
elif chkconfig_path is not None:
|
||||
# print '%s --status-all | grep -E "is (running|stopped)"' % service_path
|
||||
p = re.compile(
|
||||
'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
|
||||
'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
|
||||
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
|
||||
"(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+"
|
||||
"[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)"
|
||||
)
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s" % chkconfig_path, use_unsafe_shell=True
|
||||
)
|
||||
# Check for special cases where stdout does not fit pattern
|
||||
match_any = False
|
||||
for line in stdout.split('\n'):
|
||||
for line in stdout.split("\n"):
|
||||
if p.match(line):
|
||||
match_any = True
|
||||
if not match_any:
|
||||
p_simple = re.compile('(?P<service>.*?)\s+(?P<rl0>on|off)')
|
||||
p_simple = re.compile("(?P<service>.*?)\s+(?P<rl0>on|off)")
|
||||
match_any = False
|
||||
for line in stdout.split('\n'):
|
||||
for line in stdout.split("\n"):
|
||||
if p_simple.match(line):
|
||||
match_any = True
|
||||
if match_any:
|
||||
# Try extra flags " -l --allservices" needed for SLES11
|
||||
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
|
||||
elif '--list' in stderr:
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s -l --allservices" % chkconfig_path, use_unsafe_shell=True
|
||||
)
|
||||
elif "--list" in stderr:
|
||||
# Extra flag needed for RHEL5
|
||||
rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
|
||||
for line in stdout.split('\n'):
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s --list" % chkconfig_path, use_unsafe_shell=True
|
||||
)
|
||||
for line in stdout.split("\n"):
|
||||
m = p.match(line)
|
||||
if m:
|
||||
service_name = m.group('service')
|
||||
service_state = 'stopped'
|
||||
if m.group('rl3') == 'on':
|
||||
rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
|
||||
service_name = m.group("service")
|
||||
service_state = "stopped"
|
||||
if m.group("rl3") == "on":
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s %s status" % (service_path, service_name),
|
||||
use_unsafe_shell=True,
|
||||
)
|
||||
service_state = rc
|
||||
if rc in (0,):
|
||||
service_state = 'running'
|
||||
service_state = "running"
|
||||
# elif rc in (1,3):
|
||||
else:
|
||||
if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
|
||||
if (
|
||||
"root" in stderr
|
||||
or "permission" in stderr.lower()
|
||||
or "not in sudoers" in stderr.lower()
|
||||
):
|
||||
self.incomplete_warning = True
|
||||
continue
|
||||
else:
|
||||
service_state = 'stopped'
|
||||
service_data = {"name": service_name, "state": service_state, "source": "sysv"}
|
||||
service_state = "stopped"
|
||||
service_data = {
|
||||
"name": service_name,
|
||||
"state": service_state,
|
||||
"source": "sysv",
|
||||
}
|
||||
services[service_name] = service_data
|
||||
return services
|
||||
|
||||
|
||||
class SystemctlScanService(BaseService):
|
||||
|
||||
def systemd_enabled(self):
|
||||
# Check if init is the systemd command, using comm as cmdline could be symlink
|
||||
try:
|
||||
f = open('/proc/1/comm', 'r')
|
||||
f = open("/proc/1/comm", "r")
|
||||
except IOError:
|
||||
# If comm doesn't exist, old kernel, no systemd
|
||||
return False
|
||||
for line in f:
|
||||
if 'systemd' in line:
|
||||
if "systemd" in line:
|
||||
return True
|
||||
return False
|
||||
|
||||
@@ -146,10 +178,16 @@ class SystemctlScanService(BaseService):
|
||||
services = {}
|
||||
if not self.systemd_enabled():
|
||||
return None
|
||||
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
|
||||
systemctl_path = self.module.get_bin_path(
|
||||
"systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"]
|
||||
)
|
||||
if systemctl_path is None:
|
||||
return None
|
||||
rc, stdout, stderr = self.module.run_command("%s list-unit-files --type=service | tail -n +2 | head -n -2" % systemctl_path, use_unsafe_shell=True)
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s list-unit-files --type=service | tail -n +2 | head -n -2"
|
||||
% systemctl_path,
|
||||
use_unsafe_shell=True,
|
||||
)
|
||||
for line in stdout.split("\n"):
|
||||
line_data = line.split()
|
||||
if len(line_data) != 2:
|
||||
@@ -158,7 +196,11 @@ class SystemctlScanService(BaseService):
|
||||
state_val = "running"
|
||||
else:
|
||||
state_val = "stopped"
|
||||
services[line_data[0]] = {"name": line_data[0], "state": state_val, "source": "systemd"}
|
||||
services[line_data[0]] = {
|
||||
"name": line_data[0],
|
||||
"state": state_val,
|
||||
"source": "systemd",
|
||||
}
|
||||
return services
|
||||
|
||||
|
||||
@@ -175,11 +217,16 @@ def main():
|
||||
if svcmod.incomplete_warning:
|
||||
incomplete_warning = True
|
||||
if len(all_services) == 0:
|
||||
results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
|
||||
results = dict(
|
||||
skipped=True,
|
||||
msg="Failed to find any services. Sometimes this is due to insufficient privileges.",
|
||||
)
|
||||
else:
|
||||
results = dict(ansible_facts=dict(services=all_services))
|
||||
if incomplete_warning:
|
||||
results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
|
||||
results[
|
||||
"msg"
|
||||
] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
|
||||
module.exit_json(**results)
|
||||
|
||||
|
||||
|
||||
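scan_services probes sysvinit, upstart, chkconfig, and systemd and returns a services fact keyed by service name. A hedged sketch of calling it and inspecting the result; the debug step is an illustrative assumption:

# Illustrative only: calling the scan_services module shown above.
- name: Record service states as facts
  hosts: all
  become: true
  tasks:
    - name: Collect service facts
      scan_services:

    - name: Show what was collected
      ansible.builtin.debug:
        var: ansible_facts.services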
@@ -1,31 +1,34 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: win_scan_packages
|
||||
short_description: Return Package state information as fact data
|
||||
description:
|
||||
- Return Package state information as fact data for various Packages
|
||||
'''
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
EXAMPLES = """
|
||||
- monit: win_scan_packages
|
||||
|
||||
# Example fact output:
|
||||
# host | success >> {
|
||||
# "ansible_facts": {
|
||||
# "packages": [
|
||||
{
|
||||
"name": "Mozilla Firefox 76.0.1 (x64 en-US)",
|
||||
"version": "76.0.1",
|
||||
"publisher": "Mozilla",
|
||||
"arch": "Win64"
|
||||
},
|
||||
{
|
||||
"name": "Mozilla Maintenance Service",
|
||||
"version": "76.0.1",
|
||||
"publisher": "Mozilla",
|
||||
"arch": "Win64"
|
||||
},
|
||||
# {
|
||||
# "name": "Mozilla Firefox 76.0.1 (x64 en-US)",
|
||||
# "version": "76.0.1",
|
||||
# "publisher": "Mozilla",
|
||||
# "arch": "Win64"
|
||||
# },
|
||||
# {
|
||||
# "name": "Mozilla Maintenance Service",
|
||||
# "version": "76.0.1",
|
||||
# "publisher": "Mozilla",
|
||||
# "arch": "Win64"
|
||||
# }
|
||||
'''
|
||||
# ]
|
||||
# }
|
||||
# }
|
||||
"""
|
||||
|
||||
@@ -1,34 +1,37 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: win_scan_services
|
||||
short_description: Return service state information as fact data
|
||||
description:
|
||||
- Return service state information as fact data for various service management utilities
|
||||
'''
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
EXAMPLES = """
|
||||
- monit: win_scan_services
|
||||
|
||||
# Example fact output:
|
||||
# host | success >> {
|
||||
# "ansible_facts": {
|
||||
# "services": [
|
||||
{
|
||||
"name": "AllJoyn Router Service",
|
||||
"win_svc_name": "AJRouter",
|
||||
"state": "stopped"
|
||||
},
|
||||
{
|
||||
"name": "Application Layer Gateway Service",
|
||||
"win_svc_name": "ALG",
|
||||
"state": "stopped"
|
||||
},
|
||||
{
|
||||
"name": "Application Host Helper Service",
|
||||
"win_svc_name": "AppHostSvc",
|
||||
"state": "running"
|
||||
},
|
||||
# {
|
||||
# "name": "AllJoyn Router Service",
|
||||
# "win_svc_name": "AJRouter",
|
||||
# "state": "stopped"
|
||||
# },
|
||||
# {
|
||||
# "name": "Application Layer Gateway Service",
|
||||
# "win_svc_name": "ALG",
|
||||
# "state": "stopped"
|
||||
# },
|
||||
# {
|
||||
# "name": "Application Host Helper Service",
|
||||
# "win_svc_name": "AppHostSvc",
|
||||
# "state": "running"
|
||||
# }
|
||||
'''
|
||||
# ]
|
||||
# }
|
||||
# }
|
||||
"""
|
||||
|
||||
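win_scan_packages and win_scan_services document the same fact-scan pattern for Windows targets. A minimal sketch, assuming the PowerShell implementations sit alongside these documentation stubs and take no parameters:

# Illustrative only: the Windows counterparts of the scan modules.
- name: Scan Windows packages and services into the fact cache
  hosts: windows
  tasks:
    - name: Collect installed package facts
      win_scan_packages:

    - name: Collect service facts
      win_scan_services: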
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Create web directory if it does not exist
|
||||
ansible.builtin.file:
|
||||
path: "{{ file_path }}"
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
file_path: "{{ web_path | default('/var/www/html/reports') }}"
|
||||
vendor:
|
||||
---
|
||||
file_path: "{{ web_path | default('/var/www/html/reports') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
vendor: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
ios: &my_value 'Cisco'
|
||||
nxos: *my_value
|
||||
iosxr: *my_value
|
||||
junos: "Juniper"
|
||||
eos: "Arista"
|
||||
transport:
|
||||
transport: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
cliconf: "Network_CLI"
|
||||
netconf: "NETCONF"
|
||||
nxapi: "NX-API"
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
detailedreport: true
|
||||
detailedreport: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Create HTML report
|
||||
ansible.builtin.template:
|
||||
src: report.j2
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
file_path: /var/www/html
|
||||
---
|
||||
file_path: /var/www/html # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
email_from: tower@shadowman.dev
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev
|
||||
to_emails_list: "{{ to_emails.split(',') }}"
|
||||
---
|
||||
email_from: tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails_list: "{{ to_emails.split(',') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Create HTML report
|
||||
ansible.builtin.template:
|
||||
src: report.j2
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
file_path: /var/www/html
|
||||
---
|
||||
file_path: /var/www/html # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
exclude_packages:
|
||||
exclude_packages: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- authselect
|
||||
- authselect-compat
|
||||
- authselect-libs
|
||||
- fprintd-pam
|
||||
allow_reboot: true
|
||||
allow_reboot: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
win_update_categories:
|
||||
win_update_categories: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- Application
|
||||
- Connectors
|
||||
- CriticalUpdates
|
||||
@@ -11,4 +11,4 @@ win_update_categories:
|
||||
- Tools
|
||||
- UpdateRollups
|
||||
- Updates
|
||||
allow_reboot: true
|
||||
allow_reboot: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
detailedreport: true
|
||||
detailedreport: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Create HTML report
|
||||
ansible.builtin.template:
|
||||
src: report.j2
|
||||
@@ -15,7 +16,7 @@
|
||||
|
||||
- name: Copy logos over
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item }}"
|
||||
src: "{{ logo }}"
|
||||
dest: "{{ file_path }}"
|
||||
directory_mode: true
|
||||
mode: "0644"
|
||||
@@ -24,6 +25,8 @@
|
||||
- "redhat-ansible-logo.svg"
|
||||
- "server.png"
|
||||
check_mode: false
|
||||
loop_control:
|
||||
loop_var: logo
|
||||
|
||||
- name: Display link to inventory report
|
||||
ansible.builtin.debug:
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
file_path: /var/www/html/reports
|
||||
---
|
||||
file_path: /var/www/html/reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
email_from: tower@shadowman.dev
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev
|
||||
to_emails_list: "{{ to_emails.split(',') }}"
|
||||
---
|
||||
email_from: tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails_list: "{{ to_emails.split(',') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Create HTML report
|
||||
ansible.builtin.template:
|
||||
src: report.j2
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
file_path: /var/www/html/reports
|
||||
---
|
||||
file_path: /var/www/html/reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
---
|
||||
doc_root: /var/www/html
|
||||
reports_dir: reports
|
||||
doc_root: /var/www/html # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
reports_dir: reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
---
|
||||
doc_root: C:\Inetpub\wwwroot
|
||||
reports_dir: reports
|
||||
doc_root: C:\Inetpub\wwwroot # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
reports_dir: reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
detailedreport: true
|
||||
detailedreport: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
- name: Copy logos over
|
||||
ansible.windows.win_copy:
|
||||
src: "{{ item }}"
|
||||
src: "{{ logo }}"
|
||||
dest: "{{ file_path }}"
|
||||
directory_mode: true
|
||||
loop:
|
||||
@@ -22,6 +22,8 @@
|
||||
- "redhat-ansible-logo.svg"
|
||||
- "server.png"
|
||||
check_mode: false
|
||||
loop_control:
|
||||
loop_var: logo
|
||||
|
||||
# - name: display link to inventory report
|
||||
# ansible.builtin.debug:
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
file_path: C:\Inetpub\wwwroot\reports
|
||||
file_path: C:\Inetpub\wwwroot\reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
---
|
||||
email_from: tower@shadowman.dev
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev
|
||||
to_emails_list: "{{ to_emails.split(',') }}"
|
||||
email_from: tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails_list: "{{ to_emails.split(',') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Create HTML report
|
||||
ansible.windows.win_template:
|
||||
src: report.j2
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
file_path: C:\Inetpub\wwwroot\reports
|
||||
file_path: C:\Inetpub\wwwroot\reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
instance_name: "{{ inventory_hostname | regex_replace('_', '-') }}"
|
||||
activation_key: "{{ 'RHEL' + ansible_distribution_major_version + '_' + env }}"
|
||||
rex_user: root # "{{ ansible_user }}"
|
||||
force_register: true
|
||||
instance_name: "{{ inventory_hostname | regex_replace('_', '-') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
activation_key: "{{ 'RHEL' + ansible_distribution_major_version + '_' + env }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
rex_user: root # "{{ ansible_user }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
force_register: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
---
|
||||
rhsm_enabled_repos:
|
||||
rhsm_enabled_repos: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- rhel-7-server-rpms
|
||||
# - rhel-7-server-satellite-maintenance-6.11-rpms
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
rhsm_enabled_repos:
|
||||
rhsm_enabled_repos: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- rhel-8-for-x86_64-baseos-rpms
|
||||
- rhel-8-for-x86_64-appstream-rpms
|
||||
- satellite-client-6-for-rhel-8-x86_64-rpms
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
foreman_server_url: "{{ lookup('env', 'SATELLITE_SERVER') }}"
|
||||
foreman_username: "{{ lookup('env', 'SATELLITE_USERNAME') }}"
|
||||
foreman_password: "{{ lookup('env', 'SATELLITE_PASSWORD') }}"
|
||||
foreman_validate_certs: "{{ lookup('env', 'FOREMAN_VALIDATE_CERTS') | default(true) }}"
|
||||
capsule_server: "{{ foreman_server_url }}"
|
||||
capsule_port: '9090'
|
||||
policy_name: 'all'
|
||||
policy_scan: "{{ policy_name }}"
|
||||
crontab_hour: 2
|
||||
crontab_minute: 0
|
||||
crontab_weekdays: 0
|
||||
foreman_operations_scap_client_secure_logging: true
|
||||
---
|
||||
foreman_server_url: "{{ lookup('env', 'SATELLITE_SERVER') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
foreman_username: "{{ lookup('env', 'SATELLITE_USERNAME') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
foreman_password: "{{ lookup('env', 'SATELLITE_PASSWORD') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
foreman_validate_certs: "{{ lookup('env', 'FOREMAN_VALIDATE_CERTS') | default(true) }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
capsule_server: "{{ foreman_server_url }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
capsule_port: '9090' # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
policy_name: 'all' # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
policy_scan: "{{ policy_name }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
crontab_hour: 2 # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
crontab_minute: 0 # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
crontab_weekdays: 0 # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
foreman_operations_scap_client_secure_logging: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,37 +1,30 @@
|
||||
---
|
||||
collections:
|
||||
- name: ansible.controller
|
||||
version: 4.3.0
|
||||
- name: infra.ah_configuration
|
||||
- name: infra.controller_configuration
|
||||
- name: redhat_cop.controller_configuration
|
||||
version: 2.2.5
|
||||
# linux
|
||||
- name: redhat.insights
|
||||
version: 1.0.7
|
||||
- name: redhat.rhel_system_roles
|
||||
version: 1.20.0
|
||||
- name: ansible.posix
|
||||
- name: community.general
|
||||
version: 6.3.0
|
||||
version: ">=8.0.0"
|
||||
- name: containers.podman
|
||||
- name: redhat.insights
|
||||
- name: redhat.rhel_system_roles
|
||||
# windows
|
||||
- name: ansible.windows
|
||||
- name: chocolatey.chocolatey
|
||||
- name: community.windows
|
||||
version: 1.12.0
|
||||
- name: ansible.windows
|
||||
version: 1.13.0
|
||||
# cloud
|
||||
- name: azure.azcollection
|
||||
version: 1.14.0
|
||||
- name: amazon.aws
|
||||
version: 5.2.0
|
||||
# satellite
|
||||
- name: redhat.satellite
|
||||
version: 3.8.0
|
||||
# network
|
||||
- name: cisco.ios
|
||||
version: 4.4.0
|
||||
- name: cisco.nxos
|
||||
version: 4.1.0
|
||||
- name: cisco.iosxr
|
||||
version: 5.0.0
|
||||
- name: ansible.netcommon
|
||||
version: 5.0.0
|
||||
- name: cisco.ios
|
||||
- name: cisco.iosxr
|
||||
- name: cisco.nxos
|
||||
# openshift
|
||||
- name: kubernetes.core
|
||||
- name: redhat.openshift
|
||||
- name: redhat.openshift_virtualization
|
||||
|
||||
@@ -26,8 +26,10 @@ This category of demos shows examples of linux operations and management with An
|
||||
- [**Linux / Fact Scan**](https://github.com/ansible/awx-facts-playbooks/blob/master/scan_facts.yml) - Run a fact, package, and service scan against a system and store in fact cache
|
||||
- [**Linux / Podman Webserver**](podman.yml) - Install and run a Podman webserver with given text on the home page
|
||||
- [**Linux / System Roles**](system_roles.yml) - Apply Linux system roles to servers. Must provide variables and role names.
|
||||
- [**Linux / Compliance Enforce**](compliance.yml) - Apply remediation to meet the requirements of a compliance baseline
|
||||
- [**Linux / Insights Compliance Scan**](insights_compliance_scan.yml) - Run a Compliance scan based on the configuration in [Red Hat Insights][https://console.redhat.com]
|
||||
- [**Linux / DISA STIG**](compliance.yml) - Apply the RHEL STIG supplemental content from DISA
|
||||
- [**Linux / Multi-profile compliance**](compliance-enforce.yml) - Apply remediation from [Compliance as Code](https://github.com/ComplianceAsCode/content) to enforce the requirements of a specified compliance profile
|
||||
- [**Linux / Report Compliance**](compliance-report.yml) - Run an OpenSCAP report against a specified compliance profile
|
||||
- [**Linux / Insights Compliance Scan**](insights_compliance_scan.yml) - Run a Compliance scan based on the configuration in [Red Hat Insights](https://console.redhat.com)
|
||||
|
||||
### Inventory
|
||||
|
||||
@@ -86,6 +88,10 @@ timesync_ntp_servers:
|
||||
pool: yes
|
||||
iburst: yes
|
||||
```
|
||||
**Linux / Compliance** - Apply compliance profile hardening configuration from [here](https://galaxy.ansible.com/RedHatOfficial). BE AWARE: this could have unintended results based on the current state of your machine. Always test on a single machine before distributing at scale. For example, AWS instances have NOPASSWD allowed for sudo. Running STIG compliance without adding `sudo_remove_nopasswd: false` to extra_vars on the job template will lock you out of the machine. This variable is configured on the job template by default for this reason.
|
||||
**Linux / DISA STIG** - Apply the RHEL STIG security hardening configuration using the [DISA Supplemental Automation Content](https://public.cyber.mil/stigs/supplemental-automation-content/). BE AWARE: this could have unintended results based on the current state of your machine. Always test on a single machine before distributing at scale. For example, AWS instances have NOPASSWD allowed for sudo. Running STIG compliance without adding `sudo_remove_nopasswd: false` to extra_vars on the job template will lock you out of the machine. This variable is configured on the job template by default for this reason.
|
||||
|
||||
**Linux / Multi-profile Compliance** - Apply security hardening configuration from a [supported compliance profile role](compliance_profiles.md). BE AWARE: this could have unintended results based on the current state of your machine. Always test on a single machine before distributing at scale. For example, AWS instances have NOPASSWD allowed for sudo. Applying certain compliance profiles without adding `sudo_remove_nopasswd: false` to extra_vars on the job template will lock you out of the machine. This variable is configured on the job template by default for this reason.
|
||||
|
||||
**Linux / Report Compliance** - Run this template before running the "**Linux / Multi-profile Compliance**" template and again afterwards to highlight the changes made by the enforcement template. By default, the reports are available by pointing a web browser to the system(s) where the report runs. By setting the `use_httpd` variable to "false" in the template survey the reports will instead be stored on the target node in the /tmp/oscap-reports directory.
|
||||
|
||||
**Linux / Insights Compliance Scan** - Scan the system according to the compliance profile configured via [Red Hat Insights](https://console.redhat.com). NOTE: This job will fail if the systems haven't been registered with Insights and associated with a relevant compliance profile. A survey when running the job will ask if you have configured all systems with a compliance profile, and effectively skip all tasks in the job template if the answer is "No".
|
||||
|
||||
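The README text above calls out two safety switches surfaced as extra variables, sudo_remove_nopasswd for the hardening templates and use_httpd for the report template, alongside the compliance_profile selector. A hedged sketch of extra_vars one might pass when launching those job templates; grouping them in one block is purely illustrative:

# Illustrative extra_vars for the compliance job templates described above.
compliance_profile: cis        # any profile from compliance_profiles.md
sudo_remove_nopasswd: false    # keep NOPASSWD sudo intact, e.g. on AWS instances
use_httpd: false               # write reports to /tmp/oscap-reports instead of serving them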
linux/compliance-enforce.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
---
|
||||
- name: Apply compliance profile
|
||||
hosts: "{{ _hosts | default(omit) }}"
|
||||
become: true
|
||||
vars:
|
||||
compliance_profile: undef
|
||||
|
||||
tasks:
|
||||
- name: Check os type
|
||||
ansible.builtin.assert:
|
||||
that: "ansible_os_family == 'RedHat'"
|
||||
|
||||
- name: Run Compliance Profile
|
||||
ansible.builtin.include_role:
|
||||
name: "redhatofficial.rhel{{ ansible_distribution_major_version }}_{{ compliance_profile }}"
|
||||
...
|
||||
linux/compliance-report.yml (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
---
|
||||
- name: Generate OpenSCAP compliance report
|
||||
hosts: '{{ _hosts | default(omit) }}'
|
||||
become: true
|
||||
|
||||
vars:
|
||||
openscap_packages:
|
||||
- openscap-scanner
|
||||
- openscap-utils
|
||||
- scap-security-guide
|
||||
compliance_profile: ospp
|
||||
use_httpd: true
|
||||
|
||||
tasks:
|
||||
- name: Get our facts straight
|
||||
ansible.builtin.set_fact:
|
||||
_profile: '{{ compliance_profile | replace("pci_dss", "pci-dss") }}'
|
||||
_report_dir: /tmp/oscap-reports
|
||||
|
||||
- name: Ensure OpenSCAP tools are installed
|
||||
ansible.builtin.dnf:
|
||||
name: '{{ openscap_packages }}'
|
||||
state: present
|
||||
|
||||
- name: Configure httpd
|
||||
when: use_httpd | bool
|
||||
block:
|
||||
- name: Install httpd
|
||||
ansible.builtin.dnf:
|
||||
name: httpd
|
||||
state: present
|
||||
notify: Restart httpd
|
||||
|
||||
- name: Override report directory
|
||||
ansible.builtin.set_fact:
|
||||
_report_dir: /var/www/html/oscap-reports
|
||||
|
||||
- name: Gather service facts
|
||||
ansible.builtin.service_facts:
|
||||
|
||||
- name: Enable firewall http service
|
||||
ansible.posix.firewalld:
|
||||
service: http
|
||||
state: enabled
|
||||
immediate: true
|
||||
permanent: true
|
||||
when: "'firewalld.service' in ansible_facts.services"
|
||||
|
||||
- name: Disable httpd welcome page
|
||||
ansible.builtin.file:
|
||||
path: /etc/httpd/conf.d/welcome.conf
|
||||
state: absent
|
||||
notify: Restart httpd
|
||||
|
||||
- name: Ensure report directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ _report_dir }}/{{ _profile }}'
|
||||
state: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0755
|
||||
|
||||
- name: Set report name
|
||||
ansible.builtin.set_fact:
|
||||
_report: '{{ _report_dir }}/{{ _profile }}/report-{{ ansible_date_time.iso8601 }}.html'
|
||||
|
||||
- name: Generate compliance report
|
||||
ansible.builtin.command: >-
|
||||
oscap xccdf eval --profile {{ _profile }} --report {{ _report }}
|
||||
/usr/share/xml/scap/ssg/content/ssg-rhel{{ ansible_distribution_major_version }}-ds.xml
|
||||
args:
|
||||
creates: '{{ _report }}'
|
||||
register: _oscap
|
||||
failed_when: _oscap.rc not in [0, 2]
|
||||
|
||||
- name: Set report permissions
|
||||
ansible.builtin.file:
|
||||
path: '{{ _report }}'
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0644
|
||||
|
||||
handlers:
|
||||
- name: Restart httpd
|
||||
ansible.builtin.service:
|
||||
name: httpd
|
||||
state: restarted
|
||||
enabled: true
|
||||
|
||||
...
|
||||
linux/compliance_profiles.md (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
# Supported Compliance Profiles
|
||||
|
||||
The following compliance profiles are supported by the [**Linux / Enforce Compliance**](README.md#jobs) job template:
|
||||
|
||||
| **Profile** | **Role Repository** |
|
||||
|-------------|---------------------|
|
||||
| CIS | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-cis |
|
||||
| CJIS | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-cjis |
|
||||
| CUI | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-cui |
|
||||
| HIPAA | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-hipaa |
|
||||
| OSPP | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-ospp |
|
||||
| PCI-DSS | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-pci-dss |
|
||||
| DISA STIG | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-stig |
|
||||
|
||||
These roles are derived from the [Compliance as Code](https://github.com/ComplianceAsCode/content) project, which provides SCAP content used by the [OpenSCAP](https://www.open-scap.org/) `oscap` tool.
|
||||
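Because the enforcement playbook includes these roles by name (redhatofficial.rhel<major version>_<profile>), they must be installed alongside the collections. A minimal requirements sketch, assuming the lowercase Galaxy role names used elsewhere in this project:

# Illustrative roles entries; names follow the redhatofficial.* pattern
# referenced by the compliance-enforce playbook above.
roles:
  - name: redhatofficial.rhel8_cis
  - name: redhatofficial.rhel8_stig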
@@ -30,4 +30,3 @@
|
||||
- name: Printing to terminal application information
|
||||
ansible.builtin.debug:
|
||||
msg: "The application: {{ application }} has been installed"
|
||||
when: result.changed | bool
|
||||
|
||||
@@ -65,7 +65,7 @@
|
||||
- name: Configure Red Hat insights
|
||||
ansible.builtin.import_role:
|
||||
name: redhat.insights.insights_client
|
||||
vars:
|
||||
vars: # noqa var-naming[no-role-prefix]
|
||||
insights_display_name: "{{ inventory_hostname }}"
|
||||
insights_tags:
|
||||
env: "{{ env }}"
|
||||
|
||||
@@ -29,9 +29,22 @@
|
||||
- ansible_local.insights.system_id is defined
|
||||
|
||||
- name: Deploy report server
|
||||
when: not ansible_check_mode
|
||||
delegate_to: "{{ report_server }}"
|
||||
run_once: true # noqa: run-once[task]
|
||||
block:
|
||||
- name: Install firewall dependencies
|
||||
ansible.builtin.dnf:
|
||||
name:
|
||||
- firewalld
|
||||
- python3-firewall
|
||||
state: present
|
||||
|
||||
- name: Start firewalld
|
||||
ansible.builtin.service:
|
||||
name: firewalld
|
||||
state: started
|
||||
|
||||
- name: Build report server
|
||||
ansible.builtin.include_role:
|
||||
name: "{{ item }}"
|
||||
|
||||
linux/setup.yml (156 changed lines)
@@ -3,13 +3,9 @@ user_message:
|
||||
- Update the 'activation_key' and 'org_id' extra variables for 'LINUX / Register with Insights'. https://access.redhat.com/management/activation_keys
|
||||
- Update Credential for Insights Inventory with Red Hat account.
|
||||
- Add variables for system_roles. https://console.redhat.com/ansible/automation-hub/repo/published/redhat/rhel_system_roles
|
||||
controller_components:
|
||||
- projects
|
||||
- credential_types
|
||||
- credentials
|
||||
- inventory_sources
|
||||
- job_templates
|
||||
|
||||
# "!unsafe" used to pass raw jinja2 through to the injector definition, see
|
||||
# https://github.com/redhat-cop/controller_configuration/tree/devel/roles/credential_types#formating-injectors
|
||||
controller_credential_types:
|
||||
- name: Insights Collection
|
||||
kind: cloud
|
||||
@@ -24,20 +20,21 @@ controller_credential_types:
|
||||
secret: true
|
||||
injectors:
|
||||
env:
|
||||
INSIGHTS_USER: "{% raw %}{ { insights_user }}{% endraw %}"
|
||||
INSIGHTS_PASSWORD: "{% raw %}{ { insights_password }}{% endraw %}"
|
||||
INSIGHTS_USER: !unsafe '{{ insights_user }}'
|
||||
INSIGHTS_PASSWORD: !unsafe '{{ insights_password }}'
|
||||
|
||||
controller_credentials:
- name: Insights Inventory
credential_type: Insights Collection
organization: Default
state: exists
inputs:
insights_user: REPLACEME
insights_password: REPLACEME

controller_inventory_sources:
- name: Insights Inventory
inventory: Workshop Inventory
inventory: Demo Inventory
source: scm
source_project: Ansible official demo project
source_path: linux/inventory.insights.yml
@@ -46,14 +43,14 @@ controller_inventory_sources:
controller_templates:
- name: "LINUX / Register with Insights"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/ec2_register.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
extra_vars:
activation_key: !unsafe "RHEL{{ ansible_distribution_major_version }}_{{ env }}"
@@ -85,7 +82,7 @@ controller_templates:

- name: "LINUX / Troubleshoot"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/tshoot.yml"
notification_templates_started: Telemetry
@@ -93,7 +90,7 @@ controller_templates:
notification_templates_error: Telemetry
use_fact_cache: true
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -106,14 +103,14 @@ controller_templates:

- name: "LINUX / Temporary Sudo"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/temp_sudo.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -135,7 +132,7 @@ controller_templates:

- name: "LINUX / Patching"
job_type: check
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/patching.yml"
execution_environment: Default execution environment
@@ -145,7 +142,7 @@ controller_templates:
use_fact_cache: true
ask_job_type_on_launch: true
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -158,7 +155,7 @@ controller_templates:

- name: "LINUX / Start Service"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/service_start.yml"
notification_templates_started: Telemetry
@@ -166,7 +163,7 @@ controller_templates:
notification_templates_error: Telemetry
use_fact_cache: true
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -183,7 +180,7 @@ controller_templates:

- name: "LINUX / Stop Service"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/service_stop.yml"
notification_templates_started: Telemetry
@@ -191,7 +188,7 @@ controller_templates:
notification_templates_error: Telemetry
use_fact_cache: true
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -208,14 +205,14 @@ controller_templates:

- name: "LINUX / Run Shell Script"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/run_script.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -233,14 +230,14 @@ controller_templates:
- name: "LINUX / Fact Scan"
project: "Ansible official demo project"
playbook: linux/fact_scan.yml
inventory: Workshop Inventory
inventory: Demo Inventory
execution_environment: Default execution environment
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
use_fact_cache: true
credentials:
- Workshop Credential
- Demo Credential
survey_enabled: true
survey:
name: ''
@@ -253,14 +250,14 @@ controller_templates:

- name: "LINUX / Podman Webserver"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/podman.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -278,7 +275,7 @@ controller_templates:

- name: "LINUX / System Roles"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/system_roles.yml"
notification_templates_started: Telemetry
@@ -292,7 +289,7 @@ controller_templates:
selinux_policy: targeted
selinux_state: enforcing
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -305,7 +302,7 @@ controller_templates:

- name: "LINUX / Install Web Console (cockpit)"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/system_roles.yml"
notification_templates_started: Telemetry
@@ -317,7 +314,7 @@ controller_templates:
system_roles:
- cockpit
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -339,14 +336,14 @@ controller_templates:

- name: "LINUX / DISA STIG"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/compliance.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
extra_vars:
sudo_remove_nopasswd: false
survey_enabled: true
@@ -359,13 +356,96 @@ controller_templates:
variable: _hosts
required: true

- name: "LINUX / Multi-profile Compliance"
job_type: run
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/compliance-enforce.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Demo Credential"
extra_vars:
# used by CIS profile role
sudo_require_authentication: false
# used by STIG profile role
sudo_remove_nopasswd: false
sudo_remove_no_authenticate: false
# used by CIS and STIG profile role
accounts_password_set_max_life_existing: false
# used by the CJIS profile role
service_firewalld_enabled: false
firewalld_sshd_port_enabled: false
survey_enabled: true
survey:
name: ''
description: ''
spec:
- question_name: Server Name or Pattern
type: text
variable: _hosts
required: true
- question_name: Compliance Profile
type: multiplechoice
variable: compliance_profile
required: true
choices:
- cis
- cjis
- cui
- hipaa
- ospp
- pci_dss
- stig

- name: "LINUX / Multi-profile Compliance Report"
job_type: run
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/compliance-report.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Demo Credential"
survey_enabled: true
survey:
name: ''
description: ''
spec:
- question_name: Server Name or Pattern
type: text
variable: _hosts
required: true
- question_name: Compliance Profile
type: multiplechoice
variable: compliance_profile
required: true
choices:
- cis
- cjis
- cui
- hipaa
- ospp
- pci_dss
- stig
- question_name: Use httpd on the target host(s) to access reports locally?
type: multiplechoice
variable: use_httpd
required: true
choices:
- "true"
- "false"
default: "true"

- name: "LINUX / Insights Compliance Scan"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/insights_compliance_scan.yml"
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -386,7 +466,7 @@ controller_templates:

- name: "LINUX / Deploy Application"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "linux/deploy_application.yml"
notification_templates_started: Telemetry
@@ -394,7 +474,7 @@ controller_templates:
notification_templates_error: Telemetry
use_fact_cache: true
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -408,3 +488,5 @@ controller_templates:
type: text
variable: application
required: true

...
18
multi_select_setup.yml
Normal file
@@ -0,0 +1,18 @@
---
- name: Setup more than one demo
hosts: localhost
gather_facts: false
vars:
launch_jobs:
name: "SETUP"
wait: true
tasks:
- name: Build controller launch jobs
ansible.builtin.set_fact:
controller_launch_jobs: "{{ (controller_launch_jobs | d([]))
+ [launch_jobs | combine( {'extra_vars': { 'demo': item }})] }}"
loop: "{{ demos }}"

- name: Default Components
ansible.builtin.include_role:
name: "infra.controller_configuration.job_launch"
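The playbook above builds one SETUP launch entry per item in `demos`. A hedged example of the extra variables it could be launched with might look like the sketch below; the demo names are assumed from the per-demo `setup.yml` directories elsewhere in this change, not defined by the playbook itself.

```yaml
# Illustrative extra-vars for multi_select_setup.yml; demo names are assumptions.
demos:
  - linux
  - windows
  - openshift
```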
@@ -26,7 +26,7 @@
gather_network_resources: all
when: ansible_network_os == 'cisco.iosxr.iosxr'

# The dig lookup requires the python 'dnspython' library
# # The dig lookup requires the python 'dnspython' library
# - name: Resolve IP address
# ansible.builtin.set_fact:
# ansible_host: "{{ lookup('community.general.dig', inventory_hostname)}}"

@@ -1,15 +1,6 @@
---
user_message:

controller_components:
- execution_environments
- projects
- inventories
- hosts
- inventory_sources
- inventory_source_update
- job_templates

controller_execution_environments:
- name: Networking Execution Environment
image: quay.io/nleiva/ee-network-image
@@ -88,7 +79,7 @@ controller_templates:
execution_environment: Networking Execution Environment
use_fact_cache: true
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
20
openshift/README.md
Normal file
@@ -0,0 +1,20 @@
# OpenShift Demos

## Table of Contents
- [OpenShift Demos](#openshift-demos)
- [Table of Contents](#table-of-contents)
- [About These Demos](#about-these-demos)
- [Jobs](#jobs)
- [Pre Setup](#pre-setup)

## About These Demos
This category of demos shows examples of OpenShift operations and management with Ansible Automation Platform. The list of demos can be found below. See the [Pre Setup](#pre-setup) section of this document for what is required before running these demos.

### Jobs
- [**OpenShift / Dev Spaces**](devspaces.yml) - Install and deploy Dev Spaces on an OCP cluster. After this job has run successfully, log in to your OCP cluster and click the application icon (to the left of the bell icon in the top right) to access Dev Spaces.

## Pre Setup
This demo requires an OpenShift cluster to deploy to. If you do not have a cluster to use, one can be requested from [demo.redhat.com](https://demo.redhat.com).
- Search for the [Red Hat OpenShift Container Platform 4.12 Workshop](https://demo.redhat.com/catalog?item=babylon-catalog-prod/sandboxes-gpte.ocp412-wksp.prod&utm_source=webapp&utm_medium=share-link) item in the catalog and request it with the number of users you would like for Dev Spaces.
- Log in using the admin credentials provided. Click the `admin` username at the top right and select `Copy login command`.
- Authenticate and click `Display Token`. This information will be used to populate the OpenShift Credential after you run the setup.
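The API URL and token shown by `Display Token` map onto the credential defined in `openshift/setup.yml` (shown later in this diff). Roughly, with placeholder values, the populated credential would look like the sketch below; the host and token are illustrative examples, not real values.

```yaml
# Illustrative only: replace the CHANGEME placeholders from openshift/setup.yml
# with the API host and bearer token displayed by the OCP console.
controller_credentials:
  - name: OpenShift Credential
    organization: Default
    credential_type: OpenShift or Kubernetes API Bearer Token
    inputs:
      host: https://api.cluster-example.opentlc.com:6443   # assumed example API URL
      bearer_token: sha256~EXAMPLETOKEN                     # token from "Display Token"
      verify_ssl: false
```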
112
openshift/devspaces.yml
Normal file
@@ -0,0 +1,112 @@
---
- name: Deploy Dev Spaces on OCP
hosts: localhost
gather_facts: false

tasks:
- name: Create namespace
redhat.openshift.k8s:
name: eclipse-che
api_version: v1
kind: Namespace
state: present

- name: Create dev spaces subscription
redhat.openshift.k8s:
state: present
definition:
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
labels:
operators.coreos.com/devspaces.openshift-operators: ''
name: devspaces
namespace: openshift-operators
spec:
channel: stable
installPlanApproval: Automatic
name: devspaces
source: redhat-operators
sourceNamespace: openshift-marketplace

- name: Wait for dev spaces operator to install
kubernetes.core.k8s_info:
api_version: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: checlusters.org.eclipse.che
register: crd_che
until: crd_che.resources | list | length == 1
retries: 10
delay: 30

- name: Wait until devspaces-operator is up
kubernetes.core.k8s_info:
api_version: v1
kind: Deployment
name: devspaces-operator
namespace: openshift-operators
register: pod_list
until: pod_list | json_query('resources[*].status.readyReplicas') | unique == [1]
retries: 10
delay: 30

- name: Deploy dev spaces
redhat.openshift.k8s:
state: present
definition:
apiVersion: org.eclipse.che/v2
kind: CheCluster
metadata:
name: devspaces
namespace: eclipse-che
spec:
components:
cheServer:
debug: false
logLevel: INFO
dashboard: {}
database:
credentialsSecretName: postgres-credentials
externalDb: false
postgresDb: dbche
postgresHostName: postgres
postgresPort: '5432'
pvc:
claimSize: 1Gi
devWorkspace: {}
devfileRegistry: {}
imagePuller:
enable: false
spec: {}
metrics:
enable: true
pluginRegistry:
openVSXURL: 'https://open-vsx.org'
containerRegistry: {}
devEnvironments:
startTimeoutSeconds: 300
secondsOfRunBeforeIdling: -1
maxNumberOfWorkspacesPerUser: -1
containerBuildConfiguration:
openShiftSecurityContextConstraint: container-build
disableContainerBuildCapabilities: true
defaultEditor: che-incubator/che-code/insiders
defaultComponents:
- container:
image: >-
registry.redhat.io/devspaces/udi-rhel8@sha256:aa39ede33bcbda6aa2723d271c79ab8d8fd388c7dfcbc3d4ece745b7e9c84193
sourceMapping: /projects
name: universal-developer-image
defaultNamespace:
autoProvision: true
template: <username>-devspaces
secondsOfInactivityBeforeIdling: 1800
storage:
pvcStrategy: per-user
gitServices: {}
networking:
auth:
gateway:
configLabels:
app: che
component: che-gateway-config
135
openshift/gitlab.yml
Normal file
@@ -0,0 +1,135 @@
---
- name: Deploy gitlab on OCP
hosts: localhost
gather_facts: false

tasks:
- name: Create cert-manager-operator namespace
redhat.openshift.k8s:
name: cert-manager-operator
api_version: v1
kind: Namespace
state: present

- name: Create OperatorGroup object for cert-manager-operator
redhat.openshift.k8s:
state: present
definition:
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: cert-manager-operator-operatorgroup
namespace: cert-manager-operator
spec:
targetNamespaces:
- cert-manager-operator

- name: Create cert-manager-operator subscription
redhat.openshift.k8s:
state: present
definition:
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
labels:
operators.coreos.com/openshift-cert-manager-operator.cert-manager-operator: ''
name: openshift-cert-manager-operator
namespace: cert-manager-operator
spec:
channel: stable-v1
installPlanApproval: Automatic
name: openshift-cert-manager-operator
source: redhat-operators
sourceNamespace: openshift-marketplace

- name: Create gitlab-system namespace
redhat.openshift.k8s:
name: gitlab-system
api_version: v1
kind: Namespace
state: present

- name: Create OperatorGroup object for gitlab-operator-kubernetes
redhat.openshift.k8s:
state: present
definition:
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: gitlab-operator-kubernetes-operatorgroup
namespace: gitlab-system
spec:
targetNamespaces:
- gitlab-system

- name: Create gitlab subscription
redhat.openshift.k8s:
state: present
definition:
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
labels:
operators.coreos.com/gitlab-operator-kubernetes.gitlab-system: ''
name: gitlab-operator-kubernetes
namespace: gitlab-system
spec:
channel: stable
installPlanApproval: Automatic
name: gitlab-operator-kubernetes
source: community-operators
sourceNamespace: openshift-marketplace

- name: Wait for gitlab operator to install
kubernetes.core.k8s_info:
api_version: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: gitlabs.apps.gitlab.com
register: crd_gitlab
until: crd_gitlab.resources | list | length == 1
retries: 10
delay: 30

- name: Wait until gitlab-operator is up
kubernetes.core.k8s_info:
api_version: v1
kind: Deployment
name: gitlab-controller-manager
namespace: gitlab-system
register: pod_list
until: pod_list | json_query('resources[*].status.readyReplicas') | unique >= [1]
retries: 10
delay: 30

- name: Grab url for Gitlab spec
ansible.builtin.set_fact:
cluster_domain: "apps{{ lookup('ansible.builtin.env', 'K8S_AUTH_HOST') | regex_search('\\.[^:]*') }}"
when: cluster_domain is undefined

- name: Deploy a GitLab instance
redhat.openshift.k8s:
state: present
definition:
apiVersion: apps.gitlab.com/v1beta1
kind: GitLab
metadata:
name: gitlab
namespace: gitlab-system
spec:
chart:
version: "7.6.2"
values:
nginx-ingress:
enabled: false
certmanager:
install: false
global:
hosts:
domain: "{{ cluster_domain }}" # apps.cluster-9xrlv.9xrlv.sandbox644.opentlc.com
ingress:
class: none
configureCertmanager: true
annotations:
route.openshift.io/termination: "edge"
certmanager-issuer:
email: "{{ cert_email | default('nobody@nowhere.nosite') }}"
33
openshift/setup.yml
Normal file
@@ -0,0 +1,33 @@
---
controller_credentials:
- name: OpenShift Credential
organization: Default
credential_type: OpenShift or Kubernetes API Bearer Token
state: exists
inputs:
host: CHANGEME
bearer_token: CHANGEME
verify_ssl: false

controller_templates:
- name: OpenShift / Dev Spaces
job_type: run
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "openshift/devspaces.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "OpenShift Credential"

- name: OpenShift / GitLab
job_type: run
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "openshift/gitlab.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "OpenShift Credential"
46
roles/requirements.yml
Normal file
@@ -0,0 +1,46 @@
---
roles:
# RHEL 7 compliance roles from ComplianceAsCode
- name: redhatofficial.rhel7_cis
version: 0.1.69
- name: redhatofficial.rhel7_cjis
version: 0.1.69
- name: redhatofficial.rhel7_cui
version: 0.1.67
- name: redhatofficial.rhel7_hipaa
version: 0.1.69
- name: redhatofficial.rhel7_ospp
version: 0.1.69
- name: redhatofficial.rhel7_pci_dss
version: 0.1.69
- name: redhatofficial.rhel7_stig
version: 0.1.69
# RHEL 8 compliance roles from ComplianceAsCode
- name: redhatofficial.rhel8_cis
version: 0.1.69
- name: redhatofficial.rhel8_cjis
version: 0.1.69
- name: redhatofficial.rhel8_cui
version: 0.1.69
- name: redhatofficial.rhel8_hipaa
version: 0.1.69
- name: redhatofficial.rhel8_ospp
version: 0.1.69
- name: redhatofficial.rhel8_pci_dss
version: 0.1.69
- name: redhatofficial.rhel8_stig
version: 0.1.69
# RHEL 9 compliance roles from ComplianceAsCode
- name: redhatofficial.rhel9_cis
version: 0.1.68
- name: redhatofficial.rhel9_cui
version: 0.1.64
- name: redhatofficial.rhel9_hipaa
version: 0.1.68
- name: redhatofficial.rhel9_ospp
version: 0.1.68
- name: redhatofficial.rhel9_pci_dss
version: 0.1.68
- name: redhatofficial.rhel9_stig
version: 0.1.64
...
@@ -1,13 +1,6 @@
---
user_message:

controller_components:
- credential_types
- credentials
- inventory_sources
- job_templates
- job_launch
- workflow_job_templates

controller_credential_types:
- name: Satellite Collection
kind: cloud
@@ -38,6 +31,7 @@ controller_credentials:
- name: Satellite Inventory
credential_type: Red Hat Satellite 6
organization: Default
state: exists
inputs:
host: https://satellite.example.com
username: admin
@@ -45,6 +39,7 @@ controller_credentials:
- name: Satellite Credential
credential_type: Satellite Collection
organization: Default
state: exists
inputs:
host: https://satellite.example.com
username: admin
@@ -52,7 +47,7 @@ controller_credentials:

controller_inventory_sources:
- name: Satellite Inventory
inventory: Workshop Inventory
inventory: Demo Inventory
credential: Satellite Inventory
source: satellite6
update_on_launch: false
@@ -81,12 +76,12 @@ controller_templates:
- name: LINUX / Register with Satellite
project: Ansible official demo project
playbook: satellite/server_register.yml
inventory: Workshop Inventory
inventory: Demo Inventory
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- Workshop Credential
- Demo Credential
- Satellite Credential
extra_vars:
org_id: "Default_Organization"
@@ -111,14 +106,14 @@ controller_templates:
- name: LINUX / Compliance Scan with Satellite
project: Ansible official demo project
playbook: satellite/server_openscap.yml
inventory: Workshop Inventory
execution_environment: Ansible Engine 2.9 execution environment
inventory: Demo Inventory
# execution_environment: Ansible Engine 2.9 execution environment
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- Satellite Credential
- Workshop Credential
- Demo Credential
extra_vars:
policy_scan: all
survey_enabled: true
@@ -134,7 +129,7 @@ controller_templates:
- name: SATELLITE / Publish Content View Version
project: Ansible official demo project
playbook: satellite/satellite_publish.yml
inventory: Workshop Inventory
inventory: Demo Inventory
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
@@ -156,7 +151,7 @@ controller_templates:
- name: SATELLITE / Promote Content View Version
project: Ansible official demo project
playbook: satellite/satellite_promote.yml
inventory: Workshop Inventory
inventory: Demo Inventory
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
@@ -186,7 +181,7 @@ controller_templates:
- name: SETUP / Satellite
project: Ansible official demo project
playbook: satellite/setup_satellite.yml
inventory: Workshop Inventory
inventory: Demo Inventory
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
@@ -20,7 +20,7 @@
- name: Publish CV
ansible.builtin.include_role:
name: redhat.satellite.content_view_publish
vars:
vars: # noqa var-naming[no-role-prefix]
satellite_content_views:
- RHEL7
- RHEL8

@@ -6,16 +6,11 @@
tasks:
- name: Default Components
ansible.builtin.include_role:
name: "redhat_cop.controller_configuration.{{ item }}"
loop: "{{ controller_components }}"
vars:
controller_components:
- notification_templates
- job_templates
- settings
name: infra.controller_configuration.dispatch
vars: # noqa var-naming[no-role-prefix]
controller_execution_environments:
- name: product-demos
image: http://quay.io/acme_corp/product-demos-ee:latest
image: quay.io/acme_corp/product-demos-ee:latest
controller_organizations:
- name: Default
default_environment: product-demos
@@ -30,7 +25,7 @@
controller_templates:
- name: "SUBMIT FEEDBACK"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "feedback.yml"
execution_environment: Default execution environment
@@ -54,15 +49,16 @@
- name: "SESSION_COOKIE_AGE"
value: 180000

- name: Create reusable deployment ID
ansible.builtin.set_fact:
_deployment_id: '{{ lookup("ansible.builtin.password", "{{ playbook_dir }}/.deployment_id", chars=["ascii_lowercase", "digits"], length=5) }}'

- name: "Include configuration for {{ demo }}"
ansible.builtin.include_vars: "{{ demo }}/setup.yml"

- name: Demo Components
ansible.builtin.include_role:
name: "redhat_cop.controller_configuration.{{ item }}"
loop: "{{ controller_components }}"
when:
- controller_components | d("") | length > 0
name: "infra.controller_configuration.dispatch"

- name: Log Demo
ansible.builtin.uri:
@@ -19,11 +19,14 @@ This category of demos shows examples of Windows Server operations and managemen
- [**WINDOWS / Arbitrary Powershell**](arbitrary_powershell.yml) - Run given Powershell script (default: retrieve cat fact from API)
- [**WINDOWS / Powershell Script**](powershell_script.yml) - Run a Powershell script stored in source control to query services
- [**WINDOWS / Powershell DSC configuring password requirements**](powershell_dsc.yml) - Configure password complexity with Powershell desired state config
- [**WINDOWS / Create Active Directory Domain**](active_directory/create_ad_domain.yml) - Create a new AD Domain
- [**WINDOWS / Helpdesk new user portal**](active_directory/helpdesk_new_user_portal.yml) - Create user in AD Domain
- [**WINDOWS / Create Active Directory Domain**](create_ad_domain.yml) - Create a new AD Domain
- [**WINDOWS / Helpdesk new user portal**](helpdesk_new_user_portal.yml) - Create user in AD Domain
- [**WINDOWS / Join Active Directory Domain**](join_ad_domain.yml) - Join computer to AD Domain

## Suggested Usage

**WINDOWS / Create Active Directory Domain** - This job can take some time to complete. It is recommended to run it ahead of time if you would like to demo creating a helpdesk user.

**WINDOWS / Helpdesk new user portal** - This job is dependent on the Create Active Directory Domain job completing before users can be created.

**WINDOWS / Join Active Directory Domain** - This job is dependent on the Create Active Directory Domain job completing before computers can be joined.
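The ordering described above is left to the operator; one way to encode it would be a controller workflow chaining the AD templates. The sketch below is hypothetical and not part of this change; it assumes the simplified node schema of the controller_configuration workflow_job_templates component already listed in this repository's setup files.

```yaml
# Hypothetical sketch: run Create Domain first, then Join Domain on success.
controller_workflows:
  - name: "WINDOWS / AD / End to End"   # assumed workflow name, not defined in this repo
    organization: Default
    simplified_workflow_nodes:
      - identifier: create-domain
        unified_job_template: "WINDOWS / AD / Create Domain"
        success_nodes:
          - join-domain
      - identifier: join-domain
        unified_job_template: "WINDOWS / AD / Join Domain"
```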
@@ -1,6 +1,6 @@
---
- hosts: windows
name: Rollback playbook
- name: Rollback playbook
hosts: windows
tasks:
- name: "Rollback this step"
ansible.builtin.debug:

@@ -13,30 +13,23 @@
ansible.windows.win_domain:
dns_domain_name: ansible.local
safe_mode_password: "{{ lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) }}"
register: new_forest
notify:
- Reboot host
- Wait for AD services
- Reboot again
- Wait for AD services again

- name: Reboot the target host
ansible.windows.win_reboot:
reboot_timeout: 3600
when: new_forest.reboot_required

- name: Wait up to 10min for AD web services to start
community.windows.win_wait_for_process:
process_name_exact: Microsoft.ActiveDirectory.WebServices
pre_wait_delay: 60
state: present
timeout: 600
sleep: 10
remote_user: Administrator
- name: Flush handlers
ansible.builtin.meta: flush_handlers

- name: Create some groups
community.windows.win_domain_group:
name: "{{ item.name }}"
scope: global
loop:
- { name: "GroupA" }
- { name: "GroupB" }
- { name: "GroupC" }
- name: "GroupA"
- name: "GroupB"
- name: "GroupC"
retries: 5
delay: 10

@@ -47,8 +40,36 @@
password: "{{ lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) }}"
update_password: on_create
loop:
- { name: "UserA", groups: "GroupA" }
- { name: "UserB", groups: "GroupB" }
- { name: "UserC", groups: "GroupC" }
- name: "UserA"
groups: "GroupA"
- name: "UserB"
groups: "GroupB"
- name: "UserC"
groups: "GroupC"
retries: 5
delay: 10

handlers:
- name: Reboot host
ansible.windows.win_reboot:
reboot_timeout: 3600

- name: Wait for AD services
community.windows.win_wait_for_process:
process_name_exact: Microsoft.ActiveDirectory.WebServices
pre_wait_delay: 60
state: present
timeout: 600
sleep: 10

- name: Reboot again
ansible.windows.win_reboot:
reboot_timeout: 3600

- name: Wait for AD services again
community.windows.win_wait_for_process:
process_name_exact: Microsoft.ActiveDirectory.WebServices
pre_wait_delay: 60
state: present
timeout: 600
sleep: 10
30
windows/join_ad_domain.yml
Normal file
@@ -0,0 +1,30 @@
---
- name: Join Active Directory domain
hosts: "{{ _hosts | default(omit) }}"
gather_facts: false

tasks:
- name: Set a single address on the adapter named Ethernet
ansible.windows.win_dns_client:
adapter_names: 'Ethernet*'
dns_servers: "{{ hostvars[domain_controller]['private_ip_address'] }}"

- name: Ensure Demo OU exists
delegate_to: "{{ domain_controller }}"
community.windows.win_domain_ou:
name: Demo
state: present

- name: Join ansible.local domain
register: r_domain_membership
ansible.windows.win_domain_membership:
dns_domain_name: ansible.local
hostname: "{{ inventory_hostname }}"
domain_admin_user: "{{ ansible_user }}@ansible.local"
domain_admin_password: "{{ ansible_password }}"
domain_ou_path: "OU=Demo,DC=ansible,DC=local"
state: domain

- name: Reboot windows machine
when: r_domain_membership.reboot_required
ansible.windows.win_reboot:
@@ -4,6 +4,18 @@
gather_facts: false

tasks:
- name: Setup PsGallery
ansible.windows.win_powershell:
script: |
$nuget_version = (Get-PackageProvider -Name NuGet -ListAvailable).version
$nuget_target_version = [Version]::new('2.8.5.201')
if( $nuget_version -lt $nuget_target_version ){
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Install-PackageProvider -Name NuGet -MinimumVersion $nuget_target_version -Force
Install-Module -Name packagemanagement -Force
Install-Module -Name powershellget -Force
}

- name: Setup the SecurityPolicyDSC module
community.windows.win_psmodule:
name: SecurityPolicyDSC
@@ -2,10 +2,6 @@
user_message: |
''

controller_components:
- projects
- job_templates

controller_projects:
- name: Fact Scan
organization: Default
@@ -15,14 +11,14 @@ controller_projects:
controller_templates:
- name: "WINDOWS / Install IIS"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/install_iis.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -41,7 +37,7 @@ controller_templates:
use_fact_cache: true
job_type: check
ask_job_type_on_launch: true
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/patching.yml"
execution_environment: Default execution environment
@@ -49,7 +45,7 @@ controller_templates:
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -87,14 +83,14 @@ controller_templates:

- name: "WINDOWS / Chocolatey install multiple"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/windows_choco_multiple.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -107,14 +103,14 @@ controller_templates:

- name: "WINDOWS / Chocolatey install specific"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/windows_choco_specific.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -131,14 +127,14 @@ controller_templates:

- name: "WINDOWS / Run PowerShell"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/powershell.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -156,14 +152,14 @@ controller_templates:

- name: "WINDOWS / Query Services"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/powershell_script.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -184,14 +180,14 @@ controller_templates:

- name: "WINDOWS / Configuring Password Requirements"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/powershell_dsc.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -204,14 +200,14 @@ controller_templates:

- name: "WINDOWS / AD / Create Domain"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/create_ad_domain.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -222,16 +218,41 @@ controller_templates:
variable: _hosts
required: false

- name: "WINDOWS / AD / Join Domain"
job_type: run
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/join_ad_domain.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Demo Credential"
survey_enabled: true
survey:
name: ''
description: ''
spec:
- question_name: Server Name or Pattern
type: text
variable: _hosts
required: true
- question_name: Domain Controller Inventory Hostname
type: text
variable: domain_controller
required: true
description: Inventory hostname for domain controller previously established using the Create Domain template

- name: "WINDOWS / AD / New User"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/helpdesk_new_user_portal.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''
@@ -268,14 +289,14 @@ controller_templates:

- name: "WINDOWS / DISA STIG"
job_type: run
inventory: "Workshop Inventory"
inventory: "Demo Inventory"
project: "Ansible official demo project"
playbook: "windows/compliance.yml"
notification_templates_started: Telemetry
notification_templates_success: Telemetry
notification_templates_error: Telemetry
credentials:
- "Workshop Credential"
- "Demo Credential"
survey_enabled: true
survey:
name: ''