Merge branch 'main' into usr_app
This commit is contained in:
17
.github/workflows/ansible-lint.yml
vendored
17
.github/workflows/ansible-lint.yml
vendored
@@ -1,17 +0,0 @@
|
||||
---
|
||||
name: Ansible Lint
|
||||
on:
|
||||
- push
|
||||
- pull_request_target
|
||||
|
||||
env:
|
||||
ANSIBLE_GALAXY_SERVER_AH_TOKEN: ${{ secrets.ANSIBLE_GALAXY_SERVER_AH_TOKEN }}
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Ansible Lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# this action implicitly calls actions/checkout
|
||||
- name: Run ansible-lint
|
||||
uses: ansible/ansible-lint@v6.18.0
|
||||
50
.github/workflows/linter.yml.old
vendored
50
.github/workflows/linter.yml.old
vendored
@@ -1,50 +0,0 @@
|
||||
---
|
||||
###########################
|
||||
###########################
|
||||
## Linter GitHub Actions ##
|
||||
###########################
|
||||
###########################
|
||||
name: Lint Code Base
|
||||
|
||||
#
|
||||
# Documentation:
|
||||
# https://help.github.com/en/articles/workflow-syntax-for-github-actions
|
||||
#
|
||||
|
||||
#############################
|
||||
# Start the job on all push #
|
||||
#############################
|
||||
on: [push, pull_request]
|
||||
|
||||
###############
|
||||
# Set the Job #
|
||||
###############
|
||||
jobs:
|
||||
build:
|
||||
# Name the Job
|
||||
name: Lint Code Base
|
||||
# Set the agent to run on
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
##################
|
||||
# Load all steps #
|
||||
##################
|
||||
steps:
|
||||
##########################
|
||||
# Checkout the code base #
|
||||
##########################
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# Full git history is needed to get a proper list of changed files within `super-linter`
|
||||
fetch-depth: 0
|
||||
|
||||
################################
|
||||
# Run Linter against code base #
|
||||
################################
|
||||
- name: Lint Code Base
|
||||
uses: github/super-linter@v4
|
||||
env:
|
||||
VALIDATE_ALL_CODEBASE: false
|
||||
DEFAULT_BRANCH: main
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
19
.github/workflows/pre-commit.yml
vendored
Normal file
19
.github/workflows/pre-commit.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
name: pre-commit
|
||||
on:
|
||||
- push
|
||||
- pull_request_target
|
||||
|
||||
env:
|
||||
ANSIBLE_GALAXY_SERVER_AH_TOKEN: ${{ secrets.ANSIBLE_GALAXY_SERVER_AH_TOKEN }}
|
||||
|
||||
jobs:
|
||||
pre-commit:
|
||||
name: pre-commit
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v5
|
||||
- uses: pre-commit/action@v3.0.1
|
||||
|
||||
...
|
||||
41
.github/workflows/release.yml
vendored
Normal file
41
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
---
|
||||
name: release
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
- "v*.*.*"
|
||||
|
||||
workflow_run:
|
||||
workflows: ["pre-commit"]
|
||||
types:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
release:
|
||||
name: Release Job
|
||||
runs-on: ubuntu-latest
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install go (required for Changelog parsing)
|
||||
uses: actions/setup-go@v4
|
||||
|
||||
- name: Parse CHANGELOG.md
|
||||
run: |
|
||||
GO111MODULE=on go install github.com/rcmachado/changelog@0.7.0
|
||||
changelog show "$GITHUB_REF_NAME" > ${{ github.workspace }}-CHANGELOG.txt
|
||||
echo "Release note for $GITHUB_REF_NAME :"
|
||||
cat ${{ github.workspace }}-CHANGELOG.txt
|
||||
|
||||
- name: Release
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
body_path: ${{ github.workspace }}-CHANGELOG.txt
|
||||
files: |
|
||||
LICENSE
|
||||
CHANGELOG.md
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -1,4 +1,4 @@
|
||||
|
||||
ansible-navigator.log
|
||||
sean_login_info.yml
|
||||
.DS_Store
|
||||
choose_demo.yml
|
||||
@@ -6,6 +6,7 @@ choose_demo_example_azure.yml
|
||||
choose_demo_example_aws.yml
|
||||
.ansible.cfg
|
||||
*.gz
|
||||
|
||||
**/roles/*
|
||||
!**/roles/requirements.yml
|
||||
*artifact*.json
|
||||
roles/*
|
||||
!roles/requirements.yml
|
||||
.deployment_id
|
||||
|
||||
@@ -4,13 +4,30 @@ repos:
|
||||
rev: v4.4.0
|
||||
hooks:
|
||||
- id: end-of-file-fixer
|
||||
exclude: rhel[89]STIG/.*$
|
||||
|
||||
- id: trailing-whitespace
|
||||
exclude: rhel[89]STIG/.*$
|
||||
|
||||
- id: check-yaml
|
||||
exclude: \.j2.(yaml|yml)$|\.(yaml|yml).j2$
|
||||
args: [--unsafe] # see https://github.com/pre-commit/pre-commit-hooks/issues/273
|
||||
|
||||
- id: check-toml
|
||||
- id: check-json
|
||||
- id: check-symlinks
|
||||
|
||||
- repo: https://github.com/ansible/ansible-lint.git
|
||||
# get latest release tag from https://github.com/ansible/ansible-lint/releases/
|
||||
rev: v6.18.0
|
||||
rev: v6.20.3
|
||||
hooks:
|
||||
- id: ansible-lint
|
||||
additional_dependencies:
|
||||
- jmespath
|
||||
|
||||
- repo: https://github.com/psf/black-pre-commit-mirror
|
||||
rev: 23.11.0
|
||||
hooks:
|
||||
- id: black
|
||||
exclude: rhel[89]STIG/.*$
|
||||
...
|
||||
|
||||
@@ -18,6 +18,7 @@ This document aims to outline the requirements for the various forms of contribu
|
||||
- PRs should be rebased against the `main` branch to avoid conflicts.
|
||||
- PRs should not impact more than a single directory/demo section.
|
||||
- PRs should not rely on external infrastructure or configuration unless the dependency is automated or specified in the `user_message` of `setup.yml`.
|
||||
- PR titles should describe the work done in the PR. Titles should not be generic ("Added new demo") and should not refer to an issue number ("Fix for issue #123").
|
||||
|
||||
## Adding a New Demo
|
||||
1) Create a new branch based on main. (eg. `git checkout -b <branch name>`)
|
||||
@@ -31,7 +32,7 @@ This document aims to outline the requirements for the various forms of contribu
|
||||
1) You can copy paste an existing one and edit it.
|
||||
2) Ensure you edit the name, playbook path, survey etc.
|
||||
5) Add any needed roles/collections to the [requirements.yml](/collections/requirements.yml)
|
||||
6) Test via [demo.redhat.com](https://demo.redhat.com/catalog?item=babylon-catalog-prod/sandboxes-gpte.aap-product-demos.prod&utm_source=webapp&utm_medium=share-link), specify your branch name within the project configuration.
|
||||
6) Test via [demo.redhat.com](https://demo.redhat.com/catalog?search=product&item=babylon-catalog-prod%2Fopenshift-cnv.aap-product-demos-cnv.prod), specifying your branch name within the project configuration.
|
||||
|
||||
> NOTE: demo.redhat.com is available to Red Hat Associates and Partners with a valid account.
|
||||
|
||||
@@ -43,17 +44,29 @@ This document aims to outline the requirements for the various forms of contribu
|
||||
---
|
||||
user_message: ''
|
||||
|
||||
controller_components:
|
||||
- job_templates
|
||||
|
||||
controller_templates:
|
||||
...
|
||||
```
|
||||
- `controller_components` can be any of the roles defined [here](https://github.com/redhat-cop/controller_configuration/tree/devel/roles)
|
||||
- Configuration variables can be from any of the roles defined in the [infra.controller_configuration collection](https://github.com/redhat-cop/controller_configuration/tree/devel/roles)
|
||||
- Add variables for each component listed
|
||||
3) Include a README.md in the subdirectory
|
||||
|
||||
## Testing
|
||||
To run `ansible-lint` you will need to set an environment variable for the token to connect to Automation Hub. You can get a token from [here](https://console.redhat.com/ansible/automation-hub/token).
|
||||
|
||||
Copy the value of the token and run `export ANSIBLE_GALAXY_SERVER_AH_TOKEN=<token>`
|
||||
We utilize pre-commit to handle Git hooks, initiating a pre-commit check with each commit, both locally and on CI.
|
||||
|
||||
To install pre-commit, use the following commands:
|
||||
```bash
|
||||
pip install pre-commit
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
For further details, refer to the [pre-commit installation documentation](https://pre-commit.com/#installation).
|
||||
|
||||
To execute ansible-lint (whether within pre-commit or independently), you must configure an environment variable for the token required to connect to Automation Hub. Obtain the token [here](https://console.redhat.com/ansible/automation-hub/token).
|
||||
|
||||
Copy the token value and execute the following command:
|
||||
|
||||
```bash
|
||||
export ANSIBLE_GALAXY_SERVER_AH_TOKEN=<token>
|
||||
```
|
||||
|
||||
14
README.md
14
README.md
@@ -19,7 +19,7 @@ If you would like to contribute to this project please refer to [contribution gu
|
||||
|
||||
## Using this project
|
||||
|
||||
This project is tested for compatibility with the [demo.redhat.com Product Demos Sandbox]([red.ht/aap-product-demos](https://demo.redhat.com/catalog?item=babylon-catalog-prod/sandboxes-gpte.aap-product-demos.prod&utm_source=webapp&utm_medium=share-link)) lab environment. To use with other Ansible Controller installations, review the [prerequisite documentation](https://github.com/RedHatGov/ansible-tower-samples).
|
||||
This project is tested for compatibility with the [demo.redhat.com Product Demos Sandbox](https://demo.redhat.com/catalog?search=product+demos&item=babylon-catalog-prod%2Fopenshift-cnv.aap-product-demos-cnv.prod) lab environment. To use with other Ansible Controller installations, review the [prerequisite documentation](https://github.com/RedHatGov/ansible-tower-samples).
|
||||
|
||||
> NOTE: demo.redhat.com is available to Red Hat Associates and Partners with a valid account.
|
||||
|
||||
@@ -31,13 +31,19 @@ This project is tested for compatibility with the [demo.redhat.com Product Demos
|
||||
|
||||
> You can also use an execution environment for disconnected environments. To do this, you must disable collection downloads in the Controller. This can be done in `Settings` > `Job Settings`. This setting prevents the controller from downloading collections listed in the [collections/requirements.yml](collections/requirements.yml) file.
|
||||
|
||||
2. If it is not already created for you, create a Project called `Ansible official demo project` with this repo as a source. NOTE: if you are using a fork, be sure that you have the correct URL. Update the project.
|
||||
2. If it is not already created for you, add an Execution Environment called `product-demos`
|
||||
|
||||
3. Finally, Create a Job Template called `Setup` with the following configuration:
|
||||
- Name: product-demos
|
||||
- Image: quay.io/acme_corp/product-demos-ee:latest
|
||||
- Pull: Only pull the image if not present before running
|
||||
|
||||
3. If it is not already created for you, create a Project called `Ansible official demo project` with this repo as a source. NOTE: if you are using a fork, be sure that you have the correct URL. Update the project.
|
||||
|
||||
4. Finally, Create a Job Template called `Setup` with the following configuration:
|
||||
|
||||
- Name: Setup
|
||||
- Inventory: Demo Inventory
|
||||
- Exec Env: Control Plane EE
|
||||
- Exec Env: product-demos
|
||||
- Playbook: setup_demo.yml
|
||||
- Credentials:
|
||||
- Type: Red Hat Ansible Automation Platform
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
[defaults]
|
||||
collections_paths=./collections
|
||||
collections_path=./collections
|
||||
roles_path=./roles
|
||||
|
||||
[galaxy]
|
||||
server_list = ah,galaxy
|
||||
|
||||
[galaxy_server.ah]
|
||||
#url=https://cloud.redhat.com/api/automation-hub/
|
||||
# Grab a token at https://console.redhat.com/ansible/automation-hub/token
|
||||
# Then define it using ANSIBLE_GALAXY_SERVER_AH_TOKEN=""
|
||||
|
||||
url=https://console.redhat.com/api/automation-hub/content/published/
|
||||
auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
- [Configure Credentials](#configure-credentials)
|
||||
- [Add Workshop Credential Password](#add-workshop-credential-password)
|
||||
- [Remove Inventory Variables](#remove-inventory-variables)
|
||||
- [Getting your Puiblic Key for Create Keypair Job](#getting-your-puiblic-key-for-create-keypair-job)
|
||||
- [Getting your Public Key for Create Keypair Job](#getting-your-public-key-for-create-keypair-job)
|
||||
- [Suggested Usage](#suggested-usage)
|
||||
- [Known Issues](#known-issues)
|
||||
|
||||
@@ -49,11 +49,11 @@ After running the setup job template, there are a few steps required to make the
|
||||
|
||||
1) Remove Workshop Inventory variables on the Details page of the inventory. Required until [RFE](https://github.com/ansible/workshops/issues/1597]) is complete
|
||||
|
||||
### Getting your Puiblic Key for Create Keypair Job
|
||||
### Getting your Public Key for Create Keypair Job
|
||||
|
||||
1) Connect to the command line of your Controller server. This is easiest to do by opening the VS Code Web Editor from the landing page where you found the Controller login details.
|
||||
2) Open a Terminal Window in the VS Code Web Editor.
|
||||
3) SSH to one of your linux nodes (eg. `ssh node1`). This should log you into the node as `ec2-user`
|
||||
3) SSH to one of your linux nodes (eg. `ssh aws_rhel9`). This should log you into the node as `ec2-user`
|
||||
4) `cat .ssh/authorized_keys` and copy the key listed including the `ssh-rsa` prefix
|
||||
|
||||
|
||||
|
||||
@@ -122,3 +122,12 @@
|
||||
Name: "{{ aws_rt_name }}"
|
||||
owner: "{{ aws_owner_tag }}"
|
||||
purpose: "{{ aws_purpose_tag }}"
|
||||
|
||||
- name: Set VPC stats
|
||||
ansible.builtin.set_stats:
|
||||
data:
|
||||
__aws_region: '{{ create_vm_aws_region }}'
|
||||
__aws_vpc_id: '{{ aws_vpc.vpc.id }}'
|
||||
__aws_vpc_cidr: '{{ aws_vpc_cidr_block }}'
|
||||
__aws_subnet_id: '{{ aws_subnet.subnet.id }}'
|
||||
__aws_subnet_cidr: '{{ aws_subnet_cidr }}'
|
||||
|
||||
301
cloud/setup.yml
301
cloud/setup.yml
@@ -1,90 +1,8 @@
|
||||
---
|
||||
_deployment_id: "{{ lookup('file', playbook_dir + '/.deployment_id') }}"
|
||||
|
||||
user_message:
|
||||
|
||||
controller_components:
|
||||
- execution_environments
|
||||
- projects
|
||||
- credentials
|
||||
- inventory_sources
|
||||
- groups
|
||||
- job_templates
|
||||
- workflow_job_templates
|
||||
|
||||
controller_execution_environments:
|
||||
- name: Cloud Services Execution Environment
|
||||
image: quay.io/scottharwell/cloud-ee:latest
|
||||
|
||||
controller_projects:
|
||||
- name: Ansible Cloud Content Lab - AWS
|
||||
organization: Default
|
||||
scm_type: git
|
||||
wait: true
|
||||
scm_url: https://github.com/ansible-content-lab/aws.infrastructure_config_demos.git
|
||||
default_environment: Cloud Services Execution Environment
|
||||
|
||||
controller_credentials:
|
||||
- name: AWS
|
||||
credential_type: Amazon Web Services
|
||||
organization: Default
|
||||
update_secrets: false
|
||||
inputs:
|
||||
username: REPLACEME
|
||||
password: REPLACEME
|
||||
|
||||
# - name: Azure
|
||||
# credential_type: Microsoft Azure Resource Manager
|
||||
# organization: Default
|
||||
# update_secrets: false
|
||||
# inputs:
|
||||
# subscription: REPLACEME
|
||||
|
||||
controller_inventory_sources:
|
||||
- name: AWS Inventory
|
||||
organization: Default
|
||||
source: ec2
|
||||
inventory: Demo Inventory
|
||||
credential: AWS
|
||||
overwrite: true
|
||||
source_vars:
|
||||
hostnames:
|
||||
- tag:Name
|
||||
compose:
|
||||
ansible_host: public_ip_address
|
||||
ansible_user: 'ec2-user'
|
||||
groups:
|
||||
cloud_aws: true
|
||||
os_linux: tags.blueprint.startswith('rhel')
|
||||
keyed_groups:
|
||||
- key: platform
|
||||
prefix: os
|
||||
- key: tags.blueprint
|
||||
prefix: blueprint
|
||||
- key: tags.owner
|
||||
prefix: owner
|
||||
|
||||
# - name: Azure Inventory
|
||||
# organization: Default
|
||||
# source: azure_rm
|
||||
# inventory: Demo Inventory
|
||||
# credential: Azure
|
||||
# execution_environment: Ansible Engine 2.9 execution environment
|
||||
# overwrite: true
|
||||
# source_vars:
|
||||
# hostnames:
|
||||
# - tags.Name
|
||||
# - default
|
||||
# keyed_groups:
|
||||
# - key: os_profile.system
|
||||
# prefix: os
|
||||
# conditional_groups:
|
||||
# cloud_azure: true
|
||||
|
||||
controller_groups:
|
||||
- name: cloud_aws
|
||||
inventory: Demo Inventory
|
||||
variables:
|
||||
ansible_user: ec2-user
|
||||
|
||||
controller_templates:
|
||||
- name: Cloud / AWS / Create Peer Infrastructure
|
||||
job_type: run
|
||||
@@ -92,7 +10,7 @@ controller_templates:
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbook_create_peer_network.yml
|
||||
playbook: playbooks/create_peer_network.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
@@ -108,7 +26,7 @@ controller_templates:
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbook_delete_peer_network.yml
|
||||
playbook: playbooks/delete_peer_network.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
@@ -122,7 +40,7 @@ controller_templates:
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbook_create_transit_network.yml
|
||||
playbook: playbooks/create_transit_network.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
@@ -138,7 +56,7 @@ controller_templates:
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbook_delete_transit_network.yml
|
||||
playbook: playbooks/delete_transit_network.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
@@ -146,153 +64,20 @@ controller_templates:
|
||||
extra_vars:
|
||||
aws_region: us-east-1
|
||||
|
||||
- name: Cloud / AWS / Create VPC
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible official demo project
|
||||
playbook: cloud/create_vpc.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: AWS Region
|
||||
type: multiplechoice
|
||||
variable: create_vm_aws_region
|
||||
required: true
|
||||
choices:
|
||||
- us-east-1
|
||||
- us-east-2
|
||||
- us-west-1
|
||||
- us-west-2
|
||||
- question_name: Owner
|
||||
type: text
|
||||
variable: aws_owner_tag
|
||||
required: true
|
||||
|
||||
- name: Cloud / AWS / Create VM
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
- Demo Credential
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbook_create_vm.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
allow_simultaneous: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: AWS Region
|
||||
type: multiplechoice
|
||||
variable: create_vm_aws_region
|
||||
required: true
|
||||
choices:
|
||||
- us-east-1
|
||||
- us-east-2
|
||||
- us-west-1
|
||||
- us-west-2
|
||||
- question_name: Name
|
||||
type: text
|
||||
variable: create_vm_vm_name
|
||||
required: true
|
||||
- question_name: Owner
|
||||
type: text
|
||||
variable: create_vm_vm_owner
|
||||
required: true
|
||||
- question_name: Deployment
|
||||
type: text
|
||||
variable: create_vm_vm_deployment
|
||||
required: true
|
||||
- question_name: Environment
|
||||
type: multiplechoice
|
||||
variable: create_vm_vm_environment
|
||||
required: true
|
||||
choices:
|
||||
- Dev
|
||||
- QA
|
||||
- Prod
|
||||
- question_name: Blueprint
|
||||
type: multiplechoice
|
||||
variable: vm_blueprint
|
||||
required: true
|
||||
choices:
|
||||
- windows_core
|
||||
- windows_full
|
||||
- rhel9
|
||||
- rhel8
|
||||
- rhel7
|
||||
- al2023
|
||||
- question_name: Subnet
|
||||
type: text
|
||||
variable: create_vm_aws_vpc_subnet_name
|
||||
required: true
|
||||
default: aws-test-subnet
|
||||
- question_name: Security Group
|
||||
type: text
|
||||
variable: create_vm_aws_securitygroup_name
|
||||
required: true
|
||||
default: aws-test-sg
|
||||
- question_name: SSH Keypair
|
||||
type: text
|
||||
variable: create_vm_aws_keypair_name
|
||||
required: true
|
||||
default: aws-test-key
|
||||
- question_name: AWS Instance Type (defaults to blueprint value)
|
||||
type: text
|
||||
variable: create_vm_aws_instance_size
|
||||
required: false
|
||||
- question_name: AWS Image Filter (defaults to blueprint value)
|
||||
type: text
|
||||
variable: create_vm_aws_image_filter
|
||||
required: false
|
||||
|
||||
- name: Cloud / AWS / Delete VM
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
- Demo Credential
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbook_delete_inventory_vm.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: Name or Pattern
|
||||
type: text
|
||||
variable: _hosts
|
||||
required: true
|
||||
|
||||
- name: Cloud / AWS / VPC Report
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbook_create_reports.yml
|
||||
playbook: playbooks/create_reports.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
extra_vars:
|
||||
aws_report: vpc
|
||||
reports_aws_bucket_name: reports-pd-{{ _deployment_id }}
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
@@ -314,13 +99,14 @@ controller_templates:
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbook_create_reports.yml
|
||||
playbook: playbooks/create_reports.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
extra_vars:
|
||||
aws_report: tags
|
||||
reports_aws_bucket_name: reports-pd-{{ _deployment_id }}
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
@@ -336,45 +122,6 @@ controller_templates:
|
||||
- us-west-1
|
||||
- us-west-2
|
||||
|
||||
- name: Cloud / AWS / Create Keypair
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible official demo project
|
||||
playbook: cloud/aws_key.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: AWS Region
|
||||
type: multiplechoice
|
||||
variable: create_vm_aws_region
|
||||
required: true
|
||||
choices:
|
||||
- us-east-1
|
||||
- us-east-2
|
||||
- us-west-1
|
||||
- us-west-2
|
||||
- question_name: Keypair Name
|
||||
type: text
|
||||
variable: aws_key_name
|
||||
required: true
|
||||
default: aws-test-key
|
||||
- question_name: Keypair Public Key
|
||||
type: textarea
|
||||
variable: aws_public_key
|
||||
required: true
|
||||
- question_name: Owner
|
||||
type: text
|
||||
variable: aws_keypair_owner
|
||||
required: true
|
||||
|
||||
- name: Cloud / AWS / Snapshot EC2
|
||||
job_type: run
|
||||
organization: Default
|
||||
@@ -523,19 +270,30 @@ controller_workflows:
|
||||
- identifier: VPC Report
|
||||
unified_job_template: Cloud / AWS / VPC Report
|
||||
all_parents_must_converge: true
|
||||
success_nodes:
|
||||
- Deploy Windows Blueprint
|
||||
always_nodes:
|
||||
- Deploy Windows GUI Blueprint
|
||||
- Deploy RHEL8 Blueprint
|
||||
- Deploy RHEL9 Blueprint
|
||||
- identifier: Deploy Windows Blueprint
|
||||
- Deploy Windows Core Blueprint
|
||||
- Deploy Report Server
|
||||
- identifier: Deploy Windows GUI Blueprint
|
||||
unified_job_template: Cloud / AWS / Create VM
|
||||
extra_data:
|
||||
create_vm_vm_name: aws_win
|
||||
create_vm_vm_name: aws_dc
|
||||
vm_blueprint: windows_full
|
||||
success_nodes:
|
||||
- Update Inventory
|
||||
failure_nodes:
|
||||
- Ticket - Instance Failed
|
||||
- identifier: Deploy Windows Core Blueprint
|
||||
unified_job_template: Cloud / AWS / Create VM
|
||||
extra_data:
|
||||
create_vm_vm_name: aws_win1
|
||||
vm_blueprint: windows_core
|
||||
success_nodes:
|
||||
- Update Inventory
|
||||
failure_nodes:
|
||||
- Ticket - Instance Failed
|
||||
- identifier: Deploy RHEL8 Blueprint
|
||||
unified_job_template: Cloud / AWS / Create VM
|
||||
extra_data:
|
||||
@@ -554,6 +312,15 @@ controller_workflows:
|
||||
- Update Inventory
|
||||
failure_nodes:
|
||||
- Ticket - Instance Failed
|
||||
- identifier: Deploy Report Server
|
||||
unified_job_template: Cloud / AWS / Create VM
|
||||
extra_data:
|
||||
create_vm_vm_name: reports
|
||||
vm_blueprint: rhel9
|
||||
success_nodes:
|
||||
- Update Inventory
|
||||
failure_nodes:
|
||||
- Ticket - Instance Failed
|
||||
- identifier: Ticket - VPC Failed
|
||||
unified_job_template: 'SUBMIT FEEDBACK'
|
||||
extra_data:
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
@@ -14,61 +15,65 @@ import xml.dom.minidom
|
||||
|
||||
role = "iosxeSTIG"
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'xml'
|
||||
CALLBACK_NAME = 'stig_xml'
|
||||
CALLBACK_TYPE = "xml"
|
||||
CALLBACK_NAME = "stig_xml"
|
||||
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
self.rules = {}
|
||||
self.stig_path = os.environ.get('STIG_PATH')
|
||||
self.XML_path = os.environ.get('XML_PATH')
|
||||
self.stig_path = os.environ.get("STIG_PATH")
|
||||
self.XML_path = os.environ.get("XML_PATH")
|
||||
if self.stig_path is None:
|
||||
self.stig_path = os.path.join(os.getcwd(), "roles", role, "files")
|
||||
self._display.display('Using STIG_PATH: {}'.format(self.stig_path))
|
||||
self._display.display("Using STIG_PATH: {}".format(self.stig_path))
|
||||
if self.XML_path is None:
|
||||
self.XML_path = os.getcwd()
|
||||
self._display.display('Using XML_PATH: {}'.format(self.XML_path))
|
||||
self._display.display("Using XML_PATH: {}".format(self.XML_path))
|
||||
|
||||
print("Writing: {}".format(self.XML_path))
|
||||
STIG_name = os.path.basename(self.stig_path)
|
||||
ET.register_namespace('cdf', 'http://checklists.nist.gov/xccdf/1.2')
|
||||
self.tr = ET.Element('{http://checklists.nist.gov/xccdf/1.2}TestResult')
|
||||
self.tr.set('id', 'xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}'.format(STIG_name))
|
||||
ET.register_namespace("cdf", "http://checklists.nist.gov/xccdf/1.2")
|
||||
self.tr = ET.Element("{http://checklists.nist.gov/xccdf/1.2}TestResult")
|
||||
self.tr.set(
|
||||
"id",
|
||||
"xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}".format(STIG_name),
|
||||
)
|
||||
endtime = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
|
||||
self.tr.set('end-time', endtime)
|
||||
tg = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}target')
|
||||
self.tr.set("end-time", endtime)
|
||||
tg = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}target")
|
||||
tg.text = platform.node()
|
||||
|
||||
def __get_rev(self, nid):
|
||||
rev = '0'
|
||||
rev = "0"
|
||||
# Check all files for the rule number.
|
||||
for file in os.listdir(self.stig_path):
|
||||
with open(os.path.join(self.stig_path, file), 'r') as f:
|
||||
r = 'SV-{}r(?P<rev>\d)_rule'.format(nid)
|
||||
with open(os.path.join(self.stig_path, file), "r") as f:
|
||||
r = "SV-{}r(?P<rev>\d)_rule".format(nid)
|
||||
m = re.search(r, f.read())
|
||||
if m:
|
||||
rev = m.group('rev')
|
||||
rev = m.group("rev")
|
||||
break
|
||||
return rev
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
name = result._task.get_name()
|
||||
m = re.search('stigrule_(?P<id>\d+)', name)
|
||||
m = re.search("stigrule_(?P<id>\d+)", name)
|
||||
if m:
|
||||
nid = m.group('id')
|
||||
nid = m.group("id")
|
||||
else:
|
||||
return
|
||||
rev = self.__get_rev(nid)
|
||||
key = "{}r{}".format(nid, rev)
|
||||
if self.rules.get(key, 'Unknown') != False:
|
||||
if self.rules.get(key, "Unknown") != False:
|
||||
self.rules[key] = result.is_changed()
|
||||
|
||||
def __set_duplicates(self):
|
||||
with open(os.path.join(self.stig_path, 'duplicates.json')) as f:
|
||||
with open(os.path.join(self.stig_path, "duplicates.json")) as f:
|
||||
dups = json.load(f)
|
||||
for d in dups:
|
||||
dup_of = str(dups[d][0])
|
||||
@@ -82,17 +87,19 @@ class CallbackModule(CallbackBase):
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
self.__set_duplicates()
|
||||
for rule, changed in self.rules.items():
|
||||
state = 'fail' if changed else 'pass'
|
||||
rr = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}rule-result')
|
||||
rr.set('idref', 'xccdf_mil.disa.stig_rule_SV-{}_rule'.format(rule))
|
||||
rs = ET.SubElement(rr, '{http://checklists.nist.gov/xccdf/1.2}result')
|
||||
state = "fail" if changed else "pass"
|
||||
rr = ET.SubElement(
|
||||
self.tr, "{http://checklists.nist.gov/xccdf/1.2}rule-result"
|
||||
)
|
||||
rr.set("idref", "xccdf_mil.disa.stig_rule_SV-{}_rule".format(rule))
|
||||
rs = ET.SubElement(rr, "{http://checklists.nist.gov/xccdf/1.2}result")
|
||||
rs.text = state
|
||||
passing = len(self.rules) - sum(self.rules.values())
|
||||
sc = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}score')
|
||||
sc.set('maximum', str(len(self.rules)))
|
||||
sc.set('system', 'urn:xccdf:scoring:flat-unweighted')
|
||||
sc = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}score")
|
||||
sc.set("maximum", str(len(self.rules)))
|
||||
sc.set("system", "urn:xccdf:scoring:flat-unweighted")
|
||||
sc.text = str(passing)
|
||||
with open(os.path.join(self.XML_path, "xccdf-results.xml"), 'w') as f:
|
||||
with open(os.path.join(self.XML_path, "xccdf-results.xml"), "w") as f:
|
||||
out = ET.tostring(self.tr)
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding='utf-8')
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding="utf-8")
|
||||
f.write(pretty)
|
||||
|
||||
@@ -137,14 +137,14 @@
|
||||
- (cmd_result.stdout|join('\n')).find('ip dns server') != -1
|
||||
- iosxeSTIG_stigrule_215823_Manage
|
||||
# R-215823 CISC-ND-000470
|
||||
- name : stigrule_215823_disable_identd
|
||||
ignore_errors: "{{ ignore_all_errors }}"
|
||||
notify: "save configuration"
|
||||
ios_config:
|
||||
defaults: yes
|
||||
lines: "{{ iosxeSTIG_stigrule_215823_disable_identd_Lines }}"
|
||||
when:
|
||||
- iosxeSTIG_stigrule_215823_Manage
|
||||
# - name : stigrule_215823_disable_identd
|
||||
# ignore_errors: "{{ ignore_all_errors }}"
|
||||
# notify: "save configuration"
|
||||
# ios_config:
|
||||
# defaults: yes
|
||||
# lines: "{{ iosxeSTIG_stigrule_215823_disable_identd_Lines }}"
|
||||
# when:
|
||||
# - iosxeSTIG_stigrule_215823_Manage
|
||||
# R-215823 CISC-ND-000470
|
||||
- name : stigrule_215823_disable_finger
|
||||
ignore_errors: "{{ ignore_all_errors }}"
|
||||
@@ -378,9 +378,9 @@
|
||||
- name : stigrule_215837_host
|
||||
ignore_errors: "{{ ignore_all_errors }}"
|
||||
notify: "save configuration"
|
||||
ios_logging:
|
||||
dest: host
|
||||
name: "{{ iosxeSTIG_stigrule_215837_host_Name }}"
|
||||
ios_config:
|
||||
lines:
|
||||
- "logging {{ iosxeSTIG_stigrule_215837_host_Name }}"
|
||||
when: iosxeSTIG_stigrule_215837_Manage
|
||||
# R-215837 CISC-ND-001000
|
||||
# Please configure name IP address to a valid one.
|
||||
@@ -397,16 +397,18 @@
|
||||
- name : stigrule_215838_ntp_server_1
|
||||
ignore_errors: "{{ ignore_all_errors }}"
|
||||
notify: "save configuration"
|
||||
ios_ntp:
|
||||
server: "{{ iosxeSTIG_stigrule_215838_ntp_server_1_Server }}"
|
||||
cisco.ios.ios_config:
|
||||
lines:
|
||||
- "ntp server {{ iosxeSTIG_stigrule_215838_ntp_server_1_Server }}"
|
||||
when: iosxeSTIG_stigrule_215838_Manage
|
||||
# R-215838 CISC-ND-001030
|
||||
# Replace ntp servers' IP address before enabling.
|
||||
- name : stigrule_215838_ntp_server_2
|
||||
ignore_errors: "{{ ignore_all_errors }}"
|
||||
notify: "save configuration"
|
||||
ios_ntp:
|
||||
server: "{{ iosxeSTIG_stigrule_215838_ntp_server_2_Server }}"
|
||||
cisco.ios.ios_config:
|
||||
lines:
|
||||
- "ntp server {{ iosxeSTIG_stigrule_215838_ntp_server_2_Server }}"
|
||||
when: iosxeSTIG_stigrule_215838_Manage
|
||||
# R-215840 CISC-ND-001050
|
||||
# service timestamps log datetime localtime is set in 215817.
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
@@ -11,76 +12,82 @@ import os
|
||||
import xml.etree.ElementTree as ET
|
||||
import xml.dom.minidom
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'xml'
|
||||
CALLBACK_NAME = 'stig_xml'
|
||||
CALLBACK_TYPE = "xml"
|
||||
CALLBACK_NAME = "stig_xml"
|
||||
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def _get_STIG_path(self):
|
||||
cwd = os.path.abspath('.')
|
||||
cwd = os.path.abspath(".")
|
||||
for dirpath, dirs, files in os.walk(cwd):
|
||||
if os.path.sep + 'files' in dirpath and '.xml' in files[0]:
|
||||
if os.path.sep + "files" in dirpath and ".xml" in files[0]:
|
||||
return os.path.join(cwd, dirpath, files[0])
|
||||
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
self.rules = {}
|
||||
self.stig_path = os.environ.get('STIG_PATH')
|
||||
self.XML_path = os.environ.get('XML_PATH')
|
||||
self.stig_path = os.environ.get("STIG_PATH")
|
||||
self.XML_path = os.environ.get("XML_PATH")
|
||||
if self.stig_path is None:
|
||||
self.stig_path = self._get_STIG_path()
|
||||
self._display.display('Using STIG_PATH: {}'.format(self.stig_path))
|
||||
self._display.display("Using STIG_PATH: {}".format(self.stig_path))
|
||||
if self.XML_path is None:
|
||||
self.XML_path = tempfile.mkdtemp() + "/xccdf-results.xml"
|
||||
self._display.display('Using XML_PATH: {}'.format(self.XML_path))
|
||||
self._display.display("Using XML_PATH: {}".format(self.XML_path))
|
||||
|
||||
print("Writing: {}".format(self.XML_path))
|
||||
STIG_name = os.path.basename(self.stig_path)
|
||||
ET.register_namespace('cdf', 'http://checklists.nist.gov/xccdf/1.2')
|
||||
self.tr = ET.Element('{http://checklists.nist.gov/xccdf/1.2}TestResult')
|
||||
self.tr.set('id', 'xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}'.format(STIG_name))
|
||||
ET.register_namespace("cdf", "http://checklists.nist.gov/xccdf/1.2")
|
||||
self.tr = ET.Element("{http://checklists.nist.gov/xccdf/1.2}TestResult")
|
||||
self.tr.set(
|
||||
"id",
|
||||
"xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}".format(STIG_name),
|
||||
)
|
||||
endtime = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
|
||||
self.tr.set('end-time', endtime)
|
||||
tg = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}target')
|
||||
self.tr.set("end-time", endtime)
|
||||
tg = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}target")
|
||||
tg.text = platform.node()
|
||||
|
||||
def _get_rev(self, nid):
|
||||
with open(self.stig_path, 'r') as f:
|
||||
r = 'SV-{}r(?P<rev>\d+)_rule'.format(nid)
|
||||
with open(self.stig_path, "r") as f:
|
||||
r = "SV-{}r(?P<rev>\d+)_rule".format(nid)
|
||||
m = re.search(r, f.read())
|
||||
if m:
|
||||
rev = m.group('rev')
|
||||
rev = m.group("rev")
|
||||
else:
|
||||
rev = '0'
|
||||
rev = "0"
|
||||
return rev
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
name = result._task.get_name()
|
||||
m = re.search('stigrule_(?P<id>\d+)', name)
|
||||
m = re.search("stigrule_(?P<id>\d+)", name)
|
||||
if m:
|
||||
nid = m.group('id')
|
||||
nid = m.group("id")
|
||||
else:
|
||||
return
|
||||
rev = self._get_rev(nid)
|
||||
key = "{}r{}".format(nid, rev)
|
||||
if self.rules.get(key, 'Unknown') != False:
|
||||
if self.rules.get(key, "Unknown") != False:
|
||||
self.rules[key] = result.is_changed()
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
for rule, changed in self.rules.items():
|
||||
state = 'fail' if changed else 'pass'
|
||||
rr = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}rule-result')
|
||||
rr.set('idref', 'xccdf_mil.disa.stig_rule_SV-{}_rule'.format(rule))
|
||||
rs = ET.SubElement(rr, '{http://checklists.nist.gov/xccdf/1.2}result')
|
||||
state = "fail" if changed else "pass"
|
||||
rr = ET.SubElement(
|
||||
self.tr, "{http://checklists.nist.gov/xccdf/1.2}rule-result"
|
||||
)
|
||||
rr.set("idref", "xccdf_mil.disa.stig_rule_SV-{}_rule".format(rule))
|
||||
rs = ET.SubElement(rr, "{http://checklists.nist.gov/xccdf/1.2}result")
|
||||
rs.text = state
|
||||
passing = len(self.rules) - sum(self.rules.values())
|
||||
sc = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}score')
|
||||
sc.set('maximum', str(len(self.rules)))
|
||||
sc.set('system', 'urn:xccdf:scoring:flat-unweighted')
|
||||
sc = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}score")
|
||||
sc.set("maximum", str(len(self.rules)))
|
||||
sc.set("system", "urn:xccdf:scoring:flat-unweighted")
|
||||
sc.text = str(passing)
|
||||
with open(self.XML_path, 'wb') as f:
|
||||
with open(self.XML_path, "wb") as f:
|
||||
out = ET.tostring(self.tr)
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding='utf-8')
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding="utf-8")
|
||||
f.write(pretty)
|
||||
|
||||
@@ -142,9 +142,6 @@ rhel8STIG_stigrule_230347__etc_dconf_db_local_d_00_screensaver_Value: 'true'
|
||||
rhel8STIG_stigrule_230348_Manage: True
|
||||
rhel8STIG_stigrule_230348_ensure_tmux_is_installed_State: installed
|
||||
rhel8STIG_stigrule_230348__etc_tmux_conf_Line: 'set -g lock-command vlock'
|
||||
# R-230349 RHEL-08-020041
|
||||
rhel8STIG_stigrule_230349_Manage: True
|
||||
rhel8STIG_stigrule_230349__etc_bashrc_Line: '[ -n "$PS1" -a -z "$TMUX" ] && exec tmux'
|
||||
# R-230352 RHEL-08-020060
|
||||
rhel8STIG_stigrule_230352_Manage: True
|
||||
rhel8STIG_stigrule_230352__etc_dconf_db_local_d_00_screensaver_Value: 'uint32 900'
|
||||
@@ -232,9 +229,6 @@ rhel8STIG_stigrule_230394__etc_audit_auditd_conf_Line: 'name_format = hostname'
|
||||
# R-230395 RHEL-08-030063
|
||||
rhel8STIG_stigrule_230395_Manage: True
|
||||
rhel8STIG_stigrule_230395__etc_audit_auditd_conf_Line: 'log_format = ENRICHED'
|
||||
# R-230396 RHEL-08-030070
|
||||
rhel8STIG_stigrule_230396_Manage: True
|
||||
rhel8STIG_stigrule_230396__etc_audit_auditd_conf_Line: 'log_group = root'
|
||||
# R-230398 RHEL-08-030090
|
||||
# A duplicate of 230396
|
||||
# duplicate of 230396
|
||||
@@ -569,3 +563,6 @@ rhel8STIG_stigrule_244553_net_ipv4_conf_all_accept_redirects_Value: 0
|
||||
# R-244554 RHEL-08-040286
|
||||
rhel8STIG_stigrule_244554_Manage: True
|
||||
rhel8STIG_stigrule_244554__etc_sysctl_d_99_sysctl_conf_Line: 'net.core.bpf_jit_harden = 2'
|
||||
# R-256974 RHEL-08-010358
|
||||
rhel8STIG_stigrule_256974_Manage: True
|
||||
rhel8STIG_stigrule_256974_mailx_State: installed
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -4,7 +4,7 @@
|
||||
- name: stigrule_230225_banner
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)banner\s+'
|
||||
regexp: '(?i)^\s*banner\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230225_banner_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -82,7 +82,7 @@
|
||||
- name: stigrule_230244_ClientAliveCountMax
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)ClientAliveCountMax\s+'
|
||||
regexp: '(?i)^\s*ClientAliveCountMax\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230244_ClientAliveCountMax_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -249,7 +249,7 @@
|
||||
- name: stigrule_230288_StrictModes
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)StrictModes\s+'
|
||||
regexp: '(?i)^\s*StrictModes\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230288_StrictModes_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -259,7 +259,7 @@
|
||||
- name: stigrule_230290_IgnoreUserKnownHosts
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)IgnoreUserKnownHosts\s+'
|
||||
regexp: '(?i)^\s*IgnoreUserKnownHosts\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230290_IgnoreUserKnownHosts_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -269,7 +269,7 @@
|
||||
- name: stigrule_230291_KerberosAuthentication
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)KerberosAuthentication\s+'
|
||||
regexp: '(?i)^\s*KerberosAuthentication\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230291_KerberosAuthentication_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -279,7 +279,7 @@
|
||||
- name: stigrule_230296_PermitRootLogin
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)PermitRootLogin\s+'
|
||||
regexp: '(?i)^\s*PermitRootLogin\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230296_PermitRootLogin_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -395,7 +395,7 @@
|
||||
- name: stigrule_230330_PermitUserEnvironment
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)PermitUserEnvironment\s+'
|
||||
regexp: '(?i)^\s*PermitUserEnvironment\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230330_PermitUserEnvironment_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -436,14 +436,6 @@
|
||||
create: yes
|
||||
when:
|
||||
- rhel8STIG_stigrule_230348_Manage
|
||||
# R-230349 RHEL-08-020041
|
||||
- name: stigrule_230349__etc_bashrc
|
||||
lineinfile:
|
||||
path: /etc/bashrc
|
||||
line: "{{ rhel8STIG_stigrule_230349__etc_bashrc_Line }}"
|
||||
create: yes
|
||||
when:
|
||||
- rhel8STIG_stigrule_230349_Manage
|
||||
# R-230352 RHEL-08-020060
|
||||
- name: stigrule_230352__etc_dconf_db_local_d_00_screensaver
|
||||
ini_file:
|
||||
@@ -602,7 +594,7 @@
|
||||
- name: stigrule_230382_PrintLastLog
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)PrintLastLog\s+'
|
||||
regexp: '(?i)^\s*PrintLastLog\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230382_PrintLastLog_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -726,16 +718,6 @@
|
||||
notify: auditd_restart
|
||||
when:
|
||||
- rhel8STIG_stigrule_230395_Manage
|
||||
# R-230396 RHEL-08-030070
|
||||
- name: stigrule_230396__etc_audit_auditd_conf
|
||||
lineinfile:
|
||||
path: /etc/audit/auditd.conf
|
||||
regexp: '^log_group = '
|
||||
line: "{{ rhel8STIG_stigrule_230396__etc_audit_auditd_conf_Line }}"
|
||||
create: yes
|
||||
notify: auditd_restart
|
||||
when:
|
||||
- rhel8STIG_stigrule_230396_Manage
|
||||
# R-230402 RHEL-08-030121
|
||||
- name : stigrule_230402__etc_audit_rules_d_audit_rules_e2
|
||||
lineinfile:
|
||||
@@ -1348,7 +1330,7 @@
|
||||
- name: stigrule_230527_RekeyLimit
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)RekeyLimit\s+'
|
||||
regexp: '(?i)^\s*RekeyLimit\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230527_RekeyLimit_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -1569,7 +1551,7 @@
|
||||
- name: stigrule_230555_X11Forwarding
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)X11Forwarding\s+'
|
||||
regexp: '(?i)^\s*X11Forwarding\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230555_X11Forwarding_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -1579,7 +1561,7 @@
|
||||
- name: stigrule_230556_X11UseLocalhost
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)X11UseLocalhost\s+'
|
||||
regexp: '(?i)^\s*X11UseLocalhost\s+'
|
||||
line: "{{ rhel8STIG_stigrule_230556_X11UseLocalhost_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -1635,7 +1617,7 @@
|
||||
- name: stigrule_244525_ClientAliveInterval
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)ClientAliveInterval\s+'
|
||||
regexp: '(?i)^\s*ClientAliveInterval\s+'
|
||||
line: "{{ rhel8STIG_stigrule_244525_ClientAliveInterval_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -1651,7 +1633,7 @@
|
||||
- name: stigrule_244528_GSSAPIAuthentication
|
||||
lineinfile:
|
||||
path: /etc/ssh/sshd_config
|
||||
regexp: '^\s*(?i)GSSAPIAuthentication\s+'
|
||||
regexp: '(?i)^\s*GSSAPIAuthentication\s+'
|
||||
line: "{{ rhel8STIG_stigrule_244528_GSSAPIAuthentication_Line }}"
|
||||
notify: ssh_restart
|
||||
when:
|
||||
@@ -1798,3 +1780,9 @@
|
||||
create: yes
|
||||
when:
|
||||
- rhel8STIG_stigrule_244554_Manage
|
||||
# R-256974 RHEL-08-010358
|
||||
- name: stigrule_256974_mailx
|
||||
yum:
|
||||
name: mailx
|
||||
state: "{{ rhel8STIG_stigrule_256974_mailx_State }}"
|
||||
when: rhel8STIG_stigrule_256974_Manage
|
||||
|
||||
@@ -0,0 +1,86 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from time import gmtime, strftime
|
||||
import platform
|
||||
import tempfile
|
||||
import re
|
||||
import sys
|
||||
import os
|
||||
import xml.etree.ElementTree as ET
|
||||
import xml.dom.minidom
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'xml'
|
||||
CALLBACK_NAME = 'stig_xml'
|
||||
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def _get_STIG_path(self):
|
||||
cwd = os.path.abspath('.')
|
||||
for dirpath, dirs, files in os.walk(cwd):
|
||||
if os.path.sep + 'files' in dirpath and '.xml' in files[0]:
|
||||
return os.path.join(cwd, dirpath, files[0])
|
||||
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
self.rules = {}
|
||||
self.stig_path = os.environ.get('STIG_PATH')
|
||||
self.XML_path = os.environ.get('XML_PATH')
|
||||
if self.stig_path is None:
|
||||
self.stig_path = self._get_STIG_path()
|
||||
self._display.display('Using STIG_PATH: {}'.format(self.stig_path))
|
||||
if self.XML_path is None:
|
||||
self.XML_path = tempfile.mkdtemp() + "/xccdf-results.xml"
|
||||
self._display.display('Using XML_PATH: {}'.format(self.XML_path))
|
||||
|
||||
print("Writing: {}".format(self.XML_path))
|
||||
STIG_name = os.path.basename(self.stig_path)
|
||||
ET.register_namespace('cdf', 'http://checklists.nist.gov/xccdf/1.2')
|
||||
self.tr = ET.Element('{http://checklists.nist.gov/xccdf/1.2}TestResult')
|
||||
self.tr.set('id', 'xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}'.format(STIG_name))
|
||||
endtime = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
|
||||
self.tr.set('end-time', endtime)
|
||||
tg = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}target')
|
||||
tg.text = platform.node()
|
||||
|
||||
def _get_rev(self, nid):
|
||||
with open(self.stig_path, 'r') as f:
|
||||
r = 'SV-{}r(?P<rev>\d+)_rule'.format(nid)
|
||||
m = re.search(r, f.read())
|
||||
if m:
|
||||
rev = m.group('rev')
|
||||
else:
|
||||
rev = '0'
|
||||
return rev
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
name = result._task.get_name()
|
||||
m = re.search('stigrule_(?P<id>\d+)', name)
|
||||
if m:
|
||||
nid = m.group('id')
|
||||
else:
|
||||
return
|
||||
rev = self._get_rev(nid)
|
||||
key = "{}r{}".format(nid, rev)
|
||||
if self.rules.get(key, 'Unknown') != False:
|
||||
self.rules[key] = result.is_changed()
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
for rule, changed in self.rules.items():
|
||||
state = 'fail' if changed else 'pass'
|
||||
rr = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}rule-result')
|
||||
rr.set('idref', 'xccdf_mil.disa.stig_rule_SV-{}_rule'.format(rule))
|
||||
rs = ET.SubElement(rr, '{http://checklists.nist.gov/xccdf/1.2}result')
|
||||
rs.text = state
|
||||
passing = len(self.rules) - sum(self.rules.values())
|
||||
sc = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}score')
|
||||
sc.set('maximum', str(len(self.rules)))
|
||||
sc.set('system', 'urn:xccdf:scoring:flat-unweighted')
|
||||
sc.text = str(passing)
|
||||
with open(self.XML_path, 'wb') as f:
|
||||
out = ET.tostring(self.tr)
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding='utf-8')
|
||||
f.write(pretty)
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
@@ -0,0 +1,30 @@
|
||||
- name: dconf_update
|
||||
command: dconf update
|
||||
- name: auditd_restart
|
||||
command: /usr/sbin/service auditd restart
|
||||
- name: ssh_restart
|
||||
service:
|
||||
name: sshd
|
||||
state: restarted
|
||||
- name: rsyslog_restart
|
||||
service:
|
||||
name: rsyslog
|
||||
state: restarted
|
||||
- name: sysctl_load_settings
|
||||
command: sysctl --system
|
||||
- name: daemon_reload
|
||||
systemd:
|
||||
daemon_reload: true
|
||||
- name: networkmanager_reload
|
||||
service:
|
||||
name: NetworkManager
|
||||
state: reloaded
|
||||
- name: logind_restart
|
||||
service:
|
||||
name: systemd-logind
|
||||
state: restarted
|
||||
- name: with_faillock_enable
|
||||
command: authselect enable-feature with-faillock
|
||||
- name: do_reboot
|
||||
reboot:
|
||||
pre_reboot_delay: 60
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,5 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
@@ -11,76 +12,82 @@ import os
|
||||
import xml.etree.ElementTree as ET
|
||||
import xml.dom.minidom
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'xml'
|
||||
CALLBACK_NAME = 'stig_xml'
|
||||
CALLBACK_TYPE = "xml"
|
||||
CALLBACK_NAME = "stig_xml"
|
||||
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def _get_STIG_path(self):
|
||||
cwd = os.path.abspath('.')
|
||||
cwd = os.path.abspath(".")
|
||||
for dirpath, dirs, files in os.walk(cwd):
|
||||
if os.path.sep + 'files' in dirpath and '.xml' in files[0]:
|
||||
if os.path.sep + "files" in dirpath and ".xml" in files[0]:
|
||||
return os.path.join(cwd, dirpath, files[0])
|
||||
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
self.rules = {}
|
||||
self.stig_path = os.environ.get('STIG_PATH')
|
||||
self.XML_path = os.environ.get('XML_PATH')
|
||||
self.stig_path = os.environ.get("STIG_PATH")
|
||||
self.XML_path = os.environ.get("XML_PATH")
|
||||
if self.stig_path is None:
|
||||
self.stig_path = self._get_STIG_path()
|
||||
self._display.display('Using STIG_PATH: {}'.format(self.stig_path))
|
||||
self._display.display("Using STIG_PATH: {}".format(self.stig_path))
|
||||
if self.XML_path is None:
|
||||
self.XML_path = tempfile.mkdtemp() + "/xccdf-results.xml"
|
||||
self._display.display('Using XML_PATH: {}'.format(self.XML_path))
|
||||
self._display.display("Using XML_PATH: {}".format(self.XML_path))
|
||||
|
||||
print("Writing: {}".format(self.XML_path))
|
||||
STIG_name = os.path.basename(self.stig_path)
|
||||
ET.register_namespace('cdf', 'http://checklists.nist.gov/xccdf/1.2')
|
||||
self.tr = ET.Element('{http://checklists.nist.gov/xccdf/1.2}TestResult')
|
||||
self.tr.set('id', 'xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}'.format(STIG_name))
|
||||
ET.register_namespace("cdf", "http://checklists.nist.gov/xccdf/1.2")
|
||||
self.tr = ET.Element("{http://checklists.nist.gov/xccdf/1.2}TestResult")
|
||||
self.tr.set(
|
||||
"id",
|
||||
"xccdf_mil.disa.stig_testresult_scap_mil.disa_comp_{}".format(STIG_name),
|
||||
)
|
||||
endtime = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
|
||||
self.tr.set('end-time', endtime)
|
||||
tg = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}target')
|
||||
self.tr.set("end-time", endtime)
|
||||
tg = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}target")
|
||||
tg.text = platform.node()
|
||||
|
||||
def _get_rev(self, nid):
|
||||
with open(self.stig_path, 'r') as f:
|
||||
r = 'SV-{}r(?P<rev>\d+)_rule'.format(nid)
|
||||
with open(self.stig_path, "r") as f:
|
||||
r = "SV-{}r(?P<rev>\d+)_rule".format(nid)
|
||||
m = re.search(r, f.read())
|
||||
if m:
|
||||
rev = m.group('rev')
|
||||
rev = m.group("rev")
|
||||
else:
|
||||
rev = '0'
|
||||
rev = "0"
|
||||
return rev
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
name = result._task.get_name()
|
||||
m = re.search('stigrule_(?P<id>\d+)', name)
|
||||
m = re.search("stigrule_(?P<id>\d+)", name)
|
||||
if m:
|
||||
nid = m.group('id')
|
||||
nid = m.group("id")
|
||||
else:
|
||||
return
|
||||
rev = self._get_rev(nid)
|
||||
key = "{}r{}".format(nid, rev)
|
||||
if self.rules.get(key, 'Unknown') != False:
|
||||
if self.rules.get(key, "Unknown") != False:
|
||||
self.rules[key] = result.is_changed()
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
for rule, changed in self.rules.items():
|
||||
state = 'fail' if changed else 'pass'
|
||||
rr = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}rule-result')
|
||||
rr.set('idref', 'xccdf_mil.disa.stig_rule_SV-{}_rule'.format(rule))
|
||||
rs = ET.SubElement(rr, '{http://checklists.nist.gov/xccdf/1.2}result')
|
||||
state = "fail" if changed else "pass"
|
||||
rr = ET.SubElement(
|
||||
self.tr, "{http://checklists.nist.gov/xccdf/1.2}rule-result"
|
||||
)
|
||||
rr.set("idref", "xccdf_mil.disa.stig_rule_SV-{}_rule".format(rule))
|
||||
rs = ET.SubElement(rr, "{http://checklists.nist.gov/xccdf/1.2}result")
|
||||
rs.text = state
|
||||
passing = len(self.rules) - sum(self.rules.values())
|
||||
sc = ET.SubElement(self.tr, '{http://checklists.nist.gov/xccdf/1.2}score')
|
||||
sc.set('maximum', str(len(self.rules)))
|
||||
sc.set('system', 'urn:xccdf:scoring:flat-unweighted')
|
||||
sc = ET.SubElement(self.tr, "{http://checklists.nist.gov/xccdf/1.2}score")
|
||||
sc.set("maximum", str(len(self.rules)))
|
||||
sc.set("system", "urn:xccdf:scoring:flat-unweighted")
|
||||
sc.text = str(passing)
|
||||
with open(self.XML_path, 'wb') as f:
|
||||
with open(self.XML_path, "wb") as f:
|
||||
out = ET.tostring(self.tr)
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding='utf-8')
|
||||
pretty = xml.dom.minidom.parseString(out).toprettyxml(encoding="utf-8")
|
||||
f.write(pretty)
|
||||
|
||||
@@ -0,0 +1,131 @@
|
||||
Role Name
|
||||
=========
|
||||
|
||||
This Ansible role helps configure Operators on the Openshift Cluster to support VM migrations. Tasks include
|
||||
- Configure Catalog Sources to use mirroring repository for Operators
|
||||
- Create and configure Operators
|
||||
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
|
||||
|
||||
Role Variables
|
||||
--------------
|
||||
|
||||
The task `operators/catalog_sources.yml` needs following variables:
|
||||
|
||||
- **Variable Name**: `cluster_config_catalog_sources`
|
||||
- **Type**: List
|
||||
- **Description**: A list of custom CatalogSources configurations used as loop variables to generate Kubernetes manifest files from the template `catalog_source.j2` for CatalogSource. If the variable is not available, no manifest is created.
|
||||
- **Example**:
|
||||
```yaml
|
||||
cluster_config_catalog_sources:
|
||||
- name: redhat-marketplace2
|
||||
source_type: grpc
|
||||
display_name: Mirror to Red Hat Marketplace
|
||||
image_path: internal-registry.example.com/operator:v1
|
||||
priority: '-300'
|
||||
icon:
|
||||
base64data: ''
|
||||
mediatype: ''
|
||||
publisher: redhat
|
||||
address: ''
|
||||
grpc_pod_config: |
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
node-role.kubernetes.io/master: ''
|
||||
priorityClassName: system-cluster-critical
|
||||
securityContextConfig: restricted
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
key: node.kubernetes.io/unreachable
|
||||
operator: Exists
|
||||
tolerationSeconds: 120
|
||||
- effect: NoExecute
|
||||
key: node.kubernetes.io/not-ready
|
||||
operator: Exists
|
||||
tolerationSeconds: 120
|
||||
registry_poll_interval: 10m
|
||||
```
|
||||
|
||||
The task `operators/operator_config.yaml` needs following variables:
|
||||
|
||||
- **Variable Name**: `cluster_config_operators`
|
||||
- **Type**: List
|
||||
- **Description**: A list of operators to be installed on OCP cluster
|
||||
- **Variable Name**: `cluster_config_[OPERATOR_NAME]`
|
||||
- **Type**: Dict
|
||||
- **Description**: Configuration specific to each operator listed in `cluster_config_operators`. Includes settings for namespace, operator group, subscription, and any extra resources
|
||||
- **Example**: Assume the `cluster_config_operators` specifies these operators:
|
||||
```yaml
|
||||
cluster_config_operators:
|
||||
- cnv
|
||||
- oadp
|
||||
```
|
||||
then the corresponding `cluster_config_mtv` and `cluster_config_cnv` can be configured as following:
|
||||
```yaml
|
||||
cluster_config_cnv_namespace: openshift-cnv
|
||||
cluster_config_cnv:
|
||||
namespace:
|
||||
name: "{{ cluster_config_cnv_namespace }}"
|
||||
operator_group:
|
||||
name: kubevirt-hyperconverged-group
|
||||
target_namespaces:
|
||||
- "{{ cluster_config_cnv_namespace }}"
|
||||
subscription:
|
||||
name: kubevirt-hyperconverged
|
||||
starting_csv: kubevirt-hyperconverged-operator.v4.13.8
|
||||
extra_resources:
|
||||
- apiVersion: hco.kubevirt.io/v1beta1
|
||||
kind: HyperConverged
|
||||
metadata:
|
||||
name: kubevirt-hyperconverged
|
||||
namespace: "{{ cluster_config_cnv_namespace }}"
|
||||
spec:
|
||||
BareMetalPlatform: true
|
||||
|
||||
cluster_config_oadp_namespace: openshift-adp
|
||||
cluster_config_oadp:
|
||||
namespace:
|
||||
name: "{{ cluster_config_oadp_namespace }}"
|
||||
operator_group:
|
||||
name: redhat-oadp-operator-group
|
||||
target_namespaces:
|
||||
- "{{ cluster_config_oadp_namespace }}"
|
||||
subscription:
|
||||
name: redhat-oadp-operator-subscription
|
||||
spec_name: redhat-oadp-operator
|
||||
```
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
|
||||
|
||||
Example Playbook
|
||||
----------------
|
||||
|
||||
An example of configuring a CatalogSource resource:
|
||||
```
|
||||
- name: Configure Catalog Sources for Operators
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
tasks:
|
||||
- ansible.builtin.include_role:
|
||||
name: cluster_config
|
||||
tasks_from: operators/catalog_sources
|
||||
```
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
BSD
|
||||
|
||||
Author Information
|
||||
------------------
|
||||
|
||||
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
# defaults file for cluster_config
|
||||
cluster_config_operators:
|
||||
- cnv
|
||||
|
||||
cluster_config_cnv:
|
||||
checkplan: true
|
||||
namespace:
|
||||
name: &cluster_config_cnv_namespace openshift-cnv
|
||||
operator_group:
|
||||
name: kubevirt-hyperconverged-group
|
||||
target_namespaces:
|
||||
- *cluster_config_cnv_namespace
|
||||
subscription:
|
||||
name: kubevirt-hyperconverged
|
||||
extra_resources:
|
||||
- apiVersion: hco.kubevirt.io/v1beta1
|
||||
kind: HyperConverged
|
||||
metadata:
|
||||
name: kubevirt-hyperconverged
|
||||
namespace: *cluster_config_cnv_namespace
|
||||
spec:
|
||||
BareMetalPlatform: true
|
||||
@@ -0,0 +1,2 @@
|
||||
---
|
||||
# handlers file for cluster_config
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
- name: Configure Operators
|
||||
ansible.builtin.import_tasks: operators/operator_config.yml
|
||||
@@ -0,0 +1,37 @@
|
||||
---
|
||||
- name: Retrieve Operator name
|
||||
ansible.builtin.set_fact:
|
||||
_operator: "{{ vars['cluster_config_' + _operator_name] }}"
|
||||
- name: Configure Operator {{ _operator_name }}
|
||||
redhat.openshift.k8s:
|
||||
state: present
|
||||
template:
|
||||
- operators/namespace.yml.j2
|
||||
- operators/operator_group.yml.j2
|
||||
- operators/subscription.yml.j2
|
||||
- name: Query for install plan
|
||||
kubernetes.core.k8s_info:
|
||||
api_version: operators.coreos.com/v1alpha1
|
||||
kind: InstallPlan
|
||||
namespace: "{{ _operator.namespace.name }}"
|
||||
register: r_install_plans
|
||||
retries: 30
|
||||
delay: 5
|
||||
until:
|
||||
- r_install_plans.resources | default([]) | length > 0
|
||||
- r_install_plans.resources[0].status is defined
|
||||
- r_install_plans.resources[0].status.phase == "Complete"
|
||||
when:
|
||||
- _operator.checkplan is defined
|
||||
- _operator.checkplan | bool
|
||||
|
||||
- name: Configure extra resources for Operator {{ _operator_name }}
|
||||
redhat.openshift.k8s:
|
||||
state: present
|
||||
definition: "{{ item }}"
|
||||
register: creation_result
|
||||
loop: "{{ _operator.extra_resources }}"
|
||||
retries: 30
|
||||
delay: 5
|
||||
until: creation_result is success
|
||||
when: _operator.extra_resources is defined
|
||||
@@ -0,0 +1,7 @@
|
||||
---
|
||||
- name: Configure custom CatalogSource for Operators
|
||||
redhat.openshift.k8s:
|
||||
state: present
|
||||
template: operators/catalog_source.j2
|
||||
loop: "{{ cluster_config_catalog_sources }}"
|
||||
when: cluster_config_catalog_sources is defined
|
||||
@@ -0,0 +1,59 @@
|
||||
---
|
||||
- name: Create node-health-check operator namespace
|
||||
redhat.openshift.k8s:
|
||||
name: openshift-workload-availability
|
||||
api_version: v1
|
||||
kind: Namespace
|
||||
state: present
|
||||
|
||||
- name: Create node-health-check operator group
|
||||
redhat.openshift.k8s:
|
||||
state: present
|
||||
definition:
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
generateName: openshift-workload-availability-
|
||||
annotations:
|
||||
olm.providedAPIs: >-
|
||||
NodeHealthCheck.v1alpha1.remediation.medik8s.io,SelfNodeRemediation.v1alpha1.self-node-remediation.medik8s.io,SelfNodeRemediationConfig.v1alpha1.self-node-remediation.medik8s.io,SelfNodeRemediationTemplate.v1alpha1.self-node-remediation.medik8s.io
|
||||
namespace: openshift-workload-availability
|
||||
spec:
|
||||
upgradeStrategy: Default
|
||||
|
||||
- name: Create node-health-check operator subscription
|
||||
redhat.openshift.k8s:
|
||||
state: present
|
||||
definition:
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
labels:
|
||||
operators.coreos.com/node-healthcheck-operator.openshift-workload-availability: ''
|
||||
name: node-health-check-operator
|
||||
namespace: openshift-workload-availability
|
||||
spec:
|
||||
channel: stable
|
||||
installPlanApproval: Automatic
|
||||
name: node-healthcheck-operator
|
||||
source: redhat-operators
|
||||
sourceNamespace: openshift-marketplace
|
||||
|
||||
- name: Create Self Node Remediation subscription
|
||||
redhat.openshift.k8s:
|
||||
state: present
|
||||
definition:
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: self-node-remediation-stable-redhat-operators-openshift-marketplace
|
||||
namespace: openshift-workload-availability
|
||||
labels:
|
||||
operators.coreos.com/self-node-remediation.openshift-workload-availability: ''
|
||||
spec:
|
||||
channel: stable
|
||||
installPlanApproval: Automatic
|
||||
name: self-node-remediation
|
||||
source: redhat-operators
|
||||
sourceNamespace: openshift-marketplace
|
||||
startingCSV: self-node-remediation.v0.8.0
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
- name: Configure Operators
|
||||
ansible.builtin.include_tasks: _operator_config_item.yml
|
||||
loop: "{{ cluster_config_operators }}"
|
||||
loop_control:
|
||||
loop_var: _operator_name
|
||||
@@ -0,0 +1,34 @@
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: CatalogSource
|
||||
metadata:
|
||||
name: {{ item.name }}
|
||||
namespace: openshift-marketplace
|
||||
spec:
|
||||
sourceType: {{ item.source_type | d('grpc',true) }}
|
||||
image: {{ item.image_path }}
|
||||
{% if item.display_name is defined -%}
|
||||
displayName: {{ item.display_name }}
|
||||
{% endif -%}
|
||||
{% if item.priority is defined -%}
|
||||
priority: {{ item.priority }}
|
||||
{% endif -%}
|
||||
{% if item.grpc_pod_config is defined -%}
|
||||
grpcPodConfig:
|
||||
{{ item.grpc_pod_config | indent(4) }}
|
||||
{% endif -%}
|
||||
{% if item.icon is defined -%}
|
||||
icon:
|
||||
base64data: '{{ item.icon.base64data or '' }}'
|
||||
mediatype: '{{ item.icon.mediatype or '' }}'
|
||||
{% endif -%}
|
||||
{% if item.publisher is defined -%}
|
||||
publisher: {{ item.publisher }}
|
||||
{% endif -%}
|
||||
{% if item.address is defined -%}
|
||||
address: {{ item.address }}
|
||||
{% endif -%}
|
||||
{% if item.registry_poll_interval is defined -%}
|
||||
updateStrategy:
|
||||
registryPoll:
|
||||
interval: {{ item.registry_poll_interval }}
|
||||
{% endif -%}
|
||||
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: {{ _operator.namespace.name }}
|
||||
{% if _operator.namespace.labels is defined %}
|
||||
labels:
|
||||
{% for key, value in _operator.namespace.labels.items() -%}
|
||||
{{ key }}: "{{ value }}"
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
@@ -0,0 +1,12 @@
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: {{ _operator.operator_group.name }}
|
||||
namespace: {{ _operator.operator_group.namespace | d(_operator.namespace.name, true) }}
|
||||
spec:
|
||||
{% if _operator.operator_group.target_namespaces is defined -%}
|
||||
targetNamespaces:
|
||||
{% for item in _operator.operator_group.target_namespaces %}
|
||||
- {{ item }}
|
||||
{% endfor %}
|
||||
{% endif -%}
|
||||
@@ -0,0 +1,14 @@
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: {{ _operator.subscription.name }}
|
||||
namespace: "{{ _operator.subscription.namespace | d(_operator.namespace.name, true) }}"
|
||||
spec:
|
||||
channel: {{ _operator.subscription.channel | d('stable', true) }}
|
||||
installPlanApproval: {{ _operator.subscription.install_plan_approval | d('Automatic', true) }}
|
||||
name: {{ _operator.subscription.spec_name | d(_operator.subscription.name, true) }}
|
||||
source: {{ _operator.subscription.source | d('redhat-operators', true) }}
|
||||
sourceNamespace: {{ _operator.subscription.source_namespace | d('openshift-marketplace', true) }}
|
||||
{% if _operator.subscription.starting_csv is defined %}
|
||||
startingCSV: {{ _operator.subscription.starting_csv }}
|
||||
{% endif -%}
|
||||
@@ -0,0 +1 @@
|
||||
localhost
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
- name: Include cluster_config role
|
||||
hosts: localhost
|
||||
remote_user: root
|
||||
roles:
|
||||
- cluster_config
|
||||
@@ -0,0 +1,2 @@
|
||||
---
|
||||
# vars file for cluster_config
|
||||
@@ -0,0 +1,13 @@
|
||||
---
|
||||
extends: default
|
||||
|
||||
rules:
|
||||
comments:
|
||||
require-starting-space: false
|
||||
min-spaces-from-content: 1
|
||||
comments-indentation: disable
|
||||
indentation:
|
||||
indent-sequences: consistent
|
||||
line-length:
|
||||
max: 120
|
||||
allow-non-breakable-inline-mappings: true
|
||||
@@ -0,0 +1,16 @@
|
||||
---
|
||||
# --------------------------------------------------------
|
||||
# Ansible Automation Platform Controller URL
|
||||
# --------------------------------------------------------
|
||||
# eda_controller_aap_controller_url: [Required]
|
||||
|
||||
# --------------------------------------------------------
|
||||
# Workload: eda_controller
|
||||
# --------------------------------------------------------
|
||||
eda_controller_project: "aap"
|
||||
eda_controller_project_app_name: "eda-controller"
|
||||
|
||||
# eda_controller_admin_password: "{{ common_password }}"
|
||||
|
||||
eda_controller_cluster_rolebinding_name: eda_default
|
||||
eda_controller_cluster_rolebinding_role: cluster-admin
|
||||
@@ -0,0 +1,14 @@
|
||||
---
|
||||
galaxy_info:
|
||||
role_name: eda_controller
|
||||
author: Mitesh Sharma (mitsharm@redhat.com)
|
||||
description: |
|
||||
Installs EDA on OpenShift
|
||||
license: GPLv3
|
||||
min_ansible_version: "2.9"
|
||||
platforms: []
|
||||
galaxy_tags:
|
||||
- eda
|
||||
- openshift
|
||||
- aap
|
||||
dependencies: []
|
||||
@@ -0,0 +1,6 @@
|
||||
== eda_controller
|
||||
|
||||
This role installs EDA on OpenShift, mostly copied from https://github.com/redhat-cop/agnosticd/.
|
||||
|
||||
== Dependencies
|
||||
Role: automation_controller_platform
|
||||
@@ -0,0 +1,54 @@
|
||||
---
|
||||
- name: Setup environment vars
|
||||
block:
|
||||
- name: Create secret and Install EDA
|
||||
kubernetes.core.k8s:
|
||||
state: present
|
||||
definition: "{{ lookup('template', __definition) }}"
|
||||
loop:
|
||||
- eda_admin_secret.j2
|
||||
- eda_controller.j2
|
||||
loop_control:
|
||||
loop_var: __definition
|
||||
|
||||
- name: Retrieve created route
|
||||
kubernetes.core.k8s_info:
|
||||
api_version: "route.openshift.io/v1"
|
||||
kind: Route
|
||||
name: "{{ eda_controller_project_app_name }}"
|
||||
namespace: "{{ eda_controller_project }}"
|
||||
register: r_eda_route
|
||||
until: r_eda_route.resources[0].spec.host is defined
|
||||
retries: 30
|
||||
delay: 45
|
||||
|
||||
- name: Get eda-controller route hostname
|
||||
ansible.builtin.set_fact:
|
||||
eda_controller_hostname: "{{ r_eda_route.resources[0].spec.host }}"
|
||||
|
||||
- name: Wait for eda_controller to be running
|
||||
ansible.builtin.uri:
|
||||
url: https://{{ eda_controller_hostname }}/api/eda/v1/users/me/awx-tokens/
|
||||
user: "admin"
|
||||
password: "{{ lookup('ansible.builtin.env', 'CONTROLLER_PASSWORD') }}"
|
||||
method: GET
|
||||
force_basic_auth: true
|
||||
validate_certs: false
|
||||
body_format: json
|
||||
status_code: 200
|
||||
register: r_result
|
||||
until: not r_result.failed
|
||||
retries: 60
|
||||
delay: 45
|
||||
|
||||
- name: Create Rolebinding for Rulebook Activations
|
||||
kubernetes.core.k8s:
|
||||
state: present
|
||||
definition: "{{ lookup('template', 'cluster_rolebinding.j2') }}"
|
||||
|
||||
- name: Display EDA Controller URL
|
||||
ansible.builtin.debug:
|
||||
msg:
|
||||
- "EDA Controller URL: https://{{ eda_controller_hostname }}"
|
||||
- "EDA Controller Admin Login: admin"
|
||||
- "EDA Controller Admin Password: <same as the Controller Admin password>"
|
||||
@@ -0,0 +1,13 @@
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ eda_controller_cluster_rolebinding_name }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: {{ eda_controller_project }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ eda_controller_cluster_rolebinding_role }}
|
||||
@@ -0,0 +1,15 @@
|
||||
---
|
||||
kind: Secret
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ eda_controller_project_app_name }}-admin-password
|
||||
namespace: {{ eda_controller_project }}
|
||||
labels:
|
||||
app.kubernetes.io/component: eda
|
||||
app.kubernetes.io/managed-by: eda-operator
|
||||
app.kubernetes.io/name: {{ eda_controller_project_app_name }}
|
||||
app.kubernetes.io/operator-version: '2.4'
|
||||
app.kubernetes.io/part-of: {{ eda_controller_project_app_name }}
|
||||
data:
|
||||
password: "{{ lookup('ansible.builtin.env', 'CONTROLLER_PASSWORD') | b64encode }}"
|
||||
type: Opaque
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
apiVersion: eda.ansible.com/v1alpha1
|
||||
kind: EDA
|
||||
metadata:
|
||||
name: {{ eda_controller_project_app_name }}
|
||||
namespace: {{ eda_controller_project }}
|
||||
spec:
|
||||
route_tls_termination_mechanism: Edge
|
||||
ingress_type: Route
|
||||
loadbalancer_port: 80
|
||||
no_log: true
|
||||
image_pull_policy: IfNotPresent
|
||||
ui:
|
||||
replicas: 1
|
||||
set_self_labels: true
|
||||
api:
|
||||
gunicorn_workers: 2
|
||||
replicas: 1
|
||||
redis:
|
||||
replicas: 1
|
||||
admin_user: admin
|
||||
loadbalancer_protocol: http
|
||||
worker:
|
||||
replicas: 3
|
||||
automation_server_url: '{{ lookup('ansible.builtin.env', 'CONTROLLER_HOST') }}'
|
||||
admin_password_secret: {{ eda_controller_project_app_name }}-admin-password
|
||||
@@ -2,15 +2,15 @@
|
||||
|
||||
from ansible.module_utils.basic import * # noqa
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: scan_packages
|
||||
short_description: Return installed packages information as fact data
|
||||
description:
|
||||
- Return information about installed packages as fact data
|
||||
'''
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
EXAMPLES = """
|
||||
# Example fact output:
|
||||
# host | success >> {
|
||||
# "ansible_facts": {
|
||||
@@ -34,21 +34,23 @@ EXAMPLES = '''
|
||||
# "name": "gcc-4.8-base"
|
||||
# }
|
||||
# ]
|
||||
'''
|
||||
"""
|
||||
|
||||
|
||||
def rpm_package_list():
|
||||
import rpm
|
||||
|
||||
trans_set = rpm.TransactionSet()
|
||||
installed_packages = []
|
||||
for package in trans_set.dbMatch():
|
||||
package_details = {
|
||||
'name':package[rpm.RPMTAG_NAME],
|
||||
'version':package[rpm.RPMTAG_VERSION],
|
||||
'release':package[rpm.RPMTAG_RELEASE],
|
||||
'epoch':package[rpm.RPMTAG_EPOCH],
|
||||
'arch':package[rpm.RPMTAG_ARCH],
|
||||
'source':'rpm' }
|
||||
"name": package[rpm.RPMTAG_NAME],
|
||||
"version": package[rpm.RPMTAG_VERSION],
|
||||
"release": package[rpm.RPMTAG_RELEASE],
|
||||
"epoch": package[rpm.RPMTAG_EPOCH],
|
||||
"arch": package[rpm.RPMTAG_ARCH],
|
||||
"source": "rpm",
|
||||
}
|
||||
if installed_packages == []:
|
||||
installed_packages = [package_details]
|
||||
else:
|
||||
@@ -58,16 +60,20 @@ def rpm_package_list():
|
||||
|
||||
def deb_package_list():
|
||||
import apt
|
||||
|
||||
apt_cache = apt.Cache()
|
||||
installed_packages = []
|
||||
apt_installed_packages = [pk for pk in apt_cache.keys() if apt_cache[pk].is_installed]
|
||||
apt_installed_packages = [
|
||||
pk for pk in apt_cache.keys() if apt_cache[pk].is_installed
|
||||
]
|
||||
for package in apt_installed_packages:
|
||||
ac_pkg = apt_cache[package].installed
|
||||
package_details = {
|
||||
'name':package,
|
||||
'version':ac_pkg.version,
|
||||
'arch':ac_pkg.architecture,
|
||||
'source':'apt'}
|
||||
"name": package,
|
||||
"version": ac_pkg.version,
|
||||
"arch": ac_pkg.architecture,
|
||||
"source": "apt",
|
||||
}
|
||||
if installed_packages == []:
|
||||
installed_packages = [package_details]
|
||||
else:
|
||||
@@ -76,13 +82,11 @@ def deb_package_list():
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec = dict(os_family=dict(required=True))
|
||||
)
|
||||
ans_os = module.params['os_family']
|
||||
if ans_os in ('RedHat', 'Suse', 'openSUSE Leap'):
|
||||
module = AnsibleModule(argument_spec=dict(os_family=dict(required=True)))
|
||||
ans_os = module.params["os_family"]
|
||||
if ans_os in ("RedHat", "Suse", "openSUSE Leap"):
|
||||
packages = rpm_package_list()
|
||||
elif ans_os == 'Debian':
|
||||
elif ans_os == "Debian":
|
||||
packages = deb_package_list()
|
||||
else:
|
||||
packages = None
|
||||
|
||||
@@ -3,16 +3,18 @@
|
||||
import re
|
||||
from ansible.module_utils.basic import * # noqa
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: scan_services
|
||||
short_description: Return service state information as fact data
|
||||
description:
|
||||
- Return service state information as fact data for various service management utilities
|
||||
'''
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
EXAMPLES = """
|
||||
---
|
||||
- monit: scan_services
|
||||
|
||||
# Example fact output:
|
||||
# host | success >> {
|
||||
# "ansible_facts": {
|
||||
@@ -29,18 +31,17 @@ EXAMPLES = '''
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
'''
|
||||
# }
|
||||
"""
|
||||
|
||||
|
||||
class BaseService(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.incomplete_warning = False
|
||||
|
||||
|
||||
class ServiceScanService(BaseService):
|
||||
|
||||
def gather_services(self):
|
||||
services = {}
|
||||
service_path = self.module.get_bin_path("service")
|
||||
@@ -51,7 +52,10 @@ class ServiceScanService(BaseService):
|
||||
|
||||
# sysvinit
|
||||
if service_path is not None and chkconfig_path is None:
|
||||
rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
'%s --status-all 2>&1 | grep -E "\\[ (\\+|\\-) \\]"' % service_path,
|
||||
use_unsafe_shell=True,
|
||||
)
|
||||
for line in stdout.split("\n"):
|
||||
line_data = line.split()
|
||||
if len(line_data) < 4:
|
||||
@@ -61,84 +65,112 @@ class ServiceScanService(BaseService):
|
||||
service_state = "running"
|
||||
else:
|
||||
service_state = "stopped"
|
||||
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
|
||||
services[service_name] = {
|
||||
"name": service_name,
|
||||
"state": service_state,
|
||||
"source": "sysv",
|
||||
}
|
||||
|
||||
# Upstart
|
||||
if initctl_path is not None and chkconfig_path is None:
|
||||
p = re.compile('^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
|
||||
p = re.compile(
|
||||
"^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$"
|
||||
)
|
||||
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
|
||||
real_stdout = stdout.replace("\r", "")
|
||||
for line in real_stdout.split("\n"):
|
||||
m = p.match(line)
|
||||
if not m:
|
||||
continue
|
||||
service_name = m.group('name')
|
||||
service_goal = m.group('goal')
|
||||
service_state = m.group('state')
|
||||
if m.group('pid'):
|
||||
pid = m.group('pid')
|
||||
service_name = m.group("name")
|
||||
service_goal = m.group("goal")
|
||||
service_state = m.group("state")
|
||||
if m.group("pid"):
|
||||
pid = m.group("pid")
|
||||
else:
|
||||
pid = None # NOQA
|
||||
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
|
||||
payload = {
|
||||
"name": service_name,
|
||||
"state": service_state,
|
||||
"goal": service_goal,
|
||||
"source": "upstart",
|
||||
}
|
||||
services[service_name] = payload
|
||||
|
||||
# RH sysvinit
|
||||
elif chkconfig_path is not None:
|
||||
# print '%s --status-all | grep -E "is (running|stopped)"' % service_path
|
||||
p = re.compile(
|
||||
'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
|
||||
'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
|
||||
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
|
||||
"(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+"
|
||||
"[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)"
|
||||
)
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s" % chkconfig_path, use_unsafe_shell=True
|
||||
)
|
||||
# Check for special cases where stdout does not fit pattern
|
||||
match_any = False
|
||||
for line in stdout.split('\n'):
|
||||
for line in stdout.split("\n"):
|
||||
if p.match(line):
|
||||
match_any = True
|
||||
if not match_any:
|
||||
p_simple = re.compile('(?P<service>.*?)\s+(?P<rl0>on|off)')
|
||||
p_simple = re.compile("(?P<service>.*?)\s+(?P<rl0>on|off)")
|
||||
match_any = False
|
||||
for line in stdout.split('\n'):
|
||||
for line in stdout.split("\n"):
|
||||
if p_simple.match(line):
|
||||
match_any = True
|
||||
if match_any:
|
||||
# Try extra flags " -l --allservices" needed for SLES11
|
||||
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
|
||||
elif '--list' in stderr:
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s -l --allservices" % chkconfig_path, use_unsafe_shell=True
|
||||
)
|
||||
elif "--list" in stderr:
|
||||
# Extra flag needed for RHEL5
|
||||
rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
|
||||
for line in stdout.split('\n'):
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s --list" % chkconfig_path, use_unsafe_shell=True
|
||||
)
|
||||
for line in stdout.split("\n"):
|
||||
m = p.match(line)
|
||||
if m:
|
||||
service_name = m.group('service')
|
||||
service_state = 'stopped'
|
||||
if m.group('rl3') == 'on':
|
||||
rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
|
||||
service_name = m.group("service")
|
||||
service_state = "stopped"
|
||||
if m.group("rl3") == "on":
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s %s status" % (service_path, service_name),
|
||||
use_unsafe_shell=True,
|
||||
)
|
||||
service_state = rc
|
||||
if rc in (0,):
|
||||
service_state = 'running'
|
||||
service_state = "running"
|
||||
# elif rc in (1,3):
|
||||
else:
|
||||
if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
|
||||
if (
|
||||
"root" in stderr
|
||||
or "permission" in stderr.lower()
|
||||
or "not in sudoers" in stderr.lower()
|
||||
):
|
||||
self.incomplete_warning = True
|
||||
continue
|
||||
else:
|
||||
service_state = 'stopped'
|
||||
service_data = {"name": service_name, "state": service_state, "source": "sysv"}
|
||||
service_state = "stopped"
|
||||
service_data = {
|
||||
"name": service_name,
|
||||
"state": service_state,
|
||||
"source": "sysv",
|
||||
}
|
||||
services[service_name] = service_data
|
||||
return services
|
||||
|
||||
|
||||
class SystemctlScanService(BaseService):
|
||||
|
||||
def systemd_enabled(self):
|
||||
# Check if init is the systemd command, using comm as cmdline could be symlink
|
||||
try:
|
||||
f = open('/proc/1/comm', 'r')
|
||||
f = open("/proc/1/comm", "r")
|
||||
except IOError:
|
||||
# If comm doesn't exist, old kernel, no systemd
|
||||
return False
|
||||
for line in f:
|
||||
if 'systemd' in line:
|
||||
if "systemd" in line:
|
||||
return True
|
||||
return False
|
||||
|
||||
@@ -146,10 +178,16 @@ class SystemctlScanService(BaseService):
|
||||
services = {}
|
||||
if not self.systemd_enabled():
|
||||
return None
|
||||
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
|
||||
systemctl_path = self.module.get_bin_path(
|
||||
"systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"]
|
||||
)
|
||||
if systemctl_path is None:
|
||||
return None
|
||||
rc, stdout, stderr = self.module.run_command("%s list-unit-files --type=service | tail -n +2 | head -n -2" % systemctl_path, use_unsafe_shell=True)
|
||||
rc, stdout, stderr = self.module.run_command(
|
||||
"%s list-unit-files --type=service | tail -n +2 | head -n -2"
|
||||
% systemctl_path,
|
||||
use_unsafe_shell=True,
|
||||
)
|
||||
for line in stdout.split("\n"):
|
||||
line_data = line.split()
|
||||
if len(line_data) != 2:
|
||||
@@ -158,7 +196,11 @@ class SystemctlScanService(BaseService):
|
||||
state_val = "running"
|
||||
else:
|
||||
state_val = "stopped"
|
||||
services[line_data[0]] = {"name": line_data[0], "state": state_val, "source": "systemd"}
|
||||
services[line_data[0]] = {
|
||||
"name": line_data[0],
|
||||
"state": state_val,
|
||||
"source": "systemd",
|
||||
}
|
||||
return services
|
||||
|
||||
|
||||
@@ -175,11 +217,16 @@ def main():
|
||||
if svcmod.incomplete_warning:
|
||||
incomplete_warning = True
|
||||
if len(all_services) == 0:
|
||||
results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
|
||||
results = dict(
|
||||
skipped=True,
|
||||
msg="Failed to find any services. Sometimes this is due to insufficient privileges.",
|
||||
)
|
||||
else:
|
||||
results = dict(ansible_facts=dict(services=all_services))
|
||||
if incomplete_warning:
|
||||
results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
|
||||
results[
|
||||
"msg"
|
||||
] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
|
||||
module.exit_json(**results)
|
||||
|
||||
|
||||
|
||||
@@ -1,31 +1,34 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: win_scan_packages
|
||||
short_description: Return Package state information as fact data
|
||||
description:
|
||||
- Return Package state information as fact data for various Packages
|
||||
'''
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
EXAMPLES = """
|
||||
- monit: win_scan_packages
|
||||
|
||||
# Example fact output:
|
||||
# host | success >> {
|
||||
# "ansible_facts": {
|
||||
# "packages": [
|
||||
{
|
||||
"name": "Mozilla Firefox 76.0.1 (x64 en-US)",
|
||||
"version": "76.0.1",
|
||||
"publisher": "Mozilla",
|
||||
"arch": "Win64"
|
||||
},
|
||||
{
|
||||
"name": "Mozilla Maintenance Service",
|
||||
"version": "76.0.1",
|
||||
"publisher": "Mozilla",
|
||||
"arch": "Win64"
|
||||
},
|
||||
# {
|
||||
# "name": "Mozilla Firefox 76.0.1 (x64 en-US)",
|
||||
# "version": "76.0.1",
|
||||
# "publisher": "Mozilla",
|
||||
# "arch": "Win64"
|
||||
# },
|
||||
# {
|
||||
# "name": "Mozilla Maintenance Service",
|
||||
# "version": "76.0.1",
|
||||
# "publisher": "Mozilla",
|
||||
# "arch": "Win64"
|
||||
# }
|
||||
'''
|
||||
# ]
|
||||
# }
|
||||
# }
|
||||
"""
|
||||
|
||||
@@ -1,34 +1,37 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
DOCUMENTATION = '''
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: win_scan_services
|
||||
short_description: Return service state information as fact data
|
||||
description:
|
||||
- Return service state information as fact data for various service management utilities
|
||||
'''
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
EXAMPLES = """
|
||||
- monit: win_scan_services
|
||||
|
||||
# Example fact output:
|
||||
# host | success >> {
|
||||
# "ansible_facts": {
|
||||
# "services": [
|
||||
{
|
||||
"name": "AllJoyn Router Service",
|
||||
"win_svc_name": "AJRouter",
|
||||
"state": "stopped"
|
||||
},
|
||||
{
|
||||
"name": "Application Layer Gateway Service",
|
||||
"win_svc_name": "ALG",
|
||||
"state": "stopped"
|
||||
},
|
||||
{
|
||||
"name": "Application Host Helper Service",
|
||||
"win_svc_name": "AppHostSvc",
|
||||
"state": "running"
|
||||
},
|
||||
# {
|
||||
# "name": "AllJoyn Router Service",
|
||||
# "win_svc_name": "AJRouter",
|
||||
# "state": "stopped"
|
||||
# },
|
||||
# {
|
||||
# "name": "Application Layer Gateway Service",
|
||||
# "win_svc_name": "ALG",
|
||||
# "state": "stopped"
|
||||
# },
|
||||
# {
|
||||
# "name": "Application Host Helper Service",
|
||||
# "win_svc_name": "AppHostSvc",
|
||||
# "state": "running"
|
||||
# }
|
||||
'''
|
||||
# ]
|
||||
# }
|
||||
# }
|
||||
"""
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
---
|
||||
file_path: "{{ web_path | default('/var/www/html/reports') }}"
|
||||
vendor:
|
||||
file_path: "{{ web_path | default('/var/www/html/reports') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
vendor: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
ios: &my_value 'Cisco'
|
||||
nxos: *my_value
|
||||
iosxr: *my_value
|
||||
junos: "Juniper"
|
||||
eos: "Arista"
|
||||
transport:
|
||||
transport: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
cliconf: "Network_CLI"
|
||||
netconf: "NETCONF"
|
||||
nxapi: "NX-API"
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
detailedreport: true
|
||||
detailedreport: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
file_path: /var/www/html
|
||||
file_path: /var/www/html # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
---
|
||||
email_from: tower@shadowman.dev
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev
|
||||
to_emails_list: "{{ to_emails.split(',') }}"
|
||||
email_from: tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails_list: "{{ to_emails.split(',') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
file_path: /var/www/html
|
||||
file_path: /var/www/html # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
exclude_packages:
|
||||
exclude_packages: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- authselect
|
||||
- authselect-compat
|
||||
- authselect-libs
|
||||
- fprintd-pam
|
||||
allow_reboot: true
|
||||
allow_reboot: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
win_update_categories:
|
||||
win_update_categories: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- Application
|
||||
- Connectors
|
||||
- CriticalUpdates
|
||||
@@ -11,4 +11,4 @@ win_update_categories:
|
||||
- Tools
|
||||
- UpdateRollups
|
||||
- Updates
|
||||
allow_reboot: true
|
||||
allow_reboot: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
detailedreport: true
|
||||
detailedreport: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
file_path: /var/www/html/reports
|
||||
file_path: /var/www/html/reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
---
|
||||
email_from: tower@shadowman.dev
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev
|
||||
to_emails_list: "{{ to_emails.split(',') }}"
|
||||
email_from: tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails_list: "{{ to_emails.split(',') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
file_path: /var/www/html/reports
|
||||
file_path: /var/www/html/reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -0,0 +1,36 @@
|
||||
build_report_linux_patch
|
||||
========
|
||||
|
||||
Installs Apache and creates a report based on facts from Linux patching
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Must run on Apache server
|
||||
|
||||
Role Variables / Configuration
|
||||
--------------
|
||||
|
||||
N/A
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
N/A
|
||||
|
||||
Example Playbook
|
||||
----------------
|
||||
|
||||
The role can be used to create an html report on any number of Linux hosts using any number of Linux servers about their patching results(yum and dnf)
|
||||
|
||||
|
||||
```
|
||||
---
|
||||
- hosts: all
|
||||
|
||||
tasks:
|
||||
- name: Run Windows Report
|
||||
import_role:
|
||||
name: shadowman.reports.build_report_linux_patch
|
||||
|
||||
```
|
||||
@@ -0,0 +1,8 @@
|
||||
---
|
||||
email_from: tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails_list: "{{ to_emails.split(',') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
detailedreport: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
reports: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- linux.html
|
||||
- linuxpatch.html
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 1.1 MiB |
Binary file not shown.
|
After Width: | Height: | Size: 108 KiB |
@@ -0,0 +1,202 @@
|
||||
p.hostname {
|
||||
color: #000000;
|
||||
font-weight: bolder;
|
||||
font-size: large;
|
||||
margin: auto;
|
||||
width: 50%;
|
||||
}
|
||||
|
||||
#subtable {
|
||||
background: #ebebeb;
|
||||
margin: 0px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
#subtable tbody tr td {
|
||||
padding: 5px 5px 5px 5px;
|
||||
}
|
||||
|
||||
#subtable thead th {
|
||||
padding: 5px;
|
||||
}
|
||||
|
||||
* {
|
||||
-moz-box-sizing: border-box;
|
||||
-webkit-box-sizing: border-box;
|
||||
box-sizing: border-box;
|
||||
font-family: "Open Sans", "Helvetica";
|
||||
|
||||
}
|
||||
|
||||
a {
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
p {
|
||||
color: #ffffff;
|
||||
}
|
||||
h1 {
|
||||
text-align: center;
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
body {
|
||||
background:#353a40;
|
||||
padding: 0px;
|
||||
margin: 0px;
|
||||
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
|
||||
}
|
||||
|
||||
table {
|
||||
border-collapse: separate;
|
||||
background:#fff;
|
||||
@include border-radius(5px);
|
||||
@include box-shadow(0px 0px 5px rgba(0,0,0,0.3));
|
||||
}
|
||||
|
||||
.main_net_table {
|
||||
margin:50px auto;
|
||||
}
|
||||
|
||||
thead {
|
||||
@include border-radius(5px);
|
||||
}
|
||||
|
||||
thead th {
|
||||
font-size:16px;
|
||||
font-weight:400;
|
||||
color:#fff;
|
||||
@include text-shadow(1px 1px 0px rgba(0,0,0,0.5));
|
||||
text-align:left;
|
||||
padding:20px;
|
||||
border-top:1px solid #858d99;
|
||||
background: #353a40;
|
||||
|
||||
&:first-child {
|
||||
@include border-top-left-radius(5px);
|
||||
}
|
||||
|
||||
&:last-child {
|
||||
@include border-top-right-radius(5px);
|
||||
}
|
||||
}
|
||||
|
||||
tbody tr td {
|
||||
font-weight:400;
|
||||
color:#5f6062;
|
||||
font-size:13px;
|
||||
padding:20px 20px 20px 20px;
|
||||
border-bottom:1px solid #e0e0e0;
|
||||
}
|
||||
|
||||
tbody tr:nth-child(2n) {
|
||||
background:#f0f3f5;
|
||||
}
|
||||
|
||||
tbody tr:last-child td {
|
||||
border-bottom:none;
|
||||
&:first-child {
|
||||
@include border-bottom-left-radius(5px);
|
||||
}
|
||||
&:last-child {
|
||||
@include border-bottom-right-radius(5px);
|
||||
}
|
||||
}
|
||||
|
||||
td {
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
span.highlight {
|
||||
background-color: yellow;
|
||||
}
|
||||
|
||||
.expandclass {
|
||||
color: #5f6062;
|
||||
}
|
||||
|
||||
.content{
|
||||
display:none;
|
||||
margin: 10px;
|
||||
}
|
||||
|
||||
header {
|
||||
width: 100%;
|
||||
position: initial;
|
||||
float: initial;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
border-radius: 0;
|
||||
height: 88px;
|
||||
background-color: #171717;
|
||||
}
|
||||
|
||||
.header-container {
|
||||
margin: 0 auto;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
max-width: 1170px;
|
||||
padding: 0;
|
||||
float: initial;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.header-logo {
|
||||
width: 137px;
|
||||
border: 0;
|
||||
margin: 0;
|
||||
margin-left: 15px;
|
||||
}
|
||||
|
||||
.header-link {
|
||||
margin-left: 40px;
|
||||
text-decoration: none;
|
||||
cursor: pointer;
|
||||
text-transform: uppercase;
|
||||
font-size: 15px;
|
||||
font-family: 'Red Hat Text';
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.header-link:hover {
|
||||
text-shadow: 0 0 0.02px white;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
table.net_info td {
|
||||
padding: 5px;
|
||||
}
|
||||
|
||||
p.expandclass:hover {
|
||||
text-decoration: underline;
|
||||
color: #EE0000;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.summary_info {
|
||||
}
|
||||
|
||||
.ui-state-active, .ui-widget-content .ui-state-active, .ui-widget-header .ui-state-active, a.ui-button:active, .ui-button:active, .ui-button.ui-state-active:hover {
|
||||
border: 1px solid #5F0000;
|
||||
background: #EE0000;
|
||||
}
|
||||
|
||||
div#net_content {
|
||||
padding: 0px;
|
||||
height: auto !important;
|
||||
}
|
||||
|
||||
img.router_image {
|
||||
vertical-align: middle;
|
||||
padding: 0px 10px 10px 10px;
|
||||
width: 50px;
|
||||
}
|
||||
|
||||
table.net_info {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
p.internal_label {
|
||||
color: #000000;
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!-- Generator: Adobe Illustrator 24.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
||||
<svg version="1.1" id="Logos" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||
width="930.2px" height="350px" viewBox="0 0 930.2 350" style="enable-background:new 0 0 930.2 350;" xml:space="preserve">
|
||||
<style type="text/css">
|
||||
.st0{fill:#FFFFFF;}
|
||||
.st1{fill:#EE0000;}
|
||||
</style>
|
||||
<title>Logo-Red_Hat-Ansible_Automation_Platform-A-Reverse-RGB</title>
|
||||
<path class="st0" d="M383.3,228.5h18.8L446,335.7h-17.5l-12.4-31.4h-48l-12.6,31.4h-16.7L383.3,228.5z M410.9,291l-18.7-47l-18.7,47
|
||||
H410.9z"/>
|
||||
<path class="st0" d="M455.2,257.7h15.3v7.8c6.2-6.2,14.7-9.6,23.5-9.3c17.9,0,30.5,12.4,30.5,30.5v49h-15.3v-46.5
|
||||
c0-12.3-7.5-19.8-19.3-19.8c-7.8-0.3-15.1,3.6-19.3,10.1v56.1h-15.3V257.7z"/>
|
||||
<path class="st0" d="M543,315.5c8.1,6.4,16.7,9.8,25.4,9.8c11,0,18.7-4.8,18.7-11.7c0-5.5-4-8.7-12.6-10l-14.1-2
|
||||
c-15.5-2.3-23.3-9.5-23.3-21.6c0-14.1,12.3-23.6,30.5-23.6c11.3-0.1,22.3,3.4,31.5,9.9l-7.8,10.1c-8.6-5.7-16.4-8.1-24.7-8.1
|
||||
c-9.3,0-15.6,4.3-15.6,10.6c0,5.7,3.7,8.4,12.9,9.8l14.1,2c15.5,2.3,23.6,9.7,23.6,21.7c0,14-14.1,24.5-32.6,24.5
|
||||
c-13.5,0-25.6-4-34.2-11.5L543,315.5z"/>
|
||||
<path class="st0" d="M611.6,235.6c0-5.2,4.1-9.4,9.3-9.5c0,0,0,0,0,0c5.2-0.2,9.7,3.9,9.9,9.1c0.2,5.2-3.9,9.7-9.1,9.9
|
||||
c-0.2,0-0.5,0-0.7,0C615.8,245.1,611.6,240.9,611.6,235.6C611.6,235.7,611.6,235.7,611.6,235.6z M628.6,335.7h-15.3v-78h15.3V335.7z
|
||||
"/>
|
||||
<path class="st0" d="M685.5,336.9c-8.5,0-16.8-2.7-23.6-7.8v6.6h-15.2V228.5l15.3-3.4v40c6.6-5.6,15.1-8.7,23.7-8.6
|
||||
c22.1,0,39.4,17.7,39.4,40.1C725.2,319.1,707.9,336.9,685.5,336.9z M662,279.2v35.2c4.9,5.7,13,9.2,21.8,9.2
|
||||
c15,0,26.4-11.5,26.4-26.8c0-15.3-11.5-27-26.4-27C674.9,269.8,667.1,273.2,662,279.2z"/>
|
||||
<path class="st0" d="M755,335.7h-15.3V228.5l15.3-3.4V335.7z"/>
|
||||
<path class="st0" d="M810.5,337.1c-23,0-40.9-17.7-40.9-40.4c0-22.5,17.2-40.1,39.1-40.1c21.5,0,37.7,17.8,37.7,40.8v4.4h-61.6
|
||||
c2,13,13.2,22.5,26.4,22.4c7.2,0.2,14.2-2.3,19.8-6.8l9.8,9.7C832.1,333.7,821.5,337.4,810.5,337.1z M784.9,290.2h46.3
|
||||
c-2.3-11.9-11.5-20.8-22.8-20.8C796.5,269.4,787.2,277.8,784.9,290.2z"/>
|
||||
<path class="st1" d="M202.8,137.5c18.4,0,45.1-3.8,45.1-25.7c0.1-1.7-0.1-3.4-0.5-5l-11-47.7c-2.5-10.5-4.8-15.2-23.2-24.5
|
||||
c-14.3-7.3-45.5-19.4-54.7-19.4c-8.6,0-11.1,11.1-21.3,11.1c-9.8,0-17.1-8.3-26.4-8.3c-8.8,0-14.6,6-19,18.4c0,0-12.4,34.9-14,40
|
||||
c-0.3,0.9-0.4,1.9-0.4,2.9C77.6,92.9,131.1,137.5,202.8,137.5 M250.8,120.7c2.5,12.1,2.5,13.3,2.5,14.9c0,20.6-23.2,32.1-53.7,32.1
|
||||
c-69,0-129.3-40.3-129.3-67c0-3.7,0.8-7.4,2.2-10.8c-24.8,1.3-56.9,5.7-56.9,34c0,46.4,109.9,103.5,196.9,103.5
|
||||
c66.7,0,83.5-30.2,83.5-54C296.1,154.6,279.9,133.4,250.8,120.7"/>
|
||||
<path d="M250.7,120.7c2.5,12.1,2.5,13.3,2.5,14.9c0,20.6-23.2,32.1-53.7,32.1c-69,0-129.3-40.3-129.3-67c0-3.7,0.8-7.4,2.2-10.8
|
||||
l5.4-13.3c-0.3,0.9-0.4,1.9-0.4,2.8c0,13.6,53.5,58.1,125.2,58.1c18.4,0,45.1-3.8,45.1-25.7c0.1-1.7-0.1-3.4-0.5-5L250.7,120.7z"/>
|
||||
<path class="st0" d="M869.1,151.2c0,17.5,10.5,26,29.7,26c5.9-0.1,11.8-1,17.5-2.5v-20.3c-3.7,1.2-7.5,1.7-11.3,1.7
|
||||
c-7.9,0-10.8-2.5-10.8-9.9v-31.1h22.9V94.2h-22.9V67.7l-25,5.4v21.1h-16.6v20.9h16.6L869.1,151.2z M791,151.7
|
||||
c0-5.4,5.4-8.1,13.6-8.1c5,0,10,0.7,14.9,1.9V156c-4.8,2.6-10.2,3.9-15.6,3.9C795.9,159.9,791.1,156.8,791,151.7 M798.7,177.5
|
||||
c8.8,0,16-1.9,22.6-6.3v5h24.8v-52.5c0-20-13.5-30.9-35.9-30.9c-12.6,0-25,2.9-38.3,9l9,18.4c9.6-4,17.7-6.5,24.8-6.5
|
||||
c10.3,0,15.6,4,15.6,12.2v4c-6.1-1.6-12.3-2.4-18.6-2.3c-21.1,0-33.8,8.8-33.8,24.6C768.9,166.6,780.4,177.6,798.7,177.5
|
||||
M662.5,176.2h26.7v-42.5h44.6v42.5h26.7V67.7h-26.6v41.7h-44.6V67.7h-26.7L662.5,176.2z M561,135.1c0-11.8,9.3-20.8,21.5-20.8
|
||||
c6.4-0.1,12.6,2.1,17.4,6.4v28.6c-4.7,4.4-10.9,6.7-17.4,6.5C570.5,155.8,561,146.8,561,135.1 M600.2,176.1H625V62.3l-25,5.4v30.8
|
||||
c-6.4-3.6-13.6-5.5-20.9-5.4c-23.9,0-42.6,18.4-42.6,42c-0.3,23,18.1,41.9,41.1,42.2c0.2,0,0.5,0,0.7,0c7.9,0,15.6-2.5,22-7.1V176.1
|
||||
z M486.5,113.2c7.9,0,14.6,5.1,17.2,13h-34.2C471.9,118,478.2,113.2,486.5,113.2 M444.2,135.2c0,23.9,19.5,42.5,44.6,42.5
|
||||
c13.8,0,23.9-3.7,34.3-12.4l-16.6-14.7c-3.9,4-9.6,6.2-16.4,6.2c-8.8,0.2-16.8-4.9-20.2-13h58.4v-6.2c0-26-17.5-44.8-41.4-44.8
|
||||
c-23.2-0.4-42.4,18.2-42.7,41.5C444.2,134.6,444.2,134.9,444.2,135.2 M400.9,90.5c8.8,0,13.8,5.6,13.8,12.2s-5,12.2-13.8,12.2h-26.3
|
||||
V90.5H400.9z M347.9,176.2h26.7v-39.5h20.3l20.5,39.5h29.7l-23.9-43.4c12.4-5,20.5-17.1,20.4-30.5c0-19.5-15.3-34.5-38.3-34.5H348
|
||||
L347.9,176.2z"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 4.3 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 2.0 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 3.8 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 45 KiB |
@@ -0,0 +1,22 @@
|
||||
---
|
||||
- name: Define namespace
|
||||
redhat.openshift.k8s:
|
||||
wait: true
|
||||
state: present
|
||||
api_version: v1
|
||||
kind: Namespace
|
||||
name: patching-report
|
||||
|
||||
- name: Define deployment resources
|
||||
redhat.openshift.k8s:
|
||||
wait: true
|
||||
state: present
|
||||
namespace: patching-report
|
||||
definition: "{{ lookup('ansible.builtin.template', 'resources.yaml.j2') }}"
|
||||
register: resources_output
|
||||
|
||||
- name: Display link to patching report
|
||||
ansible.builtin.debug:
|
||||
msg:
|
||||
- "Patching report available at:"
|
||||
- "{{ resources_output.result.results[3].result.spec.port.targetPort }}://{{ resources_output.result.results[3].result.spec.host }}"
|
||||
@@ -0,0 +1,15 @@
|
||||
|
||||
|
||||
<div class="wrapper">
|
||||
<header>
|
||||
<div class="header-container">
|
||||
<a href="https://ansible.com">
|
||||
<img
|
||||
class="header-logo"
|
||||
src="redhat-ansible-logo.svg"
|
||||
title="Red Hat Ansible"
|
||||
alt="Red Hat Ansible"
|
||||
/>
|
||||
</a>
|
||||
</div>
|
||||
</header>
|
||||
@@ -0,0 +1,41 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title> Ansible Linux Automation Report </title>
|
||||
<link rel="stylesheet" type="text/css" href="//fonts.googleapis.com/css?family=Open+Sans" />
|
||||
<link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css">
|
||||
<link rel="stylesheet" href="new.css">
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.4/jquery.min.js"></script>
|
||||
<script src="https://code.jquery.com/jquery-1.12.4.js"></script>
|
||||
<script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
|
||||
<script src="https://www.kryogenix.org/code/browser/sorttable/sorttable.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<div class="wrapper">
|
||||
{% include 'header.j2' %}
|
||||
<section>
|
||||
<center>
|
||||
<h1>Ansible Automation Reports</h1>
|
||||
<h3><input type="search" placeholder="Search..." class="form-control search-input" data-table="main_net_table"/>
|
||||
</center>
|
||||
<table class="table table-striped mt32 main_net_table">
|
||||
<tbody>
|
||||
{% for report in reports %}
|
||||
<tr>
|
||||
<td class="summary_info">
|
||||
<div id="hostname">
|
||||
<p class="hostname"> <img class="router_image" src="report.png"></p>
|
||||
</div>
|
||||
</td>
|
||||
<td>
|
||||
<a href="{{ report }}"> {{ report }} </a>
|
||||
</td>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
<center><p>Created with</p><br><img src="webpage_logo.png" width="300">
|
||||
</center>
|
||||
</section>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -0,0 +1,202 @@
|
||||
p.hostname {
|
||||
color: #000000;
|
||||
font-weight: bolder;
|
||||
font-size: large;
|
||||
margin: auto;
|
||||
width: 50%;
|
||||
}
|
||||
|
||||
#subtable {
|
||||
background: #ebebeb;
|
||||
margin: 0px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
#subtable tbody tr td {
|
||||
padding: 5px 5px 5px 5px;
|
||||
}
|
||||
|
||||
#subtable thead th {
|
||||
padding: 5px;
|
||||
}
|
||||
|
||||
* {
|
||||
-moz-box-sizing: border-box;
|
||||
-webkit-box-sizing: border-box;
|
||||
box-sizing: border-box;
|
||||
font-family: "Open Sans", "Helvetica";
|
||||
|
||||
}
|
||||
|
||||
a {
|
||||
color: #000000;
|
||||
}
|
||||
|
||||
p {
|
||||
color: #ffffff;
|
||||
}
|
||||
h1 {
|
||||
text-align: center;
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
body {
|
||||
background:#353a40;
|
||||
padding: 0px;
|
||||
margin: 0px;
|
||||
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
|
||||
}
|
||||
|
||||
table {
|
||||
border-collapse: separate;
|
||||
background:#fff;
|
||||
@include border-radius(5px);
|
||||
@include box-shadow(0px 0px 5px rgba(0,0,0,0.3));
|
||||
}
|
||||
|
||||
.main_net_table {
|
||||
margin:50px auto;
|
||||
}
|
||||
|
||||
thead {
|
||||
@include border-radius(5px);
|
||||
}
|
||||
|
||||
thead th {
|
||||
font-size:16px;
|
||||
font-weight:400;
|
||||
color:#fff;
|
||||
@include text-shadow(1px 1px 0px rgba(0,0,0,0.5));
|
||||
text-align:left;
|
||||
padding:20px;
|
||||
border-top:1px solid #858d99;
|
||||
background: #353a40;
|
||||
|
||||
&:first-child {
|
||||
@include border-top-left-radius(5px);
|
||||
}
|
||||
|
||||
&:last-child {
|
||||
@include border-top-right-radius(5px);
|
||||
}
|
||||
}
|
||||
|
||||
tbody tr td {
|
||||
font-weight:400;
|
||||
color:#5f6062;
|
||||
font-size:13px;
|
||||
padding:20px 20px 20px 20px;
|
||||
border-bottom:1px solid #e0e0e0;
|
||||
}
|
||||
|
||||
tbody tr:nth-child(2n) {
|
||||
background:#f0f3f5;
|
||||
}
|
||||
|
||||
tbody tr:last-child td {
|
||||
border-bottom:none;
|
||||
&:first-child {
|
||||
@include border-bottom-left-radius(5px);
|
||||
}
|
||||
&:last-child {
|
||||
@include border-bottom-right-radius(5px);
|
||||
}
|
||||
}
|
||||
|
||||
td {
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
span.highlight {
|
||||
background-color: yellow;
|
||||
}
|
||||
|
||||
.expandclass {
|
||||
color: #5f6062;
|
||||
}
|
||||
|
||||
.content{
|
||||
display:none;
|
||||
margin: 10px;
|
||||
}
|
||||
|
||||
header {
|
||||
width: 100%;
|
||||
position: initial;
|
||||
float: initial;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
border-radius: 0;
|
||||
height: 88px;
|
||||
background-color: #171717;
|
||||
}
|
||||
|
||||
.header-container {
|
||||
margin: 0 auto;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
max-width: 1170px;
|
||||
padding: 0;
|
||||
float: initial;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.header-logo {
|
||||
width: 137px;
|
||||
border: 0;
|
||||
margin: 0;
|
||||
margin-left: 15px;
|
||||
}
|
||||
|
||||
.header-link {
|
||||
margin-left: 40px;
|
||||
text-decoration: none;
|
||||
cursor: pointer;
|
||||
text-transform: uppercase;
|
||||
font-size: 15px;
|
||||
font-family: 'Red Hat Text';
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.header-link:hover {
|
||||
text-shadow: 0 0 0.02px white;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
table.net_info td {
|
||||
padding: 5px;
|
||||
}
|
||||
|
||||
p.expandclass:hover {
|
||||
text-decoration: underline;
|
||||
color: #EE0000;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.summary_info {
|
||||
}
|
||||
|
||||
.ui-state-active, .ui-widget-content .ui-state-active, .ui-widget-header .ui-state-active, a.ui-button:active, .ui-button:active, .ui-button.ui-state-active:hover {
|
||||
border: 1px solid #5F0000;
|
||||
background: #EE0000;
|
||||
}
|
||||
|
||||
div#net_content {
|
||||
padding: 0px;
|
||||
height: auto !important;
|
||||
}
|
||||
|
||||
img.router_image {
|
||||
vertical-align: middle;
|
||||
padding: 0px 10px 10px 10px;
|
||||
width: 50px;
|
||||
}
|
||||
|
||||
table.net_info {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
p.internal_label {
|
||||
color: #000000;
|
||||
}
|
||||
@@ -0,0 +1,31 @@
|
||||
<!-- INTERNAL TABLE FOR PACKAGES -->
|
||||
<div id="accordion">
|
||||
<div class="ui-accordion ui-widget ui-helper-reset" role="tablist">
|
||||
<h3 class="ui-accordion-header ui-corner-top ui-state-default ui-accordion-icons ui-accordion-header-collapsed ui-corner-all" role="tab" id="ui-id-3" aria-controls="ui-id-4" aria-selected="false" aria-expanded="false" tabindex="0">Package Facts</h3>
|
||||
<div class="net_content ui-accordion-content ui-corner-bottom ui-helper-reset ui-widget-content" id="ui-id-4" aria-labelledby="ui-id-3" role="tabpanel" aria-hidden="true" style="display: none; height: 194px;">
|
||||
<table id="subtable" class="sortable">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Package Name</th>
|
||||
<th>source</th>
|
||||
<th>release</th>
|
||||
<th>version</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% if hostvars[linux_host]['packages'] is defined %}
|
||||
{% for package in hostvars[linux_host]['packages'] %}
|
||||
<tr>
|
||||
<td>{{package['name']}}</td>
|
||||
<td>{{package['source']}}</td>
|
||||
<td>{{package['release']}}</td>
|
||||
<td>{{package['version']}}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<!-- END INTERNAL TABLE FOR PACKAGES -->
|
||||
@@ -0,0 +1,120 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title> Linux Patch Report </title>
|
||||
</head>
|
||||
<body>
|
||||
<center>
|
||||
<h1>Ansible Linux Patching Report</h1>
|
||||
<style>
|
||||
@media print {
|
||||
.noprint {
|
||||
display: none !important;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
<div class="noprint">
|
||||
<button type="button" onclick="tableToCSV()">Download CSV</button>
|
||||
<input type="button" value="Print" onClick="window.print()">
|
||||
</div>
|
||||
</center>
|
||||
<table border = "1" cellpadding = "5" cellspacing = "5">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Hostname</th>
|
||||
<th>Operating System</th>
|
||||
<th>Operating System Version</th>
|
||||
<th>Required Updates</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for linux_host in ansible_play_hosts |sort %}
|
||||
<tr>
|
||||
<td>{{hostvars[linux_host]['inventory_hostname']}}</td>
|
||||
<td>{{hostvars[linux_host]['ansible_os_family']|default("none")}}</td>
|
||||
<td>{{hostvars[linux_host]['ansible_distribution_version']|default("none")}}</td>
|
||||
<td>
|
||||
<ul>
|
||||
{% if hostvars[linux_host].patchingresult_yum.changed|default("false",true) == true %}
|
||||
{% for packagename in hostvars[linux_host].patchingresult_yum.changes.updated|sort %}
|
||||
<li> {{ packagename[0] }} - {{ packagename[1] }} </li>
|
||||
{% endfor %}
|
||||
{% elif hostvars[linux_host].patchingresult_dnf.changed|default("false",true) == true %}
|
||||
{% for packagename in hostvars[linux_host].patchingresult_dnf.results|sort %}
|
||||
<li> {{ packagename }} </li>
|
||||
{% endfor %}
|
||||
{% elif hostvars[linux_host].patchingresult_dnf.changed is undefined %}
|
||||
<li> Patching Failed </li>
|
||||
{% elif hostvars[linux_host].patchingresult_yum.changed is undefined %}
|
||||
<li> Patching Failed </li>
|
||||
{% else %}
|
||||
<li> Compliant </li>
|
||||
{% endif %}
|
||||
</ul>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
<center><p>Created with Ansible on {{hostvars[inventory_hostname].ansible_date_time.iso8601}}</p></center>
|
||||
<script type="text/javascript">
|
||||
function tableToCSV() {
|
||||
|
||||
// Variable to store the final csv data
|
||||
var csv_data = [];
|
||||
|
||||
// Get each row data
|
||||
var rows = document.getElementsByTagName('tr');
|
||||
for (var i = 0; i < rows.length; i++) {
|
||||
|
||||
// Get each column data
|
||||
var cols = rows[i].querySelectorAll('td,th');
|
||||
|
||||
// Stores each csv row data
|
||||
var csvrow = [];
|
||||
for (var j = 0; j < (cols.length); j++) {
|
||||
|
||||
// Get the text data of each cell of
|
||||
// a row and push it to csvrow
|
||||
if ( j == cols.length-1 && i==0){}
|
||||
else{
|
||||
csvrow.push(cols[j].textContent.replace(/,/g, " "));
|
||||
}
|
||||
|
||||
}
|
||||
csv_data.push(csvrow.join(","));
|
||||
}
|
||||
|
||||
// combine each row data with new line character
|
||||
csv_data = csv_data.join('\n');
|
||||
|
||||
// Call this function to download csv file
|
||||
downloadCSVFile(csv_data);
|
||||
}
|
||||
function downloadCSVFile(csv_data) {
|
||||
|
||||
// Create CSV file object and feed our
|
||||
// csv_data into it
|
||||
CSVFile = new Blob([csv_data], { type: "text/csv" });
|
||||
|
||||
// Create to temporary link to initiate
|
||||
// download process
|
||||
var temp_link = document.createElement('a');
|
||||
var todayDate = new Date().toISOString().slice(0, 10);
|
||||
|
||||
// Download csv file
|
||||
temp_link.download = "linuxpatching-" + todayDate + ".csv";
|
||||
var url = window.URL.createObjectURL(CSVFile);
|
||||
temp_link.href = url;
|
||||
|
||||
// This link should not be displayed
|
||||
temp_link.style.display = "none";
|
||||
document.body.appendChild(temp_link);
|
||||
|
||||
// Automatically click the link to trigger download
|
||||
temp_link.click();
|
||||
document.body.removeChild(temp_link);
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -0,0 +1,105 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title> Ansible Linux Automation Report </title>
|
||||
<link rel="stylesheet" type="text/css" href="//fonts.googleapis.com/css?family=Open+Sans" />
|
||||
<link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css">
|
||||
<link rel="stylesheet" href="new.css">
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.4/jquery.min.js"></script>
|
||||
<script src="https://code.jquery.com/jquery-1.12.4.js"></script>
|
||||
<script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
|
||||
<script src="https://www.kryogenix.org/code/browser/sorttable/sorttable.js"></script>
|
||||
<script>
|
||||
$(function() {
|
||||
$( "#accordion > div" ).accordion({
|
||||
header: "h3",
|
||||
active: false,
|
||||
collapsible: true
|
||||
});
|
||||
});
|
||||
</script>
|
||||
<script>
|
||||
(function(document) {
|
||||
'use strict';
|
||||
|
||||
var TableFilter = (function(myArray) {
|
||||
var search_input;
|
||||
|
||||
function _onInputSearch(e) {
|
||||
search_input = e.target;
|
||||
var tables = document.getElementsByClassName(search_input.getAttribute('data-table'));
|
||||
myArray.forEach.call(tables, function(table) {
|
||||
myArray.forEach.call(table.tBodies, function(tbody) {
|
||||
myArray.forEach.call(tbody.rows, function(row) {
|
||||
var text_content = row.textContent.toLowerCase();
|
||||
var search_val = search_input.value.toLowerCase();
|
||||
row.style.display = text_content.indexOf(search_val) > -1 ? '' : 'none';
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
init: function() {
|
||||
var inputs = document.getElementsByClassName('search-input');
|
||||
myArray.forEach.call(inputs, function(input) {
|
||||
input.oninput = _onInputSearch;
|
||||
});
|
||||
}
|
||||
};
|
||||
})(Array.prototype);
|
||||
|
||||
document.addEventListener('readystatechange', function() {
|
||||
if (document.readyState === 'complete') {
|
||||
TableFilter.init();
|
||||
}
|
||||
});
|
||||
|
||||
})(document);
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
<div class="wrapper">
|
||||
{% include 'header.j2' %}
|
||||
<section>
|
||||
<center>
|
||||
<h1>Ansible Linux Automation Report</h1>
|
||||
<h3><input type="search" placeholder="Search..." class="form-control search-input" data-table="main_net_table"/>
|
||||
</center>
|
||||
<table class="table table-striped mt32 main_net_table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Linux Device</th>
|
||||
<th>Package Manager</th>
|
||||
<th>Operating System</th>
|
||||
<th>Operating System Version</th>
|
||||
<th>Operating System Kernel Version</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for linux_host in ansible_play_hosts |sort %}
|
||||
<tr>
|
||||
<td class="summary_info">
|
||||
<div id="hostname">
|
||||
<p class="hostname">
|
||||
<img class="router_image" src="server.png"> {{ hostvars[linux_host]['inventory_hostname'].split('.')[0] }}</p>
|
||||
</div>
|
||||
{% if detailedreport == 'True' %}
|
||||
{% include 'packages.j2' %}
|
||||
{% include 'services.j2' %}
|
||||
{% endif %}
|
||||
</td>
|
||||
<td>{{hostvars[linux_host]['ansible_pkg_mgr']|default("none")}}</td>
|
||||
<td>{{hostvars[linux_host]['ansible_os_family']|default("none")}}</td>
|
||||
<td>{{hostvars[linux_host]['ansible_distribution_version']|default("none")}}</td>
|
||||
<td>{{hostvars[linux_host]['ansible_kernel']|default("none")}}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
<center><p>Created with</p><br><img src="webpage_logo.png" width="300">
|
||||
</center>
|
||||
</section>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -0,0 +1,94 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: linux-patching-report
|
||||
labels:
|
||||
app: linux-patching-report
|
||||
data:
|
||||
index.html: |
|
||||
{% filter indent(width=4) %}
|
||||
{%- include 'landing.j2' %}
|
||||
{% endfilter %}
|
||||
|
||||
linux.html: |
|
||||
{% filter indent(width=4) %}
|
||||
{%- include 'report.j2' %}
|
||||
{% endfilter %}
|
||||
|
||||
linuxpatch.html: |
|
||||
{% filter indent(width=4) %}
|
||||
{%- include 'patch.j2' %}
|
||||
{% endfilter %}
|
||||
|
||||
new.css: |
|
||||
{% filter indent(width=4) %}
|
||||
{%- include 'new.css.j2' %}
|
||||
{% endfilter %}
|
||||
|
||||
binaryData:
|
||||
server.png: {{ lookup('ansible.builtin.file', 'server.png') | b64encode }}
|
||||
report.png: {{ lookup('ansible.builtin.file', 'report.png') | b64encode }}
|
||||
webpage_logo.png: {{ lookup('file', 'webpage_logo.png') | b64encode }}
|
||||
redhat-ansible-logo.svg: {{ lookup('ansible.builtin.file', 'redhat-ansible-logo.svg') | b64encode }}
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: linux-patching-report
|
||||
labels:
|
||||
app: linux-patching-report
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: linux-patching-report
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: linux-patching-report
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 1
|
||||
containers:
|
||||
- image: registry.redhat.io/rhel8/httpd-24
|
||||
name: report-server
|
||||
volumeMounts:
|
||||
- name: html
|
||||
mountPath: /var/www/html
|
||||
volumes:
|
||||
- name: html
|
||||
configMap:
|
||||
name: linux-patching-report
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: linux-patching-report
|
||||
name: linux-patching-report
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 8080
|
||||
protocol: TCP
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app: linux-patching-report
|
||||
type: ClusterIP
|
||||
---
|
||||
kind: Route
|
||||
apiVersion: route.openshift.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
app: linux-patching-report
|
||||
name: linux-patching-report
|
||||
spec:
|
||||
to:
|
||||
kind: Service
|
||||
name: linux-patching-report
|
||||
weight: 100
|
||||
port:
|
||||
targetPort: http
|
||||
tls:
|
||||
termination: edge
|
||||
insecureEdgeTerminationPolicy: Redirect
|
||||
@@ -0,0 +1,30 @@
|
||||
<!-- INTERNAL TABLE FOR SERVICES -->
|
||||
<div id="accordion">
|
||||
<div class="ui-accordion ui-widget ui-helper-reset" role="tablist">
|
||||
<h3 class="ui-accordion-header ui-corner-top ui-state-default ui-accordion-icons ui-accordion-header-collapsed ui-corner-all" role="tab" id="ui-id-3" aria-controls="ui-id-4" aria-selected="false" aria-expanded="false" tabindex="0">Services Facts</h3>
|
||||
<div class="net_content ui-accordion-content ui-corner-bottom ui-helper-reset ui-widget-content" id="ui-id-4" aria-labelledby="ui-id-3" role="tabpanel" aria-hidden="true" style="display: none; height: 194px;">
|
||||
<table id="subtable" class="sortable">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Service Name</th>
|
||||
<th>State</th>
|
||||
<th>Source</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% if hostvars[linux_host]['services'] is defined %}
|
||||
{% for servicesname in hostvars[linux_host]['services']|sort %}
|
||||
{% set service = hostvars[linux_host]['services'][servicesname] %}
|
||||
<tr>
|
||||
<td>{{service['name']}}</td>
|
||||
<td>{{service['state']}}</td>
|
||||
<td>{{service['source']}}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<!-- END INTERNAL TABLE FOR SERVICES -->
|
||||
@@ -0,0 +1 @@
|
||||
---
|
||||
@@ -1,3 +1,3 @@
|
||||
---
|
||||
doc_root: /var/www/html
|
||||
reports_dir: reports
|
||||
doc_root: /var/www/html # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
reports_dir: reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
---
|
||||
doc_root: C:\Inetpub\wwwroot
|
||||
reports_dir: reports
|
||||
doc_root: C:\Inetpub\wwwroot # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
reports_dir: reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
detailedreport: true
|
||||
detailedreport: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
file_path: C:\Inetpub\wwwroot\reports
|
||||
file_path: C:\Inetpub\wwwroot\reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
---
|
||||
email_from: tower@shadowman.dev
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev
|
||||
to_emails_list: "{{ to_emails.split(',') }}"
|
||||
email_from: tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails: alex@shadowman.dev,tower@shadowman.dev # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
to_emails_list: "{{ to_emails.split(',') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
---
|
||||
file_path: C:\Inetpub\wwwroot\reports
|
||||
file_path: C:\Inetpub\wwwroot\reports # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
instance_name: "{{ inventory_hostname | regex_replace('_', '-') }}"
|
||||
activation_key: "{{ 'RHEL' + ansible_distribution_major_version + '_' + env }}"
|
||||
rex_user: root # "{{ ansible_user }}"
|
||||
force_register: true
|
||||
instance_name: "{{ inventory_hostname | regex_replace('_', '-') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
activation_key: "{{ 'RHEL' + ansible_distribution_major_version + '_' + env }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
rex_user: root # "{{ ansible_user }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
force_register: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
---
|
||||
rhsm_enabled_repos:
|
||||
rhsm_enabled_repos: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- rhel-7-server-rpms
|
||||
# - rhel-7-server-satellite-maintenance-6.11-rpms
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
rhsm_enabled_repos:
|
||||
rhsm_enabled_repos: # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
- rhel-8-for-x86_64-baseos-rpms
|
||||
- rhel-8-for-x86_64-appstream-rpms
|
||||
- satellite-client-6-for-rhel-8-x86_64-rpms
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
---
|
||||
foreman_server_url: "{{ lookup('env', 'SATELLITE_SERVER') }}"
|
||||
foreman_username: "{{ lookup('env', 'SATELLITE_USERNAME') }}"
|
||||
foreman_password: "{{ lookup('env', 'SATELLITE_PASSWORD') }}"
|
||||
foreman_validate_certs: "{{ lookup('env', 'FOREMAN_VALIDATE_CERTS') | default(true) }}"
|
||||
capsule_server: "{{ foreman_server_url }}"
|
||||
capsule_port: '9090'
|
||||
policy_name: 'all'
|
||||
policy_scan: "{{ policy_name }}"
|
||||
crontab_hour: 2
|
||||
crontab_minute: 0
|
||||
crontab_weekdays: 0
|
||||
foreman_operations_scap_client_secure_logging: true
|
||||
foreman_server_url: "{{ lookup('env', 'SATELLITE_SERVER') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
foreman_username: "{{ lookup('env', 'SATELLITE_USERNAME') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
foreman_password: "{{ lookup('env', 'SATELLITE_PASSWORD') }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
foreman_validate_certs: "{{ lookup('env', 'FOREMAN_VALIDATE_CERTS') | default(true) }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
capsule_server: "{{ foreman_server_url }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
capsule_port: '9090' # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
policy_name: 'all' # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
policy_scan: "{{ policy_name }}" # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
crontab_hour: 2 # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
crontab_minute: 0 # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
crontab_weekdays: 0 # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
foreman_operations_scap_client_secure_logging: true # noqa var-naming[no-role-prefix] - TODO : we should rework roles to use variable prefix, until scope is defined, silence is the way
|
||||
|
||||
@@ -1,44 +1,53 @@
|
||||
---
|
||||
# This file is mainly used by product-demos CI,
|
||||
# See cloin/ee-builds/product-demos-ee/requirements.yml
|
||||
# for configuring collections and collection versions.
|
||||
collections:
|
||||
- name: ansible.controller
|
||||
version: 4.4.0
|
||||
version: ">=4.5.5"
|
||||
- name: infra.ah_configuration
|
||||
version: ">=2.0.6"
|
||||
- name: infra.controller_configuration
|
||||
version: ">=2.7.1"
|
||||
- name: redhat_cop.controller_configuration
|
||||
version: 2.3.1
|
||||
version: ">=2.3.1"
|
||||
# linux
|
||||
- name: ansible.posix
|
||||
version: 1.5.4
|
||||
- name: redhat.insights
|
||||
version: 1.0.7
|
||||
- name: redhat.rhel_system_roles
|
||||
version: 1.20.0
|
||||
version: ">=1.5.4"
|
||||
- name: community.general
|
||||
version: 6.3.0
|
||||
version: ">=8.0.0"
|
||||
- name: containers.podman
|
||||
version: ">=1.12.1"
|
||||
- name: redhat.insights
|
||||
version: ">=1.2.2"
|
||||
- name: redhat.rhel_system_roles
|
||||
version: ">=1.23.0"
|
||||
# windows
|
||||
- name: chocolatey.chocolatey
|
||||
- name: community.windows
|
||||
version: 1.12.0
|
||||
- name: ansible.windows
|
||||
version: 1.13.0
|
||||
version: ">=2.3.0"
|
||||
- name: chocolatey.chocolatey
|
||||
version: ">=1.5.1"
|
||||
- name: community.windows
|
||||
version: ">=2.2.0"
|
||||
# cloud
|
||||
- name: azure.azcollection
|
||||
version: 1.14.0
|
||||
- name: amazon.aws
|
||||
version: 5.2.0
|
||||
version: ">=7.5.0"
|
||||
# satellite
|
||||
- name: redhat.satellite
|
||||
version: 3.8.0
|
||||
version: ">=4.0.0"
|
||||
# network
|
||||
- name: cisco.ios
|
||||
version: 4.4.0
|
||||
- name: cisco.nxos
|
||||
version: 4.1.0
|
||||
- name: cisco.iosxr
|
||||
version: 5.0.0
|
||||
- name: ansible.netcommon
|
||||
version: 5.0.0
|
||||
version: ">=6.0.0"
|
||||
- name: cisco.ios
|
||||
version: ">=7.0.0"
|
||||
- name: cisco.iosxr
|
||||
version: ">=8.0.0"
|
||||
- name: cisco.nxos
|
||||
version: ">=7.0.0"
|
||||
# openshift
|
||||
- name: redhat.openshift
|
||||
version: 2.3.0
|
||||
- name: kubernetes.core
|
||||
version: 2.4.0
|
||||
version: ">=4.0.0"
|
||||
- name: redhat.openshift
|
||||
version: ">=3.0.1"
|
||||
- name: redhat.openshift_virtualization
|
||||
version: ">=1.4.0"
|
||||
|
||||
3
common/README.md
Normal file
3
common/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Common Prerequisites
|
||||
|
||||
Demos from some categories (cloud, linux, windows, etc.) have become dependent on controller resources defined in other demo categories. The setup.yml file in this directory is used to configure these common prerequisites so that they are available before setup for a demo category is called.
|
||||
283
common/setup.yml
Normal file
283
common/setup.yml
Normal file
@@ -0,0 +1,283 @@
|
||||
---
|
||||
controller_execution_environments:
|
||||
- name: product-demos
|
||||
image: quay.io/acme_corp/product-demos-ee:latest
|
||||
- name: Cloud Services Execution Environment
|
||||
image: quay.io/scottharwell/cloud-ee:latest
|
||||
|
||||
controller_organizations:
|
||||
- name: Default
|
||||
default_environment: product-demos
|
||||
|
||||
controller_projects:
|
||||
- name: Ansible Cloud Content Lab - AWS
|
||||
organization: Default
|
||||
scm_type: git
|
||||
wait: true
|
||||
scm_url: https://github.com/ansible-content-lab/aws.infrastructure_config_demos.git
|
||||
default_environment: Cloud Services Execution Environment
|
||||
|
||||
controller_credentials:
|
||||
- name: AWS
|
||||
credential_type: Amazon Web Services
|
||||
organization: Default
|
||||
update_secrets: false
|
||||
state: exists
|
||||
inputs:
|
||||
username: REPLACEME
|
||||
password: REPLACEME
|
||||
|
||||
controller_inventory_sources:
|
||||
- name: AWS Inventory
|
||||
organization: Default
|
||||
source: ec2
|
||||
inventory: Demo Inventory
|
||||
credential: AWS
|
||||
overwrite: true
|
||||
source_vars:
|
||||
hostnames:
|
||||
- tag:Name
|
||||
compose:
|
||||
ansible_host: public_ip_address
|
||||
ansible_user: 'ec2-user'
|
||||
groups:
|
||||
cloud_aws: true
|
||||
os_linux: tags.blueprint.startswith('rhel')
|
||||
os_windows: tags.blueprint.startswith('win')
|
||||
keyed_groups:
|
||||
- key: platform
|
||||
prefix: os
|
||||
- key: tags.blueprint
|
||||
prefix: blueprint
|
||||
- key: tags.owner
|
||||
prefix: owner
|
||||
- key: tags.purpose
|
||||
prefix: purpose
|
||||
- key: tags.deployment
|
||||
prefix: deployment
|
||||
|
||||
controller_groups:
|
||||
- name: cloud_aws
|
||||
inventory: Demo Inventory
|
||||
variables:
|
||||
ansible_user: ec2-user
|
||||
- name: os_windows
|
||||
inventory: Demo Inventory
|
||||
variables:
|
||||
ansible_connection: winrm
|
||||
ansible_winrm_transport: credssp
|
||||
|
||||
controller_templates:
|
||||
- name: SUBMIT FEEDBACK
|
||||
job_type: run
|
||||
inventory: Demo Inventory
|
||||
project: Ansible official demo project
|
||||
playbook: feedback.yml
|
||||
execution_environment: Default execution environment
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: Name/Email/Contact
|
||||
type: text
|
||||
variable: email
|
||||
required: true
|
||||
- question_name: Issue or Feedback
|
||||
type: textarea
|
||||
variable: feedback
|
||||
required: true
|
||||
|
||||
- name: Cloud / AWS / Create VPC
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible official demo project
|
||||
playbook: cloud/create_vpc.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: AWS Region
|
||||
type: multiplechoice
|
||||
variable: create_vm_aws_region
|
||||
required: true
|
||||
choices:
|
||||
- us-east-1
|
||||
- us-east-2
|
||||
- us-west-1
|
||||
- us-west-2
|
||||
- question_name: Owner
|
||||
type: text
|
||||
variable: aws_owner_tag
|
||||
required: true
|
||||
|
||||
- name: Cloud / AWS / Create Keypair
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
project: Ansible official demo project
|
||||
playbook: cloud/aws_key.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: AWS Region
|
||||
type: multiplechoice
|
||||
variable: create_vm_aws_region
|
||||
required: true
|
||||
choices:
|
||||
- us-east-1
|
||||
- us-east-2
|
||||
- us-west-1
|
||||
- us-west-2
|
||||
- question_name: Keypair Name
|
||||
type: text
|
||||
variable: aws_key_name
|
||||
required: true
|
||||
default: aws-test-key
|
||||
- question_name: Keypair Public Key
|
||||
type: textarea
|
||||
variable: aws_public_key
|
||||
required: true
|
||||
- question_name: Owner
|
||||
type: text
|
||||
variable: aws_keypair_owner
|
||||
required: true
|
||||
|
||||
- name: Cloud / AWS / Create VM
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
- Demo Credential
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbooks/create_vm.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
allow_simultaneous: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: AWS Region
|
||||
type: multiplechoice
|
||||
variable: create_vm_aws_region
|
||||
required: true
|
||||
choices:
|
||||
- us-east-1
|
||||
- us-east-2
|
||||
- us-west-1
|
||||
- us-west-2
|
||||
- question_name: Name
|
||||
type: text
|
||||
variable: create_vm_vm_name
|
||||
required: true
|
||||
- question_name: Owner
|
||||
type: text
|
||||
variable: create_vm_vm_owner
|
||||
required: true
|
||||
- question_name: Deployment
|
||||
type: text
|
||||
variable: create_vm_vm_deployment
|
||||
required: true
|
||||
- question_name: Purpose
|
||||
type: text
|
||||
variable: create_vm_vm_purpose
|
||||
required: true
|
||||
default: demo
|
||||
- question_name: Environment
|
||||
type: multiplechoice
|
||||
variable: create_vm_vm_environment
|
||||
required: true
|
||||
choices:
|
||||
- Dev
|
||||
- QA
|
||||
- Prod
|
||||
- question_name: Blueprint
|
||||
type: multiplechoice
|
||||
variable: vm_blueprint
|
||||
required: true
|
||||
choices:
|
||||
- windows_core
|
||||
- windows_full
|
||||
- rhel9
|
||||
- rhel8
|
||||
- rhel7
|
||||
- al2023
|
||||
- question_name: Subnet
|
||||
type: text
|
||||
variable: create_vm_aws_vpc_subnet_name
|
||||
required: true
|
||||
default: aws-test-subnet
|
||||
- question_name: Security Group
|
||||
type: text
|
||||
variable: create_vm_aws_securitygroup_name
|
||||
required: true
|
||||
default: aws-test-sg
|
||||
- question_name: SSH Keypair
|
||||
type: text
|
||||
variable: create_vm_aws_keypair_name
|
||||
required: true
|
||||
default: aws-test-key
|
||||
- question_name: AWS Instance Type (defaults to blueprint value)
|
||||
type: text
|
||||
variable: create_vm_aws_instance_size
|
||||
required: false
|
||||
- question_name: AWS Image Filter (defaults to blueprint value)
|
||||
type: text
|
||||
variable: create_vm_aws_image_filter
|
||||
required: false
|
||||
|
||||
- name: Cloud / AWS / Delete VM
|
||||
job_type: run
|
||||
organization: Default
|
||||
credentials:
|
||||
- AWS
|
||||
- Demo Credential
|
||||
project: Ansible Cloud Content Lab - AWS
|
||||
playbook: playbooks/delete_inventory_vm.yml
|
||||
inventory: Demo Inventory
|
||||
notification_templates_started: Telemetry
|
||||
notification_templates_success: Telemetry
|
||||
notification_templates_error: Telemetry
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
description: ''
|
||||
spec:
|
||||
- question_name: Name or Pattern
|
||||
type: text
|
||||
variable: _hosts
|
||||
required: true
|
||||
|
||||
controller_notifications:
|
||||
- name: Telemetry
|
||||
organization: Default
|
||||
notification_type: webhook
|
||||
notification_configuration:
|
||||
url: https://script.google.com/macros/s/AKfycbzxUObvCJ6ZbzfJyicw4RvxlGE3AZdrK4AR5-TsedCYd7O-rtTOVjvsRvqyb3rx6B0g8g/exec
|
||||
http_method: POST
|
||||
headers: {}
|
||||
|
||||
controller_settings:
|
||||
- name: SESSION_COOKIE_AGE
|
||||
value: 180000
|
||||
@@ -60,7 +60,7 @@ Edit the `Linux / System Roles` job to include the list of roles that you wish t
|
||||
|
||||
**Linux / Temporary Sudo** - Use this job to show how to grant sudo access with automated cleanup to a server. The user must exist on the system. Using the student user is a good example (ie. student1)
|
||||
|
||||
**Linux / Patching** - Use this job to apply updates or audit for missing updates and produce an html report of systems with missing updates. See the end of the job for the URL to view the report. In other environments this report could be uploaded to a wiki, email, other system. This demo also shows installing a webserver on a linux server. The report is places on the system defined by the `report_server` variable. By default, `report_server` is configured as `node1`. This may be overridden with `extra_vars` on the Job Template.
|
||||
**Linux / Patching** - Use this job to apply updates or audit for missing updates and produce an html report of systems with missing updates. See the end of the job for the URL to view the report. In other environments this report could be uploaded to a wiki, email, other system. This demo also shows installing a webserver on a linux server. The report is places on the system defined by the `report_server` variable. By default, `report_server` is configured as `reports`. This may be overridden with `extra_vars` on the Job Template.
|
||||
|
||||
**Linux / Run Shell Script** - Use this job to demonstrate running shell commands or an existing shell script across a group of systems as root. This can be preferred over using Ad-Hoc commands due to the ability to control usage with RBAC. This is helpful in showing the scalable of execution of an existing shell script. It is always recommended to convert shell scripts to playbooks over time. Example usage would be getting the public key used in the environment with the command `cat .ssh/authorized_keys`.
|
||||
|
||||
|
||||
@@ -12,6 +12,5 @@
|
||||
|
||||
- name: Run Compliance Profile
|
||||
ansible.builtin.include_role:
|
||||
name: "redhatofficial.rhel{{ ansible_distribution_major_version }}_{{ compliance_profile }}"
|
||||
|
||||
name: "redhatofficial.rhel{{ ansible_distribution_major_version }}-{{ compliance_profile }}"
|
||||
...
|
||||
|
||||
@@ -5,6 +5,7 @@ The following compliance profiles are supported by the [**Linux / Enforce Compli
|
||||
| **Profile** | **Role Repository** |
|
||||
|-------------|---------------------|
|
||||
| CIS | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-cis |
|
||||
| CJIS | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-cjis |
|
||||
| CUI | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-cui |
|
||||
| HIPAA | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-hipaa |
|
||||
| OSPP | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-ospp |
|
||||
@@ -12,4 +13,3 @@ The following compliance profiles are supported by the [**Linux / Enforce Compli
|
||||
| DISA STIG | https://galaxy.ansible.com/RedHatOfficial/ansible-role-rhel8-stig |
|
||||
|
||||
These roles are derived from the [Compliance as Code](https://github.com/ComplianceAsCode/content) project, which provides SCAP content used by the [OpenSCAP](https://www.open-scap.org/) `oscap` tool.
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
hosts: "{{ _hosts | default(omit) }}"
|
||||
become: true
|
||||
vars:
|
||||
report_server: node1
|
||||
report_server: reports
|
||||
|
||||
tasks:
|
||||
# Install yum-utils if it's not there
|
||||
|
||||
@@ -3,13 +3,9 @@ user_message:
|
||||
- Update the 'activation_key' and 'org_id' extra variables for 'LINUX / Register with Insights'. https://access.redhat.com/management/activation_keys
|
||||
- Update Credential for Insights Inventory with Red Hat account.
|
||||
- Add variables for system_roles. https://console.redhat.com/ansible/automation-hub/repo/published/redhat/rhel_system_roles
|
||||
controller_components:
|
||||
- projects
|
||||
- credential_types
|
||||
- credentials
|
||||
- inventory_sources
|
||||
- job_templates
|
||||
|
||||
# "!unsafe" used to pass raw jinja2 through to the injector definition, see
|
||||
# https://github.com/redhat-cop/controller_configuration/tree/devel/roles/credential_types#formating-injectors
|
||||
controller_credential_types:
|
||||
- name: Insights Collection
|
||||
kind: cloud
|
||||
@@ -24,13 +20,14 @@ controller_credential_types:
|
||||
secret: true
|
||||
injectors:
|
||||
env:
|
||||
INSIGHTS_USER: "{% raw %}{ { insights_user }}{% endraw %}"
|
||||
INSIGHTS_PASSWORD: "{% raw %}{ { insights_password }}{% endraw %}"
|
||||
INSIGHTS_USER: !unsafe '{{ insights_user }}'
|
||||
INSIGHTS_PASSWORD: !unsafe '{{ insights_password }}'
|
||||
|
||||
controller_credentials:
|
||||
- name: Insights Inventory
|
||||
credential_type: Insights Collection
|
||||
organization: Default
|
||||
state: exists
|
||||
inputs:
|
||||
insights_user: REPLACEME
|
||||
insights_password: REPLACEME
|
||||
@@ -377,6 +374,12 @@ controller_templates:
|
||||
sudo_remove_no_authenticate: false
|
||||
# used by CIS and STIG profile role
|
||||
accounts_password_set_max_life_existing: false
|
||||
# used by the CJIS profile role
|
||||
service_firewalld_enabled: false
|
||||
firewalld_sshd_port_enabled: false
|
||||
# used by the PCI-DSS profile role
|
||||
firewalld_loopback_traffic_restricted: false
|
||||
firewalld_loopback_traffic_trusted: false
|
||||
survey_enabled: true
|
||||
survey:
|
||||
name: ''
|
||||
@@ -392,10 +395,11 @@ controller_templates:
|
||||
required: true
|
||||
choices:
|
||||
- cis
|
||||
- cjis
|
||||
- cui
|
||||
- hipaa
|
||||
- ospp
|
||||
- pci_dss
|
||||
- pci-dss
|
||||
- stig
|
||||
|
||||
- name: "LINUX / Multi-profile Compliance Report"
|
||||
@@ -423,6 +427,7 @@ controller_templates:
|
||||
required: true
|
||||
choices:
|
||||
- cis
|
||||
- cjis
|
||||
- cui
|
||||
- hipaa
|
||||
- ospp
|
||||
|
||||
@@ -15,4 +15,4 @@
|
||||
|
||||
- name: Default Components
|
||||
ansible.builtin.include_role:
|
||||
name: "redhat_cop.controller_configuration.job_launch"
|
||||
name: "infra.controller_configuration.job_launch"
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user