Update roles and netbox inventory

This commit is contained in:
2020-06-25 13:27:06 -04:00
parent e87f15168a
commit 07d88cc752
21 changed files with 256 additions and 67 deletions

View File

@@ -1,9 +1,16 @@
# Note: need to specify extra_vars, providing ansible_ssh_user, and ansible_ssh_pass
- name: Set up IPA Client
hosts: lab_ipa_client
hosts: tag_ipa_client
become: yes
collections:
- freeipa.ansible_freeipa
pre_tasks:
- name: Attach subscriptions
command: '/usr/bin/subscription-manager attach'
register: result
changed_when:
- '"All installed products are covered by valid entitlements. No need to update subscriptions at this time." not in result.stdout'
roles:
- role: debian-freeipa-client

45
monitoring.yml Normal file
View File

@@ -0,0 +1,45 @@
---
- name: Prometheus
hosts: podhost1.mgmt.toal.ca # Hard-coded for now
become: yes
vars:
container_image: prom/prometheus
container_name: prometheus
container_state: running
container_firewall_ports:
- 9090/tcp
container_run_args: >-
-p 9090:9090
-v /home/prometheus/etc:/etc/prometheus:Z
-v /home/prometheus/data:/prometheus:Z
-v /home/prometheus/console_libraries:/usr/share/prometheus/console_libraries:Z
-v /home/prometheus/console_templates:/usr/share/prometheus/consoles:Z
roles:
- ikke_t.podman_container_systemd
pre_tasks:
- name: Directories exist
file:
path: '{{ item }}'
state: directory
owner: nobody
group: nobody
loop:
- /home/prometheus/etc
- /home/prometheus/data
- /home/prometheus/console_libraries
- /home/prometheus/console_template
- name: Grafana
hosts: podhost1.mgmt.toal.ca # Hard-coded for now
become: yes
vars:
container_image: grafana/grafana
container_name: grafana
container_state: running
container_firewall_ports:
- 3000/tcp
container_run_args: >-
-p 3000:3000
roles:
- ikke_t.podman_container_systemd

View File

@@ -3,6 +3,8 @@ plugin: netbox.netbox.nb_inventory
api_endpoint: http://netbox.mgmt.toal.ca
validate_certs: True
config_context: True
flatten_config_context: True
flatten_custom_fields: True
interfaces: True
services: True
plurals: False
@@ -14,6 +16,8 @@ group_by:
- platform
- cluster
query_filters:
- tag: ansible
#query_filters:
# - role: network-edge-router

View File

@@ -10,6 +10,7 @@ env:
- MOLECULE_DISTRO: centos7
- MOLECULE_DISTRO: centos6
- MOLECULE_DISTRO: fedora31
- MOLECULE_DISTRO: ubuntu2004
- MOLECULE_DISTRO: ubuntu1804
- MOLECULE_DISTRO: ubuntu1604
- MOLECULE_DISTRO: debian10
@@ -17,7 +18,7 @@ env:
install:
# Install test dependencies.
- pip install molecule docker
- pip install molecule yamllint ansible-lint docker
before_script:
# Use actual Ansible Galaxy role name for the project directory.

View File

@@ -1,2 +1,2 @@
install_date: Fri Apr 3 19:21:40 2020
version: 1.9.7
install_date: Wed Jun 24 18:44:31 2020
version: 1.10.0

View File

@@ -29,6 +29,7 @@ galaxy_info:
- trusty
- xenial
- bionic
- focal
- name: FreeBSD
versions:
- 10.2

View File

@@ -3,10 +3,10 @@ dependency:
name: galaxy
driver:
name: docker
lint:
name: yamllint
options:
config-file: molecule/default/yaml-lint.yml
lint: |
set -e
yamllint .
ansible-lint
platforms:
- name: instance
image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest"
@@ -17,13 +17,5 @@ platforms:
pre_build_image: true
provisioner:
name: ansible
lint:
name: ansible-lint
playbooks:
converge: ${MOLECULE_PLAYBOOK:-playbook.yml}
scenario:
name: default
verifier:
name: testinfra
lint:
name: flake8
converge: ${MOLECULE_PLAYBOOK:-converge.yml}

View File

@@ -1,13 +0,0 @@
---
- name: Converge
hosts: all
become: true
pre_tasks:
- name: Update apt cache.
apt: update_cache=true cache_valid_time=600
when: ansible_os_family == 'Debian'
changed_when: false
roles:
- role: geerlingguy.java

View File

@@ -1,6 +0,0 @@
---
extends: default
rules:
line-length:
max: 120
level: warning

View File

@@ -8,7 +8,7 @@
recurse: true
when:
- ansible_distribution == 'Ubuntu'
- ansible_distribution_version == '18.04'
- ansible_distribution_major_version | int >= 18
- name: Ensure Java is installed.
apt:

View File

@@ -1,2 +1,2 @@
install_date: Sun Dec 29 00:38:40 2019
install_date: Wed Jun 24 18:44:38 2020
version: master

View File

@@ -1,5 +1,9 @@
---
galaxy_info:
description: Periodically cleans up all unused container images from host. Role sets up cron job based on whether podman or docker is installed.
description: >
Periodically cleans up all unused container images from host. Role
sets up cron job based on whether podman or docker is installed.
author: Ilkka Tengvall
company: ITSE
license: GPLv3
@@ -7,13 +11,23 @@ galaxy_info:
min_ansible_version: 2.4
platforms:
- name: Fedora
versions:
- all
- name: Debian
versions:
- all
- name: EL
versions:
- 8
- 7
- name: Fedora
versions:
- all
- name: Ubuntu
versions:
- all
galaxy_tags:
- containers
- podman
- docker
- containers
- podman
- docker
dependencies: []

View File

@@ -21,14 +21,30 @@ What role does:
* sets the container or pod to be always automatically restarted if the container dies.
* makes container or pod enter run state at system boot
* adds or removes containers exposed ports to firewall.
* It takes parameter for running rootless containers under given user
(I didn't test this with pod mode yet)
For reference, see these two blogs about the role:
* [Automate Podman Containers with Ansible 1/2](https://redhatnordicssa.github.io/ansible-podman-containers-1)
* [Automate Podman Containers with Ansible 2/2](https://redhatnordicssa.github.io/ansible-podman-containers-2)
Blogs describe how you can run single containers, or several containers as one pod
Blogs describe how you can run a single container, or several containers as one pod
using this module.
## Note for running rootless containers:
* You need to have the user created prior running this role.
* The user should have entries in /etc/sub[gu]id files for namespace range.
If not, this role adds some variables there in order to get something going,
but preferably you check them.
* I only tested the single container mode, not the pod mode with several containers.
Please report back how that part works! :)
* Some control things like memory or other resource limits won't work as user.
* You want to increase ```systemd_TimeoutStartSec``` heavily, as we can not
prefetch the images before systemd unit start. So systemd needs to wait
for podman to pull images prior it starts container. Might take minutes
depending on your network connection, and container image size.
Requirements
------------
@@ -51,13 +67,20 @@ note that some options apply only to other method.
Systemd service file be named container_name--container-pod.service.
- ```container_run_args``` - Anything you pass to podman, except for the name
and image while running single container. Not used for pod.
- ```container_cmd_args``` - Any command and arguments passed to podman-run after specifying the image name. Not used for pod.
- ```container_run_as_user``` - Which user should systemd run container as.
Defaults to root.
- ```container_run_as_group``` - Which group should systemd run the container as.
Defaults to root.
- ```container_state``` - container is installed and run if state is
```running```, and stopped and systemd file removed if ```absent```
- ```container_firewall_ports``` - list of ports you have exposed from container
and want to open firewall for. When container_state is absent, firewall ports
get closed. If you don't want firewalld installed, don't define this.
- ```systemd_TimeoutStartSec``` - how long does systemd wait for container to start?
- ```systemd_tempdir``` - Where to store conmon-pidfile and cidfile for single containers.
Defaults to ``%T`` on systems supporting this specifier (see man 5 systemd.unit) ``/tmp``
otherwise.
This playbook doesn't have python module to parse parameters for podman command.
Until that you just need to pass all parameters as you would use podman from
@@ -75,7 +98,9 @@ No dependencies.
Example Playbook
----------------
See the tests/main.yml for sample. In short, include role with vars:
See the tests/main.yml for sample. In short, include role with vars.
Root container:
```
- name: tests container
@@ -95,6 +120,40 @@ See the tests/main.yml for sample. In short, include role with vars:
name: podman-container-systemd
```
Rootless container:
```
- name: ensure user
user:
name: rootless_user
comment: I run sample container
- name: ensure directory
file:
name: /tmp/podman-container-systemd
owner: rootless_user
group: rootless_user
state: directory
- name: tests container
vars:
container_run_as_user: rootless_user
container_run_as_group: rootless_user
container_image: sebp/lighttpd:latest
container_name: lighttpd
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
-p 8080:80
#container_state: absent
container_state: running
container_firewall_ports:
- 8080/tcp
- 8443/tcp
import_role:
name: podman-container-systemd
```
License
-------

View File

@@ -13,7 +13,11 @@ container_restart: on-failure
service_files_dir: /etc/systemd/system
systemd_TimeoutStartSec: 15
systemd_RestartSec: 30
systemd_tempdir: "{{ '/tmp' if ansible_os_family == 'RedHat' and
ansible_distribution_major_version|int == 7 else '%T' }}"
container_run_as_user: root
container_run_as_group: root
container_stop_timeout: 15
# to speed up, you can disable always checking if podman is installed.
skip_podman_install: true

View File

@@ -1,2 +1,2 @@
install_date: Sun Dec 29 00:38:07 2019
install_date: Wed Jun 24 18:44:37 2020
version: master

View File

@@ -1,3 +1,5 @@
---
galaxy_info:
author: Ilkka Tengvall
description: Role sets up container(s) to run on host with help of systemd.
@@ -5,12 +7,16 @@ galaxy_info:
license: GPLv3
min_ansible_version: 2.4
platforms:
- name: Fedora
versions:
- all
- name: EL
versions:
- 8
- 7
- name: Fedora
versions:
- all
galaxy_tags:
- podman
- container
- systemd
- podman
- container
- systemd
dependencies: []

View File

@@ -14,27 +14,82 @@
state: installed
when: not skip_podman_install
- name: running single container, get image Id if it exists
- name: check user exists
user:
name: "{{ container_run_as_user }}"
- name: check if user is in subuid file
lineinfile:
line: '\1'
path: /etc/subuid
regexp: "^({{ container_run_as_user }}:.*)"
backrefs: yes
check_mode: yes
register: uid_has
ignore_errors: true
when: container_run_as_user != 'root'
- name: check if group is in subgid file
lineinfile:
line: '\1'
path: /etc/subgid
regexp: "^({{ container_run_as_group }}:.*)"
backrefs: yes
check_mode: yes
register: gid_has
ignore_errors: true
when: container_run_as_group != 'root'
- name: ensure user is in subuid file, if it was missing
lineinfile:
path: /etc/subuid
regexp: "^{{ container_run_as_user }}:.*"
line: "{{ container_run_as_user }}:305536:65536"
create: yes
mode: '0644'
owner: root
group: root
when: uid_has.changed and container_run_as_user != 'root'
- name: ensure group is in subgid file, if it was missing
lineinfile:
path: /etc/subgid
regexp: "^{{ container_run_as_group }}:.*"
line: "{{ container_run_as_group }}:305536:65536"
create: yes
mode: '0644'
owner: root
group: root
when: gid_has.changed and container_run_as_group != 'root'
- name: running single container, get image Id if it exists and we are root
# XXX podman doesn't work through sudo for non root users, so skip preload if user
# https://github.com/containers/libpod/issues/5570
# command: podman inspect -f {{.Id}} "{{ container_image }}"
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ container_image }}"
register: pre_pull_id
ignore_errors: yes
when: container_image is defined
when: container_image is defined and container_run_as_user == 'root'
- name: running single container, ensure we have up to date container image
command: "podman pull {{ container_image }}"
when: container_image is defined
become: yes
become_user: "{{ container_run_as_user }}"
when: container_image is defined and container_run_as_user == 'root'
- name: running single container, get image Id if it exists
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ container_image }}"
become: yes
become_user: "{{ container_run_as_user }}"
register: post_pull_id
when: container_image is defined
when: container_image is defined and container_run_as_user == 'root'
- name: force restart after image change
debug: msg="image has changed"
changed_when: True
notify: restart service
when:
- container_run_as_user == 'root'
- container_image is defined
- pre_pull_id.stdout != post_pull_id.stdout
- pre_pull_id is succeeded
@@ -43,6 +98,8 @@
- name: seems we use several container images, ensure all are up to date
command: "podman pull {{ item }}"
become: yes
become_user: "{{ container_run_as_user }}"
when: container_image_list is defined
with_items: "{{ container_image_list }}"

View File

@@ -5,18 +5,21 @@ After=network.target
[Service]
Type=simple
TimeoutStartSec={{ systemd_TimeoutStartSec }}
ExecStartPre=-/usr/bin/podman rm {{ container_name }}
ExecStartPre=-/usr/bin/rm -f {{ pidfile }} {{ cidfile }}
User={{ container_run_as_user }}
ExecStart=/usr/bin/podman run --name {{ container_name }} \
{{ container_run_args }} \
{{ container_image }}
--conmon-pidfile {{ pidfile }} --cidfile {{ cidfile }} \
{{ container_image }} {% if container_cmd_args is defined %} \
{{ container_cmd_args }} {% endif %}
ExecReload=-/usr/bin/podman stop "{{ container_name }}"
ExecReload=-/usr/bin/podman rm "{{ container_name }}"
ExecStop=-/usr/bin/podman stop "{{ container_name }}"
ExecStop=/usr/bin/sh -c "/usr/bin/podman stop -t "{{ container_stop_timeout }}" `cat {{ cidfile }}`"
ExecStop=/usr/bin/sh -c "/usr/bin/podman rm -f `cat {{ cidfile }}`"
Restart={{ container_restart }}
RestartSec={{ systemd_RestartSec }}
KillMode=none
PIDFile={{ pidfile }}
[Install]
WantedBy=multi-user.target

View File

@@ -2,3 +2,6 @@
# systemd service name
service_name: "{{ container_name }}-container-pod.service"
cidpid_base: "{{ systemd_tempdir }}/%n-"
cidfile: "{{ cidpid_base }}cid"
pidfile: "{{ cidpid_base }}pid"

View File

@@ -3,8 +3,8 @@
# Java
- name: geerlingguy.java
# Node.js (Using this repo temporarily, as it fixes a package naming bug (See #95))
- src: https://github.com/halkeye/ansible-role-nodejs
version: halkeye-patch-1
# - src: https://github.com/halkeye/ansible-role-nodejs
# version: halkeye-patch-1
# Gitlab
- name: geerlingguy.gitlab
# Windows Ovirt Template
@@ -12,3 +12,5 @@
- name: oatakan.windows_template_build
- name: oatakan.windows_ovirt_guest_agent
- name: oatakan.windows_virtio
- name: ikke_t.podman_container_systemd
- name: ikke_t.container_image_cleanup

View File

@@ -24,7 +24,6 @@
state: present
notify: Ovirt Agent Restart
when:
- ansible_virtualization_type == "RHEV"
- ansible_os_family == "RedHat"
- ansible_distribution_major_version == '7'
@@ -34,10 +33,12 @@
state: present
notify: Qemu Agent Restart
when:
- ansible_virtualization_type == "RHEV"
- ansible_os_family == "RedHat"
- ansible_distribution_major_version == '8'
when:
- ansible_virtualization_type == "RHEV"
- name: Install katello-agent on Satellite managed systems
yum:
name: katello-agent
@@ -49,3 +50,12 @@
name: insights-client
state: present
when: ansible_os_family == "RedHat"
- name: Performance Co-Pilot
yum:
name:
- pcp
- cockpit-pcp
- pcp-system-tools
- pcp-pmda-trace
- pcp-selinux