Update roles

This commit is contained in:
2021-04-20 12:14:42 -04:00
parent 8005080b8b
commit 595021d449
131 changed files with 4144 additions and 3018 deletions

View File

@@ -59,10 +59,12 @@ Role uses variables that are required to be passed while including it. As
there is option to run one container separately or multiple containers in pod,
note that some options apply only to one of the two methods.
- ```container_image``` - container image and tag, e.g. nextcloud:latest
This is used only if you run single container
- ```container_image_list``` - list of container images to run within a pod.
This is used only if you run containers in pod.
- ```container_image_list``` - list of container images to run.
If more than one image is defined, then the containers will be run in a pod.
- ```container_image_user``` - optional username to use when authenticating
to remote registries
- ```container_image_password``` - optional password to use when authenticating
to remote registries
- ```container_name``` - Identify the container in systemd and podman commands.
The systemd service file will be named container_name--container-pod.service.
- ```container_run_args``` - Anything you pass to podman, except for the name
@@ -88,12 +90,14 @@ command line. See ```man podman``` or
[podman tutorials](https://github.com/containers/libpod/tree/master/docs/tutorials)
for info.
If you want your
[images to be automatically updated](http://docs.podman.io/en/latest/markdown/podman-auto-update.1.html),
add this label to container_cmd_args: ```--label "io.containers.autoupdate=image"```
Dependencies
------------
No dependencies.
* [containers.podman](https://galaxy.ansible.com/containers/podman) (collection)
Example Playbook
----------------
@@ -105,11 +109,13 @@ Root container:
```
- name: tests container
vars:
container_image: sebp/lighttpd:latest
container_image_list:
- sebp/lighttpd:latest
container_name: lighttpd
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
--label "io.containers.autoupdate=image"
-p 8080:80
#container_state: absent
container_state: running
@@ -139,7 +145,8 @@ Rootless container:
vars:
container_run_as_user: rootless_user
container_run_as_group: rootless_user
container_image: sebp/lighttpd:latest
container_image_list:
- sebp/lighttpd:latest
container_name: lighttpd
container_run_args: >-
--rm

View File

@@ -3,14 +3,12 @@
# state can be running or absent
container_state: running
# systemd service name
service_name: "{{ container_name }}-container-pod.service"
# SystemD restart policy
# see man systemd.service for info
# by default we want to restart failed container
container_restart: on-failure
service_files_dir: /etc/systemd/system
systemd_scope: system
systemd_TimeoutStartSec: 15
systemd_RestartSec: 30
systemd_tempdir: "{{ '/tmp' if ansible_os_family == 'RedHat' and
@@ -19,5 +17,13 @@ container_run_as_user: root
container_run_as_group: root
container_stop_timeout: 15
# systemd service name
service_name: "{{ container_name }}-container-pod-{{ container_run_as_user }}.service"
# to speed things up, you can disable always checking if podman is installed.
skip_podman_install: true
podman_dependencies_rootless:
- fuse-overlayfs
- slirp4netns
- uidmap

View File

@@ -1,15 +1,40 @@
---
- name: reload systemctl
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
daemon_reload: yes
daemon_reload: true
scope: "{{ systemd_scope }}"
- name: start service
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
scope: "{{ systemd_scope }}"
state: started
- name: restart service
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
scope: "{{ systemd_scope }}"
state: restarted
- name: enable service
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
enabled: true
scope: "{{ systemd_scope }}"

View File

@@ -1,2 +1,2 @@
install_date: Wed Jun 24 18:44:37 2020
version: master
install_date: Tue Apr 20 16:13:54 2021
version: 2.1.0

View File

@@ -7,6 +7,9 @@ galaxy_info:
license: GPLv3
min_ansible_version: 2.4
platforms:
- name: Debian
versions:
- buster
- name: EL
versions:
- 8
@@ -14,9 +17,26 @@ galaxy_info:
- name: Fedora
versions:
- all
- name: Ubuntu
versions:
- bionic
- disco
- eoan
- focal
galaxy_tags:
- podman
- container
- systemd
dependencies: []
dependencies:
- role: systemli.apt_repositories
vars:
apt_repositories:
- preset: kubic
when: >
(ansible_distribution == 'Debian' and
ansible_distribution_release == 'buster') or
ansible_distribution == 'Ubuntu'
collections:
- containers.podman

View File

@@ -1,5 +1,48 @@
---
- name: prepare rootless stuff if needed
block:
- name: get user information
user:
name: "{{ container_run_as_user }}"
check_mode: true
register: user_info
- name: set systemd dir if user is not root
set_fact:
service_files_dir: "{{ user_info.home }}/.config/systemd/user"
systemd_scope: user
changed_when: false
- name: ensure systemd files directory exists if user not root
file:
path: "{{ service_files_dir }}"
state: directory
owner: "{{ container_run_as_user }}"
group: "{{ container_run_as_group }}"
when: container_run_as_user != "root"
- name: "Find uid of user"
command: "id -u {{ container_run_as_user }}"
register: container_run_as_uid
check_mode: false # Run even in check mode, to avoid fail with --check.
changed_when: false
- name: set systemd runtime dir
set_fact:
xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}"
changed_when: false
- name: set systemd scope to system if needed
set_fact:
systemd_scope: system
service_files_dir: '/etc/systemd/system'
xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}"
when: container_run_as_user == "root"
changed_when: false
- name: check if service file exists already
stat:
path: "{{ service_files_dir }}/{{ service_name }}"
@@ -8,99 +51,95 @@
- name: do tasks when "{{ service_name }}" state is "running"
block:
- name: Check for user namespace support in kernel
stat:
path: /proc/sys/kernel/unprivileged_userns_clone
register: unprivileged_userns_clone
changed_when: false
- name: Allow unprivileged users on Debian
sysctl:
name: kernel.unprivileged_userns_clone
value: '1'
state: present
sysctl_file: /etc/sysctl.d/userns.conf
sysctl_set: true
when:
- ansible_distribution == 'Debian'
- unprivileged_userns_clone.stat.exists
- name: Install rootless dependencies on Debian-based
package:
name: "{{ podman_dependencies_rootless }}"
state: present
when:
- ansible_os_family == 'Debian'
- container_run_as_user != 'root'
- name: ensure podman is installed
package:
name: podman
state: installed
state: present
when: not skip_podman_install
- name: check user exists
user:
name: "{{ container_run_as_user }}"
- name: check if user is in subuid file
lineinfile:
line: '\1'
path: /etc/subuid
regexp: "^({{ container_run_as_user }}:.*)"
backrefs: yes
check_mode: yes
register: uid_has
ignore_errors: true
when: container_run_as_user != 'root'
- name: check if group is in subgid file
lineinfile:
line: '\1'
path: /etc/subgid
regexp: "^({{ container_run_as_group }}:.*)"
backrefs: yes
check_mode: yes
register: gid_has
ignore_errors: true
when: container_run_as_group != 'root'
- name: ensure user is in subuid file, if it was missing
lineinfile:
path: /etc/subuid
regexp: "^{{ container_run_as_user }}:.*"
line: "{{ container_run_as_user }}:305536:65536"
create: yes
mode: '0644'
owner: root
group: root
when: uid_has.changed and container_run_as_user != 'root'
- name: ensure group is in subgid file, if it was missing
lineinfile:
path: /etc/subgid
regexp: "^{{ container_run_as_group }}:.*"
line: "{{ container_run_as_group }}:305536:65536"
create: yes
mode: '0644'
owner: root
group: root
when: gid_has.changed and container_run_as_group != 'root'
- name: Check subuid & subgid
import_tasks: check_subid.yml
- name: running single container, get image Id if it exists and we are root
# XXX podman doesn't work through sudo for non root users, so skip preload if user
# XXX podman doesn't work through sudo for non root users,
# so skip preload if user
# https://github.com/containers/libpod/issues/5570
# command: podman inspect -f {{.Id}} "{{ container_image }}"
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ container_image }}"
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}"
register: pre_pull_id
ignore_errors: yes
when: container_image is defined and container_run_as_user == 'root'
ignore_errors: true
when:
- container_image_list is defined
- container_image_list | length == 1
- container_run_as_user == 'root'
with_items: "{{ container_image_list }}"
- name: running single container, ensure we have up to date container image
command: "podman pull {{ container_image }}"
become: yes
containers.podman.podman_image:
name: "{{ item }}"
force: true
username: "{{ container_image_user | default(omit) }}"
password: "{{ container_image_password | default(omit) }}"
notify: restart service
become: true
become_user: "{{ container_run_as_user }}"
when: container_image is defined and container_run_as_user == 'root'
when:
- container_image_list is defined
- container_image_list | length == 1
- container_run_as_user == 'root'
with_items: "{{ container_image_list }}"
- name: running single container, get image Id if it exists
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ container_image }}"
become: yes
command:
"podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}"
become: true
become_user: "{{ container_run_as_user }}"
register: post_pull_id
when: container_image is defined and container_run_as_user == 'root'
- name: force restart after image change
debug: msg="image has changed"
changed_when: True
notify: restart service
ignore_errors: true
when:
- container_image_list is defined
- container_image_list | length == 1
- container_run_as_user == 'root'
- container_image is defined
- pre_pull_id.stdout != post_pull_id.stdout
- pre_pull_id is succeeded
# XXX remove above comparison if future podman tells image changed.
with_items: "{{ container_image_list }}"
- name: seems we use several container images, ensure all are up to date
command: "podman pull {{ item }}"
become: yes
containers.podman.podman_image:
name: "{{ item }}"
force: true
username: "{{ container_image_user | default(omit) }}"
password: "{{ container_image_password | default(omit) }}"
become: true
become_user: "{{ container_run_as_user }}"
when: container_image_list is defined
when: container_image_list is defined and container_image_list | length > 1
with_items: "{{ container_image_list }}"
- name: if running pod, ensure configuration file exists
@@ -110,11 +149,25 @@
when: container_pod_yaml is defined
- name: fail if pod configuration file is missing
fail:
msg: "Error: Asking to run pod, but pod definition yaml file is missing: {{ container_pod_yaml }}"
msg: >
"Error: Asking to run pod, but pod definition yaml file is missing: "
"{{ container_pod_yaml }}"
when:
- container_pod_yaml is defined
- not pod_file.stat.exists
- name: Check if user is lingering
stat:
path: "/var/lib/systemd/linger/{{ container_run_as_user }}"
register: user_lingering
when: container_run_as_user != "root"
- name: Enable lingering if needed
command: "loginctl enable-linger {{ container_run_as_user }}"
when:
- container_run_as_user != "root"
- not user_lingering.stat.exists
- name: "create systemd service file for container: {{ container_name }}"
template:
src: systemd-service-single.j2
@@ -122,9 +175,12 @@
owner: root
group: root
mode: 0644
notify: reload systemctl
notify:
- reload systemctl
- start service
- enable service
register: service_file
when: container_image is defined
when: container_image_list is defined and container_image_list | length == 1
- name: "create systemd service file for pod: {{ container_name }}"
template:
@@ -136,24 +192,13 @@
notify:
- reload systemctl
- start service
- enable service
register: service_file
when: container_image_list is defined
- name: ensure "{{ service_name }}" is enabled at boot, and systemd reloaded
systemd:
name: "{{ service_name }}"
enabled: yes
daemon_reload: yes
- name: ensure "{{ service_name }}" is running
service:
name: "{{ service_name }}"
state: started
when: not service_file_before_template.stat.exists
when: container_image_list is defined and container_image_list | length > 1
- name: "ensure {{ service_name }} is restarted due config change"
debug: msg="config has changed:"
changed_when: True
changed_when: true
notify: restart service
when:
- service_file_before_template.stat.exists
@@ -169,14 +214,32 @@
fw_state: enabled
when: container_state == "running"
- name: set firewall ports state to disabled when container state is not running
- name: disable firewall ports state when container state is not running
set_fact:
fw_state: disabled
when: container_state != "running"
- name: ensure firewalld is installed
tags: firewall
package: name=firewalld state=installed
package: name=firewalld state=present
when: ansible_pkg_mgr != "atomic_container"
- name: ensure firewalld is installed (on fedora-iot)
tags: firewall
command: >-
rpm-ostree install --idempotent --unchanged-exit-77
--allow-inactive firewalld
register: ostree
failed_when: not ( ostree.rc == 77 or ostree.rc == 0 )
changed_when: ostree.rc != 77
when: ansible_pkg_mgr == "atomic_container"
- name: reboot if new stuff was installed
reboot:
reboot_timeout: 300
when:
- ansible_pkg_mgr == "atomic_container"
- ostree.rc != 77
- name: ensure firewall service is running
tags: firewall
@@ -186,11 +249,14 @@
tags: firewall
firewalld:
port: "{{ item }}"
permanent: yes
immediate: yes
permanent: true
immediate: true
state: "{{ fw_state }}"
with_items: "{{ container_firewall_ports }}"
- name: Force all notified handlers to run at this point
meta: flush_handlers
when: container_firewall_ports is defined
@@ -198,17 +264,29 @@
block:
- name: ensure "{{ service_name }}" is disabled at boot
service:
become: true
become_user: "{{ container_run_as_user }}"
# become_method: machinectl
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
enabled: false
scope: "{{ systemd_scope }}"
when:
- service_file_before_template.stat.exists
- name: ensure "{{ service_name }}" is stopped
service:
become: true
become_user: "{{ container_run_as_user }}"
# become_method: machinectl
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
state: stopped
enabled: no
enabled: false
scope: "{{ systemd_scope }}"
when:
- service_file_before_template.stat.exists
@@ -218,6 +296,21 @@
state: absent
notify: reload systemctl
- name: Force all notified handlers to run at this point
meta: flush_handlers
- name: Check if user is lingering
stat:
path: "/var/lib/systemd/linger/{{ container_run_as_user }}"
register: user_lingering
when: container_run_as_user != "root"
- name: Disable lingering (are we sure we want to do this always?)
command: "loginctl disable-linger {{ container_run_as_user }}"
when:
- container_run_as_user != "root"
- user_lingering.stat.exists
- name: clean up pod configuration file
file:
path: "{{ container_pod_yaml }}"

View File

@@ -6,7 +6,9 @@ After=network.target
Type=forking
TimeoutStartSec={{ systemd_TimeoutStartSec }}
ExecStartPre=-/usr/bin/podman pod rm -f {{ container_name }}
{% if container_run_as_user == 'root' %}
User={{ container_run_as_user }}
{% endif %}
RemainAfterExit=yes
ExecStart=/usr/bin/podman play kube {{ container_pod_yaml }}
@@ -18,4 +20,9 @@ Restart={{ container_restart }}
RestartSec={{ systemd_RestartSec }}
[Install]
{% if container_run_as_user == 'root' %}
WantedBy=multi-user.target
{% endif %}
{% if container_run_as_user != 'root' %}
WantedBy=default.target
{% endif %}

View File

@@ -6,20 +6,27 @@ After=network.target
Type=simple
TimeoutStartSec={{ systemd_TimeoutStartSec }}
ExecStartPre=-/usr/bin/rm -f {{ pidfile }} {{ cidfile }}
{% if container_run_as_user == 'root' %}
User={{ container_run_as_user }}
{% endif %}
ExecStart=/usr/bin/podman run --name {{ container_name }} \
{{ container_run_args }} \
--conmon-pidfile {{ pidfile }} --cidfile {{ cidfile }} \
{{ container_image }} {% if container_cmd_args is defined %} \
{{ container_image_list | first }} {% if container_cmd_args is defined %} \
{{ container_cmd_args }} {% endif %}
ExecStop=/usr/bin/sh -c "/usr/bin/podman stop -t "{{ container_stop_timeout }}" `cat {{ cidfile }}`"
ExecStop=/usr/bin/sh -c "/usr/bin/podman rm -f `cat {{ cidfile }}`"
Restart={{ container_restart }}
RestartSec={{ systemd_RestartSec }}
KillMode=none
KillMode=mixed
PIDFile={{ pidfile }}
[Install]
{% if container_run_as_user == 'root' %}
WantedBy=multi-user.target
{% endif %}
{% if container_run_as_user != 'root' %}
WantedBy=default.target
{% endif %}

View File

@@ -1,10 +1,14 @@
---
# yamllint disable rule:line-length
# I run this file with following line to test against my Vagrant Fedora:
# ansible-playbook --vault-password-file .vault-password -b -i \
# ~/vagrant/fedora/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory \
# -e ansible_python_interpreter=/usr/bin/python3 \
# -e container_state=running test-podman.yml
# yamllint enable rule:line-length
- name: create lighttpd pod
hosts: all
# connection: local
@@ -25,12 +29,14 @@
- name: tests container
vars:
container_state: running
#container_state: absent
container_image: sebp/lighttpd:latest
# container_state: absent
container_image_list:
- sebp/lighttpd:latest
container_name: lighttpd
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
-t
-p 8080:80/tcp
container_firewall_ports:
- 8080/tcp

View File

@@ -1,7 +1,6 @@
---
# systemd service name
service_name: "{{ container_name }}-container-pod.service"
cidpid_base: "{{ systemd_tempdir }}/%n-"
cidfile: "{{ cidpid_base }}cid"
pidfile: "{{ cidpid_base }}pid"