Update role dependencies

This commit is contained in:
2024-02-08 15:55:01 -05:00
parent e09a7f7d45
commit bb21e8d5c6
507 changed files with 1270 additions and 28219 deletions

View File

@@ -18,11 +18,12 @@ What role does:
* on consecutive runs it pulls image again,
and restarts container if image changed (not for pod yet)
* creates systemd file for container or pod
* creates kubernetes yaml for pod
* creates volume directories for containers if they do not exist. (for pod use DirectoryOrCreate)
* sets container or pod to be always automatically restarted if container dies.
* makes container or pod enter run state at system boot
* adds or removes containers exposed ports to firewall.
* It takes parameter for running rootless containers under given user
(I didn't test this with pod mode yet)
For reference, see these two blogs about the role:
* [Automate Podman Containers with Ansible 1/2](https://redhatnordicssa.github.io/ansible-podman-containers-1)
@@ -37,8 +38,6 @@ using this module.
* The user should have entries in /etc/sub[gu]id files for namespace range.
If not, this role adds some variables there in order to get something going,
but preferably you check them.
* I only tested the single container mode, not the pod mode with several containers.
Please report back how that part works! :)
* Some control things like memory or other resource limit's won't work as user.
* You want to increase ```systemd_TimeoutStartSec``` heavily, as we can not
prefetch the images before systemd unit start. So systemd needs to wait
@@ -50,7 +49,8 @@ Requirements
Requires system which is capable of running podman, and that podman is found
from package repositories. Role installs podman. Role also installs firewalld
if user has defined ```container_firewall_ports``` -variable.
if user has defined ```container_firewall_ports``` -variable. Installs kubeval
for a pod if ```container_pod_yaml_template_validation: true```.
Role Variables
--------------
@@ -61,19 +61,35 @@ note that some options apply only to other method.
- ```container_image_list``` - list of container images to run.
If more than one image is defined, then the containers will be run in a pod.
- ```container_image_user``` - optional username to use when authenticating
It is possible to define it as a dictionary to include authentication information per image, like so:
```
container_image_list:
- image: docker.io/imagename
user: exampleuser
password: examplepw
- image: docker.io/imagename2
```
- ```container_image_user``` - optional default username to use when authenticating
to remote registries
- ```container_image_password``` - optional password to use when authenticating
- ```container_image_password``` - optional default password to use when authenticating
to remote registries
- ```container_name``` - Identify the container in systemd and podman commands.
Systemd service file be named container_name--container-pod.service.
Systemd service file will be named container_name--container-pod.service. This can be overwritten with service_name.
- ```container_run_args``` - Anything you pass to podman, except for the name
and image while running single container. Not used for pod.
- ```container_cmd_args``` - Any command and arguments passed to podman-run after specifying the image name. Not used for pod.
- ```container_run_as_user``` - Which user should systemd run container as.
Defaults to root.
- ```container_run_as_group``` - Which grou should systemd run container as.
- ```container_run_as_group``` - Which group should systemd run container as.
Defaults to root.
- ```container_dir_owner``` - Which owner should the volume dirs have.
Defaults to container_run_as_user.
If you use :U as a volume option podman will set the permissions for the user inside the container automatically.
Quote: The :U suffix tells Podman to use the correct host UID and GID based on the UID and GID within the container, to change recursively the owner and group of the source volume. Warning use with caution since this will modify the host filesystem.
- ```container_dir_group``` - Which group should the volume dirs have.
Defaults to container_run_as_group.
- ```container_dir_mode``` - Which permissions should the volume dirs have.
Defaults to '0755'.
- ```container_state``` - container is installed and run if state is
```running```, and stopped and systemd file removed if ```absent```
- ```container_firewall_ports``` - list of ports you have exposed from container
@@ -83,6 +99,25 @@ note that some options apply only to other method.
- ```systemd_tempdir``` - Where to store conmon-pidfile and cidfile for single containers.
Defaults to ``%T`` on systems supporting this specifier (see man 5 systemd.unit) ``/tmp``
otherwise.
- ```service_name``` - How the systemd service files are named.
Defaults to ```"{{ container_name }}-container-pod-{{ container_run_as_user }}.service"```.
- ```service_files_dir``` - Where to store the systemd service files.
Defaults to ```/usr/local/lib/systemd/system``` for root and ```"{{ user_info.home }}/.config/systemd/user``` for a rootless user
- ```service_files_owner``` - Which user should own the systemd service files.
Defaults to root.
- ```service_files_group``` - Which group should own the systemd service files.
Defaults to root.
- ```service_files_mode``` - Which permissions should the systemd service files have.
Defaults to 0644.
- ```container_pod_yaml``` - Path to the pod yaml file. Required for a pod.
- ```container_pod_yaml_deploy``` - Whether to deploy the pod yaml file. Defaults to ``false``
- ```container_pod_yaml_template``` - Template to use for pod yaml deploy.
As the template doesn't include every possible configuration option it is possible to overwrite it with your own template.
Defaults to ``templates/container-pod-yaml.j2``.
- ```container_pod_yaml_template_validation``` - Whether to validate the deployed pod yaml file. Defaults to ``false``.
- ```container_pod_labels``` - Defines labels for ```container_pod_yaml_deploy```.
- ```container_pod_volumes``` - Defines volumes for ```container_pod_yaml_deploy```.
- ```container_pod_containers``` - Defines containers for ```container_pod_yaml_deploy```.
This playbook doesn't have python module to parse parameters for podman command.
Until that you just need to pass all parameters as you would use podman from
@@ -94,6 +129,10 @@ If you want your
[images to be automatically updated](http://docs.podman.io/en/latest/markdown/podman-auto-update.1.html),
add this label to container_cmd_args: ```--label "io.containers.autoupdate=image"```
Never use `ansible.builtin.import_role` to execute this role if you intend to use it more
than once per playbook, or you will fall in
[this anti-pattern](https://medium.com/opsops/ansible-anti-pattern-import-role-task-with-task-level-vars-a9f5c752c9c3).
Dependencies
------------
@@ -115,7 +154,7 @@ Root container:
container_name: lighttpd
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z,U
--label "io.containers.autoupdate=image"
-p 8080:80
#container_state: absent
@@ -123,7 +162,7 @@ Root container:
container_firewall_ports:
- 8080/tcp
- 8443/tcp
import_role:
ansible.builtin.include_role:
name: podman-container-systemd
```
@@ -135,13 +174,6 @@ Rootless container:
name: rootless_user
comment: I run sample container
- name: ensure directory
file:
name: /tmp/podman-container-systemd
owner: rootless_user
group: rootless_user
state: directory
- name: tests container
vars:
container_run_as_user: rootless_user
@@ -151,17 +183,59 @@ Rootless container:
container_name: lighttpd
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z,U
-p 8080:80
#container_state: absent
container_state: running
container_firewall_ports:
- 8080/tcp
- 8443/tcp
import_role:
ansible.builtin.include_role:
name: podman-container-systemd
```
Rootless Pod:
```
- name: ensure user
user:
name: rootless_user
comment: I run sample container
- name: tests pod
vars:
container_run_as_user: rootless_user
container_run_as_group: rootless_user
container_image_list:
- sebp/lighttpd:latest
container_name: lighttpd-pod
container_pod_yaml: /home/rootless_user/lighttpd-pod.yml
container_pod_yaml_deploy: true
container_pod_yaml_template_validation: true
container_pod_labels:
app: "{{ container_name }}"
io.containers.autoupdate: 'image(1)'
container_pod_volumes:
- name: htdocs
hostPath:
path: /tmp/podman-container-systemd
type: DirectoryOrCreate
container_pod_containers:
- name: lighttpd
image: sebp/lighttpd:latest
volumeMounts:
- name: htdocs
mountPath: /var/www/localhost/htdocs:Z
ports:
- containerPort: 80
hostPort: 8080
container_state: running
container_firewall_ports:
- 8080/tcp
- 8443/tcp
ansible.builtin.include_role:
name: podman-container-systemd
```
License
-------

View File

@@ -8,6 +8,9 @@ container_state: running
# by default we want to restart failed container
container_restart: on-failure
service_files_dir: /usr/local/lib/systemd/system
service_files_owner: root
service_files_group: root
service_files_mode: 0644
systemd_scope: system
systemd_TimeoutStartSec: 15
systemd_RestartSec: 30
@@ -30,9 +33,14 @@ systemd_Wants: []
service_name: "{{ container_name }}-container-pod-{{ container_run_as_user }}.service"
# to speed up you can disable always checking if podman is installed.
skip_podman_install: true
skip_podman_install: false
podman_dependencies_rootless:
- fuse-overlayfs
- slirp4netns
- uidmap
# pod yaml deploy
container_pod_yaml_deploy: false
container_pod_yaml_template: templates/container-pod-yaml.j2
container_pod_yaml_template_validation: false

View File

@@ -9,16 +9,6 @@
daemon_reload: true
scope: "{{ systemd_scope }}"
- name: start service
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
scope: "{{ systemd_scope }}"
state: started
- name: restart service
become: true
become_user: "{{ container_run_as_user }}"
@@ -28,13 +18,4 @@
name: "{{ service_name }}"
scope: "{{ systemd_scope }}"
state: restarted
- name: enable service
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
enabled: true
scope: "{{ systemd_scope }}"

View File

@@ -1,2 +1,2 @@
install_date: Fri Oct 15 18:59:22 2021
version: 2.2.0
install_date: Thu 08 Feb 2024 08:54:06 PM
version: 2.5.0

View File

@@ -1,4 +1,5 @@
---
collections:
- ansible.posix
- community.general
- containers.podman

View File

@@ -1,14 +1,19 @@
---
- name: Get user information
user:
name: "{{ container_run_as_user }}"
check_mode: true
changed_when: false
register: user_info
- name: Fails if user "{{ container_run_as_user }}" doesn't exist
fail:
msg: User "{{ container_run_as_user }}" doesn't exist.
when: user_info.name is not defined
- name: prepare rootless stuff if needed
block:
- name: get user information
user:
name: "{{ container_run_as_user }}"
check_mode: true
register: user_info
- name: set systemd dir if user is not root
set_fact:
service_files_dir: "{{ user_info.home }}/.config/systemd/user"
@@ -24,38 +29,28 @@
when: container_run_as_user != "root"
- name: "Find uid of user"
command: "id -u {{ container_run_as_user }}"
register: container_run_as_uid
check_mode: false # Run even in check mode, to avoid fail with --check.
changed_when: false
- name: set systemd runtime dir
set_fact:
xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}"
xdg_runtime_dir: "/run/user/{{ user_info.uid }}"
changed_when: false
- name: set systemd scope to system if needed
set_fact:
systemd_scope: system
service_files_dir: /usr/local/lib/systemd/system
xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}"
service_files_dir: "{{ service_files_dir }}"
when: container_run_as_user == "root"
changed_when: false
- name: create local systemd directory
when: service_files_dir == '/usr/local/lib/systemd/system'
file:
group: root
mode: u=rwX,go=rX
owner: root
path: /usr/local/lib/systemd/system/
state: directory
become: true
when: container_run_as_user == "root" and service_files_dir == '/usr/local/lib/systemd/system'
- name: check if service file exists already
stat:
path: "{{ service_files_dir }}/{{ service_name }}"
register: service_file_before_template
- name: do tasks when "{{ service_name }}" state is "running"
block:
@@ -91,73 +86,71 @@
state: present
when: not skip_podman_install
- name: check user exists
user:
name: "{{ container_run_as_user }}"
- name: Check subuid & subgid
import_tasks: check_subid.yml
- name: running single container, get image Id if it exists and we are root
# XXX podman doesn't work through sudo for non root users,
# so skip preload if user
# https://github.com/containers/libpod/issues/5570
# command: podman inspect -f {{.Id}} "{{ container_image }}"
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}"
- name: Ensure empty internal variable _container_image_list
set_fact:
_container_image_list: []
changed_when: false
register: pre_pull_id
ignore_errors: true
when:
- container_image_list is defined
- container_image_list | length == 1
- container_run_as_user == 'root'
- name: Convert container_image_list to new form
set_fact:
_container_image_list: "{{ _container_image_list + [{'image': item}] }}"
with_items: "{{ container_image_list }}"
when: not (container_image_list | selectattr("image", "defined"))
changed_when: false
no_log: true
- name: Always use internal variable for container_image_list
set_fact:
_container_image_list: "{{ container_image_list }}"
when: _container_image_list | length == 0
changed_when: false
no_log: true
- name: running single container, ensure we have up to date container image
containers.podman.podman_image:
name: "{{ item }}"
name: "{{ item.image }}"
force: true
username: "{{ container_image_user | default(omit) }}"
password: "{{ container_image_password | default(omit) }}"
username: "{{ item.user | default(container_image_user) | default(omit) }}"
password: "{{ item.password | default(container_image_password) | default(omit) }}"
notify: restart service
become: true
become_user: "{{ container_run_as_user }}"
when:
- container_image_list is defined
- container_image_list | length == 1
- _container_image_list | length == 1
- container_run_as_user == 'root'
with_items: "{{ container_image_list }}"
- name: running single container, get image Id if it exists
command:
"podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}"
changed_when: false
become: true
become_user: "{{ container_run_as_user }}"
register: post_pull_id
ignore_errors: true
when:
- container_image_list is defined
- container_image_list | length == 1
- container_run_as_user == 'root'
with_items: "{{ container_image_list }}"
- not (item.image | regex_search ('^localhost/.*'))
loop: "{{ _container_image_list }}"
no_log: true
- name: seems we use several container images, ensure all are up to date
containers.podman.podman_image:
name: "{{ item }}"
name: "{{ item.image }}"
force: true
username: "{{ container_image_user | default(omit) }}"
password: "{{ container_image_password | default(omit) }}"
username: "{{ item.user | default(container_image_user) | default(omit) }}"
password: "{{ item.password | default(container_image_password) | default(omit) }}"
become: true
become_user: "{{ container_run_as_user }}"
when: container_image_list is defined and container_image_list | length > 1
with_items: "{{ container_image_list }}"
when:
- _container_image_list | length > 1
- not (item.image | regex_search ('^localhost/.*'))
loop: "{{ _container_image_list }}"
no_log: true
- name: Include pod yaml templating
ansible.builtin.include_tasks: deploy_pod_yaml.yml
when:
- container_pod_yaml is defined
- container_pod_yaml_deploy
- name: if running pod, ensure configuration file exists
stat:
path: "{{ container_pod_yaml }}"
register: pod_file
when: container_pod_yaml is defined
- name: fail if pod configuration file is missing
fail:
msg: >
@@ -179,41 +172,32 @@
- container_run_as_user != "root"
- not user_lingering.stat.exists
- name: "create systemd service file for container: {{ container_name }}"
template:
src: systemd-service-single.j2
dest: "{{ service_files_dir }}/{{ service_name }}"
owner: root
group: root
mode: 0644
notify:
- reload systemctl
- start service
- enable service
register: service_file
when: container_image_list is defined and container_image_list | length == 1
- name: "create systemd service file for pod: {{ container_name }}"
template:
src: systemd-service-pod.j2
dest: "{{ service_files_dir }}/{{ service_name }}"
owner: root
group: root
mode: 0644
notify:
- reload systemctl
- start service
- enable service
register: service_file
when: container_image_list is defined and container_image_list | length > 1
- name: "ensure {{ service_name }} is restarted due config change"
debug: msg="config has changed:"
changed_when: true
notify: restart service
- name: Ensure volume directories exist for {{ container_name }}
file:
path: "{{ item }}"
owner: "{{ container_dir_owner | default(container_run_as_user) }}"
group: "{{ container_dir_group | default(container_run_as_group) }}"
mode: "{{ container_dir_mode | default(omit) }}"
state: directory
become: true
loop: "{{ container_run_args | regex_findall('-v ([^:]*)') }}"
when:
- service_file_before_template.stat.exists
- service_file.changed
- _container_image_list | length == 1
- container_run_args is defined and container_run_args | length > 0
- container_pod_yaml is undefined
- name: Create systemd service file for {{ container_name }}
template:
src: "{% if _container_image_list | length == 1 %}systemd-service-single.j2{% else %}systemd-service-pod.j2{% endif %}"
dest: "{{ service_files_dir }}/{{ service_name }}"
owner: "{{ service_files_owner }}"
group: "{{ service_files_group }}"
mode: "{{ service_files_mode }}"
become: true
notify:
- reload systemctl
- restart service
register: service_file
- name: ensure auto update is running for images
become: true
@@ -232,74 +216,58 @@
- name: configure firewall if container_firewall_ports is defined
block:
- name: set firewall ports state to enabled when container state is running
set_fact:
fw_state: enabled
when: container_state == "running"
- name: disable firewall ports state when container state is not running
set_fact:
fw_state: disabled
when: container_state != "running"
- name: ensure firewalld is installed
tags: firewall
package: name=firewalld state=present
become: true
when: ansible_pkg_mgr != "atomic_container"
- name: ensure firewalld is installed (on fedora-iot)
tags: firewall
command: >-
rpm-ostree install --idempotent --unchanged-exit-77
--allow-inactive firewalld
register: ostree
failed_when: not ( ostree.rc == 77 or ostree.rc == 0 )
changed_when: ostree.rc != 77
- name: Ensure firewalld is installed (rpm-ostree)
when: ansible_pkg_mgr == "atomic_container"
block:
- name: Ensure firewalld is installed (rpm-ostree)
tags: firewall
community.general.rpm_ostree_pkg:
name: firewalld
become: true
register: ostree
- name: reboot if new stuff was installed
reboot:
reboot_timeout: 300
when:
- ansible_pkg_mgr == "atomic_container"
- ostree.rc != 77
- name: Reboot if firewalld was installed
reboot:
reboot_timeout: 300
become: true
when: ostree is changed
- name: ensure firewall service is running
- name: Ensure firewall service is running
tags: firewall
service: name=firewalld state=started
service:
name: firewalld
state: started
become: true
- name: ensure container's exposed ports firewall state
- name: Ensure container's exposed ports firewall state
tags: firewall
ansible.posix.firewalld:
port: "{{ item }}"
permanent: true
immediate: true
state: "{{ fw_state }}"
state: "{% if container_state == 'running' %}enabled{% else %}disabled{% endif %}"
become: true
with_items: "{{ container_firewall_ports }}"
- name: Force all notified handlers to run at this point
meta: flush_handlers
when: container_firewall_ports is defined
- name: do cleanup stuff when container_state is "absent"
block:
- name: ensure "{{ service_name }}" is disabled at boot
become: true
become_user: "{{ container_run_as_user }}"
# become_method: machinectl
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
enabled: false
scope: "{{ systemd_scope }}"
when:
- service_file_before_template.stat.exists
- name: Check if service file exists
stat:
path: "{{ service_files_dir }}/{{ service_name }}"
register: service_file
- name: ensure "{{ service_name }}" is stopped
- name: Ensure "{{ service_name }}" is stopped and disabled at boot
become: true
become_user: "{{ container_run_as_user }}"
# become_method: machinectl
@@ -311,17 +279,15 @@
enabled: false
scope: "{{ systemd_scope }}"
when:
- service_file_before_template.stat.exists
- service_file.stat.exists
- name: clean up systemd service file
file:
path: "{{ service_files_dir }}/{{ service_name }}"
state: absent
become: true
notify: reload systemctl
- name: Force all notified handlers to run at this point
meta: flush_handlers
- name: Check if user is lingering
stat:
path: "/var/lib/systemd/linger/{{ container_run_as_user }}"
@@ -341,3 +307,6 @@
when: container_pod_yaml is defined
when: container_state == "absent"
- name: Force all notified handlers to run at this point
meta: flush_handlers

View File

@@ -29,7 +29,7 @@ User={{ container_run_as_user }}
ExecStart=/usr/bin/podman run --name {{ container_name }} \
{{ container_run_args }} \
--conmon-pidfile {{ pidfile }} --cidfile {{ cidfile }} \
{{ container_image_list | first }} {% if container_cmd_args is defined %} \
{{ _container_image_list | map(attribute='image') | first }} {% if container_cmd_args is defined %} \
{{ container_cmd_args }} {% endif %}
ExecStop=/usr/bin/sh -c "/usr/bin/podman stop -t "{{ container_stop_timeout }}" `cat {{ cidfile }}`"

View File

@@ -14,7 +14,13 @@
# connection: local
# delegate_to: localhost
vars:
container_state: running
# container_state: absent
container_instances:
- name: lighthttpd-1
port: 8080
- name: lighthttpd-2
port: 8081
tasks:
- name: create test dir for www file
file:
@@ -28,31 +34,34 @@
- name: tests container
vars:
container_state: running
# container_state: absent
container_image_list:
- sebp/lighttpd:latest
container_name: lighttpd
container_name: "{{ outer_item.name }}"
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
-t
-p 8080:80/tcp
-p "{{ outer_item.port }}:80/tcp"
container_firewall_ports:
- 8080/tcp
- "{{ outer_item.port }}/tcp"
import_role:
ansible.builtin.include_role:
name: podman-container-systemd
loop: "{{ container_instances }}"
loop_control:
loop_var: outer_item
- name: Wait for lighttpd to come up
wait_for:
port: 8080
port: "{{ item.port }}"
loop: "{{ container_instances }}"
when: container_state == "running"
- name: test if container runs
get_url:
url: http://localhost:8080
url: "http://localhost:{{ item.port }}"
dest: /tmp/podman-container-systemd/index.return.html
loop: "{{ container_instances }}"
register: get_url
when: container_state == "running"
@@ -64,6 +73,9 @@
- debug:
msg:
- "Got http://localhost:8080 to test if it worked!"
- "This sould state 'file' on success: {{ get_url.state }}"
- "This should state 'file' on success: {{ get_url.results[idx].state }}"
- "On success, output should say 'Hello world!' here: {{ curl.stdout }}"
loop: "{{ container_instances }}"
loop_control:
index_var: idx
when: container_state == "running"

View File

@@ -4,3 +4,6 @@
cidpid_base: "{{ systemd_tempdir }}/%n-"
cidfile: "{{ cidpid_base }}cid"
pidfile: "{{ cidpid_base }}pid"
# kubeval
kubeval_url: "https://github.com/instrumenta/kubeval/releases/latest"