Update roles

2021-04-20 12:14:42 -04:00
parent 8005080b8b
commit 595021d449
131 changed files with 4144 additions and 3018 deletions

View File

@@ -1,3 +1,5 @@
*.retry
*/__pycache__
*.pyc
.cache

View File

@@ -1,32 +0,0 @@
---
language: python
services: docker
env:
global:
- ROLE_NAME: gitlab
matrix:
- MOLECULE_DISTRO: centos7
- MOLECULE_DISTRO: ubuntu1804
- MOLECULE_DISTRO: debian9
- MOLECULE_DISTRO: centos7
MOLECULE_PLAYBOOK: playbook-version.yml
- MOLECULE_DISTRO: ubuntu1804
MOLECULE_PLAYBOOK: playbook-version.yml
install:
# Install test dependencies.
- pip install molecule yamllint ansible-lint docker
before_script:
# Use actual Ansible Galaxy role name for the project directory.
- cd ../
- mv ansible-role-$ROLE_NAME geerlingguy.$ROLE_NAME
- cd geerlingguy.$ROLE_NAME
script:
# Run tests.
- molecule test
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@@ -1,6 +1,10 @@
---
extends: default
rules:
line-length:
max: 140
max: 180
level: warning
ignore: |
.github/stale.yml

View File

@@ -1,6 +1,6 @@
# Ansible Role: GitLab
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-gitlab.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-gitlab)
[![CI](https://github.com/geerlingguy/ansible-role-gitlab/workflows/CI/badge.svg?event=push)](https://github.com/geerlingguy/ansible-role-gitlab/actions?query=workflow%3ACI)
Installs GitLab, a Ruby-based front-end to Git, on any RedHat/CentOS or Debian/Ubuntu Linux system.
@@ -60,6 +60,17 @@ GitLab SSL configuration; tells GitLab to redirect normal http requests to https
Whether to create a self-signed certificate for serving GitLab over a secure connection. Set `gitlab_self_signed_cert_subj` according to your locality and organization.
### LetsEncrypt Configuration.
gitlab_letsencrypt_enable: "false"
gitlab_letsencrypt_contact_emails: ["gitlab@example.com"]
gitlab_letsencrypt_auto_renew_hour: 1
gitlab_letsencrypt_auto_renew_minute: 30
gitlab_letsencrypt_auto_renew_day_of_month: "*/7"
gitlab_letsencrypt_auto_renew: true
GitLab LetsEncrypt configuration; tells GitLab whether to request and use a certificate from LetsEncrypt, if `gitlab_letsencrypt_enable` is set to `"true"`. Multiple contact emails can be configured under `gitlab_letsencrypt_contact_emails` as a list.
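As a sketch of how these variables might be used in a play (hostnames and addresses are placeholders; `gitlab_external_url` and the play layout are assumptions based on the role's defaults, not part of this changeset):

```yaml
# Illustrative only: enable LetsEncrypt for the geerlingguy.gitlab role.
- hosts: gitlab
  become: true
  vars:
    gitlab_external_url: "https://gitlab.example.com/"   # assumed default-style variable
    gitlab_letsencrypt_enable: "true"
    gitlab_letsencrypt_contact_emails:
      - gitlab@example.com
      - ops@example.com
  roles:
    - geerlingguy.gitlab
```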
# LDAP Configuration.
gitlab_ldap_enabled: "false"
gitlab_ldap_host: "example.com"

View File

@@ -73,3 +73,11 @@ gitlab_registry_enable: "false"
gitlab_registry_external_url: "https://gitlab.example.com:4567"
gitlab_registry_nginx_ssl_certificate: "/etc/gitlab/ssl/gitlab.crt"
gitlab_registry_nginx_ssl_certificate_key: "/etc/gitlab/ssl/gitlab.key"
# LetsEncrypt configuration.
gitlab_letsencrypt_enable: "false"
gitlab_letsencrypt_contact_emails: ["gitlab@example.com"]
gitlab_letsencrypt_auto_renew_hour: 1
gitlab_letsencrypt_auto_renew_minute: 30
gitlab_letsencrypt_auto_renew_day_of_month: "*/7"
gitlab_letsencrypt_auto_renew: true

View File

@@ -1,2 +1,2 @@
install_date: Wed Jun 24 18:44:32 2020
version: 3.0.0
install_date: Tue Apr 20 16:13:49 2021
version: 3.1.0

View File

@@ -2,6 +2,7 @@
dependencies: []
galaxy_info:
role_name: gitlab
author: geerlingguy
description: GitLab Git web interface
company: "Midwestern Mac, LLC"
@@ -10,8 +11,8 @@ galaxy_info:
platforms:
- name: EL
versions:
- 6
- 7
- 8
- name: Debian
versions:
- all

View File

@@ -3,10 +3,6 @@ dependency:
name: galaxy
driver:
name: docker
lint: |
set -e
yamllint .
ansible-lint
platforms:
- name: instance
image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest"

View File

@@ -1,31 +0,0 @@
---
- name: Converge
hosts: all
become: true
vars:
gitlab_restart_handler_failed_when: false
pre_tasks:
- name: Update apt cache.
apt: update_cache=true cache_valid_time=600
when: ansible_os_family == 'Debian'
changed_when: false
- name: Remove the .dockerenv file so GitLab Omnibus doesn't get confused.
file:
path: /.dockerenv
state: absent
- name: Set the test GitLab version number for Debian.
set_fact:
gitlab_version: '11.4.0-ce.0'
when: ansible_os_family == 'Debian'
- name: Set the test GitLab version number for RedHat.
set_fact:
gitlab_version: '11.4.0-ce.0.el7'
when: ansible_os_family == 'RedHat'
roles:
- role: geerlingguy.gitlab

View File

@@ -19,6 +19,15 @@ nginx['redirect_http_to_https'] = {{ gitlab_redirect_http_to_https }}
nginx['ssl_certificate'] = "{{ gitlab_ssl_certificate }}"
nginx['ssl_certificate_key'] = "{{ gitlab_ssl_certificate_key }}"
letsencrypt['enable'] = "{{ gitlab_letsencrypt_enable }}"
{% if gitlab_letsencrypt_enable %}
letsencrypt['contact_emails'] = "{{ gitlab_letsencrypt_contact_emails | to_json }}"
letsencrypt['auto_renew_hour'] = "{{ gitlab_letsencrypt_auto_renew_hour }}"
letsencrypt['auto_renew_minute'] = "{{ gitlab_letsencrypt_auto_renew_minute }}"
letsencrypt['auto_renew_day_of_month'] = "{{ gitlab_letsencrypt_auto_renew_day_of_month }}"
letsencrypt['auto_renew'] = "{{ gitlab_letsencrypt_auto_renew }}"
{% endif %}
# The directory where Git repositories will be stored.
git_data_dirs({"default" => {"path" => "{{ gitlab_git_data_dir }}"} })
@@ -82,7 +91,7 @@ nginx['ssl_client_certificate'] = "{{ gitlab_nginx_ssl_client_certificate }}"
# GitLab registry.
registry['enable'] = {{ gitlab_registry_enable }}
{% if gitlab_registry_enable %}
{% if gitlab_registry_enable == "true" %}
registry_external_url "{{ gitlab_registry_external_url }}"
registry_nginx['ssl_certificate'] = "{{ gitlab_registry_nginx_ssl_certificate }}"
registry_nginx['ssl_certificate_key'] = "{{ gitlab_registry_nginx_ssl_certificate_key }}"

View File

@@ -1,2 +1,2 @@
install_date: Wed Jun 24 18:44:31 2020
install_date: Tue Apr 20 16:13:48 2021
version: 1.10.0

View File

@@ -1,2 +1,2 @@
install_date: Wed Jun 24 18:44:38 2020
install_date: Tue Apr 20 16:13:55 2021
version: master

View File

@@ -59,10 +59,12 @@ The role uses variables that must be passed when including it. Since
you can run either a single container on its own or multiple containers in a pod,
note that some options apply to only one of those methods.
- ```container_image``` - container image and tag, e.g. nextcloud:latest
This is used only if you run single container
- ```container_image_list``` - list of container images to run within a pod.
This is used only if you run containers in pod.
- ```container_image_list``` - list of container images to run.
If more than one image is defined, then the containers will be run in a pod.
- ```container_image_user``` - optional username to use when authenticating
to remote registries
- ```container_image_password``` - optional password to use when authenticating
to remote registries (see the registry example below)
- ```container_name``` - Identifies the container in systemd and podman commands.
The systemd service file will be named container_name--container-pod.service.
- ```container_run_args``` - Anything you pass to podman, except for the name
@@ -88,12 +90,14 @@ command line. See ```man podman``` or
[podman tutorials](https://github.com/containers/libpod/tree/master/docs/tutorials)
for info.
If you want your
[images to be automatically updated](http://docs.podman.io/en/latest/markdown/podman-auto-update.1.html),
add this label to container_cmd_args: ```--label "io.containers.autoupdate=image"```
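As a rough sketch of pulling from an authenticated registry (the image name, account, vault variable, and role path below are placeholders, not part of this role's examples):

```
- name: private registry container
  hosts: all
  vars:
    container_image_list:
      - registry.example.com/myorg/myapp:latest   # placeholder image
    container_image_user: pull-bot                # placeholder account
    # consider keeping the password in Ansible Vault rather than plain text
    container_image_password: "{{ vault_registry_password }}"
    container_name: myapp
    container_run_args: >-
      --rm
      -p 8080:80
    container_state: running
  roles:
    - podman-container-systemd    # adjust to the role name/path in your setup
```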
Dependencies
------------
No dependencies.
* [containers.podman](https://galaxy.ansible.com/containers/podman) (collection)
Example Playbook
----------------
@@ -105,11 +109,13 @@ Root container:
```
- name: tests container
vars:
container_image: sebp/lighttpd:latest
container_image_list:
- sebp/lighttpd:latest
container_name: lighttpd
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
--label "io.containers.autoupdate=image"
-p 8080:80
#container_state: absent
container_state: running
@@ -139,7 +145,8 @@ Rootless container:
vars:
container_run_as_user: rootless_user
container_run_as_group: rootless_user
container_image: sebp/lighttpd:latest
container_image_list:
- sebp/lighttpd:latest
container_name: lighttpd
container_run_args: >-
--rm

View File

@@ -3,14 +3,12 @@
# state can be running or absent
container_state: running
# systemd service name
service_name: "{{ container_name }}-container-pod.service"
# SystemD restart policy
# see man systemd.service for info
# by default we want to restart failed container
container_restart: on-failure
service_files_dir: /etc/systemd/system
systemd_scope: system
systemd_TimeoutStartSec: 15
systemd_RestartSec: 30
systemd_tempdir: "{{ '/tmp' if ansible_os_family == 'RedHat' and
@@ -19,5 +17,13 @@ container_run_as_user: root
container_run_as_group: root
container_stop_timeout: 15
# systemd service name
service_name: "{{ container_name }}-container-pod-{{ container_run_as_user }}.service"
# to speed things up you can disable always checking whether podman is installed.
skip_podman_install: true
podman_dependencies_rootless:
- fuse-overlayfs
- slirp4netns
- uidmap

View File

@@ -1,15 +1,40 @@
---
- name: reload systemctl
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
daemon_reload: yes
daemon_reload: true
scope: "{{ systemd_scope }}"
- name: start service
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
scope: "{{ systemd_scope }}"
state: started
- name: restart service
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
scope: "{{ systemd_scope }}"
state: restarted
- name: enable service
become: true
become_user: "{{ container_run_as_user }}"
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
enabled: true
scope: "{{ systemd_scope }}"

View File

@@ -1,2 +1,2 @@
install_date: Wed Jun 24 18:44:37 2020
version: master
install_date: Tue Apr 20 16:13:54 2021
version: 2.1.0

View File

@@ -7,6 +7,9 @@ galaxy_info:
license: GPLv3
min_ansible_version: 2.4
platforms:
- name: Debian
versions:
- buster
- name: EL
versions:
- 8
@@ -14,9 +17,26 @@ galaxy_info:
- name: Fedora
versions:
- all
- name: Ubuntu
versions:
- bionic
- disco
- eoan
- focal
galaxy_tags:
- podman
- container
- systemd
dependencies: []
dependencies:
- role: systemli.apt_repositories
vars:
apt_repositories:
- preset: kubic
when: >
(ansible_distribution == 'Debian' and
ansible_distribution_release == 'buster') or
ansible_distribution == 'Ubuntu'
collections:
- containers.podman

View File

@@ -1,5 +1,48 @@
---
- name: prepare rootless stuff if needed
block:
- name: get user information
user:
name: "{{ container_run_as_user }}"
check_mode: true
register: user_info
- name: set systemd dir if user is not root
set_fact:
service_files_dir: "{{ user_info.home }}/.config/systemd/user"
systemd_scope: user
changed_when: false
- name: ensure systemd files directory exists if user not root
file:
path: "{{ service_files_dir }}"
state: directory
owner: "{{ container_run_as_user }}"
group: "{{ container_run_as_group }}"
when: container_run_as_user != "root"
- name: "Find uid of user"
command: "id -u {{ container_run_as_user }}"
register: container_run_as_uid
check_mode: false # Run even in check mode, to avoid fail with --check.
changed_when: false
- name: set systemd runtime dir
set_fact:
xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}"
changed_when: false
- name: set systemd scope to system if needed
set_fact:
systemd_scope: system
service_files_dir: '/etc/systemd/system'
xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}"
when: container_run_as_user == "root"
changed_when: false
- name: check if service file exists already
stat:
path: "{{ service_files_dir }}/{{ service_name }}"
@@ -8,99 +51,95 @@
- name: do tasks when "{{ service_name }}" state is "running"
block:
- name: Check for user namespace support in kernel
stat:
path: /proc/sys/kernel/unprivileged_userns_clone
register: unprivileged_userns_clone
changed_when: false
- name: Allow unprivileged users on Debian
sysctl:
name: kernel.unprivileged_userns_clone
value: '1'
state: present
sysctl_file: /etc/sysctl.d/userns.conf
sysctl_set: true
when:
- ansible_distribution == 'Debian'
- unprivileged_userns_clone.stat.exists
- name: Install rootless dependencies on Debian-based
package:
name: "{{ podman_dependencies_rootless }}"
state: present
when:
- ansible_os_family == 'Debian'
- container_run_as_user != 'root'
- name: ensure podman is installed
package:
name: podman
state: installed
state: present
when: not skip_podman_install
- name: check user exists
user:
name: "{{ container_run_as_user }}"
- name: check if user is in subuid file
lineinfile:
line: '\1'
path: /etc/subuid
regexp: "^({{ container_run_as_user }}:.*)"
backrefs: yes
check_mode: yes
register: uid_has
ignore_errors: true
when: container_run_as_user != 'root'
- name: check if group is in subgid file
lineinfile:
line: '\1'
path: /etc/subgid
regexp: "^({{ container_run_as_group }}:.*)"
backrefs: yes
check_mode: yes
register: gid_has
ignore_errors: true
when: container_run_as_group != 'root'
- name: ensure user is in subuid file, if it was missing
lineinfile:
path: /etc/subuid
regexp: "^{{ container_run_as_user }}:.*"
line: "{{ container_run_as_user }}:305536:65536"
create: yes
mode: '0644'
owner: root
group: root
when: uid_has.changed and container_run_as_user != 'root'
- name: ensure group is in subgid file, if it was missing
lineinfile:
path: /etc/subgid
regexp: "^{{ container_run_as_group }}:.*"
line: "{{ container_run_as_group }}:305536:65536"
create: yes
mode: '0644'
owner: root
group: root
when: gid_has.changed and container_run_as_group != 'root'
- name: Check subuid & subgid
import_tasks: check_subid.yml
- name: running single container, get image Id if it exists and we are root
# XXX podman doesn't work through sudo for non root users, so skip preload if user
# XXX podman doesn't work through sudo for non root users,
# so skip preload if user
# https://github.com/containers/libpod/issues/5570
# command: podman inspect -f {{.Id}} "{{ container_image }}"
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ container_image }}"
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}"
register: pre_pull_id
ignore_errors: yes
when: container_image is defined and container_run_as_user == 'root'
ignore_errors: true
when:
- container_image_list is defined
- container_image_list | length == 1
- container_run_as_user == 'root'
with_items: "{{ container_image_list }}"
- name: running single container, ensure we have up to date container image
command: "podman pull {{ container_image }}"
become: yes
containers.podman.podman_image:
name: "{{ item }}"
force: true
username: "{{ container_image_user | default(omit) }}"
password: "{{ container_image_password | default(omit) }}"
notify: restart service
become: true
become_user: "{{ container_run_as_user }}"
when: container_image is defined and container_run_as_user == 'root'
when:
- container_image_list is defined
- container_image_list | length == 1
- container_run_as_user == 'root'
with_items: "{{ container_image_list }}"
- name: running single container, get image Id if it exists
command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ container_image }}"
become: yes
command:
"podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}"
become: true
become_user: "{{ container_run_as_user }}"
register: post_pull_id
when: container_image is defined and container_run_as_user == 'root'
- name: force restart after image change
debug: msg="image has changed"
changed_when: True
notify: restart service
ignore_errors: true
when:
- container_image_list is defined
- container_image_list | length == 1
- container_run_as_user == 'root'
- container_image is defined
- pre_pull_id.stdout != post_pull_id.stdout
- pre_pull_id is succeeded
# XXX remove above comparison if future podman tells image changed.
with_items: "{{ container_image_list }}"
- name: seems we use several container images, ensure all are up to date
command: "podman pull {{ item }}"
become: yes
containers.podman.podman_image:
name: "{{ item }}"
force: true
username: "{{ container_image_user | default(omit) }}"
password: "{{ container_image_password | default(omit) }}"
become: true
become_user: "{{ container_run_as_user }}"
when: container_image_list is defined
when: container_image_list is defined and container_image_list | length > 1
with_items: "{{ container_image_list }}"
- name: if running pod, ensure configuration file exists
@@ -110,11 +149,25 @@
when: container_pod_yaml is defined
- name: fail if pod configuration file is missing
fail:
msg: "Error: Asking to run pod, but pod definition yaml file is missing: {{ container_pod_yaml }}"
msg: >
"Error: Asking to run pod, but pod definition yaml file is missing: "
"{{ container_pod_yaml }}"
when:
- container_pod_yaml is defined
- not pod_file.stat.exists
- name: Check if user is lingering
stat:
path: "/var/lib/systemd/linger/{{ container_run_as_user }}"
register: user_lingering
when: container_run_as_user != "root"
- name: Enable lingering if needed
command: "loginctl enable-linger {{ container_run_as_user }}"
when:
- container_run_as_user != "root"
- not user_lingering.stat.exists
- name: "create systemd service file for container: {{ container_name }}"
template:
src: systemd-service-single.j2
@@ -122,9 +175,12 @@
owner: root
group: root
mode: 0644
notify: reload systemctl
notify:
- reload systemctl
- start service
- enable service
register: service_file
when: container_image is defined
when: container_image_list is defined and container_image_list | length == 1
- name: "create systemd service file for pod: {{ container_name }}"
template:
@@ -136,24 +192,13 @@
notify:
- reload systemctl
- start service
- enable service
register: service_file
when: container_image_list is defined
- name: ensure "{{ service_name }}" is enabled at boot, and systemd reloaded
systemd:
name: "{{ service_name }}"
enabled: yes
daemon_reload: yes
- name: ensure "{{ service_name }}" is running
service:
name: "{{ service_name }}"
state: started
when: not service_file_before_template.stat.exists
when: container_image_list is defined and container_image_list | length > 1
- name: "ensure {{ service_name }} is restarted due config change"
debug: msg="config has changed:"
changed_when: True
changed_when: true
notify: restart service
when:
- service_file_before_template.stat.exists
@@ -169,14 +214,32 @@
fw_state: enabled
when: container_state == "running"
- name: set firewall ports state to disabled when container state is not running
- name: disable firewall ports state when container state is not running
set_fact:
fw_state: disabled
when: container_state != "running"
- name: ensure firewalld is installed
tags: firewall
package: name=firewalld state=installed
package: name=firewalld state=present
when: ansible_pkg_mgr != "atomic_container"
- name: ensure firewalld is installed (on fedora-iot)
tags: firewall
command: >-
rpm-ostree install --idempotent --unchanged-exit-77
--allow-inactive firewalld
register: ostree
failed_when: not ( ostree.rc == 77 or ostree.rc == 0 )
changed_when: ostree.rc != 77
when: ansible_pkg_mgr == "atomic_container"
- name: reboot if new stuff was installed
reboot:
reboot_timeout: 300
when:
- ansible_pkg_mgr == "atomic_container"
- ostree.rc != 77
- name: ensure firewall service is running
tags: firewall
@@ -186,11 +249,14 @@
tags: firewall
firewalld:
port: "{{ item }}"
permanent: yes
immediate: yes
permanent: true
immediate: true
state: "{{ fw_state }}"
with_items: "{{ container_firewall_ports }}"
- name: Force all notified handlers to run at this point
meta: flush_handlers
when: container_firewall_ports is defined
@@ -198,17 +264,29 @@
block:
- name: ensure "{{ service_name }}" is disabled at boot
service:
become: true
become_user: "{{ container_run_as_user }}"
# become_method: machinectl
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
enabled: false
scope: "{{ systemd_scope }}"
when:
- service_file_before_template.stat.exists
- name: ensure "{{ service_name }}" is stopped
service:
become: true
become_user: "{{ container_run_as_user }}"
# become_method: machinectl
environment:
XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
systemd:
name: "{{ service_name }}"
state: stopped
enabled: no
enabled: false
scope: "{{ systemd_scope }}"
when:
- service_file_before_template.stat.exists
@@ -218,6 +296,21 @@
state: absent
notify: reload systemctl
- name: Force all notified handlers to run at this point
meta: flush_handlers
- name: Check if user is lingering
stat:
path: "/var/lib/systemd/linger/{{ container_run_as_user }}"
register: user_lingering
when: container_run_as_user != "root"
- name: Disable lingering (are we sure we want to do this always?)
command: "loginctl disable-linger {{ container_run_as_user }}"
when:
- container_run_as_user != "root"
- user_lingering.stat.exists
- name: clean up pod configuration file
file:
path: "{{ container_pod_yaml }}"

View File

@@ -6,7 +6,9 @@ After=network.target
Type=forking
TimeoutStartSec={{ systemd_TimeoutStartSec }}
ExecStartPre=-/usr/bin/podman pod rm -f {{ container_name }}
{% if container_run_as_user == 'root' %}
User={{ container_run_as_user }}
{% endif %}
RemainAfterExit=yes
ExecStart=/usr/bin/podman play kube {{ container_pod_yaml }}
@@ -18,4 +20,9 @@ Restart={{ container_restart }}
RestartSec={{ systemd_RestartSec }}
[Install]
{% if container_run_as_user == 'root' %}
WantedBy=multi-user.target
{% endif %}
{% if container_run_as_user != 'root' %}
WantedBy=default.target
{% endif %}

View File

@@ -6,20 +6,27 @@ After=network.target
Type=simple
TimeoutStartSec={{ systemd_TimeoutStartSec }}
ExecStartPre=-/usr/bin/rm -f {{ pidfile }} {{ cidfile }}
{% if container_run_as_user == 'root' %}
User={{ container_run_as_user }}
{% endif %}
ExecStart=/usr/bin/podman run --name {{ container_name }} \
{{ container_run_args }} \
--conmon-pidfile {{ pidfile }} --cidfile {{ cidfile }} \
{{ container_image }} {% if container_cmd_args is defined %} \
{{ container_image_list | first }} {% if container_cmd_args is defined %} \
{{ container_cmd_args }} {% endif %}
ExecStop=/usr/bin/sh -c "/usr/bin/podman stop -t "{{ container_stop_timeout }}" `cat {{ cidfile }}`"
ExecStop=/usr/bin/sh -c "/usr/bin/podman rm -f `cat {{ cidfile }}`"
Restart={{ container_restart }}
RestartSec={{ systemd_RestartSec }}
KillMode=none
KillMode=mixed
PIDFile={{ pidfile }}
[Install]
{% if container_run_as_user == 'root' %}
WantedBy=multi-user.target
{% endif %}
{% if container_run_as_user != 'root' %}
WantedBy=default.target
{% endif %}

View File

@@ -1,10 +1,14 @@
---
# yamllint disable rule:line-length
# I run this file with following line to test against my Vagrant Fedora:
# ansible-playbook --vault-password-file .vault-password -b -i \
# ~/vagrant/fedora/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory \
# -e ansible_python_interpreter=/usr/bin/python3 \
# -e container_state=running test-podman.yml
# yamllint enable rule:line-length
- name: create lighttpd pod
hosts: all
# connection: local
@@ -25,12 +29,14 @@
- name: tests container
vars:
container_state: running
#container_state: absent
container_image: sebp/lighttpd:latest
# container_state: absent
container_image_list:
- sebp/lighttpd:latest
container_name: lighttpd
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
-t
-p 8080:80/tcp
container_firewall_ports:
- 8080/tcp

View File

@@ -1,7 +1,6 @@
---
# systemd service name
service_name: "{{ container_name }}-container-pod.service"
cidpid_base: "{{ systemd_tempdir }}/%n-"
cidfile: "{{ cidpid_base }}cid"
pidfile: "{{ cidpid_base }}pid"

View File

@@ -9,3 +9,5 @@
/tests/tmp_merge_coveragerc
/tests/total-*coveragedata
/.tox
/.vagrant
/.vscode

View File

@@ -1,27 +0,0 @@
---
dist: xenial
language: python
matrix:
include:
- python: 2.6
dist: trusty
- python: 2.7
- python: 3.5
env: aptpkgs=python3-selinux
- python: 3.6
- python: 3.7
- python: 3.7-dev
- python: 3.8-dev
# - python: nightly
services:
- docker
before_install:
- if [ -n "${aptpkgs}" ]; then sudo apt-get install -y python3-selinux; fi
install:
- pip install tox tox-travis
script:
- tox

View File

@@ -1,8 +1,10 @@
linux-system-roles/network
==========================
[![Coverage Status](https://coveralls.io/repos/github/linux-system-roles/network/badge.svg)](https://coveralls.io/github/linux-system-roles/network)
[![Travis Build Status](https://travis-ci.org/linux-system-roles/network.svg?branch=master)](https://travis-ci.org/linux-system-roles/network)
![CI Testing](https://github.com/linux-system-roles/network/workflows/tox/badge.svg)
[![Code Style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/linux-system-roles/network.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/linux-system-roles/network/context:python)
Overview
--------
@@ -13,13 +15,16 @@ This role can be used to configure:
- Ethernet interfaces
- Bridge interfaces
- Bonded interfaces
- VLAN interfaces
- VLAN interfaces
- MacVLAN interfaces
- Infiniband interfaces
- Wireless (WiFi) interfaces
- IP configuration
- 802.1x authentication
Introduction
------------
The `network` role supports two providers: `nm` and `initscripts`. `nm` is
used by default in RHEL7 and `initscripts` in RHEL6. These providers can be
configured per host via the [`network_provider`](#provider) variable. In
@@ -32,19 +37,20 @@ For `initscripts`, the legacy network service is required as used in Fedora or R
For each host a list of networking profiles can be configured via the
`network_connections` variable.
- For `initscripts`, profiles correspond to ifcfg files in the `/etc/sysconfig/network-scripts/ifcfg-*` directory.
- For `initscripts`, profiles correspond to ifcfg files in the
`/etc/sysconfig/network-scripts/ifcfg-*` directory.
- For `NetworkManager`, profiles correspond to connection profiles as handled by
NetworkManager. Fedora and RHEL use the `ifcfg-rh-plugin` for NetworkManager,
which also writes or reads configuration files to `/etc/sysconfig/network-scripts/ifcfg-*`
for compatibility.
Note that the `network` role primarily operates on networking profiles (connections) and
not on devices, but it uses the profile name by default as the interface name.
It is also possible to create generic profiles, by creating for example a
profile with a certain IP configuration without activating the profile. To
apply the configuration to the actual networking interface, use the `nmcli`
commands on the target system.
Note that the `network` role primarily operates on networking profiles
(connections) and not on devices, but it uses the profile name by default as
the interface name. It is also possible to create generic profiles, by creating
for example a profile with a certain IP configuration without activating the
profile. To apply the configuration to the actual networking interface, use the
`nmcli` commands on the target system.
**Warning**: The `network` role updates or creates all connection profiles on
the target system as specified in the `network_connections` variable. Therefore,
@@ -54,17 +60,25 @@ Exceptions are mentioned below.
Variables
---------
The `network` role is configured via variables starting with `network_` as the name prefix.
List of variables:
* `network_provider` - The `network_provider` variable allows to set a specific
provider (`nm` or `initscripts`) . Setting it to `{{ network_provider_os_default }}`,
the provider is set depending on the operating system. This is usually `nm`
except for RHEL 6 or CentOS 6 systems.
* `network_connections` - The connection profiles are configured as `network_connections`,
which is a list of dictionaries that include specific options.
The `network` role is configured via variables starting with `network_` as
the name prefix. List of variables:
- `network_provider` - The `network_provider` variable allows setting a specific
provider (`nm` or `initscripts`). When it is set to `{{
network_provider_os_default }}`, the provider is chosen depending on the
operating system. This is usually `nm` except for RHEL 6 or CentOS 6 systems.
Changing the provider for an existing profile is not supported. To switch
providers, it is recommended to first remove profiles with the old provider
and then create new profiles with the new provider.
- `network_connections` - The connection profiles are configured as
`network_connections`, which is a list of dictionaries that include specific
options.
- `network_allow_restart` - Certain configurations require the role to restart
network services. For example, if a wireless connection is configured and
NetworkManager-wifi is not installed, NetworkManager must be restarted prior
to the connection being configured. Setting this to `no` will prevent the
role from restarting network service.
Examples of Variables
---------------------
@@ -76,12 +90,14 @@ network_provider: nm
network_connections:
- name: eth0
#...
network_allow_restart: yes
```
Options
-------
The `network_connections` variable is a list of dictionaries that include the following options.
List of options:
The `network_connections` variable is a list of dictionaries that include the
following options. List of options:
### `name` (required)
@@ -92,38 +108,42 @@ Note that you can have multiple profiles for the same device, but only
one profile can be active on the device each time.
For NetworkManager, a connection can only be active at one device each time.
* For `NetworkManager`, the `name` option corresponds to the
- For `NetworkManager`, the `name` option corresponds to the
[`connection.id`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.connection.id)
property option.
Although NetworkManager supports multiple connections with the same `connection.id`,
the `network` role cannot handle a duplicate `name`. Specifying a `name` multiple
times refers to the same connection profile.
* For `initscripts`, the `name` option determines the ifcfg file name `/etc/sysconfig/network-scripts/ifcfg-$NAME`.
- For `initscripts`, the `name` option determines the ifcfg file name `/etc/sysconfig/network-scripts/ifcfg-$NAME`.
Note that the `name` does not specify the `DEVICE` but a filename. As a consequence,
`'/'` is not a valid character for the `name`.
You can also use the same connection profile multiple times. Therefore, it is possible to create a profile and activate it separately.
You can also use the same connection profile multiple times. Therefore, it is possible
to create a profile and activate it separately.
### `state`
The `state` option identifies what is the runtime state of each connection profile. The `state` option (optional) can be set to the following values:
The `state` option identifies what is the runtime state of each connection profile. The
`state` option (optional) can be set to the following values:
* `up` - the connection profile is activated
* `down` - the connection profile is deactivated
- `up` - the connection profile is activated
- `down` - the connection profile is deactivated
#### `state: up`
- For `NetworkManager`, this corresponds to `nmcli connection id {{name}} up`.
- For `initscripts`, this corresponds to `ifup {{name}}`.
When the `state` option is set to `up`, you can also specify the `wait` option (optional):
* `wait: 0` - initiates only the activation, but does not wait until the device is fully connected.
The connection will be completed in the background, for example after a DHCP lease was received.
* `wait: <seconds>` is a timeout that enables you to decide how long you give the device to
activate. The default is using a suitable timeout. Note that the `wait` option is
only supported by NetworkManager.
- `wait: 0` - initiates only the activation, but does not wait until the device is fully
connected. The connection will be completed in the background, for example after a
DHCP lease was received.
- `wait: <seconds>` is a timeout that enables you to decide how long you give the device
to activate. The default is using a suitable timeout. Note that the `wait` option is
only supported by NetworkManager.
Note that `state: up` always re-activates the profile and possibly changes the
networking configuration, even if the profile was already active before. As
@@ -135,14 +155,16 @@ a consequence, `state: up` always changes the system.
- For `initscripts`, it corresponds to call `ifdown {{name}}`.
You can deactivate a connection profile, even if is currently not active. As a consequence, `state: down` always changes the system.
Note that if the `state` option is unset, the connection profiles runtime state will not be changed.
You can deactivate a connection profile, even if is currently not active. As a
consequence, `state: down` always changes the system.
Note that if the `state` option is unset, the connection profiles runtime state will
not be changed.
### `persistent_state`
The `persistent_state` option identifies if a connection profile is persistent (saved on disk). The `persistent_state` option can be set to the following values:
The `persistent_state` option identifies if a connection profile is persistent (saved on
disk). The `persistent_state` option can be set to the following values:
#### `persistent_state: present` (default)
@@ -161,29 +183,31 @@ profile on a currently disconnected device. ([rh#1401515](https://bugzilla.redha
The `absent` value ensures that the profile is not present on the
target host. If a profile with the given `name` exists, it will be deleted. In this case:
- `NetworkManager` deletes all connection profiles with the corresponding `connection.id`.
Deleting a profile usually does not change the current networking configuration, unless
the profile was currently activated on a device. Deleting the currently
active connection profile disconnects the device. That makes the device eligible
to autoconnect another connection (for more details, see [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515)).
- `NetworkManager` deletes all connection profiles with the corresponding
`connection.id`. Deleting a profile usually does not change the current networking
configuration, unless the profile was currently activated on a device. Deleting the
currently active connection profile disconnects the device. That makes the device
eligible to autoconnect another connection (for more details, see
[rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515)).
- `initscripts` deletes the ifcfg file in most cases with no impact on the runtime state of the system unless some component is watching the sysconfig directory.
- `initscripts` deletes the ifcfg file in most cases with no impact on the runtime state
of the system unless some component is watching the sysconfig directory.
**Note**: For profiles that only contain a `state` option, the `network` role only activates
or deactivates the connection without changing its configuration.
### `type`
The `type` option can be set to the following values:
- `ethernet`
- `bridge`
- `bond`
- `team`
- `vlan`
- `macvlan`
- `infiniband`
- `ethernet`
- `bridge`
- `bond`
- `team`
- `vlan`
- `macvlan`
- `infiniband`
- `wireless`
#### `type: ethernet`
@@ -191,44 +215,65 @@ If the type is `ethernet`, then there can be an extra `ethernet` dictionary with
items (options): `autoneg`, `speed` and `duplex`, which correspond to the
settings of the `ethtool` utility with the same name.
* `autoneg`: `yes` (default) or `no` [if auto-negotiation is enabled or disabled]
* `speed`: speed in Mbit/s
* `duplex`: `half` or `full`
- `autoneg`: `yes` (default) or `no` [if auto-negotiation is enabled or disabled]
- `speed`: speed in Mbit/s
- `duplex`: `half` or `full`
Note that the `speed` and `duplex` link settings are required when autonegotiation is disabled (autoneg:no).
Note that the `speed` and `duplex` link settings are required when autonegotiation is
disabled (`autoneg: no`).
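A minimal sketch of a profile that pins these link settings (interface name and values are illustrative):

```yaml
network_connections:
  - name: eth0
    type: ethernet
    ethernet:
      autoneg: no
      speed: 1000
      duplex: full
```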
#### `type: bridge`, `type: bond`, `type: team`
The `bridge`, `bond`, `team` device types work similar. Note that `team` is not supported in RHEL6 kernels.
The `bridge`, `bond`, and `team` device types work similarly. Note that `team` is not
supported in RHEL6 kernels.
For slaves, the `slave_type` and `master` properties must be set. Note that slaves should not have `ip` settings.
For ports, the `port_type` and `controller` properties must be set. Note that ports
should not have `ip` settings.
The `master` refers to the `name` of a profile in the Ansible
The `controller` refers to the `name` of a profile in the Ansible
playbook. It is neither an interface-name nor a connection-id of
NetworkManager.
- For NetworkManager, `master` will be converted to the `connection.uuid`
- For NetworkManager, `controller` will be converted to the `connection.uuid`
of the corresponding profile.
- For initscripts, the master is looked up as the `DEVICE` from the corresponding
- For initscripts, the controller is looked up as the `DEVICE` from the corresponding
ifcfg file.
As `master` refers to other profiles of the same or another play,
the order of the `connections` list matters. Also, `--check` ignores
the value of the `master` and assumes it will be present during a real
run. That means, in presence of an invalid `master`, `--check` may
signal success but the actual play run fails.
As `controller` refers to other profiles of the same or another play, the order of the
`connections` list matters. Profiles that are referenced by other profiles need to be
specified first. Also, `--check` ignores the value of the `controller` and assumes it
will be present during a real run. That means, in presence of an invalid `controller`,
`--check` may signal success but the actual play run fails.
The `team` type uses `roundrobin` as the `runner` configuration. No further
configuration is supported at the moment.
#### `type: vlan`
Similar to `master`, the `parent` references the connection profile in the ansible
Similar to `controller`, the `parent` references the connection profile in the ansible
role.
#### `type: macvlan`
Similar to `master` and `vlan`, the `parent` references the connection profile in the ansible
role.
Similar to `controller` and `vlan`, the `parent` references the connection profile in
the ansible role.
#### `type: wireless`
The `wireless` type supports WPA-PSK (password) authentication and WPA-EAP (802.1x)
authentication.
`nm` (NetworkManager) is the only supported `network_provider` for this type.
If WPA-EAP is used, ieee802_1x settings must be defined in the
[ieee802_1x](#-`ieee802_1x`) option.
The following options are supported:
- `ssid`: the SSID of the wireless network (required)
- `key_mgmt`: `wpa-psk` or `wpa-eap` (required)
- `password`: password for the network (required if `wpa-psk` is used)
### `autoconnect`
@@ -243,19 +288,28 @@ By default, profiles are created with autoconnect enabled.
The `mac` address is optional and restricts the profile to be usable only on
devices with the given MAC address. `mac` is only allowed for `type`
`ethernet` or `infiniband` to match a non-virtual device with the
profile.
profile. The value of the `mac` address needs to be specified in hexadecimal notation
using colons (for example: `mac: "00:00:5e:00:53:5d"`). To avoid YAML parsing mac
addresses as integers in sexagesimal (base 60) notation (see
<https://yaml.org/spec/1.1/#id858600>), it is recommended to always quote the value
with double quotes; in some cases quoting is required.
- For `NetworkManager`, `mac` is the permanent MAC address, `ethernet.mac-address`.
- For `initscripts`, `mac` is the currently configured MAC address of the device (`HWADDR`).
### `mtu`
The `mtu` option denotes the maximum transmission unit for the profile's
device. The maximum value depends on the device. For virtual devices, the
maximum value of the `mtu` option depends on the underlying device.
### `interface_name`
For the `ethernet` and `infiniband` types, the `interface_name` option restricts the profile to
the given interface by name. This argument is optional and by default the
profile name is used unless a mac address is specified using the `mac` key.
Specifying an empty string (`""`) means that the profile is not
restricted to a network interface.
For the `ethernet` and `infiniband` types, the `interface_name` option restricts the
profile to the given interface by name. This argument is optional and by default the
profile name is used unless a mac address is specified using the `mac` key. Specifying
an empty string (`""`) means that the profile is not restricted to a network interface.
**Note:** With [persistent interface naming](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/ch-Consistent_Network_Device_Naming.html),
the interface is predictable based on the hardware configuration.
@@ -271,26 +325,25 @@ different or the profile may not be tied to an interface at all.
The `zone` option sets the firewalld zone for the interface.
Slaves to the bridge, bond or team devices cannot specify a zone.
Ports to the bridge, bond or team devices cannot specify a zone.
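For example (interface name and zone are illustrative):

```yaml
network_connections:
  - name: eth0
    type: ethernet
    zone: external
    ip:
      dhcp4: yes
```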
### `ip`
The IP configuration supports the following options:
* `address`
- `address`
Manual addressing can be specified via a list of addresses under the `address` option.
* `dhcp4` and `auto6`
- `dhcp4`, `auto6`, and `ipv6_disabled`
Also, manual addressing can be specified by setting either `dhcp4` or `auto6`.
The `dhcp4` key is for DHCPv4 and `auto6` for StateLess Address Auto Configuration
(SLAAC). Note that the `dhcp4` and `auto6` keys can be omitted and the default key
depends on the presence of manual addresses.
depends on the presence of manual addresses. `ipv6_disabled` can be set to disable
ipv6 for the connection.
* `dhcp4_send_hostname`
- `dhcp4_send_hostname`
If `dhcp4` is enabled, it can be configured whether the DHCPv4 request includes
the hostname via the `dhcp4_send_hostname` option. Note that `dhcp4_send_hostname`
@@ -298,110 +351,252 @@ The IP configuration supports the following options:
[`ipv4.dhcp-send-hostname`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.dhcp-send-hostname)
property.
* `dns` and `dns_search`
- `dns`
Manual DNS configuration can be specified via a list of addresses
given in the `dns` option and a list of domains to search given in the
`dns_search` option.
Manual DNS configuration can be specified via a list of addresses given in the
`dns` option.
- `dns_search`
* `route_metric4` and `route_metric6`
`dns_search` is only supported for IPv4 nameservers. Manual DNS configuration can
be specified via a list of domains to search given in the `dns_search` option.
- For `NetworkManager`, `route_metric4` and `route_metric6` corresponds to the
[`ipv4.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.route-metric) and
- `dns_options`
`dns_options` is only supported for the NetworkManager provider and IPv4
nameservers. Manual DNS configuration via a list of DNS options can be given in the
`dns_options`. The list of supported DNS options for IPv4 nameservers is described
in [man 5 resolv.conf](https://man7.org/linux/man-pages/man5/resolv.conf.5.html).
Currently, the list of supported DNS options is:
- `attempts:n`
- `debug`
- `edns0`
- `ndots:n`
- `no-check-names`
- `no-reload`
- `no-tld-query`
- `rotate`
- `single-request`
- `single-request-reopen`
- `timeout:n`
- `trust-ad`
- `use-vc`
**Note:** The "trust-ad" setting is only honored if the profile contributes name
servers to resolv.conf, and if all contributing profiles have "trust-ad" enabled.
When using a caching DNS plugin (dnsmasq or systemd-resolved in NetworkManager.conf)
then "edns0" and "trust-ad" are automatically added.
- `route_metric4` and `route_metric6`
For `NetworkManager`, `route_metric4` and `route_metric6` corresponds to the
[`ipv4.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.route-metric)
and
[`ipv6.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv6.route-metric)
properties, respectively. If specified, it determines the route metric for DHCP
assigned routes and the default route, and thus the priority for multiple interfaces.
assigned routes and the default route, and thus the priority for multiple
interfaces.
* `route`
- `route`
Static route configuration can be specified via a list of routes given in the `route`
option. The default value is an empty list. Each route is a dictionary with the following
entries: `network`, `prefix`, `gateway` and `metric`. `network` and `prefix` specify
the destination network.
Note that Classless inter-domain routing (CIDR) notation or network mask notation are not supported yet.
Static route configuration can be specified via a list of routes given in the
`route` option. The default value is an empty list. Each route is a dictionary with
the following entries: `network`, `prefix`, `gateway` and `metric`. `network` and
`prefix` specify the destination network.
Note that Classless inter-domain routing (CIDR) notation or network mask notation
are not supported yet.
* `route_append_only`
- `route_append_only`
The `route_append_only` option only allows adding new routes to the
existing routes on the system.
If the `route_append_only` boolean option is set to `yes`, the specified routes are appended to the existing routes.
If `route_append_only` is set to `no` (default), the current routes are replaced.
Note that setting `route_append_only` to `yes` without setting `route` has the effect of preserving the current static routes.
If the `route_append_only` boolean option is set to `yes`, the specified routes are
appended to the existing routes. If `route_append_only` is set to `no` (default),
the current routes are replaced. Note that setting `route_append_only` to `yes`
without setting `route` has the effect of preserving the current static routes.
* `rule_append_only`
- `rule_append_only`
The `rule_append_only` boolean option allows preserving the current routing rules.
Note that specifying routing rules is not supported yet.
**Note:** When `route_append_only` or `rule_append_only` is not specified, the `network` role deletes the current routes or routing rules.
**Note:** When `route_append_only` or `rule_append_only` is not specified, the network
role deletes the current routes or routing rules.
**Note:** Slaves to the bridge, bond or team devices cannot specify `ip` settings.
**Note:** Ports to the bridge, bond or team devices cannot specify `ip` settings.
### `ethtool`
The ethtool settings allow to enable or disable varios features. The names
The ethtool settings allow to enable or disable various features. The names
correspond to the names used by the `ethtool` utility. Depending on the actual
kernel and device, changing some features might not be supported.
kernel and device, changing some options might not be supported.
```yaml
ethtool:
features:
esp-hw-offload: yes|no # optional
esp-tx-csum-hw-offload: yes|no # optional
fcoe-mtu: yes|no # optional
esp_hw_offload: yes|no # optional
esp_tx_csum_hw_offload: yes|no # optional
fcoe_mtu: yes|no # optional
gro: yes|no # optional
gso: yes|no # optional
highdma: yes|no # optional
hw-tc-offload: yes|no # optional
l2-fwd-offload: yes|no # optional
hw_tc_offload: yes|no # optional
l2_fwd_offload: yes|no # optional
loopback: yes|no # optional
lro: yes|no # optional
ntuple: yes|no # optional
rx: yes|no # optional
rx-all: yes|no # optional
rx-fcs: yes|no # optional
rx-gro-hw: yes|no # optional
rx-udp_tunnel-port-offload: yes|no # optional
rx-vlan-filter: yes|no # optional
rx-vlan-stag-filter: yes|no # optional
rx-vlan-stag-hw-parse: yes|no # optional
rx_all: yes|no # optional
rx_fcs: yes|no # optional
rx_gro_hw: yes|no # optional
rx_udp_tunnel_port_offload: yes|no # optional
rx_vlan_filter: yes|no # optional
rx_vlan_stag_filter: yes|no # optional
rx_vlan_stag_hw_parse: yes|no # optional
rxhash: yes|no # optional
rxvlan: yes|no # optional
sg: yes|no # optional
tls-hw-record: yes|no # optional
tls-hw-tx-offload: yes|no # optional
tls_hw_record: yes|no # optional
tls_hw_tx_offload: yes|no # optional
tso: yes|no # optional
tx: yes|no # optional
tx-checksum-fcoe-crc: yes|no # optional
tx-checksum-ip-generic: yes|no # optional
tx-checksum-ipv4: yes|no # optional
tx-checksum-ipv6: yes|no # optional
tx-checksum-sctp: yes|no # optional
tx-esp-segmentation: yes|no # optional
tx-fcoe-segmentation: yes|no # optional
tx-gre-csum-segmentation: yes|no # optional
tx-gre-segmentation: yes|no # optional
tx-gso-partial: yes|no # optional
tx-gso-robust: yes|no # optional
tx-ipxip4-segmentation: yes|no # optional
tx-ipxip6-segmentation: yes|no # optional
tx-nocache-copy: yes|no # optional
tx-scatter-gather: yes|no # optional
tx-scatter-gather-fraglist: yes|no # optional
tx-sctp-segmentation: yes|no # optional
tx-tcp-ecn-segmentation: yes|no # optional
tx-tcp-mangleid-segmentation: yes|no # optional
tx-tcp-segmentation: yes|no # optional
tx-tcp6-segmentation: yes|no # optional
tx-udp-segmentation: yes|no # optional
tx-udp_tnl-csum-segmentation: yes|no # optional
tx-udp_tnl-segmentation: yes|no # optional
tx-vlan-stag-hw-insert: yes|no # optional
tx_checksum_fcoe_crc: yes|no # optional
tx_checksum_ip_generic: yes|no # optional
tx_checksum_ipv4: yes|no # optional
tx_checksum_ipv6: yes|no # optional
tx_checksum_sctp: yes|no # optional
tx_esp_segmentation: yes|no # optional
tx_fcoe_segmentation: yes|no # optional
tx_gre_csum_segmentation: yes|no # optional
tx_gre_segmentation: yes|no # optional
tx_gso_partial: yes|no # optional
tx_gso_robust: yes|no # optional
tx_ipxip4_segmentation: yes|no # optional
tx_ipxip6_segmentation: yes|no # optional
tx_nocache_copy: yes|no # optional
tx_scatter_gather: yes|no # optional
tx_scatter_gather_fraglist: yes|no # optional
tx_sctp_segmentation: yes|no # optional
tx_tcp_ecn_segmentation: yes|no # optional
tx_tcp_mangleid_segmentation: yes|no # optional
tx_tcp_segmentation: yes|no # optional
tx_tcp6_segmentation: yes|no # optional
tx_udp_segmentation: yes|no # optional
tx_udp_tnl_csum_segmentation: yes|no # optional
tx_udp_tnl_segmentation: yes|no # optional
tx_vlan_stag_hw_insert: yes|no # optional
txvlan: yes|no # optional
coalesce:
adaptive_rx: yes|no # optional
adaptive_tx: yes|no # optional
pkt_rate_high: 0 # optional minimum=0 maximum=0xffffffff
pkt_rate_low: 0 # optional minimum=0 maximum=0xffffffff
rx_frames: 0 # optional minimum=0 maximum=0xffffffff
rx_frames_high: 0 # optional minimum=0 maximum=0xffffffff
rx_frames_irq: 0 # optional minimum=0 maximum=0xffffffff
rx_frames_low: 0 # optional minimum=0 maximum=0xffffffff
rx_usecs: 0 # optional minimum=0 maximum=0xffffffff
rx_usecs_high: 0 # optional minimum=0 maximum=0xffffffff
rx_usecs_irq: 0 # optional minimum=0 maximum=0xffffffff
rx_usecs_low: 0 # optional minimum=0 maximum=0xffffffff
sample_interval: 0 # optional minimum=0 maximum=0xffffffff
stats_block_usecs: 0 # optional minimum=0 maximum=0xffffffff
tx_frames: 0 # optional minimum=0 maximum=0xffffffff
tx_frames_high: 0 # optional minimum=0 maximum=0xffffffff
tx_frames_irq: 0 # optional minimum=0 maximum=0xffffffff
tx_frames_low: 0 # optional minimum=0 maximum=0xffffffff
tx_usecs: 0 # optional minimum=0 maximum=0xffffffff
tx_usecs_high: 0 # optional minimum=0 maximum=0xffffffff
tx_usecs_irq: 0 # optional minimum=0 maximum=0xffffffff
tx_usecs_low: 0 # optional minimum=0 maximum=0xffffffff
```
### `ieee802_1x`
Configures 802.1x authentication for an interface.
Currently, NetworkManager is the only supported provider and EAP-TLS is the only
supported EAP method.
SSL certificates and keys must be deployed on the host prior to running the role.
- `eap`
The allowed EAP method to be used when authenticating to the network with 802.1x.
Currently, `tls` is the default and the only accepted value.
- `identity` (required)
Identity string for EAP authentication methods.
- `private_key` (required)
Absolute path to the client's PEM or PKCS#12 encoded private key used for 802.1x
authentication.
- `private_key_password`
Password to the private key specified in `private_key`.
- `private_key_password_flags`
List of flags to configure how the private key password is managed.
Multiple flags may be specified.
Valid flags are:
- `none`
- `agent-owned`
- `not-saved`
- `not-required`
See the NetworkManager documentation on "Secret flag types" for more details (`man 5
nm-settings`).
- `client_cert` (required)
Absolute path to the client's PEM encoded certificate used for 802.1x
authentication.
- `ca_cert`
Absolute path to the PEM encoded certificate authority used to verify the EAP
server.
- `ca_path`
Absolute path to a directory containing additional PEM encoded CA certificates used
to verify the EAP server. Can be used instead of, or in addition to, `ca_cert`. Cannot
be used if `system_ca_certs` is enabled.
- `system_ca_certs`
If set to `True`, NetworkManager will use the system's trusted CA
certificates to verify the EAP server.
- `domain_suffix_match`
If set, NetworkManager will ensure the domain name of the EAP server certificate
matches this string.
### `bond`
The `bond` setting configures the options of bonded interfaces
(type `bond`). It supports the following options:
- `mode`
Bonding mode. See the
[kernel documentation](https://www.kernel.org/doc/Documentation/networking/bonding.txt)
or your distribution `nmcli` documentation for valid values.
NetworkManager defaults to `balance-rr`.
- `miimon`
Sets the MII link monitoring interval (in milliseconds).
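A sketch combining these options with the `controller`/`port_type` settings described earlier (interface names and the chosen mode are illustrative):

```yaml
network_connections:
  - name: bond0
    type: bond
    interface_name: bond0
    bond:
      mode: active-backup
      miimon: 100
  - name: bond0-eth1
    type: ethernet
    interface_name: eth1
    controller: bond0
    port_type: bond
```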
Examples of Options
-------------------
@@ -443,7 +638,7 @@ network_connections:
#persistent_state: present # default
type: ethernet
autoconnect: yes
mac: 00:00:5e:00:53:5d
mac: "00:00:5e:00:53:5d"
ip:
dhcp4: yes
```
@@ -478,7 +673,6 @@ network_connections:
#interface_name: br0 # defaults to the connection name
```
Configuring a bridge connection:
```yaml
@@ -491,21 +685,21 @@ network_connections:
auto6: no
```
Setting `master` and `slave_type`:
Setting `controller` and `port_type`:
```yaml
network_connections:
- name: br0-bond0
type: bond
interface_name: bond0
master: internal-br0
slave_type: bridge
controller: internal-br0
port_type: bridge
- name: br0-bond0-eth1
type: ethernet
interface_name: eth1
master: br0-bond0
slave_type: bond
controller: br0-bond0
port_type: bond
```
Configuring VLANs:
@@ -555,6 +749,20 @@ network_connections:
- 192.168.1.1/24
```
Configuring a wireless connection:
```yaml
network_connections:
- name: wlan0
type: wireless
wireless:
ssid: "My WPA2-PSK Network"
key_mgmt: "wpa-psk"
# recommend vault encrypting the wireless password
# see https://docs.ansible.com/ansible/latest/user_guide/vault.html
password: "p@55w0rD"
```
Setting the IP configuration:
```yaml
@@ -573,6 +781,9 @@ network_connections:
dns_search:
- example.com
- subdomain.example.com
dns_options:
- rotate
- timeout:1
route_metric6: -1
auto6: no
@@ -596,13 +807,30 @@ network_connections:
rule_append_only: yes
```
Configuring 802.1x:
```yaml
network_connections:
- name: eth0
type: ethernet
ieee802_1x:
identity: myhost
eap: tls
private_key: /etc/pki/tls/client.key
# recommend vault encrypting the private key password
# see https://docs.ansible.com/ansible/latest/user_guide/vault.html
private_key_password: "p@55w0rD"
client_cert: /etc/pki/tls/client.pem
ca_cert: /etc/pki/tls/cacert.pem
domain_suffix_match: example.com
```
### Invalid and Wrong Configuration
The `network` role rejects invalid configurations. It is recommended to test the role
with `--check` first. There is no protection against wrong (but valid) configuration.
Double-check your configuration before applying it.
Compatibility
-------------
@@ -628,13 +856,15 @@ after disabling the NetworkManager service.
Limitations
-----------
As Ansible usually works via the network, for example via SSH, there are some limitations to be considered:
As Ansible usually works via the network, for example via SSH, there are some
limitations to be considered:
The `network` role does not support bootstraping networking configuration. One
option may be [ansible-pull](https://docs.ansible.com/ansible/playbooks_intro.html#ansible-pull).
Another option maybe be to initially auto-configure the host during installation
(ISO based, kickstart, etc.), so that the host is connected to a management LAN
or VLAN. It strongly depends on your environment.
The `network` role does not support bootstrapping networking configuration. One option
may be
[ansible-pull](https://docs.ansible.com/ansible/playbooks_intro.html#ansible-pull).
Another option may be to initially auto-configure the host during installation (ISO
based, kickstart, etc.), so that the host is connected to a management LAN or VLAN. It
strongly depends on your environment.
For `initscripts` provider, deploying a profile merely means to create the ifcfg
files. Nothing happens automatically until the play issues `ifup` or `ifdown`
@@ -642,20 +872,20 @@ via the `up` or `down` [states](#state) -- unless there are other
components that rely on the ifcfg files and react on changes.
The `initscripts` provider requires the different profiles to be in the right
order when they depend on each other. For example, the bonding master device
needs to be specified before the slave devices.
order when they depend on each other. For example, the bonding controller device
needs to be specified before the port devices.
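A minimal sketch of that ordering (interface and profile names are illustrative):

```yaml
network_connections:
  # the controller profile comes first
  - name: bond0
    type: bond
    interface_name: bond0
  # port profiles follow and reference the controller
  - name: bond0-port1
    type: ethernet
    interface_name: eth1
    controller: bond0
    port_type: bond
```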
When removing a profile for NetworkManager it also takes the connection
down and possibly removes virtual interfaces. With the `initscripts` provider
removing a profile does not change its current runtime state (this is a future
feature for NetworkManager as well).
For NetworkManager, modifying a connection with autoconnect enabled
may result in the activation of a new profile on a previously disconnected
interface. Also, deleting a NetworkManager connection that is currently active
results in removing the interface. Therefore, the order of the steps should be
followed, and careful handling of the [autoconnect](#autoconnect) property may be
necessary. This should be improved in NetworkManager RFE [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515).
For NetworkManager, modifying a connection with autoconnect enabled may result in the
activation of a new profile on a previously disconnected interface. Also, deleting a
NetworkManager connection that is currently active results in removing the interface.
Therefore, the order of the steps should be followed, and careful handling of
the [autoconnect](#autoconnect) property may be necessary. This should be improved in
NetworkManager RFE [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515).
It seems difficult to change networking of the target host in a way that breaks
the current SSH connection of ansible. If you want to do that, ansible-pull might
@@ -680,3 +910,6 @@ feature. At the beginning of the play we could create a checkpoint and if we los
connectivity due to an error, NetworkManager would automatically roll back after
a timeout. The limitation is that this would only work with NetworkManager, and
it is not clear that the rollback will result in a working configuration.
*Want to contribute? Take a look at our [contributing
guidelines](https://github.com/linux-system-roles/network/blob/main/contributing.md)!*

View File

@@ -2,54 +2,91 @@
---
network_connections: []
network_allow_restart: no
# Use initscripts for RHEL/CentOS < 7, nm otherwise
network_provider_os_default: "{{
'initscripts' if ansible_distribution in ['RedHat', 'CentOS'] and
ansible_distribution_major_version is version('7', '<')
'initscripts' if ansible_distribution in [
'RedHat',
'CentOS',
'OracleLinux'
] and ansible_distribution_major_version is version('7', '<')
else 'nm' }}"
# If NetworkManager.service is running, assume that 'nm' is currently in-use,
# otherwise initscripts
network_provider_current: "{{
__network_provider_current: "{{
'nm' if 'NetworkManager.service' in ansible_facts.services and
ansible_facts.services['NetworkManager.service']['state'] == 'running'
else 'initscripts'
}}"
# Default to the auto-detected value
network_provider: "{{ network_provider_current }}"
network_provider: "{{ __network_provider_current }}"
# check if any 802.1x connections are defined
__network_ieee802_1x_connections_defined: "{{ network_connections |
selectattr('ieee802_1x', 'defined') | list | count > 0 }}"
# check if any wireless connections are defined
__network_wireless_connections_defined: "{{
['wireless'] in network_connections|json_query('[*][type]') }}"
# NetworkManager-wireless is required for wireless connections
__network_packages_default_wireless: ["{%
if __network_wireless_connections_defined
%}NetworkManager-wifi{% endif %}"]
# check if any team connections are defined
__network_team_connections_defined: "{{
['team'] in network_connections|json_query('[*][type]') }}"
# NetworkManager-team is required for team connections
__network_packages_default_team: ["{%
if __network_team_connections_defined
%}NetworkManager-team{% endif %}"]
# wpa_supplicant is required if any 802.1x or wireless connections are defined
__network_wpa_supplicant_required: "{{
__network_ieee802_1x_connections_defined or
__network_wireless_connections_defined }}"
__network_packages_default_wpa_supplicant: ["{%
if __network_wpa_supplicant_required
%}wpa_supplicant{% endif %}"]
# The python-gobject-base package depends on the python version and
# distribution:
# - python-gobject-base on RHEL7 (no python2-gobject-base :-/)
# - python-gobject-base or python2-gobject-base on Fedora 27
# - python3-gobject-base on Fedora 28+
network_service_name_default_nm: NetworkManager
network_packages_default_nm:
- ethtool
- NetworkManager
- "python{{ ansible_python['version']['major'] | replace('2', '') }}-gobject-base"
__network_packages_default_gobject_packages: ["python{{
ansible_python['version']['major'] | replace('2', '')}}-gobject-base"]
network_service_name_default_initscripts: network
__network_service_name_default_nm: NetworkManager
__network_packages_default_nm: "{{['NetworkManager']
+ __network_packages_default_gobject_packages|select()|list()
+ __network_packages_default_wpa_supplicant|select()|list()
+ __network_packages_default_wireless|select()|list()
+ __network_packages_default_team|select()|list()}}"
__network_service_name_default_initscripts: network
# initscripts requires bridge-utils to manage bridges, install it when the
# 'bridge' type is used in network_connections
_network_packages_default_initscripts_bridge: ["{% if ['bridge'] in network_connections|json_query('[*][type]') and
(
(ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('7', '<=')) or
(ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('28', '<='))
)
__network_packages_default_initscripts_bridge: ["{%
if ['bridge'] in network_connections|json_query('[*][type]') and
ansible_distribution in ['RedHat', 'CentOS', 'OracleLinux'] and
ansible_distribution_major_version is version('7', '<=')
%}bridge-utils{% endif %}"]
_network_packages_default_initscripts_network_scripts: ["{%
if (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('7', '<=')) or
(ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('28', '<='))
__network_packages_default_initscripts_network_scripts: ["{%
if ansible_distribution in ['RedHat', 'CentOS', 'OracleLinux'] and
ansible_distribution_major_version is version('7', '<=')
%}initscripts{% else %}network-scripts{% endif %}"]
# convert _network_packages_default_initscripts_bridge to an empty list if it
# contains only the empty string and add it to the default package list
# |select() filters the list to include only values that evaluate to true
# (the empty string is false)
# |list() converts the generator that |select() creates to a list
network_packages_default_initscripts: "{{ ['ethtool']
+ _network_packages_default_initscripts_bridge|select()|list()
+ _network_packages_default_initscripts_network_scripts|select()|list()
__network_packages_default_initscripts: "{{
__network_packages_default_initscripts_bridge|select()|list()
+ __network_packages_default_initscripts_network_scripts|select()|list()
}}"
@@ -58,25 +95,25 @@ network_packages_default_initscripts: "{{ ['ethtool']
#
# Usually, the user only wants to select the "network_provider"
# (or not set it at all and let it be autodetected via the
# internal variable "{{ network_provider_current }}". Hence,
# internal variable "{{ __network_provider_current }}". Hence,
# depending on the "network_provider", a different set of
# service-name and packages is chosen.
#
# That is done via the internal "_network_provider_setup" dictionary.
# That is done via the internal "__network_provider_setup" dictionary.
# If the user doesn't explicitly set "network_service_name" or
# "network_packages" (which he usually wouldn't), then the defaults
# from "network_service_name_default_*" and "network_packages_default_*"
# from "__network_service_name_default_*" and "__network_packages_default_*"
# apply. These values are hard-coded in this file, but they also could
# be overwritten as host variables or via vars/*.yml.
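#
# For example (illustrative only, not part of these defaults), a playbook
# could pin the provider explicitly instead of relying on autodetection:
#
#     - hosts: all
#       vars:
#         network_provider: initscripts
#       roles:
#         - linux-system-roles.network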
_network_provider_setup:
__network_provider_setup:
nm:
service_name: "{{ network_service_name_default_nm }}"
packages: "{{ network_packages_default_nm }}"
service_name: "{{ __network_service_name_default_nm }}"
packages: "{{ __network_packages_default_nm }}"
initscripts:
service_name: "{{ network_service_name_default_initscripts }}"
packages: "{{ network_packages_default_initscripts }}"
service_name: "{{ __network_service_name_default_initscripts }}"
packages: "{{ __network_packages_default_initscripts }}"
network_packages: "{{
_network_provider_setup[network_provider]['packages'] }}"
__network_provider_setup[network_provider]['packages'] }}"
network_service_name: "{{
_network_provider_setup[network_provider]['service_name'] }}"
__network_provider_setup[network_provider]['service_name'] }}"

View File

@@ -1,38 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- hosts: network-test
vars:
network_connections:
# Create a bond profile, which is the parent of VLAN.
- name: prod2
state: up
type: bond
interface_name: bond2
ip:
dhcp4: no
auto6: no
bond:
mode: active-backup
miimon: 110
# enslave an ethernet to the bond
- name: prod2-slave1
state: up
type: ethernet
interface_name: "{{ network_interface_name2 }}"
master: prod2
# on top of it, create a VLAN with ID 100 and static
# addressing
- name: prod2.100
state: up
type: vlan
parent: prod2
vlan_id: 100
ip:
address:
- "192.0.2.{{ network_iphost }}/24"
roles:
- linux-system-roles.network

View File

@@ -1,36 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- hosts: network-test
vars:
network_connections:
# Create a bridge profile, which is the parent of VLAN.
- name: prod2
state: up
type: bridge
interface_name: bridge2
ip:
dhcp4: no
auto6: no
# enslave an ethernet to the bridge
- name: prod2-slave1
state: up
type: ethernet
interface_name: "{{ network_interface_name2 }}"
master: prod2
slave_type: bridge
# on top of it, create a VLAN with ID 100 and static
# addressing
- name: prod2.100
state: up
type: vlan
parent: prod2
vlan_id: 100
ip:
address:
- "192.0.2.{{ network_iphost }}/24"
roles:
- linux-system-roles.network

View File

@@ -1 +0,0 @@
../tests/down-profile.yml

View File

@@ -1,18 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- hosts: network-test
vars:
network_connections:
# Create one ethernet profile and activate it.
# The profile uses automatic IP addressing
# and is tied to the interface by MAC address.
- name: prod1
state: up
type: ethernet
autoconnect: yes
mac: "{{ network_mac1 }}"
mtu: 1450
roles:
- linux-system-roles.network

View File

@@ -1,29 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- hosts: network-test
vars:
network_connections:
# Create a profile for the underlying device of the VLAN.
- name: prod2
type: ethernet
autoconnect: no
state: up
interface_name: "{{ network_interface_name2 }}"
ip:
dhcp4: no
auto6: no
# on top of it, create a VLAN with ID 100 and static
# addressing
- name: prod2.100
state: up
type: vlan
parent: prod2
vlan_id: 100
ip:
address:
- "192.0.2.{{ network_iphost }}/24"
roles:
- linux-system-roles.network

View File

@@ -1,14 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- hosts: all
tasks:
- include_role:
name: linux-system-roles.network
vars:
network_connections:
- name: "{{ network_interface_name1 }}"
state: up
type: ethernet
ip:
dhcp4: "no"
auto6: "no"

View File

@@ -1,19 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- hosts: all
tasks:
- include_role:
name: linux-system-roles.network
vars:
network_connections:
- name: "{{ network_interface_name1 }}"
state: up
type: ethernet
ip:
dhcp4: "no"
auto6: "no"
ethtool:
features:
gro: "no"
gso: "yes"
tx-sctp-segmentation: "no"

View File

@@ -1 +0,0 @@
../tests/remove-profile.yml

View File

@@ -1,2 +1,2 @@
install_date: Wed Jul 1 18:41:54 2020
version: 1.1.0
install_date: Tue Apr 20 16:13:56 2021
version: 1.3.0

View File

@@ -5,7 +5,8 @@ galaxy_info:
description: Configure networking
company: Red Hat, Inc.
license: BSD-3-Clause
min_ansible_version: 2.5
min_ansible_version: 2.7
github_branch: main
galaxy_tags:
- centos
- fedora
@@ -17,9 +18,7 @@ galaxy_info:
platforms:
- name: Fedora
versions:
- 28
- 29
- 30
- all
- name: EL
versions:
- 6

View File

@@ -2,22 +2,39 @@
""" Support for NetworkManager aka the NM provider """
# pylint: disable=import-error, no-name-in-module
from ansible.module_utils.network_lsr.utils import Util
from ansible.module_utils.network_lsr.utils import Util # noqa:E501
ETHTOOL_FEATURE_PREFIX = "ETHTOOL_OPTNAME_FEATURE_"
ETHTOOL_COALESCE_PREFIX = "ETHTOOL_OPTNAME_COALESCE_"
def get_nm_ethtool_feature(name):
"""
Translate ethtool feature into Network Manager name
Translate ethtool feature into Network Manager name
:param name: Name of the feature
:type name: str
:returns: Name of the feature to be used by `NM.SettingEthtool.set_feature()`
:rtype: str
:param name: Name of the feature
:type name: str
:returns: Name of the feature to be used by `NM.SettingEthtool.set_feature()`
:rtype: str
"""
name = ETHTOOL_FEATURE_PREFIX + name.upper().replace("-", "_")
name = ETHTOOL_FEATURE_PREFIX + name.upper()
feature = getattr(Util.NM(), name, None)
return feature
def get_nm_ethtool_coalesce(name):
"""
Translate ethtool coalesce into Network Manager name
:param name: Name of the coalesce
:type name: str
:returns: Name of the setting to be used by `NM.SettingEthtool.set_coalesce()`
:rtype: str
"""
name = ETHTOOL_COALESCE_PREFIX + name.upper()
coalesce = getattr(Util.NM(), name, None)
return coalesce

View File

@@ -2,13 +2,12 @@
# SPDX-License-Identifier: BSD-3-Clause
# vim: fileencoding=utf8
import os
import socket
import sys
import uuid
# pylint: disable=import-error, no-name-in-module
from ansible.module_utils.network_lsr import MyError
from ansible.module_utils.network_lsr import MyError # noqa:E501
class Util:
@@ -25,19 +24,31 @@ class Util:
return default
@staticmethod
def check_output(argv):
# subprocess.check_output is python 2.7.
with open("/dev/null", "wb") as DEVNULL:
import subprocess
def path_to_glib_bytes(path):
"""
Converts a path to a GLib.Bytes object that can be accepted by NM
"""
return Util.GLib().Bytes.new(("file://%s\x00" % path).encode("utf-8"))
env = os.environ.copy()
env["LANG"] = "C"
p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=DEVNULL, env=env)
# FIXME: Can we assume this to always be UTF-8?
out = p.communicate()[0].decode("UTF-8")
if p.returncode != 0:
raise MyError("failure calling %s: exit with %s" % (argv, p.returncode))
return out
@staticmethod
def convert_passwd_flags_nm(secret_flags):
"""
Converts an array of "secret flags" strings
to an integer representation understood by NetworkManager
"""
flag_int = 0
if "none" in secret_flags:
flag_int += 0
if "agent-owned" in secret_flags:
flag_int += 1
if "not-saved" in secret_flags:
flag_int += 2
if "not-required" in secret_flags:
flag_int += 4
return flag_int
@classmethod
def create_uuid(cls):
@@ -147,7 +158,7 @@ class Util:
if not cls.GMainLoop_run(mainloop_timeout):
cancellable.cancel()
raise MyError("failure to call %s.%s(): timeout" % object_, async_action)
raise MyError("failure to call %s.%s(): timeout" % (object_, async_action))
success = user_data.get("success", None)
if success is not None:
@@ -249,7 +260,8 @@ class Util:
def mac_ntoa(mac):
if mac is None:
return None
return ":".join(["%02x" % c for c in mac])
# bytearray() is needed for python2 compatibility
return ":".join(["%02x" % c for c in bytearray(mac)])
@staticmethod
def mac_norm(mac_str, force_len=None):

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MIT
# Molecule managed
{% if item.registry is defined %}
@@ -6,9 +7,22 @@ FROM {{ item.registry.url }}/{{ item.image }}
FROM {{ item.image }}
{% endif %}
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python2-dnf bash && dnf clean all; \
elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
RUN set -euo pipefail; \
pkgs="python sudo yum-plugin-ovl bash"; \
if grep 'CentOS release 6' /etc/centos-release > /dev/null 2>&1; then \
for file in /etc/yum.repos.d/CentOS-*.repo; do \
if ! grep '^baseurl=.*vault[.]centos[.]org' "$file"; then \
sed -i -e 's,^mirrorlist,#mirrorlist,' \
-e 's,^#baseurl=,baseurl=,' \
-e 's,mirror.centos.org/centos/$releasever,vault.centos.org/6.10,' \
"$file"; \
fi; \
done; \
pkgs="$pkgs upstart chkconfig initscripts"; \
fi; \
if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python3 sudo python3-devel python3-dnf bash && dnf clean all; \
elif [ $(command -v yum) ]; then yum makecache fast && yum install -y $pkgs && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \
elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi

View File

@@ -1,26 +1,31 @@
# SPDX-License-Identifier: MIT
---
dependency:
name: galaxy
driver:
name: docker
lint:
name: yamllint
options:
config-file: molecule/default/yamllint.yml
name: ${LSR_MOLECULE_DRIVER:-docker}
platforms:
- name: centos-6
image: linuxsystemroles/centos-6
privileged: true
- name: centos-7
image: linuxsystemroles/centos-7
image: registry.centos.org/centos:6
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
privileged: true
command: /sbin/init
- name: centos-7
image: registry.centos.org/centos/systemd:latest
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
privileged: true
command: /usr/lib/systemd/systemd --system
- name: centos-8
image: registry.centos.org/centos:8
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
privileged: true
command: /usr/lib/systemd/systemd --system
provisioner:
name: ansible
log: true
lint:
name: ansible-lint
playbooks:
converge: ../../tests/tests_default.yml
scenario:
@@ -32,7 +37,3 @@ scenario:
- idempotence
- check
- destroy
verifier:
name: testinfra
lint:
name: flake8

View File

@@ -1,12 +0,0 @@
---
extends: default
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
line-length: disable
truthy: disable
document-start: disable

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: MIT
# This file was generated using `pylint --generate-rcfile > pylintrc` command.
[MASTER]
@@ -8,7 +10,7 @@ extension-pkg-whitelist=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
ignore=.git,.tox
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
@@ -16,8 +18,7 @@ ignore-patterns=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()) + '/library'); sys.path.append(os.path.dirname(find_pylintrc()) + '/module_utils'); sys.path.append(os.path.dirname(find_pylintrc()) + '/tests')"
#init-hook=
# Use multiple processes to speed up Pylint.
jobs=1
@@ -56,7 +57,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=
disable=wrong-import-position
#disable=print-statement,
# parameter-unpacking,
# unpacking-in-except,
@@ -246,7 +247,7 @@ indent-after-paren=4
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=100
max-line-length=88
# Maximum number of lines in a module
max-module-lines=1000

View File

@@ -23,6 +23,23 @@
state: present
when:
- not network_packages is subset(ansible_facts.packages.keys())
register: __network_package_install
# If network packages changed and wireless or team connections are specified,
# NetworkManager must be restarted
- name: Restart NetworkManager due to wireless or team interfaces
service:
name: NetworkManager
state: restarted
when:
- __network_wireless_connections_defined
or __network_team_connections_defined
- network_provider == "nm"
- network_allow_restart
# ansible-lint wants this to be a handler, but this is not appropriate as
# NetworkManager must be restarted prior to the connections being created.
# see (https://docs.ansible.com/ansible-lint/rules/default_rules.html)
- __network_package_install.changed # noqa 503
- name: Enable and start NetworkManager
service:
@@ -31,6 +48,18 @@
enabled: true
when:
- network_provider == "nm"
no_log: true
# If any 802.1x connections are used, the wpa_supplicant
# service is required to be running
- name: Enable and start wpa_supplicant
service:
name: wpa_supplicant
state: started
enabled: true
when:
- network_provider == "nm"
- __network_wpa_supplicant_required
- name: Enable network service
service:
@@ -38,11 +67,13 @@
enabled: true
when:
- network_provider == "initscripts"
no_log: true
- name: Ensure initscripts network file dependency is present
copy:
dest: /etc/sysconfig/network
content: "# Created by network system role"
mode: "0644"
force: false
when:
- network_provider == "initscripts"
@@ -53,6 +84,11 @@
ignore_errors: "{{ network_ignore_errors | default(omit) }}"
force_state_change: "{{ network_force_state_change | default(omit) }}"
connections: "{{ network_connections | default([]) }}"
__debug_flags: "{{ __network_debug_flags | default(omit) }}"
register: __network_connections_result
- name: Show debug messages
debug: var=__network_connections_result
- name: Re-test connectivity
ping:

View File

@@ -1 +0,0 @@
roles/linux-system-roles.network/library/network_connections.py

View File

@@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- name: Set {{ profile }} down
hosts: all
vars:
network_connections:
- name: "{{ profile }}"
state: down
roles:
- linux-system-roles.network

View File

@@ -1,109 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
""" Check that there is a playbook to run all role tests with the non-default
provider as well """
# vim: fileencoding=utf8
import glob
import os
import sys
import yaml
OTHER_PROVIDER_SUFFIX = "_other_provider.yml"
IGNORE = [
"tests_helpers-and-asserts.yml",
"tests_states.yml",
"tests_unit.yml",
"tests_vlan_mtu_initscripts.yml",
"tests_vlan_mtu_nm.yml",
"tests_ethtool_features_initscripts.yml",
"tests_ethtool_features_nm.yml",
]
OTHER_PLAYBOOK = """
# SPDX-License-Identifier: BSD-3-Clause
---
- name: Run playbook '{tests_playbook}' with non-default provider
hosts: all
vars:
network_provider_current:
tasks:
# required for the code to set network_provider_current
- name: Get service facts
service_facts:
- name: Set network provider
set_fact:
network_provider: '{{{{ "initscripts" if network_provider_current == "nm"
else "nm" }}}}'
- import_playbook: "{tests_playbook}"
when:
- ansible_distribution_major_version != '6'
""" # noqa: E501 # ignore that the line is too long
def get_current_provider_code():
with open("../defaults/main.yml") as defaults:
yaml_defaults = yaml.safe_load(defaults)
current_provider = yaml_defaults["network_provider_current"]
return current_provider
def generate_nominal_other_playbook(tests_playbook):
nominal_other_testfile_data = OTHER_PLAYBOOK.format(tests_playbook=tests_playbook)
nominal = yaml.safe_load(nominal_other_testfile_data)
nominal[0]["vars"]["network_provider_current"] = get_current_provider_code()
return yaml.dump(nominal, default_flow_style=False, explicit_start=True, width=80)
def main():
testsfiles = glob.glob("tests_*.yml")
missing = []
returncode = 0
# Generate files when specified
generate = bool(len(sys.argv) > 1 and sys.argv[1] == "generate")
if not testsfiles:
print("ERROR: No tests found")
returncode = 1
for filename in testsfiles:
if filename.endswith(OTHER_PROVIDER_SUFFIX):
continue
if filename in IGNORE:
continue
fileroot = os.path.splitext(filename)[0]
other_testfile = fileroot + OTHER_PROVIDER_SUFFIX
nominal_other_testfile_data = generate_nominal_other_playbook(filename)
if generate:
with open(other_testfile, "w") as ofile:
ofile.write(nominal_other_testfile_data)
if other_testfile not in testsfiles and not generate:
missing.append(filename)
else:
with open(other_testfile) as ifile:
testdata = ifile.read()
if testdata != nominal_other_testfile_data:
print(
"ERROR: Playbook does not match nominal value " + other_testfile
)
returncode = 1
if missing:
print("ERROR: No tests for other provider found for:\n" + ", \n".join(missing))
print("Try to generate them with '{} generate'".format(sys.argv[0]))
returncode = 1
return returncode
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,68 +0,0 @@
#! /bin/bash
# SPDX-License-Identifier: BSD-3-Clause
if [ -n "${DEBUG}" ]
then
set -x
fi
set -e
if [ "$#" -lt 2 ]
then
echo "USAGE: ${0} host playbook"
echo "Get coverage info from host for playbook"
exit 1
fi
host="${1}"
shift
playbook="${1}"
coverage_data="remote-coveragedata-${host}-${playbook%.yml}"
coverage="/root/.local/bin/coverage"
echo "Getting coverage for ${playbook} on ${host}" >&2
call_ansible() {
local module="${1}"
shift
local args="${1}"
shift
ansible -m "${module}" -i "${host}", -a "${args}" all "${@}"
}
remote_coverage_dir="$(mktemp -d /tmp/remote_coverage-XXXXXX)"
trap "rm -rf '${remote_coverage_dir}'" EXIT
ansible-playbook -i "${host}", get-coverage.yml -e "test_playbook=${playbook} destdir=${remote_coverage_dir}"
#COVERAGE_FILE=remote-coverage coverage combine remote-coverage/tests_*/*/root/.coverage
./merge-coverage.sh coverage "${coverage_data}"-tmp $(find "${remote_coverage_dir}" -type f | tr , _)
# When https://github.com/nedbat/coveragepy/pull/49 is merged, this can be simplified:
if false
then
cat > tmp_merge_coveragerc <<EOF
[paths]
source =
.
/tmp/ansible_*/
EOF
else
cat > tmp_merge_coveragerc <<EOF
[paths]
source =
.
EOF
for file in $(COVERAGE_FILE="${coverage_data}"-tmp coverage report | grep -o "/tmp/ansible_[^/]*" | sort -u)
do
echo " ${file}" >> tmp_merge_coveragerc
done
fi
COVERAGE_FILE="${coverage_data}" coverage combine --rcfile tmp_merge_coveragerc "${coverage_data}"-tmp
rm tmp_merge_coveragerc
COVERAGE_FILE="${coverage_data}" coverage report ||:
COVERAGE_FILE="${coverage_data}" coverage html --directory "htmlcov-${coverage_data}" ||:
echo "Coverage collected in: ${coverage_data}"

View File

@@ -1,66 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
# This expects the variable test_playbook to be set from the outside
- name: Prepare for coverage extraction
hosts: all
tasks:
# Use set_fact to set variables to make them available in all plays
# 'vars:' Would only set variables for the current play
- name: set facts
set_fact:
coverage_module: network_connections
coverage: /root/.local/bin/coverage
destdir: "remote_coverage/{{ test_playbook }}"
# This uses variables from the other set_fact task, therefore it needs to
# be its own task
- name: set more facts
set_fact:
coverage_file: ansible-coverage-{{ coverage_module }}-{{ test_playbook|replace('.yml', '') }}
- name: debug info
debug:
msg: Getting coverage for '{{ coverage_module }}' with '{{ test_playbook }}'
# combine data in case old data is left there
- command: "{{ coverage }} combine"
environment:
COVERAGE_FILE: "{{ coverage_file }}"
ignore_errors: yes
- name: remove old data
file:
state: absent
path: "{{ coverage_file }}"
- name: remove old data
shell: rm -f .coverage.*
- name: copy coveragerc
copy:
content: "[run]\ndisable_warnings = no-data-collected\n"
dest: .coveragerc
- name: install latest pip
pip:
name: coverage
extra_args: --user --upgrade
- import_playbook: "{{ test_playbook }}"
vars:
ansible_python_interpreter: "{{ coverage }} run -p --include *ansible_module_{{ coverage_module }}.py"
- name: Gather coverage data
hosts: all
tasks:
- shell: "{{ coverage }} combine .coverage.*"
environment:
COVERAGE_FILE: "{{ coverage_file }}"
- name: Get coverage data
hosts: all
tasks:
- fetch:
src: "{{ coverage_file }}"
dest: "{{ destdir }}"
flat: no

View File

@@ -1,34 +0,0 @@
#! /bin/bash
# SPDX-License-Identifier: BSD-3-Clause
set -e
coverage_data=total-coveragedata
testhost="${1}"
if [ "$#" -lt 1 ]
then
echo "USAGE: ${0} host"
echo "Get local and all remote coverage data for host"
exit 1
fi
rm -f remote-coveragedata* "${coverage_data}"
# collect pytest coverage
tox -e py26,py27,py36,py37 -- --cov-append
for test_playbook in tests_*.yml
do
./get-coverage.sh "${testhost}" "${test_playbook}"
done
./merge-coverage.sh coverage "total-remote-coveragedata" remote-coveragedata-*
./covstats .coverage remote-coveragedata-* "total-remote-coveragedata"
./merge-coverage.sh coverage "${coverage_data}" .coverage remote-coveragedata-*
echo "Total coverage:"
COVERAGE_FILE="${coverage_data}" coverage report ||:
COVERAGE_FILE="${coverage_data}" coverage html --directory "htmlcov-${coverage_data}" ||:
echo "Open HTML report with:"
echo "xdg-open htmlcov-${coverage_data}/index.html"

View File

@@ -1,35 +0,0 @@
#! /bin/bash
# SPDX-License-Identifier: BSD-3-Clause
if [ -n "${DEBUG}" ]
then
set -x
fi
set -e
if [ "$#" -lt 3 ]
then
echo "USAGE: ${0} path_to_coverage_binary output_file input_files..."
echo "Merges all input_files into output file without removing input_files"
exit 1
fi
# path to coverage binary
coverage="${1}"
shift
# read by coverage binary
export COVERAGE_FILE="${1}"
shift
tempdir="$(mktemp -d /tmp/coverage_merge-XXXXXX)"
trap "rm -rf '${tempdir}'" EXIT
cp --backup=numbered -- "${@}" "${tempdir}"
# FIXME: Would not work if coverage files are not hidden but they are by
# default
shopt -s dotglob
"${coverage}" combine "${tempdir}/"*
echo "Merged data into ${COVERAGE_FILE}"
./covstats "${COVERAGE_FILE}"

View File

@@ -2,23 +2,31 @@
---
- hosts: all
vars:
interface: lsrfeat1
interface: testnic1
type: veth
tasks:
- debug:
msg: "this is: playbooks/tests_ethtool_features.yml"
tags:
- always
- name: "INIT: Ethtool feeatures tests"
debug:
msg: "##################################################"
- include_tasks: tasks/show-interfaces.yml
- include_tasks: tasks/manage-test-interface.yml
- include_tasks: tasks/show_interfaces.yml
- include_tasks: tasks/manage_test_interface.yml
vars:
state: present
- include_tasks: tasks/assert-device_present.yml
- include_tasks: tasks/assert_device_present.yml
- name: Install ethtool (test dependency)
package:
name: ethtool
state: present
- block:
- name: "TEST: I can create a profile without changing the ethtool features."
- name: >-
TEST: I can create a profile without changing the ethtool features.
debug:
msg: "##################################################"
- name: Get current device features
@@ -41,7 +49,10 @@
assert:
that:
- original_ethtool_features.stdout == ethtool_features.stdout
- name: "TEST: I can disable gro and tx-tcp-segmentation and enable gso."
- name: >-
TEST: I can disable gro and tx-tcp-segmentation and enable gso.
debug:
msg: "##################################################"
- import_role:
@@ -68,9 +79,87 @@
- name: Assert device features
assert:
that:
- "'generic-receive-offload: off' in ethtool_features.stdout_lines"
- "'generic-segmentation-offload: on' in ethtool_features.stdout_lines"
- "'tx-tcp-segmentation: off' in ethtool_features.stdout_lines | map('trim')"
- >-
'generic-receive-offload: off' in
ethtool_features.stdout_lines
- >-
'generic-segmentation-offload: on' in
ethtool_features.stdout_lines
- >-
'tx-tcp-segmentation: off' in
ethtool_features.stdout_lines | map('trim')
- name: >-
TEST: I can enable tx_tcp_segmentation (using underscores).
debug:
msg: "##################################################"
- import_role:
name: linux-system-roles.network
vars:
network_connections:
- name: "{{ interface }}"
state: up
type: ethernet
ip:
dhcp4: "no"
auto6: "no"
ethtool:
features:
tx_tcp_segmentation: "yes"
- name: Get current device features
command: "ethtool --show-features {{ interface }}"
register: ethtool_features
- name:
debug:
var: ethtool_features.stdout_lines
- name: Assert device features
assert:
that:
- >-
'tx-tcp-segmentation: on' in
ethtool_features.stdout_lines | map('trim')
- name: I cannot change tx_tcp_segmentation and tx-tcp-segmentation at
the same time.
block:
- name: >-
TEST: Change feature with both underscores and dashes.
debug:
msg: "##################################################"
- network_connections:
provider: "{{ network_provider | mandatory }}"
connections:
- name: "{{ interface }}"
state: up
type: ethernet
ip:
dhcp4: "no"
auto6: "no"
ethtool:
features:
tx_tcp_segmentation: "no"
tx-tcp-segmentation: "no"
register: __network_connections_result
rescue:
- name: Show network_connections result
debug:
var: __network_connections_result
- assert:
that:
- '{{ "fatal error: configuration error:
connections[0].ethtool.features: duplicate key
''tx_tcp_segmentation''" in
__network_connections_result.msg }}'
always:
- name: Check failure
debug:
var: __network_connections_result
- assert:
that: "{{ __network_connections_result.failed == true }}"
- name: "TEST: I can reset features to their original value."
debug:
msg: "##################################################"
@@ -104,7 +193,7 @@
persistent_state: absent
state: down
ignore_errors: true
- include_tasks: tasks/manage-test-interface.yml
- include_tasks: tasks/manage_test_interface.yml
vars:
state: absent
tags:

View File

@@ -4,46 +4,134 @@
vars:
interface: statebr
profile: "{{ interface }}"
network_provider: nm
lsr_fail_debug:
- __network_connections_result
tasks:
- debug:
msg: Inside states tests
- include_tasks: tasks/show-interfaces.yml
- include_tasks: tasks/assert-device_absent.yml
msg: "this is: playbooks/tests_states.yml"
tags:
- always
# create test profile
- include_role:
name: linux-system-roles.network
vars:
network_connections:
- name: statebr
state: up
type: bridge
ip:
dhcp4: false
auto6: false
- include_tasks: tasks/assert-device_present.yml
- include_tasks: tasks/assert-profile_present.yml
# test case (remove profile but keep it up)
# I can remove a profile but keep the configuration active.
- include_role:
name: linux-system-roles.network
vars:
network_connections:
- name: statebr
persistent_state: absent
- include_tasks: tasks/assert-device_present.yml
- include_tasks: tasks/assert-profile_absent.yml
- block:
- include_tasks: tasks/run_test.yml
vars:
lsr_description: I can create a profile
lsr_setup:
- tasks/delete_interface.yml
- tasks/assert_device_absent.yml
lsr_test:
- tasks/create_bridge_profile.yml
lsr_assert:
- tasks/assert_profile_present.yml
lsr_assert_when:
# Device should be present because of autoconnect: true by
# default for NM (this might be considered a bug)
- what: tasks/assert_device_present.yml
when: "{{ network_provider == 'nm' }}"
lsr_cleanup:
- tasks/cleanup_profile+device.yml
tags:
- tests::states:create
# test case
# I can set a profile down that is up and absent.
- name: Set down
include_role:
name: linux-system-roles.network
vars:
network_connections:
- name: statebr
state: down
- include_tasks: tasks/assert-device_absent.yml
- include_tasks: tasks/assert-profile_absent.yml
- block:
- include_tasks: tasks/run_test.yml
vars:
lsr_description: I can create a profile without autoconnect
lsr_setup:
- tasks/delete_interface.yml
- tasks/assert_device_absent.yml
lsr_test:
- tasks/create_bridge_profile_no_autoconnect.yml
lsr_assert:
# Device should be absent because of autoconnect: false
- tasks/assert_device_absent.yml
- tasks/assert_profile_present.yml
lsr_cleanup:
- tasks/cleanup_profile+device.yml
tags:
- tests::states:create_without_autoconnect
- block:
- include_tasks: tasks/run_test.yml
vars:
lsr_description: I can activate an existing profile
lsr_setup:
- tasks/create_bridge_profile.yml
lsr_test:
- tasks/activate_profile.yml
lsr_assert:
- tasks/assert_device_present.yml
- tasks/assert_profile_present.yml
lsr_cleanup:
- tasks/cleanup_profile+device.yml
tags:
- tests::states:activate
- block:
- include_tasks: tasks/run_test.yml
vars:
lsr_description: I can remove an existing profile without taking it
down
lsr_setup:
- tasks/create_bridge_profile.yml
- tasks/activate_profile.yml
lsr_test:
- tasks/remove_profile.yml
lsr_assert:
- tasks/assert_device_present.yml
- tasks/assert_profile_absent.yml
lsr_cleanup:
- tasks/cleanup_profile+device.yml
tags:
- tests::states:remove_up
- block:
- include_tasks: tasks/run_test.yml
vars:
lsr_description: I can take a profile down that is absent
lsr_setup:
- tasks/create_bridge_profile.yml
- tasks/activate_profile.yml
- tasks/remove_profile.yml
lsr_test:
- tasks/remove+down_profile.yml
lsr_assert:
- tasks/assert_profile_absent.yml
lsr_assert_when:
- what: tasks/assert_device_absent.yml
when: "{{ network_provider == 'nm' }}"
lsr_cleanup:
- tasks/cleanup_profile+device.yml
tags:
- tests::states:remove_down
- block:
- include_tasks: tasks/run_test.yml
vars:
lsr_description: I will not get an error when I try to
remove an absent profile
lsr_setup:
- tasks/create_bridge_profile.yml
- tasks/activate_profile.yml
- tasks/remove+down_profile.yml
lsr_test:
- tasks/remove+down_profile.yml
lsr_assert:
- tasks/assert_profile_absent.yml
# FIXME: This needs to be included before lsr_assert_when but
# after the role ran to ensure that NetworkManager is actually
# installed but it is not an assert.
- tasks/get_NetworkManager_NVR.yml
lsr_assert_when:
- what: tasks/assert_device_absent.yml
# NetworkManager 1.18.4 from CentOS does not seem to remove the
# virtual interface in this case but it seems to work with
# 1:NetworkManager-1.27.0-26129.d0a2eb8f05.el7
when: "{{ network_provider == 'nm' and
NetworkManager_NVR != 'NetworkManager-1.18.4-3.el7'
}}"
lsr_cleanup:
- tasks/cleanup_profile+device.yml
tags:
- tests::states:remove_down_twice

View File

@@ -6,12 +6,13 @@
interface: lsr101
vlan_interface: lsr101.90
tasks:
- include_tasks: tasks/show-interfaces.yml
- include_tasks: tasks/manage-test-interface.yml
- include_tasks: tasks/show_interfaces.yml
- include_tasks: tasks/manage_test_interface.yml
vars:
state: present
- include_tasks: tasks/assert-device_present.yml
- name: "TEST: I can configure the MTU for a vlan interface without autoconnect."
- include_tasks: tasks/assert_device_present.yml
- name: >-
TEST: I can configure the MTU for a vlan interface without autoconnect.
debug:
msg: "##################################################"
- import_role:
@@ -37,15 +38,15 @@
ip:
dhcp4: false
auto6: false
- include_tasks: tasks/assert-device_present.yml
- include_tasks: tasks/assert_device_present.yml
vars:
interface: "{{ vlan_interface }}"
- include_tasks: tasks/assert-profile_present.yml
- include_tasks: tasks/assert_profile_present.yml
vars:
profile: "{{ item }}"
loop:
- "{{ interface }}"
- "{{ vlan_interface }}"
- "{{ interface }}"
- "{{ vlan_interface }}"
- name: "TEARDOWN: remove profiles."
debug:
@@ -61,6 +62,6 @@
persistent_state: absent
state: down
ignore_errors: true
- include_tasks: tasks/manage-test-interface.yml
- include_tasks: tasks/manage_test_interface.yml
vars:
state: absent

View File

@@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- name: Remove {{ profile }}
hosts: all
vars:
network_connections:
- name: "{{ profile }}"
persistent_state: absent
roles:
- linux-system-roles.network

View File

@@ -1,6 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- name: Run the tasklist {{ task }}
hosts: all
tasks:
- include_tasks: "{{ task }}"

View File

@@ -1,7 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- include: get-interface_stat.yml
- name: "assert that interface {{ interface }} is absent"
assert:
that: not interface_stat.stat.exists
msg: "{{ interface }} exists"

View File

@@ -1,7 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- include: get-interface_stat.yml
- name: "assert that interface {{ interface }} is present"
assert:
that: interface_stat.stat.exists
msg: "{{ interface }} does not exist"

View File

@@ -1,7 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- include: get-profile_stat.yml
- name: "assert that profile '{{ profile }}' is absent"
assert:
that: not profile_stat.stat.exists
msg: "profile {{ profile_path }} does exist"

View File

@@ -1,7 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- include: get-profile_stat.yml
- name: "assert that profile '{{ profile }}' is present"
assert:
that: profile_stat.stat.exists
msg: "profile {{ profile_path }} does not exist"

View File

@@ -1,20 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- include_tasks: show-interfaces.yml
- include_tasks: manage-test-interface.yml
vars:
state: absent
- include_tasks: show-interfaces.yml
- include_tasks: assert-device_absent.yml
- include_tasks: manage-test-interface.yml
vars:
state: present
- include_tasks: show-interfaces.yml
- include_tasks: assert-device_present.yml
- include_tasks: manage-test-interface.yml
vars:
state: absent
- include_tasks: show-interfaces.yml
- include_tasks: assert-device_absent.yml

View File

@@ -1,8 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- command: ls -1
args:
chdir: /sys/class/net
register: _current_interfaces
- set_fact:
current_interfaces: "{{ _current_interfaces.stdout_lines }}"

View File

@@ -1,9 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- name: "Get stat for interface {{ interface }}"
stat:
get_attributes: false
get_checksum: false
get_mime: false
path: "/sys/class/net/{{ interface }}"
register: interface_stat

View File

@@ -1,26 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- name: "Get stat for network-scripts"
stat:
get_attributes: false
get_checksum: false
get_mime: false
path: "/etc/sysconfig/network-scripts"
register: network_scripts_stat
- name: Set profile path (network-scripts)
set_fact:
profile_path: /etc/sysconfig/network-scripts/ifcfg-{{ profile }}
when:
- network_scripts_stat.stat.exists
- name: Set profile path (NetworkManager system-connections)
set_fact:
profile_path: /etc/NetworkManager/system-connections/{{ profile }}
when:
- not network_scripts_stat.stat.exists
- name: stat profile file
stat:
get_attributes: false
get_checksum: false
get_mime: false
path: "{{ profile_path }}"
register: profile_stat

View File

@@ -1,50 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- fail:
msg: "state needs to be present or absent, not '{{ state }}'"
when: state not in ["present", "absent"]
- fail:
msg: "type needs to be dummy, tap or veth, not '{{ type }}'"
when: type not in ["dummy", "tap", "veth"]
# - include: get-current_interfaces.yml
- include: show-interfaces.yml
- name: Install iproute
package:
name: iproute
state: present
# veth
- name: Create veth interface {{ interface }}
shell: ip link add {{ interface }} type veth peer name peer{{ interface }}
when: "type == 'veth' and state == 'present' and
interface not in current_interfaces"
- name: Delete veth interface {{ interface }}
shell: ip link del {{ interface }} type veth
when: "type == 'veth' and state == 'absent' and
interface in current_interfaces"
# dummy
- name: Create dummy interface {{ interface }}
shell: ip link add "{{ interface }}" type dummy
when: "type == 'dummy' and state == 'present' and
interface not in current_interfaces"
- name: Delete dummy interface {{ interface }}
shell: ip link del "{{ interface }}" type dummy
when: "type == 'dummy' and state == 'absent' and
interface in current_interfaces"
# tap
- name: Create tap interface {{ interface }}
shell: ip tuntap add dev {{ interface }} mode tap
when: "type == 'tap' and state == 'present'
and interface not in current_interfaces"
- name: Delete tap interface {{ interface }}
shell: ip tuntap del dev {{ interface }} mode tap
when: "type == 'tap' and state == 'absent' and
interface in current_interfaces"

View File

@@ -1,5 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- include: get-current_interfaces.yml
- debug:
msg: "current_interfaces: {{ current_interfaces }}"

View File

@@ -1,55 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- name: Test configuring bridges
hosts: all
vars:
interface: LSR-TST-br31
tasks:
- name: "set interface={{ interface }}"
set_fact:
interface: "{{ interface }}"
- include_tasks: tasks/show-interfaces.yml
- include_tasks: tasks/assert-device_absent.yml
- name: Add test bridge
hosts: all
vars:
network_connections:
- name: "{{ interface }}"
interface_name: "{{ interface }}"
state: up
type: bridge
ip:
dhcp4: no
auto6: yes
roles:
- linux-system-roles.network
- import_playbook: run-tasks.yml
vars:
task: tasks/assert-device_present.yml
- import_playbook: run-tasks.yml
vars:
profile: "{{ interface }}"
task: tasks/assert-profile_present.yml
- import_playbook: down-profile.yml
vars:
profile: "{{ interface }}"
# FIXME: assert profile/device down
- import_playbook: remove-profile.yml
vars:
profile: "{{ interface }}"
- import_playbook: run-tasks.yml
vars:
profile: "{{ interface }}"
task: tasks/assert-profile_absent.yml
# FIXME: Devices might still be left when profile is absent
#- import_playbook: run-tasks.yml
# vars:
# task: tasks/assert-device_absent.yml

View File

@@ -1,17 +0,0 @@
---
- hosts: all
name: Run playbook 'tests_bridge.yml' with non-default provider
tasks:
- name: Get service facts
service_facts: null
- name: Set network provider
set_fact:
network_provider: '{{ "initscripts" if network_provider_current == "nm" else
"nm" }}'
vars:
network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services
and ansible_facts.services[''NetworkManager.service''][''state''] == ''running''
else ''initscripts'' }}'
- import_playbook: tests_bridge.yml
when:
- ansible_distribution_major_version != '6'

View File

@@ -4,3 +4,10 @@
hosts: all
roles:
- linux-system-roles.network
tasks:
- include_tasks: tasks/el_repo_setup.yml
- name: Test warning and info logs
assert:
that:
- "'warnings' not in __network_connections_result"
msg: "There are warnings"

View File

@@ -1,17 +0,0 @@
---
- hosts: all
name: Run playbook 'tests_default.yml' with non-default provider
tasks:
- name: Get service facts
service_facts: null
- name: Set network provider
set_fact:
network_provider: '{{ "initscripts" if network_provider_current == "nm" else
"nm" }}'
vars:
network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services
and ansible_facts.services[''NetworkManager.service''][''state''] == ''running''
else ''initscripts'' }}'
- import_playbook: tests_default.yml
when:
- ansible_distribution_major_version != '6'

View File

@@ -1,62 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- hosts: all
tasks:
- debug:
msg: Inside ethernet tests
- debug:
var: network_provider
- name: Test configuring ethernet devices
hosts: all
vars:
type: veth
interface: lsr27
tasks:
- name: "set type={{ type }} and interface={{ interface }}"
set_fact:
type: "{{ type }}"
interface: "{{ interface }}"
- include_tasks: tasks/show-interfaces.yml
- include_tasks: tasks/manage-test-interface.yml
vars:
state: present
- include_tasks: tasks/assert-device_present.yml
- name: Test static interface up
hosts: all
vars:
network_connections:
- name: "{{ interface }}"
interface_name: "{{ interface }}"
state: up
type: ethernet
autoconnect: yes
ip:
address: 192.0.2.1/24
roles:
- linux-system-roles.network
- hosts: all
tasks:
- debug:
var: network_provider
# FIXME: assert profile present
# FIXME: assert profile/device up + IP address
- import_playbook: down-profile.yml
vars:
profile: "{{ interface }}"
# FIXME: assert profile/device down
- import_playbook: remove-profile.yml
vars:
profile: "{{ interface }}"
# FIXME: assert profile away
- name: Remove interfaces
hosts: all
tasks:
- include_tasks: tasks/manage-test-interface.yml
vars:
state: absent
- include_tasks: tasks/assert-device_absent.yml

View File

@@ -1,17 +0,0 @@
---
- hosts: all
name: Run playbook 'tests_ethernet.yml' with non-default provider
tasks:
- name: Get service facts
service_facts: null
- name: Set network provider
set_fact:
network_provider: '{{ "initscripts" if network_provider_current == "nm" else
"nm" }}'
vars:
network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services
and ansible_facts.services[''NetworkManager.service''][''state''] == ''running''
else ''initscripts'' }}'
- import_playbook: tests_ethernet.yml
when:
- ansible_distribution_major_version != '6'

View File

@@ -2,6 +2,7 @@
# set network provider and gather facts
- hosts: all
tasks:
- include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts

View File

@@ -1,28 +1,39 @@
# SPDX-License-Identifier: BSD-3-Clause
# This file was generated by ensure_provider_tests.py
---
# set network provider and gather facts
- hosts: all
name: Run playbook 'playbooks/tests_ethtool_features.yml' with nm as provider
tasks:
- include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
- name: Install NetworkManager
package:
name: NetworkManager
state: present
- name: Get NetworkManager version
command: rpm -q --qf "%{version}" NetworkManager
args:
warn: "no"
when: true
register: NetworkManager_version
tags:
- always
# workaround for: https://github.com/ansible/ansible/issues/27973
# There is no way in Ansible to abort a playbook for hosts with specific OS
# releases. Therefore we include the playbook with the tests only if the hosts
# would support it.
# The test should run with NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6.
- block:
- name: Install NetworkManager
package:
name: NetworkManager
state: present
- name: Get NetworkManager version
command: rpm -q --qf "%{version}" NetworkManager
args:
warn: false
register: NetworkManager_version
when: true
when:
- ansible_distribution_major_version != '6'
tags:
- always
# The test requires or should run with NetworkManager, therefore it cannot run
# on RHEL/CentOS 6
- import_playbook: playbooks/tests_ethtool_features.yml
when:
- ansible_distribution_major_version != '6'
# NetworkManager 1.20.0 introduced ethtool settings support
- NetworkManager_version.stdout is version('1.20.0', '>=')

View File

@@ -1,27 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
---
- name: Check that creating and removing test devices and assertions work
hosts: all
tasks:
- name: test veth interface management
include_tasks: tasks/create-and-remove-interface.yml
vars:
type: veth
interface: veth1298
- name: test veth interface management
include_tasks: tasks/create-and-remove-interface.yml
vars:
type: dummy
interface: dummy1298
# FIXME: when: does not seem to work with include_tasks, therefore this cannot be safely tested for now
# - name: test tap interfaces
# include_tasks: tasks/create-and-remove-interface.yml
# vars:
# - type: tap
# - interface: tap1298
# when: ansible_distribution_major_version > 6
# # ip tuntap does not exist on RHEL6
# # FIXME: Maybe use some other tool to manage devices, openvpn can do this,
# # but it is in EPEL

View File

@@ -1,11 +0,0 @@
---
# empty playbook to gather facts for import_playbook when clause
- hosts: all
# workaround for: https://github.com/ansible/ansible/issues/27973
# There is no way in Ansible to abort a playbook for hosts with specific OS
# releases. Therefore we include the playbook with the tests only if the hosts
# would support it.
# The test requires NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6.
- import_playbook: playbooks/tests_states.yml
when: ansible_distribution_major_version != '6'

View File

@@ -3,14 +3,7 @@
- hosts: all
name: Setup for test running
tasks:
- name: Install EPEL on enterprise Linux for python2-mock
command: yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
args:
warn: false
creates: /etc/yum.repos.d/epel.repo
when:
- ansible_distribution in ['RedHat', 'CentOS']
- ansible_distribution_major_version in ['6', '7']
- include_tasks: tasks/el_repo_setup.yml
- name: Install dependencies
package:
@@ -28,62 +21,140 @@
- hosts: all
name: execute python unit tests
tasks:
- name: Copy python modules
copy:
src: "{{ item }}"
dest: /tmp/test-unit-1/
local_follow: false
loop:
- ../library/network_connections.py
- unit/test_network_connections.py
- ../module_utils/network_lsr
- block:
- name: create tempdir for code to test
tempfile:
state: directory
prefix: lsrtest_
register: _rundir
- name: Create helpers directory
file:
state: directory
dest: /tmp/test-unit-1/helpers
- name: get tempfile for tar
tempfile:
prefix: lsrtest_
suffix: ".tar"
register: temptar
delegate_to: localhost
- name: Copy helpers
copy:
src: "{{ item }}"
dest: /tmp/test-unit-1/helpers
mode: 0755
with_fileglob:
- unit/helpers/*
- include_tasks: tasks/get_modules_and_utils_paths.yml
- name: Check if python2 is available
command: python2 --version
ignore_errors: true
register: python2_available
when: true
# TODO: using tar and copying the file is a workaround for the
# synchronize module that does not work in test-harness. Related issue:
# https://github.com/linux-system-roles/test-harness/issues/102
#
- name: Create Tar file
command: >
tar -cvf {{ temptar.path }} --exclude "*.pyc"
--exclude "__pycache__"
-C {{ modules_parent_and_dir.stdout_lines[0] }}
{{ modules_parent_and_dir.stdout_lines[1] }}
-C {{ module_utils_parent_and_dir.stdout_lines[0] }}
{{ module_utils_parent_and_dir.stdout_lines[1] }}
delegate_to: localhost
- name: Run python2 unit tests
command: python2 /tmp/test-unit-1/test_network_connections.py --verbose
when: python2_available is succeeded
register: python2_result
- name: Copy testrepo.tar to the remote system
copy:
src: "{{ temptar.path }}"
dest: "{{ _rundir.path }}"
- name: Check if python3 is available
command: python3 --version
ignore_errors: true
register: python3_available
when: true
- name: Untar testrepo.tar
command: tar -xvf {{ temptar.path | basename }}
args:
chdir: "{{ _rundir.path }}"
- name: Run python3 unit tests
command: python3 /tmp/test-unit-1/test_network_connections.py --verbose
when: python3_available is succeeded
register: python3_result
- file:
state: directory
path: "{{ item }}"
loop:
- "{{ _rundir.path }}/ansible"
- "{{ _rundir.path }}/ansible/module_utils"
- name: Show python2 unit test results
debug:
var: python2_result.stderr_lines
when: python2_result is succeeded
- name: Move module_utils to ansible directory
shell: |
if [ -d {{ _rundir.path }}/module_utils ]; then
mv {{ _rundir.path }}/module_utils {{ _rundir.path }}/ansible
fi
- name: Show python3 unit test results
debug:
var: python3_result.stderr_lines
when: python3_result is succeeded
- name: Fake out python module directories, primarily for python2
shell: |
for dir in $(find {{ _rundir.path }} -type d -print); do
if [ ! -f "$dir/__init__.py" ]; then
touch "$dir/__init__.py"
fi
done
- name: Copy unit test to remote system
copy:
src: unit/test_network_connections.py
dest: "{{ _rundir.path }}"
- set_fact:
_lsr_python_path: "{{
_rundir.path ~ '/' ~
modules_parent_and_dir.stdout_lines[1] ~ ':' ~
_rundir.path ~ '/' ~ 'ansible' ~ '/' ~
module_utils_parent_and_dir.stdout_lines[1] ~ ':' ~
_rundir.path ~ '/' ~
module_utils_parent_and_dir.stdout_lines[1] ~ ':' ~
_rundir.path
}}"
- command: ls -alrtFR {{ _rundir.path }}
- debug:
msg: path {{ _lsr_python_path }}
- name: Check if python2 is available
command: python2 --version
ignore_errors: true
register: python2_available
when: true
- name: Run python2 unit tests
command: >
python2 {{ _rundir.path }}/test_network_connections.py --verbose
environment:
PYTHONPATH: "{{ _lsr_python_path }}"
when: >
python2_available is succeeded and ansible_distribution != 'Fedora'
register: python2_result
- name: Check if python3 is available
command: python3 --version
ignore_errors: true
register: python3_available
when: true
- name: Run python3 unit tests
command: >
python3 {{ _rundir.path }}/test_network_connections.py --verbose
environment:
PYTHONPATH: "{{ _lsr_python_path }}"
when: python3_available is succeeded
register: python3_result
- name: Show python2 unit test results
debug:
var: python2_result.stderr_lines
when: python2_result is succeeded
- name: Show python3 unit test results
debug:
var: python3_result.stderr_lines
when: python3_result is succeeded
always:
- name: remove local tar file
file:
state: absent
path: "{{ temptar.path }}"
delegate_to: localhost
- name: remove tempdir
file:
state: absent
path: "{{ _rundir.path }}"
- name: Ensure that at least one python unit test ran
fail:
msg: Tests did not run with python2 or python3
when: not (python2_available is succeeded or python3_available is succeeded)
when: not python2_available is succeeded and
not python3_available is succeeded

View File

@@ -1,13 +1,15 @@
# SPDX-License-Identifier: BSD-3-Clause
# This file was generated by ensure_provider_tests.py
---
# set network provider and gather facts
- hosts: all
name: Run playbook 'playbooks/tests_vlan_mtu.yml' with initscripts as provider
tasks:
- include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
tags:
- always
# workaround for: https://github.com/ansible/ansible/issues/27973
# There is no way in Ansible to abort a playbook for hosts with specific OS
# releases. Therefore we include the playbook with the tests only if the hosts
# would support it.
- import_playbook: playbooks/tests_vlan_mtu.yml

View File

@@ -1,15 +1,21 @@
# SPDX-License-Identifier: BSD-3-Clause
# This file was generated by ensure_provider_tests.py
---
# set network provider and gather facts
- hosts: all
name: Run playbook 'playbooks/tests_vlan_mtu.yml' with nm as provider
tasks:
- include_tasks: tasks/el_repo_setup.yml
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
tags:
- always
# workaround for: https://github.com/ansible/ansible/issues/27973
# There is no way in Ansible to abort a playbook for hosts with specific OS
# releases. Therefore we include the playbook with the tests only if the hosts
# would support it.
# The test requires NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6.
# The test requires (or should run with) NetworkManager, therefore it cannot run
# on RHEL/CentOS 6.
- import_playbook: playbooks/tests_vlan_mtu.yml
when: ansible_distribution_major_version != '6'
when:
- ansible_distribution_major_version != '6'

View File

@@ -1,6 +0,0 @@
#! /bin/bash
if [ "${1}" == "-P" ] && [ "${2}" != "" ]
then
echo "Permanent address: 23:00:00:00:00:00"
fi

File diff suppressed because it is too large

View File

@@ -27,5 +27,12 @@ with mock.patch.dict("sys.modules", {"gi": mock.Mock(), "gi.repository": mock.Mo
def test_get_nm_ethtool_feature():
""" Test get_nm_ethtool_feature() """
with mock.patch.object(nm_provider.Util, "NM") as nm_mock:
nm_feature = nm_provider.get_nm_ethtool_feature("esp-hw-offload")
nm_feature = nm_provider.get_nm_ethtool_feature("esp_hw_offload")
assert nm_feature == nm_mock.return_value.ETHTOOL_OPTNAME_FEATURE_ESP_HW_OFFLOAD
def test_get_nm_ethtool_coalesce():
""" Test get_nm_ethtool_coalesce() """
with mock.patch.object(nm_provider.Util, "NM") as nm_mock:
nm_feature = nm_provider.get_nm_ethtool_coalesce("rx_frames")
assert nm_feature == nm_mock.return_value.ETHTOOL_OPTNAME_COALESCE_RX_FRAMES

View File

@@ -1,181 +1,22 @@
[tox]
envlist = black, flake8, pylint, py{26,27,36,37}, ensure_non_running_provider
skipsdist = true
skip_missing_interpreters = True
# SPDX-License-Identifier: MIT
[lsr_config]
lsr_enable = true
[lsr_yamllint]
configfile = .yamllint.yml
configbasename = .yamllint.yml
[lsr_ansible-lint]
configfile = .ansible-lint
[testenv]
basepython = python3
deps =
py{26,27,36,37,38}: pytest-cov
py{27,36,37,38}: pytest>=3.5.1
py{26,27}: mock
py26: pytest
molecule_{lint,syntax,test}: docker
molecule_{lint,syntax,test}: jmespath
molecule_{lint,syntax,test}: molecule
# The selinux pypi shim does not work with Ubuntu (as used by Travis), yet.
# Therefore use a fork with Ubuntu support. This can be changed once the
# update is available on PyPi.
# molecule_{lint,syntax,test}: selinux
molecule_{lint,syntax,test}: git+https://github.com/tyll/selinux-pypi-shim@fulllocation
[base]
passenv = *
setenv =
PYTHONPATH = {toxinidir}/library:{toxinidir}/module_utils
LC_ALL = C
changedir = {toxinidir}/tests
covtarget = {toxinidir}/library --cov {toxinidir}/module_utils
pytesttarget = .
RUN_PYLINT_EXCLUDE = ^(\..*|ensure_provider_tests\.py|print_all_options\.py)$
RUN_PYTEST_SETUP_MODULE_UTILS = true
RUN_PYLINT_SETUP_MODULE_UTILS = true
RUN_PYTEST_EXTRA_ARGS = -v
RUN_FLAKE8_EXTRA_ARGS = --exclude tests/ensure_provider_tests.py,scripts/print_all_options.py,tests/network/ensure_provider_tests.py,.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg
LSR_PUBLISH_COVERAGE = normal
[testenv:black]
deps = black
commands = black --check --diff --include "^[^.].*\.py$" .
[testenv:py26]
install_command = pip install {opts} {packages}
list_dependencies_command = pip freeze
basepython = python2.6
passenv = {[base]passenv}
setenv =
{[base]setenv}
changedir = {[base]changedir}
commands =
pytest \
--durations=5 \
--cov={[base]covtarget} \
--cov-report=html:htmlcov-py26 --cov-report=term \
{posargs} \
{[base]pytesttarget}
[testenv:py27]
basepython = python2.7
passenv = {[base]passenv}
setenv =
{[base]setenv}
changedir = {[base]changedir}
commands =
pytest \
--durations=5 \
--cov={[base]covtarget} \
--cov-report=html:htmlcov-py27 --cov-report=term \
{posargs} \
{[base]pytesttarget}
[testenv:py36]
basepython = python3.6
passenv = {[base]passenv}
setenv =
{[base]setenv}
changedir = {[base]changedir}
commands =
pytest \
--durations=5 \
--cov={[base]covtarget} \
--cov-report=html:htmlcov-py36 --cov-report=term \
{posargs} \
{[base]pytesttarget}
[testenv:py37]
basepython = python3.7
passenv = {[base]passenv}
setenv =
{[base]setenv}
changedir = {[base]changedir}
commands =
pytest \
--durations=5 \
--cov={[base]covtarget} \
--cov-report=html:htmlcov-py37 --cov-report=term \
{posargs} \
{[base]pytesttarget}
[testenv:py38]
passenv = {[base]passenv}
setenv =
{[base]setenv}
changedir = {[base]changedir}
basepython = python3.8
commands =
pytest \
--durations=5 \
--cov={[base]covtarget} \
--cov-report=html:htmlcov-py38 --cov-report=term \
{posargs} \
{[base]pytesttarget}
[testenv:pylint]
basepython = python2.7
setenv =
{[base]setenv}
deps =
pylint>=1.8.4
ansible
commands =
pylint \
--errors-only \
{posargs} \
library/network_connections.py \
module_utils/network_lsr \
tests/unit/test_network_connections.py
[testenv:flake8]
basepython = python2.7
deps =
flake8>=3.5
whitelist_externals = flake8
commands=
flake8 --statistics {posargs} \
.
[testenv:coveralls]
basepython = python2.7
passenv = TRAVIS TRAVIS_*
deps =
coveralls
changedir = {[base]changedir}
commands =
coveralls
[testenv:ensure_non_running_provider]
deps =
PyYAML
changedir = {toxinidir}/tests
commands = {toxinidir}/tests/ensure_non_running_provider.py
[testenv:molecule_lint]
commands_pre =
molecule --version
ansible --version
commands = molecule {posargs} lint
[testenv:molecule_syntax]
commands = molecule {posargs} syntax
[testenv:molecule_test]
commands = molecule {posargs} test
[pytest]
addopts = -rxs
[flake8]
show_source = True
max-line-length = 88
ignore = E402,W503
[pylint]
max-line-length = 88
disable = wrong-import-position
[pycodestyle]
max-line-length = 88
[travis]
python =
2.6: py26
2.7: py27,coveralls,flake8,pylint
3.5: molecule_lint,molecule_syntax,molecule_test
3.6: py36,black,ensure_non_running_provider
3.7: py37
3.8: py38
[testenv:shellcheck]
commands = bash -c 'echo shellcheck is currently not enabled - please fix this'

View File

@@ -1,2 +1,2 @@
install_date: Wed Jun 24 18:44:35 2020
install_date: Tue Apr 20 16:13:52 2021
version: master

View File

@@ -21,14 +21,21 @@
# Arguments: >
# /S
- name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent"
win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S'
args:
executable: cmd
creates: "{{ ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools"
async: 1
poll: 0
ignore_errors: yes
- block:
- name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent"
win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S'
args:
executable: cmd
creates: "{{ ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools"
async: 1000
poll: 0
rescue:
- name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent"
win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S'
args:
executable: cmd
creates: "{{ ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools"
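# The rescue above simply re-runs the same installer synchronously if the
# fire-and-forget attempt (async with poll: 0) fails; the 'creates' argument
# keeps the retry idempotent by skipping it when the Guest Tools are already
# installed.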
- name: "{{ ansible_distribution | lower }} | wait for system to be online"
wait_for_connection:

View File

@@ -25,6 +25,8 @@ A description of the settable variables for this role should go here, including
Dependencies
------------
Import the ovirt.ovirt collection.
A list of roles that this role utilizes; make sure to list them in a requirements.yml file under the roles directory or download them manually (see the sketch after this list):
- oatakan.windows_template_build
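A minimal requirements.yml sketch covering both dependencies (names taken from this README; adjust as needed):

    ---
    collections:
      - name: ovirt.ovirt
    roles:
      - name: oatakan.windows_template_build

The roles and collections listed there can then be installed with `ansible-galaxy role install -r requirements.yml` and `ansible-galaxy collection install -r requirements.yml` respectively.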
@@ -34,6 +36,7 @@ Example Playbook
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
# import ovirt.ovirt collections
- name: create an oVirt windows template
hosts: all
gather_facts: False

View File

@@ -1,5 +1,6 @@
---
install_updates: yes
instance_wait_retry_limit: 300
instance_wait_connection_timeout: 400
@@ -14,9 +15,14 @@ enable_auto_logon: yes
remove_vm_on_error: yes
vm_failed: no
custom_efi_enabled: no
custom_efi_path: /usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd
virtio_iso_url: https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.173-2/virtio-win.iso
winrm_enable_script_url: https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1
set_network_to_private: '([Activator]::CreateInstance([Type]::GetTypeFromCLSID([Guid]"{DCB00C01-570F-4A9B-8D69-199FDBA5723B}"))).GetNetworkConnections() | % {$_.GetNetwork().SetCategory(1)}'
windows_build_role: oatakan.windows_template_build
local_administrator_password: Chang3MyP@ssw0rd21

View File

@@ -1,2 +1,2 @@
install_date: Wed Jun 24 18:44:33 2020
install_date: Tue Apr 20 16:13:50 2021
version: master

View File

@@ -1,6 +1,6 @@
---
- name: convert to template
ovirt_template:
ovirt.ovirt.ovirt_template:
auth: "{{ ovirt_auth }}"
name: "{{ template.name }}"
vm: "{{ template.name }}"

View File

@@ -2,7 +2,7 @@
- block:
- name: remove iso file from data_domain
ovirt_disk:
ovirt.ovirt.ovirt_disk:
auth: "{{ ovirt_auth }}"
name: "{{ iso_file }}"
storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
@@ -15,7 +15,7 @@
when: ansible_version.full is version('2.9', '>=')
- name: remove iso file from data_domain
ovirt_disk:
ovirt.ovirt.ovirt_disk:
auth: "{{ ovirt_auth }}"
name: "{{ iso_file }}"
storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"

View File

@@ -1,18 +1,25 @@
---
- name: validate file
stat:
path: "{{ playbook_dir }}/{{ temp_directory }}/windows_{{ windows_distro_name }}_autounattend_autogen.iso"
get_checksum: no
register: iso_file_check
- name: upload iso file to data_domain
ovirt_disk:
ovirt.ovirt.ovirt_disk:
auth: "{{ ovirt_auth }}"
name: "{{ iso_file }}"
upload_image_path: "{{ playbook_dir }}/{{ temp_directory }}/windows_{{ windows_distro_name }}_autounattend_autogen.iso"
upload_image_path: "{{ iso_file_check.stat.path }}"
storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
size: 20MiB
size: "{{ (iso_file_check.stat.size/1024/1024)|round(0, 'ceil')|int|string }}MiB"
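# stat.size is reported in bytes; dividing by 1024*1024 and rounding up with
# 'ceil' yields an upload disk size in MiB at least as large as the ISO image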
wait: true
bootable: true
format: raw
content_type: iso
force: yes
register: disk_iso_file
when: iso_file_check.stat.exists
- name: set iso file disk id
set_fact:

View File

@@ -1,7 +1,7 @@
---
- name: export template to export domain
ovirt_template:
ovirt.ovirt.ovirt_template:
auth: "{{ ovirt_auth }}"
state: exported
name: "{{ template.name }}"

View File

@@ -1,7 +1,7 @@
---
- name: obtain SSO token with using username/password credentials
ovirt_auth:
ovirt.ovirt.ovirt_auth:
url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}"
username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}"
password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}"
@@ -62,7 +62,7 @@
delegate_to: template_host
- name: refresh SSO credentials
ovirt_auth:
ovirt.ovirt.ovirt_auth:
url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}"
username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}"
password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}"
@@ -77,7 +77,7 @@
rescue:
- name: refresh SSO credentials
ovirt_auth:
ovirt.ovirt.ovirt_auth:
url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}"
username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}"
password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}"
@@ -92,7 +92,7 @@
always:
- name: refresh SSO credentials
ovirt_auth:
ovirt.ovirt.ovirt_auth:
url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}"
username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}"
password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}"
@@ -109,7 +109,7 @@
state: absent
- name: logout from oVirt
ovirt_auth:
ovirt.ovirt.ovirt_auth:
state: absent
ovirt_auth: "{{ ovirt_auth }}"

View File

@@ -10,6 +10,15 @@
src: "{{ windows_sysprep_template_folder }}/Autounattend.xml.j2"
dest: "{{ temp_directory }}/ks_iso/Autounattend.xml"
- name: download ConfigureRemotingForAnsible.ps1 script
get_url:
url: "{{ winrm_enable_script_url }}"
dest: "{{ temp_directory }}/ks_iso/ConfigureRemotingForAnsible.ps1"
register: download_script
until: download_script is success
delay: 3
retries: 5
- name: include virtio drivers
include_tasks: virtio_drivers.yml

View File

@@ -1,13 +1,13 @@
---
- name: get the datacenter name
ovirt_datacenter_info:
ovirt.ovirt.ovirt_datacenter_info:
auth: "{{ ovirt_auth }}"
pattern: "Clusters.name = {{ providers.ovirt.cluster }}"
register: datacenter_info
- name: get storage information
ovirt_storage_domain_info:
ovirt.ovirt.ovirt_storage_domain_info:
auth: "{{ ovirt_auth }}"
pattern: "datacenter={{ datacenter_info.ovirt_datacenters[0].name }}"
register: storage_info
@@ -29,7 +29,7 @@
the_query: "[?type=='iso']"
- name: check if template already exists
ovirt_template_info:
ovirt.ovirt.ovirt_template_info:
auth: "{{ ovirt_auth }}"
pattern: "name={{ template.name }} and datacenter={{ datacenter_info.ovirt_datacenters[0].name }}"
register: template_info
@@ -48,7 +48,7 @@
- template_info.ovirt_templates | length > 0
- name: check iso file on data domain
ovirt_disk_info:
ovirt.ovirt.ovirt_disk_info:
auth: "{{ ovirt_auth }}"
pattern: "name={{ iso_file_name }}"
register: ovirt_disk_main_iso

View File

@@ -1,12 +1,12 @@
---
- name: get the datacenter name (<2.9)
ovirt_datacenter_facts:
ovirt.ovirt.ovirt_datacenter_facts:
auth: "{{ ovirt_auth }}"
pattern: "Clusters.name = {{ providers.ovirt.cluster }}"
- name: get storage information (<2.9)
ovirt_storage_domain_facts:
ovirt.ovirt.ovirt_storage_domain_facts:
auth: "{{ ovirt_auth }}"
pattern: "datacenter={{ ovirt_datacenters[0].name }}"
when:
@@ -27,7 +27,7 @@
the_query: "[?type=='iso']"
- name: check if template already exists (<2.9)
ovirt_template_facts:
ovirt.ovirt.ovirt_template_facts:
auth: "{{ ovirt_auth }}"
pattern: "name={{ template.name }} and datacenter={{ ovirt_datacenters[0].name }}"
@@ -45,7 +45,7 @@
- ovirt_templates | length > 0
- name: check iso file on data domain
ovirt_disk_facts:
ovirt.ovirt.ovirt_disk_facts:
auth: "{{ ovirt_auth }}"
pattern: "name={{ iso_file_name }}"
when: iso_file_name is defined

View File

@@ -1,7 +1,7 @@
---
- name: provision a new vm
ovirt_vm:
ovirt.ovirt.ovirt_vm:
auth: "{{ ovirt_auth }}"
name: "{{ template.name }}"
cluster: "{{ providers.ovirt.cluster|default('Default') }}"
@@ -9,6 +9,7 @@
wait: yes
memory: "{{ template.memory }}MiB"
cpu_sockets: "{{ template.cpu }}"
bios_type: "{{ template.bios_type | default(omit) }}"
boot_devices:
- hd
- cdrom
@@ -34,7 +35,7 @@
delay: 10
- name: create a disk
ovirt_disk:
ovirt.ovirt.ovirt_disk:
auth: "{{ ovirt_auth }}"
name: "{% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}"
vm_name: "{{ template.name }}"
@@ -82,7 +83,7 @@
- disks_creation.results is defined
- name: assign tags to provisioned vms
ovirt_tag:
ovirt.ovirt.ovirt_tag:
name: "{{ item }}_{{ instance.item.item[item] }}"
vms: ["{{ instance.item.item.name }}"]
state: attached
@@ -96,7 +97,7 @@
- instance.item.item[item] is defined
- name: start vm
ovirt_vm:
ovirt.ovirt.ovirt_vm:
auth: "{{ ovirt_auth }}"
name: "{{ template.name }}"
cluster: "{{ providers.ovirt.cluster|default('Default') }}"

View File

@@ -1,7 +1,7 @@
---
- name: remove template
ovirt_template:
ovirt.ovirt.ovirt_template:
auth: "{{ ovirt_auth }}"
cluster: "{{ providers.ovirt.cluster }}"
name: "{{ template.name }}"

View File

@@ -1,7 +1,7 @@
---
- name: remove vm
ovirt_vm:
ovirt.ovirt.ovirt_vm:
auth: "{{ ovirt_auth }}"
cluster: "{{ providers.ovirt.cluster }}"
name: "{{ template.name }}"

Some files were not shown because too many files have changed in this diff