diff --git a/roles/geerlingguy.gitlab/.gitignore b/roles/geerlingguy.gitlab/.gitignore index f56f5b5..8840c8f 100644 --- a/roles/geerlingguy.gitlab/.gitignore +++ b/roles/geerlingguy.gitlab/.gitignore @@ -1,3 +1,5 @@ *.retry */__pycache__ *.pyc +.cache + diff --git a/roles/geerlingguy.gitlab/.travis.yml b/roles/geerlingguy.gitlab/.travis.yml deleted file mode 100644 index 9409aa5..0000000 --- a/roles/geerlingguy.gitlab/.travis.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -language: python -services: docker - -env: - global: - - ROLE_NAME: gitlab - matrix: - - MOLECULE_DISTRO: centos7 - - MOLECULE_DISTRO: ubuntu1804 - - MOLECULE_DISTRO: debian9 - - MOLECULE_DISTRO: centos7 - MOLECULE_PLAYBOOK: playbook-version.yml - - MOLECULE_DISTRO: ubuntu1804 - MOLECULE_PLAYBOOK: playbook-version.yml - -install: - # Install test dependencies. - - pip install molecule yamllint ansible-lint docker - -before_script: - # Use actual Ansible Galaxy role name for the project directory. - - cd ../ - - mv ansible-role-$ROLE_NAME geerlingguy.$ROLE_NAME - - cd geerlingguy.$ROLE_NAME - -script: - # Run tests. 
- - molecule test - -notifications: - webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/roles/geerlingguy.gitlab/.yamllint b/roles/geerlingguy.gitlab/.yamllint index d43c306..84ecaec 100644 --- a/roles/geerlingguy.gitlab/.yamllint +++ b/roles/geerlingguy.gitlab/.yamllint @@ -1,6 +1,10 @@ --- extends: default + rules: line-length: - max: 140 + max: 180 level: warning + +ignore: | + .github/stale.yml diff --git a/roles/geerlingguy.gitlab/README.md b/roles/geerlingguy.gitlab/README.md index 324167c..63463e9 100644 --- a/roles/geerlingguy.gitlab/README.md +++ b/roles/geerlingguy.gitlab/README.md @@ -1,6 +1,6 @@ # Ansible Role: GitLab -[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-gitlab.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-gitlab) +[![CI](https://github.com/geerlingguy/ansible-role-gitlab/workflows/CI/badge.svg?event=push)](https://github.com/geerlingguy/ansible-role-gitlab/actions?query=workflow%3ACI) Installs GitLab, a Ruby-based front-end to Git, on any RedHat/CentOS or Debian/Ubuntu linux system. @@ -60,6 +60,17 @@ GitLab SSL configuration; tells GitLab to redirect normal http requests to https Whether to create a self-signed certificate for serving GitLab over a secure connection. Set `gitlab_self_signed_cert_subj` according to your locality and organization. +### LetsEncrypt Configuration. + + gitlab_letsencrypt_enable: "false" + gitlab_letsencrypt_contact_emails: ["gitlab@example.com"] + gitlab_letsencrypt_auto_renew_hour: 1 + gitlab_letsencrypt_auto_renew_minute: 30 + gitlab_letsencrypt_auto_renew_day_of_month: "*/7" + gitlab_letsencrypt_auto_renew: true + +GitLab LetsEncrypt configuration; tells GitLab whether to request and use a certificate from LetsEncrypt, if `gitlab_letsencrypt_enable` is set to `"true"`. Multiple contact emails can be configured under `gitlab_letsencrypt_contact_emails` as a list. + # LDAP Configuration. 
gitlab_ldap_enabled: "false" gitlab_ldap_host: "example.com" diff --git a/roles/geerlingguy.gitlab/defaults/main.yml b/roles/geerlingguy.gitlab/defaults/main.yml index 0499186..0762f77 100644 --- a/roles/geerlingguy.gitlab/defaults/main.yml +++ b/roles/geerlingguy.gitlab/defaults/main.yml @@ -73,3 +73,11 @@ gitlab_registry_enable: "false" gitlab_registry_external_url: "https://gitlab.example.com:4567" gitlab_registry_nginx_ssl_certificate: "/etc/gitlab/ssl/gitlab.crt" gitlab_registry_nginx_ssl_certificate_key: "/etc/gitlab/ssl/gitlab.key" + +# LetsEncrypt configuration. +gitlab_letsencrypt_enable: "false" +gitlab_letsencrypt_contact_emails: ["gitlab@example.com"] +gitlab_letsencrypt_auto_renew_hour: 1 +gitlab_letsencrypt_auto_renew_minute: 30 +gitlab_letsencrypt_auto_renew_day_of_month: "*/7" +gitlab_letsencrypt_auto_renew: true diff --git a/roles/geerlingguy.gitlab/meta/.galaxy_install_info b/roles/geerlingguy.gitlab/meta/.galaxy_install_info index 6eaf28c..60757b7 100644 --- a/roles/geerlingguy.gitlab/meta/.galaxy_install_info +++ b/roles/geerlingguy.gitlab/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jun 24 18:44:32 2020 -version: 3.0.0 +install_date: Tue Apr 20 16:13:49 2021 +version: 3.1.0 diff --git a/roles/geerlingguy.gitlab/meta/main.yml b/roles/geerlingguy.gitlab/meta/main.yml index ef25250..75a0d7d 100644 --- a/roles/geerlingguy.gitlab/meta/main.yml +++ b/roles/geerlingguy.gitlab/meta/main.yml @@ -2,6 +2,7 @@ dependencies: [] galaxy_info: + role_name: gitlab author: geerlingguy description: GitLab Git web interface company: "Midwestern Mac, LLC" @@ -10,8 +11,8 @@ galaxy_info: platforms: - name: EL versions: - - 6 - 7 + - 8 - name: Debian versions: - all diff --git a/roles/geerlingguy.gitlab/molecule/default/molecule.yml b/roles/geerlingguy.gitlab/molecule/default/molecule.yml index 2da47dd..7490710 100644 --- a/roles/geerlingguy.gitlab/molecule/default/molecule.yml +++ b/roles/geerlingguy.gitlab/molecule/default/molecule.yml @@ -3,10 +3,6 
@@ dependency: name: galaxy driver: name: docker -lint: | - set -e - yamllint . - ansible-lint platforms: - name: instance image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest" diff --git a/roles/geerlingguy.gitlab/molecule/default/playbook-version.yml b/roles/geerlingguy.gitlab/molecule/default/playbook-version.yml deleted file mode 100644 index f7060c9..0000000 --- a/roles/geerlingguy.gitlab/molecule/default/playbook-version.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: Converge - hosts: all - become: true - - vars: - gitlab_restart_handler_failed_when: false - - pre_tasks: - - name: Update apt cache. - apt: update_cache=true cache_valid_time=600 - when: ansible_os_family == 'Debian' - changed_when: false - - - name: Remove the .dockerenv file so GitLab Omnibus doesn't get confused. - file: - path: /.dockerenv - state: absent - - - name: Set the test GitLab version number for Debian. - set_fact: - gitlab_version: '11.4.0-ce.0' - when: ansible_os_family == 'Debian' - - - name: Set the test GitLab version number for RedHat. 
- set_fact: - gitlab_version: '11.4.0-ce.0.el7' - when: ansible_os_family == 'RedHat' - - roles: - - role: geerlingguy.gitlab diff --git a/roles/geerlingguy.gitlab/templates/gitlab.rb.j2 b/roles/geerlingguy.gitlab/templates/gitlab.rb.j2 index 80088ea..4701776 100644 --- a/roles/geerlingguy.gitlab/templates/gitlab.rb.j2 +++ b/roles/geerlingguy.gitlab/templates/gitlab.rb.j2 @@ -19,6 +19,15 @@ nginx['redirect_http_to_https'] = {{ gitlab_redirect_http_to_https }} nginx['ssl_certificate'] = "{{ gitlab_ssl_certificate }}" nginx['ssl_certificate_key'] = "{{ gitlab_ssl_certificate_key }}" +letsencrypt['enable'] = "{{ gitlab_letsencrypt_enable }}" +{% if gitlab_letsencrypt_enable %} +letsencrypt['contact_emails'] = "{{ gitlab_letsencrypt_contact_emails | to_json }}" +letsencrypt['auto_renew_hour'] = "{{ gitlab_letsencrypt_auto_renew_hour }}" +letsencrypt['auto_renew_minute'] = "{{ gitlab_letsencrypt_auto_renew_minute }}" +letsencrypt['auto_renew_day_of_month'] = "{{ gitlab_letsencrypt_auto_renew_day_of_month }}" +letsencrypt['auto_renew'] = "{{ gitlab_letsencrypt_auto_renew }}" +{% endif %} + # The directory where Git repositories will be stored. git_data_dirs({"default" => {"path" => "{{ gitlab_git_data_dir }}"} }) @@ -82,7 +91,7 @@ nginx['ssl_client_certificate'] = "{{ gitlab_nginx_ssl_client_certificate }}" # GitLab registry. 
registry['enable'] = {{ gitlab_registry_enable }} -{% if gitlab_registry_enable %} +{% if gitlab_registry_enable == "true" %} registry_external_url "{{ gitlab_registry_external_url }}" registry_nginx['ssl_certificate'] = "{{ gitlab_registry_nginx_ssl_certificate }}" registry_nginx['ssl_certificate_key'] = "{{ gitlab_registry_nginx_ssl_certificate_key }}" diff --git a/roles/geerlingguy.java/meta/.galaxy_install_info b/roles/geerlingguy.java/meta/.galaxy_install_info index 7b174c1..577b71d 100644 --- a/roles/geerlingguy.java/meta/.galaxy_install_info +++ b/roles/geerlingguy.java/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jun 24 18:44:31 2020 +install_date: Tue Apr 20 16:13:48 2021 version: 1.10.0 diff --git a/roles/ikke_t.container_image_cleanup/meta/.galaxy_install_info b/roles/ikke_t.container_image_cleanup/meta/.galaxy_install_info index 64e8624..37bdedc 100644 --- a/roles/ikke_t.container_image_cleanup/meta/.galaxy_install_info +++ b/roles/ikke_t.container_image_cleanup/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jun 24 18:44:38 2020 +install_date: Tue Apr 20 16:13:55 2021 version: master diff --git a/roles/ikke_t.podman_container_systemd/README.md b/roles/ikke_t.podman_container_systemd/README.md index 2474ccc..56e37d7 100644 --- a/roles/ikke_t.podman_container_systemd/README.md +++ b/roles/ikke_t.podman_container_systemd/README.md @@ -59,10 +59,12 @@ Role uses variables that are required to be passed while including it. As there is option to run one container separately or multiple containers in pod, note that some options apply only to other method. -- ```container_image``` - container image and tag, e.g. nextcloud:latest - This is used only if you run single container -- ```container_image_list``` - list of container images to run within a pod. - This is used only if you run containers in pod. +- ```container_image_list``` - list of container images to run. 
+ If more than one image is defined, then the containers will be run in a pod. +- ```container_image_user``` - optional username to use when authenticating + to remote registries +- ```container_image_password``` - optional password to use when authenticating + to remote registries - ```container_name``` - Identify the container in systemd and podman commands. Systemd service file be named container_name--container-pod.service. - ```container_run_args``` - Anything you pass to podman, except for the name @@ -88,12 +90,14 @@ command line. See ```man podman``` or [podman tutorials](https://github.com/containers/libpod/tree/master/docs/tutorials) for info. - +If you want your +[images to be automatically updated](http://docs.podman.io/en/latest/markdown/podman-auto-update.1.html), +add this label to container_cmd_args: ```--label "io.containers.autoupdate=image"``` Dependencies ------------ -No dependencies. +* [containers.podman](https://galaxy.ansible.com/containers/podman) (collection) Example Playbook ---------------- @@ -105,11 +109,13 @@ Root container: ``` - name: tests container vars: - container_image: sebp/lighttpd:latest + container_image_list: + - sebp/lighttpd:latest container_name: lighttpd container_run_args: >- --rm -v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z + --label "io.containers.autoupdate=image" -p 8080:80 #container_state: absent container_state: running @@ -139,7 +145,8 @@ Rootless container: vars: container_run_as_user: rootless_user container_run_as_group: rootless_user - container_image: sebp/lighttpd:latest + container_image_list: + - sebp/lighttpd:latest container_name: lighttpd container_run_args: >- --rm diff --git a/roles/ikke_t.podman_container_systemd/defaults/main.yml b/roles/ikke_t.podman_container_systemd/defaults/main.yml index 591f3da..29eee9f 100644 --- a/roles/ikke_t.podman_container_systemd/defaults/main.yml +++ b/roles/ikke_t.podman_container_systemd/defaults/main.yml @@ -3,14 +3,12 @@ # state can be running 
or absent container_state: running -# systemd service name -service_name: "{{ container_name }}-container-pod.service" - # SystemD restart policy # see man systemd.service for info # by default we want to restart failed container container_restart: on-failure service_files_dir: /etc/systemd/system +systemd_scope: system systemd_TimeoutStartSec: 15 systemd_RestartSec: 30 systemd_tempdir: "{{ '/tmp' if ansible_os_family == 'RedHat' and @@ -19,5 +17,13 @@ container_run_as_user: root container_run_as_group: root container_stop_timeout: 15 +# systemd service name +service_name: "{{ container_name }}-container-pod-{{ container_run_as_user }}.service" + # to sepped up you can disable always checking if podman is installed. skip_podman_install: true + +podman_dependencies_rootless: + - fuse-overlayfs + - slirp4netns + - uidmap diff --git a/roles/ikke_t.podman_container_systemd/handlers/main.yml b/roles/ikke_t.podman_container_systemd/handlers/main.yml index 966c50d..d45c343 100644 --- a/roles/ikke_t.podman_container_systemd/handlers/main.yml +++ b/roles/ikke_t.podman_container_systemd/handlers/main.yml @@ -1,15 +1,40 @@ --- - name: reload systemctl + become: true + become_user: "{{ container_run_as_user }}" + environment: + XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}" systemd: - daemon_reload: yes + daemon_reload: true + scope: "{{ systemd_scope }}" - name: start service + become: true + become_user: "{{ container_run_as_user }}" + environment: + XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}" systemd: name: "{{ service_name }}" + scope: "{{ systemd_scope }}" state: started - name: restart service + become: true + become_user: "{{ container_run_as_user }}" + environment: + XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}" systemd: name: "{{ service_name }}" + scope: "{{ systemd_scope }}" state: restarted + +- name: enable service + become: true + become_user: "{{ container_run_as_user }}" + environment: + XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}" + systemd: + name: "{{ service_name }}" + 
enabled: true + scope: "{{ systemd_scope }}" diff --git a/roles/ikke_t.podman_container_systemd/meta/.galaxy_install_info b/roles/ikke_t.podman_container_systemd/meta/.galaxy_install_info index 243c6b9..61ca4ff 100644 --- a/roles/ikke_t.podman_container_systemd/meta/.galaxy_install_info +++ b/roles/ikke_t.podman_container_systemd/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jun 24 18:44:37 2020 -version: master +install_date: Tue Apr 20 16:13:54 2021 +version: 2.1.0 diff --git a/roles/ikke_t.podman_container_systemd/meta/main.yml b/roles/ikke_t.podman_container_systemd/meta/main.yml index 63d58f7..bccfb7a 100644 --- a/roles/ikke_t.podman_container_systemd/meta/main.yml +++ b/roles/ikke_t.podman_container_systemd/meta/main.yml @@ -7,6 +7,9 @@ galaxy_info: license: GPLv3 min_ansible_version: 2.4 platforms: + - name: Debian + versions: + - buster - name: EL versions: - 8 @@ -14,9 +17,26 @@ galaxy_info: - name: Fedora versions: - all + - name: Ubuntu + versions: + - bionic + - disco + - eoan + - focal galaxy_tags: - podman - container - systemd -dependencies: [] +dependencies: + - role: systemli.apt_repositories + vars: + apt_repositories: + - preset: kubic + when: > + (ansible_distribution == 'Debian' and + ansible_distribution_release == 'buster') or + ansible_distribution == 'Ubuntu' + +collections: + - containers.podman diff --git a/roles/ikke_t.podman_container_systemd/tasks/main.yml b/roles/ikke_t.podman_container_systemd/tasks/main.yml index 1903e17..bdea812 100644 --- a/roles/ikke_t.podman_container_systemd/tasks/main.yml +++ b/roles/ikke_t.podman_container_systemd/tasks/main.yml @@ -1,5 +1,48 @@ --- +- name: prepare rootless stuff if needed + block: + + - name: get user information + user: + name: "{{ container_run_as_user }}" + check_mode: true + register: user_info + + - name: set systemd dir if user is not root + set_fact: + service_files_dir: "{{ user_info.home }}/.config/systemd/user" + systemd_scope: user + changed_when: false + + - name: 
ensure systemd files directory exists if user not root + file: + path: "{{ service_files_dir }}" + state: directory + owner: "{{ container_run_as_user }}" + group: "{{ container_run_as_group }}" + + when: container_run_as_user != "root" + +- name: "Find uid of user" + command: "id -u {{ container_run_as_user }}" + register: container_run_as_uid + check_mode: false # Run even in check mode, to avoid fail with --check. + changed_when: false + +- name: set systemd runtime dir + set_fact: + xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}" + changed_when: false + +- name: set systemd scope to system if needed + set_fact: + systemd_scope: system + service_files_dir: '/etc/systemd/system' + xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}" + when: container_run_as_user == "root" + changed_when: false + - name: check if service file exists already stat: path: "{{ service_files_dir }}/{{ service_name }}" @@ -8,99 +51,95 @@ - name: do tasks when "{{ service_name }}" state is "running" block: + - name: Check for user namespace support in kernel + stat: + path: /proc/sys/kernel/unprivileged_userns_clone + register: unprivileged_userns_clone + changed_when: false + + - name: Allow unprivileged users on Debian + sysctl: + name: kernel.unprivileged_userns_clone + value: '1' + state: present + sysctl_file: /etc/sysctl.d/userns.conf + sysctl_set: true + when: + - ansible_distribution == 'Debian' + - unprivileged_userns_clone.stat.exists + + - name: Install rootless dependencies on Debian-based + package: + name: "{{ podman_dependencies_rootless }}" + state: present + when: + - ansible_os_family == 'Debian' + - container_run_as_user != 'root' + - name: ensure podman is installed package: name: podman - state: installed + state: present when: not skip_podman_install - name: check user exists user: name: "{{ container_run_as_user }}" - - name: check if user is in subuid file - lineinfile: - line: '\1' - path: /etc/subuid - regexp: "^({{ container_run_as_user 
}}:.*)" - backrefs: yes - check_mode: yes - register: uid_has - ignore_errors: true - when: container_run_as_user != 'root' - - - name: check if group is in subgid file - lineinfile: - line: '\1' - path: /etc/subgid - regexp: "^({{ container_run_as_group }}:.*)" - backrefs: yes - check_mode: yes - register: gid_has - ignore_errors: true - when: container_run_as_group != 'root' - - - name: ensure user is in subuid file, if it was missing - lineinfile: - path: /etc/subuid - regexp: "^{{ container_run_as_user }}:.*" - line: "{{ container_run_as_user }}:305536:65536" - create: yes - mode: '0644' - owner: root - group: root - when: uid_has.changed and container_run_as_user != 'root' - - - name: ensure group is in subgid file, if it was missing - lineinfile: - path: /etc/subgid - regexp: "^{{ container_run_as_group }}:.*" - line: "{{ container_run_as_group }}:305536:65536" - create: yes - mode: '0644' - owner: root - group: root - when: gid_has.changed and container_run_as_group != 'root' + - name: Check subuid & subgid + import_tasks: check_subid.yml - name: running single container, get image Id if it exists and we are root - # XXX podman doesn't work through sudo for non root users, so skip preload if user + # XXX podman doesn't work through sudo for non root users, + # so skip preload if user # https://github.com/containers/libpod/issues/5570 # command: podman inspect -f {{.Id}} "{{ container_image }}" - command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ container_image }}" + command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}" register: pre_pull_id - ignore_errors: yes - when: container_image is defined and container_run_as_user == 'root' + ignore_errors: true + when: + - container_image_list is defined + - container_image_list | length == 1 + - container_run_as_user == 'root' + with_items: "{{ container_image_list }}" - name: running single container, ensure we have up to date container image - command: "podman pull {{ container_image 
}}" - become: yes + containers.podman.podman_image: + name: "{{ item }}" + force: true + username: "{{ container_image_user | default(omit) }}" + password: "{{ container_image_password | default(omit) }}" + notify: restart service + become: true become_user: "{{ container_run_as_user }}" - when: container_image is defined and container_run_as_user == 'root' + when: + - container_image_list is defined + - container_image_list | length == 1 + - container_run_as_user == 'root' + with_items: "{{ container_image_list }}" - name: running single container, get image Id if it exists - command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ container_image }}" - become: yes + command: + "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}" + become: true become_user: "{{ container_run_as_user }}" register: post_pull_id - when: container_image is defined and container_run_as_user == 'root' - - - name: force restart after image change - debug: msg="image has changed" - changed_when: True - notify: restart service + ignore_errors: true when: + - container_image_list is defined + - container_image_list | length == 1 - container_run_as_user == 'root' - - container_image is defined - - pre_pull_id.stdout != post_pull_id.stdout - - pre_pull_id is succeeded - - # XXX remove above comparison if future podman tells image changed. 
+ with_items: "{{ container_image_list }}" - name: seems we use several container images, ensure all are up to date - command: "podman pull {{ item }}" - become: yes + containers.podman.podman_image: + name: "{{ item }}" + force: true + username: "{{ container_image_user | default(omit) }}" + password: "{{ container_image_password | default(omit) }}" + become: true become_user: "{{ container_run_as_user }}" - when: container_image_list is defined + when: container_image_list is defined and container_image_list | length > 1 with_items: "{{ container_image_list }}" - name: if running pod, ensure configuration file exists @@ -110,11 +149,25 @@ when: container_pod_yaml is defined - name: fail if pod configuration file is missing fail: - msg: "Error: Asking to run pod, but pod definition yaml file is missing: {{ container_pod_yaml }}" + msg: > + "Error: Asking to run pod, but pod definition yaml file is missing: " + "{{ container_pod_yaml }}" when: - container_pod_yaml is defined - not pod_file.stat.exists + - name: Check if user is lingering + stat: + path: "/var/lib/systemd/linger/{{ container_run_as_user }}" + register: user_lingering + when: container_run_as_user != "root" + + - name: Enable lingering is needed + command: "loginctl enable-linger {{ container_run_as_user }}" + when: + - container_run_as_user != "root" + - not user_lingering.stat.exists + - name: "create systemd service file for container: {{ container_name }}" template: src: systemd-service-single.j2 @@ -122,9 +175,12 @@ owner: root group: root mode: 0644 - notify: reload systemctl + notify: + - reload systemctl + - start service + - enable service register: service_file - when: container_image is defined + when: container_image_list is defined and container_image_list | length == 1 - name: "create systemd service file for pod: {{ container_name }}" template: @@ -136,24 +192,13 @@ notify: - reload systemctl - start service + - enable service register: service_file - when: container_image_list is 
defined - - - name: ensure "{{ service_name }}" is enabled at boot, and systemd reloaded - systemd: - name: "{{ service_name }}" - enabled: yes - daemon_reload: yes - - - name: ensure "{{ service_name }}" is running - service: - name: "{{ service_name }}" - state: started - when: not service_file_before_template.stat.exists + when: container_image_list is defined and container_image_list | length > 1 - name: "ensure {{ service_name }} is restarted due config change" debug: msg="config has changed:" - changed_when: True + changed_when: true notify: restart service when: - service_file_before_template.stat.exists @@ -169,14 +214,32 @@ fw_state: enabled when: container_state == "running" - - name: set firewall ports state to disabled when container state is not running + - name: disable firewall ports state when container state is not running set_fact: fw_state: disabled when: container_state != "running" - name: ensure firewalld is installed tags: firewall - package: name=firewalld state=installed + package: name=firewalld state=present + when: ansible_pkg_mgr != "atomic_container" + + - name: ensure firewalld is installed (on fedora-iot) + tags: firewall + command: >- + rpm-ostree install --idempotent --unchanged-exit-77 + --allow-inactive firewalld + register: ostree + failed_when: not ( ostree.rc == 77 or ostree.rc == 0 ) + changed_when: ostree.rc != 77 + when: ansible_pkg_mgr == "atomic_container" + + - name: reboot if new stuff was installed + reboot: + reboot_timeout: 300 + when: + - ansible_pkg_mgr == "atomic_container" + - ostree.rc != 77 - name: ensure firewall service is running tags: firewall @@ -186,11 +249,14 @@ tags: firewall firewalld: port: "{{ item }}" - permanent: yes - immediate: yes + permanent: true + immediate: true state: "{{ fw_state }}" with_items: "{{ container_firewall_ports }}" + - name: Force all notified handlers to run at this point + meta: flush_handlers + when: container_firewall_ports is defined @@ -198,17 +264,29 @@ block: - name: 
ensure "{{ service_name }}" is disabled at boot - service: + become: true + become_user: "{{ container_run_as_user }}" + # become_method: machinectl + environment: + XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}" + systemd: name: "{{ service_name }}" enabled: false + scope: "{{ systemd_scope }}" when: - service_file_before_template.stat.exists - name: ensure "{{ service_name }}" is stopped - service: + become: true + become_user: "{{ container_run_as_user }}" + # become_method: machinectl + environment: + XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}" + systemd: name: "{{ service_name }}" state: stopped - enabled: no + enabled: false + scope: "{{ systemd_scope }}" when: - service_file_before_template.stat.exists @@ -218,6 +296,21 @@ state: absent notify: reload systemctl + - name: Force all notified handlers to run at this point + meta: flush_handlers + + - name: Check if user is lingering + stat: + path: "/var/lib/systemd/linger/{{ container_run_as_user }}" + register: user_lingering + when: container_run_as_user != "root" + + - name: Disable lingering (are we sure we want to do this always?) 
+ command: "loginctl disable-linger {{ container_run_as_user }}" + when: + - container_run_as_user != "root" + - user_lingering.stat.exists + - name: clean up pod configuration file file: path: "{{ container_pod_yaml }}" diff --git a/roles/ikke_t.podman_container_systemd/templates/systemd-service-pod.j2 b/roles/ikke_t.podman_container_systemd/templates/systemd-service-pod.j2 index 3169032..6dde870 100644 --- a/roles/ikke_t.podman_container_systemd/templates/systemd-service-pod.j2 +++ b/roles/ikke_t.podman_container_systemd/templates/systemd-service-pod.j2 @@ -6,7 +6,9 @@ After=network.target Type=forking TimeoutStartSec={{ systemd_TimeoutStartSec }} ExecStartPre=-/usr/bin/podman pod rm -f {{ container_name }} +{% if container_run_as_user == 'root' %} User={{ container_run_as_user }} +{% endif %} RemainAfterExit=yes ExecStart=/usr/bin/podman play kube {{ container_pod_yaml }} @@ -18,4 +20,9 @@ Restart={{ container_restart }} RestartSec={{ systemd_RestartSec }} [Install] +{% if container_run_as_user == 'root' %} WantedBy=multi-user.target +{% endif %} +{% if container_run_as_user != 'root' %} +WantedBy=default.target +{% endif %} diff --git a/roles/ikke_t.podman_container_systemd/templates/systemd-service-single.j2 b/roles/ikke_t.podman_container_systemd/templates/systemd-service-single.j2 index 985aade..bc648d0 100644 --- a/roles/ikke_t.podman_container_systemd/templates/systemd-service-single.j2 +++ b/roles/ikke_t.podman_container_systemd/templates/systemd-service-single.j2 @@ -6,20 +6,27 @@ After=network.target Type=simple TimeoutStartSec={{ systemd_TimeoutStartSec }} ExecStartPre=-/usr/bin/rm -f {{ pidfile }} {{ cidfile }} +{% if container_run_as_user == 'root' %} User={{ container_run_as_user }} +{% endif %} ExecStart=/usr/bin/podman run --name {{ container_name }} \ {{ container_run_args }} \ --conmon-pidfile {{ pidfile }} --cidfile {{ cidfile }} \ - {{ container_image }} {% if container_cmd_args is defined %} \ + {{ container_image_list | first }} {% if 
container_cmd_args is defined %} \ {{ container_cmd_args }} {% endif %} ExecStop=/usr/bin/sh -c "/usr/bin/podman stop -t "{{ container_stop_timeout }}" `cat {{ cidfile }}`" ExecStop=/usr/bin/sh -c "/usr/bin/podman rm -f `cat {{ cidfile }}`" Restart={{ container_restart }} RestartSec={{ systemd_RestartSec }} -KillMode=none +KillMode=mixed PIDFile={{ pidfile }} [Install] +{% if container_run_as_user == 'root' %} WantedBy=multi-user.target +{% endif %} +{% if container_run_as_user != 'root' %} +WantedBy=default.target +{% endif %} diff --git a/roles/ikke_t.podman_container_systemd/tests/test.yml b/roles/ikke_t.podman_container_systemd/tests/test.yml index 907ec41..af8bb36 100644 --- a/roles/ikke_t.podman_container_systemd/tests/test.yml +++ b/roles/ikke_t.podman_container_systemd/tests/test.yml @@ -1,10 +1,14 @@ --- +# yamllint disable rule:line-length + # I run this file with following line to test against my Vagrant Fedora: # ansible-playbook --vault-password-file .vault-password -b -i \ # ~/vagrant/fedora/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory \ # -e ansible_python_interpreter=/usr/bin/python3 \ # -e container_state=running test-podman.yml +# yamllint enable rule:line-length + - name: create lighttpd pod hosts: all # connection: local @@ -25,12 +29,14 @@ - name: tests container vars: container_state: running - #container_state: absent - container_image: sebp/lighttpd:latest + # container_state: absent + container_image_list: + - sebp/lighttpd:latest container_name: lighttpd container_run_args: >- --rm -v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z + -t -p 8080:80/tcp container_firewall_ports: - 8080/tcp diff --git a/roles/ikke_t.podman_container_systemd/vars/main.yml b/roles/ikke_t.podman_container_systemd/vars/main.yml index 2e06fe0..5806a2b 100644 --- a/roles/ikke_t.podman_container_systemd/vars/main.yml +++ b/roles/ikke_t.podman_container_systemd/vars/main.yml @@ -1,7 +1,6 @@ --- # systemd service name -service_name: "{{ 
container_name }}-container-pod.service" cidpid_base: "{{ systemd_tempdir }}/%n-" cidfile: "{{ cidpid_base }}cid" pidfile: "{{ cidpid_base }}pid" diff --git a/roles/linux-system-roles.network/.gitignore b/roles/linux-system-roles.network/.gitignore index b54b2fb..69dd322 100644 --- a/roles/linux-system-roles.network/.gitignore +++ b/roles/linux-system-roles.network/.gitignore @@ -9,3 +9,5 @@ /tests/tmp_merge_coveragerc /tests/total-*coveragedata /.tox +/.vagrant +/.vscode diff --git a/roles/linux-system-roles.network/.travis.yml b/roles/linux-system-roles.network/.travis.yml deleted file mode 100644 index 8e76d66..0000000 --- a/roles/linux-system-roles.network/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -dist: xenial -language: python -matrix: - include: - - python: 2.6 - dist: trusty - - python: 2.7 - - python: 3.5 - env: aptpkgs=python3-selinux - - python: 3.6 - - python: 3.7 - - python: 3.7-dev - - python: 3.8-dev - # - python: nightly - -services: - - docker - -before_install: - - if [ -n "${aptpkgs}" ]; then sudo apt-get install -y python3-selinux; fi - -install: - - pip install tox tox-travis - -script: - - tox diff --git a/roles/linux-system-roles.network/README.md b/roles/linux-system-roles.network/README.md index 2830b4e..0648165 100644 --- a/roles/linux-system-roles.network/README.md +++ b/roles/linux-system-roles.network/README.md @@ -1,8 +1,10 @@ linux-system-roles/network ========================== + [![Coverage Status](https://coveralls.io/repos/github/linux-system-roles/network/badge.svg)](https://coveralls.io/github/linux-system-roles/network) -[![Travis Build Status](https://travis-ci.org/linux-system-roles/network.svg?branch=master)](https://travis-ci.org/linux-system-roles/network) +![CI Testing](https://github.com/linux-system-roles/network/workflows/tox/badge.svg) [![Code Style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black) +[![Language grade: 
Python](https://img.shields.io/lgtm/grade/python/g/linux-system-roles/network.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/linux-system-roles/network/context:python) Overview -------- @@ -13,13 +15,16 @@ This role can be used to configure: - Ethernet interfaces - Bridge interfaces - Bonded interfaces -- VLAN interfaces +- VLAN interfaces - MacVLAN interfaces - Infiniband interfaces +- Wireless (WiFi) interfaces - IP configuration +- 802.1x authentication Introduction ------------ + The `network` role supports two providers: `nm` and `initscripts`. `nm` is used by default in RHEL7 and `initscripts` in RHEL6. These providers can be configured per host via the [`network_provider`](#provider) variable. In @@ -32,19 +37,20 @@ For `initscripts`, the legacy network service is required as used in Fedora or R For each host a list of networking profiles can be configured via the `network_connections` variable. -- For `initscripts`, profiles correspond to ifcfg files in the `/etc/sysconfig/network-scripts/ifcfg-*` directory. +- For `initscripts`, profiles correspond to ifcfg files in the + `/etc/sysconfig/network-scripts/ifcfg-*` directory. - For `NetworkManager`, profiles correspond to connection profiles as handled by NetworkManager. Fedora and RHEL use the `ifcfg-rh-plugin` for NetworkManager, which also writes or reads configuration files to `/etc/sysconfig/network-scripts/ifcfg-*` for compatibility. -Note that the `network` role primarily operates on networking profiles (connections) and -not on devices, but it uses the profile name by default as the interface name. -It is also possible to create generic profiles, by creating for example a -profile with a certain IP configuration without activating the profile. To -apply the configuration to the actual networking interface, use the `nmcli` -commands on the target system. 
+Note that the `network` role primarily operates on networking profiles +(connections) and not on devices, but it uses the profile name by default as +the interface name. It is also possible to create generic profiles, by creating +for example a profile with a certain IP configuration without activating the +profile. To apply the configuration to the actual networking interface, use the +`nmcli` commands on the target system. **Warning**: The `network` role updates or creates all connection profiles on the target system as specified in the `network_connections` variable. Therefore, @@ -54,17 +60,25 @@ Exceptions are mentioned below. Variables --------- -The `network` role is configured via variables starting with `network_` as the name prefix. -List of variables: -* `network_provider` - The `network_provider` variable allows to set a specific - provider (`nm` or `initscripts`) . Setting it to `{{ network_provider_os_default }}`, - the provider is set depending on the operating system. This is usually `nm` - except for RHEL 6 or CentOS 6 systems. - -* `network_connections` - The connection profiles are configured as `network_connections`, - which is a list of dictionaries that include specific options. +The `network` role is configured via variables starting with `network_` as +the name prefix. List of variables: +- `network_provider` - The `network_provider` variable allows to set a specific + provider (`nm` or `initscripts`) . Setting it to `{{ + network_provider_os_default }}`, the provider is set depending on the + operating system. This is usually `nm` except for RHEL 6 or CentOS 6 systems. + Changing the provider for an existing profile is not supported. To switch + providers, it is recommended to first remove profiles with the old provider + and then create new profiles with the new provider. +- `network_connections` - The connection profiles are configured as + `network_connections`, which is a list of dictionaries that include specific + options. 
+- `network_allow_restart` - Certain configurations require the role to restart + network services. For example, if a wireless connection is configured and + NetworkManager-wifi is not installed, NetworkManager must be restarted prior + to the connection being configured. Setting this to `no` will prevent the + role from restarting network service. Examples of Variables --------------------- @@ -76,12 +90,14 @@ network_provider: nm network_connections: - name: eth0 #... +network_allow_restart: yes ``` Options ------- -The `network_connections` variable is a list of dictionaries that include the following options. -List of options: + +The `network_connections` variable is a list of dictionaries that include the +following options. List of options: ### `name` (required) @@ -92,38 +108,42 @@ Note that you can have multiple profiles for the same device, but only one profile can be active on the device each time. For NetworkManager, a connection can only be active at one device each time. -* For `NetworkManager`, the `name` option corresponds to the +- For `NetworkManager`, the `name` option corresponds to the [`connection.id`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.connection.id) property option. Although NetworkManager supports multiple connections with the same `connection.id`, the `network` role cannot handle a duplicate `name`. Specifying a `name` multiple times refers to the same connection profile. -* For `initscripts`, the `name` option determines the ifcfg file name `/etc/sysconfig/network-scripts/ifcfg-$NAME`. +- For `initscripts`, the `name` option determines the ifcfg file name `/etc/sysconfig/network-scripts/ifcfg-$NAME`. Note that the `name` does not specify the `DEVICE` but a filename. As a consequence, `'/'` is not a valid character for the `name`. -You can also use the same connection profile multiple times. Therefore, it is possible to create a profile and activate it separately. 
+You can also use the same connection profile multiple times. Therefore, it is possible +to create a profile and activate it separately. ### `state` -The `state` option identifies what is the runtime state of each connection profile. The `state` option (optional) can be set to the following values: +The `state` option identifies what is the runtime state of each connection profile. The +`state` option (optional) can be set to the following values: -* `up` - the connection profile is activated -* `down` - the connection profile is deactivated +- `up` - the connection profile is activated +- `down` - the connection profile is deactivated #### `state: up` + - For `NetworkManager`, this corresponds to `nmcli connection id {{name}} up`. - For `initscripts`, this corresponds to `ifup {{name}}`. When the `state` option is set to `up`, you can also specify the `wait` option (optional): -* `wait: 0` - initiates only the activation, but does not wait until the device is fully connected. -The connection will be completed in the background, for example after a DHCP lease was received. -* `wait: ` is a timeout that enables you to decide how long you give the device to -activate. The default is using a suitable timeout. Note that the `wait` option is -only supported by NetworkManager. +- `wait: 0` - initiates only the activation, but does not wait until the device is fully + connected. The connection will be completed in the background, for example after a + DHCP lease was received. +- `wait: ` is a timeout that enables you to decide how long you give the device + to activate. The default is using a suitable timeout. Note that the `wait` option is + only supported by NetworkManager. Note that `state: up` always re-activates the profile and possibly changes the networking configuration, even if the profile was already active before. As @@ -135,14 +155,16 @@ a consequence, `state: up` always changes the system. - For `initscripts`, it corresponds to call `ifdown {{name}}`. 
+You can deactivate a connection profile, even if it is currently not active.
That makes the device + eligible to autoconnect another connection (for more details, see + [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515)). -- `initscripts` deletes the ifcfg file in most cases with no impact on the runtime state of the system unless some component is watching the sysconfig directory. +- `initscripts` deletes the ifcfg file in most cases with no impact on the runtime state + of the system unless some component is watching the sysconfig directory. **Note**: For profiles that only contain a `state` option, the `network` role only activates or deactivates the connection without changing its configuration. - ### `type` The `type` option can be set to the following values: - - `ethernet` - - `bridge` - - `bond` - - `team` - - `vlan` - - `macvlan` - - `infiniband` +- `ethernet` +- `bridge` +- `bond` +- `team` +- `vlan` +- `macvlan` +- `infiniband` +- `wireless` #### `type: ethernet` @@ -191,44 +215,65 @@ If the type is `ethernet`, then there can be an extra `ethernet` dictionary with items (options): `autoneg`, `speed` and `duplex`, which correspond to the settings of the `ethtool` utility with the same name. -* `autoneg`: `yes` (default) or `no` [if auto-negotiation is enabled or disabled] -* `speed`: speed in Mbit/s -* `duplex`: `half` or `full` +- `autoneg`: `yes` (default) or `no` [if auto-negotiation is enabled or disabled] +- `speed`: speed in Mbit/s +- `duplex`: `half` or `full` -Note that the `speed` and `duplex` link settings are required when autonegotiation is disabled (autoneg:no). +Note that the `speed` and `duplex` link settings are required when autonegotiation is +disabled (`autoneg: no`). #### `type: bridge`, `type: bond`, `type: team` -The `bridge`, `bond`, `team` device types work similar. Note that `team` is not supported in RHEL6 kernels. +The `bridge`, `bond`, `team` device types work similar. Note that `team` is not +supported in RHEL6 kernels. -For slaves, the `slave_type` and `master` properties must be set. 
Note that slaves should not have `ip` settings. +For ports, the `port_type` and `controller` properties must be set. Note that ports +should not have `ip` settings. -The `master` refers to the `name` of a profile in the Ansible +The `controller` refers to the `name` of a profile in the Ansible playbook. It is neither an interface-name nor a connection-id of NetworkManager. -- For NetworkManager, `master` will be converted to the `connection.uuid` +- For NetworkManager, `controller` will be converted to the `connection.uuid` of the corresponding profile. -- For initscripts, the master is looked up as the `DEVICE` from the corresponding +- For initscripts, the controller is looked up as the `DEVICE` from the corresponding ifcfg file. -As `master` refers to other profiles of the same or another play, -the order of the `connections` list matters. Also, `--check` ignores -the value of the `master` and assumes it will be present during a real -run. That means, in presence of an invalid `master`, `--check` may -signal success but the actual play run fails. +As `controller` refers to other profiles of the same or another play, the order of the +`connections` list matters. Profiles that are referenced by other profiles need to be +specified first. Also, `--check` ignores the value of the `controller` and assumes it +will be present during a real run. That means, in presence of an invalid `controller`, +`--check` may signal success but the actual play run fails. + +The `team` type uses `roundrobin` as the `runner` configuration. No further +configuration is supported at the moment. #### `type: vlan` -Similar to `master`, the `parent` references the connection profile in the ansible +Similar to `controller`, the `parent` references the connection profile in the ansible role. #### `type: macvlan` -Similar to `master` and `vlan`, the `parent` references the connection profile in the ansible -role. 
+Similar to `controller` and `vlan`, the `parent` references the connection profile in +the ansible role. +#### `type: wireless` + +The `wireless` type supports WPA-PSK (password) authentication and WPA-EAP (802.1x) +authentication. + +`nm` (NetworkManager) is the only supported `network_provider` for this type. + +If WPA-EAP is used, ieee802_1x settings must be defined in the +[ieee802_1x](#-`ieee802_1x`) option. + +The following options are supported: + +- `ssid`: the SSID of the wireless network (required) +- `key_mgmt`: `wpa-psk` or `wpa-eap` (required) +- `password`: password for the network (required if `wpa-psk` is used) ### `autoconnect` @@ -243,19 +288,28 @@ By default, profiles are created with autoconnect enabled. The `mac` address is optional and restricts the profile to be usable only on devices with the given MAC address. `mac` is only allowed for `type` `ethernet` or `infiniband` to match a non-virtual device with the -profile. +profile. The value of the `mac` address needs to be specified in hexadecimal notation +using colons (for example: `mac: "00:00:5e:00:53:5d"`). To avoid YAML parsing mac +addresses as integers in sexagesimal (base 60) notation (see +), it is recommended to always quote the value +with double quotes and sometimes it is necessary. - For `NetworkManager`, `mac` is the permanent MAC address, `ethernet.mac-address`. - For `initscripts`, `mac` is the currently configured MAC address of the device (`HWADDR`). +### `mtu` + +The `mtu` option denotes the maximum transmission unit for the profile's +device. The maximum value depends on the device. For virtual devices, the +maximum value of the `mtu` option depends on the underlying device. + ### `interface_name` -For the `ethernet` and `infiniband` types, the `interface_name` option restricts the profile to -the given interface by name. This argument is optional and by default the -profile name is used unless a mac address is specified using the `mac` key. 
-Specifying an empty string (`""`) means that the profile is not -restricted to a network interface. +For the `ethernet` and `infiniband` types, the `interface_name` option restricts the +profile to the given interface by name. This argument is optional and by default the +profile name is used unless a mac address is specified using the `mac` key. Specifying +an empty string (`""`) means that the profile is not restricted to a network interface. **Note:** With [persistent interface naming](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/ch-Consistent_Network_Device_Naming.html), the interface is predictable based on the hardware configuration. @@ -271,26 +325,25 @@ different or the profile may not be tied to an interface at all. The `zone` option sets the firewalld zone for the interface. -Slaves to the bridge, bond or team devices cannot specify a zone. - +Ports to the bridge, bond or team devices cannot specify a zone. ### `ip` The IP configuration supports the following options: -* `address` +- `address` Manual addressing can be specified via a list of addresses under the `address` option. -* `dhcp4` and `auto6` +- `dhcp4`, `auto6`, and `ipv6_disabled` Also, manual addressing can be specified by setting either `dhcp4` or `auto6`. The `dhcp4` key is for DHCPv4 and `auto6` for StateLess Address Auto Configuration (SLAAC). Note that the `dhcp4` and `auto6` keys can be omitted and the default key - depends on the presence of manual addresses. + depends on the presence of manual addresses. `ipv6_disabled` can be set to disable + ipv6 for the connection. - -* `dhcp4_send_hostname` +- `dhcp4_send_hostname` If `dhcp4` is enabled, it can be configured whether the DHCPv4 request includes the hostname via the `dhcp4_send_hostname` option. 
Note that `dhcp4_send_hostname` @@ -298,110 +351,252 @@ The IP configuration supports the following options: [`ipv4.dhcp-send-hostname`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.dhcp-send-hostname) property. -* `dns` and `dns_search` +- `dns` - Manual DNS configuration can be specified via a list of addresses - given in the `dns` option and a list of domains to search given in the - `dns_search` option. + Manual DNS configuration can be specified via a list of addresses given in the + `dns` option. +- `dns_search` -* `route_metric4` and `route_metric6` + `dns_search` is only supported for IPv4 nameservers. Manual DNS configuration can + be specified via a list of domains to search given in the `dns_search` option. - - For `NetworkManager`, `route_metric4` and `route_metric6` corresponds to the - [`ipv4.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.route-metric) and +- `dns_options` + + `dns_options` is only supported for the NetworkManager provider and IPv4 + nameservers. Manual DNS configuration via a list of DNS options can be given in the + `dns_options`. The list of supported DNS options for IPv4 nameservers is described + in [man 5 resolv.conf](https://man7.org/linux/man-pages/man5/resolv.conf.5.html). + Currently, the list of supported DNS options is: + - `attempts:n` + - `debug` + - `edns0` + - `ndots:n` + - `no-check-names` + - `no-reload` + - `no-tld-query` + - `rotate` + - `single-request` + - `single-request-reopen` + - `timeout:n` + - `trust-ad` + - `use-vc` + + **Note:** The "trust-ad" setting is only honored if the profile contributes name + servers to resolv.conf, and if all contributing profiles have "trust-ad" enabled. + When using a caching DNS plugin (dnsmasq or systemd-resolved in NetworkManager.conf) + then "edns0" and "trust-ad" are automatically added. 
+ +- `route_metric4` and `route_metric6` + + For `NetworkManager`, `route_metric4` and `route_metric6` corresponds to the + [`ipv4.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.route-metric) + and [`ipv6.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv6.route-metric) properties, respectively. If specified, it determines the route metric for DHCP - assigned routes and the default route, and thus the priority for multiple interfaces. + assigned routes and the default route, and thus the priority for multiple + interfaces. -* `route` +- `route` - Static route configuration can be specified via a list of routes given in the `route` - option. The default value is an empty list. Each route is a dictionary with the following - entries: `network`, `prefix`, `gateway` and `metric`. `network` and `prefix` specify - the destination network. - Note that Classless inter-domain routing (CIDR) notation or network mask notation are not supported yet. + Static route configuration can be specified via a list of routes given in the + `route` option. The default value is an empty list. Each route is a dictionary with + the following entries: `network`, `prefix`, `gateway` and `metric`. `network` and + `prefix` specify the destination network. + Note that Classless inter-domain routing (CIDR) notation or network mask notation + are not supported yet. -* `route_append_only` +- `route_append_only` The `route_append_only` option allows only to add new routes to the existing routes on the system. - If the `route_append_only` boolean option is set to `yes`, the specified routes are appended to the existing routes. - If `route_append_only` is set to `no` (default), the current routes are replaced. - Note that setting `route_append_only` to `yes` without setting `route` has the effect of preserving the current static routes. 
+ If the `route_append_only` boolean option is set to `yes`, the specified routes are + appended to the existing routes. If `route_append_only` is set to `no` (default), + the current routes are replaced. Note that setting `route_append_only` to `yes` + without setting `route` has the effect of preserving the current static routes. -* `rule_append_only` +- `rule_append_only` The `rule_append_only` boolean option allows to preserve the current routing rules. Note that specifying routing rules is not supported yet. -**Note:** When `route_append_only` or `rule_append_only` is not specified, the `network` role deletes the current routes or routing rules. +**Note:** When `route_append_only` or `rule_append_only` is not specified, the network +role deletes the current routes or routing rules. -**Note:** Slaves to the bridge, bond or team devices cannot specify `ip` settings. +**Note:** Ports to the bridge, bond or team devices cannot specify `ip` settings. ### `ethtool` -The ethtool settings allow to enable or disable varios features. The names +The ethtool settings allow to enable or disable various features. The names correspond to the names used by the `ethtool` utility. Depending on the actual -kernel and device, changing some features might not be supported. +kernel and device, changing some options might not be supported. 
```yaml ethtool: features: - esp-hw-offload: yes|no # optional - esp-tx-csum-hw-offload: yes|no # optional - fcoe-mtu: yes|no # optional + esp_hw_offload: yes|no # optional + esp_tx_csum_hw_offload: yes|no # optional + fcoe_mtu: yes|no # optional gro: yes|no # optional gso: yes|no # optional highdma: yes|no # optional - hw-tc-offload: yes|no # optional - l2-fwd-offload: yes|no # optional + hw_tc_offload: yes|no # optional + l2_fwd_offload: yes|no # optional loopback: yes|no # optional lro: yes|no # optional ntuple: yes|no # optional rx: yes|no # optional - rx-all: yes|no # optional - rx-fcs: yes|no # optional - rx-gro-hw: yes|no # optional - rx-udp_tunnel-port-offload: yes|no # optional - rx-vlan-filter: yes|no # optional - rx-vlan-stag-filter: yes|no # optional - rx-vlan-stag-hw-parse: yes|no # optional + rx_all: yes|no # optional + rx_fcs: yes|no # optional + rx_gro_hw: yes|no # optional + rx_udp_tunnel_port_offload: yes|no # optional + rx_vlan_filter: yes|no # optional + rx_vlan_stag_filter: yes|no # optional + rx_vlan_stag_hw_parse: yes|no # optional rxhash: yes|no # optional rxvlan: yes|no # optional sg: yes|no # optional - tls-hw-record: yes|no # optional - tls-hw-tx-offload: yes|no # optional + tls_hw_record: yes|no # optional + tls_hw_tx_offload: yes|no # optional tso: yes|no # optional tx: yes|no # optional - tx-checksum-fcoe-crc: yes|no # optional - tx-checksum-ip-generic: yes|no # optional - tx-checksum-ipv4: yes|no # optional - tx-checksum-ipv6: yes|no # optional - tx-checksum-sctp: yes|no # optional - tx-esp-segmentation: yes|no # optional - tx-fcoe-segmentation: yes|no # optional - tx-gre-csum-segmentation: yes|no # optional - tx-gre-segmentation: yes|no # optional - tx-gso-partial: yes|no # optional - tx-gso-robust: yes|no # optional - tx-ipxip4-segmentation: yes|no # optional - tx-ipxip6-segmentation: yes|no # optional - tx-nocache-copy: yes|no # optional - tx-scatter-gather: yes|no # optional - tx-scatter-gather-fraglist: yes|no # optional - 
tx-sctp-segmentation: yes|no # optional - tx-tcp-ecn-segmentation: yes|no # optional - tx-tcp-mangleid-segmentation: yes|no # optional - tx-tcp-segmentation: yes|no # optional - tx-tcp6-segmentation: yes|no # optional - tx-udp-segmentation: yes|no # optional - tx-udp_tnl-csum-segmentation: yes|no # optional - tx-udp_tnl-segmentation: yes|no # optional - tx-vlan-stag-hw-insert: yes|no # optional + tx_checksum_fcoe_crc: yes|no # optional + tx_checksum_ip_generic: yes|no # optional + tx_checksum_ipv4: yes|no # optional + tx_checksum_ipv6: yes|no # optional + tx_checksum_sctp: yes|no # optional + tx_esp_segmentation: yes|no # optional + tx_fcoe_segmentation: yes|no # optional + tx_gre_csum_segmentation: yes|no # optional + tx_gre_segmentation: yes|no # optional + tx_gso_partial: yes|no # optional + tx_gso_robust: yes|no # optional + tx_ipxip4_segmentation: yes|no # optional + tx_ipxip6_segmentation: yes|no # optional + tx_nocache_copy: yes|no # optional + tx_scatter_gather: yes|no # optional + tx_scatter_gather_fraglist: yes|no # optional + tx_sctp_segmentation: yes|no # optional + tx_tcp_ecn_segmentation: yes|no # optional + tx_tcp_mangleid_segmentation: yes|no # optional + tx_tcp_segmentation: yes|no # optional + tx_tcp6_segmentation: yes|no # optional + tx_udp_segmentation: yes|no # optional + tx_udp_tnl_csum_segmentation: yes|no # optional + tx_udp_tnl_segmentation: yes|no # optional + tx_vlan_stag_hw_insert: yes|no # optional txvlan: yes|no # optional + coalesce: + adaptive_rx: yes|no # optional + adaptive_tx: yes|no # optional + pkt_rate_high: 0 # optional mininum=0 maximum=0xffffffff + pkt_rate_low: 0 # optional mininum=0 maximum=0xffffffff + rx_frames: 0 # optional mininum=0 maximum=0xffffffff + rx_frames_high: 0 # optional mininum=0 maximum=0xffffffff + rx_frames_irq: 0 # optional mininum=0 maximum=0xffffffff + rx_frames_low: 0 # optional mininum=0 maximum=0xffffffff + rx_usecs: 0 # optional mininum=0 maximum=0xffffffff + rx_usecs_high: 0 # optional mininum=0 
maximum=0xffffffff + rx_usecs_irq: 0 # optional mininum=0 maximum=0xffffffff + rx_usecs_low: 0 # optional mininum=0 maximum=0xffffffff + sample_interval: 0 # optional mininum=0 maximum=0xffffffff + stats_block_usecs: 0 # optional mininum=0 maximum=0xffffffff + tx_frames: 0 # optional mininum=0 maximum=0xffffffff + tx_frames_high: 0 # optional mininum=0 maximum=0xffffffff + tx_frames_irq: 0 # optional mininum=0 maximum=0xffffffff + tx_frames_low: 0 # optional mininum=0 maximum=0xffffffff + tx_usecs: 0 # optional mininum=0 maximum=0xffffffff + tx_usecs_high: 0 # optional mininum=0 maximum=0xffffffff + tx_usecs_irq: 0 # optional mininum=0 maximum=0xffffffff + tx_usecs_low: 0 # optional mininum=0 maximum=0xffffffff ``` +### `ieee802_1x` + +Configures 802.1x authentication for an interface. + +Currently, NetworkManager is the only supported provider and EAP-TLS is the only +supported EAP method. + +SSL certificates and keys must be deployed on the host prior to running the role. + +- `eap` + + The allowed EAP method to be used when authenticating to the network with 802.1x. + + Currently, `tls` is the default and the only accepted value. + +- `identity` (required) + + Identity string for EAP authentication methods. + +- `private_key` (required) + + Absolute path to the client's PEM or PKCS#12 encoded private key used for 802.1x + authentication. + +- `private_key_password` + + Password to the private key specified in `private_key`. + +- `private_key_password_flags` + + List of flags to configure how the private key password is managed. + + Multiple flags may be specified. + + Valid flags are: + - `none` + - `agent-owned` + - `not-saved` + - `not-required` + + See NetworkManager documentation on "Secret flag types" more details (`man 5 + nm-settings`). + +- `client_cert` (required) + + Absolute path to the client's PEM encoded certificate used for 802.1x + authentication. 
+ +- `ca_cert` + + Absolute path to the PEM encoded certificate authority used to verify the EAP + server. + +- `ca_path` + + Absolute path to directory containing additional pem encoded ca certificates used to + verify the EAP server. Can be used instead of or in addition to ca_cert. Cannot be + used if system_ca_certs is enabled. + +- `system_ca_certs` + + If set to `True`, NetworkManager will use the system's trusted ca + certificates to verify the EAP server. + +- `domain_suffix_match` + + If set, NetworkManager will ensure the domain name of the EAP server certificate + matches this string. + +### `bond` + +The `bond` setting configures the options of bonded interfaces +(type `bond`). It supports the following options: + +- `mode` + + Bonding mode. See the + [kernel documentation](https://www.kernel.org/doc/Documentation/networking/bonding.txt) + or your distribution `nmcli` documentation for valid values. + NetworkManager defaults to `balance-rr`. + +- `miimon` + + Sets the MII link monitoring interval (in milliseconds) + Examples of Options ------------------- @@ -443,7 +638,7 @@ network_connections: #persistent_state: present # default type: ethernet autoconnect: yes - mac: 00:00:5e:00:53:5d + mac: "00:00:5e:00:53:5d" ip: dhcp4: yes ``` @@ -478,7 +673,6 @@ network_connections: #interface_name: br0 # defaults to the connection name ``` - Configuring a bridge connection: ```yaml @@ -491,21 +685,21 @@ network_connections: auto6: no ``` -Setting `master` and `slave_type`: +Setting `controller` and `port_type`: ```yaml network_connections: - name: br0-bond0 type: bond interface_name: bond0 - master: internal-br0 - slave_type: bridge + controller: internal-br0 + port_type: bridge - name: br0-bond0-eth1 type: ethernet interface_name: eth1 - master: br0-bond0 - slave_type: bond + controller: br0-bond0 + port_type: bond ``` Configuring VLANs: @@ -555,6 +749,20 @@ network_connections: - 192.168.1.1/24 ``` +Configuring a wireless connection: + +```yaml 
+network_connections: + - name: wlan0 + type: wireless + wireless: + ssid: "My WPA2-PSK Network" + key_mgmt: "wpa-psk" + # recommend vault encrypting the wireless password + # see https://docs.ansible.com/ansible/latest/user_guide/vault.html + password: "p@55w0rD" +``` + Setting the IP configuration: ```yaml @@ -573,6 +781,9 @@ network_connections: dns_search: - example.com - subdomain.example.com + dns_options: + - rotate + - timeout:1 route_metric6: -1 auto6: no @@ -596,13 +807,30 @@ network_connections: rule_append_only: yes ``` +Configuring 802.1x: + +```yaml +network_connections: + - name: eth0 + type: ethernet + ieee802_1x: + identity: myhost + eap: tls + private_key: /etc/pki/tls/client.key + # recommend vault encrypting the private key password + # see https://docs.ansible.com/ansible/latest/user_guide/vault.html + private_key_password: "p@55w0rD" + client_cert: /etc/pki/tls/client.pem + ca_cert: /etc/pki/tls/cacert.pem + domain_suffix_match: example.com +``` + ### Invalid and Wrong Configuration The `network` role rejects invalid configurations. It is recommended to test the role with `--check` first. There is no protection against wrong (but valid) configuration. Double-check your configuration before applying it. - Compatibility ------------- @@ -628,13 +856,15 @@ after disabling the NetworkManager service. Limitations ----------- -As Ansible usually works via the network, for example via SSH, there are some limitations to be considered: +As Ansible usually works via the network, for example via SSH, there are some +limitations to be considered: -The `network` role does not support bootstraping networking configuration. One -option may be [ansible-pull](https://docs.ansible.com/ansible/playbooks_intro.html#ansible-pull). -Another option maybe be to initially auto-configure the host during installation -(ISO based, kickstart, etc.), so that the host is connected to a management LAN -or VLAN. It strongly depends on your environment. 
+The `network` role does not support bootstrapping networking configuration.
Also, deleting a +NetworkManager connection that is currently active results in removing the interface. +Therefore, the order of the steps should be followed, and carefully handling of +[autoconnect](#autoconnect) property may be necessary. This should be improved in +NetworkManager RFE [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515). It seems difficult to change networking of the target host in a way that breaks the current SSH connection of ansible. If you want to do that, ansible-pull might @@ -680,3 +910,6 @@ feature. At the beginning of the play we could create a checkpoint and if we los connectivity due to an error, NetworkManager would automatically rollback after timeout. The limitations is that this would only work with NetworkManager, and it is not clear that rollback will result in a working configuration. + +*Want to contribute? Take a look at our [contributing +guidelines](https://github.com/linux-system-roles/network/blob/main/contributing.md)!* diff --git a/roles/linux-system-roles.network/defaults/main.yml b/roles/linux-system-roles.network/defaults/main.yml index e5c8c6f..2d8a56e 100644 --- a/roles/linux-system-roles.network/defaults/main.yml +++ b/roles/linux-system-roles.network/defaults/main.yml @@ -2,54 +2,91 @@ --- network_connections: [] +network_allow_restart: no + # Use initscripts for RHEL/CentOS < 7, nm otherwise network_provider_os_default: "{{ - 'initscripts' if ansible_distribution in ['RedHat', 'CentOS'] and - ansible_distribution_major_version is version('7', '<') + 'initscripts' if ansible_distribution in [ + 'RedHat', + 'CentOS', + 'OracleLinux' + ] and ansible_distribution_major_version is version('7', '<') else 'nm' }}" # If NetworkManager.service is running, assume that 'nm' is currently in-use, # otherwise initscripts -network_provider_current: "{{ +__network_provider_current: "{{ 'nm' if 'NetworkManager.service' in ansible_facts.services and ansible_facts.services['NetworkManager.service']['state'] == 
'running' else 'initscripts' }}" # Default to the auto-detected value -network_provider: "{{ network_provider_current }}" +network_provider: "{{ __network_provider_current }}" + +# check if any 802.1x connections are defined +__network_ieee802_1x_connections_defined: "{{ network_connections | + selectattr('ieee802_1x', 'defined') | list | count > 0 }}" + +# check if any wireless connections are defined +__network_wireless_connections_defined: "{{ + ['wireless'] in network_connections|json_query('[*][type]') }}" + +# NetworkManager-wireless is required for wireless connections +__network_packages_default_wireless: ["{% + if __network_wireless_connections_defined + %}NetworkManager-wifi{% endif %}"] + +# check if any team connections are defined +__network_team_connections_defined: "{{ + ['team'] in network_connections|json_query('[*][type]') }}" + +# NetworkManager-team is required for team connections +__network_packages_default_team: ["{% + if __network_team_connections_defined + %}NetworkManager-team{% endif %}"] + +# wpa_supplicant is required if any 802.1x or wireless connections are defined +__network_wpa_supplicant_required: "{{ + __network_ieee802_1x_connections_defined or + __network_wireless_connections_defined }}" +__network_packages_default_wpa_supplicant: ["{% + if __network_wpa_supplicant_required + %}wpa_supplicant{% endif %}"] # The python-gobject-base package depends on the python version and # distribution: # - python-gobject-base on RHEL7 (no python2-gobject-base :-/) -# - python-gobject-base or python2-gobject-base on Fedora 27 # - python3-gobject-base on Fedora 28+ -network_service_name_default_nm: NetworkManager -network_packages_default_nm: - - ethtool - - NetworkManager - - "python{{ ansible_python['version']['major'] | replace('2', '') }}-gobject-base" +__network_packages_default_gobject_packages: ["python{{ + ansible_python['version']['major'] | replace('2', '')}}-gobject-base"] -network_service_name_default_initscripts: network 
+__network_service_name_default_nm: NetworkManager +__network_packages_default_nm: "{{['NetworkManager'] + + __network_packages_default_gobject_packages|select()|list() + + __network_packages_default_wpa_supplicant|select()|list() + + __network_packages_default_wireless|select()|list() + + __network_packages_default_team|select()|list()}}" + +__network_service_name_default_initscripts: network # initscripts requires bridge-utils to manage bridges, install it when the # 'bridge' type is used in network_connections -_network_packages_default_initscripts_bridge: ["{% if ['bridge'] in network_connections|json_query('[*][type]') and -( - (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('7', '<=')) or - (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('28', '<=')) -) +__network_packages_default_initscripts_bridge: ["{% +if ['bridge'] in network_connections|json_query('[*][type]') and + ansible_distribution in ['RedHat', 'CentOS', 'OracleLinux'] and + ansible_distribution_major_version is version('7', '<=') %}bridge-utils{% endif %}"] -_network_packages_default_initscripts_network_scripts: ["{% -if (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('7', '<=')) or - (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('28', '<=')) +__network_packages_default_initscripts_network_scripts: ["{% +if ansible_distribution in ['RedHat', 'CentOS', 'OracleLinux'] and + ansible_distribution_major_version is version('7', '<=') %}initscripts{% else %}network-scripts{% endif %}"] # convert _network_packages_default_initscripts_bridge to an empty list if it # contains only the empty string and add it to the default package list # |select() filters the list to include only values that evaluate to true # (the empty string is false) # |list() converts the generator that |select() creates to a list 
-network_packages_default_initscripts: "{{ ['ethtool'] -+ _network_packages_default_initscripts_bridge|select()|list() -+ _network_packages_default_initscripts_network_scripts|select()|list() +__network_packages_default_initscripts: "{{ +__network_packages_default_initscripts_bridge|select()|list() ++ __network_packages_default_initscripts_network_scripts|select()|list() }}" @@ -58,25 +95,25 @@ network_packages_default_initscripts: "{{ ['ethtool'] # # Usually, the user only wants to select the "network_provider" # (or not set it at all and let it be autodetected via the -# internal variable "{{ network_provider_current }}". Hence, +# internal variable "{{ __network_provider_current }}". Hence, # depending on the "network_provider", a different set of # service-name and packages is chosen. # -# That is done via the internal "_network_provider_setup" dictionary. +# That is done via the internal "__network_provider_setup" dictionary. # If the user doesn't explicitly set "network_service_name" or # "network_packages" (which he usually wouldn't), then the defaults -# from "network_service_name_default_*" and "network_packages_default_*" +# from "__network_service_name_default_*" and "__network_packages_default_*" # apply. These values are hard-coded in this file, but they also could # be overwritten as host variables or via vars/*.yml. 
-_network_provider_setup: +__network_provider_setup: nm: - service_name: "{{ network_service_name_default_nm }}" - packages: "{{ network_packages_default_nm }}" + service_name: "{{ __network_service_name_default_nm }}" + packages: "{{ __network_packages_default_nm }}" initscripts: - service_name: "{{ network_service_name_default_initscripts }}" - packages: "{{ network_packages_default_initscripts }}" + service_name: "{{ __network_service_name_default_initscripts }}" + packages: "{{ __network_packages_default_initscripts }}" network_packages: "{{ - _network_provider_setup[network_provider]['packages'] }}" + __network_provider_setup[network_provider]['packages'] }}" network_service_name: "{{ - _network_provider_setup[network_provider]['service_name'] }}" + __network_provider_setup[network_provider]['service_name'] }}" diff --git a/roles/linux-system-roles.network/examples/bond-with-vlan.yml b/roles/linux-system-roles.network/examples/bond-with-vlan.yml deleted file mode 100644 index 2e6be23..0000000 --- a/roles/linux-system-roles.network/examples/bond-with-vlan.yml +++ /dev/null @@ -1,38 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- hosts: network-test - vars: - network_connections: - - # Create a bond profile, which is the parent of VLAN. 
- - name: prod2 - state: up - type: bond - interface_name: bond2 - ip: - dhcp4: no - auto6: no - bond: - mode: active-backup - miimon: 110 - - # enslave an ethernet to the bond - - name: prod2-slave1 - state: up - type: ethernet - interface_name: "{{ network_interface_name2 }}" - master: prod2 - - # on top of it, create a VLAN with ID 100 and static - # addressing - - name: prod2.100 - state: up - type: vlan - parent: prod2 - vlan_id: 100 - ip: - address: - - "192.0.2.{{ network_iphost }}/24" - - roles: - - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/bridge-with-vlan.yml b/roles/linux-system-roles.network/examples/bridge-with-vlan.yml deleted file mode 100644 index 037ff8e..0000000 --- a/roles/linux-system-roles.network/examples/bridge-with-vlan.yml +++ /dev/null @@ -1,36 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- hosts: network-test - vars: - network_connections: - - # Create a bridge profile, which is the parent of VLAN. - - name: prod2 - state: up - type: bridge - interface_name: bridge2 - ip: - dhcp4: no - auto6: no - - # enslave an ethernet to the bridge - - name: prod2-slave1 - state: up - type: ethernet - interface_name: "{{ network_interface_name2 }}" - master: prod2 - slave_type: bridge - - # on top of it, create a VLAN with ID 100 and static - # addressing - - name: prod2.100 - state: up - type: vlan - parent: prod2 - vlan_id: 100 - ip: - address: - - "192.0.2.{{ network_iphost }}/24" - - roles: - - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/down-profile.yml b/roles/linux-system-roles.network/examples/down-profile.yml deleted file mode 120000 index d5d2ed7..0000000 --- a/roles/linux-system-roles.network/examples/down-profile.yml +++ /dev/null @@ -1 +0,0 @@ -../tests/down-profile.yml \ No newline at end of file diff --git a/roles/linux-system-roles.network/examples/eth-simple-auto.yml b/roles/linux-system-roles.network/examples/eth-simple-auto.yml deleted file mode 
100644 index 0ba168a..0000000 --- a/roles/linux-system-roles.network/examples/eth-simple-auto.yml +++ /dev/null @@ -1,18 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- hosts: network-test - vars: - network_connections: - - # Create one ethernet profile and activate it. - # The profile uses automatic IP addressing - # and is tied to the interface by MAC address. - - name: prod1 - state: up - type: ethernet - autoconnect: yes - mac: "{{ network_mac1 }}" - mtu: 1450 - - roles: - - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/eth-with-vlan.yml b/roles/linux-system-roles.network/examples/eth-with-vlan.yml deleted file mode 100644 index 69da673..0000000 --- a/roles/linux-system-roles.network/examples/eth-with-vlan.yml +++ /dev/null @@ -1,29 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- hosts: network-test - vars: - network_connections: - - # Create a profile for the underlying device of the VLAN. - - name: prod2 - type: ethernet - autoconnect: no - state: up - interface_name: "{{ network_interface_name2 }}" - ip: - dhcp4: no - auto6: no - - # on top of it, create a VLAN with ID 100 and static - # addressing - - name: prod2.100 - state: up - type: vlan - parent: prod2 - vlan_id: 100 - ip: - address: - - "192.0.2.{{ network_iphost }}/24" - - roles: - - linux-system-roles.network diff --git a/roles/linux-system-roles.network/examples/ethtool-features-default.yml b/roles/linux-system-roles.network/examples/ethtool-features-default.yml deleted file mode 100644 index 78965e6..0000000 --- a/roles/linux-system-roles.network/examples/ethtool-features-default.yml +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- hosts: all - tasks: - - include_role: - name: linux-system-roles.network - vars: - network_connections: - - name: "{{ network_interface_name1 }}" - state: up - type: ethernet - ip: - dhcp4: "no" - auto6: "no" diff --git a/roles/linux-system-roles.network/examples/ethtool-features.yml 
b/roles/linux-system-roles.network/examples/ethtool-features.yml deleted file mode 100644 index d8842c2..0000000 --- a/roles/linux-system-roles.network/examples/ethtool-features.yml +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- hosts: all - tasks: - - include_role: - name: linux-system-roles.network - vars: - network_connections: - - name: "{{ network_interface_name1 }}" - state: up - type: ethernet - ip: - dhcp4: "no" - auto6: "no" - ethtool: - features: - gro: "no" - gso: "yes" - tx-sctp-segmentation: "no" diff --git a/roles/linux-system-roles.network/examples/remove-profile.yml b/roles/linux-system-roles.network/examples/remove-profile.yml deleted file mode 120000 index f2cf478..0000000 --- a/roles/linux-system-roles.network/examples/remove-profile.yml +++ /dev/null @@ -1 +0,0 @@ -../tests/remove-profile.yml \ No newline at end of file diff --git a/roles/linux-system-roles.network/library/network_connections.py b/roles/linux-system-roles.network/library/network_connections.py index 39e81e8..6eb1581 100644 --- a/roles/linux-system-roles.network/library/network_connections.py +++ b/roles/linux-system-roles.network/library/network_connections.py @@ -2,25 +2,33 @@ # -*- coding: utf-8 -*- # SPDX-License-Identifier: BSD-3-Clause +import errno import functools import os +import re +import shlex import socket +import subprocess import time import traceback +import logging # pylint: disable=import-error, no-name-in-module -from ansible.module_utils.network_lsr import MyError +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.network_lsr import ethtool # noqa:E501 +from ansible.module_utils.network_lsr import MyError # noqa:E501 -# pylint: disable=import-error -from ansible.module_utils.network_lsr.argument_validator import ( +from ansible.module_utils.network_lsr.argument_validator import ( # noqa:E501 ArgUtil, ArgValidator_ListConnections, ValidationError, ) -# pylint: disable=import-error -from 
ansible.module_utils.network_lsr.utils import Util -from ansible.module_utils.network_lsr import nm_provider +from ansible.module_utils.network_lsr.utils import Util # noqa:E501 +from ansible.module_utils.network_lsr import nm_provider # noqa:E501 + +# pylint: enable=import-error, no-name-in-module + DOCUMENTATION = """ --- @@ -39,8 +47,11 @@ options: Documentation needs to be written. Note that the network_connections ############################################################################### +PERSISTENT_STATE = "persistent_state" +ABSENT_STATE = "absent" DEFAULT_ACTIVATION_TIMEOUT = 90 +DEFAULT_TIMEOUT = 10 class CheckMode: @@ -57,6 +68,17 @@ class LogLevel: INFO = "info" DEBUG = "debug" + _LOGGING_LEVEL_MAP = { + logging.DEBUG: DEBUG, + logging.INFO: INFO, + logging.WARN: WARN, + logging.ERROR: ERROR, + } + + @staticmethod + def from_logging_level(logging_level): + return LogLevel._LOGGING_LEVEL_MAP.get(logging_level, LogLevel.ERROR) + @staticmethod def fmt(level): return "<%-6s" % (str(level) + ">") @@ -103,16 +125,7 @@ class SysUtil: @staticmethod def _link_read_permaddress(ifname): - try: - out = Util.check_output(["ethtool", "-P", ifname]) - except MyError: - return None - import re - - m = re.match("^Permanent address: ([0-9A-Fa-f:]*)\n$", out) - if not m: - return None - return Util.mac_norm(m.group(1)) + return ethtool.get_perm_addr(ifname) @staticmethod def _link_infos_fetch(): @@ -220,8 +233,6 @@ class IfcfgUtil: def KeyValid(cls, name): r = getattr(cls, "_CHECKSTR_VALID_KEY", None) if r is None: - import re - r = re.compile("^[a-zA-Z][a-zA-Z0-9_]*$") cls._CHECKSTR_VALID_KEY = r return bool(r.match(name)) @@ -231,8 +242,6 @@ class IfcfgUtil: r = getattr(cls, "_re_ValueEscape", None) if r is None: - import re - r = re.compile("^[a-zA-Z_0-9-.]*$") cls._re_ValueEscape = r @@ -335,7 +344,7 @@ class IfcfgUtil: ifcfg["PKEY"] = "yes" ifcfg["PKEY_ID"] = str(connection["infiniband"]["p_key"]) if connection["parent"]: - ifcfg["PHYSDEV"] = 
ArgUtil.connection_find_master( + ifcfg["PHYSDEV"] = ArgUtil.connection_find_controller( connection["parent"], connections, idx ) elif connection["type"] == "bridge": @@ -352,7 +361,7 @@ class IfcfgUtil: elif connection["type"] == "vlan": ifcfg["VLAN"] = "yes" ifcfg["TYPE"] = "Vlan" - ifcfg["PHYSDEV"] = ArgUtil.connection_find_master( + ifcfg["PHYSDEV"] = ArgUtil.connection_find_controller( connection["parent"], connections, idx ) ifcfg["VID"] = str(connection["vlan"]["id"]) @@ -376,6 +385,7 @@ class IfcfgUtil: ethtool_features = connection["ethtool"]["features"] configured_features = [] for feature, setting in ethtool_features.items(): + feature = feature.replace("_", "-") value = "" if setting: value = "on" @@ -393,24 +403,44 @@ class IfcfgUtil: " ".join(configured_features), ) + ethtool_coalesce = connection["ethtool"]["coalesce"] + configured_coalesce = [] + for coalesce, setting in ethtool_coalesce.items(): + if setting is not None: + if isinstance(setting, bool): + setting = int(setting) + configured_coalesce.append( + "%s %s" % (coalesce.replace("_", "-"), setting) + ) + + if configured_coalesce: + if ethtool_options: + ethtool_options += " ; " + ethtool_options += "-C %s %s" % ( + connection["interface_name"], + " ".join(configured_coalesce), + ) + if ethtool_options: ifcfg["ETHTOOL_OPTS"] = ethtool_options - if connection["master"] is not None: - m = ArgUtil.connection_find_master(connection["master"], connections, idx) - if connection["slave_type"] == "bridge": + if connection["controller"] is not None: + m = ArgUtil.connection_find_controller( + connection["controller"], connections, idx + ) + if connection["port_type"] == "bridge": ifcfg["BRIDGE"] = m - elif connection["slave_type"] == "bond": + elif connection["port_type"] == "bond": ifcfg["MASTER"] = m ifcfg["SLAVE"] = "yes" - elif connection["slave_type"] == "team": + elif connection["port_type"] == "team": ifcfg["TEAM_MASTER"] = m if "TYPE" in ifcfg: del ifcfg["TYPE"] if connection["type"] != 
"team": ifcfg["DEVICETYPE"] = "TeamPort" else: - raise MyError("invalid slave_type '%s'" % (connection["slave_type"])) + raise MyError("invalid port_type '%s'" % (connection["port_type"])) if ip["route_append_only"] and content_current: route4_file = content_current["route"] @@ -449,9 +479,11 @@ class IfcfgUtil: else: ifcfg["IPV6INIT"] = "no" if addrs6: - ifcfg["IPVADDR"] = addrs6[0]["address"] + "/" + str(addrs6[0]["prefix"]) + ifcfg["IPV6ADDR"] = ( + addrs6[0]["address"] + "/" + str(addrs6[0]["prefix"]) + ) if len(addrs6) > 1: - ifcfg["IPVADDR_SECONDARIES"] = " ".join( + ifcfg["IPV6ADDR_SECONDARIES"] = " ".join( [a["address"] + "/" + str(a["prefix"]) for a in addrs6[1:]] ) if ip["gateway6"] is not None: @@ -507,9 +539,6 @@ class IfcfgUtil: def ifcfg_parse_line(cls, line): r1 = getattr(cls, "_re_parse_line1", None) if r1 is None: - import re - import shlex - r1 = re.compile("^[ \t]*([a-zA-Z_][a-zA-Z_0-9]*)=(.*)$") cls._re_parse_line1 = r1 cls._shlex = shlex @@ -596,8 +625,6 @@ class IfcfgUtil: try: os.unlink(path) except OSError as e: - import errno - if e.errno != errno.ENOENT: raise else: @@ -655,7 +682,7 @@ class NMUtil: connection.add_setting(setting) return setting - def device_is_master_type(self, dev): + def device_is_controller_type(self, dev): if dev: NM = Util.NM() GObject = Util.GObject() @@ -799,7 +826,7 @@ class NMUtil: if connection["parent"]: s_infiniband.set_property( NM.SETTING_INFINIBAND_PARENT, - ArgUtil.connection_find_master( + ArgUtil.connection_find_controller( connection["parent"], connections, idx ), ) @@ -817,13 +844,17 @@ class NMUtil: s_bond.add_option("miimon", str(connection["bond"]["miimon"])) elif connection["type"] == "team": s_con.set_property(NM.SETTING_CONNECTION_TYPE, NM.SETTING_TEAM_SETTING_NAME) + elif connection["type"] == "dummy": + s_con.set_property( + NM.SETTING_CONNECTION_TYPE, NM.SETTING_DUMMY_SETTING_NAME + ) elif connection["type"] == "vlan": s_con.set_property(NM.SETTING_CONNECTION_TYPE, 
NM.SETTING_VLAN_SETTING_NAME) s_vlan = self.connection_ensure_setting(con, NM.SettingVlan) s_vlan.set_property(NM.SETTING_VLAN_ID, connection["vlan"]["id"]) s_vlan.set_property( NM.SETTING_VLAN_PARENT, - ArgUtil.connection_find_master_uuid( + ArgUtil.connection_find_controller_uuid( connection["parent"], connections, idx ), ) @@ -845,8 +876,32 @@ class NMUtil: s_macvlan.set_property(NM.SETTING_MACVLAN_TAP, connection["macvlan"]["tap"]) s_macvlan.set_property( NM.SETTING_MACVLAN_PARENT, - ArgUtil.connection_find_master(connection["parent"], connections, idx), + ArgUtil.connection_find_controller( + connection["parent"], connections, idx + ), ) + elif connection["type"] == "wireless": + s_con.set_property( + NM.SETTING_CONNECTION_TYPE, NM.SETTING_WIRELESS_SETTING_NAME + ) + s_wireless = self.connection_ensure_setting(con, NM.SettingWireless) + s_wireless.set_property( + NM.SETTING_WIRELESS_SSID, + Util.GLib().Bytes.new(connection["wireless"]["ssid"].encode("utf-8")), + ) + + s_wireless_sec = self.connection_ensure_setting( + con, NM.SettingWirelessSecurity + ) + s_wireless_sec.set_property( + NM.SETTING_WIRELESS_SECURITY_KEY_MGMT, + connection["wireless"]["key_mgmt"], + ) + + if connection["wireless"]["key_mgmt"] == "wpa-psk": + s_wireless_sec.set_property( + NM.SETTING_WIRELESS_SECURITY_PSK, connection["wireless"]["password"] + ) else: raise MyError("unsupported type %s" % (connection["type"])) @@ -877,6 +932,15 @@ class NMUtil: else: s_ethtool.set_feature(nm_feature, NM.Ternary.FALSE) + for coalesce, setting in connection["ethtool"]["coalesce"].items(): + nm_coalesce = nm_provider.get_nm_ethtool_coalesce(coalesce) + + if nm_coalesce: + if setting is None: + s_ethtool.option_set(nm_coalesce, None) + else: + s_ethtool.option_set_uint32(nm_coalesce, int(setting)) + if connection["mtu"]: if connection["type"] == "infiniband": s_infiniband = self.connection_ensure_setting(con, NM.SettingInfiniband) @@ -885,14 +949,14 @@ class NMUtil: s_wired = 
self.connection_ensure_setting(con, NM.SettingWired) s_wired.set_property(NM.SETTING_WIRED_MTU, connection["mtu"]) - if connection["master"] is not None: + if connection["controller"] is not None: s_con.set_property( - NM.SETTING_CONNECTION_SLAVE_TYPE, connection["slave_type"] + NM.SETTING_CONNECTION_SLAVE_TYPE, connection["port_type"] ) s_con.set_property( NM.SETTING_CONNECTION_MASTER, - ArgUtil.connection_find_master_uuid( - connection["master"], connections, idx + ArgUtil.connection_find_controller_uuid( + connection["controller"], connections, idx ), ) else: @@ -935,13 +999,24 @@ class NMUtil: s_ip4.add_dns(d["address"]) for s in ip["dns_search"]: s_ip4.add_dns_search(s) + s_ip4.clear_dns_options(True) + for s in ip["dns_options"]: + s_ip4.add_dns_option(s) - if ip["auto6"]: + if ip["ipv6_disabled"]: + s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "disabled") + elif ip["auto6"]: s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto") elif addrs6: s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "manual") else: + # we should not set "ipv6.method=ignore". "ignore" is a legacy mode + # and not really useful. Instead, we should set "link-local" here. + # + # But that fix is a change in behavior for the role, so it needs special + # care. 
s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "ignore") + for a in addrs6: s_ip6.add_address( NM.IPAddress.new(a["family"], a["address"], a["prefix"]) @@ -974,6 +1049,65 @@ class NMUtil: else: s_ip6.add_route(rr) + if connection["ieee802_1x"]: + s_8021x = self.connection_ensure_setting(con, NM.Setting8021x) + + s_8021x.set_property( + NM.SETTING_802_1X_EAP, [connection["ieee802_1x"]["eap"]] + ) + s_8021x.set_property( + NM.SETTING_802_1X_IDENTITY, connection["ieee802_1x"]["identity"] + ) + + s_8021x.set_property( + NM.SETTING_802_1X_PRIVATE_KEY, + Util.path_to_glib_bytes(connection["ieee802_1x"]["private_key"]), + ) + + if connection["ieee802_1x"]["private_key_password"]: + s_8021x.set_property( + NM.SETTING_802_1X_PRIVATE_KEY_PASSWORD, + connection["ieee802_1x"]["private_key_password"], + ) + + if connection["ieee802_1x"]["private_key_password_flags"]: + s_8021x.set_secret_flags( + NM.SETTING_802_1X_PRIVATE_KEY_PASSWORD, + Util.NM().SettingSecretFlags( + Util.convert_passwd_flags_nm( + connection["ieee802_1x"]["private_key_password_flags"] + ), + ), + ) + + s_8021x.set_property( + NM.SETTING_802_1X_CLIENT_CERT, + Util.path_to_glib_bytes(connection["ieee802_1x"]["client_cert"]), + ) + + if connection["ieee802_1x"]["ca_cert"]: + s_8021x.set_property( + NM.SETTING_802_1X_CA_CERT, + Util.path_to_glib_bytes(connection["ieee802_1x"]["ca_cert"]), + ) + + if connection["ieee802_1x"]["ca_path"]: + s_8021x.set_property( + NM.SETTING_802_1X_CA_PATH, + connection["ieee802_1x"]["ca_path"], + ) + + s_8021x.set_property( + NM.SETTING_802_1X_SYSTEM_CA_CERTS, + connection["ieee802_1x"]["system_ca_certs"], + ) + + if connection["ieee802_1x"]["domain_suffix_match"]: + s_8021x.set_property( + NM.SETTING_802_1X_DOMAIN_SUFFIX_MATCH, + connection["ieee802_1x"]["domain_suffix_match"], + ) + try: con.normalize() except Exception as e: @@ -1032,75 +1166,6 @@ class NMUtil: ) return True - def connection_delete(self, connection, timeout=10): - - # Do nothing, if the connection is already 
gone - if connection not in self.connection_list(): - return - - if "update2" in dir(connection): - return self.volatilize_connection(connection, timeout) - - delete_cb = Util.create_callback("delete_finish") - - cancellable = Util.create_cancellable() - cb_args = {} - connection.delete_async(cancellable, delete_cb, cb_args) - if not Util.GMainLoop_run(timeout): - cancellable.cancel() - raise MyError("failure to delete connection: %s" % ("timeout")) - if not cb_args.get("success", False): - raise MyError( - "failure to delete connection: %s" - % (cb_args.get("error", "unknown error")) - ) - - # workaround libnm oddity. The connection may not yet be gone if the - # connection was active and is deactivating. Wait. - c_uuid = connection.get_uuid() - gone = self.wait_till_connection_is_gone(c_uuid) - if not gone: - raise MyError( - "connection %s was supposedly deleted successfully, but it's still here" - % (c_uuid) - ) - - def volatilize_connection(self, connection, timeout=10): - update2_cb = Util.create_callback("update2_finish") - - cancellable = Util.create_cancellable() - cb_args = {} - - connection.update2( - None, # settings - Util.NM().SettingsUpdate2Flags.IN_MEMORY_ONLY - | Util.NM().SettingsUpdate2Flags.VOLATILE, # flags - None, # args - cancellable, - update2_cb, - cb_args, - ) - - if not Util.GMainLoop_run(timeout): - cancellable.cancel() - raise MyError("failure to volatilize connection: %s" % ("timeout")) - - Util.GMainLoop_iterate_all() - - # Do not check of success if the connection does not exist anymore This - # can happen if the connection was already volatile and set to down - # during the module call - if connection not in self.connection_list(): - return - - # update2_finish returns None on failure and a GLib.Variant of type - # a{sv} with the result otherwise (which can be empty) - if cb_args.get("success", None) is None: - raise MyError( - "failure to volatilize connection: %s: %r" - % (cb_args.get("error", "unknown error"), cb_args) - ) - def 
create_checkpoint(self, timeout): """ Create a new checkpoint """ checkpoint = Util.call_async_method( @@ -1131,25 +1196,6 @@ class NMUtil: mainloop_timeout=DEFAULT_ACTIVATION_TIMEOUT, ) - def wait_till_connection_is_gone(self, uuid, timeout=10): - """ - Wait until a connection is gone or until the timeout elapsed - - :param uuid: UUID of the connection that to wait for to be gone - :param timeout: Timeout in seconds to wait for - :returns: True when connection is gone, False when timeout elapsed - :rtype: bool - """ - - def _poll_timeout_cb(unused): - if not self.connection_list(uuid=uuid): - Util.GMainLoop().quit() - - poll_timeout_id = Util.GLib().timeout_add(100, _poll_timeout_cb, None) - gone = Util.GMainLoop_run(timeout) - Util.GLib().source_remove(poll_timeout_id) - return gone - def connection_activate(self, connection, timeout=15, wait_time=None): already_retried = False @@ -1226,13 +1272,13 @@ class NMUtil: if ac_state == NM.ActiveConnectionState.ACTIVATING: if ( - self.device_is_master_type(dev) + self.device_is_controller_type(dev) and dev_state >= NM.DeviceState.IP_CONFIG and dev_state <= NM.DeviceState.ACTIVATED ): - # master connections qualify as activated once they + # controller connections qualify as activated once they # reach IP-Config state. 
That is because they may - # wait for slave devices to attach + # wait for port devices to attach return True, None # fall through elif ac_state == NM.ActiveConnectionState.ACTIVATED: @@ -1314,66 +1360,18 @@ class NMUtil: if failure_reason: raise MyError("connection not activated: %s" % (failure_reason)) - def active_connection_deactivate(self, ac, timeout=10, wait_time=None): - def deactivate_cb(client, result, cb_args): - success = False - try: - success = client.deactivate_connection_finish(result) - except Exception as e: - if Util.error_is_cancelled(e): - return - cb_args["error"] = str(e) - cb_args["success"] = success - Util.GMainLoop().quit() - - cancellable = Util.create_cancellable() - cb_args = {} - self.nmclient.deactivate_connection_async( - ac, cancellable, deactivate_cb, cb_args + def reapply(self, device, connection=None): + version_id = 0 + flags = 0 + return Util.call_async_method( + device, "reapply", [connection, version_id, flags] ) - if not Util.GMainLoop_run(timeout): - cancellable.cancel() - raise MyError("failure to deactivate connection: %s" % (timeout)) - if not cb_args.get("success", False): - raise MyError( - "failure to deactivate connection: %s" - % (cb_args.get("error", "unknown error")) - ) - - self.active_connection_deactivate_wait(ac, wait_time) - return True - - def active_connection_deactivate_wait(self, ac, wait_time): - - if not wait_time: - return - - NM = Util.NM() - - def check_deactivated(ac): - return ac.get_state() >= NM.ActiveConnectionState.DEACTIVATED - - if not check_deactivated(ac): - - def check_deactivated_cb(): - if check_deactivated(ac): - Util.GMainLoop().quit() - - ac_id = ac.connect( - "notify::state", lambda source, pspec: check_deactivated_cb() - ) - - try: - if not Util.GMainLoop_run(wait_time): - raise MyError("connection not fully deactivated after timeout") - finally: - ac.handler_disconnect(ac_id) ############################################################################### -class RunEnvironment: 
+class RunEnvironment(object): def __init__(self): self._check_mode = None @@ -1423,15 +1421,14 @@ class RunEnvironmentAnsible(RunEnvironment): "force_state_change": {"required": False, "default": False, "type": "bool"}, "provider": {"required": True, "default": None, "type": "str"}, "connections": {"required": False, "default": None, "type": "list"}, + "__debug_flags": {"required": False, "default": "", "type": "str"}, } def __init__(self): RunEnvironment.__init__(self) self._run_results = [] self._log_idx = 0 - - from ansible.module_utils.basic import AnsibleModule - + self.on_failure = None module = AnsibleModule(argument_spec=self.ARGS, supports_check_mode=True) self.module = module @@ -1498,24 +1495,33 @@ class RunEnvironmentAnsible(RunEnvironment): c["persistent_state"], ) prefix = prefix + (", '%s'" % (c["name"])) - for r in rr["log"]: - yield (r[2], "[%03d] %s %s: %s" % (r[2], LogLevel.fmt(r[0]), prefix, r[1])) - - def _complete_kwargs(self, connections, kwargs, traceback_msg=None): - if "warnings" in kwargs: - logs = list(kwargs["warnings"]) - else: - logs = [] + for severity, msg, idx in rr["log"]: + yield ( + idx, + "[%03d] %s %s: %s" % (idx, LogLevel.fmt(severity), prefix, msg), + severity, + ) + def _complete_kwargs(self, connections, kwargs, traceback_msg=None, fail=False): + warning_logs = kwargs.get("warnings", []) + debug_logs = [] loglines = [] for res in self._run_results: for idx, rr in enumerate(res): loglines.extend(self._complete_kwargs_loglines(rr, connections, idx)) - loglines.sort(key=lambda x: x[0]) - logs.extend([x[1] for x in loglines]) + loglines.sort(key=lambda log_line: log_line[0]) + for idx, log_line, severity in loglines: + debug_logs.append(log_line) + if fail: + warning_logs.append(log_line) + elif severity >= LogLevel.WARN: + warning_logs.append(log_line) if traceback_msg is not None: - logs.append(traceback_msg) - kwargs["warnings"] = logs + warning_logs.append(traceback_msg) + kwargs["warnings"] = warning_logs + stderr = 
"\n".join(debug_logs) + "\n" + kwargs["stderr"] = stderr + kwargs["_invocation"] = {"module_args": self.module.params} return kwargs def exit_json(self, connections, changed=False, **kwargs): @@ -1525,20 +1531,38 @@ class RunEnvironmentAnsible(RunEnvironment): def fail_json( self, connections, msg, changed=False, warn_traceback=False, **kwargs ): + if self.on_failure: + self.on_failure() + traceback_msg = None if warn_traceback: traceback_msg = "exception: %s" % (traceback.format_exc()) kwargs["msg"] = msg kwargs["changed"] = changed self.module.fail_json( - **self._complete_kwargs(connections, kwargs, traceback_msg) + **self._complete_kwargs(connections, kwargs, traceback_msg, fail=True) ) ############################################################################### -class Cmd: +class NmLogHandler(logging.Handler): + def __init__(self, log_func, idx): + self._log = log_func + self._idx = idx + super(NmLogHandler, self).__init__() + + def filter(self, record): + return True + + def emit(self, record): + self._log( + self._idx, LogLevel.from_logging_level(record.levelno), record.getMessage() + ) + + +class Cmd(object): def __init__( self, run_env, @@ -1547,6 +1571,7 @@ class Cmd: is_check_mode=False, ignore_errors=False, force_state_change=False, + debug_flags="", ): self.run_env = run_env self.validate_one_type = None @@ -1560,6 +1585,7 @@ class Cmd: self._connections_data = None self._check_mode = CheckMode.PREPARE self._is_changed_modified_system = False + self._debug_flags = debug_flags def run_command(self, argv, encoding=None): return self.run_env.run_command(argv, encoding=encoding) @@ -1740,23 +1766,27 @@ class Cmd: if self.check_mode == CheckMode.REAL_RUN: self.start_transaction() - for idx, connection in enumerate(self.connections): - try: - for action in connection["actions"]: - if action == "absent": - self.run_action_absent(idx) - elif action == "present": - self.run_action_present(idx) - elif action == "up": - self.run_action_up(idx) - elif action == 
"down": - self.run_action_down(idx) - else: - assert False - except Exception as error: - if self.check_mode == CheckMode.REAL_RUN: - self.rollback_transaction(idx, action, error) - raise + # Reasoning for this order: + # For down/up profiles might need to be present, so do this first + # Put profile down before removing it if necessary + # To ensure up does not depend on anything that might be removed, + # do it last + for action in ("present", "down", "absent", "up"): + for idx, connection in enumerate(self.connections): + try: + if action in connection["actions"]: + if action == "absent": + self.run_action_absent(idx) + elif action == "present": + self.run_action_present(idx) + elif action == "up": + self.run_action_up(idx) + elif action == "down": + self.run_action_down(idx) + except Exception as error: + if self.check_mode == CheckMode.REAL_RUN: + self.rollback_transaction(idx, action, error) + raise if self.check_mode == CheckMode.REAL_RUN: self.finish_transaction() @@ -1816,7 +1846,7 @@ class Cmd: """ Hook for after all changes where made successfuly """ def rollback_transaction(self, idx, action, error): - """ Hook if configuring a profile results in an error + """Hook if configuring a profile results in an error :param idx: Index of the connection that triggered the error :param action: Action that triggered the error @@ -1831,6 +1861,10 @@ class Cmd: idx, "failure: %s (%s) [[%s]]" % (error, action, traceback.format_exc()) ) + def on_failure(self): + """ Hook to do any cleanup on failure before exiting """ + pass + def run_action_absent(self, idx): raise NotImplementedError() @@ -1853,6 +1887,12 @@ class Cmd_nm(Cmd): self._nmutil = None self.validate_one_type = ArgValidator_ListConnections.VALIDATE_ONE_MODE_NM self._checkpoint = None + # pylint: disable=import-error, no-name-in-module + from ansible.module_utils.network_lsr.nm import provider # noqa:E501 + + # pylint: enable=import-error, no-name-in-module + + self._nm_provider = 
provider.NetworkManagerProvider() @property def nmutil(self): @@ -1892,17 +1932,16 @@ class Cmd_nm(Cmd): def start_transaction(self): Cmd.start_transaction(self) - self._checkpoint = self.nmutil.create_checkpoint( - len(self.connections) * DEFAULT_ACTIVATION_TIMEOUT - ) + if "disable-checkpoints" in self._debug_flags: + pass + else: + self._checkpoint = self.nmutil.create_checkpoint( + len(self.connections) * DEFAULT_ACTIVATION_TIMEOUT + ) def rollback_transaction(self, idx, action, error): Cmd.rollback_transaction(self, idx, action, error) - if self._checkpoint: - try: - self.nmutil.rollback_checkpoint(self._checkpoint) - finally: - self._checkpoint = None + self.on_failure() def finish_transaction(self): Cmd.finish_transaction(self) @@ -1912,12 +1951,19 @@ class Cmd_nm(Cmd): finally: self._checkpoint = None - def _check_ethtool_setting_support(self, idx, connection): - """ Check if SettingEthtool support is needed and available + def on_failure(self): + if self._checkpoint: + try: + self.nmutil.rollback_checkpoint(self._checkpoint) + finally: + self._checkpoint = None - If any feature is specified, the SettingEthtool setting needs to be - available. Also NM needs to know about each specified setting. Do not - check if NM knows about any defaults. + def _check_ethtool_setting_support(self, idx, connection): + """Check if SettingEthtool support is needed and available + + If any ethtool setting is specified, the SettingEthtool + setting needs to be available. Also NM needs to know about each + specified setting. 
Do not check if NM knows about any defaults """ NM = Util.NM() @@ -1928,45 +1974,65 @@ class Cmd_nm(Cmd): if "ethtool" not in connection: return - ethtool_features = connection["ethtool"]["features"] - specified_features = dict( - [(k, v) for k, v in ethtool_features.items() if v is not None] - ) + ethtool_dict = { + "features": nm_provider.get_nm_ethtool_feature, + "coalesce": nm_provider.get_nm_ethtool_coalesce, + } - if specified_features and not hasattr(NM, "SettingEthtool"): - self.log_fatal(idx, "ethtool.features specified but not supported by NM") + for ethtool_key, nm_get_name_fcnt in ethtool_dict.items(): + ethtool_settings = connection["ethtool"][ethtool_key] + specified = dict( + [(k, v) for k, v in ethtool_settings.items() if v is not None] + ) - for feature, setting in specified_features.items(): - nm_feature = nm_provider.get_nm_ethtool_feature(feature) - if not nm_feature: + if specified and not hasattr(NM, "SettingEthtool"): self.log_fatal( - idx, "ethtool feature %s specified but not support by NM" % feature + idx, "ethtool.%s specified but not supported by NM", specified ) + for option, _ in specified.items(): + nm_name = nm_get_name_fcnt(option) + if not nm_name: + self.log_fatal( + idx, + "ethtool %s setting %s specified " + "but not supported by NM" % (ethtool_key, option), + ) + def run_action_absent(self, idx): - seen = set() name = self.connections[idx]["name"] - black_list_names = None - if not name: - name = None + profile_uuids = set() + + if name: + black_list_names = [] + else: + # Delete all profiles except explicitly included black_list_names = ArgUtil.connection_get_non_absent_names(self.connections) - while True: - connections = self.nmutil.connection_list( - name=name, black_list_names=black_list_names, black_list=seen + + for nm_profile in self._nm_provider.get_connections(): + if name and nm_profile.get_id() != name: + continue + if nm_profile.get_id() not in black_list_names: + profile_uuids.add(nm_profile.get_uuid()) + + if not 
profile_uuids: + self.log_info(idx, "no connection matches '%s' to delete" % (name)) + return + + logger = logging.getLogger() + log_handler = NmLogHandler(self.log, idx) + logger.addHandler(log_handler) + timeout = self.connections[idx].get("wait") + changed = False + for profile_uuid in profile_uuids: + changed |= self._nm_provider.volatilize_connection_by_uuid( + profile_uuid, + DEFAULT_TIMEOUT if timeout is None else timeout, + self.check_mode != CheckMode.REAL_RUN, ) - if not connections: - break - c = connections[-1] - seen.add(c) - self.log_info(idx, "delete connection %s, %s" % (c.get_id(), c.get_uuid())) + if changed: self.connections_data_set_changed(idx) - if self.check_mode == CheckMode.REAL_RUN: - try: - self.nmutil.connection_delete(c) - except MyError as e: - self.log_error(idx, "delete connection failed: %s" % (e)) - if not seen: - self.log_info(idx, "no connection '%s'" % (name)) + logger.removeHandler(log_handler) def run_action_present(self, idx): connection = self.connections[idx] @@ -2014,29 +2080,50 @@ class Cmd_nm(Cmd): % (con_cur.get_id(), con_cur.get_uuid()), ) - seen = set() + if ( + self.check_mode == CheckMode.REAL_RUN + and connection["ieee802_1x"] is not None + and connection["ieee802_1x"].get("ca_path") + ): + # It seems that NM on Fedora 31 + # (NetworkManager-1.20.4-1.fc31.x86_64) does need some time so that + # the D-Bus information is actually up-to-date. + time.sleep(0.1) + Util.GMainLoop_iterate_all() + updated_connection = Util.first( + self.nmutil.connection_list( + name=connection["name"], uuid=connection["nm.uuid"] + ) + ) + ca_path = updated_connection.get_setting_802_1x().props.ca_path + if not ca_path: + self.log_fatal( + idx, + "ieee802_1x.ca_path specified but not supported by " + "NetworkManager. 
Please update NetworkManager or use " + "ieee802_1x.ca_cert.", + ) if con_cur is not None: - seen.add(con_cur) + self._remove_duplicate_profile(idx, con_cur, connection.get("timeout")) - while True: - connections = self.nmutil.connection_list( - name=connection["name"], - black_list=seen, - black_list_uuids=[connection["nm.uuid"]], - ) - if not connections: - break - c = connections[-1] - self.log_info( - idx, "delete duplicate connection %s, %s" % (c.get_id(), c.get_uuid()) - ) - self.connections_data_set_changed(idx) - if self.check_mode == CheckMode.REAL_RUN: - try: - self.nmutil.connection_delete(c) - except MyError as e: - self.log_error(idx, "delete duplicate connection failed: %s" % (e)) - seen.add(c) + def _remove_duplicate_profile(self, idx, cur_nm_profile, timeout): + logger = logging.getLogger() + log_handler = NmLogHandler(self.log, idx) + logger.addHandler(log_handler) + + for nm_profile in self._nm_provider.get_connections(): + if ( + nm_profile.get_id() == cur_nm_profile.get_id() + and nm_profile.get_uuid() != cur_nm_profile.get_uuid() + ): + if self.check_mode == CheckMode.REAL_RUN: + self._nm_provider.volatilize_connection_by_uuid( + uuid=nm_profile.get_uuid(), + timeout=(DEFAULT_TIMEOUT if timeout is None else timeout), + check_mode=True, + ) + self.connections_data_set_changed(idx) + logger.removeHandler(log_handler) def run_action_up(self, idx): connection = self.connections[idx] @@ -2088,6 +2175,9 @@ class Cmd_nm(Cmd): ) self.connections_data_set_changed(idx) if self.check_mode == CheckMode.REAL_RUN: + if self._try_reapply(idx, con): + return + try: ac = self.nmutil.connection_activate(con) except MyError as e: @@ -2102,51 +2192,46 @@ class Cmd_nm(Cmd): except MyError as e: self.log_error(idx, "up connection failed while waiting: %s" % (e)) + def _try_reapply(self, idx, con): + """Try to reapply a connection + + If there is exactly one active connection with the same UUID activated + on exactly one device, ask the device to reapply the 
connection. + + :returns: `True`, when the connection was reapplied, `False` otherwise + :rtype: bool + """ + NM = Util.NM() + + acons = list(self.nmutil.active_connection_list(connections=[con])) + if len(acons) != 1: + return False + + active_connection = acons[0] + if active_connection.get_state() == NM.ActiveConnectionState.ACTIVATED: + devices = active_connection.get_devices() + if len(devices) == 1: + try: + self.nmutil.reapply(devices[0]) + self.log_info(idx, "connection reapplied") + return True + except MyError as error: + self.log_info(idx, "connection reapply failed: %s" % (error)) + return False + def run_action_down(self, idx): connection = self.connections[idx] - - cons = self.nmutil.connection_list(name=connection["name"]) - changed = False - if cons: - seen = set() - while True: - ac = Util.first( - self.nmutil.active_connection_list( - connections=cons, black_list=seen - ) - ) - if ac is None: - break - seen.add(ac) - self.log_info( - idx, "down connection %s: %s" % (connection["name"], ac.get_path()) - ) - changed = True - self.connections_data_set_changed(idx) - if self.check_mode == CheckMode.REAL_RUN: - try: - self.nmutil.active_connection_deactivate(ac) - except MyError as e: - self.log_error(idx, "down connection failed: %s" % (e)) - - wait_time = connection["wait"] - if wait_time is None: - wait_time = 10 - - try: - self.nmutil.active_connection_deactivate_wait(ac, wait_time) - except MyError as e: - self.log_error( - idx, "down connection failed while waiting: %s" % (e) - ) - - cons = self.nmutil.connection_list(name=connection["name"]) - if not changed: - self.log_error( - idx, - "down connection %s failed: connection not found" - % (connection["name"]), - ) + logger = logging.getLogger() + log_handler = NmLogHandler(self.log, idx) + logger.addHandler(log_handler) + timeout = connection["wait"] + if self._nm_provider.deactivate_connection( + connection["name"], + 10 if timeout is None else timeout, + self.check_mode != CheckMode.REAL_RUN, 
+ ): + self.connections_data_set_changed(idx) + logger.removeHandler(log_handler) ############################################################################### @@ -2179,6 +2264,36 @@ class Cmd_initscripts(Cmd): return None return f + def forget_nm_connection(self, path): + """ + Forget a NetworkManager connection by loading the path of the deleted + profile. This inverts the effect of loading a profile with + `NM_CONTROLLED=no` earlier, which made NetworkManager ignore the + device. + + This does not use the Python libnm bindings because they might not be + present on the system, since the module is currently operating for the + initscripts provider. If it fails, assume that NetworkManager is not + present and did not save any state about the corresponding interface. + """ + try: + subprocess.call( + [ + "busctl", + "--system", + "call", + "org.freedesktop.NetworkManager", + "/org/freedesktop/NetworkManager/Settings", + "org.freedesktop.NetworkManager.Settings", + "LoadConnections", + "as", + "1", + path, + ] + ) + except Exception: + pass + def run_action_absent(self, idx): n = self.connections[idx]["name"] name = n @@ -2210,6 +2325,7 @@ class Cmd_initscripts(Cmd): if self.check_mode == CheckMode.REAL_RUN: try: os.unlink(path) + self.forget_nm_connection(path) except Exception as e: self.log_error( idx, "delete ifcfg-rh file '%s' failed: %s" % (path, e) @@ -2279,11 +2395,18 @@ class Cmd_initscripts(Cmd): path = IfcfgUtil.ifcfg_path(name) if not os.path.isfile(path): - if self.check_mode == CheckMode.REAL_RUN: + if ( + self.check_mode == CheckMode.REAL_RUN + and connection.get(PERSISTENT_STATE) != ABSENT_STATE + ): self.log_error(idx, "ifcfg file '%s' does not exist" % (path)) else: + if self.check_mode != CheckMode.REAL_RUN: + in_checkmode = " in check mode" + else: + in_checkmode = "" self.log_info( - idx, "ifcfg file '%s' does not exist in check mode" % (path) + idx, "ifcfg file '%s' does not exist%s" % (path, in_checkmode) ) return @@ -2361,8 +2484,10 @@ def 
main(): is_check_mode=run_env_ansible.module.check_mode, ignore_errors=params["ignore_errors"], force_state_change=params["force_state_change"], + debug_flags=params["__debug_flags"], ) connections = cmd.connections + run_env_ansible.on_failure = cmd.on_failure cmd.run() except Exception as e: run_env_ansible.fail_json( diff --git a/roles/linux-system-roles.network/meta/.galaxy_install_info b/roles/linux-system-roles.network/meta/.galaxy_install_info index d30e58b..6f4ec76 100644 --- a/roles/linux-system-roles.network/meta/.galaxy_install_info +++ b/roles/linux-system-roles.network/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jul 1 18:41:54 2020 -version: 1.1.0 +install_date: Tue Apr 20 16:13:56 2021 +version: 1.3.0 diff --git a/roles/linux-system-roles.network/meta/main.yml b/roles/linux-system-roles.network/meta/main.yml index b350d57..38197e4 100644 --- a/roles/linux-system-roles.network/meta/main.yml +++ b/roles/linux-system-roles.network/meta/main.yml @@ -5,7 +5,8 @@ galaxy_info: description: Configure networking company: Red Hat, Inc. 
license: BSD-3-Clause - min_ansible_version: 2.5 + min_ansible_version: 2.7 + github_branch: main galaxy_tags: - centos - fedora @@ -17,9 +18,7 @@ galaxy_info: platforms: - name: Fedora versions: - - 28 - - 29 - - 30 + - all - name: EL versions: - 6 diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py b/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py index b58ec67..1bfaeda 100644 --- a/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py +++ b/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py @@ -2,11 +2,15 @@ # vim: fileencoding=utf8 # SPDX-License-Identifier: BSD-3-Clause +import posixpath import socket +import re # pylint: disable=import-error, no-name-in-module -from ansible.module_utils.network_lsr import MyError -from ansible.module_utils.network_lsr.utils import Util +from ansible.module_utils.network_lsr import MyError # noqa:E501 +from ansible.module_utils.network_lsr.utils import Util # noqa:E501 + +UINT32_MAX = 0xFFFFFFFF class ArgUtil: @@ -28,26 +32,27 @@ class ArgUtil: return c @staticmethod - def connection_find_master(name, connections, n_connections=None): + def connection_find_controller(name, connections, n_connections=None): c = ArgUtil.connection_find_by_name(name, connections, n_connections) if not c: - raise MyError("invalid master/parent '%s'" % (name)) + raise MyError("invalid controller/parent '%s'" % (name)) if c["interface_name"] is None: raise MyError( - "invalid master/parent '%s' which needs an 'interface_name'" % (name) + "invalid controller/parent '%s' which needs an 'interface_name'" + % (name) ) if not Util.ifname_valid(c["interface_name"]): raise MyError( - "invalid master/parent '%s' with invalid 'interface_name' ('%s')" + "invalid controller/parent '%s' with invalid 'interface_name' ('%s')" % (name, c["interface_name"]) ) return c["interface_name"] @staticmethod - def connection_find_master_uuid(name, 
connections, n_connections=None): + def connection_find_controller_uuid(name, connections, n_connections=None): c = ArgUtil.connection_find_by_name(name, connections, n_connections) if not c: - raise MyError("invalid master/parent '%s'" % (name)) + raise MyError("invalid controller/parent '%s'" % (name)) return c["nm.uuid"] @staticmethod @@ -92,17 +97,69 @@ class ArgValidator: return self.default_value def validate(self, value): + """ + Validate and normalize the input dictionary + + This validate @value or raises a ValidationError() on error. + It also returns a normalized value, where the settings are + converted to appropriate types and default values set. You + should rely on the normalization to fill unspecified values + and resolve ambiguity. + + You are implementing "types" of ArgValidator instances and + a major point of them is to implement a suitable validation and + normalization. The means for that is for subclasses to override + _validate_impl() and possibly _validate_post(). Some subclasses + support convenience arguments for simpler validation, like + ArgValidatorStr.enum_values or ArgValidatorNum.val_min. + Or ArgValidator.required which is honored by ArgValidatorDict + to determine whether a mandatory key is missing. Also, + ArgValidatorDict and ArgValidatorList have a nested parameter + which is an ArgValidator for the elements of the dictionary and list. + """ return self._validate(value, self.name) def _validate(self, value, name): + """ + The internal implementation for validate(). + + This is mostly called from internal code and by validate(). + Usually you would not call this directly nor override it. + Instead, you would implement either _validate_impl() or + _validate_post(). + """ validated = self._validate_impl(value, name) return self._validate_post(value, name, validated) def _validate_impl(self, value, name): + """ + Implementation of validation. + + Subclasses must implement this validation function. 
It is + the main hook to implement validate(). On validation error + it must raise ValidationError() or otherwise return a pre-normalized + value that gets passed to _validate_post(). + """ raise NotImplementedError() # pylint: disable=unused-argument,no-self-use def _validate_post(self, value, name, result): + """ + Post validation of the validated result. + + This will be called with the result from _validate_impl(). + By default it does nothing, but subclasses can override + this to perform additional validation. The use for this + hook is to split the validation in two steps. When validating + a dictionary of multiple keys, then _validate_impl() can + implement the basic pre-validation and pre-normalization of the individual + keys (which can be in any order). Afterwards, _validate_post() + can take a more holistic view and validate interdependencies + between keys and perform additional validation. For example, + _validate_impl() would validate that the keys are of the correct + basic type, and _validate_post() would validate that the values + don't conflict and possibly normalize derived default values. 
+ """ return result @@ -114,10 +171,28 @@ class ArgValidatorStr(ArgValidator): default_value=None, enum_values=None, allow_empty=False, + min_length=None, + max_length=None, + regex=None, ): ArgValidator.__init__(self, name, required, default_value) self.enum_values = enum_values self.allow_empty = allow_empty + self.regex = regex + + if max_length is not None: + if not isinstance(max_length, int): + raise ValueError("max_length must be an integer") + elif max_length < 0: + raise ValueError("max_length must be a positive integer") + self.max_length = max_length + + if min_length is not None: + if not isinstance(min_length, int): + raise ValueError("min_length must be an integer") + elif min_length < 0: + raise ValueError("min_length must be a positive integer") + self.min_length = min_length def _validate_impl(self, value, name): if not isinstance(value, Util.STRING_TYPE): @@ -129,10 +204,44 @@ class ArgValidatorStr(ArgValidator): "is '%s' but must be one of '%s'" % (value, "' '".join(sorted(self.enum_values))), ) + if self.regex is not None and not any(re.match(x, value) for x in self.regex): + raise ValidationError( + name, + "is '%s' which does not match the regex '%s'" + % (value, "' '".join(sorted(self.regex))), + ) if not self.allow_empty and not value: raise ValidationError(name, "cannot be empty") + if not self._validate_string_max_length(value): + raise ValidationError( + name, "maximum length is %s characters" % (self.max_length) + ) + if not self._validate_string_min_length(value): + raise ValidationError( + name, "minimum length is %s characters" % (self.min_length) + ) return value + def _validate_string_max_length(self, value): + """ + Ensures that the length of string `value` is less than or equal to + the maximum length + """ + if self.max_length is not None: + return len(str(value)) <= self.max_length + else: + return True + + def _validate_string_min_length(self, value): + """ + Ensures that the length of string `value` is more than or equal to + 
the minimum length + """ + if self.min_length is not None: + return len(str(value)) >= self.min_length + else: + return True + class ArgValidatorNum(ArgValidator): def __init__( # pylint: disable=too-many-arguments @@ -198,6 +307,12 @@ class ArgValidatorBool(ArgValidator): raise ValidationError(name, "must be an boolean but is '%s'" % (value)) +class ArgValidatorDeprecated: + def __init__(self, name, deprecated_by): + self.name = name + self.deprecated_by = deprecated_by + + class ArgValidatorDict(ArgValidator): def __init__( self, @@ -221,26 +336,33 @@ class ArgValidatorDict(ArgValidator): items = list(value.items()) except AttributeError: raise ValidationError(name, "invalid content is not a dictionary") - for (k, v) in items: - if k in seen_keys: - raise ValidationError(name, "duplicate key '%s'" % (k)) - seen_keys.add(k) - validator = self.nested.get(k, None) - if validator is None: - raise ValidationError(name, "invalid key '%s'" % (k)) + for (setting, value) in items: try: - vv = validator._validate(v, name + "." + k) + validator = self.nested[setting] + except KeyError: + raise ValidationError(name, "invalid key '%s'" % (setting)) + if isinstance(validator, ArgValidatorDeprecated): + setting = validator.deprecated_by + validator = self.nested.get(setting, None) + if setting in seen_keys: + raise ValidationError(name, "duplicate key '%s'" % (setting)) + seen_keys.add(setting) + try: + validated_value = validator._validate(value, name + "." 
+ setting) except ValidationError as e: raise ValidationError(e.name, e.error_message) - result[k] = vv - for (k, v) in self.nested.items(): - if k in seen_keys: + result[setting] = validated_value + for (setting, validator) in self.nested.items(): + if setting in seen_keys or isinstance(validator, ArgValidatorDeprecated): continue - if v.required: - raise ValidationError(name, "missing required key '%s'" % (k)) - vv = v.get_default_value() - if not self.all_missing_during_validate and vv is not ArgValidator.MISSING: - result[k] = vv + if validator.required: + raise ValidationError(name, "missing required key '%s'" % (setting)) + default_value = validator.get_default_value() + if ( + not self.all_missing_during_validate + and default_value is not ArgValidator.MISSING + ): + result[setting] = default_value return result @@ -405,6 +527,22 @@ class ArgValidatorIPRoute(ArgValidatorDict): class ArgValidator_DictIP(ArgValidatorDict): + REGEX_DNS_OPTIONS = [ + r"^attempts:([1-9]\d*|0)$", + r"^debug$", + r"^edns0$", + r"^ndots:([1-9]\d*|0)$", + r"^no-check-names$", + r"^no-reload$", + r"^no-tld-query$", + r"^rotate$", + r"^single-request$", + r"^single-request-reopen$", + r"^timeout:([1-9]\d*|0)$", + r"^trust-ad$", + r"^use-vc$", + ] + def __init__(self): ArgValidatorDict.__init__( self, @@ -417,6 +555,7 @@ class ArgValidator_DictIP(ArgValidatorDict): "route_metric4", val_min=-1, val_max=0xFFFFFFFF, default_value=None ), ArgValidatorBool("auto6", default_value=None), + ArgValidatorBool("ipv6_disabled", default_value=None), ArgValidatorIP("gateway6", family=socket.AF_INET6), ArgValidatorNum( "route_metric6", val_min=-1, val_max=0xFFFFFFFF, default_value=None @@ -441,6 +580,13 @@ class ArgValidator_DictIP(ArgValidatorDict): nested=ArgValidatorStr("dns_search[?]"), default_value=list, ), + ArgValidatorList( + "dns_options", + nested=ArgValidatorStr( + "dns_options[?]", regex=ArgValidator_DictIP.REGEX_DNS_OPTIONS + ), + default_value=list, + ), ], default_value=lambda: { 
"dhcp4": True, @@ -448,6 +594,7 @@ class ArgValidator_DictIP(ArgValidatorDict): "gateway4": None, "route_metric4": None, "auto6": True, + "ipv6_disabled": False, "gateway6": None, "route_metric6": None, "address": [], @@ -456,18 +603,50 @@ class ArgValidator_DictIP(ArgValidatorDict): "rule_append_only": False, "dns": [], "dns_search": [], + "dns_options": [], }, ) def _validate_post(self, value, name, result): + + has_ipv6_addresses = any( + [a for a in result["address"] if a["family"] == socket.AF_INET6] + ) + + if result["ipv6_disabled"] is True: + if result["auto6"] is True: + raise ValidationError( + name, "'auto6' and 'ipv6_disabled' are mutually exclusive" + ) + if has_ipv6_addresses: + raise ValidationError( + name, + "'ipv6_disabled' and static IPv6 addresses are mutually exclusive", + ) + if result["gateway6"] is not None: + raise ValidationError( + name, "'ipv6_disabled' and 'gateway6' are mutually exclusive" + ) + if result["route_metric6"] is not None: + raise ValidationError( + name, "'ipv6_disabled' and 'route_metric6' are mutually exclusive" + ) + elif result["ipv6_disabled"] is None: + # "ipv6_disabled" is not explicitly set, we always set it to False. + # Either "auto6" is enabled or static addresses are set, then this + # is clearly correct. + # Even with "auto6:False" and no IPv6 addresses, we at least enable + # IPv6 link local addresses. 
+ result["ipv6_disabled"] = False + if result["dhcp4"] is None: result["dhcp4"] = result["dhcp4_send_hostname"] is not None or not any( [a for a in result["address"] if a["family"] == socket.AF_INET] ) + if result["auto6"] is None: - result["auto6"] = not any( - [a for a in result["address"] if a["family"] == socket.AF_INET6] - ) + result["auto6"] = not has_ipv6_addresses + if result["dhcp4_send_hostname"] is not None: if not result["dhcp4"]: raise ValidationError( @@ -526,7 +705,10 @@ class ArgValidator_DictEthtool(ArgValidatorDict): ArgValidatorDict.__init__( self, name="ethtool", - nested=[ArgValidator_DictEthtoolFeatures()], + nested=[ + ArgValidator_DictEthtoolFeatures(), + ArgValidator_DictEthtoolCoalesce(), + ], default_value=ArgValidator.MISSING, ) @@ -548,58 +730,274 @@ class ArgValidator_DictEthtoolFeatures(ArgValidatorDict): self, name="features", nested=[ - ArgValidatorBool("esp-hw-offload", default_value=None), - ArgValidatorBool("esp-tx-csum-hw-offload", default_value=None), - ArgValidatorBool("fcoe-mtu", default_value=None), + ArgValidatorBool("esp_hw_offload", default_value=None), + ArgValidatorDeprecated( + "esp-hw-offload", deprecated_by="esp_hw_offload" + ), + ArgValidatorBool("esp_tx_csum_hw_offload", default_value=None), + ArgValidatorDeprecated( + "esp-tx-csum-hw-offload", + deprecated_by="esp_tx_csum_hw_offload", + ), + ArgValidatorBool("fcoe_mtu", default_value=None), + ArgValidatorDeprecated("fcoe-mtu", deprecated_by="fcoe_mtu"), ArgValidatorBool("gro", default_value=None), ArgValidatorBool("gso", default_value=None), ArgValidatorBool("highdma", default_value=None), - ArgValidatorBool("hw-tc-offload", default_value=None), - ArgValidatorBool("l2-fwd-offload", default_value=None), + ArgValidatorBool("hw_tc_offload", default_value=None), + ArgValidatorDeprecated("hw-tc-offload", deprecated_by="hw_tc_offload"), + ArgValidatorBool("l2_fwd_offload", default_value=None), + ArgValidatorDeprecated( + "l2-fwd-offload", deprecated_by="l2_fwd_offload" 
+ ), ArgValidatorBool("loopback", default_value=None), ArgValidatorBool("lro", default_value=None), ArgValidatorBool("ntuple", default_value=None), ArgValidatorBool("rx", default_value=None), ArgValidatorBool("rxhash", default_value=None), ArgValidatorBool("rxvlan", default_value=None), - ArgValidatorBool("rx-all", default_value=None), - ArgValidatorBool("rx-fcs", default_value=None), - ArgValidatorBool("rx-gro-hw", default_value=None), - ArgValidatorBool("rx-udp_tunnel-port-offload", default_value=None), - ArgValidatorBool("rx-vlan-filter", default_value=None), - ArgValidatorBool("rx-vlan-stag-filter", default_value=None), - ArgValidatorBool("rx-vlan-stag-hw-parse", default_value=None), + ArgValidatorBool("rx_all", default_value=None), + ArgValidatorDeprecated("rx-all", deprecated_by="rx_all"), + ArgValidatorBool("rx_fcs", default_value=None), + ArgValidatorDeprecated("rx-fcs", deprecated_by="rx_fcs"), + ArgValidatorBool("rx_gro_hw", default_value=None), + ArgValidatorDeprecated("rx-gro-hw", deprecated_by="rx_gro_hw"), + ArgValidatorBool("rx_udp_tunnel_port_offload", default_value=None), + ArgValidatorDeprecated( + "rx-udp_tunnel-port-offload", + deprecated_by="rx_udp_tunnel_port_offload", + ), + ArgValidatorBool("rx_vlan_filter", default_value=None), + ArgValidatorDeprecated( + "rx-vlan-filter", deprecated_by="rx_vlan_filter" + ), + ArgValidatorBool("rx_vlan_stag_filter", default_value=None), + ArgValidatorDeprecated( + "rx-vlan-stag-filter", + deprecated_by="rx_vlan_stag_filter", + ), + ArgValidatorBool("rx_vlan_stag_hw_parse", default_value=None), + ArgValidatorDeprecated( + "rx-vlan-stag-hw-parse", + deprecated_by="rx_vlan_stag_hw_parse", + ), ArgValidatorBool("sg", default_value=None), - ArgValidatorBool("tls-hw-record", default_value=None), - ArgValidatorBool("tls-hw-tx-offload", default_value=None), + ArgValidatorBool("tls_hw_record", default_value=None), + ArgValidatorDeprecated("tls-hw-record", deprecated_by="tls_hw_record"), + 
ArgValidatorBool("tls_hw_tx_offload", default_value=None), + ArgValidatorDeprecated( + "tls-hw-tx-offload", + deprecated_by="tls_hw_tx_offload", + ), ArgValidatorBool("tso", default_value=None), ArgValidatorBool("tx", default_value=None), ArgValidatorBool("txvlan", default_value=None), - ArgValidatorBool("tx-checksum-fcoe-crc", default_value=None), - ArgValidatorBool("tx-checksum-ipv4", default_value=None), - ArgValidatorBool("tx-checksum-ipv6", default_value=None), - ArgValidatorBool("tx-checksum-ip-generic", default_value=None), - ArgValidatorBool("tx-checksum-sctp", default_value=None), - ArgValidatorBool("tx-esp-segmentation", default_value=None), - ArgValidatorBool("tx-fcoe-segmentation", default_value=None), - ArgValidatorBool("tx-gre-csum-segmentation", default_value=None), - ArgValidatorBool("tx-gre-segmentation", default_value=None), - ArgValidatorBool("tx-gso-partial", default_value=None), - ArgValidatorBool("tx-gso-robust", default_value=None), - ArgValidatorBool("tx-ipxip4-segmentation", default_value=None), - ArgValidatorBool("tx-ipxip6-segmentation", default_value=None), - ArgValidatorBool("tx-nocache-copy", default_value=None), - ArgValidatorBool("tx-scatter-gather", default_value=None), - ArgValidatorBool("tx-scatter-gather-fraglist", default_value=None), - ArgValidatorBool("tx-sctp-segmentation", default_value=None), - ArgValidatorBool("tx-tcp6-segmentation", default_value=None), - ArgValidatorBool("tx-tcp-ecn-segmentation", default_value=None), - ArgValidatorBool("tx-tcp-mangleid-segmentation", default_value=None), - ArgValidatorBool("tx-tcp-segmentation", default_value=None), - ArgValidatorBool("tx-udp-segmentation", default_value=None), - ArgValidatorBool("tx-udp_tnl-csum-segmentation", default_value=None), - ArgValidatorBool("tx-udp_tnl-segmentation", default_value=None), - ArgValidatorBool("tx-vlan-stag-hw-insert", default_value=None), + ArgValidatorBool("tx_checksum_fcoe_crc", default_value=None), + ArgValidatorDeprecated( + 
"tx-checksum-fcoe-crc", + deprecated_by="tx_checksum_fcoe_crc", + ), + ArgValidatorBool("tx_checksum_ipv4", default_value=None), + ArgValidatorDeprecated( + "tx-checksum-ipv4", + deprecated_by="tx_checksum_ipv4", + ), + ArgValidatorBool("tx_checksum_ipv6", default_value=None), + ArgValidatorDeprecated( + "tx-checksum-ipv6", + deprecated_by="tx_checksum_ipv6", + ), + ArgValidatorBool("tx_checksum_ip_generic", default_value=None), + ArgValidatorDeprecated( + "tx-checksum-ip-generic", + deprecated_by="tx_checksum_ip_generic", + ), + ArgValidatorBool("tx_checksum_sctp", default_value=None), + ArgValidatorDeprecated( + "tx-checksum-sctp", + deprecated_by="tx_checksum_sctp", + ), + ArgValidatorBool("tx_esp_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-esp-segmentation", + deprecated_by="tx_esp_segmentation", + ), + ArgValidatorBool("tx_fcoe_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-fcoe-segmentation", + deprecated_by="tx_fcoe_segmentation", + ), + ArgValidatorBool("tx_gre_csum_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-gre-csum-segmentation", + deprecated_by="tx_gre_csum_segmentation", + ), + ArgValidatorBool("tx_gre_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-gre-segmentation", + deprecated_by="tx_gre_segmentation", + ), + ArgValidatorBool("tx_gso_partial", default_value=None), + ArgValidatorDeprecated( + "tx-gso-partial", deprecated_by="tx_gso_partial" + ), + ArgValidatorBool("tx_gso_robust", default_value=None), + ArgValidatorDeprecated("tx-gso-robust", deprecated_by="tx_gso_robust"), + ArgValidatorBool("tx_ipxip4_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-ipxip4-segmentation", + deprecated_by="tx_ipxip4_segmentation", + ), + ArgValidatorBool("tx_ipxip6_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-ipxip6-segmentation", + deprecated_by="tx_ipxip6_segmentation", + ), + ArgValidatorBool("tx_nocache_copy", default_value=None), + 
ArgValidatorDeprecated( + "tx-nocache-copy", + deprecated_by="tx_nocache_copy", + ), + ArgValidatorBool("tx_scatter_gather", default_value=None), + ArgValidatorDeprecated( + "tx-scatter-gather", + deprecated_by="tx_scatter_gather", + ), + ArgValidatorBool("tx_scatter_gather_fraglist", default_value=None), + ArgValidatorDeprecated( + "tx-scatter-gather-fraglist", + deprecated_by="tx_scatter_gather_fraglist", + ), + ArgValidatorBool("tx_sctp_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-sctp-segmentation", + deprecated_by="tx_sctp_segmentation", + ), + ArgValidatorBool("tx_tcp6_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-tcp6-segmentation", + deprecated_by="tx_tcp6_segmentation", + ), + ArgValidatorBool("tx_tcp_ecn_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-tcp-ecn-segmentation", + deprecated_by="tx_tcp_ecn_segmentation", + ), + ArgValidatorBool("tx_tcp_mangleid_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-tcp-mangleid-segmentation", + deprecated_by="tx_tcp_mangleid_segmentation", + ), + ArgValidatorBool("tx_tcp_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-tcp-segmentation", + deprecated_by="tx_tcp_segmentation", + ), + ArgValidatorBool("tx_udp_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-udp-segmentation", + deprecated_by="tx_udp_segmentation", + ), + ArgValidatorBool("tx_udp_tnl_csum_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-udp_tnl-csum-segmentation", + deprecated_by="tx_udp_tnl_csum_segmentation", + ), + ArgValidatorBool("tx_udp_tnl_segmentation", default_value=None), + ArgValidatorDeprecated( + "tx-udp_tnl-segmentation", + deprecated_by="tx_udp_tnl_segmentation", + ), + ArgValidatorBool("tx_vlan_stag_hw_insert", default_value=None), + ArgValidatorDeprecated( + "tx-vlan-stag-hw-insert", + deprecated_by="tx_vlan_stag_hw_insert", + ), + ], + ) + self.default_value = dict( + [ + (name, 
validator.default_value) + for name, validator in self.nested.items() + if not isinstance(validator, ArgValidatorDeprecated) + ] + ) + + +class ArgValidator_DictEthtoolCoalesce(ArgValidatorDict): + def __init__(self): + ArgValidatorDict.__init__( + self, + name="coalesce", + nested=[ + ArgValidatorBool("adaptive_rx", default_value=None), + ArgValidatorBool("adaptive_tx", default_value=None), + ArgValidatorNum( + "pkt_rate_high", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "pkt_rate_low", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "rx_frames", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "rx_frames_high", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "rx_frames_irq", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "rx_frames_low", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "rx_usecs", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "rx_usecs_high", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "rx_usecs_irq", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "rx_usecs_low", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "sample_interval", + val_min=0, + val_max=UINT32_MAX, + default_value=None, + ), + ArgValidatorNum( + "stats_block_usecs", + val_min=0, + val_max=UINT32_MAX, + default_value=None, + ), + ArgValidatorNum( + "tx_frames", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "tx_frames_high", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "tx_frames_irq", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "tx_frames_low", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "tx_usecs", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + 
"tx_usecs_high", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "tx_usecs_irq", val_min=0, val_max=UINT32_MAX, default_value=None + ), + ArgValidatorNum( + "tx_usecs_low", val_min=0, val_max=UINT32_MAX, default_value=None + ), ], ) self.default_value = dict( @@ -698,6 +1096,108 @@ class ArgValidator_DictMacvlan(ArgValidatorDict): return result +class ArgValidatorPath(ArgValidatorStr): + """ + Valides that the value is a valid posix absolute path + """ + + def __init__(self, name, required=False, default_value=None): + ArgValidatorStr.__init__(self, name, required, default_value, None) + + def _validate_impl(self, value, name): + ArgValidatorStr._validate_impl(self, value, name) + + if posixpath.isabs(value) is False: + raise ValidationError( + name, + "value '%s' is not a valid posix absolute path" % (value), + ) + return value + + +class ArgValidator_Dict802_1X(ArgValidatorDict): + + VALID_EAP_TYPES = ["tls"] + + VALID_PRIVATE_KEY_FLAGS = ["none", "agent-owned", "not-saved", "not-required"] + + def __init__(self): + ArgValidatorDict.__init__( + self, + name="ieee802_1x", + nested=[ + ArgValidatorStr( + "eap", + enum_values=ArgValidator_Dict802_1X.VALID_EAP_TYPES, + default_value="tls", + ), + ArgValidatorStr("identity", required=True), + ArgValidatorPath("private_key", required=True), + ArgValidatorStr("private_key_password"), + ArgValidatorList( + "private_key_password_flags", + nested=ArgValidatorStr( + "private_key_password_flags[?]", + enum_values=ArgValidator_Dict802_1X.VALID_PRIVATE_KEY_FLAGS, + ), + default_value=None, + ), + ArgValidatorPath("client_cert", required=True), + ArgValidatorPath("ca_cert"), + ArgValidatorPath("ca_path"), + ArgValidatorBool("system_ca_certs", default_value=False), + ArgValidatorStr("domain_suffix_match", required=False), + ], + default_value=None, + ) + + def _validate_post(self, value, name, result): + if result["system_ca_certs"] is True and result["ca_path"] is not None: + raise ValidationError( + 
name, + "ca_path will be ignored by NetworkManager if system_ca_certs is used", + ) + return result + + +class ArgValidator_DictWireless(ArgValidatorDict): + + VALID_KEY_MGMT = [ + "wpa-psk", + "wpa-eap", + ] + + def __init__(self): + ArgValidatorDict.__init__( + self, + name="wireless", + nested=[ + ArgValidatorStr("ssid", max_length=32), + ArgValidatorStr( + "key_mgmt", enum_values=ArgValidator_DictWireless.VALID_KEY_MGMT + ), + ArgValidatorStr("password", default_value=None, max_length=63), + ], + default_value=None, + ) + + def _validate_post(self, value, name, result): + if result["key_mgmt"] == "wpa-psk": + if result["password"] is None: + raise ValidationError( + name, + "must supply a password if using 'wpa-psk' key management", + ) + else: + if result["password"] is not None: + raise ValidationError( + name, + "password only allowed if using 'wpa-psk' key management", + ) + + return result + + class ArgValidator_DictConnection(ArgValidatorDict): VALID_PERSISTENT_STATES = ["absent", "present"] @@ -710,8 +1210,10 @@ class ArgValidator_DictConnection(ArgValidatorDict): "bond", "vlan", "macvlan", + "wireless", + "dummy", ] - VALID_SLAVE_TYPES = ["bridge", "bond", "team"] + VALID_PORT_TYPES = ["bridge", "bond", "team"] def __init__(self): ArgValidatorDict.__init__( @@ -739,10 +1241,15 @@ class ArgValidator_DictConnection(ArgValidatorDict): ), ArgValidatorBool("autoconnect", default_value=True), ArgValidatorStr( - "slave_type", - enum_values=ArgValidator_DictConnection.VALID_SLAVE_TYPES, + "port_type", + enum_values=ArgValidator_DictConnection.VALID_PORT_TYPES, ), - ArgValidatorStr("master"), + ArgValidatorDeprecated( + "slave_type", + deprecated_by="port_type", + ), + ArgValidatorStr("controller"), + ArgValidatorDeprecated("master", deprecated_by="controller"), ArgValidatorStr("interface_name", allow_empty=True), ArgValidatorMac("mac"), ArgValidatorNum( @@ -759,6 +1266,8 @@ class ArgValidator_DictConnection(ArgValidatorDict): ArgValidator_DictInfiniband(), 
ArgValidator_DictVlan(), ArgValidator_DictMacvlan(), + ArgValidator_Dict802_1X(), + ArgValidator_DictWireless(), # deprecated options: ArgValidatorStr( "infiniband_transport_mode", @@ -792,14 +1301,18 @@ class ArgValidator_DictConnection(ArgValidatorDict): """ actions = [] state = result.get("state") - if state in self.VALID_PERSISTENT_STATES: - del result["state"] - persistent_state_default = state - state = None - else: - persistent_state_default = None + persistent_state = result.get("persistent_state") - persistent_state = result.get("persistent_state", persistent_state_default) + if state in self.VALID_PERSISTENT_STATES: + if persistent_state: + raise ValidationError( + name, + "State cannot be '{0}' if persistent_state is specified".format( + state + ), + ) + persistent_state = state + state = None # default persistent_state to present (not done via default_value in the # ArgValidatorStr, the value will only be set at the end of @@ -807,23 +1320,19 @@ class ArgValidator_DictConnection(ArgValidatorDict): if not persistent_state: persistent_state = "present" - # If the profile is present, it should be ensured first - if persistent_state == "present": - actions.append(persistent_state) - # If the profile should be absent at the end, it needs to be present in - # the meantime to allow to (de)activate it - if persistent_state == "absent" and state: + # the meantime to allow to (de)activate it. This is only possible if it + # is completely defined, for which `type` needs to be specified. 
+ # Otherwise, downing is happening on a best-effort basis + if persistent_state == "absent" and state and result.get("type"): actions.append("present") + actions.append(persistent_state) + # Change the runtime state if necessary if state: actions.append(state) - # Remove the profile in the end if requested - if persistent_state == "absent": - actions.append(persistent_state) - result["state"] = state result["persistent_state"] = persistent_state result["actions"] = actions @@ -891,39 +1400,72 @@ class ArgValidator_DictConnection(ArgValidatorDict): self.VALID_FIELDS = valid_fields return result + def _validate_post_wireless(self, value, name, result): + """ + Validate wireless settings + """ + if "type" in result: + if result["type"] == "wireless": + if "wireless" in result: + if ( + result["wireless"]["key_mgmt"] == "wpa-eap" + and "ieee802_1x" not in result + ): + raise ValidationError( + name + ".wireless", + "key management set to wpa-eap but no " + "'ieee802_1x' settings defined", + ) + else: + raise ValidationError( + name + ".wireless", + "must define 'wireless' settings for 'type' 'wireless'", + ) + + else: + if "wireless" in result: + raise ValidationError( + name + ".wireless", + "'wireless' settings are not allowed for 'type' '%s'" + % (result["type"]), + ) + + return result + def _validate_post(self, value, name, result): result = self._validate_post_state(value, name, result) result = self._validate_post_fields(value, name, result) + result = self._validate_post_wireless(value, name, result) if "type" in result: - if "master" in result: - if "slave_type" not in result: - result["slave_type"] = None - if result["master"] == result["name"]: + if "controller" in result: + if "port_type" not in result: + result["port_type"] = None + if result["controller"] == result["name"]: raise ValidationError( - name + ".master", '"master" cannot refer to itself' + name + ".controller", '"controller" cannot refer to itself' ) else: - if "slave_type" in result: + if 
"port_type" in result: raise ValidationError( - name + ".slave_type", - "'slave_type' requires a 'master' property", + name + ".port_type", + "'port_type' requires a 'controller' property", ) if "ip" in result: - if "master" in result: + if "controller" in result: raise ValidationError( - name + ".ip", 'a slave cannot have an "ip" property' + name + ".ip", 'a port cannot have an "ip" property' ) else: - if "master" not in result: + if "controller" not in result: result["ip"] = self.nested["ip"].get_default_value() if "zone" in result: - if "master" in result: + if "controller" in result: raise ValidationError( - name + ".zone", '"zone" cannot be configured for slave types' + name + ".zone", '"zone" cannot be configured for port types' ) else: result["zone"] = None @@ -1109,13 +1651,23 @@ class ArgValidator_DictConnection(ArgValidatorDict): % (result["type"]), ) - for k in self.VALID_FIELDS: - if k in result: + if "ieee802_1x" in result and result["type"] not in [ + "ethernet", + "wireless", + ]: + raise ValidationError( + name + ".ieee802_1x", + "802.1x settings only allowed for ethernet or wireless interfaces.", + ) + + for name in self.VALID_FIELDS: + if name in result: continue - v = self.nested[k] - vv = v.get_default_value() - if vv is not ArgValidator.MISSING: - result[k] = vv + validator = self.nested[name] + if not isinstance(validator, ArgValidatorDeprecated): + value = validator.get_default_value() + if value is not ArgValidator.MISSING: + result[name] = value return result @@ -1132,33 +1684,34 @@ class ArgValidator_ListConnections(ArgValidatorList): def _validate_post(self, value, name, result): for idx, connection in enumerate(result): if "type" in connection: - if connection["master"]: + if connection["controller"]: c = ArgUtil.connection_find_by_name( - connection["master"], result, idx + connection["controller"], result, idx ) if not c: raise ValidationError( - name + "[" + str(idx) + "].master", - "references non-existing 'master' connection '%s'" - 
% (connection["master"]), + name + "[" + str(idx) + "].controller", + "references non-existing 'controller' connection '%s'" + % (connection["controller"]), ) - if c["type"] not in ArgValidator_DictConnection.VALID_SLAVE_TYPES: + if c["type"] not in ArgValidator_DictConnection.VALID_PORT_TYPES: raise ValidationError( - name + "[" + str(idx) + "].master", - "references 'master' connection '%s' which is not a master " - "type by '%s'" % (connection["master"], c["type"]), + name + "[" + str(idx) + "].controller", + "references 'controller' connection '%s' which is " + "not a controller " + "type by '%s'" % (connection["controller"], c["type"]), ) - if connection["slave_type"] is None: - connection["slave_type"] = c["type"] - elif connection["slave_type"] != c["type"]: + if connection["port_type"] is None: + connection["port_type"] = c["type"] + elif connection["port_type"] != c["type"]: raise ValidationError( - name + "[" + str(idx) + "].master", - "references 'master' connection '%s' which is of type '%s' " - "instead of slave_type '%s'" + name + "[" + str(idx) + "].controller", + "references 'controller' connection '%s' which is " + "of type '%s' instead of port_type '%s'" % ( - connection["master"], + connection["controller"], c["type"], - connection["slave_type"], + connection["port_type"], ), ) if connection["parent"]: @@ -1191,7 +1744,9 @@ class ArgValidator_ListConnections(ArgValidatorList): ) ): try: - ArgUtil.connection_find_master(connection["parent"], connections, idx) + ArgUtil.connection_find_controller( + connection["parent"], connections, idx + ) except MyError: raise ValidationError.from_connection( idx, @@ -1199,12 +1754,51 @@ class ArgValidator_ListConnections(ArgValidatorList): "missing" % (connection["parent"]), ) - if (connection["master"]) and (mode == self.VALIDATE_ONE_MODE_INITSCRIPTS): + if (connection["controller"]) and (mode == self.VALIDATE_ONE_MODE_INITSCRIPTS): try: - ArgUtil.connection_find_master(connection["master"], connections, idx) 
+ ArgUtil.connection_find_controller( + connection["controller"], connections, idx + ) except MyError: raise ValidationError.from_connection( idx, - "profile references a master '%s' which has 'interface_name' " - "missing" % (connection["master"]), + "profile references a controller '%s' which has 'interface_name' " + "missing" % (connection["controller"]), + ) + + # check if 802.1x connection is valid + if connection["ieee802_1x"]: + if mode == self.VALIDATE_ONE_MODE_INITSCRIPTS: + raise ValidationError.from_connection( + idx, + "802.1x authentication is not supported by initscripts. " + "Configure 802.1x in /etc/wpa_supplicant.conf " + "if you need to use initscripts.", + ) + + # check if wireless connection is valid + if connection["type"] == "wireless": + if mode == self.VALIDATE_ONE_MODE_INITSCRIPTS: + raise ValidationError.from_connection( + idx, + "Wireless WPA auth is not supported by initscripts. " + "Configure wireless connection in /etc/wpa_supplicant.conf " + "if you need to use initscripts.", + ) + + # initscripts does not support ip.dns_options, so raise errors when network + # provider is initscripts + if connection["ip"]["dns_options"]: + if mode == self.VALIDATE_ONE_MODE_INITSCRIPTS: + raise ValidationError.from_connection( + idx, + "ip.dns_options is not supported by initscripts.", + ) + # initscripts does not support ip.ipv6_disabled, so raise errors when network + # provider is initscripts + if connection["ip"]["ipv6_disabled"]: + if mode == self.VALIDATE_ONE_MODE_INITSCRIPTS: + raise ValidationError.from_connection( + idx, + "ip.ipv6_disabled is not supported by initscripts.", ) diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py index 9f9b028..c75242a 100644 --- a/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py +++ b/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py @@ -2,22 +2,39 @@ """ 
Support for NetworkManager aka the NM provider """ # pylint: disable=import-error, no-name-in-module -from ansible.module_utils.network_lsr.utils import Util +from ansible.module_utils.network_lsr.utils import Util # noqa:E501 ETHTOOL_FEATURE_PREFIX = "ETHTOOL_OPTNAME_FEATURE_" +ETHTOOL_COALESCE_PREFIX = "ETHTOOL_OPTNAME_COALESCE_" def get_nm_ethtool_feature(name): """ - Translate ethtool feature into Network Manager name + Translate ethtool feature into Network Manager name - :param name: Name of the feature - :type name: str - :returns: Name of the feature to be used by `NM.SettingEthtool.set_feature()` - :rtype: str + :param name: Name of the feature + :type name: str + :returns: Name of the feature to be used by `NM.SettingEthtool.set_feature()` + :rtype: str """ - name = ETHTOOL_FEATURE_PREFIX + name.upper().replace("-", "_") + name = ETHTOOL_FEATURE_PREFIX + name.upper() feature = getattr(Util.NM(), name, None) return feature + + +def get_nm_ethtool_coalesce(name): + """ + Translate ethtool coalesce into Network Manager name + + :param name: Name of the coalesce + :type name: str + :returns: Name of the setting to be used by `NM.SettingEthtool.set_coalesce()` + :rtype: str + """ + + name = ETHTOOL_COALESCE_PREFIX + name.upper() + + coalesce = getattr(Util.NM(), name, None) + return coalesce diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/utils.py b/roles/linux-system-roles.network/module_utils/network_lsr/utils.py index bd1887d..73d9528 100644 --- a/roles/linux-system-roles.network/module_utils/network_lsr/utils.py +++ b/roles/linux-system-roles.network/module_utils/network_lsr/utils.py @@ -2,13 +2,12 @@ # SPDX-License-Identifier: BSD-3-Clause # vim: fileencoding=utf8 -import os import socket import sys import uuid # pylint: disable=import-error, no-name-in-module -from ansible.module_utils.network_lsr import MyError +from ansible.module_utils.network_lsr import MyError # noqa:E501 class Util: @@ -25,19 +24,31 @@ class Util: return 
default @staticmethod - def check_output(argv): - # subprocess.check_output is python 2.7. - with open("/dev/null", "wb") as DEVNULL: - import subprocess + def path_to_glib_bytes(path): + """ + Converts a path to a GLib.Bytes object that can be accepted by NM + """ + return Util.GLib().Bytes.new(("file://%s\x00" % path).encode("utf-8")) - env = os.environ.copy() - env["LANG"] = "C" - p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=DEVNULL, env=env) - # FIXME: Can we assume this to always be UTF-8? - out = p.communicate()[0].decode("UTF-8") - if p.returncode != 0: - raise MyError("failure calling %s: exit with %s" % (argv, p.returncode)) - return out + @staticmethod + def convert_passwd_flags_nm(secret_flags): + """ + Converts an array of "secret flags" strings + to an integer represantion understood by NetworkManager + """ + + flag_int = 0 + + if "none" in secret_flags: + flag_int += 0 + if "agent-owned" in secret_flags: + flag_int += 1 + if "not-saved" in secret_flags: + flag_int += 2 + if "not-required" in secret_flags: + flag_int += 4 + + return flag_int @classmethod def create_uuid(cls): @@ -147,7 +158,7 @@ class Util: if not cls.GMainLoop_run(mainloop_timeout): cancellable.cancel() - raise MyError("failure to call %s.%s(): timeout" % object_, async_action) + raise MyError("failure to call %s.%s(): timeout" % (object_, async_action)) success = user_data.get("success", None) if success is not None: @@ -249,7 +260,8 @@ class Util: def mac_ntoa(mac): if mac is None: return None - return ":".join(["%02x" % c for c in mac]) + # bytearray() is needed for python2 compatibility + return ":".join(["%02x" % c for c in bytearray(mac)]) @staticmethod def mac_norm(mac_str, force_len=None): diff --git a/roles/linux-system-roles.network/molecule/default/Dockerfile.j2 b/roles/linux-system-roles.network/molecule/default/Dockerfile.j2 index 0a60553..7d2fbe8 100644 --- a/roles/linux-system-roles.network/molecule/default/Dockerfile.j2 +++ 
b/roles/linux-system-roles.network/molecule/default/Dockerfile.j2 @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: MIT # Molecule managed {% if item.registry is defined %} @@ -6,9 +7,22 @@ FROM {{ item.registry.url }}/{{ item.image }} FROM {{ item.image }} {% endif %} -RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \ - elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python2-dnf bash && dnf clean all; \ - elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ +RUN set -euo pipefail; \ + pkgs="python sudo yum-plugin-ovl bash"; \ + if grep 'CentOS release 6' /etc/centos-release > /dev/null 2>&1; then \ + for file in /etc/yum.repos.d/CentOS-*.repo; do \ + if ! grep '^baseurl=.*vault[.]centos[.]org' "$file"; then \ + sed -i -e 's,^mirrorlist,#mirrorlist,' \ + -e 's,^#baseurl=,baseurl=,' \ + -e 's,mirror.centos.org/centos/$releasever,vault.centos.org/6.10,' \ + "$file"; \ + fi; \ + done; \ + pkgs="$pkgs upstart chkconfig initscripts"; \ + fi; \ + if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \ + elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python3 sudo python3-devel python3-dnf bash && dnf clean all; \ + elif [ $(command -v yum) ]; then yum makecache fast && yum install -y $pkgs && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \ elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi diff --git 
a/roles/linux-system-roles.network/molecule/default/molecule.yml b/roles/linux-system-roles.network/molecule/default/molecule.yml index 066964a..91fc962 100644 --- a/roles/linux-system-roles.network/molecule/default/molecule.yml +++ b/roles/linux-system-roles.network/molecule/default/molecule.yml @@ -1,26 +1,31 @@ +# SPDX-License-Identifier: MIT --- dependency: name: galaxy driver: - name: docker -lint: - name: yamllint - options: - config-file: molecule/default/yamllint.yml + name: ${LSR_MOLECULE_DRIVER:-docker} platforms: - name: centos-6 - image: linuxsystemroles/centos-6 - privileged: true - - name: centos-7 - image: linuxsystemroles/centos-7 + image: registry.centos.org/centos:6 volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro privileged: true + command: /sbin/init + - name: centos-7 + image: registry.centos.org/centos/systemd:latest + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + command: /usr/lib/systemd/systemd --system + - name: centos-8 + image: registry.centos.org/centos:8 + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + privileged: true + command: /usr/lib/systemd/systemd --system provisioner: name: ansible log: true - lint: - name: ansible-lint playbooks: converge: ../../tests/tests_default.yml scenario: @@ -32,7 +37,3 @@ scenario: - idempotence - check - destroy -verifier: - name: testinfra - lint: - name: flake8 diff --git a/roles/linux-system-roles.network/molecule/default/yamllint.yml b/roles/linux-system-roles.network/molecule/default/yamllint.yml deleted file mode 100644 index e00a5a9..0000000 --- a/roles/linux-system-roles.network/molecule/default/yamllint.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -extends: default -rules: - braces: - max-spaces-inside: 1 - level: error - brackets: - max-spaces-inside: 1 - level: error - line-length: disable - truthy: disable - document-start: disable diff --git a/roles/linux-system-roles.network/pylintrc b/roles/linux-system-roles.network/pylintrc index 2f07798..3cd3739 100644 --- 
a/roles/linux-system-roles.network/pylintrc +++ b/roles/linux-system-roles.network/pylintrc @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: MIT + # This file was generated using `pylint --generate-rcfile > pylintrc` command. [MASTER] @@ -8,7 +10,7 @@ extension-pkg-whitelist= # Add files or directories to the blacklist. They should be base names, not # paths. -ignore=CVS +ignore=.git,.tox # Add files or directories matching the regex patterns to the blacklist. The # regex matches against base names, not paths. @@ -16,8 +18,7 @@ ignore-patterns= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). -init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()) + '/library'); sys.path.append(os.path.dirname(find_pylintrc()) + '/module_utils'); sys.path.append(os.path.dirname(find_pylintrc()) + '/tests')" - +#init-hook= # Use multiple processes to speed up Pylint. jobs=1 @@ -56,7 +57,7 @@ confidence= # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable= +disable=wrong-import-position #disable=print-statement, # parameter-unpacking, # unpacking-in-except, @@ -246,7 +247,7 @@ indent-after-paren=4 indent-string=' ' # Maximum number of characters on a single line. 
-max-line-length=100 +max-line-length=88 # Maximum number of lines in a module max-module-lines=1000 diff --git a/roles/linux-system-roles.network/tasks/main.yml b/roles/linux-system-roles.network/tasks/main.yml index f7f041f..dfc5481 100644 --- a/roles/linux-system-roles.network/tasks/main.yml +++ b/roles/linux-system-roles.network/tasks/main.yml @@ -23,6 +23,23 @@ state: present when: - not network_packages is subset(ansible_facts.packages.keys()) + register: __network_package_install + +# If network packages changed and wireless or team connections are specified, +# NetworkManager must be restarted +- name: Restart NetworkManager due to wireless or team interfaces + service: + name: NetworkManager + state: restarted + when: + - __network_wireless_connections_defined + or __network_team_connections_defined + - network_provider == "nm" + - network_allow_restart + # ansible-lint wants this to be a handler, but this is not appropriate as + # NetworkManager must be restarted prior to the connections being created. 
+ # see (https://docs.ansible.com/ansible-lint/rules/default_rules.html) + - __network_package_install.changed # noqa 503 - name: Enable and start NetworkManager service: @@ -31,6 +48,18 @@ enabled: true when: - network_provider == "nm" + no_log: true + +# If any 802.1x connections are used, the wpa_supplicant +# service is required to be running +- name: Enable and start wpa_supplicant + service: + name: wpa_supplicant + state: started + enabled: true + when: + - network_provider == "nm" + - __network_wpa_supplicant_required - name: Enable network service service: @@ -38,11 +67,13 @@ enabled: true when: - network_provider == "initscripts" + no_log: true - name: Ensure initscripts network file dependency is present copy: dest: /etc/sysconfig/network content: "# Created by network system role" + mode: "0644" force: false when: - network_provider == "initscripts" @@ -53,6 +84,11 @@ ignore_errors: "{{ network_ignore_errors | default(omit) }}" force_state_change: "{{ network_force_state_change | default(omit) }}" connections: "{{ network_connections | default([]) }}" + __debug_flags: "{{ __network_debug_flags | default(omit) }}" + register: __network_connections_result + +- name: Show debug messages + debug: var=__network_connections_result - name: Re-test connectivity ping: diff --git a/roles/linux-system-roles.network/tests/ansible_module_network_connections.py b/roles/linux-system-roles.network/tests/ansible_module_network_connections.py deleted file mode 120000 index b30a744..0000000 --- a/roles/linux-system-roles.network/tests/ansible_module_network_connections.py +++ /dev/null @@ -1 +0,0 @@ -roles/linux-system-roles.network/library/network_connections.py \ No newline at end of file diff --git a/roles/linux-system-roles.network/tests/down-profile.yml b/roles/linux-system-roles.network/tests/down-profile.yml deleted file mode 100644 index 5087240..0000000 --- a/roles/linux-system-roles.network/tests/down-profile.yml +++ /dev/null @@ -1,10 +0,0 @@ -# 
SPDX-License-Identifier: BSD-3-Clause ---- -- name: Set {{ profile }} down - hosts: all - vars: - network_connections: - - name: "{{ profile }}" - state: down - roles: - - linux-system-roles.network diff --git a/roles/linux-system-roles.network/tests/ensure_non_running_provider.py b/roles/linux-system-roles.network/tests/ensure_non_running_provider.py deleted file mode 100755 index 9048c90..0000000 --- a/roles/linux-system-roles.network/tests/ensure_non_running_provider.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python3 -# SPDX-License-Identifier: BSD-3-Clause -""" Check that there is a playbook to run all role tests with the non-default -provider as well """ -# vim: fileencoding=utf8 - -import glob -import os -import sys - - -import yaml - -OTHER_PROVIDER_SUFFIX = "_other_provider.yml" - -IGNORE = [ - "tests_helpers-and-asserts.yml", - "tests_states.yml", - "tests_unit.yml", - "tests_vlan_mtu_initscripts.yml", - "tests_vlan_mtu_nm.yml", - "tests_ethtool_features_initscripts.yml", - "tests_ethtool_features_nm.yml", -] - -OTHER_PLAYBOOK = """ -# SPDX-License-Identifier: BSD-3-Clause ---- -- name: Run playbook '{tests_playbook}' with non-default provider - hosts: all - vars: - network_provider_current: - tasks: - # required for the code to set network_provider_current - - name: Get service facts - service_facts: - - name: Set network provider - set_fact: - network_provider: '{{{{ "initscripts" if network_provider_current == "nm" - else "nm" }}}}' - -- import_playbook: "{tests_playbook}" - when: - - ansible_distribution_major_version != '6' -""" # noqa: E501 # ignore that the line is too long - - -def get_current_provider_code(): - with open("../defaults/main.yml") as defaults: - yaml_defaults = yaml.safe_load(defaults) - current_provider = yaml_defaults["network_provider_current"] - return current_provider - - -def generate_nominal_other_playbook(tests_playbook): - nominal_other_testfile_data = OTHER_PLAYBOOK.format(tests_playbook=tests_playbook) - nominal = 
yaml.safe_load(nominal_other_testfile_data) - nominal[0]["vars"]["network_provider_current"] = get_current_provider_code() - return yaml.dump(nominal, default_flow_style=False, explicit_start=True, width=80) - - -def main(): - testsfiles = glob.glob("tests_*.yml") - missing = [] - returncode = 0 - - # Generate files when specified - generate = bool(len(sys.argv) > 1 and sys.argv[1] == "generate") - - if not testsfiles: - print("ERROR: No tests found") - returncode = 1 - - for filename in testsfiles: - if filename.endswith(OTHER_PROVIDER_SUFFIX): - continue - - if filename in IGNORE: - continue - - fileroot = os.path.splitext(filename)[0] - other_testfile = fileroot + OTHER_PROVIDER_SUFFIX - nominal_other_testfile_data = generate_nominal_other_playbook(filename) - - if generate: - with open(other_testfile, "w") as ofile: - ofile.write(nominal_other_testfile_data) - - if other_testfile not in testsfiles and not generate: - missing.append(filename) - else: - with open(other_testfile) as ifile: - testdata = ifile.read() - if testdata != nominal_other_testfile_data: - print( - "ERROR: Playbook does not match nominal value " + other_testfile - ) - returncode = 1 - - if missing: - print("ERROR: No tests for other provider found for:\n" + ", \n".join(missing)) - print("Try to generate them with '{} generate'".format(sys.argv[0])) - returncode = 1 - - return returncode - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/roles/linux-system-roles.network/tests/get-coverage.sh b/roles/linux-system-roles.network/tests/get-coverage.sh deleted file mode 100755 index 7b6cd21..0000000 --- a/roles/linux-system-roles.network/tests/get-coverage.sh +++ /dev/null @@ -1,68 +0,0 @@ -#! 
/bin/bash -# SPDX-License-Identifier: BSD-3-Clause - -if [ -n "${DEBUG}" ] -then - set -x -fi -set -e - -if [ "$#" -lt 2 ] -then - echo "USAGE: ${0} host playbook" - echo "Get coverage info from host for playbook" - exit 1 -fi - -host="${1}" -shift -playbook="${1}" - -coverage_data="remote-coveragedata-${host}-${playbook%.yml}" -coverage="/root/.local/bin/coverage" - -echo "Getting coverage for ${playbook} on ${host}" >&2 - -call_ansible() { - local module="${1}" - shift - local args="${1}" - shift - ansible -m "${module}" -i "${host}", -a "${args}" all "${@}" -} - -remote_coverage_dir="$(mktemp -d /tmp/remote_coverage-XXXXXX)" -trap "rm -rf '${remote_coverage_dir}'" EXIT -ansible-playbook -i "${host}", get-coverage.yml -e "test_playbook=${playbook} destdir=${remote_coverage_dir}" - -#COVERAGE_FILE=remote-coverage coverage combine remote-coverage/tests_*/*/root/.coverage -./merge-coverage.sh coverage "${coverage_data}"-tmp $(find "${remote_coverage_dir}" -type f | tr , _) - -# When https://github.com/nedbat/coveragepy/pull/49 is merged, this can be simplified: -if false -then -cat > tmp_merge_coveragerc < tmp_merge_coveragerc <> tmp_merge_coveragerc -done -fi - -COVERAGE_FILE="${coverage_data}" coverage combine --rcfile tmp_merge_coveragerc "${coverage_data}"-tmp -rm tmp_merge_coveragerc - -COVERAGE_FILE="${coverage_data}" coverage report ||: -COVERAGE_FILE="${coverage_data}" coverage html --directory "htmlcov-${coverage_data}" ||: - -echo "Coverage collected in: ${coverage_data}" diff --git a/roles/linux-system-roles.network/tests/get-coverage.yml b/roles/linux-system-roles.network/tests/get-coverage.yml deleted file mode 100644 index 4845c62..0000000 --- a/roles/linux-system-roles.network/tests/get-coverage.yml +++ /dev/null @@ -1,66 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -# This expects the variable test_playbook to be set from the outside -- name: Prepare for coverage extraction - hosts: all - tasks: - # Use set_fact to set variables to make them 
available in all plays - # 'vars:' Would only set variables for the current play - - name: set facts - set_fact: - coverage_module: network_connections - coverage: /root/.local/bin/coverage - destdir: "remote_coverage/{{ test_playbook }}" - - # This uses variables from the other set_fact task, therefore it needs to - # be its own task - - name: set more facts - set_fact: - coverage_file: ansible-coverage-{{ coverage_module }}-{{ test_playbook|replace('.yml', '') }} - - - name: debug info - debug: - msg: Getting coverage for '{{ coverage_module }}' with '{{ test_playbook }}' - - # combine data in case old data is left there - - command: "{{ coverage }} combine" - environment: - COVERAGE_FILE: "{{ coverage_file }}" - ignore_errors: yes - - - name: remove old data - file: - state: absent - path: "{{ coverage_file }}" - - - name: remove old data - shell: rm -f .coverage.* - - - name: copy coveragerc - copy: - content: "[run]\ndisable_warnings = no-data-collected\n" - dest: .coveragerc - - - name: install latest pip - pip: - name: coverage - extra_args: --user --upgrade - -- import_playbook: "{{ test_playbook }}" - vars: - ansible_python_interpreter: "{{ coverage }} run -p --include *ansible_module_{{ coverage_module }}.py" - -- name: Gather coverage data - hosts: all - tasks: - - shell: "{{ coverage }} combine .coverage.*" - environment: - COVERAGE_FILE: "{{ coverage_file }}" - -- name: Get coverage data - hosts: all - tasks: - - fetch: - src: "{{ coverage_file }}" - dest: "{{ destdir }}" - flat: no diff --git a/roles/linux-system-roles.network/tests/get-total-coverage.sh b/roles/linux-system-roles.network/tests/get-total-coverage.sh deleted file mode 100755 index c3dacfe..0000000 --- a/roles/linux-system-roles.network/tests/get-total-coverage.sh +++ /dev/null @@ -1,34 +0,0 @@ -#! 
/bin/bash -# SPDX-License-Identifier: BSD-3-Clause - -set -e -coverage_data=total-coveragedata -testhost="${1}" - -if [ "$#" -lt 1 ] -then - echo "USAGE: ${0} host" - echo "Get local and all remote coverage data for host" - exit 1 -fi - -rm -f remote-coveragedata* "${coveragedata}" - - -# collect pytest coverage -tox -e py26,py27,py36,py37 -- --cov-append - -for test_playbook in tests_*.yml -do - ./get-coverage.sh "${testhost}" "${test_playbook}" -done - -./merge-coverage.sh coverage "total-remote-coveragedata" remote-coveragedata-* -./covstats .coverage remote-coveragedata-* "total-remote-coveragedata" - -./merge-coverage.sh coverage "${coverage_data}" .coverage remote-coveragedata-* -echo "Total coverage:" -COVERAGE_FILE="${coverage_data}" coverage report ||: -COVERAGE_FILE="${coverage_data}" coverage html --directory "htmlcov-${coverage_data}" ||: -echo "Open HTML report with:" -echo "xdg-open htmlcov-${coverage_data}/index.html" diff --git a/roles/linux-system-roles.network/tests/merge-coverage.sh b/roles/linux-system-roles.network/tests/merge-coverage.sh deleted file mode 100755 index a33e94d..0000000 --- a/roles/linux-system-roles.network/tests/merge-coverage.sh +++ /dev/null @@ -1,35 +0,0 @@ -#! /bin/bash -# SPDX-License-Identifier: BSD-3-Clause - -if [ -n "${DEBUG}" ] -then - set -x -fi -set -e - -if [ "$#" -lt 3 ] -then - echo "USAGE: ${0} path_to_coverage_binary output_file input_files..." 
- echo "Merges all input_files into output file without removing input_files" - exit 1 -fi - -# path to coverage binary -coverage="${1}" -shift - -# read by coverage binary -export COVERAGE_FILE="${1}" -shift - -tempdir="$(mktemp -d /tmp/coverage_merge-XXXXXX)" -trap "rm -rf '${tempdir}'" EXIT - -cp --backup=numbered -- "${@}" "${tempdir}" -# FIXME: Would not work if coverage files are not hidden but they are by -# default -shopt -s dotglob -"${coverage}" combine "${tempdir}/"* - -echo "Merged data into ${COVERAGE_FILE}" -./covstats "${COVERAGE_FILE}" diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml b/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml index ba0c6c3..43fddc3 100644 --- a/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml +++ b/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml @@ -2,23 +2,31 @@ --- - hosts: all vars: - interface: lsrfeat1 + interface: testnic1 type: veth tasks: + - debug: + msg: "this is: playbooks/tests_ethtool_features.yml" + tags: + - always + - name: "INIT: Ethtool feeatures tests" debug: msg: "##################################################" - - include_tasks: tasks/show-interfaces.yml - - include_tasks: tasks/manage-test-interface.yml + - include_tasks: tasks/show_interfaces.yml + - include_tasks: tasks/manage_test_interface.yml vars: state: present - - include_tasks: tasks/assert-device_present.yml + - include_tasks: tasks/assert_device_present.yml - name: Install ethtool (test dependency) package: name: ethtool state: present + + - block: - - name: "TEST: I can create a profile without changing the ethtool features." + - name: >- + TEST: I can create a profile without changing the ethtool features. 
debug: msg: "##################################################" - name: Get current device features @@ -41,7 +49,10 @@ assert: that: - original_ethtool_features.stdout == ethtool_features.stdout - - name: "TEST: I can disable gro and tx-tcp-segmentation and enable gso." + + + - name: >- + TEST: I can disable gro and tx-tcp-segmentation and enable gso. debug: msg: "##################################################" - import_role: @@ -68,9 +79,87 @@ - name: Assert device features assert: that: - - "'generic-receive-offload: off' in ethtool_features.stdout_lines" - - "'generic-segmentation-offload: on' in ethtool_features.stdout_lines" - - "'tx-tcp-segmentation: off' in ethtool_features.stdout_lines | map('trim')" + - >- + 'generic-receive-offload: off' in + ethtool_features.stdout_lines + - >- + 'generic-segmentation-offload: on' in + ethtool_features.stdout_lines + - >- + 'tx-tcp-segmentation: off' in + ethtool_features.stdout_lines | map('trim') + + + - name: >- + TEST: I can enable tx_tcp_segmentation (using underscores). + debug: + msg: "##################################################" + - import_role: + name: linux-system-roles.network + vars: + network_connections: + - name: "{{ interface }}" + state: up + type: ethernet + ip: + dhcp4: "no" + auto6: "no" + ethtool: + features: + tx_tcp_segmentation: "yes" + - name: Get current device features + command: "ethtool --show-features {{ interface }}" + register: ethtool_features + - name: + debug: + var: ethtool_features.stdout_lines + - name: Assert device features + assert: + that: + - >- + 'tx-tcp-segmentation: on' in + ethtool_features.stdout_lines | map('trim') + + + - name: I cannot change tx_tcp_segmentation and tx-tcp-segmentation at + the same time. + block: + - name: >- + TEST: Change feature with both underscores and dashes. 
+ debug: + msg: "##################################################" + - network_connections: + provider: "{{ network_provider | mandatory }}" + connections: + - name: "{{ interface }}" + state: up + type: ethernet + ip: + dhcp4: "no" + auto6: "no" + ethtool: + features: + tx_tcp_segmentation: "no" + tx-tcp-segmentation: "no" + register: __network_connections_result + rescue: + - name: Show network_connections result + debug: + var: __network_connections_result + - assert: + that: + - '{{ "fatal error: configuration error: + connections[0].ethtool.features: duplicate key + ''tx_tcp_segmentation''" in + __network_connections_result.msg }}' + always: + - name: Check failure + debug: + var: __network_connections_result + - assert: + that: "{{ __network_connections_result.failed == true }}" + + - name: "TEST: I can reset features to their original value." debug: msg: "##################################################" @@ -104,7 +193,7 @@ persistent_state: absent state: down ignore_errors: true - - include_tasks: tasks/manage-test-interface.yml + - include_tasks: tasks/manage_test_interface.yml vars: state: absent tags: diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_states.yml b/roles/linux-system-roles.network/tests/playbooks/tests_states.yml index 7a1e207..eec27c0 100644 --- a/roles/linux-system-roles.network/tests/playbooks/tests_states.yml +++ b/roles/linux-system-roles.network/tests/playbooks/tests_states.yml @@ -4,46 +4,134 @@ vars: interface: statebr profile: "{{ interface }}" - network_provider: nm + lsr_fail_debug: + - __network_connections_result tasks: - debug: - msg: Inside states tests - - include_tasks: tasks/show-interfaces.yml - - include_tasks: tasks/assert-device_absent.yml + msg: "this is: playbooks/tests_states.yml" + tags: + - always - # create test profile - - include_role: - name: linux-system-roles.network - vars: - network_connections: - - name: statebr - state: up - type: bridge - ip: - dhcp4: false - auto6: false - - 
include_tasks: tasks/assert-device_present.yml - - include_tasks: tasks/assert-profile_present.yml - # test case (remove profile but keep it up) - # I can remove a profile but keep the configuration active. - - include_role: - name: linux-system-roles.network - vars: - network_connections: - - name: statebr - persistent_state: absent - - include_tasks: tasks/assert-device_present.yml - - include_tasks: tasks/assert-profile_absent.yml + - block: + - include_tasks: tasks/run_test.yml + vars: + lsr_description: I can create a profile + lsr_setup: + - tasks/delete_interface.yml + - tasks/assert_device_absent.yml + lsr_test: + - tasks/create_bridge_profile.yml + lsr_assert: + - tasks/assert_profile_present.yml + lsr_assert_when: + # Device should be present because of autoconnect: true by + # default for NM (this might be considered a bug) + - what: tasks/assert_device_present.yml + when: "{{ network_provider == 'nm' }}" + lsr_cleanup: + - tasks/cleanup_profile+device.yml + tags: + - tests::states:create - # test case - # I can set a profile down that is up and absent. 
- - name: Set down - include_role: - name: linux-system-roles.network - vars: - network_connections: - - name: statebr - state: down - - include_tasks: tasks/assert-device_absent.yml - - include_tasks: tasks/assert-profile_absent.yml + - block: + - include_tasks: tasks/run_test.yml + vars: + lsr_description: I can create a profile without autoconnect + lsr_setup: + - tasks/delete_interface.yml + - tasks/assert_device_absent.yml + lsr_test: + - tasks/create_bridge_profile_no_autoconnect.yml + lsr_assert: + # Device should be absent because of autoconnect: false + - tasks/assert_device_absent.yml + - tasks/assert_profile_present.yml + lsr_cleanup: + - tasks/cleanup_profile+device.yml + tags: + - tests::states:create_without_autoconnect + + - block: + - include_tasks: tasks/run_test.yml + vars: + lsr_description: I can activate an existing profile + lsr_setup: + - tasks/create_bridge_profile.yml + lsr_test: + - tasks/activate_profile.yml + lsr_assert: + - tasks/assert_device_present.yml + - tasks/assert_profile_present.yml + lsr_cleanup: + - tasks/cleanup_profile+device.yml + tags: + - tests::states:activate + + - block: + - include_tasks: tasks/run_test.yml + vars: + lsr_description: I can remove an existing profile without taking it + down + lsr_setup: + - tasks/create_bridge_profile.yml + - tasks/activate_profile.yml + lsr_test: + - tasks/remove_profile.yml + lsr_assert: + - tasks/assert_device_present.yml + - tasks/assert_profile_absent.yml + lsr_cleanup: + - tasks/cleanup_profile+device.yml + tags: + - tests::states:remove_up + + - block: + - include_tasks: tasks/run_test.yml + vars: + lsr_description: I can take a profile down that is absent + lsr_setup: + - tasks/create_bridge_profile.yml + - tasks/activate_profile.yml + - tasks/remove_profile.yml + lsr_test: + - tasks/remove+down_profile.yml + lsr_assert: + - tasks/assert_profile_absent.yml + lsr_assert_when: + - what: tasks/assert_device_absent.yml + when: "{{ network_provider == 'nm' }}" + lsr_cleanup: + - 
tasks/cleanup_profile+device.yml + tags: + - tests::states:remove_down + + - block: + - include_tasks: tasks/run_test.yml + vars: + lsr_description: I will not get an error when I try to + remove an absent profile + lsr_setup: + - tasks/create_bridge_profile.yml + - tasks/activate_profile.yml + - tasks/remove+down_profile.yml + lsr_test: + - tasks/remove+down_profile.yml + lsr_assert: + - tasks/assert_profile_absent.yml + # FIXME: This needs to be included before lsr_assert_when but + # after the role ran to ensure that NetworkManager is actually + # installed but it is not an assert. + - tasks/get_NetworkManager_NVR.yml + lsr_assert_when: + - what: tasks/assert_device_absent.yml + # NetworkManager 1.18.4 from CentOS does not seem to remove the + # virtual interface in this case but it seems to work with + # 1:NetworkManager-1.27.0-26129.d0a2eb8f05.el7 + when: "{{ network_provider == 'nm' and + NetworkManager_NVR != 'NetworkManager-1.18.4-3.el7' + }}" + lsr_cleanup: + - tasks/cleanup_profile+device.yml + tags: + - tests::states:remove_down_twice diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml b/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml index ae0322e..029b599 100644 --- a/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml +++ b/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml @@ -6,12 +6,13 @@ interface: lsr101 vlan_interface: lsr101.90 tasks: - - include_tasks: tasks/show-interfaces.yml - - include_tasks: tasks/manage-test-interface.yml + - include_tasks: tasks/show_interfaces.yml + - include_tasks: tasks/manage_test_interface.yml vars: state: present - - include_tasks: tasks/assert-device_present.yml - - name: "TEST: I can configure the MTU for a vlan interface without autoconnect." + - include_tasks: tasks/assert_device_present.yml + - name: >- + TEST: I can configure the MTU for a vlan interface without autoconnect. 
debug: msg: "##################################################" - import_role: @@ -37,15 +38,15 @@ ip: dhcp4: false auto6: false - - include_tasks: tasks/assert-device_present.yml + - include_tasks: tasks/assert_device_present.yml vars: interface: "{{ vlan_interface }}" - - include_tasks: tasks/assert-profile_present.yml + - include_tasks: tasks/assert_profile_present.yml vars: profile: "{{ item }}" loop: - - "{{ interface }}" - - "{{ vlan_interface }}" + - "{{ interface }}" + - "{{ vlan_interface }}" - name: "TEARDOWN: remove profiles." debug: @@ -61,6 +62,6 @@ persistent_state: absent state: down ignore_errors: true - - include_tasks: tasks/manage-test-interface.yml + - include_tasks: tasks/manage_test_interface.yml vars: state: absent diff --git a/roles/linux-system-roles.network/tests/remove-profile.yml b/roles/linux-system-roles.network/tests/remove-profile.yml deleted file mode 100644 index a50e848..0000000 --- a/roles/linux-system-roles.network/tests/remove-profile.yml +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- name: Remove {{ profile }} - hosts: all - vars: - network_connections: - - name: "{{ profile }}" - persistent_state: absent - roles: - - linux-system-roles.network diff --git a/roles/linux-system-roles.network/tests/run-tasks.yml b/roles/linux-system-roles.network/tests/run-tasks.yml deleted file mode 100644 index ea56720..0000000 --- a/roles/linux-system-roles.network/tests/run-tasks.yml +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- name: Run the tasklist {{ task }} - hosts: all - tasks: - - include_tasks: "{{ task }}" diff --git a/roles/linux-system-roles.network/tests/tasks/assert-device_absent.yml b/roles/linux-system-roles.network/tests/tasks/assert-device_absent.yml deleted file mode 100644 index 67b83ad..0000000 --- a/roles/linux-system-roles.network/tests/tasks/assert-device_absent.yml +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- include: 
get-interface_stat.yml -- name: "assert that interface {{ interface }} is absent" - assert: - that: not interface_stat.stat.exists - msg: "{{ interface }} exists" diff --git a/roles/linux-system-roles.network/tests/tasks/assert-device_present.yml b/roles/linux-system-roles.network/tests/tasks/assert-device_present.yml deleted file mode 100644 index e0d4097..0000000 --- a/roles/linux-system-roles.network/tests/tasks/assert-device_present.yml +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- include: get-interface_stat.yml -- name: "assert that interface {{ interface }} is present" - assert: - that: interface_stat.stat.exists - msg: "{{ interface }} does not exist" diff --git a/roles/linux-system-roles.network/tests/tasks/assert-profile_absent.yml b/roles/linux-system-roles.network/tests/tasks/assert-profile_absent.yml deleted file mode 100644 index e7a6fde..0000000 --- a/roles/linux-system-roles.network/tests/tasks/assert-profile_absent.yml +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- include: get-profile_stat.yml -- name: "assert that profile '{{ profile }}' is absent" - assert: - that: not profile_stat.stat.exists - msg: "profile {{ profile_path }} does exist" diff --git a/roles/linux-system-roles.network/tests/tasks/assert-profile_present.yml b/roles/linux-system-roles.network/tests/tasks/assert-profile_present.yml deleted file mode 100644 index c84c080..0000000 --- a/roles/linux-system-roles.network/tests/tasks/assert-profile_present.yml +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- include: get-profile_stat.yml -- name: "assert that profile '{{ profile }}' is present" - assert: - that: profile_stat.stat.exists - msg: "profile {{ profile_path }} does not exist" diff --git a/roles/linux-system-roles.network/tests/tasks/create-and-remove-interface.yml b/roles/linux-system-roles.network/tests/tasks/create-and-remove-interface.yml deleted file mode 100644 index 
9bebf6e..0000000 --- a/roles/linux-system-roles.network/tests/tasks/create-and-remove-interface.yml +++ /dev/null @@ -1,20 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- include_tasks: show-interfaces.yml -- include_tasks: manage-test-interface.yml - vars: - state: absent -- include_tasks: show-interfaces.yml -- include_tasks: assert-device_absent.yml - -- include_tasks: manage-test-interface.yml - vars: - state: present -- include_tasks: show-interfaces.yml -- include_tasks: assert-device_present.yml - -- include_tasks: manage-test-interface.yml - vars: - state: absent -- include_tasks: show-interfaces.yml -- include_tasks: assert-device_absent.yml diff --git a/roles/linux-system-roles.network/tests/tasks/get-current_interfaces.yml b/roles/linux-system-roles.network/tests/tasks/get-current_interfaces.yml deleted file mode 100644 index 33a4a76..0000000 --- a/roles/linux-system-roles.network/tests/tasks/get-current_interfaces.yml +++ /dev/null @@ -1,8 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- command: ls -1 - args: - chdir: /sys/class/net - register: _current_interfaces -- set_fact: - current_interfaces: "{{ _current_interfaces.stdout_lines }}" diff --git a/roles/linux-system-roles.network/tests/tasks/get-interface_stat.yml b/roles/linux-system-roles.network/tests/tasks/get-interface_stat.yml deleted file mode 100644 index a8b8e5b..0000000 --- a/roles/linux-system-roles.network/tests/tasks/get-interface_stat.yml +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- name: "Get stat for interface {{ interface }}" - stat: - get_attributes: false - get_checksum: false - get_mime: false - path: "/sys/class/net/{{ interface }}" - register: interface_stat diff --git a/roles/linux-system-roles.network/tests/tasks/get-profile_stat.yml b/roles/linux-system-roles.network/tests/tasks/get-profile_stat.yml deleted file mode 100644 index bd33a32..0000000 --- a/roles/linux-system-roles.network/tests/tasks/get-profile_stat.yml +++ 
/dev/null @@ -1,26 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- name: "Get stat for network-scripts" - stat: - get_attributes: false - get_checksum: false - get_mime: false - path: "/etc/sysconfig/network-scripts" - register: network_scripts_stat -- name: Set profile path (network-scripts) - set_fact: - profile_path: /etc/sysconfig/network-scripts/ifcfg-{{ profile }} - when: - - network_scripts_stat.stat.exists -- name: Set profile path (NetworkManager system-connections) - set_fact: - profile_path: /etc/NetworkManager/system-connections/{{ profile }} - when: - - not network_scripts_stat.stat.exists -- name: stat profile file - stat: - get_attributes: false - get_checksum: false - get_mime: false - path: "{{ profile_path }}" - register: profile_stat diff --git a/roles/linux-system-roles.network/tests/tasks/manage-test-interface.yml b/roles/linux-system-roles.network/tests/tasks/manage-test-interface.yml deleted file mode 100644 index e7b40f0..0000000 --- a/roles/linux-system-roles.network/tests/tasks/manage-test-interface.yml +++ /dev/null @@ -1,50 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- fail: - msg: "state needs to be present or absent, not '{{ state }}'" - when: state not in ["present", "absent"] - -- fail: - msg: "type needs to be dummy, tap or veth, not '{{ type }}'" - when: type not in ["dummy", "tap", "veth"] - -# - include: get-current_interfaces.yml -- include: show-interfaces.yml - -- name: Install iproute - package: - name: iproute - state: present - -# veth -- name: Create veth interface {{ interface }} - shell: ip link add {{ interface }} type veth peer name peer{{ interface }} - when: "type == 'veth' and state == 'present' and - interface not in current_interfaces" - -- name: Delete veth interface {{ interface }} - shell: ip link del {{ interface }} type veth - when: "type == 'veth' and state == 'absent' and - interface in current_interfaces" - -# dummy -- name: Create dummy interface {{ interface }} - shell: ip link add 
"{{ interface }}" type dummy - when: "type == 'dummy' and state == 'present' and - interface not in current_interfaces" - -- name: Delete dummy interface {{ interface }} - shell: ip link del "{{ interface }}" type dummy - when: "type == 'dummy' and state == 'absent' and - interface in current_interfaces" - -# tap -- name: Create tap interface {{ interface }} - shell: ip tuntap add dev {{ interface }} mode tap - when: "type == 'tap' and state == 'present' - and interface not in current_interfaces" - -- name: Delete tap interface {{ interface }} - shell: ip tuntap del dev {{ interface }} mode tap - when: "type == 'tap' and state == 'absent' and - interface in current_interfaces" diff --git a/roles/linux-system-roles.network/tests/tasks/show-interfaces.yml b/roles/linux-system-roles.network/tests/tasks/show-interfaces.yml deleted file mode 100644 index 704e8c5..0000000 --- a/roles/linux-system-roles.network/tests/tasks/show-interfaces.yml +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- include: get-current_interfaces.yml -- debug: - msg: "current_interfaces: {{ current_interfaces }}" diff --git a/roles/linux-system-roles.network/tests/tests_bridge.yml b/roles/linux-system-roles.network/tests/tests_bridge.yml deleted file mode 100644 index 9ead308..0000000 --- a/roles/linux-system-roles.network/tests/tests_bridge.yml +++ /dev/null @@ -1,55 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- name: Test configuring bridges - hosts: all - vars: - interface: LSR-TST-br31 - - tasks: - - name: "set interface={{ interface }}" - set_fact: - interface: "{{ interface }}" - - include_tasks: tasks/show-interfaces.yml - - include_tasks: tasks/assert-device_absent.yml - -- name: Add test bridge - hosts: all - vars: - network_connections: - - name: "{{ interface }}" - interface_name: "{{ interface }}" - state: up - type: bridge - ip: - dhcp4: no - auto6: yes - roles: - - linux-system-roles.network - -- import_playbook: run-tasks.yml - vars: - task: 
tasks/assert-device_present.yml - -- import_playbook: run-tasks.yml - vars: - profile: "{{ interface }}" - task: tasks/assert-profile_present.yml - -- import_playbook: down-profile.yml - vars: - profile: "{{ interface }}" -# FIXME: assert profile/device down - -- import_playbook: remove-profile.yml - vars: - profile: "{{ interface }}" - -- import_playbook: run-tasks.yml - vars: - profile: "{{ interface }}" - task: tasks/assert-profile_absent.yml - -# FIXME: Devices might still be left when profile is absent -#- import_playbook: run-tasks.yml -# vars: -# task: tasks/assert-device_absent.yml diff --git a/roles/linux-system-roles.network/tests/tests_bridge_other_provider.yml b/roles/linux-system-roles.network/tests/tests_bridge_other_provider.yml deleted file mode 100644 index e5a4ad7..0000000 --- a/roles/linux-system-roles.network/tests/tests_bridge_other_provider.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- hosts: all - name: Run playbook 'tests_bridge.yml' with non-default provider - tasks: - - name: Get service facts - service_facts: null - - name: Set network provider - set_fact: - network_provider: '{{ "initscripts" if network_provider_current == "nm" else - "nm" }}' - vars: - network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services - and ansible_facts.services[''NetworkManager.service''][''state''] == ''running'' - else ''initscripts'' }}' -- import_playbook: tests_bridge.yml - when: - - ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/tests_default.yml b/roles/linux-system-roles.network/tests/tests_default.yml index fda6ed5..e196314 100644 --- a/roles/linux-system-roles.network/tests/tests_default.yml +++ b/roles/linux-system-roles.network/tests/tests_default.yml @@ -4,3 +4,10 @@ hosts: all roles: - linux-system-roles.network + tasks: + - include_tasks: tasks/el_repo_setup.yml + - name: Test warning and info logs + assert: + that: + - "'warnings' not in __network_connections_result" 
+ msg: "There are warnings" diff --git a/roles/linux-system-roles.network/tests/tests_default_other_provider.yml b/roles/linux-system-roles.network/tests/tests_default_other_provider.yml deleted file mode 100644 index 697bc57..0000000 --- a/roles/linux-system-roles.network/tests/tests_default_other_provider.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- hosts: all - name: Run playbook 'tests_default.yml' with non-default provider - tasks: - - name: Get service facts - service_facts: null - - name: Set network provider - set_fact: - network_provider: '{{ "initscripts" if network_provider_current == "nm" else - "nm" }}' - vars: - network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services - and ansible_facts.services[''NetworkManager.service''][''state''] == ''running'' - else ''initscripts'' }}' -- import_playbook: tests_default.yml - when: - - ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/tests_ethernet.yml b/roles/linux-system-roles.network/tests/tests_ethernet.yml deleted file mode 100644 index 25f117d..0000000 --- a/roles/linux-system-roles.network/tests/tests_ethernet.yml +++ /dev/null @@ -1,62 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- hosts: all - tasks: - - debug: - msg: Inside ethernet tests - - debug: - var: network_provider - -- name: Test configuring ethernet devices - hosts: all - vars: - type: veth - interface: lsr27 - - tasks: - - name: "set type={{ type }} and interface={{ interface }}" - set_fact: - type: "{{ type }}" - interface: "{{ interface }}" - - include_tasks: tasks/show-interfaces.yml - - include_tasks: tasks/manage-test-interface.yml - vars: - state: present - - include_tasks: tasks/assert-device_present.yml - -- name: Test static interface up - hosts: all - vars: - network_connections: - - name: "{{ interface }}" - interface_name: "{{ interface }}" - state: up - type: ethernet - autoconnect: yes - ip: - address: 192.0.2.1/24 - roles: - - 
linux-system-roles.network - -- hosts: all - tasks: - - debug: - var: network_provider - -# FIXME: assert profile present -# FIXME: assert profile/device up + IP address -- import_playbook: down-profile.yml - vars: - profile: "{{ interface }}" -# FIXME: assert profile/device down -- import_playbook: remove-profile.yml - vars: - profile: "{{ interface }}" -# FIXME: assert profile away -- name: Remove interfaces - hosts: all - tasks: - - include_tasks: tasks/manage-test-interface.yml - vars: - state: absent - - include_tasks: tasks/assert-device_absent.yml diff --git a/roles/linux-system-roles.network/tests/tests_ethernet_other_provider.yml b/roles/linux-system-roles.network/tests/tests_ethernet_other_provider.yml deleted file mode 100644 index 456b052..0000000 --- a/roles/linux-system-roles.network/tests/tests_ethernet_other_provider.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- hosts: all - name: Run playbook 'tests_ethernet.yml' with non-default provider - tasks: - - name: Get service facts - service_facts: null - - name: Set network provider - set_fact: - network_provider: '{{ "initscripts" if network_provider_current == "nm" else - "nm" }}' - vars: - network_provider_current: '{{ ''nm'' if ''NetworkManager.service'' in ansible_facts.services - and ansible_facts.services[''NetworkManager.service''][''state''] == ''running'' - else ''initscripts'' }}' -- import_playbook: tests_ethernet.yml - when: - - ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml b/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml index 6aea73b..5bac5d3 100644 --- a/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml +++ b/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml @@ -2,6 +2,7 @@ # set network provider and gather facts - hosts: all tasks: + - include_tasks: tasks/el_repo_setup.yml - name: Set network provider to 'initscripts' 
set_fact: network_provider: initscripts diff --git a/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml b/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml index 12e5042..2027862 100644 --- a/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml +++ b/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml @@ -1,28 +1,39 @@ +# SPDX-License-Identifier: BSD-3-Clause +# This file was generated by ensure_provider_tests.py --- # set network provider and gather facts - hosts: all + name: Run playbook 'playbooks/tests_ethtool_features.yml' with nm as provider tasks: + - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'nm' set_fact: network_provider: nm - - name: Install NetworkManager - package: - name: NetworkManager - state: present - - name: Get NetworkManager version - command: rpm -q --qf "%{version}" NetworkManager - args: - warn: "no" - when: true - register: NetworkManager_version + tags: + - always -# workaround for: https://github.com/ansible/ansible/issues/27973 -# There is no way in Ansible to abort a playbook hosts with specific OS -# releases Therefore we include the playbook with the tests only if the hosts -# would support it. -# The test should run with NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6. 
+ - block: + - name: Install NetworkManager + package: + name: NetworkManager + state: present + - name: Get NetworkManager version + command: rpm -q --qf "%{version}" NetworkManager + args: + warn: false + register: NetworkManager_version + when: true + when: + - ansible_distribution_major_version != '6' + tags: + - always + + +# The test requires or should run with NetworkManager, therefore it cannot run +# on RHEL/CentOS 6 - import_playbook: playbooks/tests_ethtool_features.yml when: - ansible_distribution_major_version != '6' - # NetworkManager 1.20.0 introduced ethtool settings support + - NetworkManager_version.stdout is version('1.20.0', '>=') diff --git a/roles/linux-system-roles.network/tests/tests_helpers-and-asserts.yml b/roles/linux-system-roles.network/tests/tests_helpers-and-asserts.yml deleted file mode 100644 index 36f02c2..0000000 --- a/roles/linux-system-roles.network/tests/tests_helpers-and-asserts.yml +++ /dev/null @@ -1,27 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause ---- -- name: Check that creating and removing test devices and assertions work - hosts: all - tasks: - - name: test veth interface management - include_tasks: tasks/create-and-remove-interface.yml - vars: - type: veth - interface: veth1298 - - - name: test veth interface management - include_tasks: tasks/create-and-remove-interface.yml - vars: - type: dummy - interface: dummy1298 - -# FIXME: when: does not seem to work with include_tasks, therefore this cannot be safely tested for now -# - name: test tap interfaces -# include_tasks: tasks/create-and-remove-interface.yml -# vars: -# - type: tap -# - interface: tap1298 -# when: ansible_distribution_major_version > 6 -# # ip tuntap does not exist on RHEL6 -# # FIXME: Maybe use some other tool to manage devices, openvpn can do this, -# # but it is in EPEL diff --git a/roles/linux-system-roles.network/tests/tests_states.yml b/roles/linux-system-roles.network/tests/tests_states.yml deleted file mode 100644 index eff3436..0000000 --- 
a/roles/linux-system-roles.network/tests/tests_states.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# empty playbook to gather facts for import_playbook when clause -- hosts: all - -# workaround for: https://github.com/ansible/ansible/issues/27973 -# There is no way in Ansible to abort a playbook hosts with specific OS -# releases Therefore we include the playbook with the tests only if the hosts -# would support it. -# The test requires NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6. -- import_playbook: playbooks/tests_states.yml - when: ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/tests_unit.yml b/roles/linux-system-roles.network/tests/tests_unit.yml index c6ea4ef..44dfaec 100644 --- a/roles/linux-system-roles.network/tests/tests_unit.yml +++ b/roles/linux-system-roles.network/tests/tests_unit.yml @@ -3,14 +3,7 @@ - hosts: all name: Setup for test running tasks: - - name: Install EPEL on enterprise Linux for python2-mock - command: yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm - args: - warn: false - creates: /etc/yum.repos.d/epel.repo - when: - - ansible_distribution in ['RedHat', 'CentOS'] - - ansible_distribution_major_version in ['6', '7'] + - include_tasks: tasks/el_repo_setup.yml - name: Install dependencies package: @@ -28,62 +21,140 @@ - hosts: all name: execute python unit tests tasks: - - name: Copy python modules - copy: - src: "{{ item }}" - dest: /tmp/test-unit-1/ - local_follow: false - loop: - - ../library/network_connections.py - - unit/test_network_connections.py - - ../module_utils/network_lsr + - block: + - name: create tempdir for code to test + tempfile: + state: directory + prefix: lsrtest_ + register: _rundir - - name: Create helpers directory - file: - state: directory - dest: /tmp/test-unit-1/helpers + - name: get tempfile for tar + tempfile: + prefix: lsrtest_ + suffix: ".tar" + register: temptar + 
delegate_to: localhost - - name: Copy helpers - copy: - src: "{{ item }}" - dest: /tmp/test-unit-1/helpers - mode: 0755 - with_fileglob: - - unit/helpers/* + - include_tasks: tasks/get_modules_and_utils_paths.yml - - name: Check if python2 is available - command: python2 --version - ignore_errors: true - register: python2_available - when: true + # TODO: using tar and copying the file is a workaround for the + # synchronize module that does not work in test-harness. Related issue: + # https://github.com/linux-system-roles/test-harness/issues/102 + # + - name: Create Tar file + command: > + tar -cvf {{ temptar.path }} --exclude "*.pyc" + --exclude "__pycache__" + -C {{ modules_parent_and_dir.stdout_lines[0] }} + {{ modules_parent_and_dir.stdout_lines[1] }} + -C {{ module_utils_parent_and_dir.stdout_lines[0] }} + {{ module_utils_parent_and_dir.stdout_lines[1] }} + delegate_to: localhost - - name: Run python2 unit tests - command: python2 /tmp/test-unit-1/test_network_connections.py --verbose - when: python2_available is succeeded - register: python2_result + - name: Copy testrepo.tar to the remote system + copy: + src: "{{ temptar.path }}" + dest: "{{ _rundir.path }}" - - name: Check if python3 is available - command: python3 --version - ignore_errors: true - register: python3_available - when: true + - name: Untar testrepo.tar + command: tar -xvf {{ temptar.path | basename }} + args: + chdir: "{{ _rundir.path }}" - - name: Run python3 unit tests - command: python3 /tmp/test-unit-1/test_network_connections.py --verbose - when: python3_available is succeeded - register: python3_result + - file: + state: directory + path: "{{ item }}" + loop: + - "{{ _rundir.path }}/ansible" + - "{{ _rundir.path }}/ansible/module_utils" - - name: Show python2 unit test results - debug: - var: python2_result.stderr_lines - when: python2_result is succeeded + - name: Move module_utils to ansible directory + shell: | + if [ -d {{ _rundir.path }}/module_utils ]; then + mv {{ _rundir.path 
}}/module_utils {{ _rundir.path }}/ansible + fi - - name: Show python3 unit test results - debug: - var: python3_result.stderr_lines - when: python3_result is succeeded + - name: Fake out python module directories, primarily for python2 + shell: | + for dir in $(find {{ _rundir.path }} -type d -print); do + if [ ! -f "$dir/__init__.py" ]; then + touch "$dir/__init__.py" + fi + done + + - name: Copy unit test to remote system + copy: + src: unit/test_network_connections.py + dest: "{{ _rundir.path }}" + + - set_fact: + _lsr_python_path: "{{ + _rundir.path ~ '/' ~ + modules_parent_and_dir.stdout_lines[1] ~ ':' ~ + _rundir.path ~ '/' ~ 'ansible' ~ '/' ~ + module_utils_parent_and_dir.stdout_lines[1] ~ ':' ~ + _rundir.path ~ '/' ~ + module_utils_parent_and_dir.stdout_lines[1] ~ ':' ~ + _rundir.path + }}" + + - command: ls -alrtFR {{ _rundir.path }} + - debug: + msg: path {{ _lsr_python_path }} + + - name: Check if python2 is available + command: python2 --version + ignore_errors: true + register: python2_available + when: true + + - name: Run python2 unit tests + command: > + python2 {{ _rundir.path }}/test_network_connections.py --verbose + environment: + PYTHONPATH: "{{ _lsr_python_path }}" + when: > + python2_available is succeeded and ansible_distribution != 'Fedora' + register: python2_result + + - name: Check if python3 is available + command: python3 --version + ignore_errors: true + register: python3_available + when: true + + - name: Run python3 unit tests + command: > + python3 {{ _rundir.path }}/test_network_connections.py --verbose + environment: + PYTHONPATH: "{{ _lsr_python_path }}" + when: python3_available is succeeded + register: python3_result + + - name: Show python2 unit test results + debug: + var: python2_result.stderr_lines + when: python2_result is succeeded + + - name: Show python3 unit test results + debug: + var: python3_result.stderr_lines + when: python3_result is succeeded + + always: + - name: remove local tar file + file: + state: absent 
+ path: "{{ temptar.path }}" + delegate_to: localhost + + - name: remove tempdir + file: + state: absent + path: "{{ _rundir.path }}" - name: Ensure that at least one python unit test ran fail: msg: Tests did not run with python2 or python3 - when: not (python2_available is succeeded or python3_available is succeeded) + when: not python2_available is succeeded and + not python3_available is succeeded diff --git a/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml b/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml index a57db4b..dcd5d74 100644 --- a/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml +++ b/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml @@ -1,13 +1,15 @@ +# SPDX-License-Identifier: BSD-3-Clause +# This file was generated by ensure_provider_tests.py --- -# set network provider and gather facts - hosts: all + name: Run playbook 'playbooks/tests_vlan_mtu.yml' with initscripts as provider tasks: + - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'initscripts' set_fact: network_provider: initscripts + tags: + - always -# workaround for: https://github.com/ansible/ansible/issues/27973 -# There is no way in Ansible to abort a playbook hosts with specific OS -# releases Therefore we include the playbook with the tests only if the hosts -# would support it. 
- import_playbook: playbooks/tests_vlan_mtu.yml diff --git a/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml b/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml index d830817..c38263c 100644 --- a/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml +++ b/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml @@ -1,15 +1,21 @@ +# SPDX-License-Identifier: BSD-3-Clause +# This file was generated by ensure_provider_tests.py --- # set network provider and gather facts - hosts: all + name: Run playbook 'playbooks/tests_vlan_mtu.yml' with nm as provider tasks: + - include_tasks: tasks/el_repo_setup.yml + - name: Set network provider to 'nm' set_fact: network_provider: nm + tags: + - always -# workaround for: https://github.com/ansible/ansible/issues/27973 -# There is no way in Ansible to abort a playbook hosts with specific OS -# releases Therefore we include the playbook with the tests only if the hosts -# would support it. -# The test requires NetworkManager, therefore it cannot run on RHEL 6 or CentOS 6. + +# The test requires or should run with NetworkManager, therefore it cannot run +# on RHEL/CentOS 6 - import_playbook: playbooks/tests_vlan_mtu.yml - when: ansible_distribution_major_version != '6' + when: + - ansible_distribution_major_version != '6' diff --git a/roles/linux-system-roles.network/tests/unit/helpers/ethtool b/roles/linux-system-roles.network/tests/unit/helpers/ethtool deleted file mode 100755 index 874561f..0000000 --- a/roles/linux-system-roles.network/tests/unit/helpers/ethtool +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/bin/bash - -if [ "${1}" == "-P" ] && [ "${2}" != "" ] -then - echo "Permanent address: 23:00:00:00:00:00" -fi diff --git a/roles/linux-system-roles.network/tests/unit/test_network_connections.py b/roles/linux-system-roles.network/tests/unit/test_network_connections.py old mode 100755 new mode 100644 index 9c2f0ed..b14e7b3 --- a/roles/linux-system-roles.network/tests/unit/test_network_connections.py +++ b/roles/linux-system-roles.network/tests/unit/test_network_connections.py @@ -1,35 +1,26 @@ #!/usr/bin/env python """ Tests for network_connections Ansible module """ # SPDX-License-Identifier: BSD-3-Clause - +import copy import itertools -import os import pprint as pprint_ import socket import sys import unittest -TESTS_BASEDIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(1, os.path.join(TESTS_BASEDIR, "../..", "library")) -sys.path.insert(1, os.path.join(TESTS_BASEDIR, "../..", "module_utils")) - try: from unittest import mock except ImportError: # py2 import mock -sys.modules["ansible"] = mock.Mock() sys.modules["ansible.module_utils.basic"] = mock.Mock() -sys.modules["ansible.module_utils"] = mock.Mock() -sys.modules["ansible.module_utils.network_lsr"] = __import__("network_lsr") # pylint: disable=import-error, wrong-import-position + import network_lsr -import network_connections as n - -from network_connections import SysUtil -from network_connections import Util - +import network_lsr.argument_validator +from network_connections import IfcfgUtil, NMUtil, SysUtil, Util +from network_lsr.argument_validator import ValidationError try: my_test_skipIf = unittest.skipIf @@ -43,7 +34,7 @@ except AttributeError: try: - nmutil = n.NMUtil() + nmutil = NMUtil() assert nmutil except Exception: # NMUtil is not supported, for example on RHEL 6 or without @@ -51,8 +42,8 @@ except Exception: nmutil = None if nmutil: - NM = n.Util.NM() - GObject = n.Util.GObject() + NM = Util.NM() + GObject = Util.GObject() def pprint(msg, obj): @@ -66,63 +57,93 @@ def 
pprint(msg, obj): ARGS_CONNECTIONS = network_lsr.argument_validator.ArgValidator_ListConnections() VALIDATE_ONE_MODE_INITSCRIPTS = ARGS_CONNECTIONS.VALIDATE_ONE_MODE_INITSCRIPTS +VALIDATE_ONE_MODE_NM = ARGS_CONNECTIONS.VALIDATE_ONE_MODE_NM ETHTOOL_FEATURES_DEFAULTS = { - "esp-hw-offload": None, - "esp-tx-csum-hw-offload": None, - "fcoe-mtu": None, + "esp_hw_offload": None, + "esp_tx_csum_hw_offload": None, + "fcoe_mtu": None, "gro": None, "gso": None, "highdma": None, - "hw-tc-offload": None, - "l2-fwd-offload": None, + "hw_tc_offload": None, + "l2_fwd_offload": None, "loopback": None, "lro": None, "ntuple": None, "rx": None, - "rx-all": None, - "rx-fcs": None, - "rx-gro-hw": None, - "rx-udp_tunnel-port-offload": None, - "rx-vlan-filter": None, - "rx-vlan-stag-filter": None, - "rx-vlan-stag-hw-parse": None, + "rx_all": None, + "rx_fcs": None, + "rx_gro_hw": None, + "rx_udp_tunnel_port_offload": None, + "rx_vlan_filter": None, + "rx_vlan_stag_filter": None, + "rx_vlan_stag_hw_parse": None, "rxhash": None, "rxvlan": None, "sg": None, - "tls-hw-record": None, - "tls-hw-tx-offload": None, + "tls_hw_record": None, + "tls_hw_tx_offload": None, "tso": None, "tx": None, - "tx-checksum-fcoe-crc": None, - "tx-checksum-ip-generic": None, - "tx-checksum-ipv4": None, - "tx-checksum-ipv6": None, - "tx-checksum-sctp": None, - "tx-esp-segmentation": None, - "tx-fcoe-segmentation": None, - "tx-gre-csum-segmentation": None, - "tx-gre-segmentation": None, - "tx-gso-partial": None, - "tx-gso-robust": None, - "tx-ipxip4-segmentation": None, - "tx-ipxip6-segmentation": None, - "tx-nocache-copy": None, - "tx-scatter-gather": None, - "tx-scatter-gather-fraglist": None, - "tx-sctp-segmentation": None, - "tx-tcp-ecn-segmentation": None, - "tx-tcp-mangleid-segmentation": None, - "tx-tcp-segmentation": None, - "tx-tcp6-segmentation": None, - "tx-udp-segmentation": None, - "tx-udp_tnl-csum-segmentation": None, - "tx-udp_tnl-segmentation": None, - "tx-vlan-stag-hw-insert": None, + 
"tx_checksum_fcoe_crc": None, + "tx_checksum_ip_generic": None, + "tx_checksum_ipv4": None, + "tx_checksum_ipv6": None, + "tx_checksum_sctp": None, + "tx_esp_segmentation": None, + "tx_fcoe_segmentation": None, + "tx_gre_csum_segmentation": None, + "tx_gre_segmentation": None, + "tx_gso_partial": None, + "tx_gso_robust": None, + "tx_ipxip4_segmentation": None, + "tx_ipxip6_segmentation": None, + "tx_nocache_copy": None, + "tx_scatter_gather": None, + "tx_scatter_gather_fraglist": None, + "tx_sctp_segmentation": None, + "tx_tcp_ecn_segmentation": None, + "tx_tcp_mangleid_segmentation": None, + "tx_tcp_segmentation": None, + "tx_tcp6_segmentation": None, + "tx_udp_segmentation": None, + "tx_udp_tnl_csum_segmentation": None, + "tx_udp_tnl_segmentation": None, + "tx_vlan_stag_hw_insert": None, "txvlan": None, } -ETHTOOL_DEFAULTS = {"features": ETHTOOL_FEATURES_DEFAULTS} + +ETHTOOL_COALESCE_DEFAULTS = { + "adaptive_rx": None, + "adaptive_tx": None, + "pkt_rate_high": None, + "pkt_rate_low": None, + "rx_frames": None, + "rx_frames_high": None, + "rx_frames_irq": None, + "rx_frames_low": None, + "rx_usecs": None, + "rx_usecs_high": None, + "rx_usecs_irq": None, + "rx_usecs_low": None, + "sample_interval": None, + "stats_block_usecs": None, + "tx_frames": None, + "tx_frames_high": None, + "tx_frames_irq": None, + "tx_frames_low": None, + "tx_usecs": None, + "tx_usecs_high": None, + "tx_usecs_irq": None, + "tx_usecs_low": None, +} + +ETHTOOL_DEFAULTS = { + "features": ETHTOOL_FEATURES_DEFAULTS, + "coalesce": ETHTOOL_COALESCE_DEFAULTS, +} ETHERNET_DEFAULTS = {"autoneg": None, "duplex": None, "speed": 0} @@ -141,6 +162,7 @@ class TestValidator(unittest.TestCase): "gateway4": None, "route_metric4": None, "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "address": [], "route_append_only": False, @@ -149,19 +171,22 @@ class TestValidator(unittest.TestCase): "route_metric6": None, "dhcp4_send_hostname": None, "dns": [], + "dns_options": [], "dns_search": [], }, "mac": 
None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "5", "parent": None, - "slave_type": None, + "port_type": None, "zone": None, } def assertValidationError(self, v, value): - self.assertRaises(n.ValidationError, v.validate, value) + self.assertRaises(ValidationError, v.validate, value) def assert_nm_connection_routes_expected(self, connection, route_list_expected): parser = network_lsr.argument_validator.ArgValidatorIPRoute("route[?]") @@ -196,12 +221,13 @@ class TestValidator(unittest.TestCase): for connection in connections: if "type" in connection: connection["nm.exists"] = False - connection["nm.uuid"] = n.Util.create_uuid() - mode = VALIDATE_ONE_MODE_INITSCRIPTS + connection["nm.uuid"] = Util.create_uuid() + + mode = VALIDATE_ONE_MODE_NM for idx, connection in enumerate(connections): try: ARGS_CONNECTIONS.validate_connection_one(mode, connections, idx) - except n.ValidationError: + except ValidationError: continue if "type" in connection: con_new = nmutil.connection_create(connections, idx) @@ -244,17 +270,20 @@ class TestValidator(unittest.TestCase): for idx, connection in enumerate(connections): try: ARGS_CONNECTIONS.validate_connection_one(mode, connections, idx) - except n.ValidationError: + except ValidationError: continue if "type" not in connection: continue - if connection["type"] in ["macvlan"]: + if ( + connection["type"] in ["macvlan", "wireless"] + or connection["ieee802_1x"] + ): # initscripts do not support this type. Skip the test. 
continue content_current = kwargs.get("initscripts_content_current", None) if content_current: content_current = content_current[idx] - c = n.IfcfgUtil.ifcfg_create( + c = IfcfgUtil.ifcfg_create( connections, idx, content_current=content_current ) # pprint("con[%s] = \"%s\"" % (idx, connections[idx]['name']), c) @@ -280,6 +309,50 @@ class TestValidator(unittest.TestCase): v = network_lsr.argument_validator.ArgValidatorStr("state", required=True) self.assertValidationError(v, None) + v = network_lsr.argument_validator.ArgValidatorStr( + "test_max_length", max_length=13 + ) + self.assertEqual("less_than_13", v.validate("less_than_13")) + self.assertValidationError(v, "longer_than_13") + + v = network_lsr.argument_validator.ArgValidatorStr( + "test_min_length", min_length=13 + ) + self.assertEqual("longer_than_13", v.validate("longer_than_13")) + self.assertValidationError(v, "less_than_13") + + v = network_lsr.argument_validator.ArgValidatorStr( + "test_min_max_length", min_length=10, max_length=15 + ) + self.assertEqual("13_characters", v.validate("13_characters")) + self.assertValidationError(v, "too_short") + self.assertValidationError(v, "string_is_too_long") + + self.assertRaises( + ValueError, + network_lsr.argument_validator.ArgValidatorStr, + "non_int", + min_length="string", + ) + self.assertRaises( + ValueError, + network_lsr.argument_validator.ArgValidatorStr, + "non_int", + max_length="string", + ) + self.assertRaises( + ValueError, + network_lsr.argument_validator.ArgValidatorStr, + "negative_int", + min_length=-5, + ) + self.assertRaises( + ValueError, + network_lsr.argument_validator.ArgValidatorStr, + "negative_int", + max_length=-5, + ) + def test_validate_int(self): v = network_lsr.argument_validator.ArgValidatorNum( @@ -382,6 +455,7 @@ class TestValidator(unittest.TestCase): "gateway4": None, "route_metric4": None, "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "address": [], "route_append_only": False, @@ -390,15 +464,18 @@ class 
TestValidator(unittest.TestCase): "route_metric6": None, "dhcp4_send_hostname": None, "dns": [], + "dns_options": [], "dns_search": [], }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "5", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": None, "type": "ethernet", "zone": None, @@ -432,23 +509,27 @@ class TestValidator(unittest.TestCase): "gateway4": None, "route_metric4": None, "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "address": [], "route_append_only": False, "rule_append_only": False, "route": [], "dns": [], + "dns_options": [], "dns_search": [], "route_metric6": None, "dhcp4_send_hostname": None, }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "5", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -476,23 +557,27 @@ class TestValidator(unittest.TestCase): "gateway4": None, "route_metric4": None, "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "address": [], "route_append_only": False, "rule_append_only": False, "route": [], "dns": [], + "dns_options": [], "dns_search": [], "route_metric6": None, "dhcp4_send_hostname": None, }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "5", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -556,11 +641,13 @@ class TestValidator(unittest.TestCase): "dhcp4": False, "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, "gateway4": None, "auto6": True, + "ipv6_disabled": False, "dns": [], "address": [ { @@ -574,12 +661,14 @@ class TestValidator(unittest.TestCase): "route": [], }, "mac": 
"52:54:00:44:9f:ba", - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": 1450, "name": "prod1", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -617,11 +706,13 @@ class TestValidator(unittest.TestCase): "dhcp4": False, "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, "gateway4": None, "auto6": True, + "ipv6_disabled": False, "dns": [{"address": "192.168.174.1", "family": socket.AF_INET}], "address": [ { @@ -635,12 +726,14 @@ class TestValidator(unittest.TestCase): "route": [], }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "prod1", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -658,6 +751,107 @@ class TestValidator(unittest.TestCase): ], ) + def test_ipv6_static(self): + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "prod1", + "ip": { + "gateway6": "2001:db8::1", + "gateway4": None, + "route_metric4": None, + "auto6": False, + "ipv6_disabled": False, + "dhcp4": False, + "address": [ + { + "address": "2001:db8::2", + "family": socket.AF_INET6, + "prefix": 32, + }, + { + "address": "2001:db8::3", + "family": socket.AF_INET6, + "prefix": 32, + }, + { + "address": "2001:db8::4", + "family": socket.AF_INET6, + "prefix": 32, + }, + ], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_options": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "controller": None, + 
"ieee802_1x": None, + "wireless": None, + "mtu": None, + "name": "prod1", + "parent": None, + "persistent_state": "present", + "port_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "prod1", + "state": "up", + "type": "ethernet", + "ip": { + "dhcp4": "no", + "auto6": "no", + "address": [ + "2001:db8::2/32", + "2001:db8::3/32", + "2001:db8::4/32", + ], + "gateway6": "2001:db8::1", + }, + } + ], + initscripts_dict_expected=[ + { + "ifcfg": { + "BOOTPROTO": "none", + "IPV6INIT": "yes", + "IPV6_AUTOCONF": "no", + "IPV6ADDR": "2001:db8::2/32", + "IPV6ADDR_SECONDARIES": "2001:db8::3/32 2001:db8::4/32", + "IPV6_DEFAULTGW": "2001:db8::1", + "NM_CONTROLLED": "no", + "ONBOOT": "yes", + "TYPE": "Ethernet", + "DEVICE": "prod1", + }, + "keys": None, + "route": None, + "route6": None, + "rule": None, + "rule6": None, + } + ], + ) + def test_routes(self): self.maxDiff = None self.do_connections_validate( @@ -674,6 +868,7 @@ class TestValidator(unittest.TestCase): "ip": { "dhcp4": False, "auto6": True, + "ipv6_disabled": False, "address": [ { "prefix": 24, @@ -691,6 +886,7 @@ class TestValidator(unittest.TestCase): "route": [], "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, @@ -698,12 +894,14 @@ class TestValidator(unittest.TestCase): "dns": [], }, "mac": "52:54:00:44:9f:ba", - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": 1450, "name": "prod1", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -722,11 +920,13 @@ class TestValidator(unittest.TestCase): "dhcp4": False, "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, "gateway4": None, "auto6": False, + "ipv6_disabled": False, "dns": [], "address": [ { @@ -753,12 +953,14 @@ 
class TestValidator(unittest.TestCase): ], }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "prod.100", "parent": "prod1", "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "vlan", "vlan": {"id": 100}, @@ -828,19 +1030,23 @@ class TestValidator(unittest.TestCase): "route": [], "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, "gateway4": None, + "ipv6_disabled": False, "dns": [], }, "mac": "52:54:00:44:9f:ba", - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": 1450, "name": "prod1", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -859,10 +1065,12 @@ class TestValidator(unittest.TestCase): "dhcp4": False, "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, "gateway4": None, + "ipv6_disabled": False, "auto6": False, "dns": [], "address": [ @@ -890,12 +1098,14 @@ class TestValidator(unittest.TestCase): ], }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "prod.100", "parent": "prod1", "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "vlan", "vlan": {"id": 101}, @@ -960,6 +1170,8 @@ class TestValidator(unittest.TestCase): "route": [], "route_metric6": None, "route_metric4": None, + "dns_options": [], + "ipv6_disabled": False, "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, @@ -967,12 +1179,14 @@ class TestValidator(unittest.TestCase): "dns": [], }, "mac": "33:24:10:24:2f:b9", - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": 1450, "name": "eth0-parent", "parent": None, "persistent_state": 
"present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -990,6 +1204,8 @@ class TestValidator(unittest.TestCase): "dhcp4": False, "route_metric6": None, "route_metric4": None, + "dns_options": [], + "ipv6_disabled": False, "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, @@ -1017,12 +1233,14 @@ class TestValidator(unittest.TestCase): }, "mac": None, "macvlan": {"mode": "bridge", "promiscuous": True, "tap": False}, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "veth0.0", "parent": "eth0-parent", "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "macvlan", "wait": None, @@ -1040,10 +1258,12 @@ class TestValidator(unittest.TestCase): "dhcp4": False, "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, "gateway4": None, + "ipv6_disabled": False, "auto6": False, "dns": [], "address": [ @@ -1067,12 +1287,14 @@ class TestValidator(unittest.TestCase): }, "mac": None, "macvlan": {"mode": "passthru", "promiscuous": False, "tap": True}, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "veth0.1", "parent": "eth0-parent", "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "macvlan", "wait": None, @@ -1142,9 +1364,11 @@ class TestValidator(unittest.TestCase): "dhcp4": False, "dhcp4_send_hostname": None, "dns": [], + "dns_options": [], "dns_search": [], "gateway4": None, "gateway6": None, + "ipv6_disabled": False, "route": [], "route_append_only": False, "route_metric4": None, @@ -1152,12 +1376,14 @@ class TestValidator(unittest.TestCase): "rule_append_only": False, }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "prod2", "parent": None, "persistent_state": "present", 
- "slave_type": None, + "port_type": None, "state": "up", "type": "bridge", "wait": None, @@ -1178,9 +1404,11 @@ class TestValidator(unittest.TestCase): "dhcp4": True, "dhcp4_send_hostname": None, "dns": [], + "dns_options": [], "dns_search": [], "gateway4": None, "gateway6": None, + "ipv6_disabled": False, "route": [], "route_append_only": False, "route_metric4": None, @@ -1188,12 +1416,14 @@ class TestValidator(unittest.TestCase): "rule_append_only": False, }, "mac": None, - "master": "prod2", + "controller": "prod2", + "ieee802_1x": None, + "wireless": None, "mtu": None, - "name": "prod2-slave1", + "name": "prod2-port1", "parent": None, "persistent_state": "present", - "slave_type": "bridge", + "port_type": "bridge", "state": "up", "type": "ethernet", "wait": None, @@ -1209,11 +1439,11 @@ class TestValidator(unittest.TestCase): "ip": {"dhcp4": False, "auto6": False}, }, { - "name": "prod2-slave1", + "name": "prod2-port1", "state": "up", "type": "ethernet", "interface_name": "eth1", - "master": "prod2", + "controller": "prod2", }, ], ) @@ -1236,11 +1466,13 @@ class TestValidator(unittest.TestCase): "dhcp4": True, "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, "gateway4": None, "auto6": True, + "ipv6_disabled": False, "dns": [], "address": [], "route_append_only": False, @@ -1248,12 +1480,14 @@ class TestValidator(unittest.TestCase): "route": [], }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "bond1", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "bond", "wait": None, @@ -1281,11 +1515,13 @@ class TestValidator(unittest.TestCase): "dhcp4": True, "route_metric6": None, "route_metric4": None, + "dns_options": [], "dns_search": [], "dhcp4_send_hostname": None, "gateway6": None, "gateway4": None, "auto6": True, + "ipv6_disabled": False, "dns": [], 
"address": [], "route_append_only": False, @@ -1293,12 +1529,14 @@ class TestValidator(unittest.TestCase): "route": [], }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "bond1", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "bond", "wait": None, @@ -1338,6 +1576,7 @@ class TestValidator(unittest.TestCase): "rule_append_only": False, "route": [], "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "dhcp4_send_hostname": None, "gateway4": None, @@ -1345,15 +1584,18 @@ class TestValidator(unittest.TestCase): "route_metric4": None, "route_metric6": None, "dns": [], + "dns_options": [], "dns_search": [], }, "mac": "aa:bb:cc:dd:ee:ff", - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "5", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": None, "type": "ethernet", "zone": None, @@ -1380,23 +1622,27 @@ class TestValidator(unittest.TestCase): "gateway4": None, "route_metric4": None, "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "address": [], "route_append_only": False, "rule_append_only": False, "route": [], "dns": [], + "dns_options": [], "dns_search": [], "route_metric6": None, "dhcp4_send_hostname": None, }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "5", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -1445,13 +1691,15 @@ class TestValidator(unittest.TestCase): "ethtool": ETHTOOL_DEFAULTS, "force_state_change": None, "ignore_errors": None, - "interface_name": "6643-master", + "interface_name": "6643-controller", "ip": { "address": [], "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "dhcp4_send_hostname": None, "dns": [], + "dns_options": [], 
"dns_search": [], "gateway4": None, "gateway6": None, @@ -1462,12 +1710,14 @@ class TestValidator(unittest.TestCase): "rule_append_only": False, }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, - "name": "6643-master", + "name": "6643-controller", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "bridge", "wait": None, @@ -1488,9 +1738,11 @@ class TestValidator(unittest.TestCase): "dhcp4_send_hostname": None, "dhcp4": True, "dns": [], + "dns_options": [], "dns_search": [], "gateway4": None, "gateway6": None, + "ipv6_disabled": False, "route": [], "route_append_only": False, "route_metric4": None, @@ -1498,12 +1750,14 @@ class TestValidator(unittest.TestCase): "rule_append_only": False, }, "mac": None, - "master": "6643-master", + "controller": "6643-controller", + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "6643", "parent": None, "persistent_state": "present", - "slave_type": "bridge", + "port_type": "bridge", "state": "up", "type": "ethernet", "wait": None, @@ -1511,12 +1765,12 @@ class TestValidator(unittest.TestCase): }, ], [ - {"name": "6643-master", "state": "up", "type": "bridge"}, + {"name": "6643-controller", "state": "up", "type": "bridge"}, { "name": "6643", "state": "up", "type": "ethernet", - "master": "6643-master", + "controller": "6643-controller", }, ], ) @@ -1540,9 +1794,11 @@ class TestValidator(unittest.TestCase): "dhcp4": True, "dhcp4_send_hostname": None, "dns": [], + "dns_options": [], "dns_search": [], "gateway4": None, "gateway6": None, + "ipv6_disabled": False, "route": [], "route_append_only": False, "route_metric4": None, @@ -1550,12 +1806,14 @@ class TestValidator(unittest.TestCase): "rule_append_only": False, }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "infiniband.1", "parent": None, "persistent_state": "present", - 
"slave_type": None, + "port_type": None, "state": "up", "type": "infiniband", "wait": None, @@ -1609,9 +1867,11 @@ class TestValidator(unittest.TestCase): "dhcp4": True, "dhcp4_send_hostname": None, "dns": [], + "dns_options": [], "dns_search": [], "gateway4": None, "gateway6": None, + "ipv6_disabled": False, "route": [], "route_append_only": False, "route_metric4": None, @@ -1620,12 +1880,14 @@ class TestValidator(unittest.TestCase): }, "mac": "11:22:33:44:55:66:77:88:99:00:" "11:22:33:44:55:66:77:88:99:00", - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "infiniband.2", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "infiniband", "wait": None, @@ -1684,6 +1946,7 @@ class TestValidator(unittest.TestCase): "gateway4": None, "route_metric4": None, "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "address": [], "route_append_only": False, @@ -1705,17 +1968,20 @@ class TestValidator(unittest.TestCase): }, ], "dns": [], + "dns_options": [], "dns_search": ["aa", "bb"], "route_metric6": None, "dhcp4_send_hostname": None, }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "555", "parent": None, "persistent_state": "present", - "slave_type": None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -1775,6 +2041,7 @@ class TestValidator(unittest.TestCase): "gateway4": None, "route_metric4": None, "auto6": True, + "ipv6_disabled": False, "dhcp4": True, "address": [], "route_append_only": True, @@ -1803,17 +2070,20 @@ class TestValidator(unittest.TestCase): }, ], "dns": [], + "dns_options": [], "dns_search": ["aa", "bb"], "route_metric6": None, "dhcp4_send_hostname": None, }, "mac": None, - "master": None, + "controller": None, + "ieee802_1x": None, + "wireless": None, "mtu": None, "name": "e556", "parent": None, "persistent_state": "present", - "slave_type": 
None, + "port_type": None, "state": "up", "type": "ethernet", "wait": None, @@ -1887,6 +2157,637 @@ class TestValidator(unittest.TestCase): ], ) + def test_802_1x_1(self): + """ + Test private key with password + """ + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "eth0", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "ipv6_disabled": False, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_options": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "controller": None, + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "private_key_password": "p@55w0rD", + "private_key_password_flags": None, + "client_cert": "/etc/pki/tls/client.pem", + "ca_cert": "/etc/pki/tls/cacert.pem", + "ca_path": None, + "system_ca_certs": False, + "domain_suffix_match": None, + }, + "wireless": None, + "mtu": None, + "name": "eth0", + "parent": None, + "persistent_state": "present", + "port_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "eth0", + "state": "up", + "type": "ethernet", + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "private_key_password": "p@55w0rD", + "client_cert": "/etc/pki/tls/client.pem", + "ca_cert": "/etc/pki/tls/cacert.pem", + }, + } + ], + ) + + def test_802_1x_2(self): + """ + Test 802.1x profile with unencrypted private key, + domain suffix match, and system ca certs + """ + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, 
+ "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "eth0", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "ipv6_disabled": False, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_options": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "controller": None, + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "private_key_password": None, + "private_key_password_flags": ["not-required"], + "client_cert": "/etc/pki/tls/client.pem", + "ca_cert": None, + "ca_path": None, + "system_ca_certs": True, + "domain_suffix_match": "example.com", + }, + "wireless": None, + "mtu": None, + "name": "eth0", + "parent": None, + "persistent_state": "present", + "port_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "eth0", + "state": "up", + "type": "ethernet", + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "client_cert": "/etc/pki/tls/client.pem", + "private_key_password_flags": ["not-required"], + "system_ca_certs": True, + "domain_suffix_match": "example.com", + }, + } + ], + ) + + def test_802_1x_3(self): + """ + Test 802.1x profile with unencrypted private key and ca_path + """ + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethernet": ETHERNET_DEFAULTS, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "eth0", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "ipv6_disabled": False, + "dhcp4": True, + "address": [], + 
"route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_options": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "controller": None, + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "private_key_password": None, + "private_key_password_flags": ["not-required"], + "client_cert": "/etc/pki/tls/client.pem", + "ca_cert": None, + "ca_path": "/etc/pki/tls/my_ca_certs", + "system_ca_certs": False, + "domain_suffix_match": None, + }, + "wireless": None, + "mtu": None, + "name": "eth0", + "parent": None, + "persistent_state": "present", + "port_type": None, + "state": "up", + "type": "ethernet", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "eth0", + "state": "up", + "type": "ethernet", + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "client_cert": "/etc/pki/tls/client.pem", + "private_key_password_flags": ["not-required"], + "ca_path": "/etc/pki/tls/my_ca_certs", + }, + } + ], + ) + + def test_wireless_psk(self): + """ + Test wireless connection with wpa-psk auth + """ + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "wireless1", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "ipv6_disabled": False, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_options": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "controller": None, + "ieee802_1x": None, + "wireless": { + "ssid": "test wireless network", + "key_mgmt": "wpa-psk", + "password": "p@55w0rD", + }, + "mtu": None, + "name": 
"wireless1", + "parent": None, + "persistent_state": "present", + "port_type": None, + "state": "up", + "type": "wireless", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "wireless1", + "state": "up", + "type": "wireless", + "wireless": { + "ssid": "test wireless network", + "key_mgmt": "wpa-psk", + "password": "p@55w0rD", + }, + } + ], + ) + + def test_wireless_eap(self): + """ + Test wireless connection with wpa-eap + """ + self.maxDiff = None + self.do_connections_validate( + [ + { + "actions": ["present", "up"], + "autoconnect": True, + "check_iface_exists": True, + "ethtool": ETHTOOL_DEFAULTS, + "force_state_change": None, + "ignore_errors": None, + "interface_name": "wireless1", + "ip": { + "gateway6": None, + "gateway4": None, + "route_metric4": None, + "auto6": True, + "ipv6_disabled": False, + "dhcp4": True, + "address": [], + "route_append_only": False, + "rule_append_only": False, + "route": [], + "dns": [], + "dns_options": [], + "dns_search": [], + "route_metric6": None, + "dhcp4_send_hostname": None, + }, + "mac": None, + "controller": None, + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "private_key_password": "p@55w0rD", + "private_key_password_flags": None, + "client_cert": "/etc/pki/tls/client.pem", + "ca_cert": "/etc/pki/tls/cacert.pem", + "ca_path": None, + "system_ca_certs": False, + "domain_suffix_match": None, + }, + "wireless": { + "ssid": "test wireless network", + "password": None, + "key_mgmt": "wpa-eap", + }, + "mtu": None, + "name": "wireless1", + "parent": None, + "persistent_state": "present", + "port_type": None, + "state": "up", + "type": "wireless", + "wait": None, + "zone": None, + } + ], + [ + { + "name": "wireless1", + "state": "up", + "type": "wireless", + "wireless": { + "ssid": "test wireless network", + "key_mgmt": "wpa-eap", + }, + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "private_key_password": 
"p@55w0rD", + "client_cert": "/etc/pki/tls/client.pem", + "ca_cert": "/etc/pki/tls/cacert.pem", + }, + } + ], + ) + + def test_invalid_cert_path(self): + """ + should fail if a relative path is used for 802.1x certs/keys + """ + self.maxDiff = None + self.do_connections_check_invalid( + [ + { + "name": "eth0", + "state": "up", + "type": "ethernet", + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "client.key", + "client_cert": "client.pem", + "private_key_password_flags": ["not-required"], + "system_ca_certs": True, + }, + } + ] + ) + + def test_invalid_password_flag(self): + """ + should fail if an invalid private key password flag is set + """ + self.maxDiff = None + self.do_connections_check_invalid( + [ + { + "name": "eth0", + "state": "up", + "type": "ethernet", + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "client_cert": "/etc/pki/tls/client.pem", + "private_key_password_flags": ["bad-flag"], + "system_ca_certs": True, + }, + } + ] + ) + + def test_802_1x_ca_path_and_system_ca_certs(self): + """ + should fail if ca_path and system_ca_certs are used together + """ + self.maxDiff = None + self.do_connections_check_invalid( + [ + { + "name": "eth0", + "state": "up", + "type": "ethernet", + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "client_cert": "/etc/pki/tls/client.pem", + "private_key_password_flags": ["not-required"], + "ca_path": "/etc/pki/my_ca_certs", + "system_ca_certs": True, + }, + } + ] + ) + + def test_802_1x_initscripts(self): + """ + should fail to create ieee802_1x connection with initscripts + """ + input_connections = [ + { + "name": "eth0", + "state": "up", + "type": "ethernet", + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "client_cert": "/etc/pki/tls/client.pem", + "private_key_password_flags": ["not-required"], + "system_ca_certs": True, + }, + 
} + ] + + connections = ARGS_CONNECTIONS.validate(input_connections) + + self.assertRaises( + ValidationError, + ARGS_CONNECTIONS.validate_connection_one, + VALIDATE_ONE_MODE_INITSCRIPTS, + connections, + 0, + ) + + def test_802_1x_unsupported_type(self): + """ + should fail if a non ethernet/wireless connection has 802.1x settings defined + """ + self.do_connections_check_invalid( + [ + { + "name": "bond0", + "state": "up", + "type": "bond", + "ieee802_1x": { + "identity": "myhost", + "eap": "tls", + "private_key": "/etc/pki/tls/client.key", + "client_cert": "/etc/pki/tls/client.pem", + "private_key_password_flags": ["not-required"], + "system_ca_certs": True, + }, + } + ] + ) + + def test_wireless_initscripts(self): + """ + should fail to create wireless connection with initscripts + """ + input_connections = [ + { + "name": "wireless1", + "state": "up", + "type": "wireless", + "wireless": { + "ssid": "test wireless network", + "key_mgmt": "wpa-psk", + "password": "p@55w0rD", + }, + } + ] + + connections = ARGS_CONNECTIONS.validate(input_connections) + + self.assertRaises( + ValidationError, + ARGS_CONNECTIONS.validate_connection_one, + VALIDATE_ONE_MODE_INITSCRIPTS, + connections, + 0, + ) + + def test_wireless_unsupported_type(self): + """ + should fail if a non wireless connection has wireless settings defined + """ + self.do_connections_check_invalid( + [ + { + "name": "wireless-bond", + "state": "up", + "type": "bond", + "wireless": { + "ssid": "test wireless network", + "key_mgmt": "wpa-psk", + "password": "p@55w0rD", + }, + } + ] + ) + + def test_wireless_ssid_too_long(self): + """ + should fail if ssid longer than 32 bytes + """ + self.do_connections_check_invalid( + [ + { + "name": "wireless1", + "state": "up", + "type": "wireless", + "wireless": { + "ssid": "test wireless network with ssid too long", + "key_mgmt": "wpa-psk", + "password": "p@55w0rD", + }, + } + ] + ) + + def test_wireless_no_password(self): + """ + should fail if wpa-psk is selected and 
no password provided + """ + self.do_connections_check_invalid( + [ + { + "name": "wireless1", + "state": "up", + "type": "wireless", + "wireless": { + "ssid": "test wireless network", + "key_mgmt": "wpa-psk", + }, + } + ] + ) + + def test_wireless_password_too_long(self): + """ + should fail if wpa-psk is selected and no password provided + """ + self.do_connections_check_invalid( + [ + { + "name": "wireless1", + "state": "up", + "type": "wireless", + "wireless": { + "ssid": "test wireless network", + "key_mgmt": "wpa-psk", + "password": "This password is too long and should " + "not be able to validate properly", + }, + } + ] + ) + + def test_wireless_no_802_1x_for_wpa_eap(self): + """ + should fail if no 802.1x parameters are defined for a wireless + connection with key_mgmt=wpa-eap + """ + self.do_connections_check_invalid( + [ + { + "name": "wireless1", + "state": "up", + "type": "wireless", + "wireless": { + "ssid": "test wireless network", + "key_mgmt": "wpa-eap", + }, + } + ] + ) + + def test_wireless_no_options_defined(self): + """ + should fail if a connection of type='wireless' does not + have any 'wireless' settings defined + """ + self.do_connections_check_invalid( + [{"name": "wireless1", "state": "up", "type": "wireless"}] + ) + def test_invalid_mac(self): self.maxDiff = None self.do_connections_check_invalid( @@ -1922,7 +2823,7 @@ class TestValidator(unittest.TestCase): {"name": "internal_network", "type": "ethernet", "interface_name": None} ] self.assertRaises( - n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ValidationError, ARGS_CONNECTIONS.validate, network_connections ) def test_interface_name_ethernet_explicit(self): @@ -1934,11 +2835,11 @@ class TestValidator(unittest.TestCase): self.assertEqual(connections[0]["interface_name"], "eth0") def test_interface_name_ethernet_invalid_profile(self): - """ Require explicit interface_name when the profile name is not a - valid interface_name """ + """Require explicit interface_name 
when the profile name is not a + valid interface_name""" network_connections = [{"name": "internal:main", "type": "ethernet"}] self.assertRaises( - n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ValidationError, ARGS_CONNECTIONS.validate, network_connections ) network_connections = [ {"name": "internal:main", "type": "ethernet", "interface_name": "eth0"} @@ -1951,7 +2852,7 @@ class TestValidator(unittest.TestCase): {"name": "internal", "type": "ethernet", "interface_name": "invalid:name"} ] self.assertRaises( - n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ValidationError, ARGS_CONNECTIONS.validate, network_connections ) def test_interface_name_bond_empty_interface_name(self): @@ -1959,7 +2860,7 @@ class TestValidator(unittest.TestCase): {"name": "internal", "type": "bond", "interface_name": "invalid:name"} ] self.assertRaises( - n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ValidationError, ARGS_CONNECTIONS.validate, network_connections ) def test_interface_name_bond_profile_as_interface_name(self): @@ -1995,19 +2896,19 @@ class TestValidator(unittest.TestCase): def test_invalid_persistent_state_up(self): network_connections = [{"name": "internal", "persistent_state": "up"}] self.assertRaises( - n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ValidationError, ARGS_CONNECTIONS.validate, network_connections ) def test_invalid_persistent_state_down(self): network_connections = [{"name": "internal", "persistent_state": "down"}] self.assertRaises( - n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ValidationError, ARGS_CONNECTIONS.validate, network_connections ) def test_invalid_state_test(self): network_connections = [{"name": "internal", "state": "test"}] self.assertRaises( - n.ValidationError, ARGS_CONNECTIONS.validate, network_connections + ValidationError, ARGS_CONNECTIONS.validate, network_connections ) def test_default_states_type(self): @@ -2073,11 +2974,7 @@ 
class TestValidator(unittest.TestCase): def test_state_absent_up_no_type(self): self.check_partial_connection_zero( {"name": "eth0", "persistent_state": "absent", "state": "up"}, - { - "actions": ["present", "up", "absent"], - "persistent_state": "absent", - "state": "up", - }, + {"actions": ["absent", "up"], "persistent_state": "absent", "state": "up"}, ) def test_state_absent_up_type(self): @@ -2090,7 +2987,7 @@ class TestValidator(unittest.TestCase): "type": "ethernet", }, { - "actions": ["present", "up", "absent"], + "actions": ["present", "absent", "up"], "persistent_state": "absent", "state": "up", }, @@ -2101,7 +2998,7 @@ class TestValidator(unittest.TestCase): self.check_partial_connection_zero( {"name": "eth0", "persistent_state": "absent", "state": "down"}, { - "actions": ["present", "down", "absent"], + "actions": ["absent", "down"], "persistent_state": "absent", "state": "down", }, @@ -2195,6 +3092,275 @@ class TestValidator(unittest.TestCase): }, ) + def _test_ethtool_changes(self, input_ethtool, expected_ethtool): + """ + When passing a dictionary 'input_features' with each feature and their + value to change, and a dictionary 'expected_features' with the expected + result in the configuration, the expected and resulting connection are + created and validated. 
+ """ + expected_ethtool_full = copy.deepcopy(ETHTOOL_DEFAULTS) + for key in list(expected_ethtool_full): + if key in expected_ethtool: + expected_ethtool_full[key].update(expected_ethtool[key]) + + input_connection = { + "ethtool": input_ethtool, + "name": "5", + "persistent_state": "present", + "type": "ethernet", + } + + expected_connection = { + "actions": ["present"], + "ethtool": expected_ethtool_full, + "interface_name": "5", + "persistent_state": "present", + "state": None, + "type": "ethernet", + } + self.check_one_connection_with_defaults(input_connection, expected_connection) + + def test_set_ethtool_feature(self): + """ + When passing the name of an non-deprecated ethtool feature, their + current version is updated. + """ + input_ethtool = {"features": {"tx_tcp_segmentation": "yes"}} + expected_ethtool = {"features": {"tx_tcp_segmentation": True}} + self._test_ethtool_changes(input_ethtool, expected_ethtool) + + def test_set_deprecated_ethtool_feature(self): + """ + When passing a deprecated name, their current version is updated. + """ + input_ethtool = {"features": {"esp-hw-offload": "yes"}} + expected_ethtool = {"features": {"esp_hw_offload": True}} + self._test_ethtool_changes(input_ethtool, expected_ethtool) + + def test_invalid_ethtool_settings(self): + """ + When both the deprecated and current version of a feature are stated, + a Validation Error is raised. 
+ """ + input_features = {"tx-tcp-segmentation": "yes", "tx_tcp_segmentation": "yes"} + features_validator = ( + network_lsr.argument_validator.ArgValidator_DictEthtoolFeatures() + ) + self.assertValidationError(features_validator, input_features) + + def test_deprecated_ethtool_names(self): + """ + Test that for each validator in + ArgValidator_DictEthtoolFeatures.nested there is another non-deprecated + validator that has the name from the deprecated_by attribute" + """ + validators = ( + network_lsr.argument_validator.ArgValidator_DictEthtoolFeatures().nested + ) + for name, validator in validators.items(): + if isinstance( + validator, network_lsr.argument_validator.ArgValidatorDeprecated + ): + assert validator.deprecated_by in validators.keys() + + def test_valid_persistent_state(self): + """ + Test that when persistent_state is present and state is set to present + or absent, a ValidationError raises. + """ + validator = network_lsr.argument_validator.ArgValidator_DictConnection() + input_connection = { + "name": "test", + "persistent_state": "present", + "state": "present", + "type": "ethernet", + } + self.assertValidationError(validator, input_connection) + input_connection.update({"state": "absent"}) + self.assertValidationError(validator, input_connection) + + def test_dns_options_argvalidator(self): + """ + Test that argvalidator for validating dns_options value is correctly defined. 
+ """ + validator = network_lsr.argument_validator.ArgValidator_DictIP() + + false_testcase_1 = { + "dns_options": ["attempts:01"], + } + false_testcase_2 = { + "dns_options": ["debug$"], + } + false_testcase_3 = { + "dns_options": ["edns00"], + } + false_testcase_4 = { + "dns_options": ["ndots:"], + } + false_testcase_5 = { + "dns_options": ["no-check-name"], + } + false_testcase_6 = { + "dns_options": ["no-rel0ad"], + } + false_testcase_7 = { + "dns_options": ["bugno-tld-query"], + } + false_testcase_8 = { + "dns_options": ["etator"], + } + false_testcase_9 = { + "dns_options": ["singlerequest"], + } + false_testcase_10 = { + "dns_options": ["single-request-reopen:2"], + } + false_testcase_11 = { + "dns_options": ["timeout"], + } + false_testcase_12 = { + "dns_options": ["*trust-ad*"], + } + false_testcase_13 = { + "dns_options": ["use1-vc2-use-vc"], + } + + self.assertValidationError(validator, false_testcase_1) + self.assertValidationError(validator, false_testcase_2) + self.assertValidationError(validator, false_testcase_3) + self.assertValidationError(validator, false_testcase_4) + self.assertValidationError(validator, false_testcase_5) + self.assertValidationError(validator, false_testcase_6) + self.assertValidationError(validator, false_testcase_7) + self.assertValidationError(validator, false_testcase_8) + self.assertValidationError(validator, false_testcase_9) + self.assertValidationError(validator, false_testcase_10) + self.assertValidationError(validator, false_testcase_11) + self.assertValidationError(validator, false_testcase_12) + self.assertValidationError(validator, false_testcase_13) + + true_testcase_1 = { + "dns_options": ["attempts:3"], + } + true_testcase_2 = { + "dns_options": ["debug"], + } + true_testcase_3 = { + "dns_options": ["ndots:3", "single-request-reopen"], + } + true_testcase_4 = { + "dns_options": ["ndots:2", "timeout:3"], + } + true_testcase_5 = { + "dns_options": ["no-check-names"], + } + true_testcase_6 = { + "dns_options": 
["no-reload"], + } + true_testcase_7 = { + "dns_options": ["no-tld-query"], + } + true_testcase_8 = { + "dns_options": ["rotate"], + } + true_testcase_9 = { + "dns_options": ["single-request"], + } + true_testcase_10 = { + "dns_options": ["single-request-reopen"], + } + true_testcase_11 = { + "dns_options": ["trust-ad"], + } + true_testcase_12 = { + "dns_options": ["use-vc"], + } + + self.assertEqual( + validator.validate(true_testcase_1)["dns_options"], ["attempts:3"] + ) + self.assertEqual(validator.validate(true_testcase_2)["dns_options"], ["debug"]) + self.assertEqual( + validator.validate(true_testcase_3)["dns_options"], + ["ndots:3", "single-request-reopen"], + ) + self.assertEqual( + validator.validate(true_testcase_4)["dns_options"], ["ndots:2", "timeout:3"] + ) + self.assertEqual( + validator.validate(true_testcase_5)["dns_options"], ["no-check-names"] + ) + self.assertEqual( + validator.validate(true_testcase_6)["dns_options"], ["no-reload"] + ) + self.assertEqual( + validator.validate(true_testcase_7)["dns_options"], ["no-tld-query"] + ) + self.assertEqual(validator.validate(true_testcase_8)["dns_options"], ["rotate"]) + self.assertEqual( + validator.validate(true_testcase_9)["dns_options"], ["single-request"] + ) + self.assertEqual( + validator.validate(true_testcase_10)["dns_options"], + ["single-request-reopen"], + ) + self.assertEqual( + validator.validate(true_testcase_11)["dns_options"], ["trust-ad"] + ) + self.assertEqual( + validator.validate(true_testcase_12)["dns_options"], ["use-vc"] + ) + + def test_set_deprecated_master(self): + """ + When passing the deprecated "master" it is updated to "controller". 
+ """ + input_connections = [ + { + "name": "prod2", + "state": "up", + "type": "bridge", + }, + { + "name": "prod2-port1", + "state": "up", + "type": "ethernet", + "interface_name": "eth1", + "master": "prod2", + }, + ] + connections = ARGS_CONNECTIONS.validate(input_connections) + self.assertTrue(len(connections) == 2) + for connection in connections: + self.assertTrue("controller" in connection) + self.assertTrue("master" not in connection) + + def test_set_deprecated_slave_type(self): + """ + When passing the deprecated "slave_type" it is updated to "port_type". + """ + input_connections = [ + { + "name": "prod2", + "state": "up", + "type": "bridge", + }, + { + "name": "prod2-port1", + "state": "up", + "type": "ethernet", + "interface_name": "eth1", + "controller": "prod2", + "slave_type": "bridge", + }, + ] + connections = ARGS_CONNECTIONS.validate(input_connections) + self.assertTrue(len(connections) == 2) + for connection in connections: + self.assertTrue("port_type" in connection) + self.assertTrue("slave_type" not in connection) + @my_test_skipIf(nmutil is None, "no support for NM (libnm via pygobject)") class TestNM(unittest.TestCase): @@ -2216,20 +3382,43 @@ class TestNM(unittest.TestCase): connections = nmutil.connection_list() self.assertIsNotNone(connections) + def test_path_to_glib_bytes(self): + result = Util.path_to_glib_bytes("/my/test/path") + self.assertIsInstance(result, Util.GLib().Bytes) + self.assertEqual(result.get_data(), b"file:///my/test/path\x00") + class TestUtils(unittest.TestCase): - def test_check_output(self): - res = Util.check_output(["echo", "test"]) - self.assertEqual(res, "test\n") - self.assertRaises(n.MyError, Util.check_output, ["false"]) + def test_mac_ntoa(self): + mac_bytes = b"\xaa\xbb\xcc\xdd\xee\xff" + self.assertEqual(Util.mac_ntoa(mac_bytes), "aa:bb:cc:dd:ee:ff") + + def test_convert_passwd_flags_nm(self): + test_cases = [ + ([], 0), + (["none"], 0), + (["agent-owned"], 1), + (["not-saved"], 2), + (["agent-owned", 
"not-saved"], 3), + ( + ["not-required"], + 4, + ), + (["agent-owned", "not-required"], 5), + (["not-saved", "not-required"], 6), + (["agent-owned", "not-saved", "not-required"], 7), + ] + + for test_case in test_cases: + result = Util.convert_passwd_flags_nm(test_case[0]) + self.assertEqual(result, test_case[1]) class TestSysUtils(unittest.TestCase): def test_link_read_permaddress(self): - # Manipulate PATH to use ethtool mock script to avoid hard dependency on - # ethtool - os.environ["PATH"] = TESTS_BASEDIR + "/helpers:" + os.environ["PATH"] - self.assertEqual(SysUtil._link_read_permaddress("lo"), "23:00:00:00:00:00") + self.assertEqual(SysUtil._link_read_permaddress("lo"), "00:00:00:00:00:00") + self.assertEqual(SysUtil._link_read_permaddress("fakeiface"), None) + self.assertEqual(SysUtil._link_read_permaddress("morethansixteenchars"), None) if __name__ == "__main__": diff --git a/roles/linux-system-roles.network/tests/unit/test_nm_provider.py b/roles/linux-system-roles.network/tests/unit/test_nm_provider.py index 0a2679a..ed8563f 100644 --- a/roles/linux-system-roles.network/tests/unit/test_nm_provider.py +++ b/roles/linux-system-roles.network/tests/unit/test_nm_provider.py @@ -27,5 +27,12 @@ with mock.patch.dict("sys.modules", {"gi": mock.Mock(), "gi.repository": mock.Mo def test_get_nm_ethtool_feature(): """ Test get_nm_ethtool_feature() """ with mock.patch.object(nm_provider.Util, "NM") as nm_mock: - nm_feature = nm_provider.get_nm_ethtool_feature("esp-hw-offload") + nm_feature = nm_provider.get_nm_ethtool_feature("esp_hw_offload") assert nm_feature == nm_mock.return_value.ETHTOOL_OPTNAME_FEATURE_ESP_HW_OFFLOAD + + +def test_get_nm_ethtool_coalesce(): + """ Test get_nm_ethtool_coalesce() """ + with mock.patch.object(nm_provider.Util, "NM") as nm_mock: + nm_feature = nm_provider.get_nm_ethtool_coalesce("rx_frames") + assert nm_feature == nm_mock.return_value.ETHTOOL_OPTNAME_COALESCE_RX_FRAMES diff --git a/roles/linux-system-roles.network/tox.ini 
b/roles/linux-system-roles.network/tox.ini index 604c295..6ff26e7 100644 --- a/roles/linux-system-roles.network/tox.ini +++ b/roles/linux-system-roles.network/tox.ini @@ -1,181 +1,22 @@ -[tox] -envlist = black, flake8, pylint, py{26,27,36,37}, ensure_non_running_provider -skipsdist = true -skip_missing_interpreters = True +# SPDX-License-Identifier: MIT +[lsr_config] +lsr_enable = true + +[lsr_yamllint] +configfile = .yamllint.yml +configbasename = .yamllint.yml + +[lsr_ansible-lint] +configfile = .ansible-lint [testenv] -basepython = python3 -deps = - py{26,27,36,37,38}: pytest-cov - py{27,36,37,38}: pytest>=3.5.1 - py{26,27}: mock - py26: pytest - molecule_{lint,syntax,test}: docker - molecule_{lint,syntax,test}: jmespath - molecule_{lint,syntax,test}: molecule - # The selinux pypi shim does not work with Ubuntu (as used by Travis), yet. - # Therefore use a fork with Ubuntu support. This can be changed once the - # update is available on PyPi. - # molecule_{lint,syntax,test}: selinux - molecule_{lint,syntax,test}: git+https://github.com/tyll/selinux-pypi-shim@fulllocation - -[base] -passenv = * setenv = - PYTHONPATH = {toxinidir}/library:{toxinidir}/module_utils - LC_ALL = C -changedir = {toxinidir}/tests -covtarget = {toxinidir}/library --cov {toxinidir}/module_utils -pytesttarget = . + RUN_PYLINT_EXCLUDE = ^(\..*|ensure_provider_tests\.py|print_all_options\.py)$ + RUN_PYTEST_SETUP_MODULE_UTILS = true + RUN_PYLINT_SETUP_MODULE_UTILS = true + RUN_PYTEST_EXTRA_ARGS = -v + RUN_FLAKE8_EXTRA_ARGS = --exclude tests/ensure_provider_tests.py,scripts/print_all_options.py,tests/network/ensure_provider_tests.py,.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg + LSR_PUBLISH_COVERAGE = normal -[testenv:black] -deps = black - -commands = black --check --diff --include "^[^.].*\.py$" . 
- -[testenv:py26] -install_command = pip install {opts} {packages} -list_dependencies_command = pip freeze -basepython = python2.6 -passenv = {[base]passenv} -setenv = - {[base]setenv} -changedir = {[base]changedir} -commands = - pytest \ - --durations=5 \ - --cov={[base]covtarget} \ - --cov-report=html:htmlcov-py26 --cov-report=term \ - {posargs} \ - {[base]pytesttarget} - -[testenv:py27] -basepython = python2.7 -passenv = {[base]passenv} -setenv = - {[base]setenv} -changedir = {[base]changedir} -commands = - pytest \ - --durations=5 \ - --cov={[base]covtarget} \ - --cov-report=html:htmlcov-py27 --cov-report=term \ - {posargs} \ - {[base]pytesttarget} - -[testenv:py36] -basepython = python3.6 -passenv = {[base]passenv} -setenv = - {[base]setenv} -changedir = {[base]changedir} -commands = - pytest \ - --durations=5 \ - --cov={[base]covtarget} \ - --cov-report=html:htmlcov-py36 --cov-report=term \ - {posargs} \ - {[base]pytesttarget} - -[testenv:py37] -basepython = python3.7 -passenv = {[base]passenv} -setenv = - {[base]setenv} -changedir = {[base]changedir} -commands = - pytest \ - --durations=5 \ - --cov={[base]covtarget} \ - --cov-report=html:htmlcov-py37 --cov-report=term \ - {posargs} \ - {[base]pytesttarget} - -[testenv:py38] -passenv = {[base]passenv} -setenv = - {[base]setenv} -changedir = {[base]changedir} -basepython = python3.8 -commands = - pytest \ - --durations=5 \ - --cov={[base]covtarget} \ - --cov-report=html:htmlcov-py38 --cov-report=term \ - {posargs} \ - {[base]pytesttarget} - -[testenv:pylint] -basepython = python2.7 -setenv = - {[base]setenv} -deps = - pylint>=1.8.4 - ansible -commands = - pylint \ - --errors-only \ - {posargs} \ - library/network_connections.py \ - module_utils/network_lsr \ - tests/unit/test_network_connections.py - -[testenv:flake8] -basepython = python2.7 -deps = - flake8>=3.5 -whitelist_externals = flake8 -commands= - flake8 --statistics {posargs} \ - . 
- -[testenv:coveralls] -basepython = python2.7 -passenv = TRAVIS TRAVIS_* -deps = - coveralls -changedir = {[base]changedir} -commands = - coveralls - -[testenv:ensure_non_running_provider] -deps = - PyYAML -changedir = {toxinidir}/tests -commands = {toxinidir}/tests/ensure_non_running_provider.py - -[testenv:molecule_lint] -commands_pre = - molecule --version - ansible --version -commands = molecule {posargs} lint - -[testenv:molecule_syntax] -commands = molecule {posargs} syntax - -[testenv:molecule_test] -commands = molecule {posargs} test - -[pytest] -addopts = -rxs - -[flake8] -show_source = True -max-line-length = 88 -ignore = E402,W503 - -[pylint] -max-line-length = 88 -disable = wrong-import-position - -[pycodestyle] -max-line-length = 88 - -[travis] -python = - 2.6: py26 - 2.7: py27,coveralls,flake8,pylint - 3.5: molecule_lint,molecule_syntax,molecule_test - 3.6: py36,black,ensure_non_running_provider - 3.7: py37 - 3.8: py38 +[testenv:shellcheck] +commands = bash -c 'echo shellcheck is currently not enabled - please fix this' diff --git a/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info b/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info index 872777a..8c763d2 100644 --- a/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info +++ b/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jun 24 18:44:35 2020 +install_date: Tue Apr 20 16:13:52 2021 version: master diff --git a/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml b/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml index 1fdc179..3ce671d 100644 --- a/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml +++ b/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml @@ -21,14 +21,21 @@ # Arguments: > # /S -- name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent" - win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S' - args: - executable: cmd - creates: "{{ 
ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools" - async: 1 - poll: 0 - ignore_errors: yes +- block: + - name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent" + win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S' + args: + executable: cmd + creates: "{{ ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools" + async: 1000 + poll: 0 + + rescue: + - name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent" + win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S' + args: + executable: cmd + creates: "{{ ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools" - name: "{{ ansible_distribution | lower }} | wait for system to be online" wait_for_connection: diff --git a/roles/oatakan.windows_ovirt_template/README.md b/roles/oatakan.windows_ovirt_template/README.md index f6b2110..be15ba2 100644 --- a/roles/oatakan.windows_ovirt_template/README.md +++ b/roles/oatakan.windows_ovirt_template/README.md @@ -25,6 +25,8 @@ A description of the settable variables for this role should go here, including Dependencies ------------ +Import ovirt.ovirt collections. 
+ A list of roles that this role utilizes, make sure to call this out in requirements.yml file under roles directory or download manually: - oatakan.windows_template_build @@ -34,6 +36,7 @@ Example Playbook Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + # import ovirt.ovirt collections - name: create a ovirt windows template hosts: all gather_facts: False diff --git a/roles/oatakan.windows_ovirt_template/defaults/main.yml b/roles/oatakan.windows_ovirt_template/defaults/main.yml index fa97544..9881548 100644 --- a/roles/oatakan.windows_ovirt_template/defaults/main.yml +++ b/roles/oatakan.windows_ovirt_template/defaults/main.yml @@ -1,5 +1,6 @@ --- +install_updates: yes instance_wait_retry_limit: 300 instance_wait_connection_timeout: 400 @@ -14,9 +15,14 @@ enable_auto_logon: yes remove_vm_on_error: yes vm_failed: no +custom_efi_enabled: no +custom_efi_path: /usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd + virtio_iso_url: https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.173-2/virtio-win.iso winrm_enable_script_url: https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1 +set_network_to_private: '([Activator]::CreateInstance([Type]::GetTypeFromCLSID([Guid]"{DCB00C01-570F-4A9B-8D69-199FDBA5723B}"))).GetNetworkConnections() | % {$_.GetNetwork().SetCategory(1)}' + windows_build_role: oatakan.windows_template_build local_administrator_password: Chang3MyP@ssw0rd21 diff --git a/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info b/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info index e85bc2e..5f0de89 100644 --- a/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info +++ b/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jun 24 18:44:33 2020 +install_date: Tue Apr 20 16:13:50 2021 version: master diff --git 
a/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml b/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml index 3881ce5..b184c16 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml @@ -1,6 +1,6 @@ --- - name: convert to template - ovirt_template: + ovirt.ovirt.ovirt_template: auth: "{{ ovirt_auth }}" name: "{{ template.name }}" vm: "{{ template.name }}" diff --git a/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml b/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml index d22c708..f42f425 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml @@ -2,7 +2,7 @@ - block: - name: remove iso file from data_domain - ovirt_disk: + ovirt.ovirt.ovirt_disk: auth: "{{ ovirt_auth }}" name: "{{ iso_file }}" storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}" @@ -15,7 +15,7 @@ when: ansible_version.full is version('2.9', '>=') - name: remove iso file from data_domain - ovirt_disk: + ovirt.ovirt.ovirt_disk: auth: "{{ ovirt_auth }}" name: "{{ iso_file }}" storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}" diff --git a/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml b/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml index b69b2ab..556e8b9 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml @@ -1,18 +1,25 @@ --- +- name: validate file + stat: + path: "{{ playbook_dir }}/{{ temp_directory }}/windows_{{ windows_distro_name }}_autounattend_autogen.iso" + get_checksum: no + register: iso_file_check + - name: upload iso file to data_domain - ovirt_disk: + ovirt.ovirt.ovirt_disk: auth: "{{ ovirt_auth }}" name: "{{ iso_file }}" - upload_image_path: "{{ 
playbook_dir }}/{{ temp_directory }}/windows_{{ windows_distro_name }}_autounattend_autogen.iso" + upload_image_path: "{{ iso_file_check.stat.path }}" storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}" - size: 20MiB + size: "{{ (iso_file_check.stat.size/1024/1024)|round(0, 'ceil')|int|string }}MiB" wait: true bootable: true format: raw content_type: iso force: yes register: disk_iso_file + when: iso_file_check.stat.exists - name: set iso file disk id set_fact: diff --git a/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml b/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml index 3f3f5b2..8a38d6f 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml @@ -1,7 +1,7 @@ --- - name: export template to export domain - ovirt_template: + ovirt.ovirt.ovirt_template: auth: "{{ ovirt_auth }}" state: exported name: "{{ template.name }}" diff --git a/roles/oatakan.windows_ovirt_template/tasks/main.yml b/roles/oatakan.windows_ovirt_template/tasks/main.yml index 6d54d0b..018ea2d 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/main.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: obtain SSO token with using username/password credentials - ovirt_auth: + ovirt.ovirt.ovirt_auth: url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}" username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}" password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}" @@ -62,7 +62,7 @@ delegate_to: template_host - name: refresh SSO credentials - ovirt_auth: + ovirt.ovirt.ovirt_auth: url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}" username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}" password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}" @@ -77,7 +77,7 @@ rescue: - name: refresh SSO credentials - ovirt_auth: + ovirt.ovirt.ovirt_auth: url: "{{ 
lookup('env', 'OVIRT_URL')|default(ovirt.url) }}" username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}" password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}" @@ -92,7 +92,7 @@ always: - name: refresh SSO credentials - ovirt_auth: + ovirt.ovirt.ovirt_auth: url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url) }}" username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username) }}" password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password) }}" @@ -109,7 +109,7 @@ state: absent - name: logout from oVirt - ovirt_auth: + ovirt.ovirt.ovirt_auth: state: absent ovirt_auth: "{{ ovirt_auth }}" diff --git a/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml b/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml index f486e9a..53bfc05 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml @@ -10,6 +10,15 @@ src: "{{ windows_sysprep_template_folder }}/Autounattend.xml.j2" dest: "{{ temp_directory }}/ks_iso/Autounattend.xml" + - name: download ConfigureRemotingForAnsible.ps1 script + get_url: + url: "{{ winrm_enable_script_url }}" + dest: "{{ temp_directory }}/ks_iso/ConfigureRemotingForAnsible.ps1" + register: download_script + until: download_script is success + delay: 3 + retries: 5 + - name: include virtio drivers include_tasks: virtio_drivers.yml diff --git a/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml b/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml index d9a12be..7f423ac 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml @@ -1,13 +1,13 @@ --- - name: get the datacenter name - ovirt_datacenter_info: + ovirt.ovirt.ovirt_datacenter_info: auth: "{{ ovirt_auth }}" pattern: "Clusters.name = {{ providers.ovirt.cluster }}" register: datacenter_info - name: get storage information - ovirt_storage_domain_info: + 
ovirt.ovirt.ovirt_storage_domain_info: auth: "{{ ovirt_auth }}" pattern: "datacenter={{ datacenter_info.ovirt_datacenters[0].name }}" register: storage_info @@ -29,7 +29,7 @@ the_query: "[?type=='iso']" - name: check if template already exists - ovirt_template_info: + ovirt.ovirt.ovirt_template_info: auth: "{{ ovirt_auth }}" pattern: "name={{ template.name }} and datacenter={{ datacenter_info.ovirt_datacenters[0].name }}" register: template_info @@ -48,7 +48,7 @@ - template_info.ovirt_templates | length > 0 - name: check iso file on data domain - ovirt_disk_info: + ovirt.ovirt.ovirt_disk_info: auth: "{{ ovirt_auth }}" pattern: "name={{ iso_file_name }}" register: ovirt_disk_main_iso diff --git a/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml index f7a3c17..e4cc192 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml @@ -1,12 +1,12 @@ --- - name: get the datacenter name (<2.9) - ovirt_datacenter_facts: + ovirt.ovirt.ovirt_datacenter_facts: auth: "{{ ovirt_auth }}" pattern: "Clusters.name = {{ providers.ovirt.cluster }}" - name: get storage information (<2.9) - ovirt_storage_domain_facts: + ovirt.ovirt.ovirt_storage_domain_facts: auth: "{{ ovirt_auth }}" pattern: "datacenter={{ ovirt_datacenters[0].name }}" when: @@ -27,7 +27,7 @@ the_query: "[?type=='iso']" - name: check if template already exists (<2.9) - ovirt_template_facts: + ovirt.ovirt.ovirt_template_facts: auth: "{{ ovirt_auth }}" pattern: "name={{ template.name }} and datacenter={{ ovirt_datacenters[0].name }}" @@ -45,7 +45,7 @@ - ovirt_templates | length > 0 - name: check iso file on data domain - ovirt_disk_facts: + ovirt.ovirt.ovirt_disk_facts: auth: "{{ ovirt_auth }}" pattern: "name={{ iso_file_name }}" when: iso_file_name is defined diff --git a/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml 
b/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml index 4923002..2c161d6 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml @@ -1,7 +1,7 @@ --- - name: provision a new vm - ovirt_vm: + ovirt.ovirt.ovirt_vm: auth: "{{ ovirt_auth }}" name: "{{ template.name }}" cluster: "{{ providers.ovirt.cluster|default('Default') }}" @@ -9,6 +9,7 @@ wait: yes memory: "{{ template.memory }}MiB" cpu_sockets: "{{ template.cpu }}" + bios_type: "{{ template.bios_type | default(omit) }}" boot_devices: - hd - cdrom @@ -34,7 +35,7 @@ delay: 10 - name: create a disk - ovirt_disk: + ovirt.ovirt.ovirt_disk: auth: "{{ ovirt_auth }}" name: "{% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}" vm_name: "{{ template.name }}" @@ -82,7 +83,7 @@ - disks_creation.results is defined - name: assign tags to provisioned vms - ovirt_tag: + ovirt.ovirt.ovirt_tag: name: "{{ item }}_{{ instance.item.item[item] }}" vms: ["{{ instance.item.item.name }}"] state: attached @@ -96,7 +97,7 @@ - instance.item.item[item] is defined - name: start vm - ovirt_vm: + ovirt.ovirt.ovirt_vm: auth: "{{ ovirt_auth }}" name: "{{ template.name }}" cluster: "{{ providers.ovirt.cluster|default('Default') }}" diff --git a/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml b/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml index 36e4eae..bce3ff0 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml @@ -1,7 +1,7 @@ --- - name: remove template - ovirt_template: + ovirt.ovirt.ovirt_template: auth: "{{ ovirt_auth }}" cluster: "{{ providers.ovirt.cluster }}" name: "{{ template.name }}" diff --git a/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml b/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml index d13838d..e61487d 100644 --- 
a/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml @@ -1,7 +1,7 @@ --- - name: remove vm - ovirt_vm: + ovirt.ovirt.ovirt_vm: auth: "{{ ovirt_auth }}" cluster: "{{ providers.ovirt.cluster }}" name: "{{ template.name }}" diff --git a/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml b/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml index eaa5835..057ba45 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml @@ -22,14 +22,14 @@ msg: "ignoring error..." - name: reconfigure vm - ovirt_vm: + ovirt.ovirt.ovirt_vm: auth: "{{ ovirt_auth }}" cluster: "{{ providers.ovirt.cluster }}" name: "{{ template.name }}" boot_devices: - hd cd_iso: "" - custom_properties: "{{ custom_properties_efi if template_vm_efi|bool else '' }}" + custom_properties: "{{ custom_properties_efi if (template_vm_efi|bool and custom_efi_enabled|bool) else ([{}]) }}" force: yes state: present when: template is defined \ No newline at end of file diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml index 96bc7de..aa231d6 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml @@ -1,7 +1,7 @@ --- - name: wait until the image is unlocked by the oVirt engine - ovirt_disk_info: + ovirt.ovirt.ovirt_disk_info: auth: "{{ ovirt_auth }}" pattern: "name={% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}" register: ovirt_disk_info diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml index c85807d..7f0795e 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml +++ 
b/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml @@ -1,7 +1,7 @@ --- - name: wait until the disk is unlocked by the oVirt engine - ovirt_disk_info: + ovirt.ovirt.ovirt_disk_info: auth: "{{ ovirt_auth }}" pattern: "name={{ iso_file }}" register: ovirt_disk_info diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml index eaba261..918ef86 100644 --- a/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml +++ b/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml @@ -1,7 +1,7 @@ --- - name: wait for vm status to be poweredoff - ovirt_vm_info: + ovirt.ovirt.ovirt_vm_info: auth: "{{ ovirt_auth }}" pattern: name={{ template.name }} and cluster={{ providers.ovirt.cluster }} register: ovirt_vm_info_result diff --git a/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2 b/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2 index 5d19485..83de4f3 100644 --- a/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2 +++ b/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2 @@ -61,6 +61,7 @@ NTFS 1 + de94bba4-06d1-4d40-a16a-bfd50179d6ac 2 @@ -131,9 +132,9 @@ Your Org. 
{% if unattend.product_key is defined and unattend.product_key|length %} - {{ unattend.product_key }} + {{ unattend.product_key | trim }} {% endif %} - OnError + Never @@ -213,18 +214,25 @@ {% endif %} - cmd.exe /c powershell -Command "Set-NetConnectionProfile -NetworkCategory Private" - Set network connection profile to private + cmd.exe /c powershell -Command "Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Force" + Set Execution Policy 64 Bit 1 true +{% if not '2008' in windows_distro_name %} - cmd.exe /c powershell -Command "Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Force" - Set Execution Policy 64 Bit + cmd.exe /c powershell -Command "Set-NetConnectionProfile -NetworkCategory Private" + Set network connection profile to private + 2 + true + +{% else %} + + cmd.exe /c powershell –EncodedCommand {{ set_network_to_private | b64encode(encoding='utf-16-le') }} + Set network connection profile to private 2 true -{% if '2008' in windows_distro_name %} cmd.exe /c winrm quickconfig -q winrm quickconfig -q @@ -298,8 +306,16 @@ true {% endif %} +{% if '2016' in windows_distro_name %} - cmd.exe /c powershell -Command "& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString('{{ winrm_enable_script_url }}'))) -ForceNewSSLCert -EnableCredSSP" + cmd.exe /c reg add HKLM\SOFTWARE\Microsoft\.NETFramework\v4.0.30319 /v SchUseStrongCrypto /t REG_DWORD /d 1 /reg:64 /f + Configure security protocol + 19 + true + +{% endif %} + + cmd.exe /c powershell -ExecutionPolicy ByPass -File E:\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP Enable winrm 20 true @@ -395,6 +411,9 @@ {{ settings.skip_auto_activation | default('true') }} + + * + diff --git a/roles/oatakan.windows_ovirt_template/vars/main.yml b/roles/oatakan.windows_ovirt_template/vars/main.yml index e50e6a2..542591a 100644 --- a/roles/oatakan.windows_ovirt_template/vars/main.yml +++ b/roles/oatakan.windows_ovirt_template/vars/main.yml @@ -42,6 +42,7 @@ template: storage_domain: "{{ 
providers.ovirt.data_domain | default('data_domain') }}" memory: "{{ template_vm_memory }}" cpu: "{{ template_vm_cpu }}" + bios_type: "{{ ('q35_ovmf') if (template_vm_efi|bool and not custom_efi_enabled|bool) else (omit) }}" networks: - name: "{{ template_vm_network_name }}" ip: "{{ template_vm_ip_address }}" @@ -60,12 +61,12 @@ qemu_cmdline_second_iso: qemu_cmdline_efi: - -drive - - if=pflash,format=raw,readonly,file=/usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd + - if=pflash,format=raw,readonly,file={{ custom_efi_path }} custom_properties: - name: qemu_cmdline - value: "{{ ((qemu_cmdline_second_iso + qemu_cmdline_efi) | to_json) if template_vm_efi|bool else (qemu_cmdline_second_iso | to_json) }}" + value: "{{ ((qemu_cmdline_second_iso + qemu_cmdline_efi) | to_json) if (template_vm_efi|bool and custom_efi_enabled|bool) else (qemu_cmdline_second_iso | to_json) }}" custom_properties_efi: - name: qemu_cmdline - value: "{{ qemu_cmdline_efi | to_json }}" \ No newline at end of file + value: "{{ (qemu_cmdline_efi | to_json) if (template_vm_efi|bool and custom_efi_enabled|bool) else ('[]') }}" \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/README.md b/roles/oatakan.windows_template_build/README.md index e4c3094..bee9ca8 100644 --- a/roles/oatakan.windows_template_build/README.md +++ b/roles/oatakan.windows_template_build/README.md @@ -21,6 +21,7 @@ A list of roles that this role utilizes: - oatakan.windows_ec2_ena_driver - oatakan.windows_ovirt_guest_agent +- oatakan.windows_update - oatakan.windows_virtio - oatakan.windows_vmware_tools - oatakan.windows_virtualbox_guest_additions diff --git a/roles/oatakan.windows_template_build/defaults/main.yml b/roles/oatakan.windows_template_build/defaults/main.yml index 17c9136..33b9e42 100644 --- a/roles/oatakan.windows_template_build/defaults/main.yml +++ b/roles/oatakan.windows_template_build/defaults/main.yml @@ -4,24 +4,44 @@ install_updates: yes remove_apps: no clean_up_components: yes 
upgrade_powershell: no -powershell_target_version: 4.0 +powershell_target_version: 3.0 temp_directory: "{{ ansible_env.TEMP }}" update_retry_limit: 10 +upgrade_wait_timeout: 600 -powershell_script_url: https://raw.githubusercontent.com/jborean93/ansible-windows/master/scripts/Upgrade-PowerShell.ps1 +powershell_script_url: https://raw.githubusercontent.com/oatakan/ansible-role-windows-template-build/master/files/Upgrade-PowerShell.ps1 powershell_upgrade_script_file: 'C:\Upgrade-PowerShell.ps1' +ps_memfix_script_url: https://raw.githubusercontent.com/oatakan/ansible-role-windows-template-build/master/files/Install-WMF3Hotfix.ps1 +ps_memfix_script_file: 'C:\Install-WMF3Hotfix.ps1' + +set_network_to_private: '([Activator]::CreateInstance([Type]::GetTypeFromCLSID([Guid]"{DCB00C01-570F-4A9B-8D69-199FDBA5723B}"))).GetNetworkConnections() | % {$_.GetNetwork().SetCategory(1)}' + enable_tlsv12_hotfix_download_location: "{{ ansible_env.TEMP }}" enable_tlsv12_hotfix: kb: KB3080079 file: Windows6.1-KB3080079-x64.msu url: https://download.microsoft.com/download/F/4/1/F4154AD2-2119-48B4-BF99-CC15F68E110D/Windows6.1-KB3080079-x64.msu +enable_tls_support_hotfix_download_location: 'C:\Windows\Temp' +enable_tls_support_hotfix: + kb: kb3154518 + file: windows6.1-kb3154518-x64.msu + url: http://download.microsoft.com/download/6/8/0/680ee424-358c-4fdf-a0de-b45dee07b711/windows6.1-kb3154518-x64.msu + +dot_net_security_hotfix_download_location: 'C:\Windows\Temp' +dot_net_security_hotfix: + kb: KB2898850 + file: Windows8.1-KB2898850-x64.msu + url: http://download.microsoft.com/download/C/6/9/C690CC33-18F7-405D-B18A-0A8E199E531C/Windows8.1-KB2898850-x64.msu + +windows_update_agent_url: http://download.windowsupdate.com/windowsupdate/redist/standalone/7.6.7600.320/windowsupdateagent-7.6-x64.exe + #sdelete_download_url: http://web.archive.org/web/20140902022253/http://download.sysinternals.com/files/SDelete.zip -bleachbit_download_url: https://download.bleachbit.org/BleachBit-2.2-portable.zip 
+bleachbit_download_url: https://download.bleachbit.org/BleachBit-4.0.0-portable.zip sdelete_download_url: https://download.sysinternals.com/files/SDelete.zip #ultradefrag_download_url: http://downloads.sourceforge.net/project/ultradefrag/stable-release/6.1.0/ultradefrag-portable-6.1.0.bin.amd64.zip -ultradefrag_download_url: https://astuteinternet.dl.sourceforge.net/project/ultradefrag/stable-release/7.1.3/ultradefrag-portable-7.1.3.bin.amd64.zip +ultradefrag_download_url: https://sourceforge.net/projects/ultradefrag/files/stable-release/7.1.4/ultradefrag-portable-7.1.4.bin.amd64.zip/download enable_auto_logon: yes @@ -38,6 +58,7 @@ ovirt_guest_agent_role: oatakan.windows_ovirt_guest_agent virtio_role: oatakan.windows_virtio vmware_tools_role: oatakan.windows_vmware_tools virtualbox_guest_additions_role: oatakan.windows_virtualbox_guest_additions +windows_update_role: oatakan.windows_update policy: allow_unauthenticated_guest_access: no diff --git a/roles/oatakan.windows_template_build/files/win-updates.ps1 b/roles/oatakan.windows_template_build/files/win-updates.ps1 deleted file mode 100644 index 9fbdb15..0000000 --- a/roles/oatakan.windows_template_build/files/win-updates.ps1 +++ /dev/null @@ -1,229 +0,0 @@ -param($global:RestartRequired=0, - $global:MoreUpdates=0, - $global:MaxCycles=5, - $MaxUpdatesPerCycle=500) - -$Logfile = "C:\Windows\Temp\win-updates.log" - -function LogWrite { - Param ([string]$logstring) - $now = Get-Date -format s - Add-Content $Logfile -value "$now $logstring" - Write-Host $logstring -} - -function Check-ContinueRestartOrEnd() { - $RegistryKey = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update" - $RegistryEntry = "CustomRebootRequired" - switch ($global:RestartRequired) { - 0 { - $prop = (Get-ItemProperty $RegistryKey).$RegistryEntry - if ($prop) { - LogWrite "Restart Registry Entry Exists - Removing It" - Remove-ItemProperty -Path $RegistryKey -Name $RegistryEntry -ErrorAction SilentlyContinue - } - - 
LogWrite "No Restart Required" - Check-WindowsUpdates - - if (($global:MoreUpdates -eq 1) -and ($script:Cycles -le $global:MaxCycles)) { - Install-WindowsUpdates - } elseif ($script:Cycles -gt $global:MaxCycles) { - LogWrite "Exceeded Cycle Count - Stopping" - } else { - LogWrite "Done Installing Windows Updates" - } - } - 1 { - $prop = (Get-ItemProperty $RegistryKey).$RegistryEntry - if (-not $prop) { - LogWrite "Restart Registry Entry Does Not Exist - Creating It" - Set-ItemProperty -Path $RegistryKey -Name $RegistryEntry -Value "1" - } else { - LogWrite "Restart Registry Entry Exists Already" - } - - #LogWrite "Restart Required - Restarting..." - #Restart-Computer - } - default { - LogWrite "Unsure If A Restart Is Required" - break - } - } -} - -function Install-WindowsUpdates() { - $script:Cycles++ - LogWrite "Evaluating Available Updates with limit of $($MaxUpdatesPerCycle):" - $UpdatesToDownload = New-Object -ComObject 'Microsoft.Update.UpdateColl' - $script:i = 0; - $CurrentUpdates = $SearchResult.Updates - while($script:i -lt $CurrentUpdates.Count -and $script:CycleUpdateCount -lt $MaxUpdatesPerCycle) { - $Update = $CurrentUpdates.Item($script:i) - if (($Update -ne $null) -and (!$Update.IsDownloaded)) { - [bool]$addThisUpdate = $false - if ($Update.InstallationBehavior.CanRequestUserInput) { - LogWrite "> Skipping: $($Update.Title) because it requires user input" - } else { - if (!($Update.EulaAccepted)) { - LogWrite "> Note: $($Update.Title) has a license agreement that must be accepted. Accepting the license." - $Update.AcceptEula() - [bool]$addThisUpdate = $true - $script:CycleUpdateCount++ - } else { - [bool]$addThisUpdate = $true - $script:CycleUpdateCount++ - } - } - - if ([bool]$addThisUpdate) { - LogWrite "Adding: $($Update.Title)" - $UpdatesToDownload.Add($Update) |Out-Null - } - } - $script:i++ - } - - if ($UpdatesToDownload.Count -eq 0) { - LogWrite "No Updates To Download..." - } else { - LogWrite 'Downloading Updates...' - $ok = 0; - while (! 
$ok) { - try { - $Downloader = $UpdateSession.CreateUpdateDownloader() - $Downloader.Updates = $UpdatesToDownload - $Downloader.Download() - $ok = 1; - } catch { - LogWrite $_.Exception | Format-List -force - LogWrite "Error downloading updates. Retrying in 30s." - $script:attempts = $script:attempts + 1 - Start-Sleep -s 30 - } - } - } - - $UpdatesToInstall = New-Object -ComObject 'Microsoft.Update.UpdateColl' - [bool]$rebootMayBeRequired = $false - LogWrite 'The following updates are downloaded and ready to be installed:' - foreach ($Update in $SearchResult.Updates) { - if (($Update.IsDownloaded)) { - LogWrite "> $($Update.Title)" - $UpdatesToInstall.Add($Update) |Out-Null - - if ($Update.InstallationBehavior.RebootBehavior -gt 0){ - [bool]$rebootMayBeRequired = $true - } - } - } - - if ($UpdatesToInstall.Count -eq 0) { - LogWrite 'No updates available to install...' - $global:MoreUpdates=0 - $global:RestartRequired=0 - break - } - - if ($rebootMayBeRequired) { - LogWrite 'These updates may require a reboot' - $global:RestartRequired=1 - } - - LogWrite 'Installing updates...' 
- - $Installer = $script:UpdateSession.CreateUpdateInstaller() - $Installer.Updates = $UpdatesToInstall - $InstallationResult = $Installer.Install() - - LogWrite "Installation Result: $($InstallationResult.ResultCode)" - LogWrite "Reboot Required: $($InstallationResult.RebootRequired)" - LogWrite 'Listing of updates installed and individual installation results:' - if ($InstallationResult.RebootRequired) { - $global:RestartRequired=1 - } else { - $global:RestartRequired=0 - } - - for($i=0; $i -lt $UpdatesToInstall.Count; $i++) { - New-Object -TypeName PSObject -Property @{ - Title = $UpdatesToInstall.Item($i).Title - Result = $InstallationResult.GetUpdateResult($i).ResultCode - } - LogWrite "Item: " $UpdatesToInstall.Item($i).Title - LogWrite "Result: " $InstallationResult.GetUpdateResult($i).ResultCode; - } - - Check-ContinueRestartOrEnd -} - -function Check-WindowsUpdates() { - LogWrite "Checking For Windows Updates" - $Username = $env:USERDOMAIN + "\" + $env:USERNAME - - New-EventLog -Source $ScriptName -LogName 'Windows Powershell' -ErrorAction SilentlyContinue - - $Message = "Script: " + $ScriptPath + "`nScript User: " + $Username + "`nStarted: " + (Get-Date).toString() - - Write-EventLog -LogName 'Windows Powershell' -Source $ScriptName -EventID "104" -EntryType "Information" -Message $Message - LogWrite $Message - - $script:UpdateSearcher = $script:UpdateSession.CreateUpdateSearcher() - $script:successful = $FALSE - $script:attempts = 0 - $script:maxAttempts = 12 - while(-not $script:successful -and $script:attempts -lt $script:maxAttempts) { - try { - $script:SearchResult = $script:UpdateSearcher.Search("IsInstalled=0 and Type='Software' and IsHidden=0") - $script:successful = $TRUE - } catch { - LogWrite $_.Exception | Format-List -force - LogWrite "Search call to UpdateSearcher was unsuccessful. Retrying in 10s." 
- $script:attempts = $script:attempts + 1 - Start-Sleep -s 10 - } - } - - if ($SearchResult.Updates.Count -ne 0) { - $Message = "There are " + $SearchResult.Updates.Count + " more updates." - LogWrite $Message - try { - for($i=0; $i -lt $script:SearchResult.Updates.Count; $i++) { - LogWrite $script:SearchResult.Updates.Item($i).Title - LogWrite $script:SearchResult.Updates.Item($i).Description - LogWrite $script:SearchResult.Updates.Item($i).RebootRequired - LogWrite $script:SearchResult.Updates.Item($i).EulaAccepted - } - $global:MoreUpdates=1 - } catch { - LogWrite $_.Exception | Format-List -force - LogWrite "Showing SearchResult was unsuccessful. Rebooting." - $global:RestartRequired=1 - $global:MoreUpdates=0 - Check-ContinueRestartOrEnd - LogWrite "Show never happen to see this text!" - Restart-Computer - } - } else { - LogWrite 'There are no applicable updates' - $global:RestartRequired=0 - $global:MoreUpdates=0 - } -} - -$script:ScriptName = $MyInvocation.MyCommand.ToString() -$script:ScriptPath = $MyInvocation.MyCommand.Path -$script:UpdateSession = New-Object -ComObject 'Microsoft.Update.Session' -$script:UpdateSession.ClientApplicationID = 'Packer Windows Update Installer' -$script:UpdateSearcher = $script:UpdateSession.CreateUpdateSearcher() -$script:SearchResult = New-Object -ComObject 'Microsoft.Update.UpdateColl' -$script:Cycles = 0 -$script:CycleUpdateCount = 0 - -Check-WindowsUpdates -if ($global:MoreUpdates -eq 1) { - Install-WindowsUpdates -} else { - Check-ContinueRestartOrEnd -} \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/meta/.galaxy_install_info b/roles/oatakan.windows_template_build/meta/.galaxy_install_info index b2324ea..9059431 100644 --- a/roles/oatakan.windows_template_build/meta/.galaxy_install_info +++ b/roles/oatakan.windows_template_build/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jun 24 18:44:34 2020 +install_date: Tue Apr 20 16:13:51 2021 version: master diff --git 
a/roles/oatakan.windows_template_build/tasks/compact.yml b/roles/oatakan.windows_template_build/tasks/compact.yml index 574d5ca..4ad637b 100644 --- a/roles/oatakan.windows_template_build/tasks/compact.yml +++ b/roles/oatakan.windows_template_build/tasks/compact.yml @@ -27,6 +27,10 @@ path: https://download.microsoft.com/download/5/D/8/5D8C65CB-C849-4025-8E95-C3966CAFD8AE/vcredist_x86.exe product_id: '{9BE518E6-ECC6-35A9-88E4-87755C07200F}' arguments: '/qb!' + register: install_visual_c + until: install_visual_c is success + delay: 3 + retries: 5 when: "'Windows Server 2008' in ansible_distribution" - name: stop windows update service @@ -91,6 +95,7 @@ win_get_url: url: '{{ ultradefrag_download_url }}' dest: '{{ temp_directory }}\win_build\ultradefrag.zip' + follow_redirects: all register: download_ultradefrag until: download_ultradefrag is success delay: 3 @@ -103,7 +108,7 @@ - name: set udefrag extract directory set_fact: - udefrag_dir: '{{ temp_directory }}\win_build\ultradefrag-portable-7.1.3.amd64' + udefrag_dir: '{{ temp_directory }}\win_build\ultradefrag-portable-7.1.4.amd64' - name: defrag with ultradefrag win_shell: '{{ udefrag_dir }}\udefrag.exe --optimize --repeat C:' diff --git a/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml b/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml index d9a7ef4..418f538 100644 --- a/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml +++ b/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml @@ -13,16 +13,29 @@ state: present elements: "C:\\Program Files (x86)\\Windows Kits\\10\\Assessment and Deployment Kit\\Deployment Tools\\amd64\\DISM" +- pause: + seconds: 10 + - name: download hotfix win_get_url: url: '{{ enable_tlsv12_hotfix.url }}' dest: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }}' + register: download_hotfix + until: download_hotfix is success + delay: 3 + retries: 5 -- name: install hotfix +- name: install hotfix (PS >= 4) win_hotfix: 
source: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }}' state: present register: hotfix_install + when: ansible_powershell_version is version('4', '>=') + +- name: install hotfix (PS == 3) + win_shell: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }} /quiet /norestart' + register: hotfix_install + when: ansible_powershell_version is version('3', '==') - name: debug hotfix installation result debug: @@ -35,7 +48,7 @@ - name: reboot if needed win_reboot: - when: hotfix_install.reboot_required + when: hotfix_install.reboot_required | default(False) - name: enable TLSv1.2 support win_regedit: @@ -59,10 +72,25 @@ property: DisabledByDefault value: 0 +- name: enable strong crypto + win_regedit: + path: HKLM:\{{ item }} + name: SchUseStrongCrypto + data: 1 + type: dword + state: present + loop: + - 'SOFTWARE\Microsoft\.NETFramework\v4.0.30319' + - 'SOFTWARE\WOW6432Node\Microsoft\.NETFramework\v4.0.30319' + - name: ensure Windows ADK with DISM is removed win_chocolatey: name: windows-adk-deploy state: absent + register: remove_win_adk_dism + until: remove_win_adk_dism is success + delay: 3 + retries: 5 - name: reboot if TLS config was applied win_reboot: diff --git a/roles/oatakan.windows_template_build/tasks/main.yml b/roles/oatakan.windows_template_build/tasks/main.yml index 708ce16..e3f6b01 100644 --- a/roles/oatakan.windows_template_build/tasks/main.yml +++ b/roles/oatakan.windows_template_build/tasks/main.yml @@ -9,9 +9,16 @@ - include_tasks: enable-tlsv12.yml when: upgrade_powershell | bool +- include_tasks: update-agent-win2008.yml + when: "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution" + +- include_tasks: security-update-win2012.yml + when: "'Windows Server 2012' in ansible_distribution or 'Windows 8' in ansible_distribution" + - include_tasks: disable-auto-logon.yml -- include_tasks: updates.yml +- include_role: + name: "{{ windows_update_role }}" when: 
install_updates | bool - include_role: @@ -35,7 +42,7 @@ - include_tasks: policy.yml - include_tasks: power.yml - when: "'Windows 10' in ansible_distribution" + when: "'Server' not in ansible_distribution" - include_tasks: enable-rdp.yml @@ -44,13 +51,14 @@ - "'VMware' not in ansible_product_name" - "'VirtualBox' not in ansible_product_name" - ('KubeVirt' not in ansible_system_vendor | default(False)) + - ('Red Hat' not in ansible_system_vendor | default(False)) - not target_ovirt | bool - not target_vagrant | bool - include_tasks: remove-apps-alt-2.yml when: - remove_apps | bool - - "'Windows 10' in ansible_distribution" + - "'Server' not in ansible_distribution" - include_role: name: "{{ ec2_ena_driver_role }}" diff --git a/roles/oatakan.windows_template_build/tasks/policy.yml b/roles/oatakan.windows_template_build/tasks/policy.yml index 3c00090..536634e 100644 --- a/roles/oatakan.windows_template_build/tasks/policy.yml +++ b/roles/oatakan.windows_template_build/tasks/policy.yml @@ -9,11 +9,16 @@ type: dword when: policy.allow_unauthenticated_guest_access|bool -- name: set connection profile to private +- name: set connection profile to private (Windows 10) win_shell: Set-NetConnectionProfile -NetworkCategory Private when: - "'Windows 10' in ansible_distribution" +- name: set connection profile to private (Windows 7) + win_shell: '{{ set_network_to_private }}' + when: + - "'Windows 7' in ansible_distribution" + - name: Ensure local account password doesn't expire win_user: name: "{{ ansible_user }}" diff --git a/roles/oatakan.windows_template_build/tasks/powershell-upgrade.yml b/roles/oatakan.windows_template_build/tasks/powershell-upgrade.yml index 3b065be..cb71db7 100644 --- a/roles/oatakan.windows_template_build/tasks/powershell-upgrade.yml +++ b/roles/oatakan.windows_template_build/tasks/powershell-upgrade.yml @@ -1,7 +1,9 @@ --- +- include_tasks: enable_tls_system_default.yml + - name: download script - raw: '(New-Object -TypeName 
System.Net.WebClient).DownloadFile("{{ powershell_script_url }}", "{{ powershell_upgrade_script_file }}")' + raw: '[Net.ServicePointManager]::SecurityProtocol = [Enum]::ToObject([Net.SecurityProtocolType], 3072); (New-Object -TypeName System.Net.WebClient).DownloadFile("{{ powershell_script_url }}", "{{ powershell_upgrade_script_file }}")' changed_when: False check_mode: no register: download_script @@ -13,12 +15,12 @@ ignore_errors: yes - name: delete scheduled task if it exists - raw: 'SCHTASKS /Delete /TN upgrade' + raw: 'SCHTASKS /Delete /TN upgrade /f' args: executable: cmd.exe changed_when: False check_mode: no - ignore_errors: yes + failed_when: False - name: create a scheduled task to run powershell script raw: > @@ -30,6 +32,15 @@ changed_when: False check_mode: no +- name: start windows update service + raw: net start wuauserv + args: + executable: cmd.exe + failed_when: false + +- pause: + seconds: 60 + - name: run scheduled task raw: 'SCHTASKS /Run /TN upgrade' args: @@ -37,11 +48,30 @@ changed_when: False check_mode: no +- pause: + seconds: "{{ upgrade_wait_timeout }}" + +- name: wait for powershell upgrade task to finish + raw: '((schtasks /query /TN upgrade)[4] -split " +")[-2]' + changed_when: False + check_mode: no + register: upgrade_status_check + failed_when: false + until: (upgrade_status_check.stdout | trim | lower) == 'ready' + delay: 10 + retries: 10 + +- debug: + msg: "{{ powershell_target_version }}" + +# apply winrm memory hotfix for powershell 3.0 +- include_tasks: winrm-memfix.yml + when: powershell_target_version is version('3.0', '==') + - name: wait for system to reboot after upgrade wait_for_connection: - delay: 300 - sleep: 30 - timeout: 300 + sleep: 60 + timeout: 400 - name: delete scheduled task win_scheduled_task: diff --git a/roles/oatakan.windows_template_build/tasks/sysprep.yml b/roles/oatakan.windows_template_build/tasks/sysprep.yml index a199c71..f66fd0e 100644 --- a/roles/oatakan.windows_template_build/tasks/sysprep.yml 
+++ b/roles/oatakan.windows_template_build/tasks/sysprep.yml @@ -18,7 +18,7 @@ - name: enable winrm win_shell: '& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString("https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1"))) -ForceNewSSLCert -EnableCredSSP' ignore_errors: yes - when: "'Windows Server 2008' in ansible_distribution" + when: "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution" - name: copy unattend.xml win_template: diff --git a/roles/oatakan.windows_template_build/tasks/updates-all.yml b/roles/oatakan.windows_template_build/tasks/updates-all.yml deleted file mode 100644 index c43c19a..0000000 --- a/roles/oatakan.windows_template_build/tasks/updates-all.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- - -- name: check for available updates - win_updates: - category_names: "{{ win_update_category_names }}" - blacklist: "{{ win_update_blacklist | default(omit) }}" - state: searched - register: available_updates - -- debug: - msg: | - {{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available. - {% for key, value in available_updates.updates.items() %} - - {{ value.title }} - {% endfor %} - when: available_updates.updates is defined - -- include_tasks: updates-with-retry.yml - when: - - available_updates.updates is defined - - available_updates.found_update_count > 0 - -- name: check for missing updates. 
- win_updates: - state: searched - register: available_updates - -- name: list missing updates - debug: - var: available_updates - -- name: check to see if update is finished - win_shell: gwmi -Class win32_computersystem -ComputerName 127.0.0.1 | select -ExpandProperty username -ErrorAction Stop - register: logon_status - until: logon_status is success - delay: 10 - retries: 100 - ignore_errors: yes - when: "'Windows 10' in ansible_distribution" - -- name: reboot windows - win_reboot: - when: "'Windows 10' in ansible_distribution" \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates-powershell.yml b/roles/oatakan.windows_template_build/tasks/updates-powershell.yml deleted file mode 100644 index 2f1a45c..0000000 --- a/roles/oatakan.windows_template_build/tasks/updates-powershell.yml +++ /dev/null @@ -1,98 +0,0 @@ ---- - -- name: update over multiple reboots - block: - - name: check for available updates - win_updates: - category_names: - - CriticalUpdates - - DefinitionUpdates - - SecurityUpdates - - UpdateRollups - - Updates - state: searched - register: available_updates - - - debug: - msg: | - {{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available. 
- {% for key, value in available_updates.updates.items() %} - - {{ value.title }} - {% endfor %} - when: available_updates.updates is defined - - - block: - - name: install windows updates using powershell script - script: win-updates.ps1 - become: yes - become_method: runas - become_user: SYSTEM - when: - - available_updates.updates is defined - - available_updates.found_update_count > 0 - - rescue: - - name: reboot the system to recover from a failed update - win_reboot: - reboot_timeout: 7200 - - - name: wait for system to be responsive after update - wait_for_connection: - delay: 60 - sleep: 10 - timeout: 600 - - - name: check to see if reboot is required - win_reg_stat: - path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update - name: CustomRebootRequired - register: update_reboot_required_key - - - name: reboot the system to continue with the update - win_reboot: - reboot_timeout: 7200 - when: update_reboot_required_key.exists - - - name: check for missing updates - win_updates: - category_names: - - CriticalUpdates - - DefinitionUpdates - - SecurityUpdates - - UpdateRollups - - Updates - state: searched - register: missing_updates - - - debug: - msg: | - {{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing. - {% for key, value in missing_updates.updates.items() %} - - {{ value.title }} - {% endfor %} - when: missing_updates.updates is defined - - - block: - - name: set update count - set_fact: - update_retry_count: '{{ update_retry_count | default(0) | int + 1 }}' - - - name: still more updates - need to retry - fail: - msg: > - '{{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing. 
- {{ (update_retry_limit | int) - (update_retry_count | int) }} more retries left' - when: ((update_retry_limit | int) - (update_retry_count | int) > 0) - when: missing_updates.found_update_count > 0 - - - name: ensure the CustomRebootRequired key doesn't exist - win_regedit: - path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update - name: CustomRebootRequired - state: absent - - rescue: - - debug: - msg: "Still more updates remaining - retrying..." - - - include_tasks: updates-powershell.yml \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates-win2008r2.yml b/roles/oatakan.windows_template_build/tasks/updates-win2008r2.yml deleted file mode 100644 index e66b516..0000000 --- a/roles/oatakan.windows_template_build/tasks/updates-win2008r2.yml +++ /dev/null @@ -1,74 +0,0 @@ ---- - -- name: ensure Windows ADK with DISM is installed - win_chocolatey: - name: windows-adk-deploy - state: present - version: 10.0.17134.0 - register: install_windows_adk_deploy - -- name: ensure PATH contains Windows ADK - win_path: - scope: machine - state: present - elements: "C:\\Program Files (x86)\\Windows Kits\\10\\Assessment and Deployment Kit\\Deployment Tools\\amd64\\DISM" - -- name: download hotfix group 1 - win_get_url: - url: '{{ item.url }}' - dest: '{{ hotfix_download_location }}\{{ item.file }}' - loop: "{{ hotfixes_group_1 }}" - -- name: install hotfix group 1 - win_hotfix: - source: '{{ hotfix_download_location }}\{{ item.file }}' - state: present - register: hotfix_install_group_1 - loop: "{{ hotfixes_group_1 }}" - -- name: debug hotfix installation result - debug: - var: hotfix_install_group_1 - -- name: ensure hotfix file is removed (group 1) - win_file: - path: '{{ hotfix_download_location }}\{{ item.file }}' - state: absent - loop: "{{ hotfixes_group_1 }}" - -- name: reboot from starting update - win_reboot: - -- name: check for available updates - win_updates: - category_names: "{{ win_update_category_names 
}}" - blacklist: "{{ win_update_blacklist | default(omit) }}" - state: searched - register: available_updates - -- debug: - msg: | - {{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available. - {% for key, value in available_updates.updates.items() %} - - {{ value.title }} - {% endfor %} - when: available_updates.updates is defined - -- include_tasks: updates-with-retry.yml - when: - - available_updates.updates is defined - - available_updates.found_update_count > 0 - -- name: check for missing updates. - win_updates: - state: searched - register: available_updates - -- name: list missing updates - debug: - var: available_updates - -- name: make sure Windows ADK with DISM for Server 2008 R2 is not installed - win_chocolatey: - name: windows-adk-deploy - state: absent \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates-with-retry.yml b/roles/oatakan.windows_template_build/tasks/updates-with-retry.yml deleted file mode 100644 index facf18d..0000000 --- a/roles/oatakan.windows_template_build/tasks/updates-with-retry.yml +++ /dev/null @@ -1,84 +0,0 @@ ---- - -- name: update over multiple reboots - block: - - block: - - name: install all windows updates - win_updates: - category_names: "{{ win_update_category_names }}" - blacklist: "{{ (win_update_blacklist | default([])) + (failed_kb | default([])) }}" - whitelist: "{{ win_update_whitelist | default(omit) }}" - reboot: yes - register: installed_updates - - rescue: - - name: reboot the system to recover from a failed update - win_reboot: - reboot_timeout: 7200 - - - name: set failed KB to skip - set_fact: - failed_kb: "{{ failed_kb|default([]) + [installed_updates.msg | regex_replace('^.*\\((KB.*)\\).*','\\1')] }}" - when: - - installed_updates.msg is defined - - ('Failed' in installed_updates.msg) - - ('KB' in installed_updates.msg) - - - name: fail to retry - fail: - msg: "There are failed updates: {{ failed_kb | join(' ') }}" - when: - - 
failed_kb is defined - - failed_kb | length > 0 - - - name: wait for system to be responsive after update - wait_for_connection: - delay: 60 - sleep: 10 - timeout: 600 - - - name: work on any skipped KB - win_updates: - category_names: "{{ win_update_category_names }}" - blacklist: "{{ win_update_blacklist | default(omit) }}" - whitelist: "{{ failed_kb | default([]) }}" - reboot: yes - register: installed_updates_retry_skipped - when: - - failed_kb is defined - - failed_kb | length > 0 - - - name: check for missing updates - win_updates: - category_names: "{{ win_update_category_names }}" - blacklist: "{{ win_update_blacklist | default(omit) }}" - state: searched - register: missing_updates - - - debug: - msg: | - {{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing. - {% for key, value in missing_updates.updates.items() %} - - {{ value.title }} - {% endfor %} - when: missing_updates.updates is defined - - - name: still more updates - need to retry - fail: - msg: > - '{{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing. - {{ (update_retry_limit | int) - (update_retry_count | int) }} more retries left' - when: - - missing_updates.found_update_count > 0 - - ((update_retry_limit | int) - (update_retry_count | int) >= 0) - - rescue: - - name: set update count - set_fact: - update_retry_count: '{{ update_retry_count | default(0) | int + 1 }}' - - - debug: - msg: "Still more updates remaining - retrying... 
({{ update_retry_count }}/{{ update_retry_limit }})" - - - include_tasks: updates-with-retry.yml - when: ((update_retry_limit | int) - (update_retry_count | int) >= 0) \ No newline at end of file diff --git a/roles/oatakan.windows_template_build/tasks/updates.yml b/roles/oatakan.windows_template_build/tasks/updates.yml deleted file mode 100644 index 94ea25f..0000000 --- a/roles/oatakan.windows_template_build/tasks/updates.yml +++ /dev/null @@ -1,89 +0,0 @@ ---- - -- name: disable firewall for Domain, Public and Private profiles - win_shell: Set-NetFirewallProfile -Profile Domain,Public,Private -Enabled False - when: "'Windows Server 2012' in ansible_distribution" - -- name: disable firewall for Domain, Public and Private profiles - win_shell: netsh advfirewall set allprofiles state off - when: "'Windows Server 2008' in ansible_distribution" - -- name: get used space before update - win_shell: Get-PSDrive C | Select-Object Used | ConvertTo-Json - register: used_space_before_update - ignore_errors: yes - -- name: update Windows Update Agent on 2008 - win_package: - path: http://download.windowsupdate.com/windowsupdate/redist/standalone/7.6.7600.320/windowsupdateagent-7.6-x64.exe - arguments: - - /quiet - - /norestart - - /wuforce - creates_path: C:\Windows\System32\wuaueng.dll - creates_version: 7.6.7600.320 - when: "'Windows Server 2008' in ansible_distribution" - -- include_tasks: updates-all.yml - vars: - win_update_category_names: - - CriticalUpdates - - DefinitionUpdates - - SecurityUpdates - - UpdateRollups - - Updates - when: - - install_updates | bool - - "'Windows Server 2008' not in ansible_distribution" - -#- include_tasks: updates-powershell.yml -# when: -# - install_updates | bool -# - "'Windows Server 2008' in ansible_distribution" - -- include_tasks: updates-win2008r2.yml - vars: - win_update_category_names: - - CriticalUpdates - - DefinitionUpdates - - SecurityUpdates - - UpdateRollups - - Updates - hotfix_download_location: "{{ ansible_env.TEMP }}" - 
hotfixes_group_1: - - kb: KB3020369 - file: Windows6.1-KB3020369-x64.msu - url: https://download.microsoft.com/download/F/D/3/FD3728D5-0D2F-44A6-B7DA-1215CC0C9B75/Windows6.1-KB3020369-x64.msu - - kb: KB3125574 - file: windows6.1-kb3125574-v4-x64_2dafb1d203c8964239af3048b5dd4b1264cd93b9.msu - url: http://download.windowsupdate.com/d/msdownload/update/software/updt/2016/05/windows6.1-kb3125574-v4-x64_2dafb1d203c8964239af3048b5dd4b1264cd93b9.msu - - kb: KB4474419 - file: windows6.1-kb4474419-v3-x64_b5614c6cea5cb4e198717789633dca16308ef79c.msu - url: http://download.windowsupdate.com/c/msdownload/update/software/secu/2019/09/windows6.1-kb4474419-v3-x64_b5614c6cea5cb4e198717789633dca16308ef79c.msu - - kb: KB4490628 - file: windows6.1-kb4490628-x64_d3de52d6987f7c8bdc2c015dca69eac96047c76e.msu - url: http://download.windowsupdate.com/c/msdownload/update/software/secu/2019/03/windows6.1-kb4490628-x64_d3de52d6987f7c8bdc2c015dca69eac96047c76e.msu - when: - - install_updates | bool - - "'Windows Server 2008' in ansible_distribution" - -- name: get used space after update - win_shell: Get-PSDrive C | Select-Object Used | ConvertTo-Json - register: used_space_after_update - ignore_errors: yes - -- debug: - msg: - - "Used space before update: {{ ((used_space_before_update.stdout | from_json)['Used']|int / (1024*1024*1024)) | round(2, 'floor') }} GB" - - "Used space after update: {{ ((used_space_after_update.stdout | from_json)['Used']|int / (1024*1024*1024)) | round(2, 'floor') }} GB" - when: - - used_space_before_update.stdout is defined - - used_space_after_update.stdout is defined - -- name: enabled firewall for Domain, Public and Private profiles - win_shell: Set-NetFirewallProfile -Profile Domain,Public,Private -Enabled True - when: "'Windows Server 2012' in ansible_distribution" - -- name: enable firewall for Domain, Public and Private profiles - win_shell: netsh advfirewall set allprofiles state on - when: "'Windows Server 2008' in ansible_distribution" \ No newline at 
end of file diff --git a/roles/oatakan.windows_template_build/templates/unattend.xml.j2 b/roles/oatakan.windows_template_build/templates/unattend.xml.j2 index f259546..b271c88 100644 --- a/roles/oatakan.windows_template_build/templates/unattend.xml.j2 +++ b/roles/oatakan.windows_template_build/templates/unattend.xml.j2 @@ -41,9 +41,11 @@ true Home 1 +{% if not '2008' in ansible_distribution or not 'Windows 7' in ansible_distribution %} true true true +{% endif %} true true diff --git a/roles/oatakan.windows_virtio/defaults/main.yml b/roles/oatakan.windows_virtio/defaults/main.yml index 221828e..cd20f2d 100644 --- a/roles/oatakan.windows_virtio/defaults/main.yml +++ b/roles/oatakan.windows_virtio/defaults/main.yml @@ -19,8 +19,17 @@ virtio_driver_directory: >- {% set virt_dir = '2k12R2' %} {% elif 'Windows Server 2008 R2' in ansible_distribution -%} {% set virt_dir = '2k8R2' %} + {% elif 'Windows 7' in ansible_distribution -%} + {% set virt_dir = 'w7' %} {% elif 'Windows 10' in ansible_distribution -%} {% set virt_dir = 'w10' %} {% else -%} {% set virt_dir = 'w10' %} {%- endif %}{{ virt_dir }} + +qxl_driver: >- + {% if 'Windows Server 2008 R2' in ansible_distribution or 'Windows 7' in ansible_distribution -%} + {% set qxl_driver_name = 'qxl' %} + {% else -%} + {% set qxl_driver_name = 'qxldod' %} + {%- endif %}{{ qxl_driver_name }} \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/handlers/main.yml b/roles/oatakan.windows_virtio/handlers/main.yml index d1c8713..d5a5e4d 100644 --- a/roles/oatakan.windows_virtio/handlers/main.yml +++ b/roles/oatakan.windows_virtio/handlers/main.yml @@ -13,6 +13,6 @@ when: virtio_iso_mount_drive | length == 0 with_items: - "{{ ansible_env.TEMP }}\\redhat_balloon.cer" - - "{{ ansible_env.TEMP }}\\redhat_qxldod.cer" + - "{{ ansible_env.TEMP }}\\redhat_{{ qxl_driver }}.cer" - "{{ ansible_env.TEMP }}\\{{ virtio_win_iso_name }}" - "{{ ansible_env.TEMP }}\\virtio_iso_extract" \ No newline at end of file diff --git 
a/roles/oatakan.windows_virtio/meta/.galaxy_install_info b/roles/oatakan.windows_virtio/meta/.galaxy_install_info index a867d99..9486f98 100644 --- a/roles/oatakan.windows_virtio/meta/.galaxy_install_info +++ b/roles/oatakan.windows_virtio/meta/.galaxy_install_info @@ -1,2 +1,2 @@ -install_date: Wed Jun 24 18:44:36 2020 +install_date: Tue Apr 20 16:13:53 2021 version: master diff --git a/roles/oatakan.windows_virtio/tasks/download.yml b/roles/oatakan.windows_virtio/tasks/download.yml index a9a0187..6c9bda1 100644 --- a/roles/oatakan.windows_virtio/tasks/download.yml +++ b/roles/oatakan.windows_virtio/tasks/download.yml @@ -17,7 +17,9 @@ retries: 5 notify: - Unmount - when: ('Windows Server 2008' not in ansible_distribution) + when: + - ('Windows Server 2008' not in ansible_distribution) + - ('Windows 7' not in ansible_distribution) - include_tasks: extract_iso.yml - when: ('Windows Server 2008' in ansible_distribution) \ No newline at end of file + when: ('Windows Server 2008' in ansible_distribution) or ('Windows 7' in ansible_distribution) \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/tasks/extract_iso.yml b/roles/oatakan.windows_virtio/tasks/extract_iso.yml index 5a6822b..9684756 100644 --- a/roles/oatakan.windows_virtio/tasks/extract_iso.yml +++ b/roles/oatakan.windows_virtio/tasks/extract_iso.yml @@ -4,6 +4,10 @@ win_chocolatey: name: 7zip state: present + register: install_7zip + until: install_7zip is success + delay: 3 + retries: 5 - name: Ensure temp directory exists for iso win_file: diff --git a/roles/oatakan.windows_virtio/tasks/install.yml b/roles/oatakan.windows_virtio/tasks/install.yml index 273ef7f..d28309a 100644 --- a/roles/oatakan.windows_virtio/tasks/install.yml +++ b/roles/oatakan.windows_virtio/tasks/install.yml @@ -6,7 +6,7 @@ virtio_win_virtio_path: "{{ (win_disk_image.mount_path | default(virtio_iso_mount_drive)) + '\\virtio' if virtio_win_ovirt else (win_disk_image.mount_path | default(virtio_iso_mount_drive)) }}" 
virtio_win_iso_name: "{{ virtio_win_iso_name }}" when: - - virtio_iso_mount_drive | length > 0 or ('Windows Server 2008' not in ansible_distribution) + - virtio_iso_mount_drive | length > 0 or ('Windows Server 2008' not in ansible_distribution and 'Windows 7' not in ansible_distribution) - name: Set the virtio_win_iso_path and virtio_win_virtio_path set_fact: @@ -15,7 +15,7 @@ virtio_win_iso_name: "{{ virtio_win_iso_name }}" when: - virtio_iso_mount_drive | length == 0 - - ('Windows Server 2008' in ansible_distribution) + - ('Windows Server 2008' in ansible_distribution) or ('Windows 7' in ansible_distribution) - name: Get list of all drivers win_command: driverquery /V diff --git a/roles/oatakan.windows_virtio/tasks/install_cert.yml b/roles/oatakan.windows_virtio/tasks/install_cert.yml index d6fdde1..f64fe7e 100644 --- a/roles/oatakan.windows_virtio/tasks/install_cert.yml +++ b/roles/oatakan.windows_virtio/tasks/install_cert.yml @@ -1,22 +1,15 @@ --- -- name: Export Cert from qxldod - win_shell: '$cert = (Get-AuthenticodeSignature "{{ virtio_win_virtio_path }}\qxldod\{{ virtio_driver_directory }}\amd64\qxldod.cat").SignerCertificate; [System.IO.File]::WriteAllBytes("{{ ansible_env.TEMP }}\redhat_qxldod.cer", $cert.Export([System.Security.Cryptography.X509Certificates.X509ContentType]::Cert));' - when: virtio_driver_directory != '2k8R2' - -- name: Export Cert from qxl - win_shell: '$cert = (Get-AuthenticodeSignature "{{ virtio_win_virtio_path }}\qxl\{{ virtio_driver_directory }}\amd64\qxl.cat").SignerCertificate; [System.IO.File]::WriteAllBytes("{{ ansible_env.TEMP }}\redhat_qxldod.cer", $cert.Export([System.Security.Cryptography.X509Certificates.X509ContentType]::Cert));' - when: virtio_driver_directory == '2k8R2' +- name: Export Cert from {{ qxl_driver }} + win_shell: '$cert = (Get-AuthenticodeSignature "{{ virtio_win_virtio_path }}\{{ qxl_driver }}\{{ virtio_driver_directory }}\amd64\{{ qxl_driver }}.cat").SignerCertificate; [System.IO.File]::WriteAllBytes("{{ 
ansible_env.TEMP }}\redhat_{{ qxl_driver }}.cer", $cert.Export([System.Security.Cryptography.X509Certificates.X509ContentType]::Cert));' - name: Export Cert from balloon win_shell: '$cert = (Get-AuthenticodeSignature "{{ virtio_win_virtio_path }}\Balloon\{{ virtio_driver_directory }}\amd64\blnsvr.exe").SignerCertificate; [System.IO.File]::WriteAllBytes("{{ ansible_env.TEMP }}\redhat_balloon.cer", $cert.Export([System.Security.Cryptography.X509Certificates.X509ContentType]::Cert));' -- name: Install RH certificate (qxldod) to TrustedPublisher certificate store - win_command: 'certutil.exe -f -addstore "TrustedPublisher" {{ ansible_env.TEMP }}\redhat_qxldod.cer' +- name: Install RH certificates to TrustedPublisher certificate store + win_command: 'certutil.exe -f -addstore "TrustedPublisher" {{ ansible_env.TEMP }}\redhat_{{ item }}.cer' notify: - Delete downloaded - -- name: Install RH certificate (Balloon) to TrustedPublisher certificate store - win_command: 'certutil.exe -f -addstore "TrustedPublisher" {{ ansible_env.TEMP }}\redhat_balloon.cer' - notify: - - Delete downloaded \ No newline at end of file + loop: + - "{{ qxl_driver }}" + - balloon \ No newline at end of file diff --git a/roles/oatakan.windows_virtio/tasks/install_drivers.yml b/roles/oatakan.windows_virtio/tasks/install_drivers.yml index d5894ee..428d776 100644 --- a/roles/oatakan.windows_virtio/tasks/install_drivers.yml +++ b/roles/oatakan.windows_virtio/tasks/install_drivers.yml @@ -1,45 +1,16 @@ --- -- name: Install the Virtio Network Driver (netkvm) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\NetKVM\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: driver_list.stdout is not search("netkvm") - -- name: Install the Virtio Block Driver (viostor) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\viostor\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: driver_list.stdout is 
not search("viostor") - -- name: Install the QXL Graphics Driver (qxldod) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\qxldod\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: - - driver_list.stdout is not search("qxldod") - - virtio_driver_directory != '2k8R2' - -- name: Install the QXL Graphics Driver (qxl) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\qxl\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: - - driver_list.stdout is not search("qxl") - - virtio_driver_directory == '2k8R2' - -- name: Install the Balloon Driver (Balloon) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\Balloon\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: driver_list.stdout is not search("balloon") - -- name: Install Virtio RNG driver (viorng) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\viorng\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: driver_list.stdout is not search("viorng") - -- name: Install Virtio serial driver (vioserial) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\vioserial\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: driver_list.stdout is not search("vioser") - -- name: Install Virtio Input driver (vioinput) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\vioinput\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: driver_list.stdout is not search("vioinput") - -- name: Install Virtio SCSI Passthrough driver (vioscsi) - win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\vioscsi\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: driver_list.stdout is not search("vioscsi") - -- name: Install pvpanic device driver (pvpanic) - 
win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\pvpanic\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" - when: driver_list.stdout is not search("pvpanic") \ No newline at end of file +- name: Install the Virtio Drivers + win_command: "pnputil -i -a \"{{ virtio_win_virtio_path }}\\{{ item }}\\{{ virtio_driver_directory }}\\{{ ansible_env.PROCESSOR_ARCHITECTURE | lower }}\\*.inf\"" + when: driver_list.stdout is not search(item|lower) + ignore_errors: yes + loop: + - NetKVM + - viostor + - "{{ qxl_driver }}" + - Balloon + - viorng + - vioserial + - vioinput + - vioscsi + - pvpanic \ No newline at end of file diff --git a/roles/requirements.yml.old b/roles/requirements.yml.old deleted file mode 100644 index bb13029..0000000 --- a/roles/requirements.yml.old +++ /dev/null @@ -1,19 +0,0 @@ -# Java -#- name: geerlingguy.java -# Node.js (Using this repo temporarily, as it fixes a package naming bug (See #95)) -# - src: https://github.com/halkeye/ansible-role-nodejs -# version: halkeye-patch-1 -# Gitlab -#- name: geerlingguy.gitlab -## Windows Ovirt Template -#- name: oatakan.windows_ovirt_template -#- name: oatakan.windows_template_build -#- name: oatakan.windows_ovirt_guest_agent -#- name: oatakan.windows_virtio -#- name: ikke_t.podman_container_systemd -#- name: ikke_t.container_image_cleanup - -# Infra -#- name: bertvv.bind -#- name: bertvv.dhcp -#- name: linux-system-roles.network