diff --git a/collections/requirements.yml b/collections/requirements.yml
index 35fb7d8..d3ad3b6 100644
--- a/collections/requirements.yml
+++ b/collections/requirements.yml
@@ -1,38 +1,10 @@
---
collections:
- name: davidban77.gns3
- source: https://galaxy.ansible.com
-
- name: netbox.netbox
- source: https://galaxy.ansible.com
-
- name: freeipa.ansible_freeipa
- source: https://galaxy.ansible.com
- # source: https://hub.mgmt.toal.ca/api/galaxy/content/published/
-
- - name: ovirt.ovirt
- source: https://galaxy.ansible.com
-
- - name: redhat.rhv
- source: https://cloud.redhat.com/api/automation-hub/
-
- name: redhat.satellite
- source: https://cloud.redhat.com/api/automation-hub/
-
- name: community.general
- source: https://galaxy.ansible.com
-
- - name: jjaswanson4.install_satellite
- source: https://galaxy.ansible.com
-
- - name: jjaswanson4.configure_satellite
- source: https://galaxy.ansible.com
-
- name: redhat.satellite
- source: https://cloud.redhat.com/api/automation-hub/
-
- name: community.crypto
- source: https://galaxy.ansible.com
-
- name: onepassword.connect
-# - name: ansible.posix
diff --git a/roles/alvaroaleman.freeipa-client/.gitignore b/roles/alvaroaleman.freeipa-client/.gitignore
deleted file mode 100644
index 9721727..0000000
--- a/roles/alvaroaleman.freeipa-client/.gitignore
+++ /dev/null
@@ -1,27 +0,0 @@
-### Files to be ignored by Git ###
-
-# Ruby
-Gemfile.lock
-
-# Vagrant
-/\.vagrant/
-
-# IntelliJ IDEA
-/*\.ipr
-/*\.iws
-/\.idea/
-**/*\.iml
-
-# Eclipse
-/\.classpath
-/\.project
-/\.settings/
-
-# Mac
-**/\.DS_Store
-
-# Custom
-/temp/
-/tmp/
-envvars
-*.swp
diff --git a/roles/alvaroaleman.freeipa-client/.travis.yml b/roles/alvaroaleman.freeipa-client/.travis.yml
deleted file mode 100644
index 9e1323d..0000000
--- a/roles/alvaroaleman.freeipa-client/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-language: python
-python: "2.7"
-before_install:
- - sudo apt-get update --assume-yes -qq
- - sudo apt-get install --assume-yes -qq python-apt python-pycurl
-install:
- - sudo pip install ansible
-script:
- - ansible --version
- - ansible-playbook --inventory-file tests/hosts --syntax-check tests/playbook.yml
- - ansible-playbook --inventory-file tests/hosts --connection=local -vvvv tests/playbook.yml
diff --git a/roles/alvaroaleman.freeipa-client/CHANGELOG.md b/roles/alvaroaleman.freeipa-client/CHANGELOG.md
deleted file mode 100644
index e17659c..0000000
--- a/roles/alvaroaleman.freeipa-client/CHANGELOG.md
+++ /dev/null
@@ -1,85 +0,0 @@
-Remo Wenger (2):
-
-* Add support for RHEL
-* Add RHEL to Requirements
-
-# 1.3.1
-
-fxfitz (2):
-
-* Remove always_run
-* Use recommended change
-
-# 1.3.0
-
-Alvaro Aleman (10):
-
-* Use complex args style
-* More style
-* Fix defaults for Trusty
-* Fix variable namespace for package var
-* Update supported distros
-* Update sample playbook
-* Set test variables in playbook instead of Vagrantfile
-* Add assertion for required variables
-* Update role name in README
-* Add xenial to supported distros
-
-Tomas Dobrovolny (4):
-
-* Make DNS updates optional
-* Correct syntax using variable packages in with_items
-* Run Ubuntu specific tasks for all Debians
-* Add Ubuntu-16 to supported distributions
-
-# 1.2.0
-
-Alvaro Aleman (3):
-
-* Fix changelog ordering
-* Allow to register all routable client ips on DNS
-* Dont replace all DNS servers
-
-# 1.1.0
-
-Alvaro Aleman (14):
-
-* Move variables to role-specific namespace
-* Add consistent role tag
-* Refactor supported_distribution check for readability
-* Add Makefile to make testing easier
-* Make Vagrant not set DNS server by default
-* Use public ipa server for testing
-* Fix dependencies
-* Add no_ntp parameter
-* Add force_join parameter
-* Make hostname parameter optional
-* Fix checkmode
-* Remove obsolete serverspec testing
-* Refactor join for readability
-* Add contribution guidelines
-
-Casey Labs (2):
-
-* Update Ubuntu tasks
-* Adding SSH restart to handlers
-
-# 1.0.0
-
-Alvaro Aleman (8):
-
-* Populate galaxy info
-* Update documentation
-* Disable unused Docker testing
-* Fix naming
-* Fix Vagrant testing
-* Add *.swp files to gitignore
-* Update testing mechanism
-* init
-
-# 0.0.1
-
-* Initial commit
-
-
-
diff --git a/roles/alvaroaleman.freeipa-client/LICENSE b/roles/alvaroaleman.freeipa-client/LICENSE
deleted file mode 100644
index 9591157..0000000
--- a/roles/alvaroaleman.freeipa-client/LICENSE
+++ /dev/null
@@ -1,662 +0,0 @@
- GNU AFFERO GENERAL PUBLIC LICENSE
- Version 3, 19 November 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU Affero General Public License is a free, copyleft license for
-software and other kinds of works, specifically designed to ensure
-cooperation with the community in the case of network server software.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-our General Public Licenses are intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- Developers that use our General Public Licenses protect your rights
-with two steps: (1) assert copyright on the software, and (2) offer
-you this License which gives you legal permission to copy, distribute
-and/or modify the software.
-
- A secondary benefit of defending all users' freedom is that
-improvements made in alternate versions of the program, if they
-receive widespread use, become available for other developers to
-incorporate. Many developers of free software are heartened and
-encouraged by the resulting cooperation. However, in the case of
-software used on network servers, this result may fail to come about.
-The GNU General Public License permits making a modified version and
-letting the public access it on a server without ever releasing its
-source code to the public.
-
- The GNU Affero General Public License is designed specifically to
-ensure that, in such cases, the modified source code becomes available
-to the community. It requires the operator of a network server to
-provide the source code of the modified version running there to the
-users of that server. Therefore, public use of a modified version, on
-a publicly accessible server, gives the public access to the source
-code of the modified version.
-
- An older license, called the Affero General Public License and
-published by Affero, was designed to accomplish similar goals. This is
-a different license, not a version of the Affero GPL, but Affero has
-released a new version of the Affero GPL which permits relicensing under
-this license.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU Affero General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Remote Network Interaction; Use with the GNU General Public License.
-
- Notwithstanding any other provision of this License, if you modify the
-Program, your modified version must prominently offer all users
-interacting with it remotely through a computer network (if your version
-supports such interaction) an opportunity to receive the Corresponding
-Source of your version by providing access to the Corresponding Source
-from a network server at no charge, through some standard or customary
-means of facilitating copying of software. This Corresponding Source
-shall include the Corresponding Source for any work covered by version 3
-of the GNU General Public License that is incorporated pursuant to the
-following paragraph.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the work with which it is combined will remain governed by version
-3 of the GNU General Public License.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU Affero General Public License from time to time. Such new versions
-will be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU Affero General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU Affero General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU Affero General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
- If your software can interact with users remotely through a computer
-network, you should also make sure that it provides a way for users to
-get its source. For example, if your program is a web application, its
-interface could display a "Source" link that leads users to an archive
-of the code. There are many ways you could offer source, and different
-solutions will be better for different programs; see section 13 for the
-specific requirements.
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU AGPL, see
-.
-
diff --git a/roles/alvaroaleman.freeipa-client/Makefile b/roles/alvaroaleman.freeipa-client/Makefile
deleted file mode 100644
index f619cb5..0000000
--- a/roles/alvaroaleman.freeipa-client/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-LOGFILE = /tmp/ansible-freeipaclient-logfile
-
-default: test
-
-clean:
- - vagrant destroy -f
-
-box: clean
- # Start vagrant box
- vagrant up --no-provision --provider $(ANSIBLE_FREEIPACLIENT_VAGRANT_PROVIDER)
-
-checkmode:
- # Test checkmode
- ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_CHECKMODE=1 vagrant provision
-
-provision:
- # Test role
- vagrant provision
-
-idempotence:
- # Idempotence test
- vagrant provision | tee $(LOGFILE) | grep 'changed=0.*failed=0' || (cat $(LOGFILE) && false)
-
-
-test: box checkmode provision idempotence
- make checkmode
diff --git a/roles/alvaroaleman.freeipa-client/README.md b/roles/alvaroaleman.freeipa-client/README.md
deleted file mode 100644
index 9e4ce9b..0000000
--- a/roles/alvaroaleman.freeipa-client/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# ansible-freeipa-client
-
-## Synopsis
-
-```yaml
-- hosts: all
- vars:
- freeipaclient_server: ipa.demo1.freeipa.org
- freeipaclient_domain: ipa.demo1.freeipa.org
- freeipaclient_enroll_user: admin
- freeipaclient_enroll_pass: Secret123
- roles:
- - alvaroaleman.freeipa-client
-```
-
-## Description
-
-This role allows to join clients to an ipa domain
-
-## Requirements
-
-* CentOS 7
-* Red Hat Enterprise Linux 7
-* Fedora 24
-* Ubuntu Trusty
-* Ubuntu Xenial
-* Ubuntu Bionic
-
-## Role Variables
-
-* ``freeipaclient_server``: IP/Hostname of IPA server to use (string, mandatory)
-* ``freeipaclient_domain``: Domain to use (string, mandatory)
-* ``freeipaclient_enroll_user``: Username to enroll host in domain (string, mandatory)
-* ``freeipaclient_enroll_pass``: Password to enroll host in domain (string, mandatory)
-* ``freeipaclient_hostname``: The hostname to use for the client (string, default: output of ``uname -n``)
-* ``freeipaclient_dns_server``: DNS server to configure. This will not do anything if variable is empty (string)
-* ``freeipaclient_force_join``: Whether to overwrite an already existing host entry of requested name (boolean, default: ``false``)
-* ``freeipaclient_enable_ntp``: Whether to enable ntp. Kerberos won't work if the time of master and client drift too much (boolean, default: ``true``)
-* ``freeipaclient_all_ip_addresses``: Whether to add all routable ip addresses to DNS (boolean, default: ``true if not Trusty, else false``)
-
-
-## License
-
-GNU AFFERO GENERAL PUBLIC LICENSE Version 3
-
-## Contributing
-
-If you want to contribute to this repository please be aware that this
-project uses a [gitflow](http://nvie.com/posts/a-successful-git-branching-model/)
-workflow with the next release branch called ``next``.
-
-Please fork this repository and create a local branch split off of the ``next``
-branch and create pull requests back to the origin ``next`` branch.
-
-## Integration testing
-
-This role provides integration tests using Vagrant:
-
-```bash
-cp envvars-vagrant.sample envvars
-EDITOR=vim
-$EDITOR envvars
-source envvars
-make test
-```
-
-## Author information
-
-Alvaro Aleman
-
-
diff --git a/roles/alvaroaleman.freeipa-client/VERSION b/roles/alvaroaleman.freeipa-client/VERSION
deleted file mode 100644
index d0149fe..0000000
--- a/roles/alvaroaleman.freeipa-client/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.3.4
diff --git a/roles/alvaroaleman.freeipa-client/Vagrantfile b/roles/alvaroaleman.freeipa-client/Vagrantfile
deleted file mode 100644
index b177f28..0000000
--- a/roles/alvaroaleman.freeipa-client/Vagrantfile
+++ /dev/null
@@ -1,56 +0,0 @@
-# vim: set ft=ruby ts=2 sw=2 et:
-# -*- mode: ruby -*-
-
-
-VAGRANT_API_VERSION = '2'
-Vagrant.configure(VAGRANT_API_VERSION) do |config|
-
- if ENV['ANSIBLE_FREEIPACLIENT_VAGRANT_BOXNAME']
- config.vm.box = ENV['ANSIBLE_FREEIPACLIENT_VAGRANT_BOXNAME']
- else
- config.vm.box = 'ubuntu/trusty64'
- end
-
- config.vm.define :ansiblefreeipaclienttest do |d|
-
- d.vm.hostname = 'ansiblefreeipaclienttest'
- d.vm.synced_folder '.', '/vagrant', id: 'vagrant-root', disabled: true
-
- d.vm.provision :ansible do |ansible|
- ansible.playbook = 'tests/playbook.yml'
- ansible.tags = ENV['ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_TAGS']
- ansible.skip_tags = ENV['ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_SKIP_TAGS']
- ansible.verbose = ENV['ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_VERBOSE']
- if ENV['ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_CHECKMODE'] == '1'
- ansible.raw_arguments = '--check'
- end
- ansible.groups = {
- 'vagrant' => ['ansiblefreeipaclienttest']
- }
- ansible.limit = 'vagrant'
- ansible.raw_arguments = [
- '--diff'
- ]
- if ENV['ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_CHECKMODE'] == '1'
- ansible.raw_arguments << '--check'
- end
-
- ::File.directory?('.vagrant/provisioners/ansible/inventory/') do
- ansible.inventory_path = '.vagrant/provisioners/ansible/inventory/'
- end
-
- end
-
- d.vm.provider :virtualbox do |v|
- v.customize 'pre-boot', ['modifyvm', :id, '--nictype1', 'virtio']
- v.customize [ 'modifyvm', :id, '--name', 'ansiblefreeipaclienttest', '--memory', '512', '--cpus', '1' ]
- end
-
- d.vm.provider :libvirt do |lv|
- lv.memory = 1024
- lv.cpus = 2
- end
-
-
- end
-end
diff --git a/roles/alvaroaleman.freeipa-client/ansible.cfg b/roles/alvaroaleman.freeipa-client/ansible.cfg
deleted file mode 100644
index d59540a..0000000
--- a/roles/alvaroaleman.freeipa-client/ansible.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[defaults]
-roles_path = ../
-nocows = 1
-retry_files_enabled = False
diff --git a/roles/alvaroaleman.freeipa-client/defaults/main.yml b/roles/alvaroaleman.freeipa-client/defaults/main.yml
deleted file mode 100644
index 0ca9376..0000000
--- a/roles/alvaroaleman.freeipa-client/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# defaults file for ansible-freeipa-client
-freeipaclient_force_join: false
-freeipaclient_enable_ntp: true
-freeipaclient_all_ip_addresses: "{{ true if ansible_distribution_release != 'trusty' else false }}"
-freeipaclient_enable_dns_updates: true
diff --git a/roles/alvaroaleman.freeipa-client/envvars-vagrant.sample b/roles/alvaroaleman.freeipa-client/envvars-vagrant.sample
deleted file mode 100644
index 9ea663a..0000000
--- a/roles/alvaroaleman.freeipa-client/envvars-vagrant.sample
+++ /dev/null
@@ -1,9 +0,0 @@
-# General settings for Vagrant
-export ANSIBLE_FREEIPACLIENT_VAGRANT_BOXNAME=centos/7 # name of the vagrant box to use for testing
-export ANSIBLE_FREEIPACLIENT_VAGRANT_PROVIDER=virtualbox # name of the vagrant provider to use for testing
-#export ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_TAGS= # Multiple tags can be specified comma seperated
-unset ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_TAGS # An empty tags variable leads to an error
-#export ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_SKIP_TAGS= # Multiple tags can be specified comma seperated
-unset ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_SKIP_TAGS # An empty skip_tags variable leads to an error
-export ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_VERBOSE= # May contain one to four 'v's
-export ANSIBLE_FREEIPACLIENT_VAGRANT_ANSIBLE_CHECKMODE= # 1 to enable, any other value to disable
diff --git a/roles/alvaroaleman.freeipa-client/handlers/main.yml b/roles/alvaroaleman.freeipa-client/handlers/main.yml
deleted file mode 100644
index 71327e4..0000000
--- a/roles/alvaroaleman.freeipa-client/handlers/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-# handlers file for ansible-freeipa-client
-#
-- name: restart sssd
- tags:
- - sssd
- - freeipaclient
- become: true
- service:
- name=sssd
- state=restarted
-
-- name: restart ssh
- tags:
- - sssd
- - freeipaclient
- become: true
- service:
- name=ssh
- state=restarted
diff --git a/roles/alvaroaleman.freeipa-client/meta/.galaxy_install_info b/roles/alvaroaleman.freeipa-client/meta/.galaxy_install_info
deleted file mode 100644
index 3b45e37..0000000
--- a/roles/alvaroaleman.freeipa-client/meta/.galaxy_install_info
+++ /dev/null
@@ -1 +0,0 @@
-{install_date: 'Tue Nov 20 16:00:28 2018', version: 1.3.4}
diff --git a/roles/alvaroaleman.freeipa-client/meta/main.yml b/roles/alvaroaleman.freeipa-client/meta/main.yml
deleted file mode 100644
index f7c4604..0000000
--- a/roles/alvaroaleman.freeipa-client/meta/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-galaxy_info:
- author: Alvaro Aleman
- description: A role to join clients to an IPA domain
- license: AGPLv3
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
- - name: Fedora
- versions:
- - 24
- - name: Ubuntu
- versions:
- - trusty
- - xenial
- categories:
- - system
- - identity
- - ldap
- - kerberos
-dependencies: []
diff --git a/roles/alvaroaleman.freeipa-client/scripts/release.sh b/roles/alvaroaleman.freeipa-client/scripts/release.sh
deleted file mode 100755
index 57d2c88..0000000
--- a/roles/alvaroaleman.freeipa-client/scripts/release.sh
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/bin/bash
-
-##Always exit on $? -ne 0
-set -e
-##
-
-_cleanup()
-{
- rm $FILENAME_TMPCHANGELOG &>/dev/null
-}
-
-##### Create a release for a project managed with a Git repository #####
-
-
-
-### Functions ###
-
-# Confirm dialog for user interactions
-# Usage: confirm "Question?"
-# Returns: 0 = true, 1 = false
-confirm() {
- read -r -p "$1 [y/n] " response
- case $response in
- [yY][eE][sS]|[yY])
- return 0;;
- *)
- return 1;;
- esac
-}
-
-### Display usage message
-usage () {
- echo "Usage: $0 [-v VERSION] [-r REMOTE] -s 'One line summary of changes for this release'"
- exit 0
-}
-
-
-### Initialize the variables and settings ###
-#Variables
-RELEASE_VERSION=
-REMOTE=
-SHORT_SUMMARY=
-
-while getopts v:r:s: OPT; do
- case $OPT in
- v) RELEASE_VERSION="${OPTARG}";;
- r) REMOTE="${OPTARG}";;
- s) SHORT_SUMMARY="${OPTARG}";;
- esac
-done
-shift $(( $OPTIND - 1 ));
-
-if [ -z "${SHORT_SUMMARY}" ]; then
- usage && exit 0
-fi
-
-: ${REMOTE:=origin}
-
-FILENAME_TMPCHANGELOG="$( mktemp --suffix=ansible_role_release_changelog )"
-
-trap _cleanup ALRM HUP INT TERM EXIT
-
-# Fetch Git remote inormation
-git fetch $REMOTE --tags
-
-# Some information can be detected
-LAST_VERSION=$( git tag -l | tail -n 1 )
-REPOSITORY_NAME=$( git remote show -n $REMOTE | grep Fetch | sed 's#.*/##' | sed 's/\.git//' )
-EXISTING_TAGS=`git tag -l`
-
-# Detect version if not provided by user
-if [[ -z "${RELEASE_VERSION}" ]]; then
- RELEASE_VERSION=$(echo $LAST_VERSION|awk -F . '{print $1 "." $2 + 1 "." $3}')
-fi
-
-RELEASE_BRANCH="release/${RELEASE_VERSION}"
-# Check if there is already a tag named $RELEASE_VERSION
-## Temporary disabling exit on $? -ne 0 to be able to display error message
-set +e
-if [[ $EXISTING_TAGS =~ "$RELEASE_VERSION" ]]; then
- >&2 echo "A tag $RELEASE_VERSION already exists!"
- >&2 echo "Aborting.."
- exit 1
-fi
-set -e
-
-
-# Confirm or exit
-echo
-echo "Settings for the release to be created:"
-echo " Version of last release: ${LAST_VERSION}"
-echo " Version of new release: ${RELEASE_VERSION}"
-echo " Name of Git repository: ${REPOSITORY_NAME}"
-echo " Summary text for release: ${SHORT_SUMMARY}"
-if ! confirm "Continue with these settings?"; then
- exit 1
-fi
-
-
-
-### Perform release ###
-
-echo
-echo "Checkout and pull next branch"
-git checkout next
-git pull $REMOTE next
-
-echo
-echo "Checkout new release branch"
-git checkout -b ${RELEASE_BRANCH}
-
-echo
-echo "Write VERSION file"
-echo ${RELEASE_VERSION} > VERSION
-
-echo
-echo "Add release information to CHANGELOG.md file"
-
-cat CHANGELOG.md > $FILENAME_TMPCHANGELOG
-echo "# ${RELEASE_VERSION}" > CHANGELOG.md
-echo "" >> CHANGELOG.md
-
-if [[ -n ${LAST_VERSION} ]]; then
- git shortlog --no-merges next --not ${LAST_VERSION} | sed -e '/^[ \t]/s#^[ \t]*#* #' | perl -pe 's/:$/:\n/g' >> CHANGELOG.md
- cat $FILENAME_TMPCHANGELOG >> CHANGELOG.md
-else
- git shortlog --no-merges next | sed -e '/^[ \t]/s#^[ \t]*#* #' | perl -pe 's/:$/:\n/g' >> CHANGELOG.md
- cat $FILENAME_TMPCHANGELOG >> CHANGELOG.md
-fi
-echo "Please verify and adjust version information that was prepended to CHANGELOG.md file"
-echo "Diff looks like this:"
-echo
-echo '###### Diff start ######'
-
-## Disable exit on $? -ne 0 for the diff command, since it returns $? == 1 if a diff was found
-set +e
-diff -u $FILENAME_TMPCHANGELOG CHANGELOG.md
-set -e
-
-echo '###### Diff end ######'
-echo
-echo "In case this is not correct, press ctrl+z to pause this script, adjust CHANGELOG.md and get back using the fg command"
-while ! confirm "Continue?"; do
- echo "And now?"
-done
-
-echo
-echo "Commit generated release information"
-rm -f $FILENAME_TMPCHANGELOG
-git add VERSION
-git add CHANGELOG.md
-git commit -m "${REPOSITORY_NAME} ${RELEASE_VERSION}: ${SHORT_SUMMARY}"
-
-echo
-echo "Checkout and pull master branch"
-git checkout master
-git pull $REMOTE master
-
-echo
-echo "Merge release branch to master branch"
-git merge --no-ff --log --no-edit ${RELEASE_BRANCH}
-
-echo
-echo "Create release tag ${RELEASE_VERSION}"
-git tag -a ${RELEASE_VERSION} -m "${REPOSITORY_NAME} ${RELEASE_VERSION}: ${SHORT_SUMMARY}"
-
-echo
-echo "Merge release branch to next branch"
-git checkout next
-git merge ${RELEASE_BRANCH}
-
-echo
-echo "Delete release branch - it's obsolete now"
-git branch -d ${RELEASE_BRANCH}
-
-echo
-echo "Push all changes to remote repository"
-git push $REMOTE master next ${RELEASE_VERSION}
-exit 0
diff --git a/roles/alvaroaleman.freeipa-client/tasks/main.yml b/roles/alvaroaleman.freeipa-client/tasks/main.yml
deleted file mode 100644
index 7402119..0000000
--- a/roles/alvaroaleman.freeipa-client/tasks/main.yml
+++ /dev/null
@@ -1,95 +0,0 @@
----
-# tasks file for ansible-freeipa-client
-
-- name: Assert supported distribution
- tags:
- - assertion
- - freeipaclient
- assert:
- that:
- - ansible_distribution + '-' + ansible_distribution_major_version in freeipaclient_supported_distributions
-
-- name: Assert required variables
- tags:
- - assertion
- - freeipaclient
- assert:
- that:
- - freeipaclient_server is defined
- - freeipaclient_domain is defined
- - freeipaclient_enroll_user is defined
- - freeipaclient_enroll_pass is defined
-
-- name: Import variables
- tags:
- - import
- - freeipaclient
- include_vars: "{{ ansible_distribution }}.yml"
-
-- name: Set DNS server
- tags:
- - dns
- - freeipaclient
- become: true
- when: freeipaclient_dns_server is defined
- lineinfile:
- state: present
- line: "nameserver {{ freeipaclient_dns_server }}"
- dest: /etc/resolv.conf
-
-- name: Update apt cache
- tags:
- - packagemanagement
- - freeipaclient
- become: true
- when: ansible_pkg_mgr == 'apt'
- apt:
- update_cache: true
- cache_valid_time: 3600
-
-- name: Install required packages
- tags:
- - packagemanagement
- - freeipaclient
- become: true
- with_items: "{{ freeipaclient_packages }}"
- package:
- state: present
- name: "{{ item }}"
-
-- name: Check if host is enrolled
- tags:
- - enroll
- - freeipaclient
- register: freeipaclient_ipaconf
- check_mode: no
- stat:
- path: /etc/ipa/default.conf
-
-- name: Enroll host in domain
- tags:
- - enroll
- - freeipaclient
- become: true
- when: not freeipaclient_ipaconf.stat.exists
- command: >
- {{ freeipaclient_install_command }}
- {{'--hostname=' + freeipaclient_hostname if freeipaclient_hostname is defined else '--hostname=' + ansible_nodename }}
- --server={{ freeipaclient_server }}
- --domain={{ freeipaclient_domain }}
- --principal={{ freeipaclient_enroll_user }}
- --password={{ freeipaclient_enroll_pass }}
- --ssh-trust-dns
- --mkhomedir
- {{ '--enable-dns-updates' if freeipaclient_enable_dns_updates else ''}}
- --unattended
- {{ '--all-ip-addresses' if freeipaclient_all_ip_addresses else ''}}
- {{ '--no-ntp' if not freeipaclient_enable_ntp else ''}}
- {{ '--force-join' if freeipaclient_force_join else ''}}
-
-- name: Include Ubuntu specific tasks
- tags:
- - ubuntu
- - freeipaclient
- when: ansible_os_family == 'Debian'
- include: ubuntu.yml
diff --git a/roles/alvaroaleman.freeipa-client/tasks/ubuntu.yml b/roles/alvaroaleman.freeipa-client/tasks/ubuntu.yml
deleted file mode 100644
index 5385d63..0000000
--- a/roles/alvaroaleman.freeipa-client/tasks/ubuntu.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-- name: Enable mkhomedir
- tags:
- - ubuntu
- - mkhomedir
- - freeipaclient
- become: true
- lineinfile:
- dest: /etc/pam.d/common-session
- line: 'session required pam_mkhomedir.so skel=/etc/skel umask=0022'
-
-- name: Enable sssd sudo functionality
- tags:
- - ubuntu
- - sssd
- - freeipaclient
- become: true
- notify:
- - restart sssd
- - restart ssh
- lineinfile:
- dest: /etc/sssd/sssd.conf
- regexp: '^services.*'
- line: 'services = nss, pam, ssh, sudo'
diff --git a/roles/alvaroaleman.freeipa-client/tests/hosts b/roles/alvaroaleman.freeipa-client/tests/hosts
deleted file mode 100644
index 2fbb50c..0000000
--- a/roles/alvaroaleman.freeipa-client/tests/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost
diff --git a/roles/alvaroaleman.freeipa-client/tests/playbook.yml b/roles/alvaroaleman.freeipa-client/tests/playbook.yml
deleted file mode 100644
index e01cc52..0000000
--- a/roles/alvaroaleman.freeipa-client/tests/playbook.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: all
- vars:
- freeipaclient_force_join: true
- freeipaclient_server: ipa.demo1.freeipa.org
- freeipaclient_domain: ipa.demo1.freeipa.org
- freeipaclient_enroll_user: admin
- freeipaclient_enroll_pass: Secret123
- freeipaclient_hostname: ansible-freeipa-client-test.demo1.freeipa.org
- roles:
- - ansible-freeipa-client
diff --git a/roles/alvaroaleman.freeipa-client/vars/CentOS.yml b/roles/alvaroaleman.freeipa-client/vars/CentOS.yml
deleted file mode 100644
index 25a9bfc..0000000
--- a/roles/alvaroaleman.freeipa-client/vars/CentOS.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-freeipaclient_install_command: '/sbin/ipa-client-install'
-freeipaclient_packages:
- - ipa-client
- - dbus-python
diff --git a/roles/alvaroaleman.freeipa-client/vars/Fedora.yml b/roles/alvaroaleman.freeipa-client/vars/Fedora.yml
deleted file mode 100644
index eb04b9a..0000000
--- a/roles/alvaroaleman.freeipa-client/vars/Fedora.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-freeipaclient_install_command: '/sbin/ipa-client-install'
-freeipaclient_packages:
- - freeipa-client
diff --git a/roles/alvaroaleman.freeipa-client/vars/RedHat.yml b/roles/alvaroaleman.freeipa-client/vars/RedHat.yml
deleted file mode 100644
index 25a9bfc..0000000
--- a/roles/alvaroaleman.freeipa-client/vars/RedHat.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-freeipaclient_install_command: '/sbin/ipa-client-install'
-freeipaclient_packages:
- - ipa-client
- - dbus-python
diff --git a/roles/alvaroaleman.freeipa-client/vars/Ubuntu.yml b/roles/alvaroaleman.freeipa-client/vars/Ubuntu.yml
deleted file mode 100644
index 1d4b507..0000000
--- a/roles/alvaroaleman.freeipa-client/vars/Ubuntu.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-freeipaclient_install_command: '/usr/sbin/ipa-client-install'
-freeipaclient_packages:
- - freeipa-client
- - dnsutils
diff --git a/roles/alvaroaleman.freeipa-client/vars/main.yml b/roles/alvaroaleman.freeipa-client/vars/main.yml
deleted file mode 100644
index ebcf316..0000000
--- a/roles/alvaroaleman.freeipa-client/vars/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# vars file for ansible-freeipa-client
-freeipaclient_supported_distributions:
- - Ubuntu-14
- - Ubuntu-16
- - Ubuntu-18
- - CentOS-7
- - RedHat-7
- - Fedora-24
diff --git a/roles/ansible-role-nodejs/.ansible-lint b/roles/ansible-role-nodejs/.ansible-lint
deleted file mode 100644
index 0af17d0..0000000
--- a/roles/ansible-role-nodejs/.ansible-lint
+++ /dev/null
@@ -1,3 +0,0 @@
-skip_list:
- - '405'
- - '204'
diff --git a/roles/ansible-role-nodejs/.gitignore b/roles/ansible-role-nodejs/.gitignore
deleted file mode 100644
index f56f5b5..0000000
--- a/roles/ansible-role-nodejs/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.retry
-*/__pycache__
-*.pyc
diff --git a/roles/ansible-role-nodejs/.travis.yml b/roles/ansible-role-nodejs/.travis.yml
deleted file mode 100644
index 05cf095..0000000
--- a/roles/ansible-role-nodejs/.travis.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-language: python
-services: docker
-
-env:
- global:
- - ROLE_NAME: nodejs
- matrix:
- - MOLECULE_DISTRO: centos7
- - MOLECULE_DISTRO: centos6
- - MOLECULE_DISTRO: ubuntu1804
- - MOLECULE_DISTRO: ubuntu1604
- - MOLECULE_DISTRO: debian9
- - MOLECULE_DISTRO: debian8
-
- - MOLECULE_DISTRO: centos7
- MOLECULE_PLAYBOOK: playbook-latest.yml
-
-install:
- # Install test dependencies.
- - pip install molecule docker
-
-before_script:
- # Use actual Ansible Galaxy role name for the project directory.
- - cd ../
- - mv ansible-role-$ROLE_NAME geerlingguy.$ROLE_NAME
- - cd geerlingguy.$ROLE_NAME
-
-script:
- # Run tests.
- - molecule test
-
-notifications:
- webhooks: https://galaxy.ansible.com/api/v1/notifications/
diff --git a/roles/ansible-role-nodejs/LICENSE b/roles/ansible-role-nodejs/LICENSE
deleted file mode 100644
index 4275cf3..0000000
--- a/roles/ansible-role-nodejs/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017 Jeff Geerling
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/roles/ansible-role-nodejs/README.md b/roles/ansible-role-nodejs/README.md
deleted file mode 100644
index d394354..0000000
--- a/roles/ansible-role-nodejs/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Ansible Role: Node.js
-
-[](https://travis-ci.org/geerlingguy/ansible-role-nodejs)
-
-Installs Node.js on RHEL/CentOS or Debian/Ubuntu.
-
-## Requirements
-
-None.
-
-## Role Variables
-
-Available variables are listed below, along with default values (see `defaults/main.yml`):
-
- nodejs_version: "8.x"
-
-The Node.js version to install. "8.x" is the default and works on most supported OSes. Other versions such as "0.12", "4.x", "5.x", "6.x", "8.x", "10.x" etc. should work on the latest versions of Debian/Ubuntu and RHEL/CentOS.
-
- nodejs_install_npm_user: "{{ ansible_ssh_user }}"
-
-The user for whom the npm packages will be installed can be set here, this defaults to `ansible_user`.
-
- npm_config_prefix: "/usr/local/lib/npm"
-
-The global installation directory. This should be writeable by the `nodejs_install_npm_user`.
-
- npm_config_unsafe_perm: "false"
-
-Set to true to suppress the UID/GID switching when running package scripts. If set explicitly to false, then installing as a non-root user will fail.
-
- nodejs_npm_global_packages: []
-
-A list of npm packages with a `name` and (optional) `version` to be installed globally. For example:
-
- nodejs_npm_global_packages:
- # Install a specific version of a package.
- - name: jslint
- version: 0.9.3
- # Install the latest stable release of a package.
- - name: node-sass
- # This shorthand syntax also works (same as previous example).
- - node-sass
-
-
- nodejs_package_json_path: ""
-
-Set a path pointing to a particular `package.json` (e.g. `"/var/www/app/package.json"`). This will install all of the defined packages globally using Ansible's `npm` module.
-
-## Dependencies
-
-None.
-
-## Example Playbook
-
- - hosts: utility
- vars_files:
- - vars/main.yml
- roles:
- - geerlingguy.nodejs
-
-*Inside `vars/main.yml`*:
-
- nodejs_npm_global_packages:
- - name: jslint
- - name: node-sass
-
-## License
-
-MIT / BSD
-
-## Author Information
-
-This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
diff --git a/roles/ansible-role-nodejs/defaults/main.yml b/roles/ansible-role-nodejs/defaults/main.yml
deleted file mode 100644
index 7b76032..0000000
--- a/roles/ansible-role-nodejs/defaults/main.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-# Set the version of Node.js to install ("6.x", "8.x", "10.x", "11.x", etc.).
-# Version numbers from Nodesource: https://github.com/nodesource/distributions
-nodejs_version: "10.x"
-
-# The user for whom the npm packages will be installed.
-# nodejs_install_npm_user: username
-
-# The directory for global installations.
-npm_config_prefix: "/usr/local/lib/npm"
-
-# Set to true to suppress the UID/GID switching when running package scripts. If
-# set explicitly to false, then installing as a non-root user will fail.
-npm_config_unsafe_perm: "false"
-
-# Define a list of global packages to be installed with NPM.
-nodejs_npm_global_packages: []
-# # Install a specific version of a package.
-# - name: jslint
-# version: 0.9.3
-# # Install the latest stable release of a package.
-# - name: node-sass
-# # This shorthand syntax also works (same as previous example).
-# - node-sass
-
-# The path of a package.json file used to install packages globally.
-nodejs_package_json_path: ""
diff --git a/roles/ansible-role-nodejs/meta/.galaxy_install_info b/roles/ansible-role-nodejs/meta/.galaxy_install_info
deleted file mode 100644
index c122d85..0000000
--- a/roles/ansible-role-nodejs/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Fri Apr 3 19:21:41 2020
-version: halkeye-patch-1
diff --git a/roles/ansible-role-nodejs/meta/main.yml b/roles/ansible-role-nodejs/meta/main.yml
deleted file mode 100644
index 24dc3ee..0000000
--- a/roles/ansible-role-nodejs/meta/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-dependencies: []
-
-galaxy_info:
- author: geerlingguy
- description: Node.js installation for Linux
- company: "Midwestern Mac, LLC"
- license: "license (BSD, MIT)"
- min_ansible_version: 2.4
- platforms:
- - name: EL
- versions:
- - 6
- - 7
- - name: Debian
- versions:
- - all
- - name: Ubuntu
- versions:
- - trusty
- - xenial
- - bionic
- galaxy_tags:
- - development
- - web
- - javascript
- - js
- - node
- - npm
- - nodejs
diff --git a/roles/ansible-role-nodejs/molecule/default/molecule.yml b/roles/ansible-role-nodejs/molecule/default/molecule.yml
deleted file mode 100644
index 2ca6fea..0000000
--- a/roles/ansible-role-nodejs/molecule/default/molecule.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-dependency:
- name: galaxy
-driver:
- name: docker
-lint:
- name: yamllint
- options:
- config-file: molecule/default/yaml-lint.yml
-platforms:
- - name: instance
- image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest"
- command: ${MOLECULE_DOCKER_COMMAND:-""}
- volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:ro
- privileged: true
- pre_build_image: true
-provisioner:
- name: ansible
- lint:
- name: ansible-lint
- playbooks:
- converge: ${MOLECULE_PLAYBOOK:-playbook.yml}
-scenario:
- name: default
-verifier:
- name: testinfra
- lint:
- name: flake8
diff --git a/roles/ansible-role-nodejs/molecule/default/playbook-latest.yml b/roles/ansible-role-nodejs/molecule/default/playbook-latest.yml
deleted file mode 100644
index 7fca428..0000000
--- a/roles/ansible-role-nodejs/molecule/default/playbook-latest.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Converge
- hosts: all
- become: true
-
- vars:
- nodejs_version: "11.x"
- nodejs_install_npm_user: root
- npm_config_prefix: /root/.npm-global
- npm_config_unsafe_perm: "true"
- nodejs_npm_global_packages:
- - node-sass
- - name: jslint
- version: 0.12.0
- - name: yo
-
- pre_tasks:
- - name: Update apt cache.
- apt: update_cache=true cache_valid_time=600
- when: ansible_os_family == 'Debian'
-
- roles:
- - role: geerlingguy.nodejs
diff --git a/roles/ansible-role-nodejs/molecule/default/playbook.yml b/roles/ansible-role-nodejs/molecule/default/playbook.yml
deleted file mode 100644
index d1d5863..0000000
--- a/roles/ansible-role-nodejs/molecule/default/playbook.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Converge
- hosts: all
- become: true
-
- vars:
- nodejs_install_npm_user: root
- npm_config_prefix: /root/.npm-global
- npm_config_unsafe_perm: "true"
- nodejs_npm_global_packages:
- - node-sass
- - name: jslint
- version: 0.12.0
- - name: yo
-
- pre_tasks:
- - name: Update apt cache.
- apt: update_cache=true cache_valid_time=600
- when: ansible_os_family == 'Debian'
-
- roles:
- - role: geerlingguy.nodejs
diff --git a/roles/ansible-role-nodejs/molecule/default/tests/test_default.py b/roles/ansible-role-nodejs/molecule/default/tests/test_default.py
deleted file mode 100644
index eedd64a..0000000
--- a/roles/ansible-role-nodejs/molecule/default/tests/test_default.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
- os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
-
-
-def test_hosts_file(host):
- f = host.file('/etc/hosts')
-
- assert f.exists
- assert f.user == 'root'
- assert f.group == 'root'
diff --git a/roles/ansible-role-nodejs/molecule/default/yaml-lint.yml b/roles/ansible-role-nodejs/molecule/default/yaml-lint.yml
deleted file mode 100644
index 76d1459..0000000
--- a/roles/ansible-role-nodejs/molecule/default/yaml-lint.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-extends: default
-rules:
- line-length:
- max: 220
- level: warning
diff --git a/roles/ansible-role-nodejs/tasks/main.yml b/roles/ansible-role-nodejs/tasks/main.yml
deleted file mode 100644
index 5622c35..0000000
--- a/roles/ansible-role-nodejs/tasks/main.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- import_tasks: setup-RedHat.yml
- when: ansible_os_family == 'RedHat'
-
-- import_tasks: setup-Debian.yml
- when: ansible_os_family == 'Debian'
-
-- name: Define nodejs_install_npm_user
- set_fact:
- nodejs_install_npm_user: "{{ ansible_user | default(lookup('env', 'USER')) }}"
- when: nodejs_install_npm_user is not defined
-
-- name: Create npm global directory
- file:
- path: "{{ npm_config_prefix }}"
- owner: "{{ nodejs_install_npm_user }}"
- group: "{{ nodejs_install_npm_user }}"
- state: directory
-
-- name: Add npm_config_prefix bin directory to global $PATH.
- template:
- src: npm.sh.j2
- dest: /etc/profile.d/npm.sh
- mode: 0644
-
-- name: Ensure npm global packages are installed.
- npm:
- name: "{{ item.name | default(item) }}"
- version: "{{ item.version | default('latest') }}"
- global: true
- state: latest
- environment:
- NPM_CONFIG_PREFIX: "{{ npm_config_prefix }}"
- NODE_PATH: "{{ npm_config_prefix }}/lib/node_modules"
- NPM_CONFIG_UNSAFE_PERM: "{{ npm_config_unsafe_perm }}"
- with_items: "{{ nodejs_npm_global_packages }}"
- tags: ['skip_ansible_lint']
-
-- name: Install packages defined in a given package.json.
- npm:
- path: "{{ nodejs_package_json_path }}"
- when: nodejs_package_json_path is defined and nodejs_package_json_path
diff --git a/roles/ansible-role-nodejs/tasks/setup-Debian.yml b/roles/ansible-role-nodejs/tasks/setup-Debian.yml
deleted file mode 100644
index c939617..0000000
--- a/roles/ansible-role-nodejs/tasks/setup-Debian.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Ensure apt-transport-https is installed.
- apt: name=apt-transport-https state=present
-
-- name: Add Nodesource apt key.
- apt_key:
- url: https://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search=0x1655A0AB68576280
- id: "68576280"
- state: present
-
-- name: Add NodeSource repositories for Node.js.
- apt_repository:
- repo: "{{ item }}"
- state: present
- with_items:
- - "deb https://deb.nodesource.com/node_{{ nodejs_version }} {{ ansible_distribution_release }} main"
- - "deb-src https://deb.nodesource.com/node_{{ nodejs_version }} {{ ansible_distribution_release }} main"
- register: node_repo
-
-- name: Update apt cache if repo was added.
- apt: update_cache=yes
- when: node_repo.changed
-
-- name: Ensure Node.js and npm are installed.
- apt: "name=nodejs={{ nodejs_version|regex_replace('x', '') }}* state=present"
diff --git a/roles/ansible-role-nodejs/tasks/setup-RedHat.yml b/roles/ansible-role-nodejs/tasks/setup-RedHat.yml
deleted file mode 100644
index 928e0c6..0000000
--- a/roles/ansible-role-nodejs/tasks/setup-RedHat.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Set up the Nodesource RPM directory for Node.js > 0.10.
- set_fact:
- nodejs_rhel_rpm_dir: "pub_{{ nodejs_version }}"
- when: nodejs_version != '0.10'
-
-- name: Set up the Nodesource RPM variable for Node.js == 0.10.
- set_fact:
- nodejs_rhel_rpm_dir: "pub"
- when: nodejs_version == '0.10'
-
-- name: Import Nodesource RPM key (CentOS < 7).
- rpm_key:
- key: http://rpm.nodesource.com/pub/el/NODESOURCE-GPG-SIGNING-KEY-EL
- state: present
- when: ansible_distribution_major_version|int < 7
-
-- name: Import Nodesource RPM key (CentOS 7+)..
- rpm_key:
- key: https://rpm.nodesource.com/pub/el/NODESOURCE-GPG-SIGNING-KEY-EL
- state: present
- when: ansible_distribution_major_version|int >= 7
-
-- name: Add Nodesource repositories for Node.js (CentOS < 7).
- yum:
- name: "http://rpm.nodesource.com/{{ nodejs_rhel_rpm_dir }}/el/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}/nodesource-release-el{{ ansible_distribution_major_version }}-1.noarch.rpm"
- state: present
- when: ansible_distribution_major_version|int < 7
-
-- name: Add Nodesource repositories for Node.js (CentOS 7+).
- yum:
- name: "https://rpm.nodesource.com/{{ nodejs_rhel_rpm_dir }}/el/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}/nodesource-release-el{{ ansible_distribution_major_version }}-1.noarch.rpm"
- state: present
- when: ansible_distribution_major_version|int >= 7
-
-- name: Ensure Node.js and npm are installed.
- yum: "name=nodejs state=present enablerepo='nodesource'"
diff --git a/roles/ansible-role-nodejs/templates/npm.sh.j2 b/roles/ansible-role-nodejs/templates/npm.sh.j2
deleted file mode 100644
index 67caa78..0000000
--- a/roles/ansible-role-nodejs/templates/npm.sh.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-export PATH={{ npm_config_prefix }}/bin:$PATH
-export NPM_CONFIG_PREFIX={{ npm_config_prefix }}
-export NODE_PATH=$NODE_PATH:{{ npm_config_prefix }}/lib/node_modules
diff --git a/roles/bertvv.bind/.gitignore b/roles/bertvv.bind/.gitignore
deleted file mode 100644
index b433c0a..0000000
--- a/roles/bertvv.bind/.gitignore
+++ /dev/null
@@ -1,19 +0,0 @@
-# .gitignore
-
-# Hidden Vagrant-directory
-.vagrant
-
-# Backup files (e.g. Vim, Gedit, etc.)
-*~
-
-# Vagrant base boxes (you never know when someone puts one in the repository)
-*.box
-
-# Python artefacts
-.ropeproject
-*.pyc
-
-# Ignore test directory
-tests/
-vagrant-tests/
-docker-tests/
diff --git a/roles/bertvv.bind/.travis.yml b/roles/bertvv.bind/.travis.yml
deleted file mode 100644
index 05a9d32..0000000
--- a/roles/bertvv.bind/.travis.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-language: python
-
-# Use the new container infrastructure
-sudo: required
-
-env:
- global:
- - ROLE_NAME: bind
- matrix:
- - MOLECULE_DISTRO: centos7
- - MOLECULE_DISTRO: centos8
- - MOLECULE_DISTRO: debian8
- - MOLECULE_DISTRO: debian9
- - MOLECULE_DISTRO: debian10
- - MOLECULE_DISTRO: ubuntu1604
- - MOLECULE_DISTRO: ubuntu1804
- - MOLECULE_DISTRO: ubuntu2004
-
-# Enable docker support
-services:
- - docker
-
-install:
- - sudo apt-get update
- - sudo apt-get install bats curl dnsutils
- # Install dependencies for Molecule test
- - python3 -m pip install molecule yamllint ansible-lint docker netaddr
- # Check ansible and molecule version
- - ansible --version
- - molecule --version
-
- # Create ansible.cfg with correct roles_path
- - printf '[defaults]\nroles_path=../' >ansible.cfg
-
-before_script:
- # Renames ansible-role-bind to bertvv.bind to make it match with Ansible
- # Galaxy
- - cd ../
- - mv ansible-role-$ROLE_NAME bertvv.$ROLE_NAME
- - cd bertvv.$ROLE_NAME
-
-script:
- # Run molecule test
- - molecule test
-
-notifications:
- webhooks: https://galaxy.ansible.com/api/v1/notifications/
diff --git a/roles/bertvv.bind/.yamllint b/roles/bertvv.bind/.yamllint
deleted file mode 100644
index d19261c..0000000
--- a/roles/bertvv.bind/.yamllint
+++ /dev/null
@@ -1,56 +0,0 @@
----
-
-rules:
- braces:
- min-spaces-inside: 0
- max-spaces-inside: 0
- min-spaces-inside-empty: -1
- max-spaces-inside-empty: -1
- brackets:
- min-spaces-inside: 0
- max-spaces-inside: 0
- min-spaces-inside-empty: -1
- max-spaces-inside-empty: -1
- colons:
- max-spaces-before: 0
- max-spaces-after: 1
- commas:
- max-spaces-before: 0
- min-spaces-after: 1
- max-spaces-after: 1
- comments:
- level: warning
- require-starting-space: true
- min-spaces-from-content: 2
- comments-indentation:
- level: warning
- document-end: disable
- document-start:
- level: warning
- present: true
- empty-lines:
- max: 2
- max-start: 0
- max-end: 0
- empty-values:
- forbid-in-block-mappings: false
- forbid-in-flow-mappings: false
- hyphens:
- max-spaces-after: 1
- indentation:
- spaces: consistent
- indent-sequences: true
- check-multi-line-strings: false
- key-duplicates: enable
- key-ordering: disable
- line-length:
- max: 1000
- level: warning
- allow-non-breakable-words: true
- allow-non-breakable-inline-mappings: false
- new-line-at-end-of-file: enable
- new-lines:
- type: unix
- trailing-spaces: enable
- truthy:
- level: warning
diff --git a/roles/bertvv.bind/CHANGELOG.md b/roles/bertvv.bind/CHANGELOG.md
deleted file mode 100644
index 2a54ff4..0000000
--- a/roles/bertvv.bind/CHANGELOG.md
+++ /dev/null
@@ -1,250 +0,0 @@
-# Change log
-
-This file contains al notable changes to the bind Ansible role.
-
-This file adheres to the guidelines of [http://keepachangelog.com/](http://keepachangelog.com/). Versioning follows [Semantic Versioning](http://semver.org/). "GH-X" refers to the X'th issue/pull request on the Github project.
-
-## 4.2.0 - 2020-05-23
-
-An update that's been long overdue. Several PRs with new features were merged!
-
-A special thanks to @blofeldthefish for his willingness to help out with maintaining this role and to @RobinsOphalvens for contributing the new testing harness based on Molecule. Thanks to them, further development of this role got out of the deadlock it's been in since the previous version.
-
-## Added
-
-- New supported platforms
- - CentOS 8 (GH-107, credit: [Paulius Mazeika](https://github.com/pauliusm))
- - Debian 10 (no changes were needed)
- - FreeBSD (GH-100, credit: [psa](https://github.com/psa))
- - Ubuntu 20.04 LTS (no changes were needed)
-- (GH-69) Allow TTLs for individual records (credit: [Stuart Knight](https://github.com/blofeldthefish))
-- (GH-79) Added support for the SSHFP record type (credit: [Romuald](https://github.com/rds13))
-- (GH-81) Added support for the DNAME record type (credit: [B. Verschueren](https://github.com/bverschueren))
-- (GH-82) Added support for the NAPTR record type (credit: [Aido](https://github.com/aido))
-- (GH-83) Added support for the [`$GENERATE` directive](http://www.zytrax.com/books/dns/ch8/generate.html) (credit: [Rayford Johnson](https://github.com/rayfordj))
-- (GH-85) New role variable `bind_other_logs` (credit: [Paulo E. Castro](https://github.com/pecastro))
-- (GH-87) New role variable `bind_dns_keys`, a list of binding keys (credit: [Jérôme Avond](https://github.com/jadjay))
-- (GH-88) New role variable `bind_statistics_channels` (credit: [Stuart Knight](https://github.com/blofeldthefish))
-- (GH-105, GH-113) New role variable `bind_query_log`, with more flexibility w.r.t. query logging (credit: [Romuald](https://github.com/rds13) and [Jascha Sticher](https://github.com/itbane))
-- New keys in `bind_zone_domains`: `create_forward_zones` and `create_reverse_zones`. When present and set to false, they will prevent the creation of the forward or reverse zones, respectively. This results in a reverse only or forward only name server for that zone.
-
-## Changed
-
-- Molecule is now used as testing harness (credit: [Robin Ophalvens](https://github.com/RobinOphalvens)). The previous system was written before any standardised testing tools were available. It became too cumbersome to maintain, which had serious impact on the further development of this role.
-- (GH-75) Refactored hash gathering to determine if zone files need to be regenerated (credit: [Stuart Knight](https://github.com/blofeldthefish))
-- (GH-89) Add missing `allow-recursion` parameter for bind slaves, allowing them to handle recursion correctly (credit: [Lennart Weller](https://github.com/lhw))
-- (GH-91) Ensure the directory for cached slave zones is created (credit: [Otto Sabart](https://github.com/seberm))
-- (GH-99) Use `bind_group` variable instead of hard-coded value (credit: [Boris Momčilović](https://github.com/kornrunner))
-- (GH-114,115) Fix error with scenario in conjunction with a dhcp shared secret key to provide dynamic dns updates. (credit: [Fabio Rocha](https://github.com/frock81))
-
-## Removed
-
-- (GH-106) Removed DNSSEC Lookaside Validation, this service has been shut down
-
-## 4.1.0 - 2018-10-05
-
-## Added
-
-- (GH-53) Add variable `bind_zone_dir` and `bind_zone_file_mode` for setting the master zone file path and mode, and `bind_extra_include_files` for including arbitrary configuration files into named.conf. (credit: [Brad Durrow](https://github.com/bdurrow))
-- (GH-64) Add variable `bind_query_log` to enable query logging (credit: [Angel Barrera](https://github.com/angelbarrera92))
-
-## Changed
-
-- (GH-55) Fix issue with non-existing file when grepping domain (credit: [Tom Meinlschmidt](https://github.com/tmeinlschmidt))
-- (GH-57) Fix issue with forwarding in subdomain delegations (credit: [Stuart Knight](https://github.com/blofeldthefish))
-- (GH-66) Fix issue that causes playbook to fail when running in `--check` mode (credit: [Jörg Eichhorn](https://github.com/jeichhorn))
-- (GH-67) Improved documentation with minimal slave configuration (credit: [Christopher Hicks](https://github.com/chicks-net))
-- Add Ubuntu 18.04, Debian 8-9 and Arch Linux to list of supported distros.
-
-## 4.0.1 - 2018-05-21
-
-### Changed
-
-- (GH-52) Move all zone specific configuration options to `bind_zones` (credit: [Stuart Knight](https://github.com/blofeldthefish))
-
-## 4.0.0 - 2018-05-19
-
-### Added
-
-- (GH-50) Add support for multiple zones (credit: [Stuart Knight](https://github.com/blofeldthefish)). **This is a breaking change,** as it changes the syntax for specifying zones.
-- Allow out-of-zone name server records
-
-## 3.9.1 - 2018-04-22
-
-## Changed
-
-- Allow multi-line `ansible_managed` comment (credit: [Fazle Arefin](https://github.com/fazlearefin))
-- Fix the atrocious implementation of (GH-35)
-- Updated documentation for specifying hosts with multiple IP addresses
-- Create serial as UTC UNIX time (credit: [David J. Haines](https://github.com/dhaines))
-- Fix bugs, linter and deprecation warnings
-
-## 3.9.0 - 2017-11-21
-
-### Added
-
-- (GH-35) Role variable `bind_check_names`, which adds support for check-names (e.g. `check-names master ignore;`)
-- (GH-36) Role variable `bind_allow_recursion`, which adds support for allow-recursion (credit: [Loic Dachary](https://github.com/dachary))
-- (GH-39) Role variable `bind_zone_delegate`, which adds support for zone delegation / NS records (credit: [Loic Dachary](https://github.com/dachary))
-- (GH-40) Role variables `bind_dnssec_enable` and `bind_dnssec_validation`, which makes DNSSEC validation configurable (credit: [Guillaume Darmont](https://github.com/gdarmont)).
-
-### Changed
-
-- (GH-38) Only append domain to MX if it does not end with a dot (credit: [Loic Dachary](https://github.com/dachary))
-
-## 3.8.0 - 2017-07-12
-
-This release adds support for multiple TXT entries and fixes some bugs.
-
-### Added
-
-- (GH-31) Support for multiple TXT entries for the same name (credit: [Rafael Bodill](https://github.com/rafi))
-
-### Changed
-
-- (GH-31) Fixed ipv6 reverse zone hash calculation for complete idempotency (credit: [Stuart Knight](https://github.com/blofeldthefish))
-- (GH-32, GH-33) Fix for bug where CNAMEs and Multi-IP entries weren't working (credit: [Greg Cockburn](https://github.com/gergnz))
-
-## 3.7.1 - 2017-07-03
-
-### Changed
-
-- (GH-29) Zone files are fully idempotent, so are only changed when actual content changes (credit: [@Stuart Knight](https://github.com/blofeldthefish))
-
-## 3.7.0 - 2017-06-01
-
-### Added
-
-- (GH-10) Implement reverse IPv6 lookups
-- (GH-28) Add option `bind_forwarders` and `bind_forward_only`, which allows BIND to be set up as a caching name server.
-
-## 3.6.1 - 2017-06-01
-
-### Changed
-
-- Fixed a bug with generating the reverse zone names.
-
-## 3.6.0 - 2017-06-01
-
-### Changed
-
-- (GH-25) Allow slave log file to be set with variable `bind_log` instead of a hard coded value (credit @kartone).
-- The alignment of columns in the reverse zone file are improved
-
-### Added
-
-- (GH-22, 23) Documentation improvements
-- (GH-27) Allow dynamic updates (credit: @bverschueren)
-
-### Removed
-
-- The custom filter plugins were removed. The functionality has since been added to Ansible's built-in filter plugins. This does require `python-netaddr` to be installed on the management node.
-
-## 3.5.2 - 2016-09-29
-
-### Changed
-
-* The call to `named-checkconf` was fixed. It had the full path to the binary, which is not the same on all distributions. (GH-20, credit @peterjanes)
-
-## 3.5.1 - 2016-09-22
-
-### Changed
-
-* The check for master/slave server is improved (GH-19, credit @josetaas)
-
-## 3.5.0 - 2016-07-28
-
-### Added
-
-* Introduced role variable `bind_log`, the path to the log file.
-* Introduced role variable `bind_zone_also_notify`, a list of servers that will receive a notification when the master zone file is reloaded (GH-18, credit: Joanna Delaporte)
-* Reverse zone files now handle the case with only a single host (GH-18, credit: Joanna Delaporte)
-
-## 3.4.0 - 2016-05-26
-
-### Added
-
-* (GH-16) Support for service record (SRV) lookups
-* Support for text record (TXT) lookups
-
-### Changed
-
-* Fixed Ansible 2.0 deprecation warnings
-* Generating a serial is no longer considered a change
-* Ensured that all role variables have a default value, e.g. empty list instead of undefined. This simplifies template logic (no `if defined` tests), and is considered [deprecated in playbooks within a *with_* loop](https://docs.ansible.com/ansible/porting_guide_2.0.html#deprecated).
-
-## 3.3.1 - 2016-04-08
-
-### Removed
-
-* The `version:` field in `meta/main.yml`. This an unofficial field that is used by a third-party tool for managing role dependencies (librarian-ansible). Custom meta fields are no longer accepted in Ansible 2.0. See [ansible/ansible#13496](https://github.com/ansible/ansible/issues/13496) for more info. Unfortunately, this will break support for librarian-ansible. As a workaround, until this issue is resolved upstream, use version 3.3.0 of this role.
-
-## 3.3.0 - 2016-04-08
-
-### Added
-
-* Added role variable `bind_other_name_servers` for adding NS records for DNS servers outside of the domain. (GH-12)
-* Re-added `bind_recursion`, as it is needed in some cases. (GH-14)
-
-### Removed
-
-## 3.2.1 - 2015-12-15
-
-### Added
-
-* The domain name can now also point to an IP address, enabling e.g. "http://example.com/" (GH-11)
-
-## 3.2.0 - 2015-12-07
-
-### Added
-
-* Add support for multiple IP addresses per host (GH-9)
-* Allow setting `rrset-order` (for DNS round robin)
-* Add support for (multiple) IPv6 (AAAA) records (GH-2). For now, only forward lookups are supported.
-
-### Changed
-
-* Test code is put into a separate branch. This means that test code is no longer included when installing the role from Ansible Galaxy.
-
-## 3.1.0 - 2015-12-04
-
-### Added
-
-* Add support for zone transfers (GH-8)
-* Check whether `bind_zone_master_server_ip` was set (GH-7)
-
-### Removed
-
-* Role variable `bind_recursion` was removed. This role is explicitly only suitable for an authoritative DNS server, and in this case, recursion should be off.
-
-## 3.0.0 - 2015-06-14
-
-### Added
-
-* You can now set up a master and slave DNS server.
-* The variable `bind_zone_master_server_ip` was added. This is a **required** variable, which makes this release not backwards compatible.
-* Automated acceptance tests for the test playbook
-
-## 2.0.0 - 2015-06-10
-
-### Added
-
-* Added EL6 to supported platforms. Thanks to @rilindo for verifying this.
-
-### Changed
-
-* Recursion is turned off by default, which fits an authoritative name server. This change is not backwards compatible, as the behaviour of BIND is different from before when you do not set the variable `bind_recursion` explicitly.
-
-### Removed
-
-* Firewall settings. This should not be a concern of this role. Configuring the firewall is functionality offered by other roles (e.g. [bertvv.bind](https://github.com/bertvv/ansible-role-el7))
-
-## 1.0.0 - 2015-04-22
-
-First release!
-
-### Added
-
-- Functionality for master DNS server
-- Multiple reverse lookup zones
-
diff --git a/roles/bertvv.bind/LICENSE.md b/roles/bertvv.bind/LICENSE.md
deleted file mode 100644
index 8411892..0000000
--- a/roles/bertvv.bind/LICENSE.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# BSD License
-
-Copyright (c) 2014, Bert Van Vreckem, (bert.vanvreckem@gmail.com)
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/roles/bertvv.bind/README.md b/roles/bertvv.bind/README.md
deleted file mode 100644
index fd32d43..0000000
--- a/roles/bertvv.bind/README.md
+++ /dev/null
@@ -1,317 +0,0 @@
-# Ansible role `bind`
-
-[](https://travis-ci.org/bertvv/ansible-role-bind)
-
-An Ansible role for setting up BIND ISC as an **authoritative-only** DNS server for multiple domains. Specifically, the responsibilities of this role are to:
-
-- install BIND
-- set up the main configuration file
- - master server
- - slave server
-- set up forward and reverse lookup zone files
-
-This role supports multiple forward and reverse zones, including for IPv6. Although enabling recursion is supported (albeit *strongly* discouraged), consider using another role if you want to set up a caching or forwarding name server.
-
-Configuring the firewall is not a concern of this role, so you should do this using another role (e.g. [bertvv.rh-base](https://galaxy.ansible.com/bertvv/rh-base/)).
-
-If you like/use this role, please consider giving it a star and rating it on the role's [Ansible Galaxy page](https://galaxy.ansible.com/bertvv/bind). Thanks!
-
-See the [change log](CHANGELOG.md) for notable changes between versions.
-
-## Requirements
-
-- **The package `python-ipaddr` should be installed on the management node** (since v3.7.0)
-
-## Role Variables
-
-Variables are not required, unless specified.
-
-| Variable | Default | Comments (type) |
-| :--- | :--- | :--- |
-| `bind_acls` | `[]` | A list of ACL definitions, which are dicts with fields `name` and `match_list`. See below for an example. |
-| `bind_allow_query` | `['localhost']` | A list of hosts that are allowed to query this DNS server. Set to ['any'] to allow all hosts |
-| `bind_allow_recursion` | `['any']` | Similar to bind_allow_query, this option applies to recursive queries. |
-| `bind_check_names` | `[]` | Check host names for compliance with RFC 952 and RFC 1123 and take the defined action (e.g. `warn`, `ignore`, `fail`). |
-| `bind_dns_keys` | `[]` | A list of binding keys, which are dicts with fields `name` `algorithm` and `secret`. See below for an example. |
-| `bind_dnssec_enable` | `true` | Is DNSSEC enabled |
-| `bind_dnssec_validation` | `true` | Is DNSSEC validation enabled |
-| `bind_extra_include_files` | `[]` | |
-| `bind_forward_only` | `false` | If `true`, BIND is set up as a caching name server |
-| `bind_forwarders` | `[]` | A list of name servers to forward DNS requests to. |
-| `bind_listen_ipv4` | `['127.0.0.1']` | A list of the IPv4 address of the network interface(s) to listen on. Set to ['any'] to listen on all interfaces. |
-| `bind_listen_ipv6` | `['::1']` | A list of the IPv6 address of the network interface(s) to listen on |
-| `bind_log` | `data/named.run` | Path to the log file |
-| `bind_other_logs` | - | A list of logging channels to configure, with a separate dict for each domain, with relevant details |
-| `- allow_update` | `['none']` | A list of hosts that are allowed to dynamically update this DNS zone. |
-| `- also_notify` | - | A list of servers that will receive a notification when the master zone file is reloaded. |
-| `- delegate` | `[]` | Zone delegation. See below this table for examples. |
-| `bind_query_log` | - | A dict with fields `file` (e.g. `data/query.log`), `versions`, `size`, when defined this will turn on the query log |
-| `bind_recursion` | `false` | Determines whether requests for which the DNS server is not authoritative should be forwarded†. |
-| `bind_rrset_order` | `random` | Defines order for DNS round robin (either `random` or `cyclic`) |
-| `bind_statistcs_channels` | `false` | if `true`, BIND is configured with a statistics_channels clause (currently only supports a single inet) |
-| `bind_zone_dir` | - | When defined, sets a custom absolute path to the server directory (for zone files, etc.) instead of the default. |
-| `bind_zone_domains` | n/a | A list of domains to configure, with a separate dict for each domain, with relevant details |
-| `- allow_update` | `['none']` | A list of hosts that are allowed to dynamically update this DNS zone. |
-| `- also_notify` | - | A list of servers that will receive a notification when the master zone file is reloaded. |
-| `- create_forward_zones` | - | When initialized and set to `false`, creation of forward zones will be skipped (resulting in a reverse only zone) |
-| `- create_reverse_zones` | - | When initialized and set to `false`, creation of reverse zones will be skipped (resulting in a forward only zone) |
-| `- delegate` | `[]` | Zone delegation. See below this table for examples. |
-| `- hostmaster_email` | `hostmaster` | The e-mail address of the system administrator for the zone |
-| `- hosts` | `[]` | Host definitions. See below this table for examples. |
-| `- ipv6_networks` | `[]` | A list of the IPv6 networks that are part of the domain, in CIDR notation (e.g. 2001:db8::/48) |
-| `- mail_servers` | `[]` | A list of dicts (with fields `name` and `preference`) specifying the mail servers for this domain. |
-| `- name_servers` | `[ansible_hostname]` | A list of the DNS servers for this domain. |
-| `- name` | `example.com` | The domain name |
-| `- networks` | `['10.0.2']` | A list of the networks that are part of the domain |
-| `- other_name_servers` | `[]` | A list of the DNS servers outside of this domain. |
-| `- services` | `[]` | A list of services to be advertised by SRV records |
-| `- text` | `[]` | A list of dicts with fields `name` and `text`, specifying TXT records. `text` can be a list or string. |
-| `- naptr` | `[]` | A list of dicts with fields `name`, `order`, `pref`, `flags`, `service`, `regex` and `replacement` specifying NAPTR records. |
-| `bind_zone_file_mode` | 0640 | The file permissions for the main config file (named.conf) |
-| `bind_zone_master_server_ip` | - | **(Required)** The IP address of the master DNS server. |
-| `bind_zone_minimum_ttl` | `1D` | Minimum TTL field in the SOA record. |
-| `bind_zone_time_to_expire` | `1W` | Time to expire field in the SOA record. |
-| `bind_zone_time_to_refresh` | `1D` | Time to refresh field in the SOA record. |
-| `bind_zone_time_to_retry` | `1H` | Time to retry field in the SOA record. |
-| `bind_zone_ttl` | `1W` | Time to Live field in the SOA record. |
-
-† Best practice for an authoritative name server is to leave recursion turned off. However, [for some cases](http://www.zytrax.com/books/dns/ch7/queries.html#allow-query-cache) it may be necessary to have recursion turned on.
-
-### Minimal variables for a working zone
-
-Even though only variable `bind_zone_master_server_ip` is required for the role to run without errors, this is not sufficient to get a working zone. In order to set up an authoritative name server that is available to clients, you should also at least define the following variables:
-
-| Variable | Master | Slave |
-| :--- | :---: | :---: |
-| `bind_zone_domains` | V | V |
-| `- name` | V | V |
-| `- networks` | V | V |
-| `- name_servers` | V | -- |
-| `- hosts` | V | -- |
-| `bind_listen_ipv4` | V | V |
-| `bind_allow_query` | V | V |
-
-### Domain definitions
-
-```Yaml
-bind_zone_domains:
- - name: mydomain.com # Domain name
- create_reverse_zones: false # Skip creation of reverse zones
- hosts:
- - name: pub01
- ip: 192.0.2.1
- ipv6: 2001:db8::1
- aliases:
- - ns
- - name: '@' # Enables "http://mydomain.com/"
- ip:
- - 192.0.2.2 # Multiple IP addresses for a single host
- - 192.0.2.3 # results in DNS round robin
- sshfp: # Secure shell fingerprint
- - "3 1 1262006f9a45bb36b1aa14f45f354b694b77d7c3"
- - "3 2 e5921564252fe10d2dbafeb243733ed8b1d165b8fa6d5a0e29198e5793f0623b"
- ipv6:
- - 2001:db8::2
- - 2001:db8::3
- aliases:
- - www
- - name: priv01 # This IP is in another subnet, will result in
- ip: 10.0.0.1 # multiple reverse zones
- - name: mydomain.net.
- aliases:
- - name: sub01
- type: DNAME # Example of a DNAME alias record
- networks:
- - '192.0.2'
- - '10'
- - '172.16'
- delegate:
- - zone: foo
- dns: 192.0.2.1
- services:
- - name: _ldap._tcp
- weight: 100
- port: 88
- target: dc001
- naptr: # Name Authority Pointer record, used for IP
- - name: "sip" # telephony
- order: 100
- pref: 10
- flags: "S"
- service: "SIP+D2T"
- regex: "!^.*$!sip:customer-service@example.com!"
- replacement: "_sip._tcp.example.com."
-```
-
-### Minimal slave configuration
-
-```Yaml
- bind_listen_ipv4: ['any']
- bind_allow_query: ['any']
- bind_zone_master_server_ip: 192.168.111.222
- bind_zone_domains:
- - name: example.com
-```
-
-### Hosts
-
-Host names that this DNS server should resolve can be specified in `hosts` as a list of dicts with fields `name`, `ip`, `aliases` and `sshfp`. Aliases can be CNAME (default) or DNAME records.
-
-To allow to surf to `http://example.com/`, set the host name of your web server to `'@'` (must be quoted!). In BIND syntax, `@` indicates the domain name itself.
-
-If you want to specify multiple IP addresses for a host, add entries to `bind_zone_hosts` with the same name (e.g. `priv01` in the code snippet). This results in multiple A/AAAA records for that host and allows [DNS round robin](http://www.zytrax.com/books/dns/ch9/rr.html), a simple load balancing technique. The order in which the IP addresses are returned can be configured with role variable `bind_rrset_order`.
-
-### Networks
-
-As you can see, not all hosts are in the same network. This is perfectly acceptable, and supported by this role. All networks should be specified in `networks` (part of bind_zone_domains.name dict), though, or the host will not get a PTR record for reverse lookup:
-
-Remark that only the network part should be specified here! When specifying a class B IP address (e.g. "172.16") in a variable file, it must be quoted. Otherwise, the Yaml parser will interpret it as a float.
-
-Based on the idea and examples detailed at for the gdnsd package, the zonefiles are fully idempotent, and thus only get updated if "real" content changes.
-
-### Zone delgation
-
-To delegate a zone to a DNS, it is enough to create a `NS` record (under delegate) which is the equivalent of:
-
-```text
-foo IN NS 192.0.2.1
-```
-
-### Service records
-
-Service (SRV) records can be added with the services. This should be a list of dicts with mandatory fields `name` (service name), `target` (host providing the service), `port` (TCP/UDP port of the service) and optional fields `priority` (default = 0) and `weight` (default = 0).
-
-### ACLs
-
-ACLs can be defined like this:
-
-```Yaml
-bind_acls:
- - name: acl1
- match_list:
- - 192.0.2.0/24
- - 10.0.0.0/8
-```
-
-The names of the ACLs will be added to the `allow-transfer` clause in global options.
-
-### Binding Keys
-
-Binding keys can be defined like this:
-
-```Yaml
-bind_dns_keys:
- - name: master_key
- algorithm: hmac-sha256
- secret: "azertyAZERTY123456"
-bind_extra_include_files:
- - "{{ bind_auth_file }}"
-```
-
-**tip**: Extra include file must be set as an ansible variable because file is OS dependant
-
-This will be set in a file *"{{ bind_auth_file }}* (e.g. /etc/bind/auth_transfer.conf for debian) which have to be added in the list variable **bind_extra_include_files**
-
-## Dependencies
-
-No dependencies.
-
-## Example Playbook
-
-See the test playbook [converge.yml](molecule/default/converge.yml) for an elaborate example that showcases most features.
-
-## Testing
-
-This role is tested using [Ansible Molecule](https://molecule.readthedocs.io/). Tests are launched automatically on [Travis CI](https://travis-ci.org/bertvv/ansible-role-bind) after each commit and PR.
-
-This Molecule configuration will:
-
-- Run Yamllint and Ansible Lint
-- Create two Docker containers, one primary (`ns1`) and one secondary (`ns2`) DNS server
-- Run a syntax check
-- Apply the role with a [test playbook](molecule/default/converge.yml)
-- Run acceptance tests with [BATS](https://github.com/bats-core/bats-core/)
-
-This process is repeated for the supported Linux distributions.
-
-### Local test environment
-
-If you want to set up a local test environment, you can use this reproducible setup based on Vagrant+VirtualBox: . Steps to install the necessary tools manually:
-
-1. Docker and BATS should be installed on your machine (assumed to run Linux). No Docker containers should be running when you start the test.
-2. As recommended by Molecule, create a python virtual environment
-3. Install the software tools `python3 -m pip install molecule docker netaddr yamllint ansible-lint`
-4. Navigate to the root of the role directory and run `molecule test`
-
-Molecule automatically deletes the containers after a test. If you would like to check out the containers yourself, run `molecule converge` followed by `molecule login --host HOSTNAME`.
-
-The Docker containers are based on images created by [Jeff Geerling](https://hub.docker.com/u/geerlingguy), specifically for Ansible testing (look for images named `geerlingguy/docker-DISTRO-ansible`). You can use any of his images, but only the distributions mentioned in [meta/main.yml](meta/main.yml) are supported.
-
-The default config will start two Centos 7 containers (the primary supported platform at this time). Choose another distro by setting the `MOLECULE_DISTRO` variable with the command, e.g.:
-
-``` bash
-MOLECULE_DISTRO=debian9 molecule test
-```
-
-or
-
-``` bash
-MOLECULE_DISTRO=debian9 molecule converge
-```
-
-You can run the acceptance tests on both servers with `molecule verify` or manually with
-
-```console
-SUT_IP=172.17.0.2 bats molecule/default/files/dns.bats
-```
-
-You need to initialise the variable `SUT_IP`, the system under test's IP address. The primary server, `ns1`, should have IP address 172.17.0.2 and the secondary server, `ns2` 172.17.0.3.
-
-## License
-
-BSD
-
-## Contributors
-
-This role could only have been realized thanks to the contributions of many. If you have an idea to improve it even further, don't hesitate to pitch in!
-
-Issues, feature requests, ideas, suggestions, etc. can be posted in the Issues section.
-
-Pull requests are also very welcome. Please create a topic branch for your proposed changes. If you don't, this will create conflicts in your fork after the merge. Don't hesitate to add yourself to the contributor list below in your PR!
-
-Maintainers:
-
-- [Bert Van Vreckem](https://github.com/bertvv/)
-- [Stuart Knight](https://github.com/blofeldthefish)
-
-Contributors:
-
-- [Aido](https://github.com/aido)
-- [Angel Barrera](https://github.com/angelbarrera92)
-- [B. Verschueren](https://github.com/bverschueren)
-- [Boris Momčilović](https://github.com/kornrunner)
-- [Brad Durrow](https://github.com/bdurrow)
-- [Christopher Hicks](http://www.chicks.net/)
-- [David J. Haines](https://github.com/dhaines)
-- [Fabio Rocha](https://github.com/frock81)
-- [Fazle Arefin](https://github.com/fazlearefin)
-- [Greg Cockburn](https://github.com/gergnz)
-- [Guillaume Darmont](https://github.com/gdarmont)
-- [jadjay](https://github.com/jadjay)
-- [Jascha Sticher](https://github.com/itbane)
-- [Joanna Delaporte](https://github.com/jdelaporte)
-- [Jörg Eichhorn](https://github.com/jeichhorn)
-- [Jose Taas](https://github.com/josetaas)
-- [Lennart Weller](https://github.com/lhw)
-- [Loic Dachary](http://dachary.org)
-- [Mario Ciccarelli](https://github.com/kartone)
-- [Otto Sabart](https://github.com/seberm)
-- [Paulius Mazeika](https://github.com/pauliusm)
-- [Paulo E. Castro](https://github.com/pecastro)
-- [Peter Janes](https://github.com/peterjanes)
-- [psa](https://github.com/psa)
-- [Rafael Bodill](https://github.com/rafi)
-- [Rayford Johnson](https://github.com/rayfordj)
-- [Robin Ophalvens](https://github.com/RobinOphalvens)
-- [Romuald](https://github.com/rds13)
-- [Tom Meinlschmidt](https://github.com/tmeinlschmidt)
diff --git a/roles/bertvv.bind/defaults/main.yml b/roles/bertvv.bind/defaults/main.yml
deleted file mode 100644
index ac6aafc..0000000
--- a/roles/bertvv.bind/defaults/main.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-# roles/bind/defaults/main.yml
----
-
-bind_log: "data/named.run"
-
-bind_zone_domains:
- - name: "example.com"
- hostmaster_email: "hostmaster"
- networks:
- - "10.0.2"
-
-# List of acls.
-bind_acls: []
-
-# Key binding for slaves
-bind_dns_keys: []
-# - name: master_key
-# algorithm: hmac-sha256
-# secret: "azertyAZERTY123456"
-
-# List of IPv4 address of the network interface(s) to listen on. Set to "any"
-# to listen on all interfaces
-bind_listen_ipv4:
- - "127.0.0.1"
-
-# List of IPv6 address of the network interface(s) to listen on.
-bind_listen_ipv6:
- - "::1"
-
-# List of hosts that are allowed to query this DNS server.
-bind_allow_query:
- - "localhost"
-
-# Determines whether recursion should be allowed. Typically, an authoritative
-# name server should have recursion turned OFF.
-bind_recursion: false
-bind_allow_recursion:
- - "any"
-
-# Allows BIND to be set up as a caching name server
-bind_forward_only: false
-
-# List of name servers to forward DNS requests to.
-bind_forwarders: []
-
-# DNS round robin order (random or cyclic)
-bind_rrset_order: "random"
-
-# statistics channels configuration
-bind_statistics_channels: false
-bind_statistics_port: 8053
-bind_statistics_host: 127.0.0.1
-bind_statistics_allow:
- - "127.0.0.1"
-
-# DNSSEC configuration
-bind_dnssec_enable: true
-bind_dnssec_validation: true
-
-bind_extra_include_files: []
-
-# SOA information
-bind_zone_ttl: "1W"
-bind_zone_time_to_refresh: "1D"
-bind_zone_time_to_retry: "1H"
-bind_zone_time_to_expire: "1W"
-bind_zone_minimum_ttl: "1D"
-
-# File mode for master zone files (needs to be something like 0660 for dynamic updates)
-bind_zone_file_mode: "0640"
diff --git a/roles/bertvv.bind/handlers/main.yml b/roles/bertvv.bind/handlers/main.yml
deleted file mode 100644
index 9acaaad..0000000
--- a/roles/bertvv.bind/handlers/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-# roles/bind/handlers/main.yml
----
-
-- name: reload bind
- service:
- name: "{{ bind_service }}"
- state: reloaded
diff --git a/roles/bertvv.bind/meta/.galaxy_install_info b/roles/bertvv.bind/meta/.galaxy_install_info
deleted file mode 100644
index 58fa52d..0000000
--- a/roles/bertvv.bind/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Sun Jun 28 14:49:10 2020
-version: v4.2.0
diff --git a/roles/bertvv.bind/meta/main.yml b/roles/bertvv.bind/meta/main.yml
deleted file mode 100644
index abfa20f..0000000
--- a/roles/bertvv.bind/meta/main.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-galaxy_info:
- author: Bert Van Vreckem
- description: >
- Sets up ISC BIND as an authoritative DNS server for one or more domains
- (primary and/or secondary).
- license: BSD
- min_ansible_version: 2.7
- platforms:
- - name: ArchLinux
- versions:
- - any
- - name: Debian
- versions:
- - jessie
- - stretch
- - buster
- - name: FreeBSD
- versions:
- - any
- - name: EL
- versions:
- - 7
- - 8
- - name: Ubuntu
- versions:
- - xenial
- - bionic
- - focal
- galaxy_tags:
- - dns
- - networking
- - system
-dependencies: []
diff --git a/roles/bertvv.bind/molecule/default/converge.yml b/roles/bertvv.bind/molecule/default/converge.yml
deleted file mode 100644
index dcc118a..0000000
--- a/roles/bertvv.bind/molecule/default/converge.yml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-- name: Converge
- hosts: all
- vars:
- bind_zone_dir: /var/local/named-zones
- bind_zone_file_mode: '0660'
- bind_allow_query:
- - any
- bind_listen_ipv4:
- - any
- bind_listen_ipv6:
- - any
- bind_acls:
- - name: acl1
- match_list:
- - 172.17.0.0/16
- bind_forwarders:
- - '8.8.8.8'
- - '8.8.4.4'
- bind_recursion: true
- bind_query_log: 'data/query.log'
- bind_check_names: 'master ignore'
- bind_zone_master_server_ip: 172.17.0.2
- bind_zone_minimum_ttl: "2D"
- bind_zone_ttl: "2W"
- bind_zone_time_to_refresh: "2D"
- bind_zone_time_to_retry: "2H"
- bind_zone_time_to_expire: "2W"
- bind_zone_domains:
- - name: 'example.com'
- networks:
- - '192.0.2'
- ipv6_networks:
- - '2001:db9::/48'
- name_servers:
- - ns1.acme-inc.com.
- - ns2.acme-inc.com.
- hostmaster_email: admin
- hosts:
- - name: srv001
- ip: 192.0.2.1
- ipv6: '2001:db9::1'
- aliases:
- - www
- - name: srv002
- ip: 192.0.2.2
- ipv6: '2001:db9::2'
- - name: mail001
- ip: 192.0.2.10
- ipv6: '2001:db9::3'
- mail_servers:
- - name: mail001
- preference: 10
- - name: 'acme-inc.com'
- networks:
- - '172.17'
- - '10'
- ipv6_networks:
- - '2001:db8::/48'
- name_servers:
- - ns1
- - ns2
- hosts:
- - name: ns1
- ip: 172.17.0.2
- - name: ns2
- ip: 172.17.0.3
- - name: srv001
- ip: 172.17.1.1
- ipv6: 2001:db8::1
- aliases:
- - www
- - name: srv002
- ip: 172.17.1.2
- ipv6: 2001:db8::2
- aliases:
- - mysql
- - name: mail001
- ip: 172.17.2.1
- ipv6: 2001:db8::d:1
- aliases:
- - smtp
- - mail-in
- - name: mail002
- ip: 172.17.2.2
- ipv6: 2001:db8::d:2
- - name: mail003
- ip: 172.17.2.3
- ipv6: 2001:db8::d:3
- aliases:
- - imap
- - mail-out
- - name: srv010
- ip: 10.0.0.10
- - name: srv011
- ip: 10.0.0.11
- - name: srv012
- ip: 10.0.0.12
- mail_servers:
- - name: mail001
- preference: 10
- - name: mail002
- preference: 20
- services:
- - name: _ldap._tcp
- weight: 100
- port: 88
- target: srv010
- text:
- - name: _kerberos
- text: KERBEROS.ACME-INC.COM
- - name: '@'
- text:
- - 'some text'
- - 'more text'
- roles:
- - role: bertvv.bind
diff --git a/roles/bertvv.bind/molecule/default/files/dns.bats b/roles/bertvv.bind/molecule/default/files/dns.bats
deleted file mode 100644
index 7c98ff4..0000000
--- a/roles/bertvv.bind/molecule/default/files/dns.bats
+++ /dev/null
@@ -1,263 +0,0 @@
-#! /usr/bin/env bats
-#
-# Functional tests for a DNS server set up as a test case for Ansible role
-# bertvv.bind
-#
-# The variable SUT_IP, the IP address of the System Under Test must be set
-# outside of the script.
-
-#{{{ Helper functions
-
-# Usage: assert_forward_lookup NAME DOMAIN IP
-# Exits with status 0 if NAME.DOMAIN resolves to IP, a nonzero
-# status otherwise
-assert_forward_lookup() {
- local name="$1"
- local domain="$2"
- local ip="$3"
-
- local result
- result=$(dig @"${SUT_IP}" "${name}.${domain}" +short)
-
- echo "Expected: ${ip}"
- echo "Actual : ${result}"
- [ "${ip}" = "${result}" ]
-}
-
-# Usage: assert_forward_ipv6_lookup NAME DOMAIN IP
-assert_forward_ipv6_lookup() {
- local name="${1}"
- local domain="${2}"
- local ip="${3}"
-
- local result
- result=$(dig @"${SUT_IP}" AAAA "${name}.${domain}" +short)
-
- echo "Expected: ${ip}"
- echo "Actual : ${result}"
- [ "${ip}" = "${result}" ]
-}
-
-# Usage: assert_reverse_lookup NAME DOMAIN IP
-# Exits with status 0 if a reverse lookup on IP resolves to NAME,
-# a nonzero status otherwise
-assert_reverse_lookup() {
- local name="$1"
- local domain="$2"
- local ip="$3"
-
- local expected="${name}.${domain}."
- local result
- result=$(dig @"${SUT_IP}" -x "${ip}" +short)
-
- echo "Expected: ${expected}"
- echo "Actual : ${result}"
- [ "${expected}" = "${result}" ]
-}
-
-# Usage: assert_alias_lookup ALIAS NAME DOMAIN IP
-# Exits with status 0 if a forward lookup on NAME resolves to the
-# host name NAME.DOMAIN and to IP, a nonzero status otherwise
-assert_alias_lookup() {
- local alias="$1"
- local name="$2"
- local domain="$3"
- local ip="$4"
- local result
- result=$(dig @"${SUT_IP}" "${alias}.${domain}" +short)
-
- grep "${name}\\.${domain}\\." <<< "${result}"
- grep "${ip}" <<< "${result}"
-}
-
-# Usage: assert_ns_lookup DOMAIN NS_NAME...
-# Exits with status 0 if all specified host names occur in the list of
-# name servers for the domain.
-assert_ns_lookup() {
- local domain="${1}"
- shift
- local result
- result=$(dig @"${SUT_IP}" "${domain}" NS +short)
-
- [ -n "${result}" ] # the list of name servers should not be empty
- while (( "$#" )); do
- grep "$1\\." <<< "${result}"
- shift
- done
-}
-
-# Usage: assert_mx_lookup DOMAIN PREF1 NAME1 PREF2 NAME2...
-# e.g. assert_mx_lookup example.com 10 mailsrv1 20 mailsrv2
-# Exits with status 0 if all specified host names occur in the list of
-# mail servers for the domain.
-assert_mx_lookup() {
- local domain="${1}"
- shift
- local result
- result=$(dig @"${SUT_IP}" "${domain}" MX +short)
-
- [ -n "${result}" ] # the list of name servers should not be empty
- while (( "$#" )); do
- grep "$1 $2\\.${domain}\\." <<< "${result}"
- shift
- shift
- done
-}
-
-# Usage: assert_srv_lookup DOMAIN SERVICE WEIGHT PORT TARGET
-# e.g. assert_srv_lookup example.com _ldap._tcp 0 100 88 ldapsrv
-assert_srv_lookup() {
- local domain="${1}"
- shift
- local service="${1}"
- shift
- local expected="${*}.${domain}."
- local result
- result=$(dig @"${SUT_IP}" SRV "${service}.${domain}" +short)
-
- echo "expected: ${expected}"
- echo "actual : ${result}"
- [ "${result}" = "${expected}" ]
-}
-
-# Perform a TXT record lookup
-# Usage: assert_txt_lookup NAME TEXT...
-# e.g. assert_txt_lookup _kerberos.example.com KERBEROS.EXAMPLE.COM
-assert_txt_lookup() {
- local name="$1"
- shift
- local result
- result=$(dig @"${SUT_IP}" TXT "${name}" +short)
-
- echo "expected: ${*}"
- echo "actual : ${result}"
- while [ "$#" -ne "0" ]; do
- grep "${1}" <<< "${result}"
- shift
- done
-}
-
-
-#}}}
-
-@test "Forward lookups acme-inc.com" {
- # host name domain IP
- assert_forward_lookup ns1 acme-inc.com 172.17.0.2
- assert_forward_lookup ns2 acme-inc.com 172.17.0.3
- assert_forward_lookup srv001 acme-inc.com 172.17.1.1
- assert_forward_lookup srv002 acme-inc.com 172.17.1.2
- assert_forward_lookup mail001 acme-inc.com 172.17.2.1
- assert_forward_lookup mail002 acme-inc.com 172.17.2.2
- assert_forward_lookup mail003 acme-inc.com 172.17.2.3
- assert_forward_lookup srv010 acme-inc.com 10.0.0.10
- assert_forward_lookup srv011 acme-inc.com 10.0.0.11
- assert_forward_lookup srv012 acme-inc.com 10.0.0.12
-}
-
-@test "Reverse lookups acme-inc.com" {
- # host name domain IP
- assert_reverse_lookup ns1 acme-inc.com 172.17.0.2
- assert_reverse_lookup ns2 acme-inc.com 172.17.0.3
- assert_reverse_lookup srv001 acme-inc.com 172.17.1.1
- assert_reverse_lookup srv002 acme-inc.com 172.17.1.2
- assert_reverse_lookup mail001 acme-inc.com 172.17.2.1
- assert_reverse_lookup mail002 acme-inc.com 172.17.2.2
- assert_reverse_lookup mail003 acme-inc.com 172.17.2.3
- assert_reverse_lookup srv010 acme-inc.com 10.0.0.10
- assert_reverse_lookup srv011 acme-inc.com 10.0.0.11
- assert_reverse_lookup srv012 acme-inc.com 10.0.0.12
-}
-
-@test "Alias lookups acme-inc.com" {
- # alias hostname domain IP
- assert_alias_lookup www srv001 acme-inc.com 172.17.1.1
- assert_alias_lookup mysql srv002 acme-inc.com 172.17.1.2
- assert_alias_lookup smtp mail001 acme-inc.com 172.17.2.1
- assert_alias_lookup mail-in mail001 acme-inc.com 172.17.2.1
- assert_alias_lookup imap mail003 acme-inc.com 172.17.2.3
- assert_alias_lookup mail-out mail003 acme-inc.com 172.17.2.3
-
-}
-
-@test "IPv6 forward lookups acme-inc.com" {
- # hostname domain IPv6
- assert_forward_ipv6_lookup srv001 acme-inc.com 2001:db8::1
- assert_forward_ipv6_lookup srv002 acme-inc.com 2001:db8::2
- assert_forward_ipv6_lookup mail001 acme-inc.com 2001:db8::d:1
- assert_forward_ipv6_lookup mail002 acme-inc.com 2001:db8::d:2
- assert_forward_ipv6_lookup mail003 acme-inc.com 2001:db8::d:3
-}
-
-@test "IPv6 reverse lookups acme-inc.com" {
- # hostname domain IPv6
- assert_forward_ipv6_lookup srv001 acme-inc.com 2001:db8::1
- assert_forward_ipv6_lookup srv002 acme-inc.com 2001:db8::2
- assert_forward_ipv6_lookup mail001 acme-inc.com 2001:db8::d:1
- assert_forward_ipv6_lookup mail002 acme-inc.com 2001:db8::d:2
- assert_forward_ipv6_lookup mail003 acme-inc.com 2001:db8::d:3
-}
-
-@test "NS record lookup acme-inc.com" {
- assert_ns_lookup acme-inc.com \
- ns1.acme-inc.com \
- ns2.acme-inc.com
-}
-
-@test "Mail server lookup acme-inc.com" {
- assert_mx_lookup acme-inc.com \
- 10 mail001 \
- 20 mail002
-}
-
-@test "Service record lookup acme-inc.com" {
- assert_srv_lookup acme-inc.com _ldap._tcp 0 100 88 srv010
-}
-
-@test "TXT record lookup acme-inc.com" {
- assert_txt_lookup _kerberos.acme-inc.com KERBEROS.ACME-INC.COM
- assert_txt_lookup acme-inc.com "some text" "more text"
-}
-
-# Tests for domain example.com
-
-
-@test "Forward lookups example.com" {
- # host name domain IP
- assert_forward_lookup srv001 example.com 192.0.2.1
- assert_forward_lookup srv002 example.com 192.0.2.2
- assert_forward_lookup mail001 example.com 192.0.2.10
-}
-
-@test "Reverse lookups example.com" {
- # host name domain IP
- assert_reverse_lookup srv001 example.com 192.0.2.1
- assert_reverse_lookup srv002 example.com 192.0.2.2
- assert_reverse_lookup mail001 example.com 192.0.2.10
-}
-
-@test "Alias lookups example.com" {
- # alias hostname domain IP
- assert_alias_lookup www srv001 example.com 192.0.2.1
-}
-
-@test "IPv6 forward lookups example.com" {
- # hostname domain IPv6
- assert_forward_ipv6_lookup srv001 example.com 2001:db9::1
-}
-
-@test "IPv6 reverse lookups example.com" {
- # hostname domain IPv6
- assert_reverse_lookup srv001 example.com 2001:db9::1
-}
-
-@test "NS record lookup example.com" {
- assert_ns_lookup example.com \
- ns1.acme-inc.com \
- ns2.acme-inc.com
-}
-
-@test "Mail server lookup example.com" {
- assert_mx_lookup example.com \
- 10 mail001
-}
-
diff --git a/roles/bertvv.bind/molecule/default/files/functional-tests.sh b/roles/bertvv.bind/molecule/default/files/functional-tests.sh
deleted file mode 100755
index 301ee34..0000000
--- a/roles/bertvv.bind/molecule/default/files/functional-tests.sh
+++ /dev/null
@@ -1,117 +0,0 @@
-#! /usr/bin/env bash
-#
-# Author: Bert Van Vreckem
-#
-# Run BATS test files in the current directory, and the ones in the subdirectory
-# matching the host name.
-#
-# The script installs BATS if needed. It's best to put ${bats_install_dir} in
-# your .gitignore.
-
-set -o errexit # abort on nonzero exitstatus
-set -o nounset # abort on unbound variable
-
-#{{{ Variables
-
-test_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-bats_archive="v0.4.0.tar.gz"
-bats_url="https://github.com/sstephenson/bats/archive/${bats_archive}"
-bats_install_dir="/opt"
-bats_default_location="${bats_install_dir}/bats/libexec/bats"
-test_file_pattern="*.bats"
-
-# Color definitions
-readonly reset='\e[0m'
-readonly black='\e[0;30m'
-readonly red='\e[0;31m'
-readonly green='\e[0;32m'
-readonly yellow='\e[0;33m'
-readonly blue='\e[0;34m'
-readonly purple='\e[0;35m'
-readonly cyan='\e[0;36m'
-readonly white='\e[0;37m'
-#}}}
-
-main() {
-
- bats=$(find_bats_executable)
-
- if [ -z "${bats}" ]; then
- install_bats
- bats="${bats_default_location}"
- fi
-
- debug "Using BATS executable at: ${bats}"
-
- # List all test cases (i.e. files in the test dir matching the test file
- # pattern)
-
- # Tests to be run on all hosts
- global_tests=$(find_tests "${test_dir}" 1)
-
- # Tests for individual hosts
- host_tests=$(find_tests "${test_dir}/${HOSTNAME}")
-
- # Loop over test files
- for test_case in ${global_tests} ${host_tests}; do
- info "Running test ${test_case}"
- ${bats} "${test_case}"
- done
-}
-
-#{{{ Functions
-
-# Tries to find BATS executable in the PATH or the place where this script
-# installs it.
-find_bats_executable() {
- if which bats > /dev/null; then
- which bats
- elif [ -x "${bats_default_location}" ]; then
- echo "${bats_default_location}"
- else
- echo ""
- fi
-}
-
-# Usage: install_bats
-install_bats() {
- pushd "${bats_install_dir}" > /dev/null 2>&1
- curl --location --remote-name "${bats_url}"
- tar xzf "${bats_archive}"
- mv bats-* bats
- rm "${bats_archive}"
- popd > /dev/null 2>&1
-}
-
-# Usage: find_tests DIR [MAX_DEPTH]
-#
-# Finds BATS test suites in the specified directory
-find_tests() {
- local max_depth=""
- if [ "$#" -eq "2" ]; then
- max_depth="-maxdepth $2"
- fi
-
- local tests
- tests=$(find "$1" ${max_depth} -type f -name "${test_file_pattern}" -printf '%p\n' 2> /dev/null)
-
- echo "${tests}"
-}
-
-# Usage: info [ARG]...
-#
-# Prints all arguments on the standard output stream
-info() {
- printf "${yellow}### %s${reset}\n" "${*}"
-}
-
-# Usage: debug [ARG]...
-#
-# Prints all arguments on the standard output stream
-debug() {
- printf "${cyan}### %s${reset}\n" "${*}"
-}
-#}}}
-
-main
diff --git a/roles/bertvv.bind/molecule/default/molecule.yml b/roles/bertvv.bind/molecule/default/molecule.yml
deleted file mode 100644
index e9f92eb..0000000
--- a/roles/bertvv.bind/molecule/default/molecule.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-dependency:
- name: galaxy
-
-driver:
- # Specifies the driver that should be used. Podman should also work
- name: docker
-
-# Linting with yamllint and ansible-lint
-# verify.yml is skipped because it uses the shell: module, which would trigger
-# a linting error.
-lint: |
- yamllint .
- ansible-lint --exclude=molecule/default/verify.yml
-
-platforms:
- # Set name and hostname
- - name: ns1
- hostname: ns1
- # Specify which image should be used. Geerlingguys images are Ansible
- # compatible and have Systemd installed
- image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest"
- # Command to execute when the container starts
- command: ${MOLECULE_DOCKER_COMMAND:-""}
- # Volumes to mount within the container. Important to enable systemd
- volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:rw
- # Give extended privileges to the container. Necessary for Systemd to
- # operate within the container. DO NOT use extended privileges in a
- # production environment!
- privileged: true
- # Allocate pseudo-TTY
- tty: true
- environment:
- container: docker
-
- - name: ns2
- hostname: ns2
- image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest"
- command: ${MOLECULE_DOCKER_COMMAND:-""}
- volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:rw
- privileged: true
- tty: true
- environment:
- container: docker
-
-provisioner:
- name: ansible
-
-# Runs the verify.yml playbook
-verifier:
- name: ansible
diff --git a/roles/bertvv.bind/molecule/default/verify.yml b/roles/bertvv.bind/molecule/default/verify.yml
deleted file mode 100644
index 317556e..0000000
--- a/roles/bertvv.bind/molecule/default/verify.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Verify
- hosts: all
- tasks:
- # We run the BATS tests from the localhost, since they are black box tests
- - name: "Run BATS tests for {{ ansible_hostname }}"
- shell: SUT_IP={{ ansible_default_ipv4.address }} bats {{ playbook_dir }}/files/dns.bats
- delegate_to: localhost
- changed_when: false
diff --git a/roles/bertvv.bind/tasks/main.yml b/roles/bertvv.bind/tasks/main.yml
deleted file mode 100644
index 7d2ffb9..0000000
--- a/roles/bertvv.bind/tasks/main.yml
+++ /dev/null
@@ -1,69 +0,0 @@
-# roles/bind/tasks/main.yml
----
-
-# Initialise distribution-specific variables
-- name: Source specific variables
- include_vars: "{{ item }}"
- with_first_found:
- - "{{ ansible_distribution }}.yml"
- - "{{ ansible_os_family }}.yml"
- tags: bind,pretask
-
-- name: Check whether `bind_zone_master_server_ip` was set
- assert:
- that: bind_zone_master_server_ip is defined
-
-- name: Install BIND
- package:
- pkg: "{{ item }}"
- state: present
- with_items:
- - "{{ bind_packages }}"
- tags: bind
-
-- name: Ensure runtime directories referenced in config exist
- file:
- path: "{{ item }}"
- state: directory
- owner: root
- group: "{{ bind_group }}"
- mode: 0770
- with_items:
- - "{{ bind_dir }}/dynamic"
- - "{{ bind_dir }}/data"
- - "{{ bind_zone_dir }}"
- tags: bind
-
-- name: Create serial, based on UTC UNIX time
- command: date -u +%s
- register: timestamp
- changed_when: false
- run_once: true
- check_mode: false
- tags: bind
-
-# file to set keys for XFR authentication
-- name: create extra config file for authenticated XFR request
- tags: pretask
- template:
- src: auth_transfer.j2
- dest: "{{ bind_conf_dir }}/{{ auth_file }}"
- mode: 0640
- owner: root
- group: "{{ bind_group }}"
- when: bind_dns_keys is defined and bind_dns_keys|length > 0
-
-- name: Set up the machine as a master DNS server
- include_tasks: master.yml
- when: bind_zone_master_server_ip in ansible_all_ipv4_addresses
-
-- name: Set up the machine as a slave DNS server
- include_tasks: slave.yml
- when: bind_zone_master_server_ip not in ansible_all_ipv4_addresses
-
-- name: Start BIND service
- service:
- name: "{{ bind_service }}"
- state: started
- enabled: true
- tags: bind
diff --git a/roles/bertvv.bind/tasks/master.yml b/roles/bertvv.bind/tasks/master.yml
deleted file mode 100644
index 3188a39..0000000
--- a/roles/bertvv.bind/tasks/master.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# roles/bind/tasks/master.yml
-# Set up a BIND master server
----
-
-- name: Read forward zone hashes
- shell: 'grep -s "^; Hash:" {{ bind_zone_dir }}/{{ item.name }} || true'
- changed_when: false
- check_mode: false
- register: forward_hashes_temp
- with_items:
- - "{{ bind_zone_domains }}"
- run_once: true
- loop_control:
- label: "{{ item.name }}"
-
-- name: create dict of forward hashes
- set_fact:
- forward_hashes: "{{ forward_hashes|default([]) + [ {'hash': item.stdout|default(), 'name': item.item.name} ] }}"
- with_items:
- - "{{ forward_hashes_temp.results }}"
- run_once: true
- loop_control:
- label: "{{ item.item.name }}"
-
-- name: Read reverse ipv4 zone hashes
- shell: "grep -s \"^; Hash:\" {{ bind_zone_dir }}/{{ ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1])) }}.in-addr.arpa || true"
- changed_when: false
- check_mode: false
- register: reverse_hashes_temp
- with_subelements:
- - "{{ bind_zone_domains }}"
- - networks
- - flags:
- skip_missing: true
- run_once: true
- loop_control:
- label: "{{ item.1 }}"
-
-- name: create dict of reverse hashes
- set_fact:
- reverse_hashes: "{{ reverse_hashes|default([]) + [ {'hash': item.0.stdout|default(), 'network': item.1} ] }}"
- with_subelements:
- - "{{ reverse_hashes_temp.results }}"
- - item
- run_once: true
- loop_control:
- label: "{{ item.1.name |default(item.0.cmd.split(' ')[4]) }}"
-
-- name: Read reverse ipv6 zone hashes
- shell: "grep -s \"^; Hash:\" {{ bind_zone_dir }}/{{ (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):-1] }} || true"
- changed_when: false
- check_mode: false
- register: reverse_hashes_ipv6_temp
- with_subelements:
- - "{{ bind_zone_domains }}"
- - ipv6_networks
- - flags:
- skip_missing: true
- run_once: true
- loop_control:
- label: "{{ item.1 }}"
-
-- name: create dict of reverse ipv6 hashes
- set_fact:
- reverse_hashes_ipv6: "{{ reverse_hashes_ipv6|default([]) + [ {'hash': item.0.stdout|default(), 'network': item.1} ] }}"
- with_subelements:
- - "{{ reverse_hashes_ipv6_temp.results }}"
- - item
- run_once: true
- loop_control:
- label: "{{ item.1.name |default(item.0.cmd.split(' ')[4]) }}"
-
-- name: Master | Main BIND config file (master)
- template:
- src: master_etc_named.conf.j2
- dest: "{{ bind_config }}"
- owner: "{{ bind_owner }}"
- group: "{{ bind_group }}"
- mode: '0640'
- setype: named_conf_t
- validate: 'named-checkconf %s'
- notify: reload bind
- tags: bind
-
-- name: Master | Create forward lookup zone file
- template:
- src: bind_zone.j2
- dest: "{{ bind_zone_dir }}/{{ item.name }}"
- owner: "{{ bind_owner }}"
- group: "{{ bind_group }}"
- mode: "{{ bind_zone_file_mode }}"
- setype: named_zone_t
- validate: 'named-checkzone -d {{ item.name }} %s'
- with_items:
- - "{{ bind_zone_domains }}"
- loop_control:
- label: "{{ item.name }}"
- when: item.create_forward_zones is not defined or item.create_forward_zones
- notify: reload bind
- tags: bind
-
-- name: Master | Create reverse lookup zone file
- template:
- src: reverse_zone.j2
- dest: "{{ bind_zone_dir }}/{{ ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1])) }}.in-addr.arpa"
- owner: "{{ bind_owner }}"
- group: "{{ bind_group }}"
- mode: "{{ bind_zone_file_mode }}"
- setype: named_zone_t
- validate: "named-checkzone {{ ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1])) }}.in-addr.arpa %s"
- with_subelements:
- - "{{ bind_zone_domains }}"
- - networks
- - flags:
- skip_missing: true
- loop_control:
- label: "{{ item.1 }}"
- when: item.create_reverse_zones is not defined or item.create_reverse_zones
- notify: reload bind
- tags: bind
-
-- name: Master | Create reverse IPv6 lookup zone file
- template:
- src: reverse_zone_ipv6.j2
- dest: "{{ bind_zone_dir }}/{{ (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):-1] }}"
- owner: "{{ bind_owner }}"
- group: "{{ bind_group }}"
- mode: "{{ bind_zone_file_mode }}"
- setype: named_zone_t
- validate: "named-checkzone {{ (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):] }} %s"
- with_subelements:
- - "{{ bind_zone_domains }}"
- - ipv6_networks
- - flags:
- skip_missing: true
- loop_control:
- label: "{{ item.1 }}"
- when: item.create_reverse_zones is not defined or item.create_reverse_zones
- notify: reload bind
- tags: bind
diff --git a/roles/bertvv.bind/tasks/slave.yml b/roles/bertvv.bind/tasks/slave.yml
deleted file mode 100644
index c8efa88..0000000
--- a/roles/bertvv.bind/tasks/slave.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-# roles/bind/tasks/master.yml
-# Set up a BIND slave server
----
-
-- name: Slave | Main BIND config file (slave)
- template:
- src: slave_etc_named.conf.j2
- dest: "{{ bind_config }}"
- owner: "{{ bind_owner }}"
- group: "{{ bind_group }}"
- mode: '0640'
- setype: named_conf_t
- validate: 'named-checkconf %s'
- notify: reload bind
- tags: bind
-
-- name: Slave | ensure directory for cached slaves zones
- file:
- path: "{{ bind_dir }}/slaves"
- state: directory
- owner: "{{ bind_owner }}"
- group: "{{ bind_group }}"
- mode: '0770'
- setype: named_cache_t
diff --git a/roles/bertvv.bind/templates/auth_transfer.j2 b/roles/bertvv.bind/templates/auth_transfer.j2
deleted file mode 100644
index 95868ca..0000000
--- a/roles/bertvv.bind/templates/auth_transfer.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-
-server {{ ansible_default_ipv4.address }} {
- keys { {% for mykey in bind_dns_keys %} {{ mykey.name }}; {% endfor %} };
-};
-
-{% for mykey in bind_dns_keys %}
-key {{ mykey.name }} {
- algorithm {{ mykey.algorithm }};
- secret "{{ mykey.secret }}";
-{% endfor %}
-};
-
diff --git a/roles/bertvv.bind/templates/bind_zone.j2 b/roles/bertvv.bind/templates/bind_zone.j2
deleted file mode 100644
index 04584cf..0000000
--- a/roles/bertvv.bind/templates/bind_zone.j2
+++ /dev/null
@@ -1,140 +0,0 @@
-{#
- # First create a dict holding the entire zone information and create a hash
- # from it, that it can be compared with subsequent task executions. In this
- # way the serial will only be updated if there are some content changes.
- #}
-{% set _zone_data = {} %}
-{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %}
-{% set _ = _zone_data.update({'domain': item.name}) %}
-{% set _ = _zone_data.update({'mname': item.name_servers|default([])}) %}
-{% set _ = _zone_data.update({'aname': item.other_name_servers|default([])}) %}
-{% set _ = _zone_data.update({'mail': item.mail_servers|default([])}) %}
-{% if item.hostmaster_email is defined %}
-{% set _ = _zone_data.update({'rname': (( item.hostmaster_email )) + ('' if (item.hostmaster_email is search('\.')) else ('.' + _zone_data['domain']))}) %}
-{% else %}
-{% set _ = _zone_data.update({'rname': 'hostmaster.' + _zone_data['domain']}) %}
-{% endif %}
-{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %}
-{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %}
-{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %}
-{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %}
-{% set _ = _zone_data.update({'hosts': item.hosts|default([])}) %}
-{% set _ = _zone_data.update({'delegate': item.delegate|default([])}) %}
-{% set _ = _zone_data.update({'services': item.services|default([])}) %}
-{% set _ = _zone_data.update({'text': item.text|default([])}) %}
-{% set _ = _zone_data.update({'naptr': item.naptr|default([])}) %}
-{#
- # Compare the zone file hash with the current zone data hash and set serial
- # accordingly
- #}
-{% set _zone = {'hash': _zone_data | string | hash('md5')} %}
-{% for _result in forward_hashes if _result.name == item.name %}
-{% set _hash_serial = _result.hash.split(' ')[2:] %}
-{% if _hash_serial and _hash_serial[0] == _zone['hash'] %}
-{% set _ = _zone.update({'serial': _hash_serial[1]}) %}
-{% else %}
-{% set _ = _zone.update({'serial': timestamp.stdout}) %}
-{% endif %}
-{% endfor %}
-{#
- # Eventually output the zone data
- #}
-; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }}
-; Zone file for {{ _zone_data['domain'] }}
-{{ ansible_managed | comment(decoration='; ') }}
-
-$ORIGIN {{ _zone_data['domain'] }}.
-$TTL {{ _zone_data['ttl'] }}
-
-{% if _zone_data['mname']|length > 0 %}
-@ IN SOA {{ _zone_data['mname']|first }}{% if not _zone_data['mname']|first|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} {{ _zone_data['rname'] }}. (
-{% else %}
-@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
-{% endif %}
- {{ _zone['serial'] }}
- {{ _zone_data['refresh'] }}
- {{ _zone_data['retry'] }}
- {{ _zone_data['expire'] }}
- {{ _zone_data['minimum'] }} )
-
-{% if _zone_data['mname']|length > 0 %}
-{% for ns in _zone_data['mname'] %}
- IN NS {{ ns }}{% if not ns|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %}
-
-{% endfor %}
-{% else %}
- IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
-{% endif %}
-{% for ns in _zone_data['aname'] %}
- IN NS {{ ns }}.
-{% endfor %}
-
-{% for mail in _zone_data['mail'] %}
-{% if loop.first %}@{% else %} {% endif %} IN MX {{ mail.preference}} {{ mail.name }}{% if not mail.name.endswith('.') %}.{{ _zone_data['domain'] }}.{% endif %}
-{% endfor %}
-
-{% if _zone_data['delegate']|length > 0 %}
-{% for host in _zone_data['delegate'] %}
-{{ host.zone.ljust(20) }} IN NS {{ host.dns }}
-{% endfor %}
-{% endif %}
-
-{% if _zone_data['hosts']|length > 0 %}
-{% for host in _zone_data['hosts'] %}
-{% if host.ip is defined %}
-{% if host.ip is string %}
-{% if "$GENERATE" not in host.name.upper() %}
-{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN A {{ host.ip }}
-{% endif %}
-{% if "$GENERATE" in host.name.upper() %}
-{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN A {{ host.ip }}
-{% endif %}
-{% else %}
-{% for ip in host.ip %}
-{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN A {{ ip }}
-{% endfor %}
-{% endif %}
-{% endif %}
-{% if host.ipv6 is defined %}
-{% if host.ipv6 is string %}
-{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN AAAA {{ host.ipv6 }}
-{% else %}
-{% for ip6 in host.ipv6 %}
-{{ host.name.ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN AAAA {{ ip6 }}
-{% endfor %}
-{% endif %}
-{% endif %}
-{% if host.aliases is defined %}
-{% for alias in host.aliases %}
-{% if "$GENERATE" not in host.name.upper() %}
-{{ (alias.name|default(alias)).ljust(20) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN {{ alias.type|default('cname')|upper}} {{ host.name }}
-{% endif %}
-{% if "$GENERATE" in host.name.upper() %}
-{{ alias.ljust(20) }} IN CNAME {{ host.name.rsplit(None, 1)[1] }}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% if host.sshfp is defined %}
-{% for sshfp in host.sshfp %}
-{{ host.name.ljust(20) }} IN SSHFP {{ sshfp}}
-{% endfor %}
-{% endif %}
-{% endfor %}
-{% else %}
-{{ ansible_hostname.ljust(26) }} IN A {{ ansible_default_ipv4.address }}
-{% endif %}
-{% for service in _zone_data['services'] %}
-{{ service.name.ljust(20) }}{{ (service.ttl|string).rjust(6) if service.ttl is defined else ''.ljust(6) }} IN SRV {{ service.priority|default('0') }} {{ service.weight|default('0') }} {{ service.port }} {{ service.target }}
-{% endfor %}
-{% for text in _zone_data['text'] %}
-{% if text.text is string %}
-{{ text.name.ljust(20) }} IN TXT "{{ text.text }}"
-{% else %}
-{% for entry in text.text %}
-{{ text.name.ljust(20) }} IN TXT "{{ entry }}"
-{% endfor %}
-{% endif %}
-{% endfor %}
-{% for naptr in _zone_data['naptr'] %}
-{{ naptr.name.ljust(20) }} IN NAPTR {{ naptr.order|default('100') }} {{ naptr.pref|default('10') }} "{{ naptr.flags }}" "{{ naptr.service }}" "{{ naptr.regex }}" {{ naptr.replacement }}
-{% endfor %}
diff --git a/roles/bertvv.bind/templates/master_etc_named.conf.j2 b/roles/bertvv.bind/templates/master_etc_named.conf.j2
deleted file mode 100644
index 9d9a2b6..0000000
--- a/roles/bertvv.bind/templates/master_etc_named.conf.j2
+++ /dev/null
@@ -1,158 +0,0 @@
-//
-// named.conf
-//
-{{ ansible_managed | comment('c') }}
-//
-{% for acl in bind_acls %}
-acl "{{ acl.name }}" {
-{% for match in acl.match_list %}
- {{ match }};
-{% endfor %}
-};
-
-{% endfor %}
-options {
- listen-on port 53 { {{ bind_listen_ipv4|join('; ') }}; };
- listen-on-v6 port 53 { {{ bind_listen_ipv6|join('; ') }}; };
- directory "{{ bind_dir }}";
- dump-file "{{ bind_dir }}/data/cache_dump.db";
- statistics-file "{{ bind_dir }}/data/named_stats.txt";
- memstatistics-file "{{ bind_dir }}/data/named_mem_stats.txt";
- allow-query { {{ bind_allow_query|join('; ') }}; };
-{% if bind_acls|length != 0 %}
- allow-transfer { {% for acl in bind_acls %}"{{ acl.name }}"; {% endfor %}};
-{% endif %}
-{% if bind_check_names is defined %}
- check-names {{ bind_check_names }};
-{% endif %}
-
- recursion {% if bind_recursion %}yes{% else %}no{% endif %};
- {% if bind_recursion %}allow-recursion { {{ bind_allow_recursion|join('; ') }}; };
- {% endif %}
-{% if bind_forwarders|length > 0 %}forwarders { {{ bind_forwarders|join('; ') }}; };{% endif %}
- {% if bind_forward_only %}forward only;{% endif %}
-
- rrset-order { order {{ bind_rrset_order }}; };
-
- dnssec-enable {{ bind_dnssec_enable }};
- dnssec-validation {{ bind_dnssec_validation }};
-
- /* Path to ISC DLV key */
- bindkeys-file "{{ bind_bindkeys_file }}";
-
- managed-keys-directory "{{ bind_dir }}/dynamic";
-
- pid-file "{{ bind_pid_file }}";
- session-keyfile "{{ bind_session_keyfile }}";
-{% if bind_query_log is defined %}
-
- querylog yes;
-{% endif %}
-};
-
-{% if bind_statistics_channels %}
-statistics-channels {
- inet {{ bind_statistics_host }} port {{ bind_statistics_port }} allow { {{ bind_statistics_allow|join('; ') }}; };
-};
-{% endif %}
-
-logging {
- channel default_debug {
- file "{{ bind_log }}";
- severity dynamic;
- print-time yes;
- };
-{% if bind_query_log is defined %}
- channel querylog {
- {% if bind_query_log.file is defined %}
- file "{{ bind_query_log.file }}" versions {{ bind_query_log.versions }} size {{ bind_query_log.size }};
- {% else %}
- file "{{ bind_query_log }}" versions 600 size 20m;
- {% endif %}
- severity dynamic;
- print-time yes;
- };
- category queries { querylog; };
-{% endif %}
-{% if bind_other_logs is defined %}
-
-{% for log in bind_other_logs %}
- channel {{ log.name }} {
- file "{{ log.file }}" versions {{ log.versions }} size {{ log.size }};
- severity dynamic;
- print-time yes;
- };
- category "{{ log.name }}" { "{{ log.name }}"; };
-{% endfor %}
-{% endif %}
-};
-
-{% for file in bind_default_zone_files %}
-include "{{ file }}";
-{% endfor %}
-{% for file in bind_extra_include_files %}
-include "{{ file }}";
-{% endfor %}
-
-{% if bind_zone_domains is defined %}
-{% for bind_zone in bind_zone_domains %}
-{% if bind_zone.create_forward_zones is not defined or bind_zone.create_forward_zones %}
-zone "{{ bind_zone.name }}" IN {
- type master;
- file "{{ bind_zone_dir }}/{{ bind_zone.name }}";
- notify yes;
-{% if bind_zone.also_notify is defined %}
- also-notify { {{ bind_zone.also_notify|join('; ') }}; };
-{% endif %}
-{% if bind_zone.allow_update is defined %}
- allow-update { {{ bind_zone.allow_update|join('; ') }}; };
-{% else %}
- allow-update { none; };
-{% endif %}
-{% if bind_zone.delegate is defined %}
- forwarders {};
-{% endif %}
-};
-{% endif %}
-
-{% if bind_zone.networks is defined %}
-{% if bind_zone.create_reverse_zones is not defined or bind_zone.create_reverse_zones %}
-{% for network in bind_zone.networks %}
-zone "{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa" IN {
- type master;
- file "{{ bind_zone_dir }}/{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa";
- notify yes;
-{% if bind_zone.also_notify is defined %}
- also-notify { {{ bind_zone.also_notify|join('; ') }}; };
-{% endif %}
-{% if bind_zone.allow_update is defined %}
- allow-update { {{ bind_zone.allow_update|join('; ') }}; };
-{% else %}
- allow-update { none; };
-{% endif %}
-};
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if bind_zone.ipv6_networks is defined %}
-{% if bind_zone.create_reverse_zones is not defined or bind_zone.create_reverse_zones %}
-{% for network in bind_zone.ipv6_networks %}
-zone "{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):] }}" IN {
- type master;
- file "{{ bind_zone_dir }}/{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):-1] }}";
- notify yes;
-{% if bind_zone.also_notify is defined %}
- also-notify { {{ bind_zone.also_notify|join('; ') }}; };
-{% endif %}
-{% if bind_zone.allow_update is defined %}
- allow-update { {{ bind_zone.allow_update|join('; ') }}; };
-{% else %}
- allow-update { none; };
-{% endif %}
-};
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
diff --git a/roles/bertvv.bind/templates/reverse_zone.j2 b/roles/bertvv.bind/templates/reverse_zone.j2
deleted file mode 100644
index d639afa..0000000
--- a/roles/bertvv.bind/templates/reverse_zone.j2
+++ /dev/null
@@ -1,101 +0,0 @@
-{#
- # First create a dict holding the entire zone information and create a hash
- # from it, that it can be compared with subsequent task executions. In this
- # way the serial will only be updated if there are some content changes.
- #}
-{% set _zone_data = {} %}
-{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %}
-{% set _ = _zone_data.update({'domain': item.0.name}) %}
-{% set _ = _zone_data.update({'mname': item.0.name_servers|default([])}) %}
-{% set _ = _zone_data.update({'aname': item.0.other_name_servers|default([])}) %}
-{% if item.0.hostmaster_email is defined %}
-{% set _ = _zone_data.update({'rname': (( item.0.hostmaster_email )) + ('' if (item.0.hostmaster_email is search('\.')) else ('.' + _zone_data['domain']))}) %}
-{% else %}
-{% set _ = _zone_data.update({'rname': 'hostmaster.' + _zone_data['domain']}) %}
-{% endif %}
-{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %}
-{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %}
-{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %}
-{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %}
-{% set _ = _zone_data.update({'hosts': item.0.hosts|default([]) | selectattr('ip', 'defined') | selectattr('ip', 'string') | selectattr('ip', 'search', '^'+item.1) | list}) %}
-{% set _ = _zone_data.update({'revip': ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1]))}) %}
-{#
- # Compare the zone file hash with the current zone data hash and set serial
- # accordingly
- #}
-{% set _zone = {'hash': _zone_data | string | hash('md5')} %}
-{% for _result in reverse_hashes if _result.network == item.1 %}
-{% set _hash_serial = _result.hash.split(' ')[2:] %}
-{% if _hash_serial and _hash_serial[0] == _zone['hash'] %}
-{% set _ = _zone.update({'serial': _hash_serial[1]}) %}
-{% else %}
-{% set _ = _zone.update({'serial': timestamp.stdout}) %}
-{% endif %}
-{% endfor %}
-{#
- # Eventually output the zone data
- #}
-; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }}
-; Reverse zone file for {{ _zone_data['domain'] }}
-{{ ansible_managed | comment(decoration='; ') }}
-
-$TTL {{ _zone_data['ttl'] }}
-$ORIGIN {{ ('.'.join(item.1.replace(item.1+'.','').split('.')[::-1])) }}.in-addr.arpa.
-
-{% if _zone_data['mname']|length > 0 %}
-@ IN SOA {{ _zone_data['mname']|first }}{% if not _zone_data['mname']|first|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} {{ _zone_data['rname'] }}. (
-{% else %}
-@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
-{% endif %}
- {{ _zone['serial'] }}
- {{ _zone_data['refresh'] }}
- {{ _zone_data['retry'] }}
- {{ _zone_data['expire'] }}
- {{ _zone_data['minimum'] }} )
-
-{% if _zone_data['mname']|length > 0 %}
-{% for ns in _zone_data['mname'] %}
- IN NS {{ ns }}{% if not ns|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %}
-
-{% endfor %}
-{% else %}
- IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
-{% endif %}
-{% for ns in _zone_data['aname'] %}
- IN NS {{ ns }}.
-{% endfor %}
-
-{% if _zone_data['hosts']|length > 0 %}
-{% for host in _zone_data['hosts'] %}
-{% if host.ip is defined %}
-{% if host.ip == item.1 %}
-@ IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
-{% else %}
-{% if host.ip is string and host.ip.startswith(item.1) %}
-{% if host.name == '@' %}
-{{ ('.'.join(host.ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ _zone_data['domain'] }}.
-{% else %}
-{% if "$GENERATE" not in host.name.upper() %}
-{{ ('.'.join(host.ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
-{% endif %}
-{% if "$GENERATE" in host.name.upper() %}
-{{ host.name.rsplit(None, 1)[0] }} {{ ('.'.join(host.ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }} IN PTR {{ host.name.rsplit(None, 1)[1] }}.{{ _zone_data['domain'] }}.
-{% endif %}
-{% endif %}
-{% else %}
-{% for ip in host.ip %}
-{% if ip.startswith(item.1) %}
-{{ ('.'.join(ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ _zone_data['domain'] }}.
-{% if host.name == '@' %}
-{% else %}
-{{ ('.'.join(ip.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
-{% endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endif %}
-{% endfor %}
-{% else %}
-{{ ('.'.join(ansible_default_ipv4.address.replace(item.1+'.','').split('.')[::-1])).ljust(16) }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
-{% endif %}
diff --git a/roles/bertvv.bind/templates/reverse_zone_ipv6.j2 b/roles/bertvv.bind/templates/reverse_zone_ipv6.j2
deleted file mode 100644
index 2a1be82..0000000
--- a/roles/bertvv.bind/templates/reverse_zone_ipv6.j2
+++ /dev/null
@@ -1,96 +0,0 @@
-{#
- # First create a dict holding the entire zone information and create a hash
- # from it, that it can be compared with subsequent task executions. In this
- # way the serial will only be updated if there are some content changes.
- #}
-{% set _zone_data = {} %}
-{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %}
-{% set _ = _zone_data.update({'domain': item.0.name}) %}
-{% set _ = _zone_data.update({'mname': item.0.name_servers|default([])}) %}
-{% set _ = _zone_data.update({'aname': item.0.other_name_servers|default([])}) %}
-{% if item.0.hostmaster_email is defined %}
-{% set _ = _zone_data.update({'rname': (( item.0.hostmaster_email )) + ('' if (item.0.hostmaster_email is search('\.')) else ('.' + _zone_data['domain']))}) %}
-{% else %}
-{% set _ = _zone_data.update({'rname': 'hostmaster.' + _zone_data['domain']}) %}
-{% endif %}
-{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %}
-{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %}
-{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %}
-{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %}
-{% set _ = _zone_data.update({'hosts': item.0.hosts|default([]) | selectattr('ipv6','defined') | selectattr('ipv6','string') | selectattr('ipv6', 'search', '^'+item.1|regex_replace('/.*$','')) | list }) %}
-{% set _ = _zone_data.update({'revip': (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):] }) %}
-{#
- # Compare the zone file hash with the current zone data hash and set serial
- # accordingly
- #}
-{% set _zone = {'hash': _zone_data | string | hash('md5')} %}
-{% for _result in reverse_hashes_ipv6 if _result.network == item.1 %}
-{% set _hash_serial = _result.hash.split(' ')[2:] %}
-{% if _hash_serial and _hash_serial[0] == _zone['hash'] %}
-{% set _ = _zone.update({'serial': _hash_serial[1]}) %}
-{% else %}
-{% set _ = _zone.update({'serial': timestamp.stdout}) %}
-{% endif %}
-{% endfor %}
-{#
- # Eventually output the zone data
- #}
-; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }}
-; Reverse zone file for {{ _zone_data['domain'] }}
-{{ ansible_managed | comment(decoration='; ') }}
-
-$TTL {{ _zone_data['ttl'] }}
-$ORIGIN {{ (item.1 | ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):] }}
-
-{% if _zone_data['mname']|length > 0 %}
-@ IN SOA {{ _zone_data['mname']|first }}{% if not _zone_data['mname']|first|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %} {{ _zone_data['rname'] }}. (
-{% else %}
-@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
-{% endif %}
- {{ _zone['serial'] }}
- {{ _zone_data['refresh'] }}
- {{ _zone_data['retry'] }}
- {{ _zone_data['expire'] }}
- {{ _zone_data['minimum'] }} )
-
-{% if _zone_data['mname']|length > 0 %}
-{% for ns in _zone_data['mname'] %}
- IN NS {{ ns }}{% if not ns|regex_search('\.$') %}.{{ _zone_data['domain'] }}.{% endif %}
-
-{% endfor %}
-{% else %}
- IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
-{% endif %}
-{% for ns in _zone_data['aname'] %}
- IN NS {{ ns }}.
-{% endfor %}
-
-{% if _zone_data['hosts']|length > 0 %}
-{% for host in _zone_data['hosts'] %}
-{% if host.ipv6 is defined %}
-{% if host.ipv6 == item.1 %}
-@ IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
-{% else %}
-{% if host.ipv6 is string and host.ipv6.startswith(item.1|regex_replace('/.*$','')) %}
-{% if host.name == '@' %}
-{{ host.ipv6 | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ _zone_data['domain'] }}.
-{% else %}
-{{ host.ipv6 | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
-{% endif %}
-{% else %}
-{% for ip in host.ipv6 %}
-{% if ip.startswith(item.1|regex_replace('/.*$','')) %}
-{{ ip | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ _zone_data['domain'] }}.
-{% if host.name == '@' %}
-{% else %}
-{{ ip | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
-{% endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endif %}
-{% endfor %}
-{% else %}
-{{ ansible_default_ipv6.address | ipaddr('revdns') }}{{ (host.ttl|string).rjust(6) if host.ttl is defined else ''.ljust(6) }} IN PTR {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
-{% endif %}
diff --git a/roles/bertvv.bind/templates/slave_etc_named.conf.j2 b/roles/bertvv.bind/templates/slave_etc_named.conf.j2
deleted file mode 100644
index 4386723..0000000
--- a/roles/bertvv.bind/templates/slave_etc_named.conf.j2
+++ /dev/null
@@ -1,120 +0,0 @@
-//
-// named.conf
-//
-{{ ansible_managed | comment('c') }}
-//
-{% for acl in bind_acls %}
-acl "{{ acl.name }}" {
-{% for match in acl.match_list %}
- {{ match }};
-{% endfor %}
-};
-
-{% endfor %}
-options {
- listen-on port 53 { {{ bind_listen_ipv4|join(';') }}; };
- listen-on-v6 port 53 { {{ bind_listen_ipv6|join(';') }}; };
- directory "{{ bind_dir }}";
- dump-file "{{ bind_dir }}/data/cache_dump.db";
- statistics-file "{{ bind_dir }}/data/named_stats.txt";
- memstatistics-file "{{ bind_dir }}/data/named_mem_stats.txt";
- allow-query { {{ bind_allow_query|join(';') }}; };
-{% if bind_acls|length != 0 %}
- allow-transfer { {% for acl in bind_acls %}"{{ acl.name }}"; {% endfor %}};
-{% endif %}
-
- recursion {% if bind_recursion %}yes{% else %}no{% endif %};
- {% if bind_recursion %}allow-recursion { {{ bind_allow_recursion|join('; ') }}; };
- {% endif %}
-{% if bind_forwarders|length > 0 %}forwarders { {{ bind_forwarders|join('; ') }}; };{% endif %}
- {% if bind_forward_only %}forward only;{% endif %}
-
- rrset-order { order {{ bind_rrset_order }}; };
-
- dnssec-enable {{ bind_dnssec_enable }};
- dnssec-validation {{ bind_dnssec_validation }};
-
- /* Path to ISC DLV key */
- bindkeys-file "{{ bind_bindkeys_file }}";
-
- managed-keys-directory "{{ bind_dir }}/dynamic";
-
- pid-file "{{ bind_pid_file }}";
- session-keyfile "{{ bind_session_keyfile }}";
-
-{% if bind_query_log is defined %}
- querylog yes;
-{% endif %}
-};
-
-{% if bind_statistics_channels %}
-statistics-channels {
- inet {{ bind_statistics_host }} port {{ bind_statistics_port }} allow { {{ bind_statistics_allow|join('; ') }}; };
-};
-{% endif %}
-
-logging {
- channel default_debug {
- file "{{ bind_log }}";
- severity dynamic;
- print-time yes;
- };
-{% if bind_query_log is defined %}
- channel querylog {
- {% if bind_query_log.file is defined %}
- file "{{ bind_query_log.file }}" versions {{ bind_query_log.versions }} size {{ bind_query_log.size }};
- {% else %}
- file "{{ bind_query_log }}" versions 600 size 20m;
- {% endif %}
- severity dynamic;
- print-time yes;
- };
- category queries { querylog; };
-{% endif %}
-};
-
-{% for file in bind_default_zone_files %}
-include "{{ file }}";
-{% endfor %}
-{% for file in bind_extra_include_files %}
-include "{{ file }}";
-{% endfor %}
-
-{% if bind_zone_domains is defined %}
-{% for bind_zone in bind_zone_domains %}
-{% if bind_zone.create_forward_zones is not defined or bind_zone.create_forward_zones %}
-zone "{{ bind_zone.name }}" IN {
- type slave;
- masters { {{ bind_zone_master_server_ip }}; };
- file "{{ bind_slave_dir }}/{{ bind_zone.name }}";
-{% if bind_zone.delegate is defined %}
- forwarders {};
-{% endif %}
-};
-{% endif %}
-
-{% if bind_zone.create_reverse_zones is not defined or bind_zone.create_reverse_zones %}
-{% if bind_zone.networks is defined %}
-{% for network in bind_zone.networks %}
-zone "{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa" IN {
- type slave;
- masters { {{ bind_zone_master_server_ip }}; };
- file "{{ bind_slave_dir }}/{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa";
-};
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if bind_zone.create_reverse_zones is not defined or bind_zone.create_reverse_zones %}
-{% if bind_zone.ipv6_networks is defined %}
-{% for network in bind_zone.ipv6_networks %}
-zone "{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):] }}" IN {
- type slave;
- masters { {{ bind_zone_master_server_ip }}; };
- file "{{ bind_slave_dir }}/{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):-1] }}";
-};
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
diff --git a/roles/bertvv.bind/vars/Archlinux.yml b/roles/bertvv.bind/vars/Archlinux.yml
deleted file mode 100644
index 016fd0d..0000000
--- a/roles/bertvv.bind/vars/Archlinux.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-# roles/bind/vars/RedHat.yml
----
-
-bind_packages:
- - python-netaddr
- - bind
- - bind-tools
-
-bind_service: named
-
-# Main config file
-bind_config: /etc/named.conf
-
-# Zone files included in the installation
-bind_default_zone_files: []
-
-# Directory with run-time stuff
-bind_dir: /var/named
-bind_conf_dir: "{{ bind_dir }}"
-auth_file: "auth_transfer.conf"
-bind_auth_file: "{{ bind_conf_dir }}/{{ auth_file }}"
-
-bind_owner: root
-bind_group: named
-
-bind_bindkeys_file: "/etc/named.iscdlv.key"
-bind_pid_file: "/run/named/named.pid"
-bind_session_keyfile: "/run/named/session.key"
-
-# Custom location for zone files
-bind_zone_dir: "{{ bind_dir }}"
-bind_slave_dir: "{{ bind_dir }}/slaves"
diff --git a/roles/bertvv.bind/vars/Debian.yml b/roles/bertvv.bind/vars/Debian.yml
deleted file mode 100644
index 066d99c..0000000
--- a/roles/bertvv.bind/vars/Debian.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-# roles/bind/vars/Debian.yml
----
-
-bind_packages:
- - python-netaddr
- - bind9
- - bind9utils
-
-bind_service: bind9
-
-# Main config file
-bind_config: /etc/bind/named.conf
-
-# Localhost zone
-bind_default_zone_files:
- - /etc/bind/named.conf.default-zones
-
-# Directory with run-time stuff
-bind_dir: /var/cache/bind
-bind_conf_dir: "/etc/bind"
-auth_file: "auth_transfer.conf"
-bind_auth_file: "{{ bind_conf_dir }}/{{ auth_file }}"
-
-bind_owner: root
-bind_group: bind
-
-bind_bindkeys_file: "/etc/named.iscdlv.key"
-bind_pid_file: "/run/named/named.pid"
-bind_session_keyfile: "/run/named/session.key"
-
-# Custom location for master zone files
-bind_zone_dir: "{{ bind_dir }}"
-bind_slave_dir: "{{ bind_dir }}/slaves"
diff --git a/roles/bertvv.bind/vars/FreeBSD.yml b/roles/bertvv.bind/vars/FreeBSD.yml
deleted file mode 100644
index 18c9035..0000000
--- a/roles/bertvv.bind/vars/FreeBSD.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-# roles/bind/vars/Debian.yml
----
-
-bind_packages:
- - py36-netaddr
- - bind911
-
-bind_service: named
-
-# Main config file
-bind_config: /usr/local/etc/namedb/named.conf
-
-# Localhost zone
-bind_default_zone_files:
- - /usr/local/etc/namedb/named.conf.default-zones
-
-# Directory with run-time stuff
-bind_dir: /var/cache/named
-bind_conf_dir: "/usr/local/etc/namedb/"
-auth_file: "auth_transfer.conf"
-bind_auth_file: "{{ bind_conf_dir }}/{{ auth_file }}"
-
-bind_owner: bind
-bind_group: bind
-
-bind_bindkeys_file: "/usr/local/etc/namedb/bind.keys"
-bind_pid_file: "/var/run/named/named.pid"
-bind_session_keyfile: "/var/run/named/session.key"
-
-# Custom location for master zone files
-bind_zone_dir: "{{ bind_dir }}/master"
-bind_slave_dir: "{{ bind_dir }}/slave"
diff --git a/roles/bertvv.bind/vars/RedHat.yml b/roles/bertvv.bind/vars/RedHat.yml
deleted file mode 100644
index fb3b56a..0000000
--- a/roles/bertvv.bind/vars/RedHat.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-# roles/bind/vars/RedHat.yml
----
-
-bind_packages:
- - "{{ ( ansible_distribution_major_version == '8' ) | ternary( 'python3-netaddr', 'python-netaddr' ) }}"
- - bind
- - bind-utils
-
-bind_service: named
-
-# Main config file
-bind_config: /etc/named.conf
-
-# Zone files included in the installation
-bind_default_zone_files:
- - /etc/named.root.key
- - /etc/named.rfc1912.zones
-
-# Directory with run-time stuff
-bind_dir: /var/named
-bind_conf_dir: "/etc/named"
-auth_file: "auth_transfer.conf"
-bind_auth_file: "{{ bind_conf_dir }}/{{ auth_file }}"
-
-bind_owner: root
-bind_group: named
-
-bind_bindkeys_file: "/etc/named.iscdlv.key"
-bind_pid_file: "/run/named/named.pid"
-bind_session_keyfile: "/run/named/session.key"
-
-# Custom location for master zone files
-bind_zone_dir: "{{ bind_dir }}"
-bind_slave_dir: "{{ bind_dir }}/slaves"
diff --git a/roles/debian-freeipa-client/defaults/main.yml b/roles/debian-freeipa-client/defaults/main.yml
deleted file mode 100644
index e29fa61..0000000
--- a/roles/debian-freeipa-client/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-ipa_realm: "example.com"
-ipa_server: freeipa.example.com
diff --git a/roles/debian-freeipa-client/files/backup_excludes b/roles/debian-freeipa-client/files/backup_excludes
deleted file mode 100644
index 50535ff..0000000
--- a/roles/debian-freeipa-client/files/backup_excludes
+++ /dev/null
@@ -1,2 +0,0 @@
-- lastlog
-- faillog
diff --git a/roles/debian-freeipa-client/files/mkhomedir b/roles/debian-freeipa-client/files/mkhomedir
deleted file mode 100644
index d2e7214..0000000
--- a/roles/debian-freeipa-client/files/mkhomedir
+++ /dev/null
@@ -1,8 +0,0 @@
-Name: Create home directory during login
-Default: yes
-Priority: 127
-
-Session-Type: Additional
-Session-Interactive-Only: yes
-Session:
- required pam_mkhomedir.so skel=/etc/skel/ umask=0022
diff --git a/roles/debian-freeipa-client/handlers/main.yml b/roles/debian-freeipa-client/handlers/main.yml
deleted file mode 100644
index 2b16a8c..0000000
--- a/roles/debian-freeipa-client/handlers/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: restart sssd
- service: name=sssd state=restarted
-
-- name: restart sshd
- service: name=sshd state=restarted
-
-- name: execute pam-auth-update
- command: pam-auth-update --package
-
-- name: restart ntp
- service: name=ntp state=restarted
diff --git a/roles/debian-freeipa-client/tasks/main.yml b/roles/debian-freeipa-client/tasks/main.yml
deleted file mode 100644
index 80f602f..0000000
--- a/roles/debian-freeipa-client/tasks/main.yml
+++ /dev/null
@@ -1,135 +0,0 @@
----
-
-- name: install kerberoes user utility
- package:
- name: krb5-user
- state: present
-
-- name: check if we have a cached kerberos ticket
- delegate_to: "{{ ipa_server }}"
- vars: {ansible_user: ""}
- become: no
- command: klist
- run_once: yes
- changed_when: false
-
-- name: check if the host exists in the directory
- delegate_to: "{{ ipa_server }}"
- vars: {ansible_user: ""}
- become: no
- command: flock /tmp/ansible-lock ipa host-show {{ ansible_fqdn }}
- register: host_show
- failed_when: host_show.rc == 1
- changed_when: false
-
-- name: create the host principal
- delegate_to: "{{ ipa_server }}"
- vars: {ansible_user: ""}
- become: no
- command: flock /tmp/ansible-lock ipa host-add {{ ansible_fqdn }} --force
- --sshpubkey \"{{ ansible_ssh_host_key_rsa_public }}\"
- --os {{ ansible_distribution }}
- when: host_show.rc != 0
- tags: [install]
-
-- name: check if /etc/krb5.keytab exists
- stat: path=/etc/krb5.keytab
- register: keytab
-
-- name: generate the host keytab
- delegate_to: "{{ ipa_server }}"
- vars: {ansible_user: ""}
- become: no
- command: flock /tmp/ansible-lock /usr/sbin/ipa-getkeytab -s {{ ipa_server }} -p host/{{ ansible_fqdn }} -k /tmp/{{ ansible_hostname }}.keytab
- when: 'not keytab.stat.exists or "Keytab: True" not in host_show.stdout'
- tags: [install]
-
-- name: transfer the keytab over to the IPA client
- synchronize:
- src: /tmp/{{ ansible_hostname }}.keytab
- dest: /etc/krb5.keytab
- archive: no
- ssh_args: -l root
- delegate_to: "{{ ipa_server }}"
- vars: {ansible_user: ""}
- become: no
- when: 'not keytab.stat.exists or "Keytab: True" not in host_show.stdout'
- notify: restart sssd
- tags: [install]
-
-- name: remove the keytab file on the FreeIPA server
- delegate_to: "{{ ipa_server }}"
- vars: {ansible_user: ""}
- become: no
- file:
- path: /tmp/{{ ansible_hostname }}.keytab
- state: absent
- tags: [install]
-
-- name: create the directory /etc/sssd
- file:
- path: /etc/sssd
- state: directory
-
-- name: configure sssd
- template:
- src: sssd.conf.j2
- dest: /etc/sssd/sssd.conf
- mode: 0600
- notify: restart sssd
- tags: [configure]
-
-- name: install sssd
- apt: name=sssd state=present
- tags: [install]
-
-- name: automatically create user home directories
- copy:
- src: mkhomedir
- dest: /usr/share/pam-configs/mkhomedir
- notify: execute pam-auth-update
-
-- name: configure krb5
- template:
- src: krb5.conf.j2
- dest: /etc/krb5.conf
- tags: [configure]
-
-- name: set AuthorizedKeysCommand for sshd
- lineinfile:
- regexp: AuthorizedKeysCommand\b
- line: AuthorizedKeysCommand /usr/bin/sss_ssh_authorizedkeys
- dest: /etc/ssh/sshd_config
- notify: restart sshd
- tags: [configure]
-
-- name: set AuthorizedKeysCommandUser for sshd
- lineinfile:
- regexp: AuthorizedKeysCommandUser
- line: AuthorizedKeysCommandUser nobody
- dest: /etc/ssh/sshd_config
- notify: restart sshd
- tags: [configure]
-
-- name: set GlobalKnownHostsFile for ssh
- lineinfile:
- regexp: GlobalKnownHostsFile
- line: GlobalKnownHostsFile /var/lib/sss/pubconf/known_hosts
- dest: /etc/ssh/ssh_config
-
-- name: set ProxyCommand for ssh
- lineinfile:
- regexp: ProxyCommand
- line: ProxyCommand /usr/bin/sss_ssh_knownhostsproxy -p %p %h
- dest: /etc/ssh/ssh_config
- tags: [configure]
-
-- name: start and enable sssd
- service: name=sssd state=started enabled=yes
- tags: [serve]
-
-- name: exclude lastlog and faillog from backups
- copy:
- src: backup_excludes
- dest: /var/log/.backup
- tags: [configure]
diff --git a/roles/debian-freeipa-client/templates/krb5.conf.j2 b/roles/debian-freeipa-client/templates/krb5.conf.j2
deleted file mode 100644
index 58077b2..0000000
--- a/roles/debian-freeipa-client/templates/krb5.conf.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-# {{ ansible_managed }}
-includedir /var/lib/sss/pubconf/krb5.include.d/
-
-[libdefaults]
- default_realm = {{ ipa_realm }}
- dns_lookup_realm = false
- dns_lookup_kdc = false
- rdns = false
- dns_canonicalize_hostname = false
- ticket_lifetime = 24h
- forwardable = true
-
-
-[realms]
- {{ ipa_realm |upper }} = {
- kdc = {{ ipa_server }}:88
- master_kdc = {{ ipa_server }}:88
- admin_server = {{ ipa_server }}:749
- kpasswd_server = {{ ipa_server }}:464
- default_domain = {{ bind_localdomain }}
- }
-
-
-[domain_realm]
- .{{ bind_localdomain }} = {{ ipa_realm |upper}}
- {{ bind_localdomain }} = {{ ipa_realm |upper}}
-
-[logging]
-default = FILE:/var/log/krb5libs.log
-kdc = FILE:/var/log/krb5kdc.log
-admin_server = FILE:/var/log/kadmin.log
diff --git a/roles/debian-freeipa-client/templates/sssd.conf.j2 b/roles/debian-freeipa-client/templates/sssd.conf.j2
deleted file mode 100644
index dc1d9cf..0000000
--- a/roles/debian-freeipa-client/templates/sssd.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-# {{ ansible_managed }}
-[sssd]
-config_file_version = 2
-services = nss, pam, sudo, ssh
-domains = {{ ipa_realm }}
-
-[nss]
-
-[pam]
-
-[ssh]
-
-[sudo]
-
-[domain/{{ ipa_realm }}]
-cache_credentials = true
-krb5_store_password_if_offline = true
-id_provider = ipa
-auth_provider = ipa
-access_provider = ipa
-chpass_provider = ipa
-ldap_tls_cacert = /etc/ipa/ca.crt
-ipa_hostname = {{ ansible_fqdn }}
diff --git a/roles/felixfontein.acme_certificate/.gitignore b/roles/felixfontein.acme_certificate/.gitignore
deleted file mode 100644
index ed8ebf5..0000000
--- a/roles/felixfontein.acme_certificate/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-__pycache__
\ No newline at end of file
diff --git a/roles/felixfontein.acme_certificate/.yamllint b/roles/felixfontein.acme_certificate/.yamllint
deleted file mode 100644
index 6a8c00d..0000000
--- a/roles/felixfontein.acme_certificate/.yamllint
+++ /dev/null
@@ -1,45 +0,0 @@
----
-extends: default
-
-rules:
- line-length:
- max: 140
- level: warning
- document-start:
- present: true
- document-end:
- present: false
- truthy:
- level: error
- allowed-values:
- - 'yes'
- - 'no'
- - 'true'
- - 'false'
- - 'True'
- - 'False'
- indentation:
- spaces: 2
- indent-sequences: consistent
- key-duplicates: enable
- trailing-spaces: enable
- new-line-at-end-of-file: disable
- hyphens:
- max-spaces-after: 1
- empty-lines:
- max: 2
- max-start: 0
- max-end: 0
- commas:
- max-spaces-before: 0
- min-spaces-after: 1
- max-spaces-after: 1
- colons:
- max-spaces-before: 0
- max-spaces-after: 1
- brackets:
- min-spaces-inside: 0
- max-spaces-inside: 0
- braces:
- min-spaces-inside: 0
- max-spaces-inside: 0
diff --git a/roles/felixfontein.acme_certificate/CHANGELOG.md b/roles/felixfontein.acme_certificate/CHANGELOG.md
deleted file mode 100644
index 58da626..0000000
--- a/roles/felixfontein.acme_certificate/CHANGELOG.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Changelog for acme_certificate
-
-## Version 1.1.1 (2020-05-22)
-
-- Linting, to make Galaxy more happy. (ansible-lint does not like missing modules. This might get better with collections.)
-
-## Version 1.1.0 (2020-05-22)
-
-- Added better namespacing for role parameters; all role parameters now start with `acme_certificate_`. The old, shorter names can still be used for now. Support for them will be dropped in version 2.0.0, to be released later this year.
-- Dropped support for GCDNS (which never worked).
-- Support for DNS provider NS1 for DNS challenges (thanks to @timelapserduck).
-- Lint YAML files (thanks to @pgporada).
-- Allow `key_path` to not have trailing slash (thanks to @nwmcsween).
-- Fix curve used for P-256.
-- Require Ansible 2.8.3.
-
-## Version 1.0 (2019-07-01)
-
-First version published on Ansible Galaxy.
diff --git a/roles/felixfontein.acme_certificate/LICENSE b/roles/felixfontein.acme_certificate/LICENSE
deleted file mode 100644
index 347b83e..0000000
--- a/roles/felixfontein.acme_certificate/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015--2020 Felix Fontein
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/roles/felixfontein.acme_certificate/README.md b/roles/felixfontein.acme_certificate/README.md
deleted file mode 100644
index 902abfe..0000000
--- a/roles/felixfontein.acme_certificate/README.md
+++ /dev/null
@@ -1,360 +0,0 @@
-# acme_certificate 1.1.1
-
-Allows to obtain certificates from Let's Encrypt with minimal interaction with the webserver. Most code is executed on the controller, and the account key is never send to the nodes.
-
-The role can be installed via [Ansible Galaxy](https://galaxy.ansible.com/felixfontein/acme_certificate):
-
- ansible-galaxy install felixfontein.acme_certificate
-
-For changes in this role, see [the changelog](CHANGELOG.md).
-
-## Description
-
-This is an [Ansible](https://github.com/ansible/ansible) role which can use any CA supporting the ACME protocol, such as [Let's Encrypt](https://letsencrypt.org/) or [Buypass](https://www.buypass.com/ssl/products/acme), to issue TLS/SSL certificates for your server. This role requires Ansible 2.8.3 or newer and is based on the [acme_certificate module](https://docs.ansible.com/ansible/latest/acme_certificate_module.html) coming with Ansible.
-
-The main advantage of this approach over others is that *almost no code is executed on your webserver*: only when you use HTTP challenges, files need to be copied onto your webserver, and afterwards deleted from it. Everything else is executed on your local machine!
-
-(This does not cover installing the certificates, you have to do that yourself in another role.)
-
-## Requirements
-
-Requires the Python [cryptography](https://pypi.org/project/cryptography/) library installed on the controller, available to the Python version used to execute the playbook. If `cryptography` is not installed, a recent enough version of [PyOpenSSL](https://pypi.org/project/pyOpenSSL/) is currently supported as a fallback by the Ansible `openssl_privatekey` and `openssl_csr` modules.
-
-The `openssl` binary must also be available in the executable path on the controller. It is needed by the `acme_certificate` module in case `cryptography` is not installed, and it is used for certificate chain validation.
-
-If DNS challenges are used, there can be other requirements depending on the DNS provider. For example, for Amazon's Route 53, the Ansible `route53` module requires the Python `boto` package.
-
-## Account Key Setup
-
-You can create an account key using the `openssl` binary as follows:
-
- # RSA 4096 bit key
- openssl genrsa 4096 -out keys/acme-account.key
- # ECC 256 bit key (P-256)
- openssl ecparam -name prime256v1 -genkey -out keys/acme-account.key
- # ECC 384 bit key (P-384)
- openssl ecparam -name secp384r1 -genkey -out keys/acme-account.key
-
-With Ansible, you can use the `openssl_privatekey` module as follows:
-
- - name: Generate RSA 4096 key
- openssl_privatekey:
- path: keys/acme-account.key
- type: RSA
- size: 4096
- - name: Generate ECC 256 bit key (P-256)
- openssl_privatekey:
- path: keys/acme-account.key
- type: ECC
- curve: secp256r1
- - name: Generate ECC 384 bit key (P-384)
- openssl_privatekey:
- path: keys/acme-account.key
- type: ECC
- curve: secp384r1
-
-Make sure you store the account key safely. As opposed to certificate private keys, there is no need to regenerate it frequently, and it makes recovation of certificates issued with it very simple.
-
-## Role Variables
-
-Please note that from May 2020 on, all variables must be prefixed with `acme_certificate_`. For some time, the module will still use the old (short) variable names if the longer ones are not defined. Please upgrade your role usage as soon as possible.
-
-These are the main variables:
-
-- `acme_certificate_acme_account`: Path to the private ACME account key. Must always be specified.
-- `acme_certificate_acme_email`: Your email address which shall be associated to the ACME account. Must always be specified.
-- `acme_certificate_algorithm`: The algorithm used for creating private keys. The default is `"rsa"`; other choices are `"p-256"`, `"p-384"` or `"p-521"` for the NIST elliptic curves `prime256v1`, `secp384r1` and `secp521r1`, respectively.
-- `acme_certificate_key_length`: The bitlength to use for RSA private keys. The default is 4096.
-- `acme_certificate_key_name`: The basename for storing the keys and certificates. The default is the first domain specified, with `*` replaced by `_`.
-- `acme_certificate_keys_path`: Where the keys and certificates are stored. Default value is `"keys/"`.
-- `acme_certificate_keys_old_path`: Where old keys and certificates should be copied to; used in case `acme_certificate_keys_old_store` is true. Default value is `"keys/old/"`.
-- `acme_certificate_keys_old_store`: If set to `true`, will make copies of old keys and certificates. The copies will be stored in the directory specified by `acme_certificate_keys_old_store`. Default value is `false`.
-- `acme_certificate_keys_old_prepend_timestamp`: Whether copies of old keys and certificates should be prepended by the current date and time. Default value is `false`.
-- `acme_certificate_ocsp_must_staple`: Whether a certificate with the OCSP Must Staple extension is requested. Default value is `false`.
-- `acme_certificate_agreement`: The terms of service document the user agrees to. Default value is `https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf`.
-- `acme_certificate_acme_directory`: The ACME directory to use. Default is `https://acme-v02.api.letsencrypt.org/directory`, which is the current production ACME v2 endpoint of Let's Encrypt.
-- `acme_certificate_acme_version`: The ACME directory's version. Default is 2. Use 1 for ACME v1.
-- `acme_certificate_challenge`: The challenge type to use. Should be `http-01` for HTTP challenges (needs access to web server) or `dns-01` for DNS challenges (needs access to DNS provider).
-- `acme_certificate_root_certificate`: The root certificate for the ACME directory. Default value is `https://letsencrypt.org/certs/isrgrootx1.pem` for the root certificate of Let's Encrypt.
-- `acme_certificate_deactivate_authzs`: Whether `authz`s (authorizations) should be deactivated afterwards. Default value is `true`. Set to `false` to be able to re-use `authz`s.
-- `acme_certificate_modify_account`: Whether the ACME account should be created (if it doesn't exist) and the contact data (email address) should be updated. Default value is `true`. Set to `false` if you want to use the `acme_account` module to manage your ACME account (not done by this role).
-- `acme_certificate_privatekey_mode`: Which file mode to use for the private key file. Default value is `"0600"`, which means read- and writeable by the owner, but not accessible by anyone else (except possibly `root`).
-
-### HTTP Challenges
-
-For HTTP challenges, the following variables define how the challenges can be put onto the (remote) webserver:
-
-- `acme_certificate_server_location`: Location where `.well-known/acme-challenge/` will be served from. Default is `/var/www/challenges`.
-- `acme_certificate_http_become`: Argument for `become:` for the `file` and `copy` tasks. Default value is `false`.
-- `acme_certificate_http_challenge_user`: The user the challenge files are owned by. Default value is `root`.
-- `acme_certificate_http_challenge_group`: The group the challenge files are owned by. Default value is `http`.
-- `acme_certificate_http_challenge_folder_mode`: The mode to use for the challenge folder. Default value is `0750` (octal).
-- `acme_certificate_http_challenge_file_mode`: The mode to use for the challenge files. Default value is `0640` (octal).
-
-The following subsection shows how to configure [nginx](https://nginx.org/) for HTTP challenges. Configuring other webservers can be done in a similar way.
-
-#### Nginx configuration
-
-Assume that for one of your TLS/SSL protected domains, you use a HTTP-to-HTTPS redirect. Let's assume it looks like this:
-
- server {
- listen example.com:80;
- server_name example.com *.example.com;
- return 301 https://www.example.com$request_uri;
- }
-
-To allow the `acme_certificate` role to put something at `http://*.example.com/.well-known/acme-challenge/`, you can change this to:
-
- server {
- listen example.com:80;
- server_name example.com *.example.com;
- location /.well-known/acme-challenge/ {
- alias /var/www/challenges/;
- try_files $uri =404;
- }
- location / {
- return 301 https://www.example.com$request_uri;
- }
- }
-
-With this nginx config, all other URLs on `*.example.com` and `example.com` are still redirected, while everything in `*.example.com/.well-known/acme-challenge/` is served from `/var/www/challenges`. When adjusting the location of `/var/www/challenges`, you must also change `acme_certificate_server_location`.
-
-You can even improve on this by redirecting all URLs in `*.example.com/.well-known/acme-challenge/` which do not resolve to a valid file in `/var/www/challenges` to your HTTPS server as well. One way to do this is:
-
- server {
- listen example.com:80;
- server_name example.com *.example.com;
- location /.well-known/acme-challenge/ {
- alias /var/www/lechallenges/;
- try_files $uri @forward_https;
- }
- location @forward_https {
- return 301 https://www.example.com$request_uri;
- }
- location / {
- return 301 https://www.example.com$request_uri;
- }
- }
-
-With this config, if `/var/www/challenges/` is empty, your HTTP server will behave as if the `/.well-known/acme-challenge/` location isn't specified.
-
-### DNS Challenges
-
-If DNS challenges are used, the following variables define how the challenges can be fulfilled:
-
-- `acme_certificate_dns_provider`: must be one of `route53`, `hosttech`, and `ns1`. Each needs more information:
- - For `route53` (Amazon Route 53), the credentials must be passed as `acme_certificate_aws_access_key` and `acme_certificate_aws_secret_key`.
- - For `hosttech` (hosttech GmbH, requires external [hosttech_dns_record module](https://github.com/felixfontein/ansible-hosttech)).
- - For `ns1` ([ns1.com](https://ns1.com)) the key for your API account must be passed as `acme_certificate_ns1_secret_key`. Also it depends on external module `ns1_record`. Assuming default directory structure and settings, you may need download 2 files into machine where playbook executed:
-
- ```bash
- curl --create-dirs -L -o ~/.ansible/plugins/module_utils/ns1.py https://github.com/ns1/ns1-ansible-modules/raw/master/module_utils/ns1.py
- curl --create-dirs -L -o ~/.ansible/plugins/modules/ns1_record.py https://github.com/ns1/ns1-ansible-modules/raw/master/library/ns1_record.py
- ```
-
-Please note that the DNS challenge code is not perfect. The Route 53, Hosttech and NS1 functionality has been tested. One thing that is not complete yet is that the code tries to extract the DNS zone from the domain by taking the last two components separated by dots. This will fail for example for `.co.uk` domains or other nested zones.
-
-Support for more DNS providers can be added by adding `tasks/dns-NAME-create.yml` and `tasks/dns-NAME-cleanup.yml` files with similar content as in the existing files.
-
-## Account key conversion
-
-Note that this Ansible role expects the Let's Encrypt account key to be in PEM format and not in JWK format, which is used by the [official Let's Encrypt client certbot](https://github.com/certbot/certbot). If you have created an account key with the official client and now want to use this key with this ansible role, you have to convert it. One tool which can do this is [pem-jwk](https://github.com/dannycoates/pem-jwk).
-
-## Generated Files
-
-Let's assume you created TLS keys for `www.example.com`. You have to copy the relevant files to your webserver. The ansible role created the following files:
-
- * `keys/www.example.com.key`: this is the private key for the certificate. Ensure nobody can access it.
- * `keys/www.example.com.pem`: this is the certificate itself.
- * `keys/www.example.com-chain.pem`: this is the intermediate certificate(s) needed for a trust path.
- * `keys/www.example.com.cnf`: this is an OpenSSL configuration file used to create the Certificate Signing Request. You can safely delete it.
- * `keys/www.example.com.csr`: this is the Certificate Signing Request used to obtain the certificate. You can safely delete it.
- * `keys/www.example.com-fullchain.pem`: this is the certificate combined with the intermediate certificate(s).
- * `keys/www.example.com-rootchain.pem`: this is the intermediate certificate(s) combined with the root certificate. You might need this for OCSP stapling.
- * `keys/www.example.com-root.pem`: this is the root certificate of Let's Encrypt.
-
-For configuring your webserver, you need the private key (`keys/www.example.com.key`), and either the certificate with intermediate certificate(s) combined in one file (`keys/www.example.com-fullchain.pem`), or the certificate and the intermediate certificate(s) as two separate files (`keys/www.example.com.pem` and `keys/www.example.com-chain.pem`). If you want to use [OCSP stapling](https://en.wikipedia.org/wiki/OCSP_stapling), you will also need `keys/www.example.com-rootchain.pem`.
-
-To get these files onto your web server, you could add tasks as follows:
-
- - name: copy private keys
- copy:
- src: keys/{{ item }}
- dest: /etc/ssl/private/
- owner: root
- group: root
- mode: "0400"
- with_items:
- - www.example.com.key
- notify: reload webserver
-
- - name: copy certificates
- copy:
- src: keys/{{ item }}
- dest: /etc/ssl/server-certs/
- owner: root
- group: root
- mode: "0444"
- with_items:
- - www.example.com-rootchain.pem
- - www.example.com-fullchain.pem
- - www.example.com.pem
- notify: reload webserver
-
-The webserver configuration could look as follows (for nginx):
-
- server {
- listen www.example.com:443 ssl; # IPv4: listen to IP www.example.com points to
- listen [::]:443 ssl; # IPv6: listen to localhost
- server_name www.example.com;
-
- # Allowing only TLS 1.0 and 1.2, with a very selective amount of ciphers.
- # According to SSL Lab's SSL server test, this will block:
- # - Android 2.3.7
- # - IE 6 and 8 under Windows XP
- # - Java 6, 7 and 8
- # If that's not acceptable for you, choose other cipher lists. Look for
- # example at https://wiki.mozilla.org/Security/Server_Side_TLS
- ssl_protocols TLSv1.2 TLSv1;
- ssl_prefer_server_ciphers on;
- ssl_ciphers "-ALL !ADH !aNULL !EXP !EXPORT40 !EXPORT56 !RC4 !3DES !eNULL !NULL !DES !MD5 !LOW ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-GCM-SHA384 ECDHE-ECDSA-AES256-SHA384 ECDHE-RSA-AES256-SHA384 DHE-RSA-AES256-SHA256 ECDHE-ECDSA-AES256-SHA ECDHE-RSA-AES256-SHA DHE-RSA-AES256-SHA";
-
- # The certificate chain sent to the browser, as well as the private key.
- # Make sure your private key is only accessible by the webserver during
- # configuration loading (which by default is done with user root).
- ssl_certificate /etc/ssl/server-certs/www.example.com-fullchain.pem;
- ssl_certificate_key /etc/ssl/private/www.example.com.key;
-
- # For OCSP stapling, we need a DNS resolver. Here only public Quad9 and
- # Google DNS servers are specified; I would prepent them by your hoster's
- # DNS servers. You can usually find their IPs in /etc/resolv.conf on your
- # webserver.
- resolver 9.9.9.9 8.8.8.8 8.8.4.4 valid=300s;
- resolver_timeout 10s;
-
- # Enabling OCSP stapling. Nginx will take care of retrieving the OCSP data
- # automatically. See https://wiki.mozilla.org/Security/Server_Side_TLS#OCSP_Stapling
- # for details on OCSP stapling.
- ssl_stapling on;
- ssl_stapling_verify on;
- ssl_trusted_certificate /etc/ssl/server-certs/www.example.com-rootchain.pem;
-
- # Enables a SSL session cache. Adjust the numbers depending on your site's usage.
- ssl_session_cache shared:SSL:50m;
- ssl_session_timeout 30m;
- ssl_session_tickets off;
-
- # You should only use HSTS with proper certificates; the ones from Let's Encrypt
- # are fine for this, self-signed ones are not. See MozillaWiki for more details:
- # https://wiki.mozilla.org/Security/Server_Side_TLS#HSTS:_HTTP_Strict_Transport_Security
- add_header Strict-Transport-Security "max-age=3155760000;";
-
- charset utf-8;
-
- access_log /var/log/nginx/www.example.com.log combined;
- error_log /var/log/nginx/www.example.com.log error;
-
- location / {
- root /var/www/www.example.com;
- index index.html;
- }
- }
-
-## Dependencies
-
-This role doesn't depend on other roles.
-
-## Example Playbook
-
-This role can be used as follows. Note that it obtains several certificates, and defines variables used for all certificates globally:
-
- ---
- - name: getting certificates for webserver
- hosts: webserver
- vars:
- acme_certificate_acme_account: 'keys/acme-account.key'
- acme_certificate_acme_email: 'mail@example.com'
- # For HTTP challenges:
- acme_certificate_server_location: '/var/www/challenges/'
- acme_certificate_http_challenge_user: root
- acme_certificate_http_challenge_group: http
- acme_certificate_http_challenge_folder_mode: "0750"
- acme_certificate_http_challenge_file_mode: "0640"
- # For DNS challenges with route53:
- acme_certificate_dns_provider: route53
- acme_certificate_aws_access_key: REPLACE_WITH_YOUR_ACCESS_KEY
- acme_certificate_aws_secret_key: REPLACE_WITH_YOUR_SECRET_KEY
- # For DNS challenges with ns1:
- # acme_certificate_dns_provider: ns1
- # acme_certificate_ns1_secret_key: REPLACE_WITH_YOUR_SECRET_KEY
- roles:
- - role: acme_certificate
- acme_certificate_domains: ['example.com', 'www.example.com']
- # Use DNS challenges:
- acme_certificate_challenge: dns-01
- # The certificate files will be stored at:
- # keys/example.com.key (private key)
- # keys/example.com.csr (certificate signing request)
- # keys/example.com.pem (certificate)
- # keys/example.com.cnf (OpenSSL config for CSR creation -- can be safely deleted)
- # keys/example.com-chain.pem (intermediate certificate)
- # keys/example.com-fullchain.pem (certificate with intermediate certificate)
- # keys/example.com-root.pem (root certificate)
- # keys/example.com-rootchain.pem (intermediate certificate with root certificate)
- - role: acme_certificate
- acme_certificate_domains: ['another.example.com']
- acme_certificate_key_name: 'another.example.com-rsa'
- acme_certificate_key_length: 4096
- # Use HTTP challenges:
- acme_certificate_challenge: http-01
- # The certificate files will be stored at:
- # keys/another.example.com-rsa.key (private key)
- # keys/another.example.com-rsa.csr (certificate signing request)
- # keys/another.example.com-rsa.pem (certificate)
- # keys/another.example.com-rsa.cnf (OpenSSL config for CSR creation -- can be safely deleted)
- # keys/another.example.com-rsa-chain.pem (intermediate certificate)
- # keys/another.example.com-rsa-fullchain.pem (certificate with intermediate certificate)
- # keys/another.example.com-rsa-root.pem (root certificate)
- # keys/another.example.com-rsa-rootchain.pem (intermediate certificate with root certificate)
- - role: acme_certificate
- acme_certificate_domains: ['another.example.com']
- acme_certificate_key_name: 'another.example.com-ecc'
- acme_certificate_algorithm: 'p-256'
- # Use HTTP challenges (default for challenge is http-01).
- # The certificate files will be stored at:
- # keys/another.example.com-ecc.key (private key)
- # keys/another.example.com-ecc.csr (certificate signing request)
- # keys/another.example.com-ecc.pem (certificate)
- # keys/another.example.com-ecc.cnf (OpenSSL config for CSR creation -- can be safely deleted)
- # keys/another.example.com-ecc-chain.pem (intermediate certificate)
- # keys/another.example.com-ecc-fullchain.pem (certificate with intermediate certificate)
- # keys/another.example.com-ecc-root.pem (root certificate)
- # keys/another.example.com-ecc-rootchain.pem (intermediate certificate with root certificate)
-
-## License
-
-The MIT License (MIT)
-
-Copyright (c) 2018-2020 Felix Fontein
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-## Author Information
-
-The homepage for this role is https://github.com/felixfontein/acme-certificate/. Please use the issue tracker to report problems.
diff --git a/roles/felixfontein.acme_certificate/ansible.cfg b/roles/felixfontein.acme_certificate/ansible.cfg
deleted file mode 100644
index 613d83b..0000000
--- a/roles/felixfontein.acme_certificate/ansible.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[defaults]
-roles_path = ../
diff --git a/roles/felixfontein.acme_certificate/defaults/main.yml b/roles/felixfontein.acme_certificate/defaults/main.yml
deleted file mode 100644
index 477082d..0000000
--- a/roles/felixfontein.acme_certificate/defaults/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-acme_certificate_domains: '{{ domains }}'
-acme_certificate_dns_provider: '{{ dns_provider }}'
-acme_certificate_acme_account: '{{ acme_account }}'
-acme_certificate_acme_email: '{{ acme_email }}'
-
-acme_certificate_algorithm: '{{ algorithm | default("rsa") }}'
-acme_certificate_key_length: '{{ key_length | default(4096) }}'
-acme_certificate_key_name: "{{ key_name | default(acme_certificate_domains[0].replace('*', '_')) }}"
-acme_certificate_keys_path: '{{ keys_path | default("keys/") }}'
-acme_certificate_keys_old_path: '{{ keys_old_path | default("keys/old/") }}'
-acme_certificate_keys_old_store: '{{ keys_old_store | default(false) }}'
-acme_certificate_keys_old_prepend_timestamp: '{{ keys_old_prepend_timestamp | default(false) }}'
-acme_certificate_ocsp_must_staple: '{{ ocsp_must_staple | default(false) }}'
-acme_certificate_terms_agreed: '{{ terms_agreed | default(true) }}'
-acme_certificate_acme_directory: '{{ acme_directory | default("https://acme-v02.api.letsencrypt.org/directory") }}'
-acme_certificate_acme_version: '{{ acme_version | default(2) }}'
-# For ACME v1:
-# acme_certificate_acme_directory: https://acme-v01.api.letsencrypt.org/directory
-# acme_certificate_acme_version: 1
-# For staging, use:
-# acme_certificate_acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory (ACME v2)
-# acme_certificate_acme_directory: https://acme-staging.api.letsencrypt.org/directory (ACME v1)
-acme_certificate_challenge: '{{ challenge | default("http-01") }}'
-acme_certificate_root_certificate: '{{ root_certificate | default("https://letsencrypt.org/certs/isrgrootx1.pem") }}'
-# For staging, use:
-# root_certificate: https://letsencrypt.org/certs/fakelerootx1.pem
-acme_certificate_deactivate_authzs: '{{ deactivate_authzs | default(true) }}'
-acme_certificate_modify_account: '{{ modify_account | default(true) }}'
-acme_certificate_validate_certs: '{{ validate_certs | default(true) }}'
-acme_certificate_verify_certs: '{{ verify_certs | default(true) }}'
-acme_certificate_privatekey_mode: '{{ privatekey_mode | default("0600") }}'
-
-# For HTTP challenges:
-acme_certificate_server_location: '{{ server_location | default("/var/www/challenges") }}'
-acme_certificate_http_become: '{{ http_become | default(false) }}'
-acme_certificate_http_challenge_user: '{{ http_challenge_user | default("root") }}'
-acme_certificate_http_challenge_group: '{{ http_challenge_group | default("http") }}'
-acme_certificate_http_challenge_folder_mode: '{{ http_challenge_folder_mode | default("0750") }}'
-acme_certificate_http_challenge_file_mode: '{{ http_challenge_file_mode | default("0640") }}'
-
-# DNS challenge credentials
-acme_certificate_hosttech_username: '{{ hosttech_username | default(omit) }}'
-acme_certificate_hosttech_password: '{{ hosttech_password | default(omit) }}'
-acme_certificate_ns1_secret_key: '{{ ns1_secret_key | default(omit) }}'
-acme_certificate_aws_access_key: '{{ aws_access_key | default(omit) }}'
-acme_certificate_aws_secret_key: '{{ aws_secret_key | default(omit) }}'
diff --git a/roles/felixfontein.acme_certificate/filter_plugins/path_filter.py b/roles/felixfontein.acme_certificate/filter_plugins/path_filter.py
deleted file mode 100644
index fdb0aa7..0000000
--- a/roles/felixfontein.acme_certificate/filter_plugins/path_filter.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import os.path
-
-
-def path_join(list):
- return os.path.join(*list)
-
-
-class FilterModule(object):
- ''' Ansible core jinja2 filters '''
-
- def filters(self):
- return {
- 'path_join': path_join,
- }
diff --git a/roles/felixfontein.acme_certificate/meta/.galaxy_install_info b/roles/felixfontein.acme_certificate/meta/.galaxy_install_info
deleted file mode 100644
index ef65773..0000000
--- a/roles/felixfontein.acme_certificate/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Fri Oct 15 18:59:51 2021
-version: 1.1.1
diff --git a/roles/felixfontein.acme_certificate/meta/main.yml b/roles/felixfontein.acme_certificate/meta/main.yml
deleted file mode 100644
index d2f6423..0000000
--- a/roles/felixfontein.acme_certificate/meta/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-galaxy_info:
- role_name: acme_certificate
- author: Felix Fontein
- description: >
- Wrapper of Ansible's included acme_certificate module, whose aim is that almost no code
- is executed on the webserver. Requires the Python cryptography library as well as the
- OpenSSL binary installed locally and available on executable path.
-
- license: MIT
-
- min_ansible_version: 2.8.3
-
- galaxy_tags:
- - acme
- - letsencrypt
- - buypass
- - ssl
- - tls
- - https
- - encryption
- - security
- - web
-
-dependencies: []
diff --git a/roles/felixfontein.acme_certificate/sample-playbook.yml b/roles/felixfontein.acme_certificate/sample-playbook.yml
deleted file mode 100644
index d106f6b..0000000
--- a/roles/felixfontein.acme_certificate/sample-playbook.yml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-- name: getting certificates for webserver
- hosts: webserver
- vars:
- acme_certificate_acme_account: 'keys/acme-account.key'
- acme_certificate_acme_email: 'mail@example.com'
- # For HTTP challenges:
- acme_certificate_server_location: '/var/www/challenges/'
- acme_certificate_http_challenge_user: root
- acme_certificate_http_challenge_group: http
- acme_certificate_http_challenge_folder_mode: "0750"
- acme_certificate_http_challenge_file_mode: "0640"
- # For DNS challenges with route53:
- acme_certificate_dns_provider: route53
- acme_certificate_aws_access_key: REPLACE_WITH_YOUR_ACCESS_KEY
- acme_certificate_aws_secret_key: REPLACE_WITH_YOUR_SECRET_KEY
- # For DNS challenges with ns1:
- # acme_certificate_dns_provider: ns1
- # acme_certificate_ns1_secret_key: REPLACE_WITH_YOUR_SECRET_KEY
- roles:
- - role: acme_certificate
- acme_certificate_domains: ['example.com', 'www.example.com']
- # Use DNS challenges:
- acme_certificate_challenge: dns-01
- # The certificate files will be stored at:
- # keys/example.com.key (private key)
- # keys/example.com.csr (certificate signing request)
- # keys/example.com.pem (certificate)
- # keys/example.com.cnf (OpenSSL config for CSR creation -- can be safely deleted)
- # keys/example.com-chain.pem (intermediate certificate)
- # keys/example.com-fullchain.pem (certificate with intermediate certificate)
- # keys/example.com-root.pem (root certificate)
- # keys/example.com-rootchain.pem (intermediate certificate with root certificate)
- - role: acme_certificate
- acme_certificate_domains: ['another.example.com']
- acme_certificate_key_name: 'another.example.com-rsa'
- acme_certificate_key_length: 4096
- # Use HTTP challenges:
- acme_certificate_challenge: http-01
- # The certificate files will be stored at:
- # keys/another.example.com-rsa.key (private key)
- # keys/another.example.com-rsa.csr (certificate signing request)
- # keys/another.example.com-rsa.pem (certificate)
- # keys/another.example.com-rsa.cnf (OpenSSL config for CSR creation -- can be safely deleted)
- # keys/another.example.com-rsa-chain.pem (intermediate certificate)
- # keys/another.example.com-rsa-fullchain.pem (certificate with intermediate certificate)
- # keys/another.example.com-rsa-root.pem (root certificate)
- # keys/another.example.com-rsa-rootchain.pem (intermediate certificate with root certificate)
- - role: acme_certificate
- acme_certificate_domains: ['another.example.com']
- acme_certificate_key_name: 'another.example.com-ecc'
- acme_certificate_algorithm: 'p-256'
- # Use HTTP challenges (default for challenge is http-01).
- # The certificate files will be stored at:
- # keys/another.example.com-ecc.key (private key)
- # keys/another.example.com-ecc.csr (certificate signing request)
- # keys/another.example.com-ecc.pem (certificate)
- # keys/another.example.com-ecc.cnf (OpenSSL config for CSR creation -- can be safely deleted)
- # keys/another.example.com-ecc-chain.pem (intermediate certificate)
- # keys/another.example.com-ecc-fullchain.pem (certificate with intermediate certificate)
- # keys/another.example.com-ecc-root.pem (root certificate)
- # keys/another.example.com-ecc-rootchain.pem (intermediate certificate with root certificate)
diff --git a/roles/felixfontein.acme_certificate/tasks/dns-dme-cleanup.yml b/roles/felixfontein.acme_certificate/tasks/dns-dme-cleanup.yml
deleted file mode 100644
index 95d03f6..0000000
--- a/roles/felixfontein.acme_certificate/tasks/dns-dme-cleanup.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# Clean up DNS challenges for DNS provider DNSMadeEasy
-- name: Cleaning up challenge DNS entries for domains {{ ', '.join(domains) }} via DNSMadeEasy
- connection: local
- community.general.dnsmadeeasy:
- account_key: "{{ dme_account_key }}"
- account_secret: "{{ dme_account_secret }}"
- domain: "{{ item.key |regex_replace('^(?:.*\\.|)([^.]+\\.[^.]+)$', '\\1') }}"
- record_ttl: 60
- record_type: TXT
- record_name: "{{ item.key |regex_replace('^(.*)(\\.[^.]+\\.[^.]+)$', '\\1') }}"
- record_value: "{{ item.value|first }}"
- state: absent
- run_once: True
- with_dict: "{{ acme_certificate_INTERNAL_challenge.challenge_data_dns }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
diff --git a/roles/felixfontein.acme_certificate/tasks/dns-dme-create.yml b/roles/felixfontein.acme_certificate/tasks/dns-dme-create.yml
deleted file mode 100644
index aa7e391..0000000
--- a/roles/felixfontein.acme_certificate/tasks/dns-dme-create.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# Create DNS challenges for DNS provider Amazon Route53
-- name: Creating challenge DNS entries for domains {{ ', '.join(domains) }} via DNSMadeEasy
- connection: local
- community.general.dnsmadeeasy:
- account_key: "{{ dme_account_key }}"
- account_secret: "{{ dme_account_secret }}"
- # This is fragile, and will only work for 2-level domain (eg: corp.com, NOT corp.co.uk )
- domain: "{{ item.key | regex_replace('^(?:.*\\.|)([^.]+\\.[^.]+)$', '\\1') }}"
- record_ttl: 60
- record_type: TXT
- record_name: "{{ item.key |regex_replace('^(.*)(\\.[^.]+\\.[^.]+)$', '\\1') }}"
- record_value: "{{ item.value|first }}"
- state: present
- # Need dnsmadeeasy module fixed (https://github.com/ansible/ansible/issues/58305)
- run_once: True
- with_dict: "{{ acme_certificate_INTERNAL_challenge.challenge_data_dns }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
-
-- name: Wait for DNS entries to become available
- shell: "dig txt {{ item.key }} +short @8.8.8.8"
- register: dig_result
- until: "item.value|first in dig_result.stdout"
- retries: 60
- delay: 5
- with_dict: "{{ acme_certificate_INTERNAL_challenge.challenge_data_dns }}"
-
-- name: Pause for 60s for more propagation
- pause:
- minutes: 1
diff --git a/roles/felixfontein.acme_certificate/tasks/dns-hosttech-cleanup.yml b/roles/felixfontein.acme_certificate/tasks/dns-hosttech-cleanup.yml
deleted file mode 100644
index a4b6482..0000000
--- a/roles/felixfontein.acme_certificate/tasks/dns-hosttech-cleanup.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# Clean up DNS challenges for DNS provider HostTech
-- name: Cleaning up challenge DNS entries for domains {{ ', '.join(acme_certificate_domains) }} via HostTech API
- hosttech_dns_record:
- state: absent
- zone: "{{ item.key | regex_replace('^(?:.*\\.|)([^.]+\\.[^.]+)$', '\\1') }}"
- record: "{{ item.key }}"
- type: TXT
- ttl: 300
- value: "{{ item.value }}"
- overwrite: true
- hosttech_username: "{{ acme_certificate_hosttech_username }}"
- hosttech_password: "{{ acme_certificate_hosttech_password }}"
- delegate_to: localhost
- run_once: true
- with_dict: "{{ acme_certificate_INTERNAL_challenge.get('challenge_data_dns', {}) }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
diff --git a/roles/felixfontein.acme_certificate/tasks/dns-hosttech-create.yml b/roles/felixfontein.acme_certificate/tasks/dns-hosttech-create.yml
deleted file mode 100644
index ed4accb..0000000
--- a/roles/felixfontein.acme_certificate/tasks/dns-hosttech-create.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Create DNS challenges for DNS provider HostTech
-- name: Creating challenge DNS entries for domains {{ ', '.join(acme_certificate_domains) }} via HostTech API
- hosttech_dns_record:
- state: present
- zone: "{{ item.key | regex_replace('^(?:.*\\.|)([^.]+\\.[^.]+)$', '\\1') }}"
- record: "{{ item.key }}"
- type: TXT
- ttl: 300
- value: "{{ item.value }}"
- overwrite: true
- hosttech_username: "{{ acme_certificate_hosttech_username }}"
- hosttech_password: "{{ acme_certificate_hosttech_password }}"
- delegate_to: localhost
- run_once: true
- with_dict: "{{ acme_certificate_INTERNAL_challenge.challenge_data_dns }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
-
-- name: Wait for DNS entries to propagate
- pause:
- seconds: 10
diff --git a/roles/felixfontein.acme_certificate/tasks/dns-ns1-cleanup.yml b/roles/felixfontein.acme_certificate/tasks/dns-ns1-cleanup.yml
deleted file mode 100644
index 7a9e331..0000000
--- a/roles/felixfontein.acme_certificate/tasks/dns-ns1-cleanup.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Cleaning up challenge DNS entries for domains {{ ', '.join(acme_certificate_domains) }} via NS1 API
- ns1_record:
- apiKey: "{{ acme_certificate_ns1_secret_key }}"
- name: "{{ item.key }}"
- zone: "{{ item.key | regex_replace('^(?:.*\\.|)([^.]+\\.[^.]+)$', '\\1') }}"
- state: absent
- type: TXT
- answers: []
- delegate_to: localhost
- run_once: true
- when: "'_acme-challenge' in item.key"
- with_dict: "{{ acme_certificate_INTERNAL_challenge.get('challenge_data_dns', {}) }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
diff --git a/roles/felixfontein.acme_certificate/tasks/dns-ns1-create.yml b/roles/felixfontein.acme_certificate/tasks/dns-ns1-create.yml
deleted file mode 100644
index 3f4a220..0000000
--- a/roles/felixfontein.acme_certificate/tasks/dns-ns1-create.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Creating challenge DNS entries for domains {{ ', '.join(acme_certificate_domains) }} via NS1 DNS
- ns1_record:
- apiKey: "{{ acme_certificate_ns1_secret_key }}"
- name: "{{ item.key }}"
- zone: "{{ item.key | regex_replace('^(?:.*\\.|)([^.]+\\.[^.]+)$', '\\1') }}"
- state: present
- type: TXT
- answers:
- - answer:
- - "{{ item.value[0] }}"
- meta:
- up: true
- delegate_to: localhost
- when: "'_acme-challenge' in item.key"
- run_once: true
- with_dict: "{{ acme_certificate_INTERNAL_challenge.challenge_data_dns }}"
-
-- name: Check if DNS changes propagated at dns1.p01.nsone.net with 10-seconds intervals
- command: "dig TXT {{ item.key }} +short @dns1.p01.nsone.net"
- register: dig
- until: "item.value[0] in dig.stdout"
- with_dict: "{{ acme_certificate_INTERNAL_challenge.challenge_data_dns }}"
- retries: 6
- delay: 10
- changed_when: false
- ignore_errors: yes
\ No newline at end of file
diff --git a/roles/felixfontein.acme_certificate/tasks/dns-route53-cleanup.yml b/roles/felixfontein.acme_certificate/tasks/dns-route53-cleanup.yml
deleted file mode 100644
index c6dcc4a..0000000
--- a/roles/felixfontein.acme_certificate/tasks/dns-route53-cleanup.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# Clean up DNS challenges for DNS provider Amazon Route53
-- name: Cleaning up challenge DNS entries for domains {{ ', '.join(acme_certificate_domains) }} via Route53
- route53:
- state: absent
- zone: "{{ item.key | regex_replace('^(?:.*\\.|)([^.]+\\.[^.]+)$', '\\1') }}"
- record: "{{ item.key }}"
- type: TXT
- ttl: 60
- value: "{{ item.value | map('regex_replace', '^(.*)$', '\"\\1\"' ) | list }}"
- overwrite: true
- aws_access_key: "{{ acme_certificate_aws_access_key }}"
- aws_secret_key: "{{ acme_certificate_aws_secret_key }}"
- delegate_to: localhost
- run_once: true
- with_dict: "{{ acme_certificate_INTERNAL_challenge.get('challenge_data_dns', {}) }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
diff --git a/roles/felixfontein.acme_certificate/tasks/dns-route53-create.yml b/roles/felixfontein.acme_certificate/tasks/dns-route53-create.yml
deleted file mode 100644
index e80ce08..0000000
--- a/roles/felixfontein.acme_certificate/tasks/dns-route53-create.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-# Create DNS challenges for DNS provider Amazon Route53
-- name: Creating challenge DNS entries for domains {{ ', '.join(acme_certificate_domains) }} via Route53
- route53:
- state: present
- zone: "{{ item.key | regex_replace('^(?:.*\\.|)([^.]+\\.[^.]+)$', '\\1') }}"
- record: "{{ item.key }}"
- type: TXT
- ttl: 60
- value: "{{ item.value | map('regex_replace', '^(.*)$', '\"\\1\"' ) | list }}"
- overwrite: true
- aws_access_key: "{{ acme_certificate_aws_access_key }}"
- aws_secret_key: "{{ acme_certificate_aws_secret_key }}"
- wait: true
- delegate_to: localhost
- run_once: true
- with_dict: "{{ acme_certificate_INTERNAL_challenge.challenge_data_dns }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
diff --git a/roles/felixfontein.acme_certificate/tasks/http-cleanup.yml b/roles/felixfontein.acme_certificate/tasks/http-cleanup.yml
deleted file mode 100644
index 8243cc8..0000000
--- a/roles/felixfontein.acme_certificate/tasks/http-cleanup.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# Clean up challenge files on server.
-- name: "Cleaning up challenge files for domains {{ ', '.join(acme_certificate_domains) }}"
- file:
- path: >-
- {{ [
- acme_certificate_server_location,
- item.value[acme_certificate_challenge].resource[('.well-known/acme-challenge/'|length):]
- ] | path_join }}"
- state: absent
- with_dict: "{{ acme_certificate_INTERNAL_challenge.get('acme_certificate_challenge_data', {}) }}"
- become: "{{ acme_certificate_http_become }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
diff --git a/roles/felixfontein.acme_certificate/tasks/http-create.yml b/roles/felixfontein.acme_certificate/tasks/http-create.yml
deleted file mode 100644
index 1cdf343..0000000
--- a/roles/felixfontein.acme_certificate/tasks/http-create.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-# Create up challenge files directory on server.
-- name: Creating challenge destination directory
- file:
- dest: "{{ acme_certificate_server_location }}"
- state: directory
- owner: "{{ acme_certificate_http_challenge_user }}"
- group: "{{ acme_certificate_http_challenge_group }}"
- mode: "{{ acme_certificate_http_challenge_folder_mode }}"
- become: "{{ acme_certificate_http_become }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
-
-# Create challenge files on server.
-- name: "Copying challenge files for domains {{ ', '.join(acme_certificate_domains) }}"
- copy:
- dest: >-
- {{ [
- acme_certificate_server_location,
- item.value[acme_certificate_challenge].resource[('.well-known/acme-challenge/'|length):]
- ] | path_join }}
- content: "{{ item.value[acme_certificate_challenge].resource_value }}"
- owner: "{{ acme_certificate_http_challenge_user }}"
- group: "{{ acme_certificate_http_challenge_group }}"
- mode: "{{ acme_certificate_http_challenge_file_mode }}"
- with_dict: "{{ acme_certificate_INTERNAL_challenge.challenge_data }}"
- become: "{{ acme_certificate_http_become }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
diff --git a/roles/felixfontein.acme_certificate/tasks/main.yml b/roles/felixfontein.acme_certificate/tasks/main.yml
deleted file mode 100644
index 5a5f239..0000000
--- a/roles/felixfontein.acme_certificate/tasks/main.yml
+++ /dev/null
@@ -1,189 +0,0 @@
----
-- name: Determine whether to force private key regeneration (1/2)
- set_fact:
- acme_certificate_INTERNAL_force_regenerate_private_key: no
-
-- name: Determine whether to force private key regeneration (2/2)
- set_fact:
- acme_certificate_INTERNAL_force_regenerate_private_key: yes
- tags:
- - issue-tls-certs-newkey
-
-- block:
- - name: Ansible version check
- assert:
- that: "ansible_version.string is version('2.8.3', '>=')"
- msg: "This version of the acme-certificate role must be used with Ansible 2.8.3 or later."
- run_once: yes
-
- - name: Sanity checks
- assert:
- that: "acme_certificate_challenge != 'dns-01' or acme_certificate_dns_provider is not undefined"
- msg: "acme_certificate_dns_provider must be defined for dns-01 DNS challenge"
- run_once: yes
-
- - name: "Test whether old certificate files for domains {{ ', '.join(acme_certificate_domains) }} exist"
- stat:
- path: "{{ [acme_certificate_keys_path, acme_certificate_key_name] | path_join }}.pem"
- delegate_to: localhost
- register: acme_certificate_INTERNAL_old_certificate_exists
- when: "acme_certificate_keys_old_store"
- run_once: yes
-
- - name: "Copying old certificate files for domains {{ ', '.join(acme_certificate_domains) }}"
- copy:
- src: "{{ [acme_certificate_keys_path, acme_certificate_key_name] | path_join }}{{ item }}"
- dest: >-
- {{ [
- acme_certificate_keys_old_path,
- (
- (ansible_date_time.date ~ '-' ~ ansible_date_time.hour ~ ansible_date_time.minute ~ ansible_date_time.second ~ '-')
- if acme_certificate_keys_old_prepend_timestamp else ''
- ) ~ acme_certificate_key_name ~ item
- ] | path_join }}
- delegate_to: localhost
- with_items:
- - "-chain.pem"
- - "-fullchain.pem"
- - "-rootchain.pem"
- - "-root.pem"
- - ".key"
- - ".pem"
- when: "acme_certificate_keys_old_store and acme_certificate_INTERNAL_old_certificate_exists.stat.exists"
- run_once: yes
-
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
-
-- block:
- - name: "Creating private key for domains {{ ', '.join(acme_certificate_domains) }} (RSA)"
- openssl_privatekey:
- path: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '.key'] | path_join }}"
- mode: "{{ acme_certificate_privatekey_mode }}"
- type: "{{ 'RSA' if acme_certificate_algorithm == 'rsa' else 'ECC' }}"
- size: "{{ acme_certificate_key_length if acme_certificate_algorithm == 'rsa' else omit }}"
- curve: >-
- {{ omit if acme_certificate_algorithm == 'rsa' else
- 'secp256r1' if acme_certificate_algorithm == 'p-256' else
- 'secp384r1' if acme_certificate_algorithm == 'p-384' else
- 'secp521r1' if acme_certificate_algorithm == 'p-521' else
- 'invalid value for acme_certificate_algorithm!' }}
- force: "{{ acme_certificate_INTERNAL_force_regenerate_private_key }}"
- delegate_to: localhost
- run_once: yes
-
- - name: "Creating CSR for domains {{ ', '.join(acme_certificate_domains) }}"
- openssl_csr:
- path: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '.csr'] | path_join }}"
- privatekey_path: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '.key'] | path_join }}"
- subject_alt_name: |
- {{ acme_certificate_domains | map('regex_replace', '^(.*)$', 'DNS:\1' ) | list }}
- ocsp_must_staple: "{{ acme_certificate_ocsp_must_staple }}"
- use_common_name_for_san: no
- force: yes
- delegate_to: localhost
- run_once: yes
-
- - name: "Get root certificate for domains {{ ', '.join(acme_certificate_domains) }}"
- get_url:
- url: "{{ acme_certificate_root_certificate }}"
- dest: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '-root.pem'] | path_join }}"
- force: yes
- validate_certs: "{{ acme_certificate_validate_certs }}"
- delegate_to: localhost
- run_once: yes
-
- - block:
- - name: "Preparing challenges for domains {{ ', '.join(acme_certificate_domains) }}"
- acme_certificate:
- account_key: "{{ acme_certificate_acme_account }}"
- modify_account: "{{ acme_certificate_modify_account }}"
- csr: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '.csr'] | path_join }}"
- dest: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '.pem'] | path_join }}"
- fullchain_dest: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '-fullchain.pem'] | path_join }}"
- chain_dest: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '-chain.pem'] | path_join }}"
- account_email: "{{ acme_certificate_acme_email }}"
- terms_agreed: "{{ acme_certificate_terms_agreed }}"
- challenge: "{{ acme_certificate_challenge }}"
- acme_directory: "{{ acme_certificate_acme_directory }}"
- acme_version: "{{ acme_certificate_acme_version }}"
- force: yes
- validate_certs: "{{ acme_certificate_validate_certs }}"
- delegate_to: localhost
- run_once: yes
- register: acme_certificate_INTERNAL_challenge
-
- always:
- - debug:
- msg: >-
- account URI: {{ acme_certificate_INTERNAL_challenge.get('account_uri') }};
- order URI: {{ acme_certificate_INTERNAL_challenge.get('order_uri') }}
- run_once: yes
-
- - block:
- # Set up HTTP challenges
- - include_tasks: http-create.yml
- when: "acme_certificate_challenge == 'http-01'"
-
- # Set up DNS challenges
- - include_tasks: dns-{{ acme_certificate_dns_provider }}-create.yml
- when: "acme_certificate_challenge == 'dns-01'"
-
- - name: "Getting certificates for domains {{ ', '.join(acme_certificate_domains) }}"
- acme_certificate:
- account_key: "{{ acme_certificate_acme_account }}"
- modify_account: "{{ acme_certificate_modify_account }}"
- csr: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '.csr'] | path_join }}"
- dest: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '.pem'] | path_join }}"
- fullchain_dest: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '-fullchain.pem'] | path_join }}"
- chain_dest: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '-chain.pem'] | path_join }}"
- account_email: "{{ acme_certificate_acme_email }}"
- terms_agreed: "{{ acme_certificate_terms_agreed }}"
- challenge: "{{ acme_certificate_challenge }}"
- acme_directory: "{{ acme_certificate_acme_directory }}"
- acme_version: "{{ acme_certificate_acme_version }}"
- force: yes
- data: "{{ acme_certificate_INTERNAL_challenge }}"
- deactivate_authzs: "{{ acme_certificate_deactivate_authzs }}"
- validate_certs: "{{ acme_certificate_validate_certs }}"
- delegate_to: localhost
- run_once: yes
-
- - name: "Form root chain for domains {{ ', '.join(acme_certificate_domains) }}"
- copy:
- dest: "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '-rootchain.pem'] | path_join }}"
- content: |
- {{ lookup('file', [acme_certificate_keys_path, acme_certificate_key_name ~ '-root.pem'] | path_join) }}
- {{ lookup('file', [acme_certificate_keys_path, acme_certificate_key_name ~ '-chain.pem'] | path_join) }}
- delegate_to: localhost
- run_once: yes
- always:
- # Clean up HTTP challenges
- - include_tasks: http-cleanup.yml
- when: "acme_certificate_challenge == 'http-01'"
-
- # Clean up DNS challenges
- - include_tasks: dns-{{ acme_certificate_dns_provider }}-cleanup.yml
- when: "acme_certificate_challenge == 'dns-01'"
-
- when: acme_certificate_INTERNAL_challenge is changed
-
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
-
-- name: "Verifying certificate for domains {{ ', '.join(acme_certificate_domains) }}"
- command: >-
- openssl verify
- -CAfile "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '-root.pem'] | path_join }}"
- -untrusted "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '-chain.pem'] | path_join }}"
- "{{ [acme_certificate_keys_path, acme_certificate_key_name ~ '.pem'] | path_join }}"
- changed_when: no
- delegate_to: localhost
- run_once: yes
- ignore_errors: "{{ not acme_certificate_verify_certs }}"
- tags:
- - issue-tls-certs-newkey
- - issue-tls-certs
- - verify-tls-certs
diff --git a/roles/geerlingguy.gitlab/.ansible-lint b/roles/geerlingguy.gitlab/.ansible-lint
deleted file mode 100644
index 4464759..0000000
--- a/roles/geerlingguy.gitlab/.ansible-lint
+++ /dev/null
@@ -1,4 +0,0 @@
-skip_list:
- - 'yaml'
- - 'role-name'
- - 'package-latest'
diff --git a/roles/geerlingguy.gitlab/.github/FUNDING.yml b/roles/geerlingguy.gitlab/.github/FUNDING.yml
deleted file mode 100644
index 96b4938..0000000
--- a/roles/geerlingguy.gitlab/.github/FUNDING.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-# These are supported funding model platforms
----
-github: geerlingguy
-patreon: geerlingguy
diff --git a/roles/geerlingguy.gitlab/.github/stale.yml b/roles/geerlingguy.gitlab/.github/stale.yml
deleted file mode 100644
index c7ff127..0000000
--- a/roles/geerlingguy.gitlab/.github/stale.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Configuration for probot-stale - https://github.com/probot/stale
-
-# Number of days of inactivity before an Issue or Pull Request becomes stale
-daysUntilStale: 90
-
-# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
-# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
-daysUntilClose: 30
-
-# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
-onlyLabels: []
-
-# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
-exemptLabels:
- - pinned
- - security
- - planned
-
-# Set to true to ignore issues in a project (defaults to false)
-exemptProjects: false
-
-# Set to true to ignore issues in a milestone (defaults to false)
-exemptMilestones: false
-
-# Set to true to ignore issues with an assignee (defaults to false)
-exemptAssignees: false
-
-# Label to use when marking as stale
-staleLabel: stale
-
-# Limit the number of actions per hour, from 1-30. Default is 30
-limitPerRun: 30
-
-pulls:
- markComment: |-
- This pull request has been marked 'stale' due to lack of recent activity. If there is no further activity, the PR will be closed in another 30 days. Thank you for your contribution!
-
- Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark pull requests as stale.
-
- unmarkComment: >-
- This pull request is no longer marked for closure.
-
- closeComment: >-
- This pull request has been closed due to inactivity. If you feel this is in error, please reopen the pull request or file a new PR with the relevant details.
-
-issues:
- markComment: |-
- This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution!
-
- Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
-
- unmarkComment: >-
- This issue is no longer marked for closure.
-
- closeComment: >-
- This issue has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.
diff --git a/roles/geerlingguy.gitlab/.github/workflows/ci.yml b/roles/geerlingguy.gitlab/.github/workflows/ci.yml
deleted file mode 100644
index f4f3d3d..0000000
--- a/roles/geerlingguy.gitlab/.github/workflows/ci.yml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-name: CI
-'on':
- pull_request:
- push:
- branches:
- - master
- schedule:
- - cron: "0 7 * * 1"
-
-defaults:
- run:
- working-directory: 'geerlingguy.gitlab'
-
-jobs:
-
- lint:
- name: Lint
- runs-on: ubuntu-latest
- steps:
- - name: Check out the codebase.
- uses: actions/checkout@v2
- with:
- path: 'geerlingguy.gitlab'
-
- - name: Set up Python 3.
- uses: actions/setup-python@v2
- with:
- python-version: '3.x'
-
- - name: Install test dependencies.
- run: pip3 install yamllint
-
- - name: Lint code.
- run: |
- yamllint .
-
- molecule:
- name: Molecule
- runs-on: ubuntu-latest
- strategy:
- matrix:
- include:
- - distro: centos7
- playbook: converge.yml
- - distro: ubuntu1804
- playbook: converge.yml
- - distro: debian9
- playbook: converge.yml
- - distro: centos7
- playbook: version.yml
- - distro: ubuntu1804
- playbook: version.yml
-
- steps:
- - name: Check out the codebase.
- uses: actions/checkout@v2
- with:
- path: 'geerlingguy.gitlab'
-
- - name: Set up Python 3.
- uses: actions/setup-python@v2
- with:
- python-version: '3.x'
-
- - name: Install test dependencies.
- run: pip3 install ansible molecule[docker] docker
-
- - name: Run Molecule tests.
- run: molecule test
- env:
- PY_COLORS: '1'
- ANSIBLE_FORCE_COLOR: '1'
- MOLECULE_DISTRO: ${{ matrix.distro }}
- MOLECULE_PLAYBOOK: ${{ matrix.playbook }}
diff --git a/roles/geerlingguy.gitlab/.github/workflows/release.yml b/roles/geerlingguy.gitlab/.github/workflows/release.yml
deleted file mode 100644
index b7821d0..0000000
--- a/roles/geerlingguy.gitlab/.github/workflows/release.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# This workflow requires a GALAXY_API_KEY secret present in the GitHub
-# repository or organization.
-#
-# See: https://github.com/marketplace/actions/publish-ansible-role-to-galaxy
-# See: https://github.com/ansible/galaxy/issues/46
-
-name: Release
-'on':
- push:
- tags:
- - '*'
-
-defaults:
- run:
- working-directory: 'geerlingguy.gitlab'
-
-jobs:
-
- release:
- name: Release
- runs-on: ubuntu-latest
- steps:
- - name: Check out the codebase.
- uses: actions/checkout@v2
- with:
- path: 'geerlingguy.gitlab'
-
- - name: Set up Python 3.
- uses: actions/setup-python@v2
- with:
- python-version: '3.x'
-
- - name: Install Ansible.
- run: pip3 install ansible-base
-
- - name: Trigger a new import on Galaxy.
- run: ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }} $(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2)
diff --git a/roles/geerlingguy.gitlab/.gitignore b/roles/geerlingguy.gitlab/.gitignore
deleted file mode 100644
index 8840c8f..0000000
--- a/roles/geerlingguy.gitlab/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.retry
-*/__pycache__
-*.pyc
-.cache
-
diff --git a/roles/geerlingguy.gitlab/.yamllint b/roles/geerlingguy.gitlab/.yamllint
deleted file mode 100644
index 84ecaec..0000000
--- a/roles/geerlingguy.gitlab/.yamllint
+++ /dev/null
@@ -1,10 +0,0 @@
----
-extends: default
-
-rules:
- line-length:
- max: 180
- level: warning
-
-ignore: |
- .github/stale.yml
diff --git a/roles/geerlingguy.gitlab/LICENSE b/roles/geerlingguy.gitlab/LICENSE
deleted file mode 100644
index 4275cf3..0000000
--- a/roles/geerlingguy.gitlab/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017 Jeff Geerling
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/roles/geerlingguy.gitlab/README.md b/roles/geerlingguy.gitlab/README.md
deleted file mode 100644
index 63463e9..0000000
--- a/roles/geerlingguy.gitlab/README.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# Ansible Role: GitLab
-
-[](https://github.com/geerlingguy/ansible-role-gitlab/actions?query=workflow%3ACI)
-
-Installs GitLab, a Ruby-based front-end to Git, on any RedHat/CentOS or Debian/Ubuntu linux system.
-
-GitLab's default administrator account details are below; be sure to login immediately after installation and change these credentials!
-
- root
- 5iveL!fe
-
-## Requirements
-
-None.
-
-## Role Variables
-
-Available variables are listed below, along with default values (see `defaults/main.yml`):
-
- gitlab_domain: gitlab
- gitlab_external_url: "https://{{ gitlab_domain }}/"
-
-The domain and URL at which the GitLab instance will be accessible. This is set as the `external_url` configuration setting in `gitlab.rb`, and if you want to run GitLab on a different port (besides 80/443), you can specify the port here (e.g. `https://gitlab:8443/` for port 8443).
-
- gitlab_git_data_dir: "/var/opt/gitlab/git-data"
-
-The `gitlab_git_data_dir` is the location where all the Git repositories will be stored. You can use a shared drive or any path on the system.
-
- gitlab_backup_path: "/var/opt/gitlab/backups"
-
-The `gitlab_backup_path` is the location where Gitlab backups will be stored.
-
- gitlab_edition: "gitlab-ce"
-
-The edition of GitLab to install. Usually either `gitlab-ce` (Community Edition) or `gitlab-ee` (Enterprise Edition).
-
- gitlab_version: ''
-
-If you'd like to install a specific version, set the version here (e.g. `11.4.0-ce.0` for Debian/Ubuntu, or `11.4.0-ce.0.el7` for RedHat/CentOS).
-
- gitlab_config_template: "gitlab.rb.j2"
-
-The `gitlab.rb.j2` template packaged with this role is meant to be very generic and serve a variety of use cases. However, many people would like to have a much more customized version, and so you can override this role's default template with your own, adding any additional customizations you need. To do this:
-
- - Create a `templates` directory at the same level as your playbook.
- - Create a `templates\mygitlab.rb.j2` file (just choose a different name from the default template).
- - Set the variable like: `gitlab_config_template: mygitlab.rb.j2` (with the name of your custom template).
-
-### SSL Configuration.
-
- gitlab_redirect_http_to_https: "true"
- gitlab_ssl_certificate: "/etc/gitlab/ssl/{{ gitlab_domain }}.crt"
- gitlab_ssl_certificate_key: "/etc/gitlab/ssl/{{ gitlab_domain }}.key"
-
-GitLab SSL configuration; tells GitLab to redirect normal http requests to https, and the path to the certificate and key (the default values will work for automatic self-signed certificate creation, if set to `true` in the variable below).
-
- # SSL Self-signed Certificate Configuration.
- gitlab_create_self_signed_cert: "true"
- gitlab_self_signed_cert_subj: "/C=US/ST=Missouri/L=Saint Louis/O=IT/CN={{ gitlab_domain }}"
-
-Whether to create a self-signed certificate for serving GitLab over a secure connection. Set `gitlab_self_signed_cert_subj` according to your locality and organization.
-
-### LetsEncrypt Configuration.
-
- gitlab_letsencrypt_enable: "false"
- gitlab_letsencrypt_contact_emails: ["gitlab@example.com"]
- gitlab_letsencrypt_auto_renew_hour: 1
- gitlab_letsencrypt_auto_renew_minute: 30
- gitlab_letsencrypt_auto_renew_day_of_month: "*/7"
- gitlab_letsencrypt_auto_renew: true
-
-GitLab LetsEncrypt configuration; tells GitLab whether to request and use a certificate from LetsEncrypt, if `gitlab_letsencrypt_enable` is set to `"true"`. Multiple contact emails can be configured under `gitlab_letsencrypt_contact_emails` as a list.
-
- # LDAP Configuration.
- gitlab_ldap_enabled: "false"
- gitlab_ldap_host: "example.com"
- gitlab_ldap_port: "389"
- gitlab_ldap_uid: "sAMAccountName"
- gitlab_ldap_method: "plain"
- gitlab_ldap_bind_dn: "CN=Username,CN=Users,DC=example,DC=com"
- gitlab_ldap_password: "password"
- gitlab_ldap_base: "DC=example,DC=com"
-
-GitLab LDAP configuration; if `gitlab_ldap_enabled` is `true`, the rest of the configuration will tell GitLab how to connect to an LDAP server for centralized authentication.
-
- gitlab_dependencies:
- - openssh-server
- - postfix
- - curl
- - openssl
- - tzdata
-
-Dependencies required by GitLab for certain functionality, like timezone support or email. You may change this list in your own playbook if, for example, you would like to install `exim` instead of `postfix`.
-
- gitlab_time_zone: "UTC"
-
-Gitlab timezone.
-
- gitlab_backup_keep_time: "604800"
-
-How long to keep local backups (useful if you don't want backups to fill up your drive!).
-
- gitlab_download_validate_certs: true
-
-Controls whether to validate certificates when downloading the GitLab installation repository install script.
-
- # Email configuration.
- gitlab_email_enabled: "false"
- gitlab_email_from: "gitlab@example.com"
- gitlab_email_display_name: "Gitlab"
- gitlab_email_reply_to: "gitlab@example.com"
-
-Gitlab system mail configuration. Disabled by default; set `gitlab_email_enabled` to `true` to enable, and make sure you enter valid from/reply-to values.
-
- # SMTP Configuration
- gitlab_smtp_enable: "false"
- gitlab_smtp_address: "smtp.server"
- gitlab_smtp_port: "465"
- gitlab_smtp_user_name: "smtp user"
- gitlab_smtp_password: "smtp password"
- gitlab_smtp_domain: "example.com"
- gitlab_smtp_authentication: "login"
- gitlab_smtp_enable_starttls_auto: "true"
- gitlab_smtp_tls: "false"
- gitlab_smtp_openssl_verify_mode: "none"
- gitlab_smtp_ca_path: "/etc/ssl/certs"
- gitlab_smtp_ca_file: "/etc/ssl/certs/ca-certificates.crt"
-
-Gitlab SMTP configuration; of `gitlab_smtp_enable` is `true`, the rest of the configuration will tell GitLab how to send mails using an smtp server.
-
- gitlab_nginx_listen_port: 8080
-
-If you are running GitLab behind a reverse proxy, you may want to override the listen port to something else.
-
- gitlab_nginx_listen_https: "false"
-
-If you are running GitLab behind a reverse proxy, you may wish to terminate SSL at another proxy server or load balancer
-
- gitlab_nginx_ssl_verify_client: ""
- gitlab_nginx_ssl_client_certificate: ""
-
-If you want to enable [2-way SSL Client Authentication](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-2-way-ssl-client-authentication), set `gitlab_nginx_ssl_verify_client` and add a path to the client certificate in `gitlab_nginx_ssl_client_certificate`.
-
- gitlab_default_theme: 2
-
-GitLab includes a number of themes, and you can set the default for all users with this variable. See [the included GitLab themes to choose a default](https://github.com/gitlabhq/gitlabhq/blob/master/config/gitlab.yml.example#L79-L85).
-
- gitlab_extra_settings:
- - gitlab_rails:
- - key: "trusted_proxies"
- value: "['foo', 'bar']"
- - key: "env"
- type: "plain"
- value: |
- {
- "http_proxy" => "https://my_http_proxy.company.com:3128",
- "https_proxy" => "https://my_http_proxy.company.com:3128",
- "no_proxy" => "localhost, 127.0.0.1, company.com"
- }
- - unicorn:
- - key: "worker_processes"
- value: 5
- - key: "pidfile"
- value: "/opt/gitlab/var/unicorn/unicorn.pid"
-
-Gitlab have many other settings ([see official documentation](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-config-template/gitlab.rb.template)), and you can add them with this special variable `gitlab_extra_settings` with the concerned setting and the `key` and `value` keywords.
-
-## Dependencies
-
-None.
-
-## Example Playbook
-
- - hosts: servers
- vars_files:
- - vars/main.yml
- roles:
- - { role: geerlingguy.gitlab }
-
-*Inside `vars/main.yml`*:
-
- gitlab_external_url: "https://gitlab.example.com/"
-
-## License
-
-MIT / BSD
-
-## Author Information
-
-This role was created in 2014 by [Jeff Geerling](http://jeffgeerling.com/), author of [Ansible for DevOps](http://ansiblefordevops.com/).
diff --git a/roles/geerlingguy.gitlab/defaults/main.yml b/roles/geerlingguy.gitlab/defaults/main.yml
deleted file mode 100644
index 0762f77..0000000
--- a/roles/geerlingguy.gitlab/defaults/main.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-# General config.
-gitlab_domain: gitlab
-gitlab_external_url: "https://{{ gitlab_domain }}/"
-gitlab_git_data_dir: "/var/opt/gitlab/git-data"
-gitlab_edition: "gitlab-ce"
-gitlab_version: ''
-gitlab_backup_path: "/var/opt/gitlab/backups"
-gitlab_config_template: "gitlab.rb.j2"
-
-# SSL Configuration.
-gitlab_redirect_http_to_https: "true"
-gitlab_ssl_certificate: "/etc/gitlab/ssl/{{ gitlab_domain }}.crt"
-gitlab_ssl_certificate_key: "/etc/gitlab/ssl/{{ gitlab_domain }}.key"
-
-# SSL Self-signed Certificate Configuration.
-gitlab_create_self_signed_cert: "true"
-gitlab_self_signed_cert_subj: "/C=US/ST=Missouri/L=Saint Louis/O=IT/CN={{ gitlab_domain }}"
-
-# LDAP Configuration.
-gitlab_ldap_enabled: "false"
-gitlab_ldap_host: "example.com"
-gitlab_ldap_port: "389"
-gitlab_ldap_uid: "sAMAccountName"
-gitlab_ldap_method: "plain"
-gitlab_ldap_bind_dn: "CN=Username,CN=Users,DC=example,DC=com"
-gitlab_ldap_password: "password"
-gitlab_ldap_base: "DC=example,DC=com"
-
-# SMTP Configuration
-gitlab_smtp_enable: "false"
-gitlab_smtp_address: "smtp.server"
-gitlab_smtp_port: "465"
-gitlab_smtp_user_name: "smtp user"
-gitlab_smtp_password: "smtp password"
-gitlab_smtp_domain: "example.com"
-gitlab_smtp_authentication: "login"
-gitlab_smtp_enable_starttls_auto: "true"
-gitlab_smtp_tls: "false"
-gitlab_smtp_openssl_verify_mode: "none"
-gitlab_smtp_ca_path: "/etc/ssl/certs"
-gitlab_smtp_ca_file: "/etc/ssl/certs/ca-certificates.crt"
-
-# 2-way SSL Client Authentication support.
-gitlab_nginx_ssl_verify_client: ""
-gitlab_nginx_ssl_client_certificate: ""
-
-# Probably best to leave this as the default, unless doing testing.
-gitlab_restart_handler_failed_when: 'gitlab_restart.rc != 0'
-
-# Dependencies.
-gitlab_dependencies:
- - openssh-server
- - postfix
- - curl
- - openssl
- - tzdata
-
-# Optional settings.
-gitlab_time_zone: "UTC"
-gitlab_backup_keep_time: "604800"
-gitlab_download_validate_certs: true
-gitlab_default_theme: '2'
-
-# Email configuration.
-gitlab_email_enabled: "false"
-gitlab_email_from: "gitlab@example.com"
-gitlab_email_display_name: "Gitlab"
-gitlab_email_reply_to: "gitlab@example.com"
-
-# Registry configuration.
-gitlab_registry_enable: "false"
-gitlab_registry_external_url: "https://gitlab.example.com:4567"
-gitlab_registry_nginx_ssl_certificate: "/etc/gitlab/ssl/gitlab.crt"
-gitlab_registry_nginx_ssl_certificate_key: "/etc/gitlab/ssl/gitlab.key"
-
-# LetsEncrypt configuration.
-gitlab_letsencrypt_enable: "false"
-gitlab_letsencrypt_contact_emails: ["gitlab@example.com"]
-gitlab_letsencrypt_auto_renew_hour: 1
-gitlab_letsencrypt_auto_renew_minute: 30
-gitlab_letsencrypt_auto_renew_day_of_month: "*/7"
-gitlab_letsencrypt_auto_renew: true
diff --git a/roles/geerlingguy.gitlab/handlers/main.yml b/roles/geerlingguy.gitlab/handlers/main.yml
deleted file mode 100644
index 2470b5f..0000000
--- a/roles/geerlingguy.gitlab/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: restart gitlab
- command: gitlab-ctl reconfigure
- register: gitlab_restart
- failed_when: gitlab_restart_handler_failed_when | bool
diff --git a/roles/geerlingguy.gitlab/meta/.galaxy_install_info b/roles/geerlingguy.gitlab/meta/.galaxy_install_info
deleted file mode 100644
index d227190..0000000
--- a/roles/geerlingguy.gitlab/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Fri Oct 15 18:59:12 2021
-version: 3.1.0
diff --git a/roles/geerlingguy.gitlab/meta/main.yml b/roles/geerlingguy.gitlab/meta/main.yml
deleted file mode 100644
index 75a0d7d..0000000
--- a/roles/geerlingguy.gitlab/meta/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-dependencies: []
-
-galaxy_info:
- role_name: gitlab
- author: geerlingguy
- description: GitLab Git web interface
- company: "Midwestern Mac, LLC"
- license: "license (BSD, MIT)"
- min_ansible_version: 2.0
- platforms:
- - name: EL
- versions:
- - 7
- - 8
- - name: Debian
- versions:
- - all
- - name: Ubuntu
- versions:
- - all
- galaxy_tags:
- - development
- - web
- - gitlab
- - git
- - repository
- - ci
- - integration
diff --git a/roles/geerlingguy.gitlab/molecule/default/converge.yml b/roles/geerlingguy.gitlab/molecule/default/converge.yml
deleted file mode 100644
index 8bbf802..0000000
--- a/roles/geerlingguy.gitlab/molecule/default/converge.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Converge
- hosts: all
- become: true
-
- vars:
- gitlab_restart_handler_failed_when: false
-
- pre_tasks:
- - name: Update apt cache.
- apt: update_cache=true cache_valid_time=600
- when: ansible_os_family == 'Debian'
- changed_when: false
-
- - name: Remove the .dockerenv file so GitLab Omnibus doesn't get confused.
- file:
- path: /.dockerenv
- state: absent
-
- roles:
- - role: geerlingguy.gitlab
diff --git a/roles/geerlingguy.gitlab/molecule/default/molecule.yml b/roles/geerlingguy.gitlab/molecule/default/molecule.yml
deleted file mode 100644
index 7490710..0000000
--- a/roles/geerlingguy.gitlab/molecule/default/molecule.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-dependency:
- name: galaxy
-driver:
- name: docker
-platforms:
- - name: instance
- image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest"
- command: ${MOLECULE_DOCKER_COMMAND:-""}
- volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:ro
- privileged: true
- pre_build_image: true
-provisioner:
- name: ansible
- playbooks:
- converge: ${MOLECULE_PLAYBOOK:-converge.yml}
diff --git a/roles/geerlingguy.gitlab/molecule/default/version.yml b/roles/geerlingguy.gitlab/molecule/default/version.yml
deleted file mode 100644
index f7060c9..0000000
--- a/roles/geerlingguy.gitlab/molecule/default/version.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Converge
- hosts: all
- become: true
-
- vars:
- gitlab_restart_handler_failed_when: false
-
- pre_tasks:
- - name: Update apt cache.
- apt: update_cache=true cache_valid_time=600
- when: ansible_os_family == 'Debian'
- changed_when: false
-
- - name: Remove the .dockerenv file so GitLab Omnibus doesn't get confused.
- file:
- path: /.dockerenv
- state: absent
-
- - name: Set the test GitLab version number for Debian.
- set_fact:
- gitlab_version: '11.4.0-ce.0'
- when: ansible_os_family == 'Debian'
-
- - name: Set the test GitLab version number for RedHat.
- set_fact:
- gitlab_version: '11.4.0-ce.0.el7'
- when: ansible_os_family == 'RedHat'
-
- roles:
- - role: geerlingguy.gitlab
diff --git a/roles/geerlingguy.gitlab/tasks/main.yml b/roles/geerlingguy.gitlab/tasks/main.yml
deleted file mode 100644
index b978c93..0000000
--- a/roles/geerlingguy.gitlab/tasks/main.yml
+++ /dev/null
@@ -1,81 +0,0 @@
----
-- name: Include OS-specific variables.
- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: Check if GitLab configuration file already exists.
- stat: path=/etc/gitlab/gitlab.rb
- register: gitlab_config_file
-
-- name: Check if GitLab is already installed.
- stat: path=/usr/bin/gitlab-ctl
- register: gitlab_file
-
-# Install GitLab and its dependencies.
-- name: Install GitLab dependencies.
- package:
- name: "{{ gitlab_dependencies }}"
- state: present
-
-- name: Install GitLab dependencies (Debian).
- apt:
- name: gnupg2
- state: present
- when: ansible_os_family == 'Debian'
-
-- name: Download GitLab repository installation script.
- get_url:
- url: "{{ gitlab_repository_installation_script_url }}"
- dest: /tmp/gitlab_install_repository.sh
- validate_certs: "{{ gitlab_download_validate_certs }}"
- when: not gitlab_file.stat.exists
-
-- name: Install GitLab repository.
- command: bash /tmp/gitlab_install_repository.sh
- register: output
- when: not gitlab_file.stat.exists
-
-- name: Define the Gitlab package name.
- set_fact:
- gitlab_package_name: "{{ gitlab_edition }}{{ gitlab_package_version_separator }}{{ gitlab_version }}"
- when: gitlab_version | default(false)
-
-- name: Install GitLab
- package:
- name: "{{ gitlab_package_name | default(gitlab_edition) }}"
- state: present
- async: 300
- poll: 5
- when: not gitlab_file.stat.exists
-
-# Start and configure GitLab. Sometimes the first run fails, but after that,
-# restarts fix problems, so ignore failures on this run.
-- name: Reconfigure GitLab (first run).
- command: >
- gitlab-ctl reconfigure
- creates=/var/opt/gitlab/bootstrapped
- failed_when: false
-
-- name: Create GitLab SSL configuration folder.
- file:
- path: /etc/gitlab/ssl
- state: directory
- owner: root
- group: root
- mode: 0700
- when: gitlab_create_self_signed_cert
-
-- name: Create self-signed certificate.
- command: >
- openssl req -new -nodes -x509 -subj "{{ gitlab_self_signed_cert_subj }}"
- -days 3650 -keyout {{ gitlab_ssl_certificate_key }} -out {{ gitlab_ssl_certificate }} -extensions v3_ca
- creates={{ gitlab_ssl_certificate }}
- when: gitlab_create_self_signed_cert
-
-- name: Copy GitLab configuration file.
- template:
- src: "{{ gitlab_config_template }}"
- dest: /etc/gitlab/gitlab.rb
- owner: root
- group: root
- mode: 0600
- notify: restart gitlab
diff --git a/roles/geerlingguy.gitlab/templates/gitlab.rb.j2 b/roles/geerlingguy.gitlab/templates/gitlab.rb.j2
deleted file mode 100644
index 4701776..0000000
--- a/roles/geerlingguy.gitlab/templates/gitlab.rb.j2
+++ /dev/null
@@ -1,117 +0,0 @@
-# The URL through which GitLab will be accessed.
-external_url "{{ gitlab_external_url }}"
-
-# gitlab.yml configuration
-gitlab_rails['time_zone'] = "{{ gitlab_time_zone }}"
-gitlab_rails['backup_keep_time'] = {{ gitlab_backup_keep_time }}
-gitlab_rails['gitlab_email_enabled'] = {{ gitlab_email_enabled }}
-{% if gitlab_email_enabled == "true" %}
-gitlab_rails['gitlab_email_from'] = "{{ gitlab_email_from }}"
-gitlab_rails['gitlab_email_display_name'] = "{{ gitlab_email_display_name }}"
-gitlab_rails['gitlab_email_reply_to'] = "{{ gitlab_email_reply_to }}"
-{% endif %}
-
-# Default Theme
-gitlab_rails['gitlab_default_theme'] = "{{ gitlab_default_theme }}"
-
-# Whether to redirect http to https.
-nginx['redirect_http_to_https'] = {{ gitlab_redirect_http_to_https }}
-nginx['ssl_certificate'] = "{{ gitlab_ssl_certificate }}"
-nginx['ssl_certificate_key'] = "{{ gitlab_ssl_certificate_key }}"
-
-letsencrypt['enable'] = "{{ gitlab_letsencrypt_enable }}"
-{% if gitlab_letsencrypt_enable %}
-letsencrypt['contact_emails'] = "{{ gitlab_letsencrypt_contact_emails | to_json }}"
-letsencrypt['auto_renew_hour'] = "{{ gitlab_letsencrypt_auto_renew_hour }}"
-letsencrypt['auto_renew_minute'] = "{{ gitlab_letsencrypt_auto_renew_minute }}"
-letsencrypt['auto_renew_day_of_month'] = "{{ gitlab_letsencrypt_auto_renew_day_of_month }}"
-letsencrypt['auto_renew'] = "{{ gitlab_letsencrypt_auto_renew }}"
-{% endif %}
-
-# The directory where Git repositories will be stored.
-git_data_dirs({"default" => {"path" => "{{ gitlab_git_data_dir }}"} })
-
-# The directory where Gitlab backups will be stored
-gitlab_rails['backup_path'] = "{{ gitlab_backup_path }}"
-
-# These settings are documented in more detail at
-# https://gitlab.com/gitlab-org/gitlab-ce/blob/master/config/gitlab.yml.example#L118
-gitlab_rails['ldap_enabled'] = {{ gitlab_ldap_enabled }}
-{% if gitlab_ldap_enabled == "true" %}
-gitlab_rails['ldap_host'] = '{{ gitlab_ldap_host }}'
-gitlab_rails['ldap_port'] = {{ gitlab_ldap_port }}
-gitlab_rails['ldap_uid'] = '{{ gitlab_ldap_uid }}'
-gitlab_rails['ldap_method'] = '{{ gitlab_ldap_method}}' # 'ssl' or 'plain'
-gitlab_rails['ldap_bind_dn'] = '{{ gitlab_ldap_bind_dn }}'
-gitlab_rails['ldap_password'] = '{{ gitlab_ldap_password }}'
-gitlab_rails['ldap_allow_username_or_email_login'] = true
-gitlab_rails['ldap_base'] = '{{ gitlab_ldap_base }}'
-{% endif %}
-
-# GitLab Nginx
-## See https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/nginx.md
-{% if gitlab_nginx_listen_port is defined %}
-nginx['listen_port'] = "{{ gitlab_nginx_listen_port }}"
-{% endif %}
-{% if gitlab_nginx_listen_https is defined %}
-nginx['listen_https'] = {{ gitlab_nginx_listen_https }}
-{% endif %}
-
-# Use smtp instead of sendmail/postfix
-# More details and example configuration at
-# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/smtp.md
-gitlab_rails['smtp_enable'] = {{ gitlab_smtp_enable }}
-{% if gitlab_smtp_enable == "true" %}
-gitlab_rails['smtp_address'] = '{{ gitlab_smtp_address }}'
-gitlab_rails['smtp_port'] = {{ gitlab_smtp_port }}
-{% if gitlab_smtp_user_name %}
-gitlab_rails['smtp_user_name'] = '{{ gitlab_smtp_user_name }}'
-{% endif %}
-{% if gitlab_smtp_password %}
-gitlab_rails['smtp_password'] = '{{ gitlab_smtp_password }}'
-{% endif %}
-gitlab_rails['smtp_domain'] = '{{ gitlab_smtp_domain }}'
-{% if gitlab_smtp_authentication %}
-gitlab_rails['smtp_authentication'] = '{{ gitlab_smtp_authentication }}'
-{% endif %}
-gitlab_rails['smtp_enable_starttls_auto'] = {{ gitlab_smtp_enable_starttls_auto }}
-gitlab_rails['smtp_tls'] = {{ gitlab_smtp_tls }}
-gitlab_rails['smtp_openssl_verify_mode'] = '{{ gitlab_smtp_openssl_verify_mode }}'
-gitlab_rails['smtp_ca_path'] = '{{ gitlab_smtp_ca_path }}'
-gitlab_rails['smtp_ca_file'] = '{{ gitlab_smtp_ca_file }}'
-{% endif %}
-
-# 2-way SSL Client Authentication.
-{% if gitlab_nginx_ssl_verify_client %}
-nginx['ssl_verify_client'] = "{{ gitlab_nginx_ssl_verify_client }}"
-{% endif %}
-{% if gitlab_nginx_ssl_client_certificate %}
-nginx['ssl_client_certificate'] = "{{ gitlab_nginx_ssl_client_certificate }}"
-{% endif %}
-
-# GitLab registry.
-registry['enable'] = {{ gitlab_registry_enable }}
-{% if gitlab_registry_enable == "true" %}
-registry_external_url "{{ gitlab_registry_external_url }}"
-registry_nginx['ssl_certificate'] = "{{ gitlab_registry_nginx_ssl_certificate }}"
-registry_nginx['ssl_certificate_key'] = "{{ gitlab_registry_nginx_ssl_certificate_key }}"
-{% endif %}
-
-{% if gitlab_extra_settings is defined %}
-# Extra configuration
-{% for extra in gitlab_extra_settings %}
-{% for setting in extra %}
-{% for kv in extra[setting] %}
-{% if (kv.type is defined and kv.type == 'plain') or (kv.value is not string) %}
-{{ setting }}['{{ kv.key }}'] = {{ kv.value }}
-{% else %}
-{{ setting }}['{{ kv.key }}'] = '{{ kv.value }}'
-{% endif %}
-{% endfor %}
-{% endfor %}
-
-{% endfor %}
-{% endif %}
-
-# To change other settings, see:
-# https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/README.md#changing-gitlab-yml-settings
diff --git a/roles/geerlingguy.gitlab/vars/Debian.yml b/roles/geerlingguy.gitlab/vars/Debian.yml
deleted file mode 100644
index 5da8774..0000000
--- a/roles/geerlingguy.gitlab/vars/Debian.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-gitlab_package_version_separator: '='
-gitlab_repository_installation_script_url: "https://packages.gitlab.com/install/repositories/gitlab/{{ gitlab_edition }}/script.deb.sh"
diff --git a/roles/geerlingguy.gitlab/vars/RedHat.yml b/roles/geerlingguy.gitlab/vars/RedHat.yml
deleted file mode 100644
index e4c0e94..0000000
--- a/roles/geerlingguy.gitlab/vars/RedHat.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-gitlab_package_version_separator: '-'
-gitlab_repository_installation_script_url: "https://packages.gitlab.com/install/repositories/gitlab/{{ gitlab_edition }}/script.rpm.sh"
diff --git a/roles/geerlingguy.java/.github/stale.yml b/roles/geerlingguy.java/.github/stale.yml
index c7ff127..3cc6ec3 100644
--- a/roles/geerlingguy.java/.github/stale.yml
+++ b/roles/geerlingguy.java/.github/stale.yml
@@ -12,6 +12,7 @@ onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
+ - bug
- pinned
- security
- planned
diff --git a/roles/geerlingguy.java/.gitignore b/roles/geerlingguy.java/.gitignore
index f56f5b5..8840c8f 100644
--- a/roles/geerlingguy.java/.gitignore
+++ b/roles/geerlingguy.java/.gitignore
@@ -1,3 +1,5 @@
*.retry
*/__pycache__
*.pyc
+.cache
+
diff --git a/roles/geerlingguy.java/.travis.yml b/roles/geerlingguy.java/.travis.yml
deleted file mode 100644
index ad0738e..0000000
--- a/roles/geerlingguy.java/.travis.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-language: python
-services: docker
-
-env:
- global:
- - ROLE_NAME: java
- matrix:
- - MOLECULE_DISTRO: centos8
- - MOLECULE_DISTRO: centos7
- - MOLECULE_DISTRO: centos6
- - MOLECULE_DISTRO: fedora31
- - MOLECULE_DISTRO: ubuntu2004
- - MOLECULE_DISTRO: ubuntu1804
- - MOLECULE_DISTRO: ubuntu1604
- - MOLECULE_DISTRO: debian10
- - MOLECULE_DISTRO: debian9
-
-install:
- # Install test dependencies.
- - pip install molecule yamllint ansible-lint docker
-
-before_script:
- # Use actual Ansible Galaxy role name for the project directory.
- - cd ../
- - mv ansible-role-$ROLE_NAME geerlingguy.$ROLE_NAME
- - cd geerlingguy.$ROLE_NAME
-
-script:
- # Run tests.
- - molecule test
-
-notifications:
- webhooks: https://galaxy.ansible.com/api/v1/notifications/
diff --git a/roles/geerlingguy.java/.yamllint b/roles/geerlingguy.java/.yamllint
index a3dbc38..f2033dd 100644
--- a/roles/geerlingguy.java/.yamllint
+++ b/roles/geerlingguy.java/.yamllint
@@ -1,6 +1,11 @@
---
extends: default
+
rules:
line-length:
max: 120
level: warning
+
+ignore: |
+ .github/stale.yml
+ .travis.yml
diff --git a/roles/geerlingguy.java/README.md b/roles/geerlingguy.java/README.md
index b35fde8..800b1d3 100644
--- a/roles/geerlingguy.java/README.md
+++ b/roles/geerlingguy.java/README.md
@@ -1,8 +1,8 @@
# Ansible Role: Java
-[](https://travis-ci.org/geerlingguy/ansible-role-java)
+[](https://github.com/geerlingguy/ansible-role-java/actions?query=workflow%3ACI)
-Installs Java for RedHat/CentOS and Debian/Ubuntu linux servers.
+Installs Java for RedHat/CentOS, Amazon and Debian/Ubuntu linux servers.
## Requirements
diff --git a/roles/geerlingguy.java/meta/.galaxy_install_info b/roles/geerlingguy.java/meta/.galaxy_install_info
index 87da7f1..9a7d5ae 100644
--- a/roles/geerlingguy.java/meta/.galaxy_install_info
+++ b/roles/geerlingguy.java/meta/.galaxy_install_info
@@ -1,2 +1,2 @@
-install_date: Fri Oct 15 18:59:11 2021
-version: 1.10.0
+install_date: Thu 08 Feb 2024 08:53:59 PM
+version: 2.3.3
diff --git a/roles/geerlingguy.java/meta/main.yml b/roles/geerlingguy.java/meta/main.yml
index 085bd57..0b16d72 100644
--- a/roles/geerlingguy.java/meta/main.yml
+++ b/roles/geerlingguy.java/meta/main.yml
@@ -7,13 +7,8 @@ galaxy_info:
description: Java for Linux
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
- min_ansible_version: 2.4
+ min_ansible_version: 2.10
platforms:
- - name: EL
- versions:
- - 6
- - 7
- - 8
- name: Fedora
versions:
- all
@@ -23,6 +18,8 @@ galaxy_info:
- jessie
- stretch
- buster
+ - bullseye
+ - bookworm
- name: Ubuntu
versions:
- precise
@@ -30,6 +27,7 @@ galaxy_info:
- xenial
- bionic
- focal
+ - jammy
- name: FreeBSD
versions:
- 10.2
diff --git a/roles/geerlingguy.java/molecule/default/converge.yml b/roles/geerlingguy.java/molecule/default/converge.yml
index c99558d..bf6734f 100644
--- a/roles/geerlingguy.java/molecule/default/converge.yml
+++ b/roles/geerlingguy.java/molecule/default/converge.yml
@@ -5,7 +5,9 @@
pre_tasks:
- name: Update apt cache.
- apt: update_cache=true cache_valid_time=600
+ apt:
+ update_cache: true
+ cache_valid_time: 600
when: ansible_os_family == 'Debian'
changed_when: false
diff --git a/roles/geerlingguy.java/molecule/default/molecule.yml b/roles/geerlingguy.java/molecule/default/molecule.yml
index 2da47dd..147da5d 100644
--- a/roles/geerlingguy.java/molecule/default/molecule.yml
+++ b/roles/geerlingguy.java/molecule/default/molecule.yml
@@ -1,18 +1,18 @@
---
+role_name_check: 1
dependency:
name: galaxy
+ options:
+ ignore-errors: true
driver:
name: docker
-lint: |
- set -e
- yamllint .
- ansible-lint
platforms:
- name: instance
- image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos7}-ansible:latest"
+ image: "geerlingguy/docker-${MOLECULE_DISTRO:-rockylinux8}-ansible:latest"
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:ro
+ - /sys/fs/cgroup:/sys/fs/cgroup:rw
+ cgroupns_mode: host
privileged: true
pre_build_image: true
provisioner:
diff --git a/roles/geerlingguy.java/tasks/main.yml b/roles/geerlingguy.java/tasks/main.yml
index b2a6ded..965bd9d 100644
--- a/roles/geerlingguy.java/tasks/main.yml
+++ b/roles/geerlingguy.java/tasks/main.yml
@@ -3,18 +3,28 @@
include_vars: "{{ ansible_distribution }}.yml"
when: ansible_distribution == 'FreeBSD' or ansible_distribution == 'Fedora'
+- name: Include OS-specific variables for Amazon.
+ include_vars: "{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml"
+ when: ansible_distribution == 'Amazon'
+
- name: Include version-specific variables for CentOS/RHEL.
include_vars: "RedHat-{{ ansible_distribution_version.split('.')[0] }}.yml"
- when: ansible_distribution == 'CentOS' or
- ansible_distribution == 'Red Hat Enterprise Linux' or
- ansible_distribution == 'RedHat'
+ when: >-
+ ansible_distribution in [
+ 'CentOS',
+ 'Red Hat Enterprise Linux',
+ 'RedHat',
+ 'OracleLinux',
+ 'Rocky',
+ 'AlmaLinux'
+ ]
- name: Include version-specific variables for Ubuntu.
include_vars: "{{ ansible_distribution }}-{{ ansible_distribution_version.split('.')[0] }}.yml"
when: ansible_distribution == 'Ubuntu'
- name: Include version-specific variables for Debian.
- include_vars: "{{ ansible_distribution|title }}-{{ ansible_distribution_version.split('.')[0] }}.yml"
+ include_vars: "{{ ansible_distribution | title }}-{{ ansible_distribution_version.split('.')[0] }}.yml"
when: ansible_os_family == 'Debian'
- name: Define java_packages.
@@ -24,7 +34,10 @@
# Setup/install tasks.
- include_tasks: setup-RedHat.yml
- when: ansible_os_family == 'RedHat'
+ when: ansible_os_family == 'RedHat' and ansible_distribution_file_variety == 'RedHat'
+
+- include_tasks: setup-Amazon.yml
+ when: ansible_distribution == 'Amazon'
- include_tasks: setup-Debian.yml
when: ansible_os_family == 'Debian'
diff --git a/roles/geerlingguy.java/tasks/setup-Debian.yml b/roles/geerlingguy.java/tasks/setup-Debian.yml
index ffeb9ac..c0cb667 100644
--- a/roles/geerlingguy.java/tasks/setup-Debian.yml
+++ b/roles/geerlingguy.java/tasks/setup-Debian.yml
@@ -2,10 +2,10 @@
# See: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=863199 and
# https://github.com/geerlingguy/ansible-role-java/issues/64
- name: Ensure 'man' directory exists.
- file:
+ file: # noqa 208
path: /usr/share/man/man1
state: directory
- recurse: true
+ mode: 0755
when:
- ansible_distribution == 'Ubuntu'
- ansible_distribution_major_version | int >= 18
diff --git a/roles/geerlingguy.java/tasks/setup-FreeBSD.yml b/roles/geerlingguy.java/tasks/setup-FreeBSD.yml
index ba66872..c3eeebb 100644
--- a/roles/geerlingguy.java/tasks/setup-FreeBSD.yml
+++ b/roles/geerlingguy.java/tasks/setup-FreeBSD.yml
@@ -4,8 +4,18 @@
name: "{{ java_packages }}"
state: present
-- name: ensure proc is mounted
- mount: name=/proc fstype=procfs src=proc opts=rw state=mounted
+- name: Ensure proc is mounted
+ mount:
+ name: /proc
+ fstype: procfs
+ src: proc
+ opts: rw
+ state: mounted
-- name: ensure fdesc is mounted
- mount: name=/dev/fd fstype=fdescfs src=fdesc opts=rw state=mounted
+- name: Ensure fdesc is mounted
+ mount:
+ name: /dev/fd
+ fstype: fdescfs
+ src: fdesc
+ opts: rw
+ state: mounted
diff --git a/roles/geerlingguy.java/vars/RedHat-6.yml b/roles/geerlingguy.java/vars/RedHat-6.yml
deleted file mode 100644
index 70694b7..0000000
--- a/roles/geerlingguy.java/vars/RedHat-6.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# JDK version options include:
-# - java
-# - java-1.6.0-openjdk
-# - java-1.7.0-openjdk
-__java_packages:
- - java-1.7.0-openjdk
diff --git a/roles/geerlingguy.java/vars/RedHat-7.yml b/roles/geerlingguy.java/vars/RedHat-7.yml
index 64db579..7b5aa9b 100644
--- a/roles/geerlingguy.java/vars/RedHat-7.yml
+++ b/roles/geerlingguy.java/vars/RedHat-7.yml
@@ -3,6 +3,7 @@
# - java
# - java-1.6.0-openjdk
# - java-1.7.0-openjdk
-# - java-1.8.0-openjdk
+# - java-1.8.0-openjdk-devel
+# - java-11-openjdk-devel
__java_packages:
- java-1.8.0-openjdk
diff --git a/roles/ikke_t.container_image_cleanup/meta/.galaxy_install_info b/roles/ikke_t.container_image_cleanup/meta/.galaxy_install_info
index 150d893..612be65 100644
--- a/roles/ikke_t.container_image_cleanup/meta/.galaxy_install_info
+++ b/roles/ikke_t.container_image_cleanup/meta/.galaxy_install_info
@@ -1,2 +1,2 @@
-install_date: Fri Oct 15 18:59:25 2021
+install_date: Thu 08 Feb 2024 08:54:08 PM
version: master
diff --git a/roles/ikke_t.podman_container_systemd/README.md b/roles/ikke_t.podman_container_systemd/README.md
index 8950042..9ced9b5 100644
--- a/roles/ikke_t.podman_container_systemd/README.md
+++ b/roles/ikke_t.podman_container_systemd/README.md
@@ -18,11 +18,12 @@ What role does:
* on consecutive runs it pulls image again,
and restarts container if image changed (not for pod yet)
* creates systemd file for container or pod
+ * creates kubernetes yaml for pod
+ * creates volume directories for containers if they do not exist. (for pod use DirectoryOrCreate)
* set's container or pod to be always automatically restarted if container dies.
* makes container or pod enter run state at system boot
* adds or removes containers exposed ports to firewall.
* It takes parameter for running rootless containers under given user
- (I didn't test this with pod mode yet)
For reference, see these two blogs about the role:
* [Automate Podman Containers with Ansible 1/2](https://redhatnordicssa.github.io/ansible-podman-containers-1)
@@ -37,8 +38,6 @@ using this module.
* The user should have entries in /etc/sub[gu]id files for namespace range.
If not, this role adds some variables there in order to get something going,
but preferrably you check them.
-* I only tested the single container mode, not the pod mode with several containers.
- Please report back how that part works! :)
* Some control things like memory or other resource limit's won't work as user.
* You want to increase ```systemd_TimeoutStartSec``` heavily, as we can not
prefetch the images before systemd unit start. So systemd needs to wait
@@ -50,7 +49,8 @@ Requirements
Requires system which is capable of running podman, and that podman is found
from package repositories. Role installs podman. Role also installs firewalld
-if user has defined ```container_firewall_ports``` -variable.
+if user has defined ```container_firewall_ports``` -variable. Installs kubeval
+for a pod if ```container_pod_yaml_template_validation: true```.
Role Variables
--------------
@@ -61,19 +61,35 @@ note that some options apply only to other method.
- ```container_image_list``` - list of container images to run.
If more than one image is defined, then the containers will be run in a pod.
-- ```container_image_user``` - optional username to use when authenticating
+ It is possible to define it as a dictionary to include authentication information per image, like so:
+```
+container_image_list:
+ - image: docker.io/imagename
+ user: exampleuser
+ password: examplepw
+ - image: docker.io/imagename2
+```
+- ```container_image_user``` - optional default username to use when authenticating
to remote registries
-- ```container_image_password``` - optional password to use when authenticating
+- ```container_image_password``` - optional default password to use when authenticating
to remote registries
- ```container_name``` - Identify the container in systemd and podman commands.
- Systemd service file be named container_name--container-pod.service.
+ Systemd service file will be named container_name--container-pod.service. This can be overwritten with service_name.
- ```container_run_args``` - Anything you pass to podman, except for the name
and image while running single container. Not used for pod.
- ```container_cmd_args``` - Any command and arguments passed to podman-run after specifying the image name. Not used for pod.
- ```container_run_as_user``` - Which user should systemd run container as.
Defaults to root.
-- ```container_run_as_group``` - Which grou should systemd run container as.
+- ```container_run_as_group``` - Which group should systemd run container as.
Defaults to root.
+- ```container_dir_owner``` - Which owner should the volume dirs have.
+ Defaults to container_run_as_user.
+ If you use :U as a volume option podman will set the permissions for the user inside the container automatically.
+ Quote: The :U suffix tells Podman to use the correct host UID and GID based on the UID and GID within the container, to change recursively the owner and group of the source volume. Warning use with caution since this will modify the host filesystem.
+- ```container_dir_group``` - Which group should the volume dirs have.
+ Defaults to container_run_as_group.
+- ```container_dir_mode``` - Which permissions should the volume dirs have.
+ Defaults to '0755'.
- ```container_state``` - container is installed and run if state is
```running```, and stopped and systemd file removed if ```absent```
- ```container_firewall_ports``` - list of ports you have exposed from container
@@ -83,6 +99,25 @@ note that some options apply only to other method.
- ```systemd_tempdir``` - Where to store conmon-pidfile and cidfile for single containers.
Defaults to ``%T`` on systems supporting this specifier (see man 5 systemd.unit) ``/tmp``
otherwise.
+- ```service_name``` - How the systemd service files are named.
+ Defaults to ```"{{ container_name }}-container-pod-{{ container_run_as_user }}.service"```.
+- ```service_files_dir``` - Where to store the systemd service files.
+ Defaults to ```/usr/local/lib/systemd/system``` for root and ```"{{ user_info.home }}/.config/systemd/user"``` for a rootless user
+- ```service_files_owner``` - Which user should own the systemd service files.
+ Defaults to root.
+- ```service_files_group``` - Which group should own the systemd service files.
+ Defaults to root.
+- ```service_files_mode``` - Which permissions should the systemd service files have.
+ Defaults to 0644.
+- ```container_pod_yaml``` - Path to the pod yaml file. Required for a pod.
+- ```container_pod_yaml_deploy``` - Whether to deploy the pod yaml file. Defaults to ``false``
+- ```container_pod_yaml_template``` - Template to use for pod yaml deploy.
+ As the template doesn't include every possible configuration option it is possible to overwrite it with your own template.
+ Defaults to ``templates/container-pod-yaml.j2``.
+- ```container_pod_yaml_template_validation``` - Whether to validate the deployed pod yaml file. Defaults to ``false``.
+- ```container_pod_labels``` - Defines labels for ```container_pod_yaml_deploy```.
+- ```container_pod_volumes``` - Defines volumes for ```container_pod_yaml_deploy```.
+- ```container_pod_containers``` - Defines containers for ```container_pod_yaml_deploy```.
This playbook doesn't have python module to parse parameters for podman command.
Until that you just need to pass all parameters as you would use podman from
@@ -94,6 +129,10 @@ If you want your
[images to be automatically updated](http://docs.podman.io/en/latest/markdown/podman-auto-update.1.html),
add this label to container_cmd_args: ```--label "io.containers.autoupdate=image"```
+Never use `ansible.builtin.import_role` to execute this role if you intend to use it more
+than once per playbook, or you will fall in
+[this anti-pattern](https://medium.com/opsops/ansible-anti-pattern-import-role-task-with-task-level-vars-a9f5c752c9c3).
+
Dependencies
------------
@@ -115,7 +154,7 @@ Root container:
container_name: lighttpd
container_run_args: >-
--rm
- -v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
+ -v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z,U
--label "io.containers.autoupdate=image"
-p 8080:80
#container_state: absent
@@ -123,7 +162,7 @@ Root container:
container_firewall_ports:
- 8080/tcp
- 8443/tcp
- import_role:
+ ansible.builtin.include_role:
name: podman-container-systemd
```
@@ -135,13 +174,6 @@ Rootless container:
name: rootless_user
comment: I run sample container
-- name: ensure directory
- file:
- name: /tmp/podman-container-systemd
- owner: rootless_user
- group: rootless_user
- state: directory
-
- name: tests container
vars:
container_run_as_user: rootless_user
@@ -151,17 +183,59 @@ Rootless container:
container_name: lighttpd
container_run_args: >-
--rm
- -v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
+ -v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z,U
-p 8080:80
#container_state: absent
container_state: running
container_firewall_ports:
- 8080/tcp
- 8443/tcp
- import_role:
+ ansible.builtin.include_role:
name: podman-container-systemd
```
+Rootless Pod:
+
+```
+- name: ensure user
+ user:
+ name: rootless_user
+ comment: I run sample container
+
+- name: tests pod
+ vars:
+ container_run_as_user: rootless_user
+ container_run_as_group: rootless_user
+ container_image_list:
+ - sebp/lighttpd:latest
+ container_name: lighttpd-pod
+ container_pod_yaml: /home/rootless_user/lighttpd-pod.yml
+ container_pod_yaml_deploy: true
+ container_pod_yaml_template_validation: true
+ container_pod_labels:
+ app: "{{ container_name }}"
+ io.containers.autoupdate: 'image(1)'
+ container_pod_volumes:
+ - name: htdocs
+ hostPath:
+ path: /tmp/podman-container-systemd
+ type: DirectoryOrCreate
+ container_pod_containers:
+ - name: lighttpd
+ image: sebp/lighttpd:latest
+ volumeMounts:
+ - name: htdocs
+ mountPath: /var/www/localhost/htdocs:Z
+ ports:
+ - containerPort: 80
+ hostPort: 8080
+ container_state: running
+ container_firewall_ports:
+ - 8080/tcp
+ - 8443/tcp
+ ansible.builtin.include_role:
+ name: podman-container-systemd
+```
License
-------
diff --git a/roles/ikke_t.podman_container_systemd/defaults/main.yml b/roles/ikke_t.podman_container_systemd/defaults/main.yml
index 879fd31..e4d878d 100644
--- a/roles/ikke_t.podman_container_systemd/defaults/main.yml
+++ b/roles/ikke_t.podman_container_systemd/defaults/main.yml
@@ -8,6 +8,9 @@ container_state: running
# by default we want to restart failed container
container_restart: on-failure
service_files_dir: /usr/local/lib/systemd/system
+service_files_owner: root
+service_files_group: root
+service_files_mode: 0644
systemd_scope: system
systemd_TimeoutStartSec: 15
systemd_RestartSec: 30
@@ -30,9 +33,14 @@ systemd_Wants: []
service_name: "{{ container_name }}-container-pod-{{ container_run_as_user }}.service"
# to sepped up you can disable always checking if podman is installed.
-skip_podman_install: true
+skip_podman_install: false
podman_dependencies_rootless:
- fuse-overlayfs
- slirp4netns
- uidmap
+
+# pod yaml deploy
+container_pod_yaml_deploy: false
+container_pod_yaml_template: templates/container-pod-yaml.j2
+container_pod_yaml_template_validation: false
diff --git a/roles/ikke_t.podman_container_systemd/handlers/main.yml b/roles/ikke_t.podman_container_systemd/handlers/main.yml
index d45c343..07e5c12 100644
--- a/roles/ikke_t.podman_container_systemd/handlers/main.yml
+++ b/roles/ikke_t.podman_container_systemd/handlers/main.yml
@@ -9,16 +9,6 @@
daemon_reload: true
scope: "{{ systemd_scope }}"
-- name: start service
- become: true
- become_user: "{{ container_run_as_user }}"
- environment:
- XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
- systemd:
- name: "{{ service_name }}"
- scope: "{{ systemd_scope }}"
- state: started
-
- name: restart service
become: true
become_user: "{{ container_run_as_user }}"
@@ -28,13 +18,4 @@
name: "{{ service_name }}"
scope: "{{ systemd_scope }}"
state: restarted
-
-- name: enable service
- become: true
- become_user: "{{ container_run_as_user }}"
- environment:
- XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
- systemd:
- name: "{{ service_name }}"
enabled: true
- scope: "{{ systemd_scope }}"
diff --git a/roles/ikke_t.podman_container_systemd/meta/.galaxy_install_info b/roles/ikke_t.podman_container_systemd/meta/.galaxy_install_info
index 8c69d0f..9df408e 100644
--- a/roles/ikke_t.podman_container_systemd/meta/.galaxy_install_info
+++ b/roles/ikke_t.podman_container_systemd/meta/.galaxy_install_info
@@ -1,2 +1,2 @@
-install_date: Fri Oct 15 18:59:22 2021
-version: 2.2.0
+install_date: Thu 08 Feb 2024 08:54:06 PM
+version: 2.5.0
diff --git a/roles/ikke_t.podman_container_systemd/requirements.yml b/roles/ikke_t.podman_container_systemd/requirements.yml
index 397ca6b..07f619b 100644
--- a/roles/ikke_t.podman_container_systemd/requirements.yml
+++ b/roles/ikke_t.podman_container_systemd/requirements.yml
@@ -1,4 +1,5 @@
---
collections:
- ansible.posix
+ - community.general
- containers.podman
diff --git a/roles/ikke_t.podman_container_systemd/tasks/main.yml b/roles/ikke_t.podman_container_systemd/tasks/main.yml
index bfa8f1f..d494955 100644
--- a/roles/ikke_t.podman_container_systemd/tasks/main.yml
+++ b/roles/ikke_t.podman_container_systemd/tasks/main.yml
@@ -1,14 +1,19 @@
---
+- name: Get user information
+ user:
+ name: "{{ container_run_as_user }}"
+ check_mode: true
+ changed_when: false
+ register: user_info
+
+- name: Fails if user "{{ container_run_as_user }}" doesn't exist
+ fail:
+ msg: User "{{ container_run_as_user }}" doesn't exist.
+ when: user_info.name is not defined
+
- name: prepare rootless stuff if needed
block:
-
- - name: get user information
- user:
- name: "{{ container_run_as_user }}"
- check_mode: true
- register: user_info
-
- name: set systemd dir if user is not root
set_fact:
service_files_dir: "{{ user_info.home }}/.config/systemd/user"
@@ -24,38 +29,28 @@
when: container_run_as_user != "root"
-- name: "Find uid of user"
- command: "id -u {{ container_run_as_user }}"
- register: container_run_as_uid
- check_mode: false # Run even in check mode, to avoid fail with --check.
- changed_when: false
-
- name: set systemd runtime dir
set_fact:
- xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}"
+ xdg_runtime_dir: "/run/user/{{ user_info.uid }}"
changed_when: false
- name: set systemd scope to system if needed
set_fact:
systemd_scope: system
- service_files_dir: /usr/local/lib/systemd/system
- xdg_runtime_dir: "/run/user/{{ container_run_as_uid.stdout }}"
+ service_files_dir: "{{ service_files_dir }}"
when: container_run_as_user == "root"
changed_when: false
- name: create local systemd directory
- when: service_files_dir == '/usr/local/lib/systemd/system'
file:
group: root
mode: u=rwX,go=rX
owner: root
path: /usr/local/lib/systemd/system/
state: directory
+ become: true
+ when: container_run_as_user == "root" and service_files_dir == '/usr/local/lib/systemd/system'
-- name: check if service file exists already
- stat:
- path: "{{ service_files_dir }}/{{ service_name }}"
- register: service_file_before_template
- name: do tasks when "{{ service_name }}" state is "running"
block:
@@ -91,73 +86,71 @@
state: present
when: not skip_podman_install
- - name: check user exists
- user:
- name: "{{ container_run_as_user }}"
-
- name: Check subuid & subgid
import_tasks: check_subid.yml
- - name: running single container, get image Id if it exists and we are root
- # XXX podman doesn't work through sudo for non root users,
- # so skip preload if user
- # https://github.com/containers/libpod/issues/5570
- # command: podman inspect -f {{.Id}} "{{ container_image }}"
- command: "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}"
+ - name: Ensure empty internal variable _container_image_list
+ set_fact:
+ _container_image_list: []
changed_when: false
- register: pre_pull_id
- ignore_errors: true
- when:
- - container_image_list is defined
- - container_image_list | length == 1
- - container_run_as_user == 'root'
+
+ - name: Convert container_image_list to new form
+ set_fact:
+ _container_image_list: "{{ _container_image_list + [{'image': item}] }}"
with_items: "{{ container_image_list }}"
+ when: not (container_image_list | selectattr("image", "defined"))
+ changed_when: false
+ no_log: true
+
+ - name: Always use internal variable for container_image_list
+ set_fact:
+ _container_image_list: "{{ container_image_list }}"
+ when: _container_image_list | length == 0
+ changed_when: false
+ no_log: true
- name: running single container, ensure we have up to date container image
containers.podman.podman_image:
- name: "{{ item }}"
+ name: "{{ item.image }}"
force: true
- username: "{{ container_image_user | default(omit) }}"
- password: "{{ container_image_password | default(omit) }}"
+ username: "{{ item.user | default(container_image_user) | default(omit) }}"
+ password: "{{ item.password | default(container_image_password) | default(omit) }}"
notify: restart service
become: true
become_user: "{{ container_run_as_user }}"
when:
- - container_image_list is defined
- - container_image_list | length == 1
+ - _container_image_list | length == 1
- container_run_as_user == 'root'
- with_items: "{{ container_image_list }}"
-
- - name: running single container, get image Id if it exists
- command:
- "podman image inspect -f '{{ '{{' }}.Id{{ '}}' }}' {{ item }}"
- changed_when: false
- become: true
- become_user: "{{ container_run_as_user }}"
- register: post_pull_id
- ignore_errors: true
- when:
- - container_image_list is defined
- - container_image_list | length == 1
- - container_run_as_user == 'root'
- with_items: "{{ container_image_list }}"
+ - not (item.image | regex_search ('^localhost/.*'))
+ loop: "{{ _container_image_list }}"
+ no_log: true
- name: seems we use several container images, ensure all are up to date
containers.podman.podman_image:
- name: "{{ item }}"
+ name: "{{ item.image }}"
force: true
- username: "{{ container_image_user | default(omit) }}"
- password: "{{ container_image_password | default(omit) }}"
+ username: "{{ item.user | default(container_image_user) | default(omit) }}"
+ password: "{{ item.password | default(container_image_password) | default(omit) }}"
become: true
become_user: "{{ container_run_as_user }}"
- when: container_image_list is defined and container_image_list | length > 1
- with_items: "{{ container_image_list }}"
+ when:
+ - _container_image_list | length > 1
+ - not (item.image | regex_search ('^localhost/.*'))
+ loop: "{{ _container_image_list }}"
+ no_log: true
+
+ - name: Include pod yaml templating
+ ansible.builtin.include_tasks: deploy_pod_yaml.yml
+ when:
+ - container_pod_yaml is defined
+ - container_pod_yaml_deploy
- name: if running pod, ensure configuration file exists
stat:
path: "{{ container_pod_yaml }}"
register: pod_file
when: container_pod_yaml is defined
+
- name: fail if pod configuration file is missing
fail:
msg: >
@@ -179,41 +172,32 @@
- container_run_as_user != "root"
- not user_lingering.stat.exists
- - name: "create systemd service file for container: {{ container_name }}"
- template:
- src: systemd-service-single.j2
- dest: "{{ service_files_dir }}/{{ service_name }}"
- owner: root
- group: root
- mode: 0644
- notify:
- - reload systemctl
- - start service
- - enable service
- register: service_file
- when: container_image_list is defined and container_image_list | length == 1
-
- - name: "create systemd service file for pod: {{ container_name }}"
- template:
- src: systemd-service-pod.j2
- dest: "{{ service_files_dir }}/{{ service_name }}"
- owner: root
- group: root
- mode: 0644
- notify:
- - reload systemctl
- - start service
- - enable service
- register: service_file
- when: container_image_list is defined and container_image_list | length > 1
-
- - name: "ensure {{ service_name }} is restarted due config change"
- debug: msg="config has changed:"
- changed_when: true
- notify: restart service
+ - name: Ensure volume directories exist for {{ container_name }}
+ file:
+ path: "{{ item }}"
+ owner: "{{ container_dir_owner | default(container_run_as_user) }}"
+ group: "{{ container_dir_group | default(container_run_as_group) }}"
+ mode: "{{ container_dir_mode | default(omit) }}"
+ state: directory
+ become: true
+ loop: "{{ container_run_args | regex_findall('-v ([^:]*)') }}"
when:
- - service_file_before_template.stat.exists
- - service_file.changed
+ - _container_image_list | length == 1
+ - container_run_args is defined and container_run_args | length > 0
+ - container_pod_yaml is undefined
+
+ - name: Create systemd service file for {{ container_name }}
+ template:
+ src: "{% if _container_image_list | length == 1 %}systemd-service-single.j2{% else %}systemd-service-pod.j2{% endif %}"
+ dest: "{{ service_files_dir }}/{{ service_name }}"
+ owner: "{{ service_files_owner }}"
+ group: "{{ service_files_group }}"
+ mode: "{{ service_files_mode }}"
+ become: true
+ notify:
+ - reload systemctl
+ - restart service
+ register: service_file
- name: ensure auto update is running for images
become: true
@@ -232,74 +216,58 @@
- name: configure firewall if container_firewall_ports is defined
block:
- - name: set firewall ports state to enabled when container state is running
- set_fact:
- fw_state: enabled
- when: container_state == "running"
-
- - name: disable firewall ports state when container state is not running
- set_fact:
- fw_state: disabled
- when: container_state != "running"
-
- name: ensure firewalld is installed
tags: firewall
package: name=firewalld state=present
+ become: true
when: ansible_pkg_mgr != "atomic_container"
- - name: ensure firewalld is installed (on fedora-iot)
- tags: firewall
- command: >-
- rpm-ostree install --idempotent --unchanged-exit-77
- --allow-inactive firewalld
- register: ostree
- failed_when: not ( ostree.rc == 77 or ostree.rc == 0 )
- changed_when: ostree.rc != 77
+ - name: Ensure firewalld is installed (rpm-ostree)
when: ansible_pkg_mgr == "atomic_container"
+ block:
+ - name: Ensure firewalld is installed (rpm-ostree)
+ tags: firewall
+ community.general.rpm_ostree_pkg:
+ name: firewalld
+ become: true
+ register: ostree
- - name: reboot if new stuff was installed
- reboot:
- reboot_timeout: 300
- when:
- - ansible_pkg_mgr == "atomic_container"
- - ostree.rc != 77
+ - name: Reboot if firewalld was installed
+ reboot:
+ reboot_timeout: 300
+ become: true
+ when: ostree is changed
- - name: ensure firewall service is running
+ - name: Ensure firewall service is running
tags: firewall
- service: name=firewalld state=started
+ service:
+ name: firewalld
+ state: started
+ become: true
- - name: ensure container's exposed ports firewall state
+ - name: Ensure container's exposed ports firewall state
tags: firewall
ansible.posix.firewalld:
port: "{{ item }}"
permanent: true
immediate: true
- state: "{{ fw_state }}"
+ state: "{% if container_state == 'running' %}enabled{% else %}disabled{% endif %}"
+ become: true
with_items: "{{ container_firewall_ports }}"
- - name: Force all notified handlers to run at this point
- meta: flush_handlers
-
when: container_firewall_ports is defined
- name: do cleanup stuff when container_state is "absent"
block:
- - name: ensure "{{ service_name }}" is disabled at boot
- become: true
- become_user: "{{ container_run_as_user }}"
- # become_method: machinectl
- environment:
- XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
- systemd:
- name: "{{ service_name }}"
- enabled: false
- scope: "{{ systemd_scope }}"
- when:
- - service_file_before_template.stat.exists
+ - name: Check if service file exists
+ stat:
+ path: "{{ service_files_dir }}/{{ service_name }}"
+ register: service_file
- - name: ensure "{{ service_name }}" is stopped
+
+ - name: Ensure "{{ service_name }}" is stopped and disabled at boot
become: true
become_user: "{{ container_run_as_user }}"
# become_method: machinectl
@@ -311,17 +279,15 @@
enabled: false
scope: "{{ systemd_scope }}"
when:
- - service_file_before_template.stat.exists
+ - service_file.stat.exists
- name: clean up systemd service file
file:
path: "{{ service_files_dir }}/{{ service_name }}"
state: absent
+ become: true
notify: reload systemctl
- - name: Force all notified handlers to run at this point
- meta: flush_handlers
-
- name: Check if user is lingering
stat:
path: "/var/lib/systemd/linger/{{ container_run_as_user }}"
@@ -341,3 +307,6 @@
when: container_pod_yaml is defined
when: container_state == "absent"
+
+- name: Force all notified handlers to run at this point
+ meta: flush_handlers
diff --git a/roles/ikke_t.podman_container_systemd/templates/systemd-service-single.j2 b/roles/ikke_t.podman_container_systemd/templates/systemd-service-single.j2
index 559a799..a37b0a8 100644
--- a/roles/ikke_t.podman_container_systemd/templates/systemd-service-single.j2
+++ b/roles/ikke_t.podman_container_systemd/templates/systemd-service-single.j2
@@ -29,7 +29,7 @@ User={{ container_run_as_user }}
ExecStart=/usr/bin/podman run --name {{ container_name }} \
{{ container_run_args }} \
--conmon-pidfile {{ pidfile }} --cidfile {{ cidfile }} \
- {{ container_image_list | first }} {% if container_cmd_args is defined %} \
+ {{ _container_image_list | map(attribute='image') | first }} {% if container_cmd_args is defined %} \
{{ container_cmd_args }} {% endif %}
ExecStop=/usr/bin/sh -c "/usr/bin/podman stop -t "{{ container_stop_timeout }}" `cat {{ cidfile }}`"
diff --git a/roles/ikke_t.podman_container_systemd/tests/test.yml b/roles/ikke_t.podman_container_systemd/tests/test.yml
index af8bb36..55d146b 100644
--- a/roles/ikke_t.podman_container_systemd/tests/test.yml
+++ b/roles/ikke_t.podman_container_systemd/tests/test.yml
@@ -14,7 +14,13 @@
# connection: local
# delegate_to: localhost
vars:
-
+ container_state: running
+# container_state: absent
+ container_instances:
+ - name: lighthttpd-1
+ port: 8080
+ - name: lighthttpd-2
+ port: 8081
tasks:
- name: create test dir for www file
file:
@@ -28,31 +34,34 @@
- name: tests container
vars:
- container_state: running
- # container_state: absent
container_image_list:
- sebp/lighttpd:latest
- container_name: lighttpd
+ container_name: "{{ outer_item.name }}"
container_run_args: >-
--rm
-v /tmp/podman-container-systemd:/var/www/localhost/htdocs:Z
-t
- -p 8080:80/tcp
+ -p "{{ outer_item.port }}:80/tcp"
container_firewall_ports:
- - 8080/tcp
+ - "{{ outer_item.port }}/tcp"
- import_role:
+ ansible.builtin.include_role:
name: podman-container-systemd
+ loop: "{{ container_instances }}"
+ loop_control:
+ loop_var: outer_item
- name: Wait for lighttpd to come up
wait_for:
- port: 8080
+ port: "{{ item.port }}"
+ loop: "{{ container_instances }}"
when: container_state == "running"
- name: test if container runs
get_url:
- url: http://localhost:8080
+ url: "http://localhost:{{ item.port }}"
dest: /tmp/podman-container-systemd/index.return.html
+ loop: "{{ container_instances }}"
register: get_url
when: container_state == "running"
@@ -64,6 +73,9 @@
- debug:
msg:
- "Got http://localhost:8080 to test if it worked!"
- - "This sould state 'file' on success: {{ get_url.state }}"
+ - "This should state 'file' on success: {{ get_url.results[idx].state }}"
- "On success, output should say 'Hello world!' here: {{ curl.stdout }}"
+ loop: "{{ container_instances }}"
+ loop_control:
+ index_var: idx
when: container_state == "running"
diff --git a/roles/ikke_t.podman_container_systemd/vars/main.yml b/roles/ikke_t.podman_container_systemd/vars/main.yml
index 5806a2b..b19204e 100644
--- a/roles/ikke_t.podman_container_systemd/vars/main.yml
+++ b/roles/ikke_t.podman_container_systemd/vars/main.yml
@@ -4,3 +4,6 @@
cidpid_base: "{{ systemd_tempdir }}/%n-"
cidfile: "{{ cidpid_base }}cid"
pidfile: "{{ cidpid_base }}pid"
+
+# kubeval
+kubeval_url: "https://github.com/instrumenta/kubeval/releases/latest"
diff --git a/roles/kickstart-rhv-template/README.md b/roles/kickstart-rhv-template/README.md
deleted file mode 100644
index 7e95429..0000000
--- a/roles/kickstart-rhv-template/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-kickstart-rhv-template
-=========
-
-Initiate a kickstart build of a RHEL Virtual Machine, and use it to create a template.
-
-This role's intended use is as an "automated golden image builder". By running this role on a basis, the generated RHV/oVirt template will be kept up to date. The product can be used as a template to quickly build more VMs that are never more than days old.
-
-Requirements
-------------
-
-A working Satellite and RHV/oVirt installation.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/kickstart-rhv-template/defaults/main.yml b/roles/kickstart-rhv-template/defaults/main.yml
deleted file mode 100644
index cf0979d..0000000
--- a/roles/kickstart-rhv-template/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for kickstart-rhv-template
\ No newline at end of file
diff --git a/roles/kickstart-rhv-template/handlers/main.yml b/roles/kickstart-rhv-template/handlers/main.yml
deleted file mode 100644
index b377465..0000000
--- a/roles/kickstart-rhv-template/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for kickstart-rhv-template
\ No newline at end of file
diff --git a/roles/kickstart-rhv-template/meta/main.yml b/roles/kickstart-rhv-template/meta/main.yml
deleted file mode 100644
index 5d50bf4..0000000
--- a/roles/kickstart-rhv-template/meta/main.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-galaxy_info:
- author: your name
- description: your description
- company: your company (optional)
-
- # If the issue tracker for your role is not on github, uncomment the
- # next line and provide a value
- # issue_tracker_url: http://example.com/issue/tracker
-
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
-
- min_ansible_version: 2.4
-
- # If this a Container Enabled role, provide the minimum Ansible Container version.
- # min_ansible_container_version:
-
- # Optionally specify the branch Galaxy will use when accessing the GitHub
- # repo for this role. During role install, if no tags are available,
- # Galaxy will use this branch. During import Galaxy will access files on
- # this branch. If Travis integration is configured, only notifications for this
- # branch will be accepted. Otherwise, in all cases, the repo's default branch
- # (usually master) will be used.
- #github_branch:
-
- #
- # Provide a list of supported platforms, and for each platform a list of versions.
- # If you don't wish to enumerate all versions for a particular platform, use 'all'.
- # To view available platforms and versions (or releases), visit:
- # https://galaxy.ansible.com/api/v1/platforms/
- #
- # platforms:
- # - name: Fedora
- # versions:
- # - all
- # - 25
- # - name: SomePlatform
- # versions:
- # - all
- # - 1.0
- # - 7
- # - 99.99
-
- galaxy_tags: []
- # List tags for your role here, one per line. A tag is a keyword that describes
- # and categorizes the role. Users find roles by searching for tags. Be sure to
- # remove the '[]' above, if you add tags to this list.
- #
- # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
- # Maximum 20 tags per role.
-
-dependencies: []
- # List your role dependencies here, one per line. Be sure to remove the '[]' above,
- # if you add dependencies to this list.
\ No newline at end of file
diff --git a/roles/kickstart-rhv-template/tasks/main.yml b/roles/kickstart-rhv-template/tasks/main.yml
deleted file mode 100644
index 40d22b2..0000000
--- a/roles/kickstart-rhv-template/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# tasks file for kickstart-rhv-template
-#
-
-- name: Create Fresh oVirt/RHV VM
-
-- name: Create Host in Satellite
-
-- name: Kickstart Host
-
-- name: Anonymize Host
-
-- name: Create Template
-
diff --git a/roles/kickstart-rhv-template/tests/inventory b/roles/kickstart-rhv-template/tests/inventory
deleted file mode 100644
index 878877b..0000000
--- a/roles/kickstart-rhv-template/tests/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
-localhost
-
diff --git a/roles/kickstart-rhv-template/tests/test.yml b/roles/kickstart-rhv-template/tests/test.yml
deleted file mode 100644
index 0261e8f..0000000
--- a/roles/kickstart-rhv-template/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- remote_user: root
- roles:
- - kickstart-rhv-template
\ No newline at end of file
diff --git a/roles/kickstart-rhv-template/vars/main.yml b/roles/kickstart-rhv-template/vars/main.yml
deleted file mode 100644
index 9b1fb88..0000000
--- a/roles/kickstart-rhv-template/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for kickstart-rhv-template
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/.ansible-lint b/roles/linux-system-roles.network/.ansible-lint
deleted file mode 100644
index 86012b1..0000000
--- a/roles/linux-system-roles.network/.ansible-lint
+++ /dev/null
@@ -1,12 +0,0 @@
----
-skip_list:
-- '106' # Role name does not match ^[a-z][a-z0-9_]+$ pattern
-- '206' # Variables should have spaces before and after: {{ var_name }}
-- '208' # File permissions unset or incorrect
-- '301' # Commands should not change things if nothing needs doing
-- '303' # Using command rather than module
-- '305' # Use shell only when shell functionality is required
-- '403' # Package installs should not use latest
-- '502' # All tasks should be named
-- '601' # Don't compare to literal True/False
-- '602' # Don't compare to empty string
diff --git a/roles/linux-system-roles.network/.github/stale.yml b/roles/linux-system-roles.network/.github/stale.yml
deleted file mode 100644
index bfb82b2..0000000
--- a/roles/linux-system-roles.network/.github/stale.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# Configuration for probot-stale - https://github.com/probot/stale
-daysUntilStale: 30
-daysUntilClose: 14
-staleLabel: stale
-markComment: >
- Thank you for your contribution! There was no activity in this pull request
- recently. To avoid pull requests to pile up, an automated process marked this
- pull request as stale. It will close this pull request if no further activity
- occurs. The current policy is available at:
- https://github.com//linux-system-roles/network/blob/main/.github/stale.yml
-only: pulls
diff --git a/roles/linux-system-roles.network/.github/workflows/markdownlint.yml b/roles/linux-system-roles.network/.github/workflows/markdownlint.yml
deleted file mode 100644
index dce6140..0000000
--- a/roles/linux-system-roles.network/.github/workflows/markdownlint.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-name: markdownlint
-on: [push, pull_request]
-jobs:
- markdownlint:
- runs-on: ubuntu-latest
- steps:
- - name: Check out code
- uses: actions/checkout@main
- - name: Run mdl
- uses: actionshub/markdownlint@main
diff --git a/roles/linux-system-roles.network/.github/workflows/tox.yml b/roles/linux-system-roles.network/.github/workflows/tox.yml
deleted file mode 100644
index c7add26..0000000
--- a/roles/linux-system-roles.network/.github/workflows/tox.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-# yamllint disable rule:line-length
-name: tox
-on: # yamllint disable-line rule:truthy
- - pull_request
-env:
- TOX_LSR: "git+https://github.com/linux-system-roles/tox-lsr@2.4.0"
- LSR_ANSIBLE_TEST_DOCKER: "true"
- LSR_ANSIBLES: 'ansible==2.8.* ansible==2.9.*'
- LSR_MSCENARIOS: default
- # LSR_EXTRA_PACKAGES: "libdbus-1-dev libgirepository1.0-dev python3-dev"
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-jobs:
- python:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- pyver: ['2.7', '3.6', '3.7', '3.8']
- steps:
- - name: checkout PR
- uses: actions/checkout@v2
- - name: Set up Python
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.pyver }}
- - name: Install platform dependencies, python, tox, tox-lsr
- run: |
- set -euxo pipefail
- python -m pip install --upgrade pip
- sudo apt-get update
- sudo apt-get install git
- pip install "$TOX_LSR"
- lsr_ci_preinstall
- - name: Run tox tests
- run: |
- set -euxo pipefail
- toxpyver=$(echo "${{ matrix.pyver }}" | tr -d .)
- toxenvs="py${toxpyver}"
- case "$toxpyver" in
- 27) toxenvs="${toxenvs},coveralls,flake8,pylint" ;;
- 36) toxenvs="${toxenvs},coveralls,black,yamllint,ansible-lint,collection" ;;
- 37) toxenvs="${toxenvs},coveralls" ;;
- 38) toxenvs="${toxenvs},coveralls" ;;
- esac
- TOXENV="$toxenvs" lsr_ci_runtox
- python-26:
- runs-on: ubuntu-latest
- steps:
- - name: checkout PR
- uses: actions/checkout@v2
- - name: Run py26 tests
- uses: linux-system-roles/lsr-gh-action-py26@1.0.1
diff --git a/roles/linux-system-roles.network/.gitignore b/roles/linux-system-roles.network/.gitignore
deleted file mode 100644
index 69dd322..0000000
--- a/roles/linux-system-roles.network/.gitignore
+++ /dev/null
@@ -1,13 +0,0 @@
-/.cache
-/.coverage
-*.pyc
-/.pytest_cache
-/tests/.coverage
-/tests/htmlcov*
-/tests/__pycache__/
-/tests/remote-coveragedata-*
-/tests/tmp_merge_coveragerc
-/tests/total-*coveragedata
-/.tox
-/.vagrant
-/.vscode
diff --git a/roles/linux-system-roles.network/.lgtm.yml b/roles/linux-system-roles.network/.lgtm.yml
deleted file mode 100644
index da28dfc..0000000
--- a/roles/linux-system-roles.network/.lgtm.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-extraction:
- python:
- python_setup:
- version: 2
diff --git a/roles/linux-system-roles.network/.mdl_style.rb b/roles/linux-system-roles.network/.mdl_style.rb
deleted file mode 100644
index 3f44d3c..0000000
--- a/roles/linux-system-roles.network/.mdl_style.rb
+++ /dev/null
@@ -1,9 +0,0 @@
-all
-# https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md#md003---header-style
-rule 'MD003', :style => :setext_with_atx
-# https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md#md013---line-length
-rule 'MD013', :line_length => 88
-# https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md#md029---ordered-list-item-prefix
-rule 'MD029', :style => :ordered
-# https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md#md024---multiple-headers-with-the-same-content
-rule "MD024", :allow_different_nesting => true
diff --git a/roles/linux-system-roles.network/.mdlrc b/roles/linux-system-roles.network/.mdlrc
deleted file mode 100644
index 1f82ca2..0000000
--- a/roles/linux-system-roles.network/.mdlrc
+++ /dev/null
@@ -1 +0,0 @@
-style '.mdl_style.rb'
diff --git a/roles/linux-system-roles.network/.travis/custom.sh b/roles/linux-system-roles.network/.travis/custom.sh
deleted file mode 100755
index a929d2a..0000000
--- a/roles/linux-system-roles.network/.travis/custom.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: MIT
-
-set -e
-
-. "$LSR_SCRIPTDIR/utils.sh"
-
-# Write your custom commands here that should be run when `tox -e custom`:
-if lsr_check_python_version python -eq '3.6'; then
- (set -x; cd "${TOPDIR}/tests"; python ./ensure_provider_tests.py)
-fi
diff --git a/roles/linux-system-roles.network/.yamllint.yml b/roles/linux-system-roles.network/.yamllint.yml
deleted file mode 100644
index e50c134..0000000
--- a/roles/linux-system-roles.network/.yamllint.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: MIT
----
-extends: yamllint_defaults.yml
-# possible customizations over the base yamllint config
-# skip the yaml files in the /tests/ directory
-# NOTE: If you want to customize `ignore` you'll have to
-# copy in all of the config from .yamllint.yml, then
-# add your own - so if you want to just add /tests/ to
-# be ignored, you'll have to add the ignores from the base
-# ignore: |
-# /tests/
-# /.tox/
-# skip checking line length
-# NOTE: the above does not apply to `rules` - you do not
-# have to copy all of the rules from the base config
-# rules:
-# line-length: disable
-rules:
- truthy: disable
- line-length:
- ignore: |
- /tests/tests_wireless_plugin_installation_nm.yml
- /tests/tests_team_plugin_installation_nm.yml
diff --git a/roles/linux-system-roles.network/CHANGELOG.md b/roles/linux-system-roles.network/CHANGELOG.md
deleted file mode 100644
index 109910c..0000000
--- a/roles/linux-system-roles.network/CHANGELOG.md
+++ /dev/null
@@ -1,49 +0,0 @@
-Changelog
-=========
-
-[1.3.0] - 2021-04-08
---------------------
-
-### Changes
-
-- Use inclusive language
- - `slave` is deprecated in favor of `port`
- - `master` is deprecated in favor of `controller`
-
-### New features
-
-- Support disabling IPv6
-- Support `dns_options` when using one or more IPv4 nameservers
-- Support Ethtool coalesce settings
-- Support dummy interfaces
-
-### Bug fixes
-
-- Fix static IPv6 support for initscripts provider
-
-[1.2.0] - 2020-08-26
---------------------
-
-### Changes
-
-- Rename ethtool features to use underscores instead of dashes to support
- Jinja2 dot notation. Accept old notation for compatibility with existing
- playbooks.
-
-### New features
-
-- Initial 802.1x authentication support (only EAP-TLS)
-- Wireless support
-- Handle OracleLinux as a RHEL clone
-- Remove dependency on ethtool command line tool
-- initscripts: Support creating and activating bond profiles in one run
-- Ignore up/down states if a profile is not defined and not present on the
- managed host
-- Document bond profiles
-
-### Bug fixes
-
-- NetworkManager: Always rollback checkpoint on failure
-- NetworkManager: Try to reapply changes to reduce network interruptions
-- initscripts: Fix dependencies for Fedora 32
-- Only log actual warnings as Ansible warnings
diff --git a/roles/linux-system-roles.network/LICENSE b/roles/linux-system-roles.network/LICENSE
deleted file mode 100644
index 6117e71..0000000
--- a/roles/linux-system-roles.network/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-BSD-3-Clause License
-
-Copyright (c) 2017-2018 Red Hat, Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
-this list of conditions and the following disclaimer in the documentation
-and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its contributors
-may be used to endorse or promote products derived from this software without
-specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/roles/linux-system-roles.network/README.md b/roles/linux-system-roles.network/README.md
deleted file mode 100644
index 0648165..0000000
--- a/roles/linux-system-roles.network/README.md
+++ /dev/null
@@ -1,915 +0,0 @@
-linux-system-roles/network
-==========================
-
-[](https://coveralls.io/github/linux-system-roles/network)
-
-[](https://github.com/ambv/black)
-[](https://lgtm.com/projects/g/linux-system-roles/network/context:python)
-
-Overview
---------
-
-The `network` role enables users to configure network on the target machines.
-This role can be used to configure:
-
-- Ethernet interfaces
-- Bridge interfaces
-- Bonded interfaces
-- VLAN interfaces
-- MacVLAN interfaces
-- Infiniband interfaces
-- Wireless (WiFi) interfaces
-- IP configuration
-- 802.1x authentication
-
-Introduction
-------------
-
-The `network` role supports two providers: `nm` and `initscripts`. `nm` is
-used by default in RHEL7 and `initscripts` in RHEL6. These providers can be
-configured per host via the [`network_provider`](#provider) variable. In
-absence of explicit configuration, it is autodetected based on the
-distribution. However, note that either `nm` or `initscripts` is not tied to a certain
-distribution. The `network` role works everywhere the required API is available.
-This means that `nm` requires at least NetworkManager's API version 1.2 available.
-For `initscripts`, the legacy network service is required as used in Fedora or RHEL.
-
-For each host a list of networking profiles can be configured via the
-`network_connections` variable.
-
-- For `initscripts`, profiles correspond to ifcfg files in the
- `/etc/sysconfig/network-scripts/ifcfg-*` directory.
-
-- For `NetworkManager`, profiles correspond to connection profiles as handled by
- NetworkManager. Fedora and RHEL use the `ifcfg-rh-plugin` for NetworkManager,
- which also writes or reads configuration files to `/etc/sysconfig/network-scripts/ifcfg-*`
- for compatibility.
-
-Note that the `network` role primarily operates on networking profiles
-(connections) and not on devices, but it uses the profile name by default as
-the interface name. It is also possible to create generic profiles, by creating
-for example a profile with a certain IP configuration without activating the
-profile. To apply the configuration to the actual networking interface, use the
-`nmcli` commands on the target system.
-
-**Warning**: The `network` role updates or creates all connection profiles on
-the target system as specified in the `network_connections` variable. Therefore,
-the `network` role removes options from the specified profiles if the options are
-only present on the system but not in the `network_connections` variable.
-Exceptions are mentioned below.
-
-Variables
----------
-
-The `network` role is configured via variables starting with `network_` as
-the name prefix. List of variables:
-
-- `network_provider` - The `network_provider` variable allows to set a specific
- provider (`nm` or `initscripts`) . Setting it to `{{
- network_provider_os_default }}`, the provider is set depending on the
- operating system. This is usually `nm` except for RHEL 6 or CentOS 6 systems.
- Changing the provider for an existing profile is not supported. To switch
- providers, it is recommended to first remove profiles with the old provider
- and then create new profiles with the new provider.
-- `network_connections` - The connection profiles are configured as
- `network_connections`, which is a list of dictionaries that include specific
- options.
-- `network_allow_restart` - Certain configurations require the role to restart
- network services. For example, if a wireless connection is configured and
- NetworkManager-wifi is not installed, NetworkManager must be restarted prior
- to the connection being configured. Setting this to `no` will prevent the
- role from restarting network service.
-
-Examples of Variables
----------------------
-
-Setting the variables
-
-```yaml
-network_provider: nm
-network_connections:
- - name: eth0
- #...
-network_allow_restart: yes
-```
-
-Options
--------
-
-The `network_connections` variable is a list of dictionaries that include the
-following options. List of options:
-
-### `name` (required)
-
-The `name` option identifies the connection profile. It is not the name of the
-networking interface for which the profile applies, though we can associate
-the profile with an interface and give them the same name.
-Note that you can have multiple profiles for the same device, but only
-one profile can be active on the device each time.
-For NetworkManager, a connection can only be active at one device each time.
-
-- For `NetworkManager`, the `name` option corresponds to the
- [`connection.id`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.connection.id)
- property option.
- Although NetworkManager supports multiple connections with the same `connection.id`,
- the `network` role cannot handle a duplicate `name`. Specifying a `name` multiple
- times refers to the same connection profile.
-
-- For `initscripts`, the `name` option determines the ifcfg file name `/etc/sysconfig/network-scripts/ifcfg-$NAME`.
- Note that the `name` does not specify the `DEVICE` but a filename. As a consequence,
- `'/'` is not a valid character for the `name`.
-
-You can also use the same connection profile multiple times. Therefore, it is possible
-to create a profile and activate it separately.
-
-### `state`
-
-The `state` option identifies what is the runtime state of each connection profile. The
-`state` option (optional) can be set to the following values:
-
-- `up` - the connection profile is activated
-- `down` - the connection profile is deactivated
-
-#### `state: up`
-
-- For `NetworkManager`, this corresponds to `nmcli connection id {{name}} up`.
-
-- For `initscripts`, this corresponds to `ifup {{name}}`.
-
-When the `state` option is set to `up`, you can also specify the `wait` option (optional):
-
-- `wait: 0` - initiates only the activation, but does not wait until the device is fully
- connected. The connection will be completed in the background, for example after a
- DHCP lease was received.
-- `wait: ` is a timeout that enables you to decide how long you give the device
- to activate. The default is using a suitable timeout. Note that the `wait` option is
- only supported by NetworkManager.
-
-Note that `state: up` always re-activates the profile and possibly changes the
-networking configuration, even if the profile was already active before. As
-a consequence, `state: up` always changes the system.
-
-#### `state: down`
-
-- For `NetworkManager`, it corresponds to `nmcli connection id {{name}} down`.
-
-- For `initscripts`, it corresponds to call `ifdown {{name}}`.
-
-You can deactivate a connection profile, even if is currently not active. As a
-consequence, `state: down` always changes the system.
-
-Note that if the `state` option is unset, the connection profile’s runtime state will
-not be changed.
-
-### `persistent_state`
-
-The `persistent_state` option identifies if a connection profile is persistent (saved on
-disk). The `persistent_state` option can be set to the following values:
-
-#### `persistent_state: present` (default)
-
-Note that if `persistent_state` is `present` and the connection profile contains
-the `type` option, the profile will be created or updated. If the connection profile is
-incomplete (no `type` option), the behavior is undefined. Also, the `present` value
-does not directly result in a change in the network configuration. If the `state` option
-is not set to `up`, the profile is only created or modified, not activated.
-
-For NetworkManager, the new connection profile is created with the `autoconnect`
-option enabled by default. Therefore, NetworkManager can activate the new
-profile on a currently disconnected device. ([rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515)).
-
-#### `persistent_state: absent`
-
-The `absent` value ensures that the profile is not present on the
-target host. If a profile with the given `name` exists, it will be deleted. In this case:
-
-- `NetworkManager` deletes all connection profiles with the corresponding
- `connection.id`. Deleting a profile usually does not change the current networking
- configuration, unless the profile was currently activated on a device. Deleting the
- currently active connection profile disconnects the device. That makes the device
- eligible to autoconnect another connection (for more details, see
- [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515)).
-
-- `initscripts` deletes the ifcfg file in most cases with no impact on the runtime state
- of the system unless some component is watching the sysconfig directory.
-
-**Note**: For profiles that only contain a `state` option, the `network` role only activates
-or deactivates the connection without changing its configuration.
-
-### `type`
-
-The `type` option can be set to the following values:
-
-- `ethernet`
-- `bridge`
-- `bond`
-- `team`
-- `vlan`
-- `macvlan`
-- `infiniband`
-- `wireless`
-
-#### `type: ethernet`
-
-If the type is `ethernet`, then there can be an extra `ethernet` dictionary with the following
-items (options): `autoneg`, `speed` and `duplex`, which correspond to the
-settings of the `ethtool` utility with the same name.
-
-- `autoneg`: `yes` (default) or `no` [if auto-negotiation is enabled or disabled]
-- `speed`: speed in Mbit/s
-- `duplex`: `half` or `full`
-
-Note that the `speed` and `duplex` link settings are required when autonegotiation is
-disabled (`autoneg: no`).
-
-#### `type: bridge`, `type: bond`, `type: team`
-
-The `bridge`, `bond`, `team` device types work similar. Note that `team` is not
-supported in RHEL6 kernels.
-
-For ports, the `port_type` and `controller` properties must be set. Note that ports
-should not have `ip` settings.
-
-The `controller` refers to the `name` of a profile in the Ansible
-playbook. It is neither an interface-name nor a connection-id of
-NetworkManager.
-
-- For NetworkManager, `controller` will be converted to the `connection.uuid`
- of the corresponding profile.
-
-- For initscripts, the controller is looked up as the `DEVICE` from the corresponding
- ifcfg file.
-
-As `controller` refers to other profiles of the same or another play, the order of the
-`connections` list matters. Profiles that are referenced by other profiles need to be
-specified first. Also, `--check` ignores the value of the `controller` and assumes it
-will be present during a real run. That means, in presence of an invalid `controller`,
-`--check` may signal success but the actual play run fails.
-
-The `team` type uses `roundrobin` as the `runner` configuration. No further
-configuration is supported at the moment.
-
-#### `type: vlan`
-
-Similar to `controller`, the `parent` references the connection profile in the ansible
-role.
-
-#### `type: macvlan`
-
-Similar to `controller` and `vlan`, the `parent` references the connection profile in
-the ansible role.
-
-#### `type: wireless`
-
-The `wireless` type supports WPA-PSK (password) authentication and WPA-EAP (802.1x)
-authentication.
-
-`nm` (NetworkManager) is the only supported `network_provider` for this type.
-
-If WPA-EAP is used, ieee802_1x settings must be defined in the
-[ieee802_1x](#-`ieee802_1x`) option.
-
-The following options are supported:
-
-- `ssid`: the SSID of the wireless network (required)
-- `key_mgmt`: `wpa-psk` or `wpa-eap` (required)
-- `password`: password for the network (required if `wpa-psk` is used)
-
-### `autoconnect`
-
-By default, profiles are created with autoconnect enabled.
-
-- For `NetworkManager`, this corresponds to the `connection.autoconnect` property.
-
-- For `initscripts`, this corresponds to the `ONBOOT` property.
-
-### `mac`
-
-The `mac` address is optional and restricts the profile to be usable only on
-devices with the given MAC address. `mac` is only allowed for `type`
-`ethernet` or `infiniband` to match a non-virtual device with the
-profile. The value of the `mac` address needs to be specified in hexadecimal notation
-using colons (for example: `mac: "00:00:5e:00:53:5d"`). To avoid YAML parsing mac
-addresses as integers in sexagesimal (base 60) notation (see
-), it is recommended to always quote the value
-with double quotes and sometimes it is necessary.
-
-- For `NetworkManager`, `mac` is the permanent MAC address, `ethernet.mac-address`.
-
-- For `initscripts`, `mac` is the currently configured MAC address of the device (`HWADDR`).
-
-### `mtu`
-
-The `mtu` option denotes the maximum transmission unit for the profile's
-device. The maximum value depends on the device. For virtual devices, the
-maximum value of the `mtu` option depends on the underlying device.
-
-### `interface_name`
-
-For the `ethernet` and `infiniband` types, the `interface_name` option restricts the
-profile to the given interface by name. This argument is optional and by default the
-profile name is used unless a mac address is specified using the `mac` key. Specifying
-an empty string (`""`) means that the profile is not restricted to a network interface.
-
-**Note:** With [persistent interface naming](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/ch-Consistent_Network_Device_Naming.html),
-the interface is predictable based on the hardware configuration.
-Otherwise, the `mac` address might be an option.
-
-For virtual interface types such as bridges, the `interface_name` is the name of the created
-interface. In case of a missing `interface_name`, the profile `name` is used.
-
-**Note:** The `name` (the profile name) and the `interface_name` (the device name) may be
-different or the profile may not be tied to an interface at all.
-
-### `zone`
-
-The `zone` option sets the firewalld zone for the interface.
-
-Ports to the bridge, bond or team devices cannot specify a zone.
-
-### `ip`
-
-The IP configuration supports the following options:
-
-- `address`
-
- Manual addressing can be specified via a list of addresses under the `address` option.
-
-- `dhcp4`, `auto6`, and `ipv6_disabled`
-
-  Alternatively, automatic addressing can be requested by setting either `dhcp4` or `auto6`.
- The `dhcp4` key is for DHCPv4 and `auto6` for StateLess Address Auto Configuration
- (SLAAC). Note that the `dhcp4` and `auto6` keys can be omitted and the default key
- depends on the presence of manual addresses. `ipv6_disabled` can be set to disable
- ipv6 for the connection.
-
-- `dhcp4_send_hostname`
-
- If `dhcp4` is enabled, it can be configured whether the DHCPv4 request includes
- the hostname via the `dhcp4_send_hostname` option. Note that `dhcp4_send_hostname`
- is only supported by the `nm` provider and corresponds to
- [`ipv4.dhcp-send-hostname`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.dhcp-send-hostname)
- property.
-
-- `dns`
-
- Manual DNS configuration can be specified via a list of addresses given in the
- `dns` option.
-
-- `dns_search`
-
- `dns_search` is only supported for IPv4 nameservers. Manual DNS configuration can
- be specified via a list of domains to search given in the `dns_search` option.
-
-- `dns_options`
-
- `dns_options` is only supported for the NetworkManager provider and IPv4
- nameservers. Manual DNS configuration via a list of DNS options can be given in the
- `dns_options`. The list of supported DNS options for IPv4 nameservers is described
- in [man 5 resolv.conf](https://man7.org/linux/man-pages/man5/resolv.conf.5.html).
- Currently, the list of supported DNS options is:
- - `attempts:n`
- - `debug`
- - `edns0`
- - `ndots:n`
- - `no-check-names`
- - `no-reload`
- - `no-tld-query`
- - `rotate`
- - `single-request`
- - `single-request-reopen`
- - `timeout:n`
- - `trust-ad`
- - `use-vc`
-
- **Note:** The "trust-ad" setting is only honored if the profile contributes name
- servers to resolv.conf, and if all contributing profiles have "trust-ad" enabled.
- When using a caching DNS plugin (dnsmasq or systemd-resolved in NetworkManager.conf)
- then "edns0" and "trust-ad" are automatically added.
-
-- `route_metric4` and `route_metric6`
-
-  For `NetworkManager`, `route_metric4` and `route_metric6` correspond to the
- [`ipv4.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv4.route-metric)
- and
- [`ipv6.route-metric`](https://developer.gnome.org/NetworkManager/stable/nm-settings.html#nm-settings.property.ipv6.route-metric)
- properties, respectively. If specified, it determines the route metric for DHCP
- assigned routes and the default route, and thus the priority for multiple
- interfaces.
-
-- `route`
-
- Static route configuration can be specified via a list of routes given in the
- `route` option. The default value is an empty list. Each route is a dictionary with
- the following entries: `network`, `prefix`, `gateway` and `metric`. `network` and
- `prefix` specify the destination network.
- Note that Classless inter-domain routing (CIDR) notation or network mask notation
- are not supported yet.
-
-- `route_append_only`
-
-  The `route_append_only` option only allows adding new routes to the
-  existing routes on the system.
-
- If the `route_append_only` boolean option is set to `yes`, the specified routes are
- appended to the existing routes. If `route_append_only` is set to `no` (default),
- the current routes are replaced. Note that setting `route_append_only` to `yes`
- without setting `route` has the effect of preserving the current static routes.
-
-- `rule_append_only`
-
-  The `rule_append_only` boolean option allows preserving the current routing rules.
- Note that specifying routing rules is not supported yet.
-
-**Note:** When `route_append_only` or `rule_append_only` is not specified, the network
-role deletes the current routes or routing rules.
-
-**Note:** Ports to the bridge, bond or team devices cannot specify `ip` settings.
-
-### `ethtool`
-
-The ethtool settings allow enabling or disabling various features. The names
-correspond to the names used by the `ethtool` utility. Depending on the actual
-kernel and device, changing some options might not be supported.
-
-```yaml
- ethtool:
- features:
- esp_hw_offload: yes|no # optional
- esp_tx_csum_hw_offload: yes|no # optional
- fcoe_mtu: yes|no # optional
- gro: yes|no # optional
- gso: yes|no # optional
- highdma: yes|no # optional
- hw_tc_offload: yes|no # optional
- l2_fwd_offload: yes|no # optional
- loopback: yes|no # optional
- lro: yes|no # optional
- ntuple: yes|no # optional
- rx: yes|no # optional
- rx_all: yes|no # optional
- rx_fcs: yes|no # optional
- rx_gro_hw: yes|no # optional
- rx_udp_tunnel_port_offload: yes|no # optional
- rx_vlan_filter: yes|no # optional
- rx_vlan_stag_filter: yes|no # optional
- rx_vlan_stag_hw_parse: yes|no # optional
- rxhash: yes|no # optional
- rxvlan: yes|no # optional
- sg: yes|no # optional
- tls_hw_record: yes|no # optional
- tls_hw_tx_offload: yes|no # optional
- tso: yes|no # optional
- tx: yes|no # optional
- tx_checksum_fcoe_crc: yes|no # optional
- tx_checksum_ip_generic: yes|no # optional
- tx_checksum_ipv4: yes|no # optional
- tx_checksum_ipv6: yes|no # optional
- tx_checksum_sctp: yes|no # optional
- tx_esp_segmentation: yes|no # optional
- tx_fcoe_segmentation: yes|no # optional
- tx_gre_csum_segmentation: yes|no # optional
- tx_gre_segmentation: yes|no # optional
- tx_gso_partial: yes|no # optional
- tx_gso_robust: yes|no # optional
- tx_ipxip4_segmentation: yes|no # optional
- tx_ipxip6_segmentation: yes|no # optional
- tx_nocache_copy: yes|no # optional
- tx_scatter_gather: yes|no # optional
- tx_scatter_gather_fraglist: yes|no # optional
- tx_sctp_segmentation: yes|no # optional
- tx_tcp_ecn_segmentation: yes|no # optional
- tx_tcp_mangleid_segmentation: yes|no # optional
- tx_tcp_segmentation: yes|no # optional
- tx_tcp6_segmentation: yes|no # optional
- tx_udp_segmentation: yes|no # optional
- tx_udp_tnl_csum_segmentation: yes|no # optional
- tx_udp_tnl_segmentation: yes|no # optional
- tx_vlan_stag_hw_insert: yes|no # optional
- txvlan: yes|no # optional
- coalesce:
- adaptive_rx: yes|no # optional
- adaptive_tx: yes|no # optional
-      pkt_rate_high: 0 # optional minimum=0 maximum=0xffffffff
-      pkt_rate_low: 0 # optional minimum=0 maximum=0xffffffff
-      rx_frames: 0 # optional minimum=0 maximum=0xffffffff
-      rx_frames_high: 0 # optional minimum=0 maximum=0xffffffff
-      rx_frames_irq: 0 # optional minimum=0 maximum=0xffffffff
-      rx_frames_low: 0 # optional minimum=0 maximum=0xffffffff
-      rx_usecs: 0 # optional minimum=0 maximum=0xffffffff
-      rx_usecs_high: 0 # optional minimum=0 maximum=0xffffffff
-      rx_usecs_irq: 0 # optional minimum=0 maximum=0xffffffff
-      rx_usecs_low: 0 # optional minimum=0 maximum=0xffffffff
-      sample_interval: 0 # optional minimum=0 maximum=0xffffffff
-      stats_block_usecs: 0 # optional minimum=0 maximum=0xffffffff
-      tx_frames: 0 # optional minimum=0 maximum=0xffffffff
-      tx_frames_high: 0 # optional minimum=0 maximum=0xffffffff
-      tx_frames_irq: 0 # optional minimum=0 maximum=0xffffffff
-      tx_frames_low: 0 # optional minimum=0 maximum=0xffffffff
-      tx_usecs: 0 # optional minimum=0 maximum=0xffffffff
-      tx_usecs_high: 0 # optional minimum=0 maximum=0xffffffff
-      tx_usecs_irq: 0 # optional minimum=0 maximum=0xffffffff
-      tx_usecs_low: 0 # optional minimum=0 maximum=0xffffffff
-```
-
-### `ieee802_1x`
-
-Configures 802.1x authentication for an interface.
-
-Currently, NetworkManager is the only supported provider and EAP-TLS is the only
-supported EAP method.
-
-SSL certificates and keys must be deployed on the host prior to running the role.
-
-- `eap`
-
- The allowed EAP method to be used when authenticating to the network with 802.1x.
-
- Currently, `tls` is the default and the only accepted value.
-
-- `identity` (required)
-
- Identity string for EAP authentication methods.
-
-- `private_key` (required)
-
- Absolute path to the client's PEM or PKCS#12 encoded private key used for 802.1x
- authentication.
-
-- `private_key_password`
-
- Password to the private key specified in `private_key`.
-
-- `private_key_password_flags`
-
- List of flags to configure how the private key password is managed.
-
- Multiple flags may be specified.
-
- Valid flags are:
- - `none`
- - `agent-owned`
- - `not-saved`
- - `not-required`
-
-  See NetworkManager documentation on "Secret flag types" for more details (`man 5
-  nm-settings`).
-
-- `client_cert` (required)
-
- Absolute path to the client's PEM encoded certificate used for 802.1x
- authentication.
-
-- `ca_cert`
-
- Absolute path to the PEM encoded certificate authority used to verify the EAP
- server.
-
-- `ca_path`
-
- Absolute path to directory containing additional pem encoded ca certificates used to
- verify the EAP server. Can be used instead of or in addition to ca_cert. Cannot be
- used if system_ca_certs is enabled.
-
-- `system_ca_certs`
-
- If set to `True`, NetworkManager will use the system's trusted ca
- certificates to verify the EAP server.
-
-- `domain_suffix_match`
-
- If set, NetworkManager will ensure the domain name of the EAP server certificate
- matches this string.
-
-### `bond`
-
-The `bond` setting configures the options of bonded interfaces
-(type `bond`). It supports the following options:
-
-- `mode`
-
- Bonding mode. See the
- [kernel documentation](https://www.kernel.org/doc/Documentation/networking/bonding.txt)
- or your distribution `nmcli` documentation for valid values.
- NetworkManager defaults to `balance-rr`.
-
-- `miimon`
-
- Sets the MII link monitoring interval (in milliseconds)
-
-Examples of Options
--------------------
-
-Setting the same connection profile multiple times:
-
-```yaml
-network_connections:
- - name: Wired0
- type: ethernet
- interface_name: eth0
- ip:
- dhcp4: yes
-
- - name: Wired0
- state: up
-```
-
-Activating a preexisting connection profile:
-
-```yaml
-network_connections:
- - name: eth0
- state: up
-```
-
-Deactivating a preexisting connection profile:
-
-```yaml
-network_connections:
- - name: eth0
- state: down
-```
-
-Creating a persistent connection profile:
-
-```yaml
-network_connections:
- - name: eth0
- #persistent_state: present # default
- type: ethernet
- autoconnect: yes
- mac: "00:00:5e:00:53:5d"
- ip:
- dhcp4: yes
-```
-
-Deleting a connection profile named `eth0` (if it exists):
-
-```yaml
-network_connections:
- - name: eth0
- persistent_state: absent
-```
-
-Configuring the Ethernet link settings:
-
-```yaml
-network_connections:
- - name: eth0
- type: ethernet
-
- ethernet:
- autoneg: no
- speed: 1000
- duplex: full
-```
-
-Creating a bridge connection:
-
-```yaml
-network_connections:
- - name: br0
- type: bridge
- #interface_name: br0 # defaults to the connection name
-```
-
-Configuring a bridge connection:
-
-```yaml
-network_connections:
- - name: internal-br0
- interface_name: br0
- type: bridge
- ip:
- dhcp4: no
- auto6: no
-```
-
-Setting `controller` and `port_type`:
-
-```yaml
-network_connections:
- - name: br0-bond0
- type: bond
- interface_name: bond0
- controller: internal-br0
- port_type: bridge
-
- - name: br0-bond0-eth1
- type: ethernet
- interface_name: eth1
- controller: br0-bond0
- port_type: bond
-```
-
-Configuring VLANs:
-
-```yaml
-network_connections:
- - name: eth1-profile
-    autoconnect: no
- type: ethernet
- interface_name: eth1
- ip:
- dhcp4: no
- auto6: no
-
- - name: eth1.6
- autoconnect: no
- type: vlan
- parent: eth1-profile
- vlan:
- id: 6
- ip:
- address:
- - 192.0.2.5/24
- auto6: no
-```
-
-Configuring MACVLAN:
-
-```yaml
-network_connections:
- - name: eth0-profile
- type: ethernet
- interface_name: eth0
- ip:
- address:
- - 192.168.0.1/24
-
- - name: veth0
- type: macvlan
- parent: eth0-profile
- macvlan:
- mode: bridge
- promiscuous: yes
- tap: no
- ip:
- address:
- - 192.168.1.1/24
-```
-
-Configuring a wireless connection:
-
-```yaml
-network_connections:
- - name: wlan0
- type: wireless
- wireless:
- ssid: "My WPA2-PSK Network"
- key_mgmt: "wpa-psk"
- # recommend vault encrypting the wireless password
- # see https://docs.ansible.com/ansible/latest/user_guide/vault.html
- password: "p@55w0rD"
-```
-
-Setting the IP configuration:
-
-```yaml
-network_connections:
- - name: eth0
- type: ethernet
- ip:
- route_metric4: 100
- dhcp4: no
- #dhcp4_send_hostname: no
- gateway4: 192.0.2.1
-
- dns:
- - 192.0.2.2
- - 198.51.100.5
- dns_search:
- - example.com
- - subdomain.example.com
- dns_options:
- - rotate
- - timeout:1
-
- route_metric6: -1
- auto6: no
- gateway6: 2001:db8::1
-
- address:
- - 192.0.2.3/24
- - 198.51.100.3/26
- - 2001:db8::80/7
-
- route:
- - network: 198.51.100.128
- prefix: 26
- gateway: 198.51.100.1
- metric: 2
- - network: 198.51.100.64
- prefix: 26
- gateway: 198.51.100.6
- metric: 4
- route_append_only: no
- rule_append_only: yes
-```
-
-Configuring 802.1x:
-
-```yaml
-network_connections:
- - name: eth0
- type: ethernet
- ieee802_1x:
- identity: myhost
- eap: tls
- private_key: /etc/pki/tls/client.key
- # recommend vault encrypting the private key password
- # see https://docs.ansible.com/ansible/latest/user_guide/vault.html
- private_key_password: "p@55w0rD"
- client_cert: /etc/pki/tls/client.pem
- ca_cert: /etc/pki/tls/cacert.pem
- domain_suffix_match: example.com
-```
-
-### Invalid and Wrong Configuration
-
-The `network` role rejects invalid configurations. It is recommended to test the role
-with `--check` first. There is no protection against wrong (but valid) configuration.
-Double-check your configuration before applying it.
-
-Compatibility
--------------
-
-The `network` role supports the same configuration scheme for both providers (`nm`
-and `initscripts`). That means, you can use the same playbook with NetworkManager
-and initscripts. However, note that not every option is handled exactly the same
-by every provider. Do a test run first with `--check`.
-
-It is not supported to create a configuration for one provider, and expect another
-provider to handle them. For example, creating profiles with the `initscripts` provider,
-and later enabling NetworkManager is not guaranteed to work automatically. Possibly,
-you have to adjust the configuration so that it can be used by another provider.
-
-For example, configuring a RHEL6 host with initscripts and upgrading to
-RHEL7 while continuing to use initscripts in RHEL7 is an acceptable scenario. What
-is not guaranteed is to upgrade to RHEL7, disable initscripts and expect NetworkManager
-to take over the configuration automatically.
-
-Depending on NetworkManager's configuration, connections may be stored as ifcfg files
-as well, but it is not guaranteed that plain initscripts can handle these ifcfg files
-after disabling the NetworkManager service.
-
-Limitations
------------
-
-As Ansible usually works via the network, for example via SSH, there are some
-limitations to be considered:
-
-The `network` role does not support bootstrapping networking configuration. One option
-may be
-[ansible-pull](https://docs.ansible.com/ansible/playbooks_intro.html#ansible-pull).
-Another option may be to initially auto-configure the host during installation (ISO
-based, kickstart, etc.), so that the host is connected to a management LAN or VLAN. It
-strongly depends on your environment.
-
-For `initscripts` provider, deploying a profile merely means to create the ifcfg
-files. Nothing happens automatically until the play issues `ifup` or `ifdown`
-via the `up` or `down` [states](#state) -- unless there are other
-components that rely on the ifcfg files and react on changes.
-
-The `initscripts` provider requires the different profiles to be in the right
-order when they depend on each other. For example the bonding controller device
-needs to be specified before the port devices.
-
-When removing a profile for NetworkManager it also takes the connection
-down and possibly removes virtual interfaces. With the `initscripts` provider
-removing a profile does not change its current runtime state (this is a future
-feature for NetworkManager as well).
-
-For NetworkManager, modifying a connection with autoconnect enabled may result in the
-activation of a new profile on a previously disconnected interface. Also, deleting a
-NetworkManager connection that is currently active results in removing the interface.
-Therefore, the order of the steps should be followed, and careful handling of
-[autoconnect](#autoconnect) property may be necessary. This should be improved in
-NetworkManager RFE [rh#1401515](https://bugzilla.redhat.com/show_bug.cgi?id=1401515).
-
-It seems difficult to change networking of the target host in a way that breaks
-the current SSH connection of ansible. If you want to do that, ansible-pull might
-be a solution. Alternatively, a combination of `async`/`poll` with changing
-the `ansible_host` midway through the play might work.
-
-**TODO** The current role does not yet support easily splitting the
-play into a pre-configure step, and a second step to activate the new configuration.
-
-In general, to successfully run the play, determine which configuration is
-active in the first place, and then carefully configure a sequence of steps to change to
-the new configuration. The actual solution depends strongly on your environment.
-
-### Handling potential problems
-
-When something goes wrong while configuring networking remotely, you might need
-to get physical access to the machine to recover.
-
-**TODO** NetworkManager supports a
-[checkpoint/rollback](https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.html#gdbus-method-org-freedesktop-NetworkManager.CheckpointCreate)
-feature. At the beginning of the play we could create a checkpoint and if we lose
-connectivity due to an error, NetworkManager would automatically rollback after
-timeout. The limitation is that this would only work with NetworkManager, and
-it is not clear that rollback will result in a working configuration.
-
-*Want to contribute? Take a look at our [contributing
-guidelines](https://github.com/linux-system-roles/network/blob/main/contributing.md)!*
diff --git a/roles/linux-system-roles.network/ansible_pytest_extra_requirements.txt b/roles/linux-system-roles.network/ansible_pytest_extra_requirements.txt
deleted file mode 100644
index 0d79ed5..0000000
--- a/roles/linux-system-roles.network/ansible_pytest_extra_requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# ansible and dependencies for all supported platforms
-ansible ; python_version > "2.6"
-ansible<2.7 ; python_version < "2.7"
-idna<2.8 ; python_version < "2.7"
-PyYAML<5.1 ; python_version < "2.7"
diff --git a/roles/linux-system-roles.network/contributing.md b/roles/linux-system-roles.network/contributing.md
deleted file mode 100644
index 9c3a885..0000000
--- a/roles/linux-system-roles.network/contributing.md
+++ /dev/null
@@ -1,409 +0,0 @@
-Contributing to the Network Linux System Role
-=============================================
-
-Where to start
---------------
-
-- **Bugs and needed implementations** are listed on [Github
- Issues](https://github.com/linux-system-roles/network/issues). Issues labeled with
-[**help
-wanted**](https://github.com/linux-system-roles/network/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22)
-are likely to be suitable for new contributors!
-
-- **Code** is managed on
- [Github](https://github.com/linux-system-roles/network), using [Pull
-Requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests).
-
-- The code needs to be **compatible with Python 2.6, 2.7, 3.6, 3.7 and 3.8**.
-
-Code structure
---------------
-
-The repository is structured as follows:
-
-- `./defaults/` - Contains the default role configuration.
-
-- `./examples/` - Contains YAML examples for different configurations.
-
-- `./library/network_connections.py` - Contains the internal Ansible module, which is
- the main script. It controls the communication between the role and Ansible, imports
- the YAML configuration and applies the changes to the provider (i.e. NetworkManager,
- initscripts).
-
-- `./meta/` - Metadata of the project.
-
-- `./module_utils/network_lsr/` - Contains other files that are useful for the network
- role (e.g. the YAML argument validator)
-
-- `./tasks/` - Declaration of the different tasks that the role is going to execute.
-
-- `./tests/playbooks/` - Contains the complete tests for the role. `./tests/test_*.yml`
- are shims to run tests once for every provider. `./tests/tasks/` contains task
- snippets that are used in multiple tests to avoid having the same code repeated
- multiple times.
-
-The rest of the files in the root folder mostly serve as configuration files for different
-testing tools and bots that help with the maintenance of the project.
-
-The code files will always have the imports on the first place, followed by constants
-and in the last place, classes and methods. The style of python coding for this project
-is [**PEP 8**](https://www.python.org/dev/peps/pep-0008/), with automatic formatting
-thanks to [Python Black](https://black.readthedocs.io/en/stable/). Make sure to install
-the formatter, it will help you a lot throughout the whole coding process!
-
-Configuring Git
----------------
-
-Before starting to contribute, make sure you have the basic git configuration: Your name
-and email. This will be useful when signing your contributions. The following commands
-will set your global name and email, although you can change it later for every repo:
-
-```bash
-git config --global user.name "Jane Doe"
-git config --global user.email janedoe@example.com
-```
-
-The git editor is your system's default. If you feel more comfortable with a different
-editor for writing your commits (such as Vim), change it with:
-
-```bash
-git config --global core.editor vim
-```
-
-If you want to check your settings, use `git config --list` to see all the settings Git
-can find.
-
-How to contribute
------------------
-
-1. Make a
- [fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo)
-of this repository.
-
-2. Create a new git branch on your local fork (the name is not relevant) and make the
- changes you need to complete an issue.
-
-3. Do not forget to run unit and integration tests before pushing any changes!
- 1. This project uses [tox](https://tox.readthedocs.io/en/latest/) to run unit tests.
- You can try it with `tox -e py36` in case you want to try it using Python 3.6, or
- just `tox` if you want to run all the tests.
-
- 2. Check the formatting of the code with
- [Python Black](https://black.readthedocs.io/en/stable/)
-
- 3. Check the YAML files are correctly formatted using `tox -e yamllint`.
-
- 4. Integration tests are executed as
- [Ansible Playbooks](https://docs.ansible.com/ansible/latest/user_guide/playbooks.html).
-
- To run them you can use a cloud image like the [CentOS Linux 8.1
- VM](https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-GenericCloud-8.1.1911-20200113.3.x86_64.qcow2)
-    and install the package
-    `standard-test-roles-inventory-qemu` from the Fedora repository:
-
- ```bash
- dnf install standard-test-roles-inventory-qemu
- ```
-
- Note that the last path is the one of the test you want to run:
-
- ```bash
- TEST_SUBJECTS=CentOS-8-GenericCloud-8.1.1911-20200113.3.x86_64.qcow2 \
- ansible-playbook -v -i /usr/share/ansible/inventory/standard-inventory-qcow2 \
- tests/test_default.yml
- ```
-
- 5. Check the markdown format with
- [mdl](https://github.com/markdownlint/markdownlint) after changing any
- markdown document.
-
-4. Once the work is ready and commited, push the branch to your remote fork and click on
- "new Pull Request" on Github.
-
-5. All set! Now wait for the continuous integration to pass and go over your commit if
-   there are any errors. If there is no problem with your contribution, the maintainer
- will merge it to the main project.
-
-### Find other images for testing
-
-The CentOS project publishes cloud images for
-[CentOS Linux 6](https://cloud.centos.org/centos/6/images/),
-[CentOS Linux 7](https://cloud.centos.org/centos/7/images/) and
-[CentOS Linux 8](https://cloud.centos.org/centos/8/x86_64/images/).
-
-- For qemu testing cases, we prefer the image architecture to be `x86_64-GenericCloud`.
-- `2003` in `CentOS-7-x86_64-GenericCloud-2003.qcow2c` stands for image released in
- March 2020.
-- We can use the image with extension `.qcow2` and `.qcow2c`.
-- To save the image, right click on the link above, then select "Save link as...".
-
-For Fedora, we recommend using the [latest qcow2
-images](https://kojipkgs.fedoraproject.org/compose/cloud/).
-
-### Some important tips
-
-- Make sure your fork and branch are up-to-date with the main project. First of all,
- [configure a remote upstream for your
-fork](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/configuring-a-remote-for-a-fork),
-and keep your branch up-to-date with the upstream using
-`git pull --rebase upstream main`.
-
-- Try to make a commit per issue.
-
-- If you are asked to make changes to your PR, don't panic! Many times it is enough to
- amend your previous commit adding the new content to it (`git commit --amend`). Be
-sure to pull the latest upstream changes after that, and use `git push
---force-with-lease` to re-upload your commit with the changes! Another way of doing
-changes to a PR is by [squashing
-commits](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-request-merges#squash-and-merge-your-pull-request-commits).
-
-- There are times when someone has made changes on a file you were modifying while you
- were making changes to your unfinished commit. At times like this, you need to make a
-[**rebase**](https://help.github.com/en/github/using-git/about-git-rebase) with
-conflicts. On the rebase you have to compare what the other person added to what you
-added, and merge both file versions into one that combines it all.
-
-- If you have any doubt, do not hesitate to ask! You can join IRC channel \#systemroles
- on freenode, or ask on the PR/issue itself.
-
-### Naming Ansible Items
-
-- All YAML or Python files, variables, arguments, repositories, and other such names
- should follow standard Python naming conventions of being in
- `snake_case_naming_schemes`.
-
-- Names should be mnemonic and descriptive and not strive to shorten more than
- necessary. Systems support long identifier names, so use them to be descriptive
-
-- All defaults and all arguments to a role should have a name that begins with the role
- name to help avoid collision with other names. Avoid names like `packages` in favor of
- a name like `network_packages`.
-
-- Same argument applies for modules provided in the roles, they also need a `$ROLENAME_`
- prefix: `network_module`. While they are usually implementation details and not intended
- for direct use in playbooks, the unfortunate fact is that importing a role makes them
- available to the rest of the playbook and therefore creates opportunities for name
- collisions.
-
-- Moreover, internal variables (those that are not expected to be set by users) are to
- be prefixed by two underscores: `__network_variable`. This includes variables set by
- `set_fact` and `register`, because they persist in the namespace after the role has
- finished!
-
-- Do not use special characters other than underscore in variable names, even if
- YAML/JSON allow them. (Using such variables in Jinja2 or Python would be then very
- confusing and probably not functional.)
-
-*Find more explanation on this matter in the [meta
-standards](https://github.com/oasis-roles/meta_standards#naming-things).*
-
-### Write a good commit message
-
-Here are a few rules to keep in mind while writing a commit message
-
-1. Separate subject from body with a blank line
-2. Limit the subject line to 50 characters
-3. Capitalize the subject line
-4. Do not end the subject line with a period
-5. Use the imperative mood in the subject line
-6. Wrap the body at 72 characters
-7. Use the body to explain what and why vs. how
-
- A good commit message looks something like this:
-
-```text
- Summarize changes in around 50 characters or less
-
- More detailed explanatory text, if necessary. Wrap it to about 72
- characters or so. In some contexts, the first line is treated as the
- subject of the commit and the rest of the text as the body. The
- blank line separating the summary from the body is critical (unless
- you omit the body entirely); various tools like `log`, `shortlog`
- and `rebase` can get confused if you run the two together.
-
- Explain the problem that this commit is solving. Focus on why you
- are making this change as opposed to how (the code explains that).
- Are there side effects or other unintuitive consequences of this
- change? Here's the place to explain them.
-
- Further paragraphs come after blank lines.
-
- - Bullet points are okay, too
-
- - Typically a hyphen or asterisk is used for the bullet, preceded
- by a single space, with blank lines in between, but conventions
- vary here
-
- If you use an issue tracker, put references to them at the bottom,
- like this:
-
- Resolves: #123
- See also: #456, #789
-
-Do not forget to sign your commit! Use `git commit -s`
-```
-
-This is taken from [chris beams git commit](https://chris.beams.io/posts/git-commit/).
-You may want to read this for a more detailed explanation (and links to other posts on
-how to write a good commit message). This content is licensed under
-[CC-BY-SA](https://creativecommons.org/licenses/by-sa/4.0/).
-
-### Sign off your commit
-
-You need to sign off your commit. By adding your 'Signed-off-by' line to the commit
-messages you adhere to the
-[Developer Certificate of Origin (DCO)](https://developercertificate.org/).
-
-Use the `-s` command-line option to append the `Signed-off-by` line when committing your
-code:
-
-`$ git commit -s`
-
-This is the full text of the Developer Certificate of Origin:
-
-```text
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-1 Letterman Drive
-Suite D4700
-San Francisco, CA, 94129
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-### Debugging
-
-When using the `nm` provider, NetworkManager create a checkpoint and reverts the changes
-on failures. This makes it hard to debug the error. To disable this, set the Ansible
-variable `__network_debug_flags` to include the value `disable-checkpoints`. Also tests
-clean up by default in case there are failures. They should be tagged as
-`tests::cleanup` and can be skipped. To use both, run the test playbooks like this:
-
-```bash
-ansible-playbook --skip-tags tests::cleanup \
- -e "__network_debug_flags=disable-checkpoints" \
- -i testhost, tests/playbooks/tests_802_1x.yml
-```
-
-### NetworkManager Documentation
-
-- [NM 1.0](https://lazka.github.io/pgi-docs/#NM-1.0), it contains a full explanation
- about the NetworkManager API.
-
-### Integration tests with podman
-
-1. Create `~/.ansible/collections/ansible_collections/containers/podman/` if this
- directory does not exist and `cd` into this directory.
-
- ```bash
- mkdir -p ~/.ansible/collections/ansible_collections/containers/podman/
- cd ~/.ansible/collections/ansible_collections/containers/podman/
- ```
-
-2. Clone the collection plugins for Ansible-Podman into the current directory.
-
- ```bash
- git clone https://github.com/containers/ansible-podman-collections.git .
- ```
-
-3. Change directory into the `tests` subdirectory.
-
- ```bash
- cd ~/network/tests
- ```
-
-4. Use podman with `-d` to run in the background (daemon). Use `c7` because
- `centos/systemd` is centos7.
-
- ```bash
- podman run --name lsr-ci-c7 --rm --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
- -d registry.centos.org/centos/systemd
- ```
-
-5. Use `podman unshare` first to run "podman mount" in root mode, use `-vi` to run
- ansible as inventory in verbose mode, use `-c podman` to use collection plugins. Note,
- the following tests are currently not working with podman:
- - `tests_802_1x_nm.yml`
- - `tests_802_1x_updated_nm.yml`
- - `tests_bond_initscripts.yml`
- - `tests_bond_nm.yml`
- - `tests_bridge_initscripts.yml`
- - `tests_bridge_nm.yml`
- - `tests_default_nm.yml`
- - `tests_ethernet_nm.yml`
- - `tests_reapply_nm.yml`
- - `tests_states_nm.yml`
- - `tests_vlan_mtu_initscripts.yml`
- - `tests_vlan_mtu_nm.yml`
- - `tests_wireless_nm.yml`
-
- ```bash
- podman unshare
- ansible-playbook -vi lsr-ci-c7, -c podman tests_provider_nm.yml
- ```
-
-6. NOTE that this leaves the container running in the background, to kill it:
-
- ```bash
- podman stop lsr-ci-c7
- podman rm lsr-ci-c7
- ```
-
-### Continuous integration
-
-The [continuous integration](https://en.wikipedia.org/wiki/Continuous_integration) (CI)
-contains a set of automated tests that are triggered on a remote server. Some of them
-are immediately triggered when pushing new content to a PR (i.e. the tests hosted on
-TravisCI) while other need to be triggered by members of the project. This second
-set of tests can be manually triggered. To trigger them, write a command as a PR
-comment. The available commands are:
-
-- [citest] - Trigger a re-test for all machines.
-- [citest bad] - Trigger a re-test for all machines with an error or failure status.
-- [citest pending] - Trigger a re-test for all machines with a pending status.
-- [citest commit:] - Whitelist a commit to be tested if the submitter is not
- trusted.
-
-How to reach us
----------------
-
-The mailing list for developers: systemroles@lists.fedorahosted.org
-
-[Subscribe to the mailing list](https://lists.fedorahosted.org/admin/lists/systemroles.lists.fedorahosted.org/)
-
-[Archive of the mailing list](https://lists.fedorahosted.org/archives/list/systemroles@lists.fedorahosted.org/)
-
-If you are using IRC, join the `#systemroles` IRC channel on
-[freenode](https://freenode.net/kb/answer/chat)
-
-*Thanks for contributing and happy coding!!*
diff --git a/roles/linux-system-roles.network/custom_requirements.txt b/roles/linux-system-roles.network/custom_requirements.txt
deleted file mode 100644
index e52eadf..0000000
--- a/roles/linux-system-roles.network/custom_requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Write requirements for running your custom commands in tox here:
-PyYAML; python_version == '2.7' or python_version >= '3.5'
diff --git a/roles/linux-system-roles.network/defaults/main.yml b/roles/linux-system-roles.network/defaults/main.yml
deleted file mode 100644
index 2d8a56e..0000000
--- a/roles/linux-system-roles.network/defaults/main.yml
+++ /dev/null
@@ -1,119 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-network_connections: []
-
-network_allow_restart: no
-
-# Use initscripts for RHEL/CentOS < 7, nm otherwise
-network_provider_os_default: "{{
- 'initscripts' if ansible_distribution in [
- 'RedHat',
- 'CentOS',
- 'OracleLinux'
- ] and ansible_distribution_major_version is version('7', '<')
- else 'nm' }}"
-# If NetworkManager.service is running, assume that 'nm' is currently in-use,
-# otherwise initscripts
-__network_provider_current: "{{
- 'nm' if 'NetworkManager.service' in ansible_facts.services and
- ansible_facts.services['NetworkManager.service']['state'] == 'running'
- else 'initscripts'
- }}"
-# Default to the auto-detected value
-network_provider: "{{ __network_provider_current }}"
-
-# check if any 802.1x connections are defined
-__network_ieee802_1x_connections_defined: "{{ network_connections |
- selectattr('ieee802_1x', 'defined') | list | count > 0 }}"
-
-# check if any wireless connections are defined
-__network_wireless_connections_defined: "{{
- ['wireless'] in network_connections|json_query('[*][type]') }}"
-
-# NetworkManager-wireless is required for wireless connections
-__network_packages_default_wireless: ["{%
- if __network_wireless_connections_defined
- %}NetworkManager-wifi{% endif %}"]
-
-# check if any team connections are defined
-__network_team_connections_defined: "{{
- ['team'] in network_connections|json_query('[*][type]') }}"
-
-# NetworkManager-team is required for team connections
-__network_packages_default_team: ["{%
- if __network_team_connections_defined
- %}NetworkManager-team{% endif %}"]
-
-# wpa_supplicant is required if any 802.1x or wireless connections are defined
-__network_wpa_supplicant_required: "{{
- __network_ieee802_1x_connections_defined or
- __network_wireless_connections_defined }}"
-__network_packages_default_wpa_supplicant: ["{%
- if __network_wpa_supplicant_required
- %}wpa_supplicant{% endif %}"]
-
-# The python-gobject-base package depends on the python version and
-# distribution:
-# - python-gobject-base on RHEL7 (no python2-gobject-base :-/)
-# - python3-gobject-base on Fedora 28+
-__network_packages_default_gobject_packages: ["python{{
- ansible_python['version']['major'] | replace('2', '')}}-gobject-base"]
-
-__network_service_name_default_nm: NetworkManager
-__network_packages_default_nm: "{{['NetworkManager']
- + __network_packages_default_gobject_packages|select()|list()
- + __network_packages_default_wpa_supplicant|select()|list()
- + __network_packages_default_wireless|select()|list()
- + __network_packages_default_team|select()|list()}}"
-
-__network_service_name_default_initscripts: network
-
-# initscripts requires bridge-utils to manage bridges, install it when the
-# 'bridge' type is used in network_connections
-__network_packages_default_initscripts_bridge: ["{%
-if ['bridge'] in network_connections|json_query('[*][type]') and
- ansible_distribution in ['RedHat', 'CentOS', 'OracleLinux'] and
- ansible_distribution_major_version is version('7', '<=')
-%}bridge-utils{% endif %}"]
-__network_packages_default_initscripts_network_scripts: ["{%
-if ansible_distribution in ['RedHat', 'CentOS', 'OracleLinux'] and
- ansible_distribution_major_version is version('7', '<=')
-%}initscripts{% else %}network-scripts{% endif %}"]
-# convert _network_packages_default_initscripts_bridge to an empty list if it
-# contains only the empty string and add it to the default package list
-# |select() filters the list to include only values that evaluate to true
-# (the empty string is false)
-# |list() converts the generator that |select() creates to a list
-__network_packages_default_initscripts: "{{
-__network_packages_default_initscripts_bridge|select()|list()
-+ __network_packages_default_initscripts_network_scripts|select()|list()
-}}"
-
-
-# The user can explicitly set host variables "network_provider",
-# "network_service_name" and "network_packages".
-#
-# Usually, the user only wants to select the "network_provider"
-# (or not set it at all and let it be autodetected via the
-# internal variable "{{ __network_provider_current }}". Hence,
-# depending on the "network_provider", a different set of
-# service-name and packages is chosen.
-#
-# That is done via the internal "__network_provider_setup" dictionary.
-# If the user doesn't explicitly set "network_service_name" or
-# "network_packages" (which he usually wouldn't), then the defaults
-# from "__network_service_name_default_*" and "__network_packages_default_*"
-# apply. These values are hard-coded in this file, but they also could
-# be overwritten as host variables or via vars/*.yml.
-__network_provider_setup:
- nm:
- service_name: "{{ __network_service_name_default_nm }}"
- packages: "{{ __network_packages_default_nm }}"
- initscripts:
- service_name: "{{ __network_service_name_default_initscripts }}"
- packages: "{{ __network_packages_default_initscripts }}"
-
-network_packages: "{{
- __network_provider_setup[network_provider]['packages'] }}"
-network_service_name: "{{
- __network_provider_setup[network_provider]['service_name'] }}"
diff --git a/roles/linux-system-roles.network/examples/bond_simple.yml b/roles/linux-system-roles.network/examples/bond_simple.yml
deleted file mode 100644
index f9db265..0000000
--- a/roles/linux-system-roles.network/examples/bond_simple.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
- # Specify the bond profile
- - name: bond0
- state: up
- type: bond
- interface_name: bond0
- # ip configuration (optional)
- ip:
- address:
- - "192.0.2.24/24"
- - "2001:db8::23/64"
- # bond configuration settings: (optional)
- bond:
- mode: active-backup
- miimon: 110
-
- # add an ethernet profile to the bond
- - name: member1
- state: up
- type: ethernet
- interface_name: eth1
- controller: bond0
-
- # add a second ethernet profile to the bond
- - name: member2
- state: up
- type: ethernet
- interface_name: eth2
- controller: bond0
- roles:
- - linux-system-roles.network
-...
diff --git a/roles/linux-system-roles.network/examples/bond_with_vlan.yml b/roles/linux-system-roles.network/examples/bond_with_vlan.yml
deleted file mode 100644
index 3e2b1a1..0000000
--- a/roles/linux-system-roles.network/examples/bond_with_vlan.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
-
- # Create a bond profile, which is the parent of VLAN.
- - name: prod2
- state: up
- type: bond
- interface_name: bond2
- ip:
- dhcp4: no
- auto6: no
- bond:
- mode: active-backup
- miimon: 110
-
- # set an ethernet as port to the bond
- - name: prod2-port1
- state: up
- type: ethernet
- interface_name: "{{ network_interface_name2 }}"
- controller: prod2
-
- # on top of it, create a VLAN with ID 100 and static
- # addressing
- - name: prod2.100
- state: up
- type: vlan
- parent: prod2
- vlan_id: 100
- ip:
- address:
- - "192.0.2.{{ network_iphost }}/24"
-
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/examples/bridge_with_vlan.yml b/roles/linux-system-roles.network/examples/bridge_with_vlan.yml
deleted file mode 100644
index ebc7c4b..0000000
--- a/roles/linux-system-roles.network/examples/bridge_with_vlan.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
-
- # Create a bridge profile, which is the parent of VLAN.
- - name: prod2
- state: up
- type: bridge
- interface_name: bridge2
- ip:
- dhcp4: no
- auto6: no
-
- # set an ethernet port to the bridge
- - name: prod2-port1
- state: up
- type: ethernet
- interface_name: "{{ network_interface_name2 }}"
- controller: prod2
- port_type: bridge
-
- # on top of it, create a VLAN with ID 100 and static
- # addressing
- - name: prod2.100
- state: up
- type: vlan
- parent: prod2
- vlan_id: 100
- ip:
- address:
- - "192.0.2.{{ network_iphost }}/24"
-
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/examples/down_profile.yml b/roles/linux-system-roles.network/examples/down_profile.yml
deleted file mode 120000
index fe44746..0000000
--- a/roles/linux-system-roles.network/examples/down_profile.yml
+++ /dev/null
@@ -1 +0,0 @@
-../tests/playbooks/down_profile.yml
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/examples/dummy_simple.yml b/roles/linux-system-roles.network/examples/dummy_simple.yml
deleted file mode 100644
index db2266a..0000000
--- a/roles/linux-system-roles.network/examples/dummy_simple.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- network_connections:
- # Specify the dummy profile
- - name: dummy0
- state: up
- type: dummy
- interface_name: dummy0
- ip:
- address:
- - "192.0.2.42/30"
-
- roles:
- - linux-system-roles.network
-...
diff --git a/roles/linux-system-roles.network/examples/eth_dns_support.yml b/roles/linux-system-roles.network/examples/eth_dns_support.yml
deleted file mode 100644
index 43c3c2e..0000000
--- a/roles/linux-system-roles.network/examples/eth_dns_support.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- network_connections:
- - name: eth0
- type: ethernet
- ip:
- route_metric4: 100
- dhcp4: no
- gateway4: 192.0.2.1
- dns:
- - 192.0.2.2
- - 198.51.100.5
- dns_search:
- - example.com
- - subdomain.example.com
- dns_options:
- - rotate
- - timeout:1
-
- route_metric6: -1
- auto6: no
- gateway6: 2001:db8::1
-
- address:
- - 192.0.2.3/24
- - 198.51.100.3/26
- - 2001:db8::80/7
-
- route:
- - network: 198.51.100.128
- prefix: 26
- gateway: 198.51.100.1
- metric: 2
- - network: 198.51.100.64
- prefix: 26
- gateway: 198.51.100.6
- metric: 4
- route_append_only: no
- rule_append_only: yes
- roles:
- - linux-system-roles.network
-...
diff --git a/roles/linux-system-roles.network/examples/eth_simple_auto.yml b/roles/linux-system-roles.network/examples/eth_simple_auto.yml
deleted file mode 100644
index 0ba168a..0000000
--- a/roles/linux-system-roles.network/examples/eth_simple_auto.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
-
- # Create one ethernet profile and activate it.
- # The profile uses automatic IP addressing
- # and is tied to the interface by MAC address.
- - name: prod1
- state: up
- type: ethernet
- autoconnect: yes
- mac: "{{ network_mac1 }}"
- mtu: 1450
-
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/examples/eth_with_802_1x.yml b/roles/linux-system-roles.network/examples/eth_with_802_1x.yml
deleted file mode 100644
index 92a93a9..0000000
--- a/roles/linux-system-roles.network/examples/eth_with_802_1x.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
- - name: eth0
- type: ethernet
- ieee802_1x:
- identity: myhost
- eap: tls
- private_key: /etc/pki/tls/client.key
- # recommend vault encrypting the private key password
- # see https://docs.ansible.com/ansible/latest/user_guide/vault.html
- private_key_password: "p@55w0rD"
- client_cert: /etc/pki/tls/client.pem
- ca_cert: /etc/pki/tls/cacert.pem
- domain_suffix_match: example.com
-
- # certs have to be deployed first
- pre_tasks:
- - name: copy certs/keys for 802.1x auth
- copy:
- src: "{{ item }}"
- dest: "/etc/pki/tls/{{ item }}"
- with_items:
- - client.key
- - client.pem
- - cacert.pem
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/examples/eth_with_vlan.yml b/roles/linux-system-roles.network/examples/eth_with_vlan.yml
deleted file mode 100644
index 69da673..0000000
--- a/roles/linux-system-roles.network/examples/eth_with_vlan.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
-
- # Create a profile for the underlying device of the VLAN.
- - name: prod2
- type: ethernet
- autoconnect: no
- state: up
- interface_name: "{{ network_interface_name2 }}"
- ip:
- dhcp4: no
- auto6: no
-
- # on top of it, create a VLAN with ID 100 and static
- # addressing
- - name: prod2.100
- state: up
- type: vlan
- parent: prod2
- vlan_id: 100
- ip:
- address:
- - "192.0.2.{{ network_iphost }}/24"
-
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/examples/ethtool_coalesce.yml b/roles/linux-system-roles.network/examples/ethtool_coalesce.yml
deleted file mode 100644
index d0e8948..0000000
--- a/roles/linux-system-roles.network/examples/ethtool_coalesce.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ network_interface_name1 }}"
- state: up
- type: ethernet
- ip:
- dhcp4: no
- auto6: no
- ethtool:
- coalesce:
- adaptive_rx: yes
- adaptive_tx: no
- pkt_rate_high: 128
- pkt_rate_low: 128
- rx_frames: 128
- rx_frames_high: 128
- rx_frames_irq: 128
- rx_frames_low: 128
- rx_usecs: 128
- rx_usecs_high: 128
- rx_usecs_irq: 128
- rx_usecs_low: 128
- sample_interval: 128
- stats_block_usecs: 128
- tx_frames: 128
- tx_frames_high: 128
- tx_frames_irq: 128
- tx_frames_low: 128
- tx_usecs: 128
- tx_usecs_high: 128
- tx_usecs_irq: 128
- tx_usecs_low: 128
diff --git a/roles/linux-system-roles.network/examples/ethtool_features.yml b/roles/linux-system-roles.network/examples/ethtool_features.yml
deleted file mode 100644
index c580f89..0000000
--- a/roles/linux-system-roles.network/examples/ethtool_features.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ network_interface_name1 }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- ethtool:
- features:
- gro: "no"
- gso: "yes"
- tx_sctp_segmentation: "no"
diff --git a/roles/linux-system-roles.network/examples/ethtool_features_default.yml b/roles/linux-system-roles.network/examples/ethtool_features_default.yml
deleted file mode 100644
index 78965e6..0000000
--- a/roles/linux-system-roles.network/examples/ethtool_features_default.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ network_interface_name1 }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
diff --git a/roles/linux-system-roles.network/examples/infiniband.yml b/roles/linux-system-roles.network/examples/infiniband.yml
deleted file mode 100644
index 22603d9..0000000
--- a/roles/linux-system-roles.network/examples/infiniband.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
-
- - name: ib0
- type: infiniband
- interface_name: ib0
-
- # Create a simple infiniband profile
- - name: ib0-10
- interface_name: ib0.000a
- type: infiniband
- autoconnect: yes
- infiniband_p_key: 10
- parent: ib0
- state: up
- ip:
- dhcp4: no
- auto6: no
- address:
- - 198.51.100.133/30
-
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/examples/inventory b/roles/linux-system-roles.network/examples/inventory
deleted file mode 100644
index 52dae27..0000000
--- a/roles/linux-system-roles.network/examples/inventory
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# an inventory for the examples.
-[network-test]
-v-rhel6 ansible_user=root network_iphost=196 network_mac1=00:00:5e:00:53:00 network_interface_name1=eth0 network_interface_name2=eth1
-v-rhel7 ansible_user=root network_iphost=97 network_mac1=00:00:5e:00:53:01 network_interface_name1=eth0 network_interface_name2=eth1
diff --git a/roles/linux-system-roles.network/examples/ipv6_disabled.yml b/roles/linux-system-roles.network/examples/ipv6_disabled.yml
deleted file mode 100644
index dc29e78..0000000
--- a/roles/linux-system-roles.network/examples/ipv6_disabled.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- network_connections:
- - name: eth0
- type: ethernet
- ip:
- ipv6_disabled: true
- roles:
- - linux-system-roles.network
-...
diff --git a/roles/linux-system-roles.network/examples/macvlan.yml b/roles/linux-system-roles.network/examples/macvlan.yml
deleted file mode 100644
index 90cd09d..0000000
--- a/roles/linux-system-roles.network/examples/macvlan.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
-
- - name: eth0
- type: ethernet
- state: up
- interface_name: eth0
- ip:
- address:
- - 192.168.0.1/24
-
- # Create a virtual ethernet card bound to eth0
- - name: veth0
- type: macvlan
- state: up
- parent: eth0
- macvlan:
- mode: bridge
- promiscuous: True
- tap: False
- ip:
- address:
- - 192.168.1.1/24
-
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/examples/remove+down_profile.yml b/roles/linux-system-roles.network/examples/remove+down_profile.yml
deleted file mode 100644
index da2b1b8..0000000
--- a/roles/linux-system-roles.network/examples/remove+down_profile.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Set {{ profile }} down
- hosts: all
- vars:
- network_connections:
- - name: "{{ profile }}"
- persistent_state: absent
- state: down
- roles:
- - linux-system-roles.network
-...
diff --git a/roles/linux-system-roles.network/examples/remove_profile.yml b/roles/linux-system-roles.network/examples/remove_profile.yml
deleted file mode 120000
index d9959bc..0000000
--- a/roles/linux-system-roles.network/examples/remove_profile.yml
+++ /dev/null
@@ -1 +0,0 @@
-../tests/playbooks/remove_profile.yml
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/examples/roles b/roles/linux-system-roles.network/examples/roles
deleted file mode 120000
index a82c5f8..0000000
--- a/roles/linux-system-roles.network/examples/roles
+++ /dev/null
@@ -1 +0,0 @@
-../tests/roles/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/examples/team_simple.yml b/roles/linux-system-roles.network/examples/team_simple.yml
deleted file mode 100644
index 99c5a38..0000000
--- a/roles/linux-system-roles.network/examples/team_simple.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
- # Specify the team profile
- - name: team0
- state: up
- type: team
- interface_name: team0
- # ip configuration (optional)
- ip:
- address:
- - "192.0.2.24/24"
- - "2001:db8::23/64"
-
- # add an team profile to the team
- - name: member1
- state: up
- type: ethernet
- interface_name: eth1
- controller: team0
-
- # add a second team profile to the team
- - name: member2
- state: up
- type: ethernet
- interface_name: eth2
- controller: team0
-
- roles:
- - linux-system-roles.network
-...
diff --git a/roles/linux-system-roles.network/examples/wireless_wpa_psk.yml b/roles/linux-system-roles.network/examples/wireless_wpa_psk.yml
deleted file mode 100644
index eeec22f..0000000
--- a/roles/linux-system-roles.network/examples/wireless_wpa_psk.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: network-test
- vars:
- network_connections:
- - name: wlan0
- type: wireless
- wireless:
- ssid: "My WPA2-PSK Network"
- key_mgmt: "wpa-psk"
- # recommend vault encrypting the wireless password
- # see https://docs.ansible.com/ansible/latest/user_guide/vault.html
- password: "p@55w0rD"
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/library/__init__.py b/roles/linux-system-roles.network/library/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/roles/linux-system-roles.network/library/network_connections.py b/roles/linux-system-roles.network/library/network_connections.py
deleted file mode 100644
index 6eb1581..0000000
--- a/roles/linux-system-roles.network/library/network_connections.py
+++ /dev/null
@@ -1,2505 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# SPDX-License-Identifier: BSD-3-Clause
-
-import errno
-import functools
-import os
-import re
-import shlex
-import socket
-import subprocess
-import time
-import traceback
-import logging
-
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.network_lsr import ethtool # noqa:E501
-from ansible.module_utils.network_lsr import MyError # noqa:E501
-
-from ansible.module_utils.network_lsr.argument_validator import ( # noqa:E501
- ArgUtil,
- ArgValidator_ListConnections,
- ValidationError,
-)
-
-from ansible.module_utils.network_lsr.utils import Util # noqa:E501
-from ansible.module_utils.network_lsr import nm_provider # noqa:E501
-
-# pylint: enable=import-error, no-name-in-module
-
-
-DOCUMENTATION = """
----
-module: network_connections
-author: "Thomas Haller (thaller@redhat.com)"
-short_description: module for network role to manage connection profiles
-requirements: for 'nm' provider requires pygobject, dbus and NetworkManager.
-version_added: "2.0"
-description: Manage networking profiles (connections) for NetworkManager and
- initscripts networking providers.
-options: Documentation needs to be written. Note that the network_connections
- module tightly integrates with the network role and currently it is not
- expected to use this module outside the role. Thus, consult README.md for
- examples for the role.
-"""
-
-
-###############################################################################
-PERSISTENT_STATE = "persistent_state"
-ABSENT_STATE = "absent"
-
-DEFAULT_ACTIVATION_TIMEOUT = 90
-DEFAULT_TIMEOUT = 10
-
-
-class CheckMode:
- PREPARE = "prepare"
- DRY_RUN = "dry-run"
- PRE_RUN = "pre-run"
- REAL_RUN = "real-run"
- DONE = "done"
-
-
-class LogLevel:
- ERROR = "error"
- WARN = "warn"
- INFO = "info"
- DEBUG = "debug"
-
- _LOGGING_LEVEL_MAP = {
- logging.DEBUG: DEBUG,
- logging.INFO: INFO,
- logging.WARN: WARN,
- logging.ERROR: ERROR,
- }
-
- @staticmethod
- def from_logging_level(logging_level):
- return LogLevel._LOGGING_LEVEL_MAP.get(logging_level, LogLevel.ERROR)
-
- @staticmethod
- def fmt(level):
- return "<%-6s" % (str(level) + ">")
-
-
-# cmp() is not available in python 3 anymore
-if "cmp" not in dir(__builtins__):
-
- def cmp(x, y):
- """
- Replacement for built-in function cmp that was removed in Python 3
-
- Compare the two objects x and y and return an integer according to
- the outcome. The return value is negative if x < y, zero if x == y
- and strictly positive if x > y.
- """
-
- return (x > y) - (x < y)
-
-
-class SysUtil:
- @staticmethod
- def _sysctl_read(filename):
- try_count = 0
- while True:
- try_count += 1
- try:
- with open(filename, "r") as f:
- return f.read()
- except Exception:
- if try_count < 5:
- continue
- raise
-
- @staticmethod
- def _link_read_ifindex(ifname):
- c = SysUtil._sysctl_read("/sys/class/net/" + ifname + "/ifindex")
- return int(c.strip())
-
- @staticmethod
- def _link_read_address(ifname):
- c = SysUtil._sysctl_read("/sys/class/net/" + ifname + "/address")
- return Util.mac_norm(c.strip())
-
- @staticmethod
- def _link_read_permaddress(ifname):
- return ethtool.get_perm_addr(ifname)
-
- @staticmethod
- def _link_infos_fetch():
- links = {}
- for ifname in os.listdir("/sys/class/net/"):
- if not os.path.islink("/sys/class/net/" + ifname):
- # /sys/class/net may contain certain entries that are not
- # interface names, like 'bonding_master'. Skip over files
- # that are not links.
- continue
- links[ifname] = {
- "ifindex": SysUtil._link_read_ifindex(ifname),
- "ifname": ifname,
- "address": SysUtil._link_read_address(ifname),
- "perm-address": SysUtil._link_read_permaddress(ifname),
- }
- return links
-
- @classmethod
- def link_infos(cls, refresh=False):
- if refresh:
- linkinfos = None
- else:
- linkinfos = getattr(cls, "_link_infos", None)
- if linkinfos is None:
- try_count = 0
- b = None
- while True:
- try_count += 1
- try:
- # there is a race in that we lookup properties by ifname
- # and interfaces can be renamed. Try to avoid that by
- # fetching the info twice and repeat until we get the same
- # result.
- if b is None:
- b = SysUtil._link_infos_fetch()
- linkinfos = SysUtil._link_infos_fetch()
- if linkinfos != b:
- b = linkinfos
- raise Exception(
- "cannot read stable link-infos. They keep changing"
- )
- except Exception:
- if try_count < 50:
- raise
- continue
- break
- cls._link_infos = linkinfos
- return linkinfos
-
- @classmethod
- def link_info_find(cls, refresh=False, mac=None, ifname=None):
- if mac is not None:
- mac = Util.mac_norm(mac)
- for li in cls.link_infos(refresh).values():
- if mac is not None and mac not in [
- li.get("perm-address", None),
- li.get("address", None),
- ]:
- continue
- if ifname is not None and ifname != li.get("ifname", None):
- continue
- return li
- return None
-
-
-###############################################################################
-
-
-###############################################################################
-
-
-class IfcfgUtil:
-
- FILE_TYPES = ["ifcfg", "keys", "route", "route6", "rule", "rule6"]
-
- @classmethod
- def _file_types(cls, file_type):
- if file_type is None:
- return cls.FILE_TYPES
- else:
- return [file_type]
-
- @classmethod
- def ifcfg_paths(cls, name, file_types=None):
- paths = []
- if file_types is None:
- file_types = cls.FILE_TYPES
- for f in file_types:
- paths.append(cls.ifcfg_path(name, f))
- return paths
-
- @classmethod
- def ifcfg_path(cls, name, file_type=None):
- n = str(name)
- if not name or n == "." or n == ".." or n.find("/") != -1:
- raise MyError("invalid ifcfg-name %s" % (name))
- if file_type is None:
- file_type = "ifcfg"
- if file_type not in cls.FILE_TYPES:
- raise MyError("invalid file-type %s" % (file_type))
- return "/etc/sysconfig/network-scripts/" + file_type + "-" + n
-
- @classmethod
- def KeyValid(cls, name):
- r = getattr(cls, "_CHECKSTR_VALID_KEY", None)
- if r is None:
- r = re.compile("^[a-zA-Z][a-zA-Z0-9_]*$")
- cls._CHECKSTR_VALID_KEY = r
- return bool(r.match(name))
-
- @classmethod
- def ValueEscape(cls, value):
-
- r = getattr(cls, "_re_ValueEscape", None)
- if r is None:
- r = re.compile("^[a-zA-Z_0-9-.]*$")
- cls._re_ValueEscape = r
-
- if r.match(value):
- return value
-
- if any([ord(c) < ord(" ") for c in value]):
- # needs ansic escaping due to ANSI control caracters (newline)
- s = "$'"
- for c in value:
- if ord(c) < ord(c):
- s += "\\" + str(ord(c))
- elif c == "\\" or c == "'":
- s += "\\" + c
- else:
- # non-unicode chars are fine too to take literally
- # as utf8
- s += c
- s += "'"
- else:
- # double quoting
- s = '"'
- for c in value:
- if c == '"' or c == "\\" or c == "$" or c == "`":
- s += "\\" + c
- else:
- # non-unicode chars are fine too to take literally
- # as utf8
- s += c
- s += '"'
- return s
-
- @classmethod
- def _ifcfg_route_merge(cls, route, append_only, current):
- if not append_only or current is None:
- if not route:
- return None
- return "\n".join(route) + "\n"
-
- if route:
- # the 'route' file is processed line by line by initscripts'
- # ifup-route. Hence, the order of the route matters.
- # _ifcfg_route_merge() is not sophisticated enough to understand
- # pre-existing lines. It will only append lines that don't exist
- # yet, which hopefully is correct. It's better to always rewrite
- # the entire file with route_append_only=False.
- changed = False
- c_lines = list(current.split("\n"))
- for r in route:
- if r not in c_lines:
- changed = True
- c_lines.append(r)
- if changed:
- return "\n".join(c_lines) + "\n"
-
- return current
-
- @classmethod
- def ifcfg_create(
- cls, connections, idx, warn_fcn=lambda msg: None, content_current=None
- ):
- connection = connections[idx]
- ip = connection["ip"]
-
- ifcfg = {}
- keys_file = None
- route4_file = None
- route6_file = None
- rule4_file = None
- rule6_file = None
-
- if ip["dhcp4_send_hostname"] is not None:
- warn_fcn("ip.dhcp4_send_hostname is not supported by initscripts provider")
- if ip["route_metric4"] is not None and ip["route_metric4"] >= 0:
- warn_fcn("ip.route_metric4 is not supported by initscripts provider")
- if ip["route_metric6"] is not None and ip["route_metric6"] >= 0:
- warn_fcn("ip.route_metric6 is not supported by initscripts provider")
-
- ifcfg["NM_CONTROLLED"] = "no"
-
- if connection["autoconnect"]:
- ifcfg["ONBOOT"] = "yes"
- else:
- ifcfg["ONBOOT"] = "no"
-
- ifcfg["DEVICE"] = connection["interface_name"]
-
- if connection["type"] == "ethernet":
- ifcfg["TYPE"] = "Ethernet"
- ifcfg["HWADDR"] = connection["mac"]
- elif connection["type"] == "infiniband":
- ifcfg["TYPE"] = "InfiniBand"
- ifcfg["HWADDR"] = connection["mac"]
- ifcfg["CONNECTED_MODE"] = (
- "yes"
- if (connection["infiniband"]["transport_mode"] == "connected")
- else "no"
- )
- if connection["infiniband"]["p_key"] != -1:
- ifcfg["PKEY"] = "yes"
- ifcfg["PKEY_ID"] = str(connection["infiniband"]["p_key"])
- if connection["parent"]:
- ifcfg["PHYSDEV"] = ArgUtil.connection_find_controller(
- connection["parent"], connections, idx
- )
- elif connection["type"] == "bridge":
- ifcfg["TYPE"] = "Bridge"
- elif connection["type"] == "bond":
- ifcfg["TYPE"] = "Bond"
- ifcfg["BONDING_MASTER"] = "yes"
- opts = ["mode=%s" % (connection["bond"]["mode"])]
- if connection["bond"]["miimon"] is not None:
- opts.append(" miimon=%s" % (connection["bond"]["miimon"]))
- ifcfg["BONDING_OPTS"] = " ".join(opts)
- elif connection["type"] == "team":
- ifcfg["DEVICETYPE"] = "Team"
- elif connection["type"] == "vlan":
- ifcfg["VLAN"] = "yes"
- ifcfg["TYPE"] = "Vlan"
- ifcfg["PHYSDEV"] = ArgUtil.connection_find_controller(
- connection["parent"], connections, idx
- )
- ifcfg["VID"] = str(connection["vlan"]["id"])
- else:
- raise MyError("unsupported type %s" % (connection["type"]))
-
- if connection["mtu"]:
- ifcfg["MTU"] = str(connection["mtu"])
-
- ethtool_options = ""
- if "ethernet" in connection:
- if connection["ethernet"]["autoneg"] is not None:
- if connection["ethernet"]["autoneg"]:
- ethtool_options = "autoneg on"
- else:
- ethtool_options = "autoneg off speed %s duplex %s" % (
- connection["ethernet"]["speed"],
- connection["ethernet"]["duplex"],
- )
-
- ethtool_features = connection["ethtool"]["features"]
- configured_features = []
- for feature, setting in ethtool_features.items():
- feature = feature.replace("_", "-")
- value = ""
- if setting:
- value = "on"
- elif setting is not None:
- value = "off"
-
- if value:
- configured_features.append("%s %s" % (feature, value))
-
- if configured_features:
- if ethtool_options:
- ethtool_options += " ; "
- ethtool_options += "-K %s %s" % (
- connection["interface_name"],
- " ".join(configured_features),
- )
-
- ethtool_coalesce = connection["ethtool"]["coalesce"]
- configured_coalesce = []
- for coalesce, setting in ethtool_coalesce.items():
- if setting is not None:
- if isinstance(setting, bool):
- setting = int(setting)
- configured_coalesce.append(
- "%s %s" % (coalesce.replace("_", "-"), setting)
- )
-
- if configured_coalesce:
- if ethtool_options:
- ethtool_options += " ; "
- ethtool_options += "-C %s %s" % (
- connection["interface_name"],
- " ".join(configured_coalesce),
- )
-
- if ethtool_options:
- ifcfg["ETHTOOL_OPTS"] = ethtool_options
-
- if connection["controller"] is not None:
- m = ArgUtil.connection_find_controller(
- connection["controller"], connections, idx
- )
- if connection["port_type"] == "bridge":
- ifcfg["BRIDGE"] = m
- elif connection["port_type"] == "bond":
- ifcfg["MASTER"] = m
- ifcfg["SLAVE"] = "yes"
- elif connection["port_type"] == "team":
- ifcfg["TEAM_MASTER"] = m
- if "TYPE" in ifcfg:
- del ifcfg["TYPE"]
- if connection["type"] != "team":
- ifcfg["DEVICETYPE"] = "TeamPort"
- else:
- raise MyError("invalid port_type '%s'" % (connection["port_type"]))
-
- if ip["route_append_only"] and content_current:
- route4_file = content_current["route"]
- route6_file = content_current["route6"]
- else:
- if connection["zone"]:
- ifcfg["ZONE"] = connection["zone"]
-
- addrs4 = list([a for a in ip["address"] if a["family"] == socket.AF_INET])
- addrs6 = list([a for a in ip["address"] if a["family"] == socket.AF_INET6])
-
- if ip["dhcp4"]:
- ifcfg["BOOTPROTO"] = "dhcp"
- elif addrs4:
- ifcfg["BOOTPROTO"] = "static"
- else:
- ifcfg["BOOTPROTO"] = "none"
- for i in range(0, len(addrs4)):
- addr = addrs4[i]
- ifcfg["IPADDR" + ("" if i == 0 else str(i))] = addr["address"]
- ifcfg["PREFIX" + ("" if i == 0 else str(i))] = str(addr["prefix"])
- if ip["gateway4"] is not None:
- ifcfg["GATEWAY"] = ip["gateway4"]
-
- for idx, dns in enumerate(ip["dns"]):
- ifcfg["DNS" + str(idx + 1)] = dns["address"]
- if ip["dns_search"]:
- ifcfg["DOMAIN"] = " ".join(ip["dns_search"])
-
- if ip["auto6"]:
- ifcfg["IPV6INIT"] = "yes"
- ifcfg["IPV6_AUTOCONF"] = "yes"
- elif addrs6:
- ifcfg["IPV6INIT"] = "yes"
- ifcfg["IPV6_AUTOCONF"] = "no"
- else:
- ifcfg["IPV6INIT"] = "no"
- if addrs6:
- ifcfg["IPV6ADDR"] = (
- addrs6[0]["address"] + "/" + str(addrs6[0]["prefix"])
- )
- if len(addrs6) > 1:
- ifcfg["IPV6ADDR_SECONDARIES"] = " ".join(
- [a["address"] + "/" + str(a["prefix"]) for a in addrs6[1:]]
- )
- if ip["gateway6"] is not None:
- ifcfg["IPV6_DEFAULTGW"] = ip["gateway6"]
-
- route4 = []
- route6 = []
- for r in ip["route"]:
- line = r["network"] + "/" + str(r["prefix"])
- if r["gateway"]:
- line += " via " + r["gateway"]
- if r["metric"] != -1:
- line += " metric " + str(r["metric"])
-
- if r["family"] == socket.AF_INET:
- route4.append(line)
- else:
- route6.append(line)
-
- route4_file = cls._ifcfg_route_merge(
- route4,
- ip["route_append_only"] and content_current,
- content_current["route"] if content_current else None,
- )
- route6_file = cls._ifcfg_route_merge(
- route6,
- ip["route_append_only"] and content_current,
- content_current["route6"] if content_current else None,
- )
-
- if ip["rule_append_only"] and content_current:
- rule4_file = content_current["rule"]
- rule6_file = content_current["rule6"]
-
- for key in list(ifcfg.keys()):
- v = ifcfg[key]
- if v is None:
- del ifcfg[key]
- continue
- if isinstance(v, bool):
- ifcfg[key] = "yes" if v else "no"
-
- return {
- "ifcfg": ifcfg,
- "keys": keys_file,
- "route": route4_file,
- "route6": route6_file,
- "rule": rule4_file,
- "rule6": rule6_file,
- }
-
- @classmethod
- def ifcfg_parse_line(cls, line):
- r1 = getattr(cls, "_re_parse_line1", None)
- if r1 is None:
- r1 = re.compile("^[ \t]*([a-zA-Z_][a-zA-Z_0-9]*)=(.*)$")
- cls._re_parse_line1 = r1
- cls._shlex = shlex
- m = r1.match(line)
- if not m:
- return None
- key = m.group(1)
- val = m.group(2)
- val = val.rstrip()
-
- # shlex isn't up to the task of parsing shell. Whatever,
- # we can only parse shell to a certain degree and this is
- # good enough for now.
- try:
- c = list(cls._shlex.split(val, comments=True, posix=True))
- except Exception:
- return None
- if len(c) != 1:
- return None
- return (key, c[0])
-
- @classmethod
- def ifcfg_parse(cls, content):
- if content is None:
- return None
- ifcfg = {}
- for line in content.splitlines():
- val = cls.ifcfg_parse_line(line)
- if val:
- ifcfg[val[0]] = val[1]
- return ifcfg
-
- @classmethod
- def content_from_dict(cls, ifcfg_all, file_type=None, header=None):
- content = {}
- for file_type in cls._file_types(file_type):
- h = ifcfg_all[file_type]
- if file_type == "ifcfg":
- if header is not None:
- s = header + "\n"
- else:
- s = ""
- for key in sorted(h.keys()):
- value = h[key]
- if not cls.KeyValid(key):
- raise MyError("invalid ifcfg key %s" % (key))
- if value is not None:
- s += key + "=" + cls.ValueEscape(value) + "\n"
- content[file_type] = s
- else:
- content[file_type] = h
-
- return content
-
- @classmethod
- def content_to_dict(cls, content, file_type=None):
- ifcfg_all = {}
- for file_type in cls._file_types(file_type):
- ifcfg_all[file_type] = cls.ifcfg_parse(content[file_type])
- return ifcfg_all
-
- @classmethod
- def content_from_file(cls, name, file_type=None):
- """
- Return dictionary with all file contents for an initscripts profile
- """
- content = {}
- for file_type in cls._file_types(file_type):
- path = cls.ifcfg_path(name, file_type)
- try:
- with open(path, "r") as content_file:
- i_content = content_file.read()
- except Exception:
- i_content = None
- content[file_type] = i_content
- return content
-
- @classmethod
- def content_to_file(cls, name, content, file_type=None):
- for file_type in cls._file_types(file_type):
- path = cls.ifcfg_path(name, file_type)
- h = content[file_type]
- if h is None:
- try:
- os.unlink(path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- else:
- with open(path, "w") as text_file:
- text_file.write(h)
-
- @classmethod
- def connection_seems_active(cls, name):
- # we don't know whether a ifcfg file is currently active,
- # and we also don't know which.
- #
- # Do a very basic guess based on whether the interface
- # is in operstate "up".
- #
- # But first we need to find the interface name. Do
- # some naive parsing and check for DEVICE setting.
- content = cls.content_from_file(name, "ifcfg")
- if content["ifcfg"] is not None:
- content = cls.ifcfg_parse(content["ifcfg"])
- else:
- content = {}
- if "DEVICE" not in content:
- return None
- path = "/sys/class/net/" + content["DEVICE"] + "/operstate"
- try:
- with open(path, "r") as content_file:
- i_content = str(content_file.read())
- except Exception:
- return None
-
- if i_content.strip() != "up":
- return False
-
- return True
-
-
-###############################################################################
-
-
-class NMUtil:
- def __init__(self, nmclient=None):
- if nmclient is None:
- nmclient = Util.NM().Client.new(None)
- self.nmclient = nmclient
-
- def setting_ip_config_get_routes(self, s_ip):
- if s_ip is not None:
- for i in range(0, s_ip.get_num_routes()):
- yield s_ip.get_route(i)
-
- def connection_ensure_setting(self, connection, setting_type):
- setting = connection.get_setting(setting_type)
- if not setting:
- setting = setting_type.new()
- connection.add_setting(setting)
- return setting
-
- def device_is_controller_type(self, dev):
- if dev:
- NM = Util.NM()
- GObject = Util.GObject()
- if (
- GObject.type_is_a(dev, NM.DeviceBond)
- or GObject.type_is_a(dev, NM.DeviceBridge)
- or GObject.type_is_a(dev, NM.DeviceTeam)
- ):
- return True
- return False
-
- def active_connection_list(self, connections=None, black_list=None):
- active_cons = self.nmclient.get_active_connections()
- if connections:
- connections = set(connections)
- active_cons = [
- ac for ac in active_cons if ac.get_connection() in connections
- ]
- if black_list:
- active_cons = [ac for ac in active_cons if ac not in black_list]
- return list(active_cons)
-
- def connection_list(
- self,
- name=None,
- uuid=None,
- black_list=None,
- black_list_names=None,
- black_list_uuids=None,
- ):
- cons = self.nmclient.get_connections()
- if name is not None:
- cons = [c for c in cons if c.get_id() == name]
- if uuid is not None:
- cons = [c for c in cons if c.get_uuid() == uuid]
-
- if black_list:
- cons = [c for c in cons if c not in black_list]
- if black_list_uuids:
- cons = [c for c in cons if c.get_uuid() not in black_list_uuids]
- if black_list_names:
- cons = [c for c in cons if c.get_id() not in black_list_names]
-
- cons = list(cons)
-
- def _cmp(a, b):
- s_a = a.get_setting_connection()
- s_b = b.get_setting_connection()
- if not s_a and not s_b:
- return 0
- if not s_a:
- return 1
- if not s_b:
- return -1
- t_a = s_a.get_timestamp()
- t_b = s_b.get_timestamp()
- if t_a == t_b:
- return 0
- if t_a <= 0:
- return 1
- if t_b <= 0:
- return -1
- return cmp(t_a, t_b)
-
- if Util.PY3:
- # functools.cmp_to_key does not exist in Python 2.6
- cons.sort(key=functools.cmp_to_key(_cmp))
- else:
- cons.sort(cmp=_cmp)
- return cons
-
- def connection_compare(
- self, con_a, con_b, normalize_a=False, normalize_b=False, compare_flags=None
- ):
- NM = Util.NM()
-
- if normalize_a:
- con_a = NM.SimpleConnection.new_clone(con_a)
- try:
- con_a.normalize()
- except Exception:
- pass
- if normalize_b:
- con_b = NM.SimpleConnection.new_clone(con_b)
- try:
- con_b.normalize()
- except Exception:
- pass
- if compare_flags is None:
- compare_flags = NM.SettingCompareFlags.IGNORE_TIMESTAMP
-
- return not (not (con_a.compare(con_b, compare_flags)))
-
- def connection_is_active(self, con):
- NM = Util.NM()
- for ac in self.active_connection_list(connections=[con]):
- if (
- ac.get_state() >= NM.ActiveConnectionState.ACTIVATING
- and ac.get_state() <= NM.ActiveConnectionState.ACTIVATED
- ):
- return True
- return False
-
- def connection_create(self, connections, idx, connection_current=None):
- NM = Util.NM()
-
- connection = connections[idx]
-
- con = NM.SimpleConnection.new()
- s_con = self.connection_ensure_setting(con, NM.SettingConnection)
-
- s_con.set_property(NM.SETTING_CONNECTION_ID, connection["name"])
- s_con.set_property(NM.SETTING_CONNECTION_UUID, connection["nm.uuid"])
- s_con.set_property(NM.SETTING_CONNECTION_AUTOCONNECT, connection["autoconnect"])
- s_con.set_property(
- NM.SETTING_CONNECTION_INTERFACE_NAME, connection["interface_name"]
- )
-
- if connection["type"] == "ethernet":
- s_con.set_property(
- NM.SETTING_CONNECTION_TYPE, NM.SETTING_WIRED_SETTING_NAME
- )
- s_wired = self.connection_ensure_setting(con, NM.SettingWired)
- s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, connection["mac"])
- elif connection["type"] == "infiniband":
- s_con.set_property(
- NM.SETTING_CONNECTION_TYPE, NM.SETTING_INFINIBAND_SETTING_NAME
- )
- s_infiniband = self.connection_ensure_setting(con, NM.SettingInfiniband)
- s_infiniband.set_property(
- NM.SETTING_INFINIBAND_MAC_ADDRESS, connection["mac"]
- )
- s_infiniband.set_property(
- NM.SETTING_INFINIBAND_TRANSPORT_MODE,
- connection["infiniband"]["transport_mode"],
- )
- if connection["infiniband"]["p_key"] != -1:
- s_infiniband.set_property(
- NM.SETTING_INFINIBAND_P_KEY, connection["infiniband"]["p_key"]
- )
- if connection["parent"]:
- s_infiniband.set_property(
- NM.SETTING_INFINIBAND_PARENT,
- ArgUtil.connection_find_controller(
- connection["parent"], connections, idx
- ),
- )
- elif connection["type"] == "bridge":
- s_con.set_property(
- NM.SETTING_CONNECTION_TYPE, NM.SETTING_BRIDGE_SETTING_NAME
- )
- s_bridge = self.connection_ensure_setting(con, NM.SettingBridge)
- s_bridge.set_property(NM.SETTING_BRIDGE_STP, False)
- elif connection["type"] == "bond":
- s_con.set_property(NM.SETTING_CONNECTION_TYPE, NM.SETTING_BOND_SETTING_NAME)
- s_bond = self.connection_ensure_setting(con, NM.SettingBond)
- s_bond.add_option("mode", connection["bond"]["mode"])
- if connection["bond"]["miimon"] is not None:
- s_bond.add_option("miimon", str(connection["bond"]["miimon"]))
- elif connection["type"] == "team":
- s_con.set_property(NM.SETTING_CONNECTION_TYPE, NM.SETTING_TEAM_SETTING_NAME)
- elif connection["type"] == "dummy":
- s_con.set_property(
- NM.SETTING_CONNECTION_TYPE, NM.SETTING_DUMMY_SETTING_NAME
- )
- elif connection["type"] == "vlan":
- s_con.set_property(NM.SETTING_CONNECTION_TYPE, NM.SETTING_VLAN_SETTING_NAME)
- s_vlan = self.connection_ensure_setting(con, NM.SettingVlan)
- s_vlan.set_property(NM.SETTING_VLAN_ID, connection["vlan"]["id"])
- s_vlan.set_property(
- NM.SETTING_VLAN_PARENT,
- ArgUtil.connection_find_controller_uuid(
- connection["parent"], connections, idx
- ),
- )
- elif connection["type"] == "macvlan":
- s_con.set_property(
- NM.SETTING_CONNECTION_TYPE, NM.SETTING_MACVLAN_SETTING_NAME
- )
- # convert mode name to a number (which is actually expected by nm)
- mode = connection["macvlan"]["mode"]
- try:
- mode_id = int(getattr(NM.SettingMacvlanMode, mode.upper()))
- except AttributeError:
- raise MyError("Macvlan mode '%s' is not recognized" % (mode))
- s_macvlan = self.connection_ensure_setting(con, NM.SettingMacvlan)
- s_macvlan.set_property(NM.SETTING_MACVLAN_MODE, mode_id)
- s_macvlan.set_property(
- NM.SETTING_MACVLAN_PROMISCUOUS, connection["macvlan"]["promiscuous"]
- )
- s_macvlan.set_property(NM.SETTING_MACVLAN_TAP, connection["macvlan"]["tap"])
- s_macvlan.set_property(
- NM.SETTING_MACVLAN_PARENT,
- ArgUtil.connection_find_controller(
- connection["parent"], connections, idx
- ),
- )
- elif connection["type"] == "wireless":
- s_con.set_property(
- NM.SETTING_CONNECTION_TYPE, NM.SETTING_WIRELESS_SETTING_NAME
- )
- s_wireless = self.connection_ensure_setting(con, NM.SettingWireless)
- s_wireless.set_property(
- NM.SETTING_WIRELESS_SSID,
- Util.GLib().Bytes.new(connection["wireless"]["ssid"].encode("utf-8")),
- )
-
- s_wireless_sec = self.connection_ensure_setting(
- con, NM.SettingWirelessSecurity
- )
- s_wireless_sec.set_property(
- NM.SETTING_WIRELESS_SECURITY_KEY_MGMT,
- connection["wireless"]["key_mgmt"],
- )
-
- if connection["wireless"]["key_mgmt"] == "wpa-psk":
- s_wireless_sec.set_property(
- NM.SETTING_WIRELESS_SECURITY_PSK, connection["wireless"]["password"]
- )
- else:
- raise MyError("unsupported type %s" % (connection["type"]))
-
- if "ethernet" in connection:
- if connection["ethernet"]["autoneg"] is not None:
- s_wired = self.connection_ensure_setting(con, NM.SettingWired)
- s_wired.set_property(
- NM.SETTING_WIRED_AUTO_NEGOTIATE, connection["ethernet"]["autoneg"]
- )
- s_wired.set_property(
- NM.SETTING_WIRED_DUPLEX, connection["ethernet"]["duplex"]
- )
- s_wired.set_property(
- NM.SETTING_WIRED_SPEED, connection["ethernet"]["speed"]
- )
-
- if hasattr(NM, "SettingEthtool"):
- s_ethtool = self.connection_ensure_setting(con, NM.SettingEthtool)
-
- for feature, setting in connection["ethtool"]["features"].items():
- nm_feature = nm_provider.get_nm_ethtool_feature(feature)
-
- if setting is None:
- if nm_feature:
- s_ethtool.set_feature(nm_feature, NM.Ternary.DEFAULT)
- elif setting:
- s_ethtool.set_feature(nm_feature, NM.Ternary.TRUE)
- else:
- s_ethtool.set_feature(nm_feature, NM.Ternary.FALSE)
-
- for coalesce, setting in connection["ethtool"]["coalesce"].items():
- nm_coalesce = nm_provider.get_nm_ethtool_coalesce(coalesce)
-
- if nm_coalesce:
- if setting is None:
- s_ethtool.option_set(nm_coalesce, None)
- else:
- s_ethtool.option_set_uint32(nm_coalesce, int(setting))
-
- if connection["mtu"]:
- if connection["type"] == "infiniband":
- s_infiniband = self.connection_ensure_setting(con, NM.SettingInfiniband)
- s_infiniband.set_property(NM.SETTING_INFINIBAND_MTU, connection["mtu"])
- else:
- s_wired = self.connection_ensure_setting(con, NM.SettingWired)
- s_wired.set_property(NM.SETTING_WIRED_MTU, connection["mtu"])
-
- if connection["controller"] is not None:
- s_con.set_property(
- NM.SETTING_CONNECTION_SLAVE_TYPE, connection["port_type"]
- )
- s_con.set_property(
- NM.SETTING_CONNECTION_MASTER,
- ArgUtil.connection_find_controller_uuid(
- connection["controller"], connections, idx
- ),
- )
- else:
- if connection["zone"]:
- s_con.set_property(NM.SETTING_CONNECTION_ZONE, connection["zone"])
-
- ip = connection["ip"]
-
- s_ip4 = self.connection_ensure_setting(con, NM.SettingIP4Config)
- s_ip6 = self.connection_ensure_setting(con, NM.SettingIP6Config)
-
- s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto")
- s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto")
-
- addrs4 = list([a for a in ip["address"] if a["family"] == socket.AF_INET])
- addrs6 = list([a for a in ip["address"] if a["family"] == socket.AF_INET6])
-
- if ip["dhcp4"]:
- s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto")
- s_ip4.set_property(
- NM.SETTING_IP_CONFIG_DHCP_SEND_HOSTNAME,
- ip["dhcp4_send_hostname"] is not False,
- )
- elif addrs4:
- s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, "manual")
- else:
- s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, "disabled")
- for a in addrs4:
- s_ip4.add_address(
- NM.IPAddress.new(a["family"], a["address"], a["prefix"])
- )
- if ip["gateway4"] is not None:
- s_ip4.set_property(NM.SETTING_IP_CONFIG_GATEWAY, ip["gateway4"])
- if ip["route_metric4"] is not None and ip["route_metric4"] >= 0:
- s_ip4.set_property(
- NM.SETTING_IP_CONFIG_ROUTE_METRIC, ip["route_metric4"]
- )
- for d in ip["dns"]:
- if d["family"] == socket.AF_INET:
- s_ip4.add_dns(d["address"])
- for s in ip["dns_search"]:
- s_ip4.add_dns_search(s)
- s_ip4.clear_dns_options(True)
- for s in ip["dns_options"]:
- s_ip4.add_dns_option(s)
-
- if ip["ipv6_disabled"]:
- s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "disabled")
- elif ip["auto6"]:
- s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "auto")
- elif addrs6:
- s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "manual")
- else:
- # we should not set "ipv6.method=ignore". "ignore" is a legacy mode
- # and not really useful. Instead, we should set "link-local" here.
- #
- # But that fix is a change in behavior for the role, so it needs special
- # care.
- s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, "ignore")
-
- for a in addrs6:
- s_ip6.add_address(
- NM.IPAddress.new(a["family"], a["address"], a["prefix"])
- )
- if ip["gateway6"] is not None:
- s_ip6.set_property(NM.SETTING_IP_CONFIG_GATEWAY, ip["gateway6"])
- if ip["route_metric6"] is not None and ip["route_metric6"] >= 0:
- s_ip6.set_property(
- NM.SETTING_IP_CONFIG_ROUTE_METRIC, ip["route_metric6"]
- )
- for d in ip["dns"]:
- if d["family"] == socket.AF_INET6:
- s_ip6.add_dns(d["address"])
-
- if ip["route_append_only"] and connection_current:
- for r in self.setting_ip_config_get_routes(
- connection_current.get_setting(NM.SettingIP4Config)
- ):
- s_ip4.add_route(r)
- for r in self.setting_ip_config_get_routes(
- connection_current.get_setting(NM.SettingIP6Config)
- ):
- s_ip6.add_route(r)
- for r in ip["route"]:
- rr = NM.IPRoute.new(
- r["family"], r["network"], r["prefix"], r["gateway"], r["metric"]
- )
- if r["family"] == socket.AF_INET:
- s_ip4.add_route(rr)
- else:
- s_ip6.add_route(rr)
-
- if connection["ieee802_1x"]:
- s_8021x = self.connection_ensure_setting(con, NM.Setting8021x)
-
- s_8021x.set_property(
- NM.SETTING_802_1X_EAP, [connection["ieee802_1x"]["eap"]]
- )
- s_8021x.set_property(
- NM.SETTING_802_1X_IDENTITY, connection["ieee802_1x"]["identity"]
- )
-
- s_8021x.set_property(
- NM.SETTING_802_1X_PRIVATE_KEY,
- Util.path_to_glib_bytes(connection["ieee802_1x"]["private_key"]),
- )
-
- if connection["ieee802_1x"]["private_key_password"]:
- s_8021x.set_property(
- NM.SETTING_802_1X_PRIVATE_KEY_PASSWORD,
- connection["ieee802_1x"]["private_key_password"],
- )
-
- if connection["ieee802_1x"]["private_key_password_flags"]:
- s_8021x.set_secret_flags(
- NM.SETTING_802_1X_PRIVATE_KEY_PASSWORD,
- Util.NM().SettingSecretFlags(
- Util.convert_passwd_flags_nm(
- connection["ieee802_1x"]["private_key_password_flags"]
- ),
- ),
- )
-
- s_8021x.set_property(
- NM.SETTING_802_1X_CLIENT_CERT,
- Util.path_to_glib_bytes(connection["ieee802_1x"]["client_cert"]),
- )
-
- if connection["ieee802_1x"]["ca_cert"]:
- s_8021x.set_property(
- NM.SETTING_802_1X_CA_CERT,
- Util.path_to_glib_bytes(connection["ieee802_1x"]["ca_cert"]),
- )
-
- if connection["ieee802_1x"]["ca_path"]:
- s_8021x.set_property(
- NM.SETTING_802_1X_CA_PATH,
- connection["ieee802_1x"]["ca_path"],
- )
-
- s_8021x.set_property(
- NM.SETTING_802_1X_SYSTEM_CA_CERTS,
- connection["ieee802_1x"]["system_ca_certs"],
- )
-
- if connection["ieee802_1x"]["domain_suffix_match"]:
- s_8021x.set_property(
- NM.SETTING_802_1X_DOMAIN_SUFFIX_MATCH,
- connection["ieee802_1x"]["domain_suffix_match"],
- )
-
- try:
- con.normalize()
- except Exception as e:
- raise MyError("created connection failed to normalize: %s" % (e))
- return con
-
- def connection_add(self, con, timeout=10):
- def add_cb(client, result, cb_args):
- con = None
- try:
- con = client.add_connection_finish(result)
- except Exception as e:
- if Util.error_is_cancelled(e):
- return
- cb_args["error"] = str(e)
- cb_args["con"] = con
- Util.GMainLoop().quit()
-
- cancellable = Util.create_cancellable()
- cb_args = {}
- self.nmclient.add_connection_async(con, True, cancellable, add_cb, cb_args)
- if not Util.GMainLoop_run(timeout):
- cancellable.cancel()
- raise MyError("failure to add connection: %s" % ("timeout"))
- if not cb_args.get("con", None):
- raise MyError(
- "failure to add connection: %s"
- % (cb_args.get("error", "unknown error"))
- )
- return cb_args["con"]
-
- def connection_update(self, con, con_new, timeout=10):
- con.replace_settings_from_connection(con_new)
-
- def update_cb(connection, result, cb_args):
- success = False
- try:
- success = connection.commit_changes_finish(result)
- except Exception as e:
- if Util.error_is_cancelled(e):
- return
- cb_args["error"] = str(e)
- cb_args["success"] = success
- Util.GMainLoop().quit()
-
- cancellable = Util.create_cancellable()
- cb_args = {}
- con.commit_changes_async(True, cancellable, update_cb, cb_args)
- if not Util.GMainLoop_run(timeout):
- cancellable.cancel()
- raise MyError("failure to update connection: %s" % ("timeout"))
- if not cb_args.get("success", False):
- raise MyError(
- "failure to update connection: %s"
- % (cb_args.get("error", "unknown error"))
- )
- return True
-
- def create_checkpoint(self, timeout):
- """ Create a new checkpoint """
- checkpoint = Util.call_async_method(
- self.nmclient,
- "checkpoint_create",
- [
- [], # devices, empty list is all devices
- timeout,
- Util.NM().CheckpointCreateFlags.DELETE_NEW_CONNECTIONS
- | Util.NM().CheckpointCreateFlags.DISCONNECT_NEW_DEVICES,
- ],
- )
-
- if checkpoint:
- return checkpoint.get_path()
- return None
-
- def destroy_checkpoint(self, path):
- """ Destroy the specified checkpoint """
- Util.call_async_method(self.nmclient, "checkpoint_destroy", [path])
-
- def rollback_checkpoint(self, path):
- """ Rollback the specified checkpoint """
- Util.call_async_method(
- self.nmclient,
- "checkpoint_rollback",
- [path],
- mainloop_timeout=DEFAULT_ACTIVATION_TIMEOUT,
- )
-
- def connection_activate(self, connection, timeout=15, wait_time=None):
-
- already_retried = False
-
- while True:
-
- def activate_cb(client, result, cb_args):
- active_connection = None
- try:
- active_connection = client.activate_connection_finish(result)
- except Exception as e:
- if Util.error_is_cancelled(e):
- return
- cb_args["error"] = str(e)
- cb_args["active_connection"] = active_connection
- Util.GMainLoop().quit()
-
- cancellable = Util.create_cancellable()
- cb_args = {}
- self.nmclient.activate_connection_async(
- connection, None, None, cancellable, activate_cb, cb_args
- )
- if not Util.GMainLoop_run(timeout):
- cancellable.cancel()
- raise MyError("failure to activate connection: %s" % ("timeout"))
-
- if cb_args.get("active_connection", None):
- ac = cb_args["active_connection"]
- self.connection_activate_wait(ac, wait_time)
- return ac
-
- # there is a bug in NetworkManager, that the connection
- # might already be in the process of activating. In that
- # case, NM would reject the activation request with
- # "Connection '$PROFILE' is not available on the device $DEV
- # at this time."
- #
- # Try to work around it by waiting a bit and retrying.
- if already_retried:
- raise MyError(
- "failure to activate connection: %s"
- % (cb_args.get("error", "unknown error"))
- )
-
- already_retried = True
-
- time.sleep(1)
-
- def connection_activate_wait(self, ac, wait_time):
-
- if not wait_time:
- return
-
- NM = Util.NM()
-
- state = ac.get_state()
- if state == NM.ActiveConnectionState.ACTIVATED:
- return
- if state != NM.ActiveConnectionState.ACTIVATING:
- raise MyError("activation is in unexpected state '%s'" % (state))
-
- def check_activated(ac, dev):
- ac_state = ac.get_state()
-
- # the state reason was for active-connection was introduced
- # in NM 1.8 API. Work around for older library version.
- try:
- ac_reason = ac.get_state_reason()
- except AttributeError:
- ac_reason = None
-
- if dev:
- dev_state = dev.get_state()
-
- if ac_state == NM.ActiveConnectionState.ACTIVATING:
- if (
- self.device_is_controller_type(dev)
- and dev_state >= NM.DeviceState.IP_CONFIG
- and dev_state <= NM.DeviceState.ACTIVATED
- ):
- # controller connections qualify as activated once they
- # reach IP-Config state. That is because they may
- # wait for port devices to attach
- return True, None
- # fall through
- elif ac_state == NM.ActiveConnectionState.ACTIVATED:
- return True, None
- elif ac_state == NM.ActiveConnectionState.DEACTIVATED:
- if (
- not dev
- or (
- ac_reason is not None
- and ac_reason
- != NM.ActiveConnectionStateReason.DEVICE_DISCONNECTED
- )
- or dev.get_active_connection() is not ac
- ):
- return (
- True,
- (
- (ac_reason.value_nick if ac_reason else None)
- or "unknown reason"
- ),
- )
- # the state of the active connection is not very helpful.
- # see if the device-state is better.
- if (
- dev_state <= NM.DeviceState.DISCONNECTED
- or dev_state > NM.DeviceState.DEACTIVATING
- ):
- return (
- True,
- (
- dev.get_state_reason().value_nick
- or (ac_reason.value_nick if ac_reason else None)
- or "unknown reason"
- ),
- )
- # fall through, wait longer for a better state reason.
-
- # wait longer.
- return False, None
-
- dev = Util.first(ac.get_devices())
-
- complete, failure_reason = check_activated(ac, dev)
-
- if not complete:
-
- cb_out = []
-
- def check_activated_cb():
- complete, failure_reason = check_activated(ac, dev)
- if complete:
- cb_out.append(failure_reason)
- Util.GMainLoop().quit()
-
- try:
- # 'state-changed' signal is 1.8 API. Workaround for
- # older libnm API version
- ac_id = ac.connect(
- "state-changed", lambda source, state, reason: check_activated_cb()
- )
- except Exception:
- ac_id = None
- if dev:
- dev_id = dev.connect(
- "notify::state", lambda source, pspec: check_activated_cb()
- )
-
- try:
- if not Util.GMainLoop_run(wait_time):
- raise MyError("connection not fully activated after timeout")
- finally:
- if dev:
- dev.handler_disconnect(dev_id)
- if ac_id is not None:
- ac.handler_disconnect(ac_id)
-
- failure_reason = cb_out[0]
-
- if failure_reason:
- raise MyError("connection not activated: %s" % (failure_reason))
-
- def reapply(self, device, connection=None):
- version_id = 0
- flags = 0
- return Util.call_async_method(
- device, "reapply", [connection, version_id, flags]
- )
-
-
-###############################################################################
-
-
-class RunEnvironment(object):
- def __init__(self):
- self._check_mode = None
-
- @property
- def ifcfg_header(self):
- return None
-
- def log(
- self,
- connections,
- idx,
- severity,
- msg,
- is_changed=False,
- ignore_errors=False,
- warn_traceback=False,
- force_fail=False,
- ):
- raise NotImplementedError()
-
- def run_command(self, argv, encoding=None):
- raise NotImplementedError()
-
- def _check_mode_changed(self, old_check_mode, new_check_mode, connections):
- raise NotImplementedError()
-
- def check_mode_set(self, check_mode, connections=None):
- c = self._check_mode
- self._check_mode = check_mode
- assert (
- (c is None and check_mode in [CheckMode.PREPARE])
- or (
- c == CheckMode.PREPARE
- and check_mode in [CheckMode.PRE_RUN, CheckMode.DRY_RUN]
- )
- or (c == CheckMode.PRE_RUN and check_mode in [CheckMode.REAL_RUN])
- or (c == CheckMode.REAL_RUN and check_mode in [CheckMode.DONE])
- or (c == CheckMode.DRY_RUN and check_mode in [CheckMode.DONE])
- )
- self._check_mode_changed(c, check_mode, connections)
-
-
-class RunEnvironmentAnsible(RunEnvironment):
-
- ARGS = {
- "ignore_errors": {"required": False, "default": False, "type": "bool"},
- "force_state_change": {"required": False, "default": False, "type": "bool"},
- "provider": {"required": True, "default": None, "type": "str"},
- "connections": {"required": False, "default": None, "type": "list"},
- "__debug_flags": {"required": False, "default": "", "type": "str"},
- }
-
- def __init__(self):
- RunEnvironment.__init__(self)
- self._run_results = []
- self._log_idx = 0
- self.on_failure = None
- module = AnsibleModule(argument_spec=self.ARGS, supports_check_mode=True)
- self.module = module
-
- @property
- def ifcfg_header(self):
- return "# this file was created by ansible"
-
- def run_command(self, argv, encoding=None):
- return self.module.run_command(argv, encoding=encoding)
-
- def _run_results_push(self, n_connections):
- c = []
- for cc in range(0, n_connections + 1):
- c.append({"log": []})
- self._run_results.append(c)
-
- @property
- def run_results(self):
- return self._run_results[-1]
-
- def _check_mode_changed(self, old_check_mode, new_check_mode, connections):
- if old_check_mode is None:
- self._run_results_push(len(connections))
- elif old_check_mode == CheckMode.PREPARE:
- self._run_results_push(len(self.run_results) - 1)
- elif old_check_mode == CheckMode.PRE_RUN:
- # when switching from RRE_RUN to REAL_RUN, we drop the run-results
- # we just collected and reset to empty. The PRE_RUN succeeded.
- n_connections = len(self.run_results) - 1
- del self._run_results[-1]
- self._run_results_push(n_connections)
-
- def log(
- self,
- connections,
- idx,
- severity,
- msg,
- is_changed=False,
- ignore_errors=False,
- warn_traceback=False,
- force_fail=False,
- ):
- assert idx >= -1
- self._log_idx += 1
- self.run_results[idx]["log"].append((severity, msg, self._log_idx))
- if severity == LogLevel.ERROR:
- if force_fail or not ignore_errors:
- self.fail_json(
- connections,
- "error: %s" % (msg),
- changed=is_changed,
- warn_traceback=warn_traceback,
- )
-
- def _complete_kwargs_loglines(self, rr, connections, idx):
- if idx == len(connections):
- prefix = "#"
- else:
- c = connections[idx]
- prefix = "#%s, state:%s persistent_state:%s" % (
- idx,
- c["state"],
- c["persistent_state"],
- )
- prefix = prefix + (", '%s'" % (c["name"]))
- for severity, msg, idx in rr["log"]:
- yield (
- idx,
- "[%03d] %s %s: %s" % (idx, LogLevel.fmt(severity), prefix, msg),
- severity,
- )
-
- def _complete_kwargs(self, connections, kwargs, traceback_msg=None, fail=False):
- warning_logs = kwargs.get("warnings", [])
- debug_logs = []
- loglines = []
- for res in self._run_results:
- for idx, rr in enumerate(res):
- loglines.extend(self._complete_kwargs_loglines(rr, connections, idx))
- loglines.sort(key=lambda log_line: log_line[0])
- for idx, log_line, severity in loglines:
- debug_logs.append(log_line)
- if fail:
- warning_logs.append(log_line)
- elif severity >= LogLevel.WARN:
- warning_logs.append(log_line)
- if traceback_msg is not None:
- warning_logs.append(traceback_msg)
- kwargs["warnings"] = warning_logs
- stderr = "\n".join(debug_logs) + "\n"
- kwargs["stderr"] = stderr
- kwargs["_invocation"] = {"module_args": self.module.params}
- return kwargs
-
- def exit_json(self, connections, changed=False, **kwargs):
- kwargs["changed"] = changed
- self.module.exit_json(**self._complete_kwargs(connections, kwargs))
-
- def fail_json(
- self, connections, msg, changed=False, warn_traceback=False, **kwargs
- ):
- if self.on_failure:
- self.on_failure()
-
- traceback_msg = None
- if warn_traceback:
- traceback_msg = "exception: %s" % (traceback.format_exc())
- kwargs["msg"] = msg
- kwargs["changed"] = changed
- self.module.fail_json(
- **self._complete_kwargs(connections, kwargs, traceback_msg, fail=True)
- )
-
-
-###############################################################################
-
-
-class NmLogHandler(logging.Handler):
- def __init__(self, log_func, idx):
- self._log = log_func
- self._idx = idx
- super(NmLogHandler, self).__init__()
-
- def filter(self, record):
- return True
-
- def emit(self, record):
- self._log(
- self._idx, LogLevel.from_logging_level(record.levelno), record.getMessage()
- )
-
-
-class Cmd(object):
- def __init__(
- self,
- run_env,
- connections_unvalidated,
- connection_validator,
- is_check_mode=False,
- ignore_errors=False,
- force_state_change=False,
- debug_flags="",
- ):
- self.run_env = run_env
- self.validate_one_type = None
- self._connections_unvalidated = connections_unvalidated
- self._connection_validator = connection_validator
- self._is_check_mode = is_check_mode
- self._ignore_errors = Util.boolean(ignore_errors)
- self._force_state_change = Util.boolean(force_state_change)
-
- self._connections = None
- self._connections_data = None
- self._check_mode = CheckMode.PREPARE
- self._is_changed_modified_system = False
- self._debug_flags = debug_flags
-
- def run_command(self, argv, encoding=None):
- return self.run_env.run_command(argv, encoding=encoding)
-
- @property
- def is_changed_modified_system(self):
- return self._is_changed_modified_system
-
- @property
- def connections(self):
- c = self._connections
- if c is None:
- try:
- c = self._connection_validator.validate(self._connections_unvalidated)
- except ValidationError as e:
- raise MyError("configuration error: %s" % (e))
- self._connections = c
- return c
-
- @property
- def connections_data(self):
- c = self._connections_data
- if c is None:
- assert self.check_mode in [
- CheckMode.DRY_RUN,
- CheckMode.PRE_RUN,
- CheckMode.REAL_RUN,
- ]
- c = []
- for _ in range(0, len(self.connections)):
- c.append({"changed": False})
- self._connections_data = c
- return c
-
- def connections_data_reset(self):
- for c in self.connections_data:
- c["changed"] = False
-
- def connections_data_set_changed(self, idx, changed=True):
- assert self._check_mode in [
- CheckMode.PRE_RUN,
- CheckMode.DRY_RUN,
- CheckMode.REAL_RUN,
- ]
- if not changed:
- return
- self.connections_data[idx]["changed"] = changed
- if changed and self._check_mode in [CheckMode.DRY_RUN, CheckMode.REAL_RUN]:
- # we only do actual modifications during the REAL_RUN step.
- # And as a special exception, during the DRY_RUN step, which
- # is like REAL_RUN, except not not actually changing anything.
- self._is_changed_modified_system = True
-
- def log_debug(self, idx, msg):
- self.log(idx, LogLevel.DEBUG, msg)
-
- def log_info(self, idx, msg):
- self.log(idx, LogLevel.INFO, msg)
-
- def log_warn(self, idx, msg):
- self.log(idx, LogLevel.WARN, msg)
-
- def log_error(self, idx, msg, warn_traceback=False, force_fail=False):
- self.log(
- idx,
- LogLevel.ERROR,
- msg,
- warn_traceback=warn_traceback,
- force_fail=force_fail,
- )
-
- def log_fatal(self, idx, msg, warn_traceback=False):
- self.log(
- idx, LogLevel.ERROR, msg, warn_traceback=warn_traceback, force_fail=True
- )
-
- def log(self, idx, severity, msg, warn_traceback=False, force_fail=False):
- self.run_env.log(
- self.connections,
- idx,
- severity,
- msg,
- is_changed=self.is_changed_modified_system,
- ignore_errors=self.connection_ignore_errors(self.connections[idx]),
- warn_traceback=warn_traceback,
- force_fail=force_fail,
- )
-
- @staticmethod
- def create(provider, **kwargs):
- if provider == "nm":
- return Cmd_nm(**kwargs)
- elif provider == "initscripts":
- return Cmd_initscripts(**kwargs)
- raise MyError("unsupported provider %s" % (provider))
-
- def connection_force_state_change(self, connection):
- v = connection["force_state_change"]
- if v is not None:
- return v
- return self._force_state_change
-
- def connection_ignore_errors(self, connection):
- v = connection["ignore_errors"]
- if v is not None:
- return v
- return self._ignore_errors
-
- def connection_modified_earlier(self, idx):
- # for index @idx, check if any of the previous profiles [0..idx[
- # modify the connection.
-
- con = self.connections[idx]
- assert con["state"] in ["up", "down"]
-
- # also check, if the current profile is 'up' with a 'type' (which
- # possibly modifies the connection as well)
- if (
- con["state"] == "up"
- and "type" in con
- and self.connections_data[idx]["changed"]
- ):
- return True
-
- for i in reversed(range(idx)):
- c = self.connections[i]
- if "name" not in c:
- continue
- if c["name"] != con["name"]:
- continue
-
- c_state = c["state"]
- c_pstate = c["persistent_state"]
- if c_state == "up" and "type" not in c:
- pass
- elif c_state == "down":
- return True
- elif c_pstate == "absent":
- return True
- elif c_state == "up" or c_pstate == "present":
- if self.connections_data[idx]["changed"]:
- return True
-
- return False
-
- @property
- def check_mode(self):
- return self._check_mode
-
- def check_mode_next(self):
- if self._check_mode == CheckMode.PREPARE:
- if self._is_check_mode:
- c = CheckMode.DRY_RUN
- else:
- c = CheckMode.PRE_RUN
- elif self.check_mode == CheckMode.PRE_RUN:
- self.connections_data_reset()
- c = CheckMode.REAL_RUN
- elif self._check_mode != CheckMode.DONE:
- c = CheckMode.DONE
- else:
- assert False
- self._check_mode = c
- self.run_env.check_mode_set(c)
- return c
-
- def run(self):
- self.run_env.check_mode_set(CheckMode.PREPARE, self.connections)
- for idx, connection in enumerate(self.connections):
- try:
- self._connection_validator.validate_connection_one(
- self.validate_one_type, self.connections, idx
- )
- except ValidationError as e:
- self.log_fatal(idx, str(e))
- self.run_prepare()
- while self.check_mode_next() != CheckMode.DONE:
- if self.check_mode == CheckMode.REAL_RUN:
- self.start_transaction()
-
- # Reasoning for this order:
- # For down/up profiles might need to be present, so do this first
- # Put profile down before removing it if necessary
- # To ensure up does not depend on anything that might be removed,
- # do it last
- for action in ("present", "down", "absent", "up"):
- for idx, connection in enumerate(self.connections):
- try:
- if action in connection["actions"]:
- if action == "absent":
- self.run_action_absent(idx)
- elif action == "present":
- self.run_action_present(idx)
- elif action == "up":
- self.run_action_up(idx)
- elif action == "down":
- self.run_action_down(idx)
- except Exception as error:
- if self.check_mode == CheckMode.REAL_RUN:
- self.rollback_transaction(idx, action, error)
- raise
-
- if self.check_mode == CheckMode.REAL_RUN:
- self.finish_transaction()
-
- def run_prepare(self):
- for idx, connection in enumerate(self.connections):
- if "type" in connection and connection["check_iface_exists"]:
- # when the profile is tied to a certain interface via
- # 'interface_name' or 'mac', check that such an interface
- # exists.
- #
- # This check has many flaws, as we don't check whether the
- # existing interface has the right device type. Also, there is
- # some ambiguity between the current MAC address and the
- # permanent MAC address.
- li_mac = None
- li_ifname = None
- if connection["mac"]:
- li_mac = SysUtil.link_info_find(mac=connection["mac"])
- if not li_mac:
- self.log_fatal(
- idx,
- "profile specifies mac '%s' but no such interface exists"
- % (connection["mac"]),
- )
- if connection["interface_name"]:
- li_ifname = SysUtil.link_info_find(
- ifname=connection["interface_name"]
- )
- if not li_ifname:
- if connection["type"] == "ethernet":
- self.log_fatal(
- idx,
- "profile specifies interface_name '%s' but no such "
- "interface exists" % (connection["interface_name"]),
- )
- elif connection["type"] == "infiniband":
- if connection["infiniband"]["p_key"] != -1:
- self.log_fatal(
- idx,
- "profile specifies interface_name '%s' but no such "
- "infiniband interface exists"
- % (connection["interface_name"]),
- )
- if li_mac and li_ifname and li_mac != li_ifname:
- self.log_fatal(
- idx,
- "profile specifies interface_name '%s' and mac '%s' but no "
- "such interface exists"
- % (connection["interface_name"], connection["mac"]),
- )
-
- def start_transaction(self):
- """ Hook before making changes """
-
- def finish_transaction(self):
- """ Hook for after all changes where made successfuly """
-
- def rollback_transaction(self, idx, action, error):
- """Hook if configuring a profile results in an error
-
- :param idx: Index of the connection that triggered the error
- :param action: Action that triggered the error
- :param error: The error
-
- :type idx: int
- :type action: str
- :type error: Exception
-
- """
- self.log_warn(
- idx, "failure: %s (%s) [[%s]]" % (error, action, traceback.format_exc())
- )
-
- def on_failure(self):
- """ Hook to do any cleanup on failure before exiting """
- pass
-
- def run_action_absent(self, idx):
- raise NotImplementedError()
-
- def run_action_present(self, idx):
- raise NotImplementedError()
-
- def run_action_down(self, idx):
- raise NotImplementedError()
-
- def run_action_up(self, idx):
- raise NotImplementedError()
-
-
-###############################################################################
-
-
-class Cmd_nm(Cmd):
- def __init__(self, **kwargs):
- Cmd.__init__(self, **kwargs)
- self._nmutil = None
- self.validate_one_type = ArgValidator_ListConnections.VALIDATE_ONE_MODE_NM
- self._checkpoint = None
- # pylint: disable=import-error, no-name-in-module
- from ansible.module_utils.network_lsr.nm import provider # noqa:E501
-
- # pylint: enable=import-error, no-name-in-module
-
- self._nm_provider = provider.NetworkManagerProvider()
-
- @property
- def nmutil(self):
- if self._nmutil is None:
- try:
- nmclient = Util.NM().Client.new(None)
- except Exception as e:
- raise MyError("failure loading libnm library: %s" % (e))
- self._nmutil = NMUtil(nmclient)
- return self._nmutil
-
- def run_prepare(self):
- Cmd.run_prepare(self)
-
- names = {}
- for idx, connection in enumerate(self.connections):
- self._check_ethtool_setting_support(idx, connection)
-
- name = connection["name"]
- if not name:
- assert connection["persistent_state"] == "absent"
- continue
- if name in names:
- exists = names[name]["nm.exists"]
- uuid = names[name]["nm.uuid"]
- else:
- c = Util.first(self.nmutil.connection_list(name=name))
-
- exists = c is not None
- if c is not None:
- uuid = c.get_uuid()
- else:
- uuid = Util.create_uuid()
- names[name] = {"nm.exists": exists, "nm.uuid": uuid}
- connection["nm.exists"] = exists
- connection["nm.uuid"] = uuid
-
- def start_transaction(self):
- Cmd.start_transaction(self)
- if "disable-checkpoints" in self._debug_flags:
- pass
- else:
- self._checkpoint = self.nmutil.create_checkpoint(
- len(self.connections) * DEFAULT_ACTIVATION_TIMEOUT
- )
-
- def rollback_transaction(self, idx, action, error):
- Cmd.rollback_transaction(self, idx, action, error)
- self.on_failure()
-
- def finish_transaction(self):
- Cmd.finish_transaction(self)
- if self._checkpoint:
- try:
- self.nmutil.destroy_checkpoint(self._checkpoint)
- finally:
- self._checkpoint = None
-
- def on_failure(self):
- if self._checkpoint:
- try:
- self.nmutil.rollback_checkpoint(self._checkpoint)
- finally:
- self._checkpoint = None
-
- def _check_ethtool_setting_support(self, idx, connection):
- """Check if SettingEthtool support is needed and available
-
- If any ethtool setting is specified, the SettingEthtool
- setting needs to be available. Also NM needs to know about each
- specified setting. Do not check if NM knows about any defaults
-
- """
- NM = Util.NM()
-
- # If the profile is not completely specified, for example if only the
- # runtime change is specified, the ethtool subtree might be missing.
- # Then no checks are required.
- if "ethtool" not in connection:
- return
-
- ethtool_dict = {
- "features": nm_provider.get_nm_ethtool_feature,
- "coalesce": nm_provider.get_nm_ethtool_coalesce,
- }
-
- for ethtool_key, nm_get_name_fcnt in ethtool_dict.items():
- ethtool_settings = connection["ethtool"][ethtool_key]
- specified = dict(
- [(k, v) for k, v in ethtool_settings.items() if v is not None]
- )
-
- if specified and not hasattr(NM, "SettingEthtool"):
- self.log_fatal(
- idx, "ethtool.%s specified but not supported by NM", specified
- )
-
- for option, _ in specified.items():
- nm_name = nm_get_name_fcnt(option)
- if not nm_name:
- self.log_fatal(
- idx,
- "ethtool %s setting %s specified "
- "but not supported by NM" % (ethtool_key, option),
- )
-
- def run_action_absent(self, idx):
- name = self.connections[idx]["name"]
- profile_uuids = set()
-
- if name:
- black_list_names = []
- else:
- # Delete all profiles except explicitly included
- black_list_names = ArgUtil.connection_get_non_absent_names(self.connections)
-
- for nm_profile in self._nm_provider.get_connections():
- if name and nm_profile.get_id() != name:
- continue
- if nm_profile.get_id() not in black_list_names:
- profile_uuids.add(nm_profile.get_uuid())
-
- if not profile_uuids:
- self.log_info(idx, "no connection matches '%s' to delete" % (name))
- return
-
- logger = logging.getLogger()
- log_handler = NmLogHandler(self.log, idx)
- logger.addHandler(log_handler)
- timeout = self.connections[idx].get("wait")
- changed = False
- for profile_uuid in profile_uuids:
- changed |= self._nm_provider.volatilize_connection_by_uuid(
- profile_uuid,
- DEFAULT_TIMEOUT if timeout is None else timeout,
- self.check_mode != CheckMode.REAL_RUN,
- )
- if changed:
- self.connections_data_set_changed(idx)
- logger.removeHandler(log_handler)
-
- def run_action_present(self, idx):
- connection = self.connections[idx]
- con_cur = Util.first(
- self.nmutil.connection_list(
- name=connection["name"], uuid=connection["nm.uuid"]
- )
- )
-
- if not connection.get("type"):
- # if the type is not specified, just check that the connection was
- # found
- if not con_cur:
- self.log_error(
- idx, "Connection not found on system and 'type' not specified"
- )
- return
-
- con_new = self.nmutil.connection_create(self.connections, idx, con_cur)
- if con_cur is None:
- self.log_info(
- idx,
- "add connection %s, %s" % (connection["name"], connection["nm.uuid"]),
- )
- self.connections_data_set_changed(idx)
- if self.check_mode == CheckMode.REAL_RUN:
- try:
- con_cur = self.nmutil.connection_add(con_new)
- except MyError as e:
- self.log_error(idx, "adding connection failed: %s" % (e))
- elif not self.nmutil.connection_compare(con_cur, con_new, normalize_a=True):
- self.log_info(
- idx, "update connection %s, %s" % (con_cur.get_id(), con_cur.get_uuid())
- )
- self.connections_data_set_changed(idx)
- if self.check_mode == CheckMode.REAL_RUN:
- try:
- self.nmutil.connection_update(con_cur, con_new)
- except MyError as e:
- self.log_error(idx, "updating connection failed: %s" % (e))
- else:
- self.log_info(
- idx,
- "connection %s, %s already up to date"
- % (con_cur.get_id(), con_cur.get_uuid()),
- )
-
- if (
- self.check_mode == CheckMode.REAL_RUN
- and connection["ieee802_1x"] is not None
- and connection["ieee802_1x"].get("ca_path")
- ):
- # It seems that NM on Fedora 31
- # (NetworkManager-1.20.4-1.fc31.x86_64) does need some time so that
- # the D-Bus information is actually up-to-date.
- time.sleep(0.1)
- Util.GMainLoop_iterate_all()
- updated_connection = Util.first(
- self.nmutil.connection_list(
- name=connection["name"], uuid=connection["nm.uuid"]
- )
- )
- ca_path = updated_connection.get_setting_802_1x().props.ca_path
- if not ca_path:
- self.log_fatal(
- idx,
- "ieee802_1x.ca_path specified but not supported by "
- "NetworkManager. Please update NetworkManager or use "
- "ieee802_1x.ca_cert.",
- )
- if con_cur is not None:
- self._remove_duplicate_profile(idx, con_cur, connection.get("timeout"))
-
- def _remove_duplicate_profile(self, idx, cur_nm_profile, timeout):
- logger = logging.getLogger()
- log_handler = NmLogHandler(self.log, idx)
- logger.addHandler(log_handler)
-
- for nm_profile in self._nm_provider.get_connections():
- if (
- nm_profile.get_id() == cur_nm_profile.get_id()
- and nm_profile.get_uuid() != cur_nm_profile.get_uuid()
- ):
- if self.check_mode == CheckMode.REAL_RUN:
- self._nm_provider.volatilize_connection_by_uuid(
- uuid=nm_profile.get_uuid(),
- timeout=(DEFAULT_TIMEOUT if timeout is None else timeout),
- check_mode=True,
- )
- self.connections_data_set_changed(idx)
- logger.removeHandler(log_handler)
-
- def run_action_up(self, idx):
- connection = self.connections[idx]
-
- con = Util.first(
- self.nmutil.connection_list(
- name=connection["name"], uuid=connection["nm.uuid"]
- )
- )
- if not con:
- if self.check_mode == CheckMode.REAL_RUN:
- self.log_error(
- idx,
- "up connection %s, %s failed: no connection"
- % (connection["name"], connection["nm.uuid"]),
- )
- else:
- self.log_info(
- idx,
- "up connection %s, %s"
- % (connection["name"], connection["nm.uuid"]),
- )
- return
-
- is_active = self.nmutil.connection_is_active(con)
- is_modified = self.connection_modified_earlier(idx)
- force_state_change = self.connection_force_state_change(connection)
-
- if is_active and not force_state_change and not is_modified:
- self.log_info(
- idx,
- "up connection %s, %s skipped because already active"
- % (con.get_id(), con.get_uuid()),
- )
- return
-
- self.log_info(
- idx,
- "up connection %s, %s (%s)"
- % (
- con.get_id(),
- con.get_uuid(),
- "not-active"
- if not is_active
- else "is-modified"
- if is_modified
- else "force-state-change",
- ),
- )
- self.connections_data_set_changed(idx)
- if self.check_mode == CheckMode.REAL_RUN:
- if self._try_reapply(idx, con):
- return
-
- try:
- ac = self.nmutil.connection_activate(con)
- except MyError as e:
- self.log_error(idx, "up connection failed: %s" % (e))
-
- wait_time = connection["wait"]
- if wait_time is None:
- wait_time = DEFAULT_ACTIVATION_TIMEOUT
-
- try:
- self.nmutil.connection_activate_wait(ac, wait_time)
- except MyError as e:
- self.log_error(idx, "up connection failed while waiting: %s" % (e))
-
- def _try_reapply(self, idx, con):
- """Try to reapply a connection
-
- If there is exactly one active connection with the same UUID activated
- on exactly one device, ask the device to reapply the connection.
-
- :returns: `True`, when the connection was reapplied, `False` otherwise
- :rtype: bool
- """
- NM = Util.NM()
-
- acons = list(self.nmutil.active_connection_list(connections=[con]))
- if len(acons) != 1:
- return False
-
- active_connection = acons[0]
- if active_connection.get_state() == NM.ActiveConnectionState.ACTIVATED:
- devices = active_connection.get_devices()
- if len(devices) == 1:
- try:
- self.nmutil.reapply(devices[0])
- self.log_info(idx, "connection reapplied")
- return True
- except MyError as error:
- self.log_info(idx, "connection reapply failed: %s" % (error))
- return False
-
- def run_action_down(self, idx):
- connection = self.connections[idx]
- logger = logging.getLogger()
- log_handler = NmLogHandler(self.log, idx)
- logger.addHandler(log_handler)
- timeout = connection["wait"]
- if self._nm_provider.deactivate_connection(
- connection["name"],
- 10 if timeout is None else timeout,
- self.check_mode != CheckMode.REAL_RUN,
- ):
- self.connections_data_set_changed(idx)
- logger.removeHandler(log_handler)
-
-
-###############################################################################
-
-
-class Cmd_initscripts(Cmd):
- def __init__(self, **kwargs):
- Cmd.__init__(self, **kwargs)
- self.validate_one_type = (
- ArgValidator_ListConnections.VALIDATE_ONE_MODE_INITSCRIPTS
- )
-
- def run_prepare(self):
- Cmd.run_prepare(self)
- for idx, connection in enumerate(self.connections):
- if connection.get("type") in ["macvlan"]:
- self.log_fatal(
- idx,
- "unsupported type %s for initscripts provider"
- % (connection["type"]),
- )
-
- def check_name(self, idx, name=None):
- if name is None:
- name = self.connections[idx]["name"]
- try:
- f = IfcfgUtil.ifcfg_path(name)
- except MyError:
- self.log_error(idx, "invalid name %s for connection" % (name))
- return None
- return f
-
- def forget_nm_connection(self, path):
- """
- Forget a NetworkManager connection by loading the path of the deleted
- profile. This inverts the effect of loading a profile with
- `NM_CONTROLLED=no` earlier, which made NetworkManager ignore the
- device.
-
- This does not use the Python libnm bindings because they might not be
- present on the system, since the module is currently operating for the
- initscripts provider. If it fails, assume that NetworkManager is not
- present and did not save any state about the corresponding interface.
- """
- try:
- subprocess.call(
- [
- "busctl",
- "--system",
- "call",
- "org.freedesktop.NetworkManager",
- "/org/freedesktop/NetworkManager/Settings",
- "org.freedesktop.NetworkManager.Settings",
- "LoadConnections",
- "as",
- "1",
- path,
- ]
- )
- except Exception:
- pass
-
- def run_action_absent(self, idx):
- n = self.connections[idx]["name"]
- name = n
- if not name:
- names = []
- black_list_names = ArgUtil.connection_get_non_absent_names(self.connections)
- for f in os.listdir("/etc/sysconfig/network-scripts"):
- if not f.startswith("ifcfg-"):
- continue
- name = f[6:]
- if name in black_list_names:
- continue
- if name == "lo":
- continue
- names.append(name)
- else:
- if not self.check_name(idx):
- return
- names = [name]
-
- changed = False
- for name in names:
- for path in IfcfgUtil.ifcfg_paths(name):
- if not os.path.isfile(path):
- continue
- changed = True
- self.log_info(idx, "delete ifcfg-rh file '%s'" % (path))
- self.connections_data_set_changed(idx)
- if self.check_mode == CheckMode.REAL_RUN:
- try:
- os.unlink(path)
- self.forget_nm_connection(path)
- except Exception as e:
- self.log_error(
- idx, "delete ifcfg-rh file '%s' failed: %s" % (path, e)
- )
-
- if not changed:
- self.log_info(
- idx,
- "delete ifcfg-rh files for %s (no files present)"
- % ("'" + n + "'" if n else "*"),
- )
-
- def run_action_present(self, idx):
- if not self.check_name(idx):
- return
-
- connection = self.connections[idx]
- name = connection["name"]
-
- old_content = IfcfgUtil.content_from_file(name)
-
- if not connection.get("type"):
- # if the type is not specified, just check that the connection was
- # found
- if not old_content.get("ifcfg"):
- self.log_error(
- idx, "Connection not found on system and 'type' not present"
- )
- return
-
- ifcfg_all = IfcfgUtil.ifcfg_create(
- self.connections, idx, lambda msg: self.log_warn(idx, msg), old_content
- )
-
- new_content = IfcfgUtil.content_from_dict(
- ifcfg_all, header=self.run_env.ifcfg_header
- )
-
- if old_content == new_content:
- self.log_info(idx, "ifcfg-rh profile '%s' already up to date" % (name))
- return
-
- op = "add" if (old_content["ifcfg"] is None) else "update"
-
- self.log_info(idx, "%s ifcfg-rh profile '%s'" % (op, name))
-
- self.connections_data_set_changed(idx)
- if self.check_mode == CheckMode.REAL_RUN:
- try:
- IfcfgUtil.content_to_file(name, new_content)
- except MyError as e:
- self.log_error(
- idx, "%s ifcfg-rh profile '%s' failed: %s" % (op, name, e)
- )
-
- def _run_action_updown(self, idx, do_up):
- if not self.check_name(idx):
- return
-
- connection = self.connections[idx]
- name = connection["name"]
-
- if connection["wait"] is not None:
- # initscripts don't support wait, they always block until the ifup/ifdown
- # command completes. Silently ignore the argument.
- pass
-
- path = IfcfgUtil.ifcfg_path(name)
- if not os.path.isfile(path):
- if (
- self.check_mode == CheckMode.REAL_RUN
- and connection.get(PERSISTENT_STATE) != ABSENT_STATE
- ):
- self.log_error(idx, "ifcfg file '%s' does not exist" % (path))
- else:
- if self.check_mode != CheckMode.REAL_RUN:
- in_checkmode = " in check mode"
- else:
- in_checkmode = ""
- self.log_info(
- idx, "ifcfg file '%s' does not exist%s" % (path, in_checkmode)
- )
- return
-
- is_active = IfcfgUtil.connection_seems_active(name)
- is_modified = self.connection_modified_earlier(idx)
- force_state_change = self.connection_force_state_change(connection)
-
- if do_up:
- if is_active is True and not force_state_change and not is_modified:
- self.log_info(
- idx, "up connection %s skipped because already active" % (name)
- )
- return
-
- self.log_info(
- idx,
- "up connection %s (%s)"
- % (
- name,
- "not-active"
- if is_active is not True
- else "is-modified"
- if is_modified
- else "force-state-change",
- ),
- )
- cmd = "ifup"
- else:
- if is_active is False and not force_state_change:
- self.log_info(
- idx, "down connection %s skipped because not active" % (name)
- )
- return
-
- self.log_info(
- idx,
- "up connection %s (%s)"
- % (name, "active" if is_active is not False else "force-state-change"),
- )
- cmd = "ifdown"
-
- self.connections_data_set_changed(idx)
- if self.check_mode == CheckMode.REAL_RUN:
- rc, out, err = self.run_env.run_command([cmd, name])
- self.log_info(
- idx,
- "call '%s %s': rc=%d, out='%s', err='%s'" % (cmd, name, rc, out, err),
- )
- if rc != 0:
- self.log_error(
- idx, "call '%s %s' failed with exit status %d" % (cmd, name, rc)
- )
-
- def run_action_up(self, idx):
- self._run_action_updown(idx, True)
-
- def run_action_down(self, idx):
- self._run_action_updown(idx, False)
-
-
-###############################################################################
-
-
-def main():
- connections = None
- cmd = None
- run_env_ansible = RunEnvironmentAnsible()
- try:
- params = run_env_ansible.module.params
- cmd = Cmd.create(
- params["provider"],
- run_env=run_env_ansible,
- connections_unvalidated=params["connections"],
- connection_validator=ArgValidator_ListConnections(),
- is_check_mode=run_env_ansible.module.check_mode,
- ignore_errors=params["ignore_errors"],
- force_state_change=params["force_state_change"],
- debug_flags=params["__debug_flags"],
- )
- connections = cmd.connections
- run_env_ansible.on_failure = cmd.on_failure
- cmd.run()
- except Exception as e:
- run_env_ansible.fail_json(
- connections,
- "fatal error: %s" % (e),
- changed=(cmd is not None and cmd.is_changed_modified_system),
- warn_traceback=not isinstance(e, MyError),
- )
- run_env_ansible.exit_json(
- connections, changed=(cmd is not None and cmd.is_changed_modified_system)
- )
-
-
-if __name__ == "__main__":
- main()
diff --git a/roles/linux-system-roles.network/meta/.galaxy_install_info b/roles/linux-system-roles.network/meta/.galaxy_install_info
deleted file mode 100644
index cae0cfc..0000000
--- a/roles/linux-system-roles.network/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Wed Jun 30 01:12:57 2021
-version: 1.3.0
diff --git a/roles/linux-system-roles.network/meta/main.yml b/roles/linux-system-roles.network/meta/main.yml
deleted file mode 100644
index 38197e4..0000000
--- a/roles/linux-system-roles.network/meta/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-galaxy_info:
- author: Thomas Haller , Till Maas
- description: Configure networking
- company: Red Hat, Inc.
- license: BSD-3-Clause
- min_ansible_version: 2.7
- github_branch: main
- galaxy_tags:
- - centos
- - fedora
- - network
- - networking
- - redhat
- - rhel
- - system
- platforms:
- - name: Fedora
- versions:
- - all
- - name: EL
- versions:
- - 6
- - 7
- - 8
diff --git a/roles/linux-system-roles.network/module_utils/__init__.py b/roles/linux-system-roles.network/module_utils/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/__init__.py b/roles/linux-system-roles.network/module_utils/network_lsr/__init__.py
deleted file mode 100644
index 22c717c..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/python3 -tt
-# vim: fileencoding=utf8
-# SPDX-License-Identifier: BSD-3-Clause
-
-
-class MyError(Exception):
- pass
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py b/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py
deleted file mode 100644
index 1bfaeda..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/argument_validator.py
+++ /dev/null
@@ -1,1804 +0,0 @@
-#!/usr/bin/python3 -tt
-# vim: fileencoding=utf8
-# SPDX-License-Identifier: BSD-3-Clause
-
-import posixpath
-import socket
-import re
-
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.network_lsr import MyError # noqa:E501
-from ansible.module_utils.network_lsr.utils import Util # noqa:E501
-
-UINT32_MAX = 0xFFFFFFFF
-
-
-class ArgUtil:
- @staticmethod
- def connection_find_by_name(name, connections, n_connections=None):
- if not name:
- raise ValueError("missing name argument")
- c = None
- for idx, connection in enumerate(connections):
- if n_connections is not None and idx >= n_connections:
- break
- if "name" not in connection or name != connection["name"]:
- continue
-
- if connection["persistent_state"] == "absent":
- c = None
- elif connection["persistent_state"] == "present":
- c = connection
- return c
-
- @staticmethod
- def connection_find_controller(name, connections, n_connections=None):
- c = ArgUtil.connection_find_by_name(name, connections, n_connections)
- if not c:
- raise MyError("invalid controller/parent '%s'" % (name))
- if c["interface_name"] is None:
- raise MyError(
- "invalid controller/parent '%s' which needs an 'interface_name'"
- % (name)
- )
- if not Util.ifname_valid(c["interface_name"]):
- raise MyError(
- "invalid controller/parent '%s' with invalid 'interface_name' ('%s')"
- % (name, c["interface_name"])
- )
- return c["interface_name"]
-
- @staticmethod
- def connection_find_controller_uuid(name, connections, n_connections=None):
- c = ArgUtil.connection_find_by_name(name, connections, n_connections)
- if not c:
- raise MyError("invalid controller/parent '%s'" % (name))
- return c["nm.uuid"]
-
- @staticmethod
- def connection_get_non_absent_names(connections):
- # @idx is the index with state['absent']. This will
- # return the names of all explicitly mentioned profiles.
- # That is, the names of profiles that should not be deleted.
- result = set()
- for connection in connections:
- if "name" not in connection:
- continue
- if not connection["name"]:
- continue
- result.add(connection["name"])
- return result
-
-
-class ValidationError(MyError):
- def __init__(self, name, message):
- Exception.__init__(self, name + ": " + message)
- self.error_message = message
- self.name = name
-
- @staticmethod
- def from_connection(idx, message):
- return ValidationError("connection[" + str(idx) + "]", message)
-
-
-class ArgValidator:
- MISSING = object()
- DEFAULT_SENTINEL = object()
-
- def __init__(self, name=None, required=False, default_value=None):
- self.name = name
- self.required = required
- self.default_value = default_value
-
- def get_default_value(self):
- try:
- return self.default_value()
- except Exception: # pylint: disable=broad-except
- return self.default_value
-
- def validate(self, value):
- """
- Validate and normalize the input dictionary
-
- This validate @value or raises a ValidationError() on error.
- It also returns a normalized value, where the settings are
- converted to appropriate types and default values set. You
- should rely on the normalization to fill unspecified values
- and resolve ambiguity.
-
- You are implementing "types" of ArgValidator instances and
- a major point of them is to implement a suitable validation and
- normalization. The means for that is for subclasses to override
- _validate_impl() and possibly _validate_post(). Some subclasses
- support convenience arguments for simpler validation, like
- ArgValidatorStr.enum_values or ArgValidatorNum.val_min.
- Or ArgValidator.required which is honored by ArgValidatorDict
- to determine whether a mandatory key is missing. Also,
- ArgValidatorDict and ArgValidatorList have a nested parameter
- which is an ArgValidator for the elements of the dictionary and list.
- """
- return self._validate(value, self.name)
-
- def _validate(self, value, name):
- """
- The internal implementation for validate().
-
- This is mostly called from internal code and by validate().
- Usually you would not call this directly nor override it.
- Instead, you would implement either _validate_impl() or
- _validate_post().
- """
- validated = self._validate_impl(value, name)
- return self._validate_post(value, name, validated)
-
- def _validate_impl(self, value, name):
- """
- Implementation of validation.
-
- Subclasses must implement this validation function. It is
- the main hook to implement validate(). On validation error
- it must raise ValidationError() or otherwise return a pre-normalized
- value that gets passed to _validate_post().
- """
- raise NotImplementedError()
-
- # pylint: disable=unused-argument,no-self-use
- def _validate_post(self, value, name, result):
- """
- Post validation of the validated result.
-
- This will be called with the result from _validate_impl().
- By default it does nothing, but subclasses can override
- this to perform additional validation. The use for this
- hook is to split the validation in two steps. When validating
- a dictionary of multiple keys, then _validate_impl() can
- implement the basic pre-validation and pre-normalization of the individual
- keys (which can be in any order). Afterwards, _validate_post()
- can take a more holistic view and validate interdependencies
- between keys and perform additional validation. For example,
- _validate_impl() would validate that the keys are of the correct
- basic type, and _validate_post() would validate that the values
- don't conflict and possibly normalize derived default values.
- """
- return result
-
-
-class ArgValidatorStr(ArgValidator):
- def __init__( # pylint: disable=too-many-arguments
- self,
- name,
- required=False,
- default_value=None,
- enum_values=None,
- allow_empty=False,
- min_length=None,
- max_length=None,
- regex=None,
- ):
- ArgValidator.__init__(self, name, required, default_value)
- self.enum_values = enum_values
- self.allow_empty = allow_empty
- self.regex = regex
-
- if max_length is not None:
- if not isinstance(max_length, int):
- raise ValueError("max_length must be an integer")
- elif max_length < 0:
- raise ValueError("max_length must be a positive integer")
- self.max_length = max_length
-
- if min_length is not None:
- if not isinstance(min_length, int):
- raise ValueError("min_length must be an integer")
- elif min_length < 0:
- raise ValueError("min_length must be a positive integer")
- self.min_length = min_length
-
- def _validate_impl(self, value, name):
- if not isinstance(value, Util.STRING_TYPE):
- raise ValidationError(name, "must be a string but is '%s'" % (value))
- value = str(value)
- if self.enum_values is not None and value not in self.enum_values:
- raise ValidationError(
- name,
- "is '%s' but must be one of '%s'"
- % (value, "' '".join(sorted(self.enum_values))),
- )
- if self.regex is not None and not any(re.match(x, value) for x in self.regex):
- raise ValidationError(
- name,
- "is '%s' which does not match the regex '%s'"
- % (value, "' '".join(sorted(self.regex))),
- )
- if not self.allow_empty and not value:
- raise ValidationError(name, "cannot be empty")
- if not self._validate_string_max_length(value):
- raise ValidationError(
- name, "maximum length is %s characters" % (self.max_length)
- )
- if not self._validate_string_min_length(value):
- raise ValidationError(
- name, "minimum length is %s characters" % (self.min_length)
- )
- return value
-
- def _validate_string_max_length(self, value):
- """
- Ensures that the length of string `value` is less than or equal to
- the maximum length
- """
- if self.max_length is not None:
- return len(str(value)) <= self.max_length
- else:
- return True
-
- def _validate_string_min_length(self, value):
- """
- Ensures that the length of string `value` is more than or equal to
- the minimum length
- """
- if self.min_length is not None:
- return len(str(value)) >= self.min_length
- else:
- return True
-
-
-class ArgValidatorNum(ArgValidator):
- def __init__( # pylint: disable=too-many-arguments
- self,
- name,
- required=False,
- val_min=None,
- val_max=None,
- default_value=ArgValidator.DEFAULT_SENTINEL,
- numeric_type=int,
- ):
- ArgValidator.__init__(
- self,
- name,
- required,
- numeric_type(0)
- if default_value is ArgValidator.DEFAULT_SENTINEL
- else default_value,
- )
- self.val_min = val_min
- self.val_max = val_max
- self.numeric_type = numeric_type
-
- def _validate_impl(self, value, name):
- v = None
- try:
- if isinstance(value, self.numeric_type):
- v = value
- else:
- v2 = self.numeric_type(value)
- if isinstance(value, Util.STRING_TYPE) or v2 == value:
- v = v2
- except Exception:
- pass
- if v is None:
- raise ValidationError(
- name, "must be an integer number but is '%s'" % (value)
- )
- if self.val_min is not None and v < self.val_min:
- raise ValidationError(
- name, "value is %s but cannot be less then %s" % (value, self.val_min)
- )
- if self.val_max is not None and v > self.val_max:
- raise ValidationError(
- name,
- "value is %s but cannot be greater then %s" % (value, self.val_max),
- )
- return v
-
-
-class ArgValidatorBool(ArgValidator):
- def __init__(self, name, required=False, default_value=False):
- ArgValidator.__init__(self, name, required, default_value)
-
- def _validate_impl(self, value, name):
- try:
- if isinstance(value, bool):
- return value
- if isinstance(value, Util.STRING_TYPE) or isinstance(value, int):
- return Util.boolean(value)
- except Exception:
- pass
- raise ValidationError(name, "must be an boolean but is '%s'" % (value))
-
-
-class ArgValidatorDeprecated:
- def __init__(self, name, deprecated_by):
- self.name = name
- self.deprecated_by = deprecated_by
-
-
-class ArgValidatorDict(ArgValidator):
- def __init__(
- self,
- name=None,
- required=False,
- nested=None,
- default_value=None,
- all_missing_during_validate=False,
- ):
- ArgValidator.__init__(self, name, required, default_value)
- if nested is not None:
- self.nested = dict([(v.name, v) for v in nested])
- else:
- self.nested = {}
- self.all_missing_during_validate = all_missing_during_validate
-
- def _validate_impl(self, value, name):
- result = {}
- seen_keys = set()
- try:
- items = list(value.items())
- except AttributeError:
- raise ValidationError(name, "invalid content is not a dictionary")
- for (setting, value) in items:
- try:
- validator = self.nested[setting]
- except KeyError:
- raise ValidationError(name, "invalid key '%s'" % (setting))
- if isinstance(validator, ArgValidatorDeprecated):
- setting = validator.deprecated_by
- validator = self.nested.get(setting, None)
- if setting in seen_keys:
- raise ValidationError(name, "duplicate key '%s'" % (setting))
- seen_keys.add(setting)
- try:
- validated_value = validator._validate(value, name + "." + setting)
- except ValidationError as e:
- raise ValidationError(e.name, e.error_message)
- result[setting] = validated_value
- for (setting, validator) in self.nested.items():
- if setting in seen_keys or isinstance(validator, ArgValidatorDeprecated):
- continue
- if validator.required:
- raise ValidationError(name, "missing required key '%s'" % (setting))
- default_value = validator.get_default_value()
- if (
- not self.all_missing_during_validate
- and default_value is not ArgValidator.MISSING
- ):
- result[setting] = default_value
- return result
-
-
-class ArgValidatorList(ArgValidator):
- def __init__(self, name, nested, default_value=None):
- ArgValidator.__init__(self, name, required=False, default_value=default_value)
- self.nested = nested
-
- def _validate_impl(self, value, name):
-
- if isinstance(value, Util.STRING_TYPE):
- # we expect a list. However, for convenience allow to
- # specify a string, separated by space. Escaping is
- # not supported. If you need that, define a proper list.
- value = [s for s in value.split(" ") if s]
-
- result = []
- for (idx, v) in enumerate(value):
- try:
- vv = self.nested._validate(v, name + "[" + str(idx) + "]")
- except ValidationError as e:
- raise ValidationError(e.name, e.error_message)
- result.append(vv)
- return result
-
-
-class ArgValidatorIP(ArgValidatorStr):
- def __init__(
- self, name, family=None, required=False, default_value=None, plain_address=True
- ):
- ArgValidatorStr.__init__(self, name, required, default_value, None)
- self.family = family
- self.plain_address = plain_address
-
- def _validate_impl(self, value, name):
- v = ArgValidatorStr._validate_impl(self, value, name)
- try:
- addr, family = Util.parse_ip(v, self.family)
- except Exception:
- raise ValidationError(
- name,
- "value '%s' is not a valid IP%s address"
- % (value, Util.addr_family_to_v(self.family)),
- )
- if self.plain_address:
- return addr
- return {"family": family, "address": addr}
-
-
-class ArgValidatorMac(ArgValidatorStr):
- def __init__(self, name, force_len=None, required=False, default_value=None):
- ArgValidatorStr.__init__(self, name, required, default_value, None)
- self.force_len = force_len
-
- def _validate_impl(self, value, name):
- v = ArgValidatorStr._validate_impl(self, value, name)
- try:
- addr = Util.mac_aton(v, self.force_len)
- except MyError:
- raise ValidationError(
- name, "value '%s' is not a valid MAC address" % (value)
- )
- if not addr:
- raise ValidationError(
- name, "value '%s' is not a valid MAC address" % (value)
- )
- return Util.mac_ntoa(addr)
-
-
-class ArgValidatorIPAddr(ArgValidatorDict):
- def __init__(self, name, family=None, required=False, default_value=None):
- ArgValidatorDict.__init__(
- self,
- name,
- required,
- nested=[
- ArgValidatorIP(
- "address", family=family, required=True, plain_address=False
- ),
- ArgValidatorNum("prefix", default_value=None, val_min=0),
- ],
- )
- self.family = family
-
- def _validate_impl(self, value, name):
- if isinstance(value, Util.STRING_TYPE):
- v = str(value)
- if not v:
- raise ValidationError(name, "cannot be empty")
- try:
- return Util.parse_address(v, self.family)
- except Exception:
- raise ValidationError(
- name,
- "value '%s' is not a valid IP%s address with prefix length"
- % (value, Util.addr_family_to_v(self.family)),
- )
- v = ArgValidatorDict._validate_impl(self, value, name)
- return {
- "address": v["address"]["address"],
- "family": v["address"]["family"],
- "prefix": v["prefix"],
- }
-
- def _validate_post(self, value, name, result):
- family = result["family"]
- prefix = result["prefix"]
- if prefix is None:
- prefix = Util.addr_family_default_prefix(family)
- result["prefix"] = prefix
- elif not Util.addr_family_valid_prefix(family, prefix):
- raise ValidationError(name, "invalid prefix %s in '%s'" % (prefix, value))
- return result
-
-
-class ArgValidatorIPRoute(ArgValidatorDict):
- def __init__(self, name, family=None, required=False, default_value=None):
- ArgValidatorDict.__init__(
- self,
- name,
- required,
- nested=[
- ArgValidatorIP(
- "network", family=family, required=True, plain_address=False
- ),
- ArgValidatorNum("prefix", default_value=None, val_min=0),
- ArgValidatorIP(
- "gateway", family=family, default_value=None, plain_address=False
- ),
- ArgValidatorNum(
- "metric", default_value=-1, val_min=-1, val_max=0xFFFFFFFF
- ),
- ],
- )
- self.family = family
-
- def _validate_post(self, value, name, result):
- network = result["network"]
-
- family = network["family"]
- result["network"] = network["address"]
- result["family"] = family
-
- gateway = result["gateway"]
- if gateway is not None:
- if family != gateway["family"]:
- raise ValidationError(
- name,
- "conflicting address family between network and gateway '%s'"
- % (gateway["address"]),
- )
- result["gateway"] = gateway["address"]
-
- prefix = result["prefix"]
- if prefix is None:
- prefix = Util.addr_family_default_prefix(family)
- result["prefix"] = prefix
- elif not Util.addr_family_valid_prefix(family, prefix):
- raise ValidationError(name, "invalid prefix %s in '%s'" % (prefix, value))
-
- return result
-
-
-class ArgValidator_DictIP(ArgValidatorDict):
- REGEX_DNS_OPTIONS = [
- r"^attempts:([1-9]\d*|0)$",
- r"^debug$",
- r"^edns0$",
- r"^ndots:([1-9]\d*|0)$",
- r"^no-check-names$",
- r"^no-reload$",
- r"^no-tld-query$",
- r"^rotate$",
- r"^single-request$",
- r"^single-request-reopen$",
- r"^timeout:([1-9]\d*|0)$",
- r"^trust-ad$",
- r"^use-vc$",
- ]
-
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="ip",
- nested=[
- ArgValidatorBool("dhcp4", default_value=None),
- ArgValidatorBool("dhcp4_send_hostname", default_value=None),
- ArgValidatorIP("gateway4", family=socket.AF_INET),
- ArgValidatorNum(
- "route_metric4", val_min=-1, val_max=0xFFFFFFFF, default_value=None
- ),
- ArgValidatorBool("auto6", default_value=None),
- ArgValidatorBool("ipv6_disabled", default_value=None),
- ArgValidatorIP("gateway6", family=socket.AF_INET6),
- ArgValidatorNum(
- "route_metric6", val_min=-1, val_max=0xFFFFFFFF, default_value=None
- ),
- ArgValidatorList(
- "address",
- nested=ArgValidatorIPAddr("address[?]"),
- default_value=list,
- ),
- ArgValidatorList(
- "route", nested=ArgValidatorIPRoute("route[?]"), default_value=list
- ),
- ArgValidatorBool("route_append_only"),
- ArgValidatorBool("rule_append_only"),
- ArgValidatorList(
- "dns",
- nested=ArgValidatorIP("dns[?]", plain_address=False),
- default_value=list,
- ),
- ArgValidatorList(
- "dns_search",
- nested=ArgValidatorStr("dns_search[?]"),
- default_value=list,
- ),
- ArgValidatorList(
- "dns_options",
- nested=ArgValidatorStr(
- "dns_options[?]", regex=ArgValidator_DictIP.REGEX_DNS_OPTIONS
- ),
- default_value=list,
- ),
- ],
- default_value=lambda: {
- "dhcp4": True,
- "dhcp4_send_hostname": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "gateway6": None,
- "route_metric6": None,
- "address": [],
- "route": [],
- "route_append_only": False,
- "rule_append_only": False,
- "dns": [],
- "dns_search": [],
- "dns_options": [],
- },
- )
-
- def _validate_post(self, value, name, result):
-
- has_ipv6_addresses = any(
- [a for a in result["address"] if a["family"] == socket.AF_INET6]
- )
-
- if result["ipv6_disabled"] is True:
- if result["auto6"] is True:
- raise ValidationError(
- name, "'auto6' and 'ipv6_disabled' are mutually exclusive"
- )
- if has_ipv6_addresses:
- raise ValidationError(
- name,
- "'ipv6_disabled' and static IPv6 addresses are mutually exclusive",
- )
- if result["gateway6"] is not None:
- raise ValidationError(
- name, "'ipv6_disabled' and 'gateway6' are mutually exclusive"
- )
- if result["route_metric6"] is not None:
- raise ValidationError(
- name, "'ipv6_disabled' and 'route_metric6' are mutually exclusive"
- )
- elif result["ipv6_disabled"] is None:
- # "ipv6_disabled" is not explicitly set, we always set it to False.
- # Either "auto6" is enabled or static addresses are set, then this
- # is clearly correct.
- # Even with "auto6:False" and no IPv6 addresses, we at least enable
- # IPv6 link local addresses.
- result["ipv6_disabled"] = False
-
- if result["dhcp4"] is None:
- result["dhcp4"] = result["dhcp4_send_hostname"] is not None or not any(
- [a for a in result["address"] if a["family"] == socket.AF_INET]
- )
-
- if result["auto6"] is None:
- result["auto6"] = not has_ipv6_addresses
-
- if result["dhcp4_send_hostname"] is not None:
- if not result["dhcp4"]:
- raise ValidationError(
- name, "'dhcp4_send_hostname' is only valid if 'dhcp4' is enabled"
- )
- return result
-
-
-class ArgValidator_DictEthernet(ArgValidatorDict):
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="ethernet",
- nested=[
- ArgValidatorBool("autoneg", default_value=None),
- ArgValidatorNum(
- "speed", val_min=0, val_max=0xFFFFFFFF, default_value=0
- ),
- ArgValidatorStr(
- "duplex", enum_values=["half", "full"], default_value=None
- ),
- ],
- default_value=ArgValidator.MISSING,
- )
-
- def get_default_ethernet(self):
- return dict([(k, v.default_value) for k, v in self.nested.items()])
-
- def _validate_post(self, value, name, result):
- has_speed_or_duplex = result["speed"] != 0 or result["duplex"] is not None
- if result["autoneg"] is None:
- if has_speed_or_duplex:
- result["autoneg"] = False
- elif result["autoneg"]:
- if has_speed_or_duplex:
- raise ValidationError(
- name,
- "cannot specify '%s' with 'autoneg' enabled"
- % ("duplex" if result["duplex"] is not None else "speed"),
- )
- else:
- if not has_speed_or_duplex:
- raise ValidationError(
- name, "need to specify 'duplex' and 'speed' with 'autoneg' enabled"
- )
- if has_speed_or_duplex and (result["speed"] == 0 or result["duplex"] is None):
- raise ValidationError(
- name,
- "need to specify both 'speed' and 'duplex' with 'autoneg' disabled",
- )
- return result
-
-
-class ArgValidator_DictEthtool(ArgValidatorDict):
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="ethtool",
- nested=[
- ArgValidator_DictEthtoolFeatures(),
- ArgValidator_DictEthtoolCoalesce(),
- ],
- default_value=ArgValidator.MISSING,
- )
-
- self.default_value = dict(
- [(k, v.default_value) for k, v in self.nested.items()]
- )
-
-
-class ArgValidator_DictEthtoolFeatures(ArgValidatorDict):
- # List of features created with:
- # nmcli connection modify "virbr0" ethtool.feature- on |& \
- # sed -e 's_[,:]_\n_g' | \ # split output in newlines
- # grep ^\ f | \ # select only lines starting with " f"
- # tr -d " ." | \ # remove spaces and fullstops
- # sed -e 's,feature-,ArgValidatorBool(",' \ # add Python code
- # -e 's/$/", default_value=None)],/'
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="features",
- nested=[
- ArgValidatorBool("esp_hw_offload", default_value=None),
- ArgValidatorDeprecated(
- "esp-hw-offload", deprecated_by="esp_hw_offload"
- ),
- ArgValidatorBool("esp_tx_csum_hw_offload", default_value=None),
- ArgValidatorDeprecated(
- "esp-tx-csum-hw-offload",
- deprecated_by="esp_tx_csum_hw_offload",
- ),
- ArgValidatorBool("fcoe_mtu", default_value=None),
- ArgValidatorDeprecated("fcoe-mtu", deprecated_by="fcoe_mtu"),
- ArgValidatorBool("gro", default_value=None),
- ArgValidatorBool("gso", default_value=None),
- ArgValidatorBool("highdma", default_value=None),
- ArgValidatorBool("hw_tc_offload", default_value=None),
- ArgValidatorDeprecated("hw-tc-offload", deprecated_by="hw_tc_offload"),
- ArgValidatorBool("l2_fwd_offload", default_value=None),
- ArgValidatorDeprecated(
- "l2-fwd-offload", deprecated_by="l2_fwd_offload"
- ),
- ArgValidatorBool("loopback", default_value=None),
- ArgValidatorBool("lro", default_value=None),
- ArgValidatorBool("ntuple", default_value=None),
- ArgValidatorBool("rx", default_value=None),
- ArgValidatorBool("rxhash", default_value=None),
- ArgValidatorBool("rxvlan", default_value=None),
- ArgValidatorBool("rx_all", default_value=None),
- ArgValidatorDeprecated("rx-all", deprecated_by="rx_all"),
- ArgValidatorBool("rx_fcs", default_value=None),
- ArgValidatorDeprecated("rx-fcs", deprecated_by="rx_fcs"),
- ArgValidatorBool("rx_gro_hw", default_value=None),
- ArgValidatorDeprecated("rx-gro-hw", deprecated_by="rx_gro_hw"),
- ArgValidatorBool("rx_udp_tunnel_port_offload", default_value=None),
- ArgValidatorDeprecated(
- "rx-udp_tunnel-port-offload",
- deprecated_by="rx_udp_tunnel_port_offload",
- ),
- ArgValidatorBool("rx_vlan_filter", default_value=None),
- ArgValidatorDeprecated(
- "rx-vlan-filter", deprecated_by="rx_vlan_filter"
- ),
- ArgValidatorBool("rx_vlan_stag_filter", default_value=None),
- ArgValidatorDeprecated(
- "rx-vlan-stag-filter",
- deprecated_by="rx_vlan_stag_filter",
- ),
- ArgValidatorBool("rx_vlan_stag_hw_parse", default_value=None),
- ArgValidatorDeprecated(
- "rx-vlan-stag-hw-parse",
- deprecated_by="rx_vlan_stag_hw_parse",
- ),
- ArgValidatorBool("sg", default_value=None),
- ArgValidatorBool("tls_hw_record", default_value=None),
- ArgValidatorDeprecated("tls-hw-record", deprecated_by="tls_hw_record"),
- ArgValidatorBool("tls_hw_tx_offload", default_value=None),
- ArgValidatorDeprecated(
- "tls-hw-tx-offload",
- deprecated_by="tls_hw_tx_offload",
- ),
- ArgValidatorBool("tso", default_value=None),
- ArgValidatorBool("tx", default_value=None),
- ArgValidatorBool("txvlan", default_value=None),
- ArgValidatorBool("tx_checksum_fcoe_crc", default_value=None),
- ArgValidatorDeprecated(
- "tx-checksum-fcoe-crc",
- deprecated_by="tx_checksum_fcoe_crc",
- ),
- ArgValidatorBool("tx_checksum_ipv4", default_value=None),
- ArgValidatorDeprecated(
- "tx-checksum-ipv4",
- deprecated_by="tx_checksum_ipv4",
- ),
- ArgValidatorBool("tx_checksum_ipv6", default_value=None),
- ArgValidatorDeprecated(
- "tx-checksum-ipv6",
- deprecated_by="tx_checksum_ipv6",
- ),
- ArgValidatorBool("tx_checksum_ip_generic", default_value=None),
- ArgValidatorDeprecated(
- "tx-checksum-ip-generic",
- deprecated_by="tx_checksum_ip_generic",
- ),
- ArgValidatorBool("tx_checksum_sctp", default_value=None),
- ArgValidatorDeprecated(
- "tx-checksum-sctp",
- deprecated_by="tx_checksum_sctp",
- ),
- ArgValidatorBool("tx_esp_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-esp-segmentation",
- deprecated_by="tx_esp_segmentation",
- ),
- ArgValidatorBool("tx_fcoe_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-fcoe-segmentation",
- deprecated_by="tx_fcoe_segmentation",
- ),
- ArgValidatorBool("tx_gre_csum_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-gre-csum-segmentation",
- deprecated_by="tx_gre_csum_segmentation",
- ),
- ArgValidatorBool("tx_gre_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-gre-segmentation",
- deprecated_by="tx_gre_segmentation",
- ),
- ArgValidatorBool("tx_gso_partial", default_value=None),
- ArgValidatorDeprecated(
- "tx-gso-partial", deprecated_by="tx_gso_partial"
- ),
- ArgValidatorBool("tx_gso_robust", default_value=None),
- ArgValidatorDeprecated("tx-gso-robust", deprecated_by="tx_gso_robust"),
- ArgValidatorBool("tx_ipxip4_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-ipxip4-segmentation",
- deprecated_by="tx_ipxip4_segmentation",
- ),
- ArgValidatorBool("tx_ipxip6_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-ipxip6-segmentation",
- deprecated_by="tx_ipxip6_segmentation",
- ),
- ArgValidatorBool("tx_nocache_copy", default_value=None),
- ArgValidatorDeprecated(
- "tx-nocache-copy",
- deprecated_by="tx_nocache_copy",
- ),
- ArgValidatorBool("tx_scatter_gather", default_value=None),
- ArgValidatorDeprecated(
- "tx-scatter-gather",
- deprecated_by="tx_scatter_gather",
- ),
- ArgValidatorBool("tx_scatter_gather_fraglist", default_value=None),
- ArgValidatorDeprecated(
- "tx-scatter-gather-fraglist",
- deprecated_by="tx_scatter_gather_fraglist",
- ),
- ArgValidatorBool("tx_sctp_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-sctp-segmentation",
- deprecated_by="tx_sctp_segmentation",
- ),
- ArgValidatorBool("tx_tcp6_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-tcp6-segmentation",
- deprecated_by="tx_tcp6_segmentation",
- ),
- ArgValidatorBool("tx_tcp_ecn_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-tcp-ecn-segmentation",
- deprecated_by="tx_tcp_ecn_segmentation",
- ),
- ArgValidatorBool("tx_tcp_mangleid_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-tcp-mangleid-segmentation",
- deprecated_by="tx_tcp_mangleid_segmentation",
- ),
- ArgValidatorBool("tx_tcp_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-tcp-segmentation",
- deprecated_by="tx_tcp_segmentation",
- ),
- ArgValidatorBool("tx_udp_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-udp-segmentation",
- deprecated_by="tx_udp_segmentation",
- ),
- ArgValidatorBool("tx_udp_tnl_csum_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-udp_tnl-csum-segmentation",
- deprecated_by="tx_udp_tnl_csum_segmentation",
- ),
- ArgValidatorBool("tx_udp_tnl_segmentation", default_value=None),
- ArgValidatorDeprecated(
- "tx-udp_tnl-segmentation",
- deprecated_by="tx_udp_tnl_segmentation",
- ),
- ArgValidatorBool("tx_vlan_stag_hw_insert", default_value=None),
- ArgValidatorDeprecated(
- "tx-vlan-stag-hw-insert",
- deprecated_by="tx_vlan_stag_hw_insert",
- ),
- ],
- )
- self.default_value = dict(
- [
- (name, validator.default_value)
- for name, validator in self.nested.items()
- if not isinstance(validator, ArgValidatorDeprecated)
- ]
- )
-
-
-class ArgValidator_DictEthtoolCoalesce(ArgValidatorDict):
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="coalesce",
- nested=[
- ArgValidatorBool("adaptive_rx", default_value=None),
- ArgValidatorBool("adaptive_tx", default_value=None),
- ArgValidatorNum(
- "pkt_rate_high", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "pkt_rate_low", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "rx_frames", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "rx_frames_high", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "rx_frames_irq", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "rx_frames_low", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "rx_usecs", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "rx_usecs_high", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "rx_usecs_irq", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "rx_usecs_low", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "sample_interval",
- val_min=0,
- val_max=UINT32_MAX,
- default_value=None,
- ),
- ArgValidatorNum(
- "stats_block_usecs",
- val_min=0,
- val_max=UINT32_MAX,
- default_value=None,
- ),
- ArgValidatorNum(
- "tx_frames", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "tx_frames_high", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "tx_frames_irq", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "tx_frames_low", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "tx_usecs", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "tx_usecs_high", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "tx_usecs_irq", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ArgValidatorNum(
- "tx_usecs_low", val_min=0, val_max=UINT32_MAX, default_value=None
- ),
- ],
- )
- self.default_value = dict(
- [(k, v.default_value) for k, v in self.nested.items()]
- )
-
-
-class ArgValidator_DictBond(ArgValidatorDict):
-
- VALID_MODES = [
- "balance-rr",
- "active-backup",
- "balance-xor",
- "broadcast",
- "802.3ad",
- "balance-tlb",
- "balance-alb",
- ]
-
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="bond",
- nested=[
- ArgValidatorStr("mode", enum_values=ArgValidator_DictBond.VALID_MODES),
- ArgValidatorNum(
- "miimon", val_min=0, val_max=1000000, default_value=None
- ),
- ],
- default_value=ArgValidator.MISSING,
- )
-
- def get_default_bond(self):
- return {"mode": ArgValidator_DictBond.VALID_MODES[0], "miimon": None}
-
-
-class ArgValidator_DictInfiniband(ArgValidatorDict):
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="infiniband",
- nested=[
- ArgValidatorStr(
- "transport_mode", enum_values=["datagram", "connected"]
- ),
- ArgValidatorNum("p_key", val_min=-1, val_max=0xFFFF, default_value=-1),
- ],
- default_value=ArgValidator.MISSING,
- )
-
- def get_default_infiniband(self):
- return {"transport_mode": "datagram", "p_key": -1}
-
-
-class ArgValidator_DictVlan(ArgValidatorDict):
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="vlan",
- nested=[ArgValidatorNum("id", val_min=0, val_max=4094, required=True)],
- default_value=ArgValidator.MISSING,
- )
-
- def get_default_vlan(self):
- return {"id": None}
-
-
-class ArgValidator_DictMacvlan(ArgValidatorDict):
-
- VALID_MODES = ["vepa", "bridge", "private", "passthru", "source"]
-
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="macvlan",
- nested=[
- ArgValidatorStr(
- "mode",
- enum_values=ArgValidator_DictMacvlan.VALID_MODES,
- default_value="bridge",
- ),
- ArgValidatorBool("promiscuous", default_value=True),
- ArgValidatorBool("tap", default_value=False),
- ],
- default_value=ArgValidator.MISSING,
- )
-
- def get_default_macvlan(self):
- return {"mode": "bridge", "promiscuous": True, "tap": False}
-
- def _validate_post(self, value, name, result):
- if result["promiscuous"] is False and result["mode"] != "passthru":
- raise ValidationError(
- name, "non promiscuous operation is allowed only in passthru mode"
- )
- return result
-
-
-class ArgValidatorPath(ArgValidatorStr):
- """
- Valides that the value is a valid posix absolute path
- """
-
- def __init__(self, name, required=False, default_value=None):
- ArgValidatorStr.__init__(self, name, required, default_value, None)
-
- def _validate_impl(self, value, name):
- ArgValidatorStr._validate_impl(self, value, name)
-
- if posixpath.isabs(value) is False:
- raise ValidationError(
- name,
- "value '%s' is not a valid posix absolute path" % (value),
- )
- return value
-
-
-class ArgValidator_Dict802_1X(ArgValidatorDict):
-
- VALID_EAP_TYPES = ["tls"]
-
- VALID_PRIVATE_KEY_FLAGS = ["none", "agent-owned", "not-saved", "not-required"]
-
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="ieee802_1x",
- nested=[
- ArgValidatorStr(
- "eap",
- enum_values=ArgValidator_Dict802_1X.VALID_EAP_TYPES,
- default_value="tls",
- ),
- ArgValidatorStr("identity", required=True),
- ArgValidatorPath("private_key", required=True),
- ArgValidatorStr("private_key_password"),
- ArgValidatorList(
- "private_key_password_flags",
- nested=ArgValidatorStr(
- "private_key_password_flags[?]",
- enum_values=ArgValidator_Dict802_1X.VALID_PRIVATE_KEY_FLAGS,
- ),
- default_value=None,
- ),
- ArgValidatorPath("client_cert", required=True),
- ArgValidatorPath("ca_cert"),
- ArgValidatorPath("ca_path"),
- ArgValidatorBool("system_ca_certs", default_value=False),
- ArgValidatorStr("domain_suffix_match", required=False),
- ],
- default_value=None,
- )
-
- def _validate_post(self, value, name, result):
- if result["system_ca_certs"] is True and result["ca_path"] is not None:
- raise ValidationError(
- name,
- "ca_path will be ignored by NetworkManager if system_ca_certs is used",
- )
- return result
-
-
-class ArgValidator_DictWireless(ArgValidatorDict):
-
- VALID_KEY_MGMT = [
- "wpa-psk",
- "wpa-eap",
- ]
-
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="wireless",
- nested=[
- ArgValidatorStr("ssid", max_length=32),
- ArgValidatorStr(
- "key_mgmt", enum_values=ArgValidator_DictWireless.VALID_KEY_MGMT
- ),
- ArgValidatorStr("password", default_value=None, max_length=63),
- ],
- default_value=None,
- )
-
- def _validate_post(self, value, name, result):
- if result["key_mgmt"] == "wpa-psk":
- if result["password"] is None:
- raise ValidationError(
- name,
- "must supply a password if using 'wpa-psk' key management",
- )
- else:
- if result["password"] is not None:
- raise ValidationError(
- name,
- "password only allowed if using 'wpa-psk' key management",
- )
-
- return result
-
-
-class ArgValidator_DictConnection(ArgValidatorDict):
-
- VALID_PERSISTENT_STATES = ["absent", "present"]
- VALID_STATES = VALID_PERSISTENT_STATES + ["up", "down"]
- VALID_TYPES = [
- "ethernet",
- "infiniband",
- "bridge",
- "team",
- "bond",
- "vlan",
- "macvlan",
- "wireless",
- "dummy",
- ]
- VALID_PORT_TYPES = ["bridge", "bond", "team"]
-
- def __init__(self):
- ArgValidatorDict.__init__(
- self,
- name="connection",
- nested=[
- ArgValidatorStr("name"),
- ArgValidatorStr(
- "state", enum_values=ArgValidator_DictConnection.VALID_STATES
- ),
- ArgValidatorStr(
- "persistent_state",
- enum_values=ArgValidator_DictConnection.VALID_PERSISTENT_STATES,
- ),
- ArgValidatorBool("force_state_change", default_value=None),
- ArgValidatorNum(
- "wait",
- val_min=0,
- val_max=3600,
- numeric_type=float,
- default_value=None,
- ),
- ArgValidatorStr(
- "type", enum_values=ArgValidator_DictConnection.VALID_TYPES
- ),
- ArgValidatorBool("autoconnect", default_value=True),
- ArgValidatorStr(
- "port_type",
- enum_values=ArgValidator_DictConnection.VALID_PORT_TYPES,
- ),
- ArgValidatorDeprecated(
- "slave_type",
- deprecated_by="port_type",
- ),
- ArgValidatorStr("controller"),
- ArgValidatorDeprecated("master", deprecated_by="controller"),
- ArgValidatorStr("interface_name", allow_empty=True),
- ArgValidatorMac("mac"),
- ArgValidatorNum(
- "mtu", val_min=0, val_max=0xFFFFFFFF, default_value=None
- ),
- ArgValidatorStr("zone"),
- ArgValidatorBool("check_iface_exists", default_value=True),
- ArgValidatorStr("parent"),
- ArgValidatorBool("ignore_errors", default_value=None),
- ArgValidator_DictIP(),
- ArgValidator_DictEthernet(),
- ArgValidator_DictEthtool(),
- ArgValidator_DictBond(),
- ArgValidator_DictInfiniband(),
- ArgValidator_DictVlan(),
- ArgValidator_DictMacvlan(),
- ArgValidator_Dict802_1X(),
- ArgValidator_DictWireless(),
- # deprecated options:
- ArgValidatorStr(
- "infiniband_transport_mode",
- enum_values=["datagram", "connected"],
- default_value=ArgValidator.MISSING,
- ),
- ArgValidatorNum(
- "infiniband_p_key",
- val_min=-1,
- val_max=0xFFFF,
- default_value=ArgValidator.MISSING,
- ),
- ArgValidatorNum(
- "vlan_id",
- val_min=0,
- val_max=4094,
- default_value=ArgValidator.MISSING,
- ),
- ],
- default_value=dict,
- all_missing_during_validate=True,
- )
-
- # valid field based on specified state, used to set defaults and reject
- # bad values
- self.VALID_FIELDS = []
-
- def _validate_post_state(self, value, name, result):
- """
- Validate state definitions and create a corresponding list of actions.
- """
- actions = []
- state = result.get("state")
- persistent_state = result.get("persistent_state")
-
- if state in self.VALID_PERSISTENT_STATES:
- if persistent_state:
- raise ValidationError(
- name,
- "State cannot be '{0}' if persistent_state is specified".format(
- state
- ),
- )
- persistent_state = state
- state = None
-
- # default persistent_state to present (not done via default_value in the
- # ArgValidatorStr, the value will only be set at the end of
- # _validate_post()
- if not persistent_state:
- persistent_state = "present"
-
- # If the profile should be absent at the end, it needs to be present in
- # the meantime to allow to (de)activate it. This is only possible if it
- # is completely defined, for which `type` needs to be specified.
- # Otherwise, downing is happening on a best-effort basis
- if persistent_state == "absent" and state and result.get("type"):
- actions.append("present")
-
- actions.append(persistent_state)
-
- # Change the runtime state if necessary
- if state:
- actions.append(state)
-
- result["state"] = state
- result["persistent_state"] = persistent_state
- result["actions"] = actions
-
- return result
-
- def _validate_post_fields(self, value, name, result):
- """
- Validate the allowed fields (settings depending on the requested state).
- FIXME: Maybe it should check whether "up"/"down" is present in the
- actions instead of checking the runtime state from "state" to switch
- from state to actions after the state parsing is done.
- """
- state = result.get("state")
- persistent_state = result.get("persistent_state")
-
- # minimal settings not related to runtime changes
- valid_fields = ["actions", "ignore_errors", "name", "persistent_state", "state"]
-
- # when type is present, a profile is completely specified (using
- # defaults or other settings)
- if "type" in result:
- valid_fields += list(self.nested.keys())
-
- # If there are no runtime changes, "wait" and "force_state_change" do
- # not make sense
- # FIXME: Maybe this restriction can be removed. Need to make sure that
- # defaults for wait or force_state_change do not interfer
- if not state:
- while "wait" in valid_fields:
- valid_fields.remove("wait")
- while "force_state_change" in valid_fields:
- valid_fields.remove("force_state_change")
- else:
- valid_fields += ["force_state_change", "wait"]
-
- # FIXME: Maybe just accept all values, even if they are not
- # needed/meaningful in the respective context
- valid_fields = set(valid_fields)
- for k in result:
- if k not in valid_fields:
- raise ValidationError(
- name + "." + k,
- "property is not allowed for state '%s' and persistent_state '%s'"
- % (state, persistent_state),
- )
-
- if "name" not in result:
- if persistent_state == "absent":
- result["name"] = "" # set to empty string to mean *absent all others*
- else:
- raise ValidationError(name, "missing 'name'")
-
- # FIXME: Seems to be a duplicate check since "wait" will be removed from
- # valid_keys when state is considered to be not True
- if "wait" in result and not state:
- raise ValidationError(
- name + ".wait",
- "'wait' is not allowed for state '%s'" % (result["state"]),
- )
-
- result["state"] = state
- result["persistent_state"] = persistent_state
-
- self.VALID_FIELDS = valid_fields
- return result
-
- def _validate_post_wireless(self, value, name, result):
- """
- Validate wireless settings
- """
- if "type" in result:
- if result["type"] == "wireless":
- if "wireless" in result:
- if (
- result["wireless"]["key_mgmt"] == "wpa-eap"
- and "ieee802_1x" not in result
- ):
- raise ValidationError(
- name + ".wireless",
- "key management set to wpa-eap but no "
- "'ieee802_1x' settings defined",
- )
- else:
- raise ValidationError(
- name + ".wireless",
- "must define 'wireless' settings for 'type' 'wireless'",
- )
-
- else:
- if "wireless" in result:
- raise ValidationError(
- name + ".wireless",
- "'wireless' settings are not allowed for 'type' '%s'"
- % (result["type"]),
- )
-
- return result
-
- def _validate_post(self, value, name, result):
- result = self._validate_post_state(value, name, result)
- result = self._validate_post_fields(value, name, result)
- result = self._validate_post_wireless(value, name, result)
-
- if "type" in result:
-
- if "controller" in result:
- if "port_type" not in result:
- result["port_type"] = None
- if result["controller"] == result["name"]:
- raise ValidationError(
- name + ".controller", '"controller" cannot refer to itself'
- )
- else:
- if "port_type" in result:
- raise ValidationError(
- name + ".port_type",
- "'port_type' requires a 'controller' property",
- )
-
- if "ip" in result:
- if "controller" in result:
- raise ValidationError(
- name + ".ip", 'a port cannot have an "ip" property'
- )
- else:
- if "controller" not in result:
- result["ip"] = self.nested["ip"].get_default_value()
-
- if "zone" in result:
- if "controller" in result:
- raise ValidationError(
- name + ".zone", '"zone" cannot be configured for port types'
- )
- else:
- result["zone"] = None
-
- if "mac" in result:
- if result["type"] not in ["ethernet", "infiniband"]:
- raise ValidationError(
- name + ".mac",
- "a 'mac' address is only allowed for type 'ethernet' "
- "or 'infiniband'",
- )
- maclen = len(Util.mac_aton(result["mac"]))
- if result["type"] == "ethernet" and maclen != 6:
- raise ValidationError(
- name + ".mac",
- "a 'mac' address for type ethernet requires 6 octets "
- "but is '%s'" % result["mac"],
- )
- if result["type"] == "infiniband" and maclen != 20:
- raise ValidationError(
- name + ".mac",
- "a 'mac' address for type ethernet requires 20 octets "
- "but is '%s'" % result["mac"],
- )
-
- if result["type"] == "infiniband":
- if "infiniband" not in result:
- result["infiniband"] = self.nested[
- "infiniband"
- ].get_default_infiniband()
- if "infiniband_transport_mode" in result:
- result["infiniband"]["transport_mode"] = result[
- "infiniband_transport_mode"
- ]
- del result["infiniband_transport_mode"]
- if "infiniband_p_key" in result:
- result["infiniband"]["p_key"] = result["infiniband_p_key"]
- del result["infiniband_p_key"]
- else:
- if "infiniband_transport_mode" in result:
- raise ValidationError(
- name + ".infiniband_transport_mode",
- "cannot mix deprecated 'infiniband_transport_mode' "
- "property with 'infiniband' settings",
- )
- if "infiniband_p_key" in result:
- raise ValidationError(
- name + ".infiniband_p_key",
- "cannot mix deprecated 'infiniband_p_key' property "
- "with 'infiniband' settings",
- )
- if result["infiniband"]["transport_mode"] is None:
- result["infiniband"]["transport_mode"] = "datagram"
- if result["infiniband"]["p_key"] != -1:
- if "mac" not in result and "parent" not in result:
- raise ValidationError(
- name + ".infiniband.p_key",
- "a infiniband device with 'infiniband.p_key' "
- "property also needs 'mac' or 'parent' property",
- )
- else:
- if "infiniband" in result:
- raise ValidationError(
- name + ".infiniband",
- "'infiniband' settings are only allowed for type 'infiniband'",
- )
- if "infiniband_transport_mode" in result:
- raise ValidationError(
- name + ".infiniband_transport_mode",
- "a 'infiniband_transport_mode' property is only "
- "allowed for type 'infiniband'",
- )
- if "infiniband_p_key" in result:
- raise ValidationError(
- name + ".infiniband_p_key",
- "a 'infiniband_p_key' property is only allowed for "
- "type 'infiniband'",
- )
-
- if "interface_name" in result:
- # Ignore empty interface_name
- if result["interface_name"] == "":
- del result["interface_name"]
- elif not Util.ifname_valid(result["interface_name"]):
- raise ValidationError(
- name + ".interface_name",
- "invalid 'interface_name' '%s'" % (result["interface_name"]),
- )
- else:
- if not result.get("mac"):
- if not Util.ifname_valid(result["name"]):
- raise ValidationError(
- name + ".interface_name",
- "'interface_name' as 'name' '%s' is not valid"
- % (result["name"]),
- )
- result["interface_name"] = result["name"]
-
- if "interface_name" not in result and result["type"] in [
- "bond",
- "bridge",
- "macvlan",
- "team",
- "vlan",
- ]:
- raise ValidationError(
- name + ".interface_name",
- "type '%s' requires 'interface_name'" % (result["type"]),
- )
-
- if result["type"] == "vlan":
- if "vlan" not in result:
- if "vlan_id" not in result:
- raise ValidationError(
- name + ".vlan", 'missing "vlan" settings for "type" "vlan"'
- )
- result["vlan"] = self.nested["vlan"].get_default_vlan()
- result["vlan"]["id"] = result["vlan_id"]
- del result["vlan_id"]
- else:
- if "vlan_id" in result:
- raise ValidationError(
- name + ".vlan_id",
- "don't use the deprecated 'vlan_id' together with the "
- "'vlan' settings'",
- )
- if "parent" not in result:
- raise ValidationError(
- name + ".parent", 'missing "parent" for "type" "vlan"'
- )
- else:
- if "vlan" in result:
- raise ValidationError(
- name + ".vlan", '"vlan" is only allowed for "type" "vlan"'
- )
- if "vlan_id" in result:
- raise ValidationError(
- name + ".vlan_id", '"vlan_id" is only allowed for "type" "vlan"'
- )
-
- if "parent" in result:
- if result["type"] not in ["vlan", "macvlan", "infiniband"]:
- raise ValidationError(
- name + ".parent",
- "'parent' is only allowed for type 'vlan', 'macvlan' or "
- "'infiniband'",
- )
- if result["parent"] == result["name"]:
- raise ValidationError(
- name + ".parent", '"parent" cannot refer to itself'
- )
-
- if result["type"] == "bond":
- if "bond" not in result:
- result["bond"] = self.nested["bond"].get_default_bond()
- else:
- if "bond" in result:
- raise ValidationError(
- name + ".bond",
- "'bond' settings are not allowed for 'type' '%s'"
- % (result["type"]),
- )
-
- if result["type"] in ["ethernet", "vlan", "bridge", "bond", "team"]:
- if "ethernet" not in result:
- result["ethernet"] = self.nested["ethernet"].get_default_ethernet()
- else:
- if "ethernet" in result:
- raise ValidationError(
- name + ".ethernet",
- "'ethernet' settings are not allowed for 'type' '%s'"
- % (result["type"]),
- )
-
- if result["type"] == "macvlan":
- if "macvlan" not in result:
- result["macvlan"] = self.nested["macvlan"].get_default_macvlan()
- else:
- if "macvlan" in result:
- raise ValidationError(
- name + ".macvlan",
- "'macvlan' settings are not allowed for 'type' '%s'"
- % (result["type"]),
- )
-
- if "ieee802_1x" in result and result["type"] not in [
- "ethernet",
- "wireless",
- ]:
- raise ValidationError(
- name + ".ieee802_1x",
- "802.1x settings only allowed for ethernet or wireless interfaces.",
- )
-
- for name in self.VALID_FIELDS:
- if name in result:
- continue
- validator = self.nested[name]
- if not isinstance(validator, ArgValidatorDeprecated):
- value = validator.get_default_value()
- if value is not ArgValidator.MISSING:
- result[name] = value
-
- return result
-
-
-class ArgValidator_ListConnections(ArgValidatorList):
- def __init__(self):
- ArgValidatorList.__init__(
- self,
- name="connections",
- nested=ArgValidator_DictConnection(),
- default_value=list,
- )
-
- def _validate_post(self, value, name, result):
- for idx, connection in enumerate(result):
- if "type" in connection:
- if connection["controller"]:
- c = ArgUtil.connection_find_by_name(
- connection["controller"], result, idx
- )
- if not c:
- raise ValidationError(
- name + "[" + str(idx) + "].controller",
- "references non-existing 'controller' connection '%s'"
- % (connection["controller"]),
- )
- if c["type"] not in ArgValidator_DictConnection.VALID_PORT_TYPES:
- raise ValidationError(
- name + "[" + str(idx) + "].controller",
- "references 'controller' connection '%s' which is "
- "not a controller "
- "type by '%s'" % (connection["controller"], c["type"]),
- )
- if connection["port_type"] is None:
- connection["port_type"] = c["type"]
- elif connection["port_type"] != c["type"]:
- raise ValidationError(
- name + "[" + str(idx) + "].controller",
- "references 'controller' connection '%s' which is "
- "of type '%s' instead of port_type '%s'"
- % (
- connection["controller"],
- c["type"],
- connection["port_type"],
- ),
- )
- if connection["parent"]:
- if not ArgUtil.connection_find_by_name(
- connection["parent"], result, idx
- ):
- raise ValidationError(
- name + "[" + str(idx) + "].parent",
- "references non-existing 'parent' connection '%s'"
- % (connection["parent"]),
- )
- return result
-
- VALIDATE_ONE_MODE_NM = "nm"
- VALIDATE_ONE_MODE_INITSCRIPTS = "initscripts"
-
- def validate_connection_one(self, mode, connections, idx):
- connection = connections[idx]
- if "type" not in connection:
- return
-
- if (connection["parent"]) and (
- (
- (mode == self.VALIDATE_ONE_MODE_INITSCRIPTS)
- and (connection["type"] == "vlan")
- )
- or (
- (connection["type"] == "infiniband")
- and (connection["infiniband"]["p_key"] != -1)
- )
- ):
- try:
- ArgUtil.connection_find_controller(
- connection["parent"], connections, idx
- )
- except MyError:
- raise ValidationError.from_connection(
- idx,
- "profile references a parent '%s' which has 'interface_name' "
- "missing" % (connection["parent"]),
- )
-
- if (connection["controller"]) and (mode == self.VALIDATE_ONE_MODE_INITSCRIPTS):
- try:
- ArgUtil.connection_find_controller(
- connection["controller"], connections, idx
- )
- except MyError:
- raise ValidationError.from_connection(
- idx,
- "profile references a controller '%s' which has 'interface_name' "
- "missing" % (connection["controller"]),
- )
-
- # check if 802.1x connection is valid
- if connection["ieee802_1x"]:
- if mode == self.VALIDATE_ONE_MODE_INITSCRIPTS:
- raise ValidationError.from_connection(
- idx,
- "802.1x authentication is not supported by initscripts. "
- "Configure 802.1x in /etc/wpa_supplicant.conf "
- "if you need to use initscripts.",
- )
-
- # check if wireless connection is valid
- if connection["type"] == "wireless":
- if mode == self.VALIDATE_ONE_MODE_INITSCRIPTS:
- raise ValidationError.from_connection(
- idx,
- "Wireless WPA auth is not supported by initscripts. "
- "Configure wireless connection in /etc/wpa_supplicant.conf "
- "if you need to use initscripts.",
- )
-
- # initscripts does not support ip.dns_options, so raise errors when network
- # provider is initscripts
- if connection["ip"]["dns_options"]:
- if mode == self.VALIDATE_ONE_MODE_INITSCRIPTS:
- raise ValidationError.from_connection(
- idx,
- "ip.dns_options is not supported by initscripts.",
- )
- # initscripts does not support ip.ipv6_disabled, so raise errors when network
- # provider is initscripts
- if connection["ip"]["ipv6_disabled"]:
- if mode == self.VALIDATE_ONE_MODE_INITSCRIPTS:
- raise ValidationError.from_connection(
- idx,
- "ip.ipv6_disabled is not supported by initscripts.",
- )
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/ethtool.py b/roles/linux-system-roles.network/module_utils/network_lsr/ethtool.py
deleted file mode 100644
index 21e2152..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/ethtool.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-
-import array
-import struct
-import fcntl
-import socket
-
-from .utils import Util
-
-ETHTOOL_GPERMADDR = 0x00000020
-SIOCETHTOOL = 0x8946
-MAX_ADDR_LEN = 32
-IFNAMESIZ = 16
-
-
-def get_perm_addr(ifname):
- """
- Return the Permanent address value for the specified interface using the
- ETHTOOL_GPERMADDR ioctl command.
-
- Please for further documentation, see:
- https://github.com/torvalds/linux/blob/master/include/uapi/linux/ethtool.h#L734
- https://github.com/torvalds/linux/blob/master/include/uapi/linux/ethtool.h#L1388
- https://git.kernel.org/pub/scm/network/ethtool/ethtool.git/tree/ethtool.c#n4172
- """
- try:
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sockfd = sock.fileno()
- ifname = ifname.encode("utf-8")
- if len(ifname) > IFNAMESIZ:
- return None
-
- ecmd = array.array(
- "B",
- struct.pack(
- "II%is" % MAX_ADDR_LEN,
- ETHTOOL_GPERMADDR,
- MAX_ADDR_LEN,
- b"\x00" * MAX_ADDR_LEN,
- ),
- )
- ifreq = struct.pack("%isP" % IFNAMESIZ, ifname, ecmd.buffer_info()[0])
-
- fcntl.ioctl(sockfd, SIOCETHTOOL, ifreq)
- try:
- res = ecmd.tobytes()
- except AttributeError: # tobytes() is not available in python2
- res = ecmd.tostring()
- _, size, perm_addr = struct.unpack("II%is" % MAX_ADDR_LEN, res)
- perm_addr = Util.mac_ntoa(perm_addr[:size])
- except IOError:
- perm_addr = None
- finally:
- sock.close()
-
- return perm_addr
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm/__init__.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm/__init__.py
deleted file mode 100644
index 58fbb5a..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/nm/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Relative import is not support by ansible 2.8 yet
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.network_lsr.nm import provider # noqa:E501
-
-# pylint: enable=import-error, no-name-in-module
-
-provider.NetworkManagerProvider
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm/active_connection.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm/active_connection.py
deleted file mode 100644
index a6c5a37..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/nm/active_connection.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-
-# Handle NM.ActiveConnection
-
-import logging
-
-# Relative import is not support by ansible 2.8 yet
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.network_lsr.nm import client # noqa:E501
-from ansible.module_utils.network_lsr.nm import error # noqa:E501
-
-# pylint: enable=import-error, no-name-in-module
-
-
-NM_AC_STATE_CHANGED_SIGNAL = "state-changed"
-
-
-def deactivate_active_connection(nm_ac, timeout, check_mode):
- if not nm_ac or nm_ac.props.state == client.NM.ActiveConnectionState.DEACTIVATED:
- logging.info("Connection is not active, no need to deactivate")
- return False
- if not check_mode:
- main_loop = client.get_mainloop(timeout)
- logging.debug(
- "Deactivating {id} with timeout {timeout}".format(
- id=nm_ac.get_id(), timeout=timeout
- )
- )
- user_data = main_loop
- handler_id = nm_ac.connect(
- NM_AC_STATE_CHANGED_SIGNAL, _nm_ac_state_change_callback, user_data
- )
- logging.debug(
- "Registered {signal} on client.NM.ActiveConnection {id}".format(
- signal=NM_AC_STATE_CHANGED_SIGNAL, id=nm_ac.get_id()
- )
- )
- if nm_ac.props.state != client.NM.ActiveConnectionState.DEACTIVATING:
- nm_client = client.get_client()
- user_data = (main_loop, nm_ac, nm_ac.get_id(), handler_id)
- nm_client.deactivate_connection_async(
- nm_ac,
- main_loop.cancellable,
- _nm_ac_deactivate_call_back,
- user_data,
- )
- logging.debug(
- "Deactivating client.NM.ActiveConnection {0}".format(nm_ac.get_id())
- )
- main_loop.run()
- return True
-
-
-def _nm_ac_state_change_callback(nm_ac, state, reason, user_data):
- main_loop = user_data
- if main_loop.is_cancelled:
- return
- logging.debug(
- "Got client.NM.ActiveConnection state change: {id}: {state} {reason}".format(
- id=nm_ac.get_id(), state=state, reason=reason
- )
- )
- if nm_ac.props.state == client.NM.ActiveConnectionState.DEACTIVATED:
- logging.debug(
- "client.NM.ActiveConnection {0} is deactivated".format(nm_ac.get_id())
- )
- main_loop.quit()
-
-
-def _nm_ac_deactivate_call_back(nm_client, result, user_data):
- main_loop, nm_ac, nm_ac_id, handler_id = user_data
- logging.debug("client.NM.ActiveConnection deactivating callback")
- if main_loop.is_cancelled:
- if nm_ac:
- nm_ac.handler_disconnect(handler_id)
- return
-
- try:
- success = nm_client.deactivate_connection_finish(result)
- except client.GLib.Error as e:
- if e.matches(
- client.NM.ManagerError.quark(), client.NM.ManagerError.CONNECTIONNOTACTIVE
- ):
- logging.info(
- "Connection is not active on {0}, no need to deactivate".format(
- nm_ac_id
- )
- )
- if nm_ac:
- nm_ac.handler_disconnect(handler_id)
- main_loop.quit()
- return
- else:
- _deactivate_fail(
- main_loop,
- handler_id,
- nm_ac,
- "Failed to deactivate connection {id}, error={error}".format(
- id=nm_ac_id, error=e
- ),
- )
- return
- except Exception as e:
- _deactivate_fail(
- main_loop,
- handler_id,
- nm_ac,
- "Failed to deactivate connection {id}, error={error}".format(
- id=nm_ac_id, error=e
- ),
- )
- return
-
- if not success:
- _deactivate_fail(
- main_loop,
- handler_id,
- nm_ac,
- "Failed to deactivate connection {0}, error='None "
- "returned from deactivate_connection_finish()'".format(nm_ac_id),
- )
-
-
-def _deactivate_fail(main_loop, handler_id, nm_ac, msg):
- if nm_ac:
- nm_ac.handler_disconnect(handler_id)
- logging.error(msg)
- main_loop.fail(error.LsrNetworkNmError(msg))
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm/client.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm/client.py
deleted file mode 100644
index 4992887..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/nm/client.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-
-import logging
-
-# Relative import is not support by ansible 2.8 yet
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.network_lsr.nm import error # noqa:E501
-
-import gi
-
-try:
- gi.require_version("NM", "1.0")
-
- # It is required to state the NM version before importing it
- # But this break the flake8 rule: https://www.flake8rules.com/rules/E402.html
- # Use NOQA: E402 to suppress it.
- from gi.repository import NM # NOQA: E402
- from gi.repository import GLib # NOQA: E402
- from gi.repository import Gio # NOQA: E402
-
- # pylint: enable=import-error, no-name-in-module
-
- NM
- GLib
- Gio
-except ValueError:
- # This is to workaround a bug in ansible 2.9 which causes
- # this code to be executed on the control node, where NM
- # is not guaranteed to exist. On the other hand, it is
- # ensured on the managed nodes as NM package is installed
- # in the network role. Therefore, this exception handling
- # does not affect the network installation and configuration
- # on the managed nodes.
- pass
-
-
-def get_client():
- return NM.Client.new()
-
-
-class _NmMainLoop(object):
- def __init__(self, timeout):
- self._mainloop = GLib.MainLoop()
- self._cancellable = Gio.Cancellable.new()
- self._timeout = timeout
- self._timeout_id = None
-
- def run(self):
- logging.debug("NM mainloop running")
- user_data = None
- self._timeout_id = GLib.timeout_add(
- int(self._timeout * 1000),
- self._timeout_call_back,
- user_data,
- )
- logging.debug("Added timeout checker")
- self._mainloop.run()
-
- def _timeout_call_back(self, _user_data):
- logging.error("Timeout")
- self.fail(error.LsrNetworkNmError("Timeout"))
-
- @property
- def cancellable(self):
- return self._cancellable
-
- @property
- def is_cancelled(self):
- if self._cancellable:
- return self._cancellable.is_cancelled()
- return True
-
- def _clean_up(self):
- logging.debug("NM mainloop cleaning up")
- if self._timeout_id:
- logging.debug("Removing timeout checker")
- GLib.source_remove(self._timeout_id)
- self._timeout_id = None
- if self._cancellable:
- logging.debug("Canceling all pending tasks")
- self._cancellable.cancel()
- self._cancellable = None
- self._mainloop = None
-
- def quit(self):
- logging.debug("NM mainloop quiting")
- self._mainloop.quit()
- self._clean_up()
-
- def fail(self, exception):
- self.quit()
- raise exception
-
-
-def get_mainloop(timeout):
- return _NmMainLoop(timeout)
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm/connection.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm/connection.py
deleted file mode 100644
index 6982034..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/nm/connection.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-
-# Handle NM.RemoteConnection
-
-import logging
-
-# Relative import is not support by ansible 2.8 yet
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.network_lsr.nm import client # noqa:E501
-from ansible.module_utils.network_lsr.nm import error # noqa:E501
-
-# pylint: enable=import-error, no-name-in-module
-
-
-def delete_remote_connection(nm_profile, timeout, check_mode):
- if not nm_profile:
- logging.info("NULL NM.RemoteConnection, no need to delete")
- return False
-
- if not check_mode:
- main_loop = client.get_mainloop(timeout)
- user_data = main_loop
- nm_profile.delete_async(
- main_loop.cancellable,
- _nm_profile_delete_call_back,
- user_data,
- )
- logging.debug(
- "Deleting profile {id}/{uuid} with timeout {timeout}".format(
- id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), timeout=timeout
- )
- )
- main_loop.run()
- return True
-
-
-def _nm_profile_delete_call_back(nm_profile, result, user_data):
- main_loop = user_data
- if main_loop.is_cancelled:
- return
-
- try:
- success = nm_profile.delete_finish(result)
- except Exception as e:
- main_loop.fail(
- error.LsrNetworkNmError(
- "Connection deletion aborted on {id}/{uuid}: error={error}".format(
- id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), error=e
- )
- )
- )
- if success:
- main_loop.quit()
- else:
- main_loop.fail(
- error.LsrNetworkNmError(
- "Connection deletion aborted on {id}/{uuid}: error=unknown".format(
- id=nm_profile.get_id(), uuid=nm_profile.get_uuid()
- )
- )
- )
-
-
-def volatilize_remote_connection(nm_profile, timeout, check_mode):
- if not nm_profile:
- logging.info("NULL NM.RemoteConnection, no need to volatilize")
- return False
- if not check_mode:
- main_loop = client.get_mainloop(timeout)
- user_data = main_loop
- nm_profile.update2(
- None, # settings
- client.NM.SettingsUpdate2Flags.IN_MEMORY_ONLY
- | client.NM.SettingsUpdate2Flags.VOLATILE,
- None, # args
- main_loop.cancellable,
- _nm_profile_volatile_update2_call_back,
- user_data,
- )
- logging.debug(
- "Volatilizing profile {id}/{uuid} with timeout {timeout}".format(
- id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), timeout=timeout
- )
- )
- main_loop.run()
- return True
-
-
-def _nm_profile_volatile_update2_call_back(nm_profile, result, user_data):
- main_loop = user_data
- if main_loop.is_cancelled:
- return
-
- try:
- success = nm_profile.update2_finish(result)
- except Exception as e:
- main_loop.fail(
- error.LsrNetworkNmError(
- "Connection volatilize aborted on {id}/{uuid}: error={error}".format(
- id=nm_profile.get_id(), uuid=nm_profile.get_uuid(), error=e
- )
- )
- )
- if success:
- main_loop.quit()
- else:
- main_loop.fail(
- error.LsrNetworkNmError(
- "Connection volatilize aborted on {id}/{uuid}: error=unknown".format(
- id=nm_profile.get_id(), uuid=nm_profile.get_uuid()
- )
- )
- )
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm/error.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm/error.py
deleted file mode 100644
index 42014ec..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/nm/error.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-
-
-class LsrNetworkNmError(Exception):
- pass
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm/provider.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm/provider.py
deleted file mode 100644
index 52e7502..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/nm/provider.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-
-import logging
-
-# Relative import is not support by ansible 2.8 yet
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.network_lsr.nm import active_connection # noqa:E501
-from ansible.module_utils.network_lsr.nm import client # noqa:E501
-from ansible.module_utils.network_lsr.nm import connection # noqa:E501
-
-# pylint: enable=import-error, no-name-in-module
-
-
-class NetworkManagerProvider:
- def deactivate_connection(self, connection_name, timeout, check_mode):
- """
- Return True if changed.
- """
- nm_client = client.get_client()
- changed = False
- for nm_ac in nm_client.get_active_connections():
- nm_profile = nm_ac.get_connection()
- if nm_profile and nm_profile.get_id() == connection_name:
- changed |= active_connection.deactivate_active_connection(
- nm_ac, timeout, check_mode
- )
- if not changed:
- logging.info("No active connection for {0}".format(connection_name))
-
- return changed
-
- def volatilize_connection_by_uuid(self, uuid, timeout, check_mode):
- """
- Mark NM.RemoteConnection as volatile(delete on deactivation) via Update2,
- if not supported, delete the profile.
-
- Return True if changed.
- """
- nm_client = client.get_client()
- changed = False
- for nm_profile in nm_client.get_connections():
- if nm_profile and nm_profile.get_uuid() == uuid:
- if hasattr(nm_profile, "update2"):
- changed |= connection.volatilize_remote_connection(
- nm_profile, timeout, check_mode
- )
- else:
- changed |= connection.delete_remote_connection(
- nm_profile, timeout, check_mode
- )
- if not changed:
- logging.info("No connection with UUID {0} to volatilize".format(uuid))
-
- return changed
-
- def get_connections(self):
- nm_client = client.get_client()
- return nm_client.get_connections()
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py b/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py
deleted file mode 100644
index c75242a..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/nm_provider.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-""" Support for NetworkManager aka the NM provider """
-
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.network_lsr.utils import Util # noqa:E501
-
-ETHTOOL_FEATURE_PREFIX = "ETHTOOL_OPTNAME_FEATURE_"
-ETHTOOL_COALESCE_PREFIX = "ETHTOOL_OPTNAME_COALESCE_"
-
-
-def get_nm_ethtool_feature(name):
- """
- Translate ethtool feature into Network Manager name
-
- :param name: Name of the feature
- :type name: str
- :returns: Name of the feature to be used by `NM.SettingEthtool.set_feature()`
- :rtype: str
- """
-
- name = ETHTOOL_FEATURE_PREFIX + name.upper()
-
- feature = getattr(Util.NM(), name, None)
- return feature
-
-
-def get_nm_ethtool_coalesce(name):
- """
- Translate ethtool coalesce into Network Manager name
-
- :param name: Name of the coalesce
- :type name: str
- :returns: Name of the setting to be used by `NM.SettingEthtool.set_coalesce()`
- :rtype: str
- """
-
- name = ETHTOOL_COALESCE_PREFIX + name.upper()
-
- coalesce = getattr(Util.NM(), name, None)
- return coalesce
diff --git a/roles/linux-system-roles.network/module_utils/network_lsr/utils.py b/roles/linux-system-roles.network/module_utils/network_lsr/utils.py
deleted file mode 100644
index 73d9528..0000000
--- a/roles/linux-system-roles.network/module_utils/network_lsr/utils.py
+++ /dev/null
@@ -1,350 +0,0 @@
-#!/usr/bin/python3 -tt
-# SPDX-License-Identifier: BSD-3-Clause
-# vim: fileencoding=utf8
-
-import socket
-import sys
-import uuid
-
-# pylint: disable=import-error, no-name-in-module
-from ansible.module_utils.network_lsr import MyError # noqa:E501
-
-
-class Util:
-
- PY3 = sys.version_info[0] == 3
-
- STRING_TYPE = str if PY3 else basestring # noqa:F821
-
- @staticmethod
- def first(iterable, default=None, pred=None):
- for v in iterable:
- if pred is None or pred(v):
- return v
- return default
-
- @staticmethod
- def path_to_glib_bytes(path):
- """
- Converts a path to a GLib.Bytes object that can be accepted by NM
- """
- return Util.GLib().Bytes.new(("file://%s\x00" % path).encode("utf-8"))
-
- @staticmethod
- def convert_passwd_flags_nm(secret_flags):
- """
- Converts an array of "secret flags" strings
- to an integer represantion understood by NetworkManager
- """
-
- flag_int = 0
-
- if "none" in secret_flags:
- flag_int += 0
- if "agent-owned" in secret_flags:
- flag_int += 1
- if "not-saved" in secret_flags:
- flag_int += 2
- if "not-required" in secret_flags:
- flag_int += 4
-
- return flag_int
-
- @classmethod
- def create_uuid(cls):
- return str(uuid.uuid4())
-
- @classmethod
- def NM(cls):
- n = getattr(cls, "_NM", None)
- if n is None:
- # Installing pygobject in a tox virtualenv does not work out of the
- # box
- # pylint: disable=import-error
- import gi
-
- gi.require_version("NM", "1.0")
- from gi.repository import NM, GLib, Gio, GObject
-
- cls._NM = NM
- cls._GLib = GLib
- cls._Gio = Gio
- cls._GObject = GObject
- n = NM
- return n
-
- @classmethod
- def GLib(cls):
- cls.NM()
- return cls._GLib
-
- @classmethod
- def Gio(cls):
- cls.NM()
- return cls._Gio
-
- @classmethod
- def GObject(cls):
- cls.NM()
- return cls._GObject
-
- @classmethod
- def Timestamp(cls):
- return cls.GLib().get_monotonic_time()
-
- @classmethod
- def GMainLoop(cls):
- gmainloop = getattr(cls, "_GMainLoop", None)
- if gmainloop is None:
- gmainloop = cls.GLib().MainLoop()
- cls._GMainLoop = gmainloop
- return gmainloop
-
- @classmethod
- def GMainLoop_run(cls, timeout=None):
- if timeout is None:
- cls.GMainLoop().run()
- return True
-
- GLib = cls.GLib()
- timeout_reached = []
- loop = cls.GMainLoop()
-
- def _timeout_cb(unused):
- timeout_reached.append(1)
- loop.quit()
- return False
-
- timeout_id = GLib.timeout_add(int(timeout * 1000), _timeout_cb, None)
- loop.run()
- if not timeout_reached:
- GLib.source_remove(timeout_id)
- return not timeout_reached
-
- @classmethod
- def GMainLoop_iterate(cls, may_block=False):
- return cls.GMainLoop().get_context().iteration(may_block)
-
- @classmethod
- def GMainLoop_iterate_all(cls):
- c = 0
- while cls.GMainLoop_iterate():
- c += 1
- return c
-
- @classmethod
- def call_async_method(cls, object_, action, args, mainloop_timeout=10):
- """ Asynchronously call a NetworkManager method """
- cancellable = cls.create_cancellable()
- async_action = action + "_async"
- # NM does not use a uniform naming for the async methods,
- # for checkpoints it is:
- # NMClient.checkpoint_create() and NMClient.checkpoint_create_finish(),
- # but for reapply it is:
- # NMDevice.reapply_async() and NMDevice.reapply_finish()
- # NMDevice.reapply() is a synchronous version
- # Therefore check if there is a method if an `async` suffix and use the
- # one without the suffix otherwise
- if not hasattr(object_, async_action):
- async_action = action
- finish = action + "_finish"
- user_data = {}
-
- fullargs = []
- fullargs += args
- fullargs += (cancellable, cls.create_callback(finish), user_data)
-
- getattr(object_, async_action)(*fullargs)
-
- if not cls.GMainLoop_run(mainloop_timeout):
- cancellable.cancel()
- raise MyError("failure to call %s.%s(): timeout" % (object_, async_action))
-
- success = user_data.get("success", None)
- if success is not None:
- return success
-
- raise MyError(
- "failure to %s checkpoint: %s: %r"
- % (action, user_data.get("error", "unknown error"), user_data)
- )
-
- @classmethod
- def create_cancellable(cls):
- return cls.Gio().Cancellable.new()
-
- @classmethod
- def create_callback(cls, finish_method):
- """
- Create a callback that will return the result of the finish method and
- quit the GMainLoop
-
- :param finish_method str: Name of the finish method to call from the
- source object in the callback
- """
-
- def callback(source_object, res, user_data):
- success = None
- try:
- success = getattr(source_object, finish_method)(res)
- except Exception as e:
- if cls.error_is_cancelled(e):
- return
- user_data["error"] = str(e)
- user_data["success"] = success
- cls.GMainLoop().quit()
-
- return callback
-
- @classmethod
- def error_is_cancelled(cls, e):
- GLib = cls.GLib()
- if isinstance(e, GLib.GError):
- if (
- e.domain == "g-io-error-quark"
- and e.code == cls.Gio().IOErrorEnum.CANCELLED
- ):
- return True
- return False
-
- @staticmethod
- def ifname_valid(ifname):
- # see dev_valid_name() in kernel's net/core/dev.c
- if not ifname:
- return False
- if ifname in [".", ".."]:
- return False
- if len(ifname) >= 16:
- return False
- if any([c == "/" or c == ":" or c.isspace() for c in ifname]):
- return False
- # FIXME: encoding issues regarding python unicode string
- return True
-
- @staticmethod
- def mac_aton(mac_str, force_len=None):
- # we also accept None and '' for convenience.
- # - None yiels None
- # - '' yields []
- if mac_str is None:
- return mac_str
- i = 0
- b = []
- for c in mac_str:
- if i == 2:
- if c != ":":
- raise MyError("not a valid MAC address: '%s'" % (mac_str))
- i = 0
- continue
- try:
- if i == 0:
- n = int(c, 16) * 16
- i = 1
- else:
- assert i == 1
- n = n + int(c, 16)
- i = 2
- b.append(n)
- except Exception:
- raise MyError("not a valid MAC address: '%s'" % (mac_str))
- if i == 1:
- raise MyError("not a valid MAC address: '%s'" % (mac_str))
- if force_len is not None:
- if force_len != len(b):
- raise MyError(
- "not a valid MAC address of length %s: '%s'" % (force_len, mac_str)
- )
- return b
-
- @staticmethod
- def mac_ntoa(mac):
- if mac is None:
- return None
- # bytearray() is needed for python2 compatibility
- return ":".join(["%02x" % c for c in bytearray(mac)])
-
- @staticmethod
- def mac_norm(mac_str, force_len=None):
- return Util.mac_ntoa(Util.mac_aton(mac_str, force_len))
-
- @staticmethod
- def boolean(arg):
- if arg is None or isinstance(arg, bool):
- return arg
- arg0 = arg
- if isinstance(arg, Util.STRING_TYPE):
- arg = arg.lower()
-
- if arg in ["y", "yes", "on", "1", "true", 1, True]:
- return True
- if arg in ["n", "no", "off", "0", "false", 0, False]:
- return False
-
- raise MyError("value '%s' is not a boolean" % (arg0))
-
- @staticmethod
- def parse_ip(addr, family=None):
- if addr is None:
- return (None, None)
- if family is not None:
- Util.addr_family_check(family)
- a = socket.inet_pton(family, addr)
- else:
- a = None
- family = None
- try:
- a = socket.inet_pton(socket.AF_INET, addr)
- family = socket.AF_INET
- except Exception:
- a = socket.inet_pton(socket.AF_INET6, addr)
- family = socket.AF_INET6
- return (socket.inet_ntop(family, a), family)
-
- @staticmethod
- def addr_family_check(family):
- if family != socket.AF_INET and family != socket.AF_INET6:
- raise MyError("invalid address family %s" % (family))
-
- @staticmethod
- def addr_family_to_v(family):
- if family is None:
- return ""
- if family == socket.AF_INET:
- return "v4"
- if family == socket.AF_INET6:
- return "v6"
- raise MyError("invalid address family '%s'" % (family))
-
- @staticmethod
- def addr_family_default_prefix(family):
- Util.addr_family_check(family)
- if family == socket.AF_INET:
- return 24
- else:
- return 64
-
- @staticmethod
- def addr_family_valid_prefix(family, prefix):
- Util.addr_family_check(family)
- if family == socket.AF_INET:
- m = 32
- else:
- m = 128
- return prefix >= 0 and prefix <= m
-
- @staticmethod
- def parse_address(address, family=None):
- try:
- parts = address.split()
- addr_parts = parts[0].split("/")
- if len(addr_parts) != 2:
- raise MyError("expect two addr-parts: ADDR/PLEN")
- a, family = Util.parse_ip(addr_parts[0], family)
- prefix = int(addr_parts[1])
- if not Util.addr_family_valid_prefix(family, prefix):
- raise MyError("invalid prefix %s" % (prefix))
- if len(parts) > 1:
- raise MyError("too many parts")
- return {"address": a, "family": family, "prefix": prefix}
- except Exception:
- raise MyError("invalid address '%s'" % (address))
diff --git a/roles/linux-system-roles.network/molecule/default/Dockerfile.j2 b/roles/linux-system-roles.network/molecule/default/Dockerfile.j2
deleted file mode 100644
index 7d2fbe8..0000000
--- a/roles/linux-system-roles.network/molecule/default/Dockerfile.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Molecule managed
-
-{% if item.registry is defined %}
-FROM {{ item.registry.url }}/{{ item.image }}
-{% else %}
-FROM {{ item.image }}
-{% endif %}
-
-RUN set -euo pipefail; \
- pkgs="python sudo yum-plugin-ovl bash"; \
- if grep 'CentOS release 6' /etc/centos-release > /dev/null 2>&1; then \
- for file in /etc/yum.repos.d/CentOS-*.repo; do \
- if ! grep '^baseurl=.*vault[.]centos[.]org' "$file"; then \
- sed -i -e 's,^mirrorlist,#mirrorlist,' \
- -e 's,^#baseurl=,baseurl=,' \
- -e 's,mirror.centos.org/centos/$releasever,vault.centos.org/6.10,' \
- "$file"; \
- fi; \
- done; \
- pkgs="$pkgs upstart chkconfig initscripts"; \
- fi; \
- if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \
- elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python3 sudo python3-devel python3-dnf bash && dnf clean all; \
- elif [ $(command -v yum) ]; then yum makecache fast && yum install -y $pkgs && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
- elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \
- elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \
- elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi
diff --git a/roles/linux-system-roles.network/molecule/default/molecule.yml b/roles/linux-system-roles.network/molecule/default/molecule.yml
deleted file mode 100644
index 91fc962..0000000
--- a/roles/linux-system-roles.network/molecule/default/molecule.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# SPDX-License-Identifier: MIT
----
-dependency:
- name: galaxy
-driver:
- name: ${LSR_MOLECULE_DRIVER:-docker}
-platforms:
- - name: centos-6
- image: registry.centos.org/centos:6
- volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:ro
- privileged: true
- command: /sbin/init
- - name: centos-7
- image: registry.centos.org/centos/systemd:latest
- volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:ro
- privileged: true
- command: /usr/lib/systemd/systemd --system
- - name: centos-8
- image: registry.centos.org/centos:8
- volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:ro
- privileged: true
- command: /usr/lib/systemd/systemd --system
-provisioner:
- name: ansible
- log: true
- playbooks:
- converge: ../../tests/tests_default.yml
-scenario:
- name: default
- test_sequence:
- - destroy
- - create
- - converge
- - idempotence
- - check
- - destroy
diff --git a/roles/linux-system-roles.network/molecule_extra_requirements.txt b/roles/linux-system-roles.network/molecule_extra_requirements.txt
deleted file mode 100644
index 5ff4857..0000000
--- a/roles/linux-system-roles.network/molecule_extra_requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Write extra requirements for running molecule here:
diff --git a/roles/linux-system-roles.network/pylint_extra_requirements.txt b/roles/linux-system-roles.network/pylint_extra_requirements.txt
deleted file mode 100644
index 796e4d0..0000000
--- a/roles/linux-system-roles.network/pylint_extra_requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Write extra requirements for running pylint here:
-mock
-pytest
diff --git a/roles/linux-system-roles.network/pylintrc b/roles/linux-system-roles.network/pylintrc
deleted file mode 100644
index 3cd3739..0000000
--- a/roles/linux-system-roles.network/pylintrc
+++ /dev/null
@@ -1,546 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# This file was generated using `pylint --generate-rcfile > pylintrc` command.
-[MASTER]
-
-# A comma-separated list of package or module names from where C extensions may
-# be loaded. Extensions are loading into the active Python interpreter and may
-# run arbitrary code
-extension-pkg-whitelist=
-
-# Add files or directories to the blacklist. They should be base names, not
-# paths.
-ignore=.git,.tox
-
-# Add files or directories matching the regex patterns to the blacklist. The
-# regex matches against base names, not paths.
-ignore-patterns=
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-#init-hook=
-
-# Use multiple processes to speed up Pylint.
-jobs=1
-
-# List of plugins (as comma separated values of python modules names) to load,
-# usually to register additional checkers.
-load-plugins=
-
-# Pickle collected data for later comparisons.
-persistent=yes
-
-# Specify a configuration file.
-#rcfile=
-
-# When enabled, pylint would attempt to guess common misconfiguration and emit
-# user-friendly hints instead of false-positive error messages
-suggestion-mode=yes
-
-# Allow loading of arbitrary C extensions. Extensions are imported into the
-# active Python interpreter and may run arbitrary code.
-unsafe-load-any-extension=no
-
-
-[MESSAGES CONTROL]
-
-# Only show warnings with the listed confidence levels. Leave empty to show
-# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
-confidence=
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once).You can also use "--disable=all" to
-# disable everything first and then reenable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use"--disable=all --enable=classes
-# --disable=W"
-disable=wrong-import-position
-#disable=print-statement,
-# parameter-unpacking,
-# unpacking-in-except,
-# old-raise-syntax,
-# backtick,
-# long-suffix,
-# old-ne-operator,
-# old-octal-literal,
-# import-star-module-level,
-# non-ascii-bytes-literal,
-# raw-checker-failed,
-# bad-inline-option,
-# locally-disabled,
-# locally-enabled,
-# file-ignored,
-# suppressed-message,
-# useless-suppression,
-# deprecated-pragma,
-# apply-builtin,
-# basestring-builtin,
-# buffer-builtin,
-# cmp-builtin,
-# coerce-builtin,
-# execfile-builtin,
-# file-builtin,
-# long-builtin,
-# raw_input-builtin,
-# reduce-builtin,
-# standarderror-builtin,
-# unicode-builtin,
-# xrange-builtin,
-# coerce-method,
-# delslice-method,
-# getslice-method,
-# setslice-method,
-# no-absolute-import,
-# old-division,
-# dict-iter-method,
-# dict-view-method,
-# next-method-called,
-# metaclass-assignment,
-# indexing-exception,
-# raising-string,
-# reload-builtin,
-# oct-method,
-# hex-method,
-# nonzero-method,
-# cmp-method,
-# input-builtin,
-# round-builtin,
-# intern-builtin,
-# unichr-builtin,
-# map-builtin-not-iterating,
-# zip-builtin-not-iterating,
-# range-builtin-not-iterating,
-# filter-builtin-not-iterating,
-# using-cmp-argument,
-# eq-without-hash,
-# div-method,
-# idiv-method,
-# rdiv-method,
-# exception-message-attribute,
-# invalid-str-codec,
-# sys-max-int,
-# bad-python3-import,
-# deprecated-string-function,
-# deprecated-str-translate-call,
-# deprecated-itertools-function,
-# deprecated-types-field,
-# next-method-defined,
-# dict-items-not-iterating,
-# dict-keys-not-iterating,
-# dict-values-not-iterating
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time (only on the command line, not in the configuration file where
-# it should appear only once). See also the "--disable" option for examples.
-enable=c-extension-no-member
-
-
-[REPORTS]
-
-# Python expression which should return a note less than 10 (10 is the highest
-# note). You have access to the variables errors warning, statement which
-# respectively contain the number of errors / warnings messages and the total
-# number of statements analyzed. This is used by the global evaluation report
-# (RP0004).
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details
-#msg-template=
-
-# Set the output format. Available formats are text, parseable, colorized, json
-# and msvs (visual studio).You can also give a reporter class, eg
-# mypackage.mymodule.MyReporterClass.
-output-format=text
-
-# Tells whether to display a full report or only the messages
-reports=no
-
-# Activate the evaluation score.
-score=yes
-
-
-[REFACTORING]
-
-# Maximum number of nested blocks for function / method body
-max-nested-blocks=5
-
-# Complete name of functions that never returns. When checking for
-# inconsistent-return-statements if a never returning function is called then
-# it will be considered as an explicit return statement and no message will be
-# printed.
-never-returning-functions=optparse.Values,sys.exit
-
-
-[LOGGING]
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
-
-
-[TYPECHECK]
-
-# List of decorators that produce context managers, such as
-# contextlib.contextmanager. Add to this list to register other decorators that
-# produce valid context managers.
-contextmanager-decorators=contextlib.contextmanager
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-generated-members=
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-
-# This flag controls whether pylint should warn about no-member and similar
-# checks whenever an opaque object is returned when inferring. The inference
-# can return multiple potential results while evaluating a Python object, but
-# some branches might not be evaluated, which results in partial inference. In
-# that case, it might be useful to still emit no-member and other checks for
-# the rest of the inferred objects.
-ignore-on-opaque-inference=yes
-
-# List of class names for which member attributes should not be checked (useful
-# for classes with dynamically set attributes). This supports the use of
-# qualified names.
-ignored-classes=optparse.Values,thread._local,_thread._local
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis. It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
-
-# Show a hint with possible names when a member name was not found. The aspect
-# of finding the hint is based on edit distance.
-missing-member-hint=yes
-
-# The minimum edit distance a name should have in order to be considered a
-# similar match for a missing member name.
-missing-member-hint-distance=1
-
-# The total number of similar names that should be taken in consideration when
-# showing a hint for a missing member.
-missing-member-max-choices=1
-
-
-[FORMAT]
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )??$
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
-# tab).
-indent-string=' '
-
-# Maximum number of characters on a single line.
-max-line-length=88
-
-# Maximum number of lines in a module
-max-module-lines=1000
-
-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,
- dict-separator
-
-# Allow the body of a class to be on the same line as the declaration if body
-# contains single statement.
-single-line-class-stmt=no
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-
-[SPELLING]
-
-# Limits count of emitted suggestions for spelling mistakes
-max-spelling-suggestions=4
-
-# Spelling dictionary name. Available dictionaries: none. To make it working
-# install python-enchant package.
-spelling-dict=
-
-# List of comma separated words that should not be checked.
-spelling-ignore-words=
-
-# A path to a file that contains private dictionary; one word per line.
-spelling-private-dict-file=
-
-# Tells whether to store unknown words to indicated private dictionary in
-# --spelling-private-dict-file option instead of raising a message.
-spelling-store-unknown-words=no
-
-
-[SIMILARITIES]
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=no
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-
-[VARIABLES]
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
-
-# Tells whether unused global variables should be treated as a violation.
-allow-global-unused-variables=yes
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,
- _cb
-
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore
-ignored-argument-names=_.*|^ignored_|^unused_
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# List of qualified module names which can have objects that can redefine
-# builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins
-
-
-[BASIC]
-
-# Naming style matching correct argument names
-argument-naming-style=snake_case
-
-# Regular expression matching correct argument names. Overrides argument-
-# naming-style
-#argument-rgx=
-
-# Naming style matching correct attribute names
-attr-naming-style=snake_case
-
-# Regular expression matching correct attribute names. Overrides attr-naming-
-# style
-#attr-rgx=
-
-# Bad variable names which should always be refused, separated by a comma
-bad-names=foo,
- bar,
- baz,
- toto,
- tutu,
- tata
-
-# Naming style matching correct class attribute names
-class-attribute-naming-style=any
-
-# Regular expression matching correct class attribute names. Overrides class-
-# attribute-naming-style
-#class-attribute-rgx=
-
-# Naming style matching correct class names
-class-naming-style=PascalCase
-
-# Regular expression matching correct class names. Overrides class-naming-style
-#class-rgx=
-
-# Naming style matching correct constant names
-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct constant names. Overrides const-naming-
-# style
-#const-rgx=
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-# Naming style matching correct function names
-function-naming-style=snake_case
-
-# Regular expression matching correct function names. Overrides function-
-# naming-style
-#function-rgx=
-
-# Good variable names which should always be accepted, separated by a comma
-good-names=i,
- j,
- k,
- ex,
- Run,
- _
-
-# Include a hint for the correct naming format with invalid-name
-include-naming-hint=no
-
-# Naming style matching correct inline iteration names
-inlinevar-naming-style=any
-
-# Regular expression matching correct inline iteration names. Overrides
-# inlinevar-naming-style
-#inlinevar-rgx=
-
-# Naming style matching correct method names
-method-naming-style=snake_case
-
-# Regular expression matching correct method names. Overrides method-naming-
-# style
-#method-rgx=
-
-# Naming style matching correct module names
-module-naming-style=snake_case
-
-# Regular expression matching correct module names. Overrides module-naming-
-# style
-#module-rgx=
-
-# Colon-delimited sets of names that determine each other's naming style when
-# the name regexes allow several styles.
-name-group=
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=^_
-
-# List of decorators that produce properties, such as abc.abstractproperty. Add
-# to this list to register other decorators that produce valid properties.
-property-classes=abc.abstractproperty
-
-# Naming style matching correct variable names
-variable-naming-style=snake_case
-
-# Regular expression matching correct variable names. Overrides variable-
-# naming-style
-#variable-rgx=
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,
- XXX,
- TODO
-
-
-[IMPORTS]
-
-# Allow wildcard imports from modules that define __all__.
-allow-wildcard-with-all=no
-
-# Analyse import fallback blocks. This can be used to support both Python 2 and
-# 3 compatible code, which means that the block might have code that exists
-# only in one or another interpreter, leading to false positives when analysed.
-analyse-fallback-blocks=no
-
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=regsub,
- TERMIOS,
- Bastion,
- rexec
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled)
-ext-import-graph=
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled)
-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled)
-int-import-graph=
-
-# Force import order to recognize a module as part of the standard
-# compatibility libraries.
-known-standard-library=
-
-# Force import order to recognize a module as part of a third party library.
-known-third-party=enchant
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method
-max-args=5
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Maximum number of boolean expressions in a if statement
-max-bool-expr=5
-
-# Maximum number of branch for function / method body
-max-branches=12
-
-# Maximum number of locals for function / method body
-max-locals=15
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-# Maximum number of return / yield for function / method body
-max-returns=6
-
-# Maximum number of statements in function / method body
-max-statements=50
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,
- __new__,
- setUp
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,
- _fields,
- _replace,
- _source,
- _make
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-overgeneral-exceptions=Exception
diff --git a/roles/linux-system-roles.network/pytest_extra_requirements.txt b/roles/linux-system-roles.network/pytest_extra_requirements.txt
deleted file mode 100644
index 9e2d328..0000000
--- a/roles/linux-system-roles.network/pytest_extra_requirements.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Write extra requirements for running pytest here:
-# If you need ansible then uncomment the following line:
-#-ransible_pytest_extra_requirements.txt
-# If you need mock then uncomment the following line:
-mock ; python_version < "3.0"
-# ansible and dependencies for all supported platforms
-ansible ; python_version > "2.6"
-ansible<2.7 ; python_version < "2.7"
-idna<2.8 ; python_version < "2.7"
-PyYAML<5.1 ; python_version < "2.7"
diff --git a/roles/linux-system-roles.network/scripts/print_all_options.py b/roles/linux-system-roles.network/scripts/print_all_options.py
deleted file mode 100755
index b414fe8..0000000
--- a/roles/linux-system-roles.network/scripts/print_all_options.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/python3 -tt
-# SPDX-License-Identifier: BSD-3-Clause
-# Helper to print all options that the module in the network role accepts for
-# profiles
-
-from collections.abc import Mapping
-from collections.abc import Sequence
-from copy import deepcopy
-from unittest import mock
-import os
-import sys
-
-PRIORITIES = (
- "name",
- "type",
- "interface_name",
- "mac",
- "state",
- "persistent_state",
- "controller",
- "port_type",
- "parent",
- "ignore_errors",
- "force_state_change",
- "check_iface_exists",
- "autoconnect",
- "wait",
- "zone",
- "mtu",
- "ip",
- "ethernet",
- "ethtool",
- "bridge",
- "bond",
- "team",
- "vlan",
- "wireless",
- "macvlan",
- "infiniband",
-)
-
-
-import yaml
-
-parentdir = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
-
-with mock.patch.object(
- sys,
- "path",
- [parentdir, os.path.join(parentdir, "module_utils/network_lsr")] + sys.path,
-):
- with mock.patch.dict(
- "sys.modules",
- {"ansible": mock.Mock(), "ansible.module_utils": __import__("module_utils")},
- ):
- import argument_validator as av
-
-COMMENT = "@@"
-EMPTY = "/EMPTY/"
-
-
-def parse_validator(validator):
- default = validator.default_value
- if isinstance(validator, av.ArgValidatorDict):
- res = {}
- for k, v in validator.nested.items():
- if (
- v.name
- not in (
- "infiniband_transport_mode",
- "infiniband_p_key",
- "vlan_id",
- )
- and not isinstance(v, av.ArgValidatorDeprecated)
- ):
- name = k
- if not validator.required:
- pass
- # name += " DICT optional"
- res[name] = parse_validator(v)
- elif isinstance(validator, av.ArgValidatorList):
- res = [parse_validator(validator.nested)]
- elif isinstance(validator, av.ArgValidatorNum):
-
- minval = validator.val_min
- maxval = validator.val_max
- comment = f" {COMMENT}"
- if not validator.required:
- comment += " optional"
- if minval is not None:
- comment += " mininum=" + str(minval)
- if maxval:
- if maxval == 0xFFFFFFFF:
- maxval = hex(maxval)
- comment += " maximum=" + str(maxval)
-
- if default is not None:
- res = str(default)
- elif minval is not None:
- res = str(minval)
- elif maxval is not None:
- res = str(maxval)
- else:
- res = ""
-
- res += comment
- elif isinstance(validator, av.ArgValidatorIP):
- res = f"{EMPTY} {COMMENT} IP Address"
- elif isinstance(validator, av.ArgValidatorStr):
- if default:
- res = default
- elif validator.enum_values:
- res = "|".join(validator.enum_values)
- else:
- res = EMPTY
- if not validator.required:
- res += f" {COMMENT} optional"
-
- # res += " " + str(validator.__class__)
- elif isinstance(validator, av.ArgValidatorBool):
- if default is not None:
- res = "yes" if default else "no"
- else:
- res = "yes|no"
-
- if not validator.required:
- res += f" {COMMENT} optional"
- else:
- res = validator.name + f" {COMMENT} FIXME " + str(validator.__class__)
-
- return res
-
-
-def represent_dict(dumper, data):
- """
- Represent dictionary with insert order
- """
- value = []
-
- for item_key, item_value in data.items():
- node_key = dumper.represent_data(item_key)
- node_value = dumper.represent_data(item_value)
- value.append((node_key, node_value))
-
- return yaml.nodes.MappingNode("tag:yaml.org,2002:map", value)
-
-
-def priority_sorted(data):
- if isinstance(data, Sequence) and not isinstance(data, str):
- return [priority_sorted(item) for item in data]
-
- if isinstance(data, Mapping):
- sorted_data = {}
- for key in sorted(data, key=prioritize):
- sorted_data[key] = priority_sorted(data[key])
- return sorted_data
-
- return deepcopy(data)
-
-
-def prioritize(key):
- try:
- priority = PRIORITIES.index(key)
- except ValueError:
- priority = len(PRIORITIES)
- return (priority, key)
-
-
-yaml.add_representer(dict, represent_dict)
-sorted_data = priority_sorted([parse_validator(av.ArgValidator_DictConnection())])
-yaml_example = (
- yaml.dump(
- sorted_data,
- explicit_start=True,
- default_flow_style=False,
- width=100,
- )
- .replace(COMMENT, "#")
- .replace(EMPTY, "")
-)
-
-# yaml_example = re.sub(r"# ([^:]*):", r": # \1", yaml_example)
-
-print(yaml_example)
diff --git a/roles/linux-system-roles.network/tasks/main.yml b/roles/linux-system-roles.network/tasks/main.yml
deleted file mode 100644
index dfc5481..0000000
--- a/roles/linux-system-roles.network/tasks/main.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# get service facts, used in defaults/main.yml
----
-- name: Check which services are running
- service_facts:
- no_log: true
-
-# needed for ansible_facts.packages
-- name: Check which packages are installed
- package_facts:
- no_log: true
-
-- name: Print network provider
- debug:
- msg: "Using network provider: {{ network_provider }}"
-
-# Depending on the plugins, checking installed packages might be slow
-# for example subscription manager might slow this down
-# Therefore install packages only when rpm does not find them
-- name: Install packages
- package:
- name: "{{ network_packages }}"
- state: present
- when:
- - not network_packages is subset(ansible_facts.packages.keys())
- register: __network_package_install
-
-# If network packages changed and wireless or team connections are specified,
-# NetworkManager must be restarted
-- name: Restart NetworkManager due to wireless or team interfaces
- service:
- name: NetworkManager
- state: restarted
- when:
- - __network_wireless_connections_defined
- or __network_team_connections_defined
- - network_provider == "nm"
- - network_allow_restart
- # ansible-lint wants this to be a handler, but this is not appropriate as
- # NetworkManager must be restarted prior to the connections being created.
- # see (https://docs.ansible.com/ansible-lint/rules/default_rules.html)
- - __network_package_install.changed # noqa 503
-
-- name: Enable and start NetworkManager
- service:
- name: "{{ network_service_name }}"
- state: started
- enabled: true
- when:
- - network_provider == "nm"
- no_log: true
-
-# If any 802.1x connections are used, the wpa_supplicant
-# service is required to be running
-- name: Enable and start wpa_supplicant
- service:
- name: wpa_supplicant
- state: started
- enabled: true
- when:
- - network_provider == "nm"
- - __network_wpa_supplicant_required
-
-- name: Enable network service
- service:
- name: "{{ network_service_name }}"
- enabled: true
- when:
- - network_provider == "initscripts"
- no_log: true
-
-- name: Ensure initscripts network file dependency is present
- copy:
- dest: /etc/sysconfig/network
- content: "# Created by network system role"
- mode: "0644"
- force: false
- when:
- - network_provider == "initscripts"
-
-- name: Configure networking connection profiles
- network_connections:
- provider: "{{ network_provider | mandatory }}"
- ignore_errors: "{{ network_ignore_errors | default(omit) }}"
- force_state_change: "{{ network_force_state_change | default(omit) }}"
- connections: "{{ network_connections | default([]) }}"
- __debug_flags: "{{ __network_debug_flags | default(omit) }}"
- register: __network_connections_result
-
-- name: Show debug messages
- debug: var=__network_connections_result
-
-- name: Re-test connectivity
- ping:
diff --git a/roles/linux-system-roles.network/tests/.gitignore b/roles/linux-system-roles.network/tests/.gitignore
deleted file mode 100644
index cb7340c..0000000
--- a/roles/linux-system-roles.network/tests/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/*.retry
-/inventory
diff --git a/roles/linux-system-roles.network/tests/covstats b/roles/linux-system-roles.network/tests/covstats
deleted file mode 100755
index 310327d..0000000
--- a/roles/linux-system-roles.network/tests/covstats
+++ /dev/null
@@ -1,16 +0,0 @@
-#! /bin/bash
-# SPDX-License-Identifier: BSD-3-Clause
-
-if [ "$#" -lt 1 ]
-then
- echo "USAGE: ${0} coverage_data_file..."
- echo "Show Statistics for each coverage data file"
- exit 1
-fi
-
-for coverage_file in "${@}"
-do
- echo "coverage statistics for: ${coverage_file}:"
- COVERAGE_FILE="${coverage_file}" coverage report
- echo
-done
diff --git a/roles/linux-system-roles.network/tests/ensure_provider_tests.py b/roles/linux-system-roles.network/tests/ensure_provider_tests.py
deleted file mode 100755
index 078e99d..0000000
--- a/roles/linux-system-roles.network/tests/ensure_provider_tests.py
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: BSD-3-Clause
-""" Check that there is a playbook to run all role tests with both providers
-"""
-# vim: fileencoding=utf8
-
-import glob
-import os
-import sys
-
-
-GET_NM_VERSION = """
- - block:
- - name: Install NetworkManager
- package:
- name: NetworkManager
- state: present
- - name: Get NetworkManager version
- command: rpm -q --qf "%{version}" NetworkManager
- args:
- warn: false
- register: NetworkManager_version
- when: true
- when:
- - ansible_distribution_major_version != '6'
- tags:
- - always
-"""
-
-MINIMUM_NM_VERSION_CHECK = """
- - NetworkManager_version.stdout is version({minimum_nm_version}, '>=')
-"""
-
-EXTRA_RUN_CONDITION_PREFIX = " - "
-
-RUN_PLAYBOOK_WITH_NM = """# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook '{test_playbook}' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-{get_nm_version}
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: {test_playbook}
- when:
- - ansible_distribution_major_version != '6'
-{minimum_nm_version_check}{extra_run_condition}"""
-
-MINIMUM_VERSION = "minimum_version"
-EXTRA_RUN_CONDITION = "extra_run_condition"
-NM_ONLY_TESTS = {
- "playbooks/tests_802_1x_updated.yml": {},
- "playbooks/tests_802_1x.yml": {},
- "playbooks/tests_eth_dns_support.yml": {},
- "playbooks/tests_dummy.yml": {},
- "playbooks/tests_ethtool_features.yml": {
- MINIMUM_VERSION: "'1.20.0'",
- "comment": "# NetworkManager 1.20.0 introduced ethtool settings support",
- },
- "playbooks/tests_ipv6_disabled.yml": {
- EXTRA_RUN_CONDITION: "ansible_distribution_major_version == '8'",
- },
- "playbooks/tests_provider.yml": {
- MINIMUM_VERSION: "'1.20.0'",
- "comment": "# NetworKmanager 1.20.0 added support for forgetting profiles",
- },
- "playbooks/tests_ethtool_coalesce.yml": {
- MINIMUM_VERSION: "'1.25.1'",
- "comment": "# NetworkManager 1.25.1 introduced ethtool coalesce support",
- },
- "playbooks/tests_802_1x_updated.yml": {},
- "playbooks/tests_802_1x.yml": {},
- "playbooks/tests_reapply.yml": {},
- # team interface is not supported on Fedora
- "playbooks/tests_team.yml": {
- EXTRA_RUN_CONDITION: "ansible_distribution != 'Fedora'",
- },
- "playbooks/tests_team_plugin_installation.yml": {},
- # mac80211_hwsim (used for tests_wireless) only seems to be available
- # and working on RHEL/CentOS 7
- "playbooks/tests_wireless.yml": {
- EXTRA_RUN_CONDITION: "ansible_distribution_major_version == '7'",
- },
- "playbooks/tests_wireless_plugin_installation.yml": {},
-}
-
-IGNORE = [
- # checked by tests_regression_nm.yml
- "playbooks/tests_checkpoint_cleanup.yml",
-]
-
-RUN_PLAYBOOK_WITH_INITSCRIPTS = """# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-- hosts: all
- name: Run playbook '{test_playbook}' with initscripts as provider
- tasks:
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
- tags:
- - always
-
-- import_playbook: {test_playbook}
-"""
-
-
-def create_nm_playbook(test_playbook):
- fileroot = os.path.splitext(os.path.basename(test_playbook))[0]
- nm_testfile = fileroot + "_nm.yml"
-
- minimum_nm_version = NM_ONLY_TESTS.get(test_playbook, {}).get(MINIMUM_VERSION)
- extra_run_condition = NM_ONLY_TESTS.get(test_playbook, {}).get(
- EXTRA_RUN_CONDITION, ""
- )
- if extra_run_condition:
- extra_run_condition = "{}{}\n".format(
- EXTRA_RUN_CONDITION_PREFIX, extra_run_condition
- )
-
- nm_version_check = ""
- if minimum_nm_version:
- nm_version_check = MINIMUM_NM_VERSION_CHECK.format(
- minimum_nm_version=minimum_nm_version
- )
-
- nominal_nm_testfile_data = RUN_PLAYBOOK_WITH_NM.format(
- test_playbook=test_playbook,
- get_nm_version=minimum_nm_version and GET_NM_VERSION or "",
- minimum_nm_version_check=nm_version_check,
- extra_run_condition=extra_run_condition,
- )
-
- return nm_testfile, nominal_nm_testfile_data
-
-
-def create_initscripts_playbook(test_playbook):
- fileroot = os.path.splitext(os.path.basename(test_playbook))[0]
- init_testfile = fileroot + "_initscripts.yml"
-
- nominal_data = RUN_PLAYBOOK_WITH_INITSCRIPTS.format(test_playbook=test_playbook)
-
- return init_testfile, nominal_data
-
-
-def check_playbook(generate, testfile, test_playbook, nominal_data):
- is_missing = False
- returncode = None
- if generate:
- print(testfile)
- with open(testfile, "w") as ofile:
- ofile.write(nominal_data)
-
- if not os.path.isfile(testfile) and not generate:
- is_missing = True
- else:
- with open(testfile) as ifile:
- testdata = ifile.read()
- if testdata != nominal_data:
- print(f"ERROR: Playbook does not match nominal value: {testfile}")
- returncode = 1
-
- return is_missing, returncode
-
-
-def main():
- testsfiles = glob.glob("playbooks/tests_*.yml")
- missing = []
- returncode = 0
-
- # Generate files when specified
- generate = bool(len(sys.argv) > 1 and sys.argv[1] == "generate")
-
- if not testsfiles:
- print("ERROR: No tests found")
- returncode = 1
-
- for test_playbook in testsfiles:
- if test_playbook in IGNORE:
- continue
-
- nm_testfile, nominal_nm_testfile_data = create_nm_playbook(test_playbook)
-
- is_missing, new_returncode = check_playbook(
- generate=generate,
- testfile=nm_testfile,
- test_playbook=test_playbook,
- nominal_data=nominal_nm_testfile_data,
- )
- if is_missing:
- missing.append(test_playbook)
- if new_returncode:
- returncode = new_returncode
-
- if test_playbook not in NM_ONLY_TESTS:
- init_testfile, nominal_init_testfile_data = create_initscripts_playbook(
- test_playbook
- )
- is_missing, new_returncode = check_playbook(
- generate=generate,
- testfile=init_testfile,
- test_playbook=test_playbook,
- nominal_data=nominal_init_testfile_data,
- )
- if is_missing:
- missing.append(test_playbook)
- if new_returncode:
- returncode = new_returncode
-
- if missing:
- print("ERROR: No NM or initscripts tests found for:\n" + ", \n".join(missing))
- print("Try to generate them with '{} generate'".format(sys.argv[0]))
- returncode = 1
-
- return returncode
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/roles/linux-system-roles.network/tests/files/cacert.key b/roles/linux-system-roles.network/tests/files/cacert.key
deleted file mode 100644
index ee6710d..0000000
--- a/roles/linux-system-roles.network/tests/files/cacert.key
+++ /dev/null
@@ -1,30 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-Proc-Type: 4,ENCRYPTED
-DEK-Info: AES-256-CBC,B773C37C13C791B1B2F735A7D6D22F1D
-
-KcpCACKK2i/zLDkH/e2bM/3hzyuC7UkSJ32Vn2xvH6ukKzOpt71PJjtzucY3TgB7
-T8fYDJ0OGFfW/97M9OSjY10+wo/Vn+aTTCJWe2Y0+JeoV+bFJq33fuP0SlJI1PIU
-CrxnWhFUM3iaDHjuJ32GaUCkLozKTRdb5KT0BttSdSudnT+9d6zHejCwvYEaGek0
-C3fifoN2xC47P+63UF40KWMP0+j83ZRtHXUUgQ9E0Eqmbag6jTBh2TvV/PiaWlRv
-YCVMapOBs0ktSPPJACygRJcR63MocS9of7aRaPMCDP7HpzrjzKnHqJ+bPteuaE4k
-UmVOlrBsJb4g/zpfT4Ee2waT/mKEiRtNhf8a7DNkc34I50iMqhOojM1zRPtQugO6
-5BGhFeciHCe7RzHvltWJRmLrl+H7Z8wvusxbSQRM5ZT18+wgBkgTb8dA3bmZS0Ws
-JYcd9BN8zbsxETo/IFZ2gFOaVvOymVE5mscRR21RsiBi1vfqjl+pAt4ZrlGwVpxL
-3z3yvT3lAx8Cgeg8dCxrDNb14Xwk+hkBblExLMXsUGCsRXJglk9QVPE0XjKD9XNa
-mZnBHOpAsdPun58PRiaPpC+VgaFBhzPHTyBczCG1sjpkOiTJpGLpgveAq4wOXQGH
-PMcux4ZDARYbJfGXANNqloIO3PHDPuhVmSAJZSMixDd4SLKjT6tALdqIv1BvOLl7
-Ay0y3Vie4oGc4EWjHqQA+r+6CATHHXtIOvWLJQ4/KQa/R+pTp0qDtXdOeHaAZzhv
-BpqvQUouKUyxXlGFZrGUq9l+sFtjLlcKP33Yb2WHg4ct0gAVDIA6SK4rNH6+h/NS
-rFQNOvArTeZgLCaG6htJh68WLF8p6687s4bKNM8niZ5VcsFTvMYPbfF5WdE0l53s
-fZpZBf1v03ZRJYg2V9a0sNPEysaIaTJzs5lFeya78iTF/Epo4GtTHv8sWebVwh/H
-FYINLIcPzzxAvw7a+7ymIsYZphomuEoCCoX85DPPbXfZOb2Bdysfdr7uyRsB480E
-or6+gQxZJWxcO5tMR7+G8EuUgnPMelVNczw3UJHM+sl4Kjh9q3hF4ppWFTIOaPQ/
-BL3qPE/ZxSFC8UcG+QJEbNmPPQLXnpWPUZ3GmyH/+pPUZCkcWanpn0W3chGlJCsW
-spkDMt/dpPtje1q7rfrWCVAYo4AeYzigSuxoyfpBfqcpD6wAssPQmWj4fFr91RW7
-p/iLlACpevyecALrJpU65yGWDvGWlx+dEqvdz7FRUSTkVrted/W3pmro8eDAInWx
-17VM0hHfNE00hwpGaga2CY8q3EC+3kApSE6d8dbBtSzBp4YZsGq+p+Xkj7mTc/rn
-mXJazUSPjNhWooI+0pN2VxB3HRBloNjsQOLaWVcSiv6l3wKl70ZbBjPkikO05k+v
-QXayu3i9RjXvhT974atOqoqCSigc8ROsCYGxgHjwVMU9Spc9i8y6PrgX9ID6yk9f
-9YcJjmtEi6MYh0uXNkx2m6utMjgcuAqP8yfPqeBRK2SOoLuBM9JKP8tjwq4ZBawj
-SuWe82zTRjR2oXMgNy6gBBDGky+W7kNaNw/KksZUxdiNhzeDRbDG8hMJI1HcY4xQ
------END RSA PRIVATE KEY-----
diff --git a/roles/linux-system-roles.network/tests/files/cacert.pem b/roles/linux-system-roles.network/tests/files/cacert.pem
deleted file mode 100644
index 5f0181e..0000000
--- a/roles/linux-system-roles.network/tests/files/cacert.pem
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDizCCAnOgAwIBAgIUG1DftQ2xyrN+HE+KHLFmKHZnIkcwDQYJKoZIhvcNAQEL
-BQAwVDELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE
-CgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UEAwwHVGVzdCBDQTAgFw0yMDA1
-MDMwNTI2MTFaGA8yMjk0MDIxNTA1MjYxMVowVDELMAkGA1UEBhMCWFgxFTATBgNV
-BAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQ
-MA4GA1UEAwwHVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-AMGAmO9ugnI/jaw4qNTyh/O65BNEvzOIwLU0mo3wTOSiakoOuC0gqO4S+0FOmC6v
-ceoArS+GllowzrgnnmM4EH9hqmiLeFKa4Z2graIm2W86ayN5k3psiMolONOZ8y0r
-nAMj84FifDYIOHoYbKUeN5BDsotrHbrZ/PZhlZgN1ou3gapXqM12TkXdzaj//vRd
-CORjwO1ubpzb17PFUNOLWaDf3ohfoMCG08UkGwIGK0mouJ1yflda27MCcLzmDxV8
-4dfI//R/6WtN1hzWSW9ae99VwSjlACH2go/0fDD+K9jvKkEVRZAqBEnM3voQCOah
-P9NMJ30R9Sh8B/D2KXGyIU0CAwEAAaNTMFEwHQYDVR0OBBYEFDUKdAwDiWpUpayU
-mjiWEcMcXjQdMB8GA1UdIwQYMBaAFDUKdAwDiWpUpayUmjiWEcMcXjQdMA8GA1Ud
-EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAKEyNiDawDJeaauDUmHgdNlG
-WuBlvn4Lph/+J27njmAoIbKv3aDw+kndxI02ryCZTJOm8a1NqHfkNct4ny+Cj4cz
-rNoZIyMucVoKGgCMYb5zwYtW3W7RshUZoBdQDBLiIuktNsWTyqss3yVPPq8Q1JJY
-89dtjCNydL6dunFSrGjVJ2K5HaTyidti2IN9g2Sbxmxgoz71ZP09xmBxaY+O738M
-z5nRdrb2DX0flmv5pcqSzn7063t9FGKOp2bF9NTpcEWkultsCOvsVcsO4X/18L4J
-3W8FVltyCvunv4GQecWqlNHTRT+QI2h48EVEzHQnOGEe9q1C8WVGeQ3cZXMei8k=
------END CERTIFICATE-----
diff --git a/roles/linux-system-roles.network/tests/files/client.key b/roles/linux-system-roles.network/tests/files/client.key
deleted file mode 100644
index 061b192..0000000
--- a/roles/linux-system-roles.network/tests/files/client.key
+++ /dev/null
@@ -1,31 +0,0 @@
-# password=test
------BEGIN RSA PRIVATE KEY-----
-Proc-Type: 4,ENCRYPTED
-DEK-Info: AES-256-CBC,C4A5E9A189773AB0F3CE3DCC98F208AE
-
-LPNSExpEERS+/qHJxd8puT+EaZ/dZ20gkU/C2eaNNJerzr4moSXG4ioh5ggz4utQ
-w57fD5OYqPiloNIawi/Ta5Opo3zU+iMZPVQALLbemXWXmNMxqxNCGdonc4enxMoN
-auLxpdYPW+infFmf0UPwZjWkrLnK8XFapTGDaNesfgMNSRVSt+DQL3xeKUjcuXfh
-rYvF26/Ls8NHB0tCU449vCa5ta1fHPT78B0cWgCmhcg/L8/0veBYfwxnyu6l3E6Q
-RXWcyaJoihhCSg9kCZOqQFKDtz3B9G8/G8P5n5udN2TYUK0ieCktocOip0r/aUfk
-Rz/NPjej18tuvA9e+uho2DuEj7OV1Rt0Fr6G2NySDYAIjlzM1+GoDdX3R8Rva2eX
-SJYEjQvvLMAXU9wLEGd2u9jw3h8g2rNPF34Mo/fZsU6f83WceN7wzaDjKBM9TC/U
-DjeUpJ2LHr3SduRoq5K7PqTG6LlRx4ZC06P8Gwu/cjlHqHuMlLE6wWPHowp9O08S
-zMzJji6csSzZ5x5U41xiBJd19G0tbfjGBOvxhVLC3hmfqMtRwgeKSZMUz5f0iFvS
-V4LE/ZNXWv5OybEzMyIiQBRB0G8mq5BkQ3rU9uTMO6Xc6mosQy0jiCsQLYaX2IoT
-kyU6ZqPgAeBD3g5zCGudcF4qqY3pWRU6cijpivsuyX58YmulhQJsB2rnoImv8ZOR
-4Uw+fvAx38v/dH/aAGKNdQV/4z+CXpAX4SdqYgBx9wXu6Wva31AVrbDrKnpSlWYF
-M9gAHgpuhW9OH7du/y7sePU6k37fHtqDX0V5XoyeRxixR+KGb8k3tt0HFA1GExSu
-XyXcOOfwec7xNQjZBM9jREI0yO1tCbHEeLsLpQnf31cpfSQumBZoiim6Vyk7vCN8
-YBJ9qiVNrFiVogWl5hUrSS2MLQP1ZQBkedmOeKZpkZ26GW5yY0y27v2mHdhU2Dvd
-otvLGiVKxSXlu+tqt1WkMvu6hcfrDZDCONW7emGW7xs2vdYdvADVlYs/Eb0WFXb1
-tLkwg3v7I23LeFRrKX4Fm5/biG4GuR4sj9iPLayrKWhpujIVFJqHTI3YhjIU56Qp
-uPuClnoFsKrWS9DXaziuuXmLZlXH3e5aOO+M2H3JmXTRCojyjKlIJiJJmHGrfwfe
-oJkSF+ABs2zrpteXU+Cnfn8V01TrtxPYIBF3CbOMZEvwgjPLX0UNtnss0hXH4rJe
-9yF/PiKWehUow8q4Gpwt2PnLkUWyL21GwCwXf5Cq3yRAKtyrJTlJsdYV1f3brzfb
-JkBgKaFJ44Ee7D75PAio8g/BIDpvUdZVXwn3FizjfAU+HhXonPSYb2M34C6I/frk
-mJPgZ5hbpt1SoCCER48+rQygiLdNQH6OsuhJeEElPFYwNo6i5jZsZ9iE0rmJxGgk
-m7Mhi491NdK8L6Kh8kM2Dgupsfcstmx4+pI3gmgnsYZApmFoQlfcg4MhbWqxznv+
-cPm1n2SZMoMLru44vbnjW+ZAggen5zNZOrsVt8UImSBVKfAIrgDUuYIv7uqUiKHI
-yHmAkZDlqEbpkbUG9m60OeuEIgpN7MT3Kod387ZyOu9uaTZWdD18/N83E4eFecND
------END RSA PRIVATE KEY-----
diff --git a/roles/linux-system-roles.network/tests/files/client.key.nocrypt b/roles/linux-system-roles.network/tests/files/client.key.nocrypt
deleted file mode 100644
index b1e1dbf..0000000
--- a/roles/linux-system-roles.network/tests/files/client.key.nocrypt
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAuQipSk9+0rd/qBMDRiFzV8vDksaueVphejGEgiQhqtUDgjc/
-ot/o7M8fFVC6wau2ixTnEMHuZXgoBOKATxX805FggEsLLL98OnN7AyTTKOtHVfIm
-gK3fJ1Y9l95+2nuJWhmaan0vr3YMp6z3lSa+hlhhTYx/mIvTZho/K3+METg8DEfl
-QUSkhAlrFSEahr2Pu/yETr8c+8vKTDnZDLvcFyyuDtAz+clEQUVndWJQpQpSVfR9
-4xKuzaj10mUA9Utv4RkbNJ78/KgdTbaGIOVLUnYCJUg8d3/YV7aNCqraHBAZ9aoP
-S4dl46KXC3qpaEBFfKaF+RcPSVUtc4eCQ74kKwIDAQABAoIBAGoArUN2IVjEaSy3
-n7OIrFSK1oL6sa+x+JARWDFaU7NTj0wFLL65ee5Yhh0m/6a+IbiyA+IUx+d3m62Y
-uRsVpJ7r9RXqZ/99v8SYrctSSGpzx41USXyEn4ggnu6nN5MhHMHyUwVYrH3fqkZR
-EBFxfcrnTO8pY1vYFwayWKgpzOt7ip30JF1E7RH0IWfA2koJ+hZgSumPmF31btBK
-eqDaQ168u0at6I7nYvRIWVT68D2k+PMb/c/rlOUYSyy+VfCgnShWD+m1hlyaDF1c
-cbVvOhsul3rFeEqbToGN/6yyDDcyolTvYxMm3vb6jmoExZyRsShv0XyhokSuCN9P
-v5SeNpkCgYEA7OpIlsZUoTXm2ffCQiZd8gRtKk0O3dzmWTkcNEgj2uUNH6ANNy3W
-gLojKeF2EyC3appRWLVRYN/m6r/Qj+rztZfW3Jw1UJQV+tLEOBzk3yBnRdh1aRgW
-8YTH1+HJqlJ/2iKJRKRhseM5AHiTslp7ude6cWQxO52pJ6Rbp1z3fBUCgYEAx/B4
-LreIDJYDnYSyL/CvVkHEn1hCYX0oBpefzV6ofYDqv0OLe8BWOBsShQ3Crh0FuQTa
-xV2xc+OzDewlu2OwNm4/X0qjXvoWkEMLBXKEHjPyxnbHLCYaaA/9ENmVIkc8aZWE
-p7KcCYGlfiHpbdYWAD8KYdv5CsFHFbwhPwrD7z8CgYAEtsSq+1dDvebR/3QGDO1h
-m2TwqofZMkQDEnfVMnpEKLqSHoUky+ywswNwGeRXjRcZL+jecv0jiFD36skjk/E1
-c8f6q8ED0W5+hyMQWsLTDboAUcZESQ5rz9CKIxv4H5wbowRIMV0gRP0lXUDTE6nS
-kNBM4Ul5fjGXcFXChr8F4QKBgGSmAeoKi9tCHTnLVePaNnmmi/Nm+6uV1HNVGqXI
-k+rx3bpAp1O5o+2Ee1MtdSYvB/V2oyadnrnnEvjcOrZVXZxY7V/r88fY/0jJ5x9r
-4WRO5FTR8DuiRsLB4bP8xB1IXPoNwYSl3fTPJd8T9S1MizC+i1xt3rVyTHV9igLx
-SWcDAoGBAMoynJvQUOssWwFTtNQK0ptz95rrTkO2bri+8MJfSh8tessekwPHVe6M
-SBofFhDiesrHBHczJ61qDnb3GemA0kEbo023mxNo0HPam+OFgX5mrihizBZnRZjh
-aecVouDd0uwacsB76fwP6Fl5GhkFvOSBKr2IKNJjUMXyvW8/XGZE
------END RSA PRIVATE KEY-----
diff --git a/roles/linux-system-roles.network/tests/files/client.pem b/roles/linux-system-roles.network/tests/files/client.pem
deleted file mode 100644
index a2f4517..0000000
--- a/roles/linux-system-roles.network/tests/files/client.pem
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDrDCCApSgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwVDELMAkGA1UEBhMCWFgx
-FTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55
-IEx0ZDEQMA4GA1UEAwwHVGVzdCBDQTAgFw0yMDA1MDMwODUxMTdaGA8yMjk0MDIx
-NTA4NTExN1owXzELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEc
-MBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEbMBkGA1UEAwwSY2xpZW50LmV4
-YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuQipSk9+
-0rd/qBMDRiFzV8vDksaueVphejGEgiQhqtUDgjc/ot/o7M8fFVC6wau2ixTnEMHu
-ZXgoBOKATxX805FggEsLLL98OnN7AyTTKOtHVfImgK3fJ1Y9l95+2nuJWhmaan0v
-r3YMp6z3lSa+hlhhTYx/mIvTZho/K3+METg8DEflQUSkhAlrFSEahr2Pu/yETr8c
-+8vKTDnZDLvcFyyuDtAz+clEQUVndWJQpQpSVfR94xKuzaj10mUA9Utv4RkbNJ78
-/KgdTbaGIOVLUnYCJUg8d3/YV7aNCqraHBAZ9aoPS4dl46KXC3qpaEBFfKaF+RcP
-SVUtc4eCQ74kKwIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1P
-cGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUoUCV4T3pFwaQ
-HYSlCr8Iqdd+/TcwHwYDVR0jBBgwFoAUNQp0DAOJalSlrJSaOJYRwxxeNB0wDQYJ
-KoZIhvcNAQELBQADggEBALXhDSFirybmhZXcHuSqXn0tLp6mZintW+91B81bDUtO
-FuCrWqXwV0iensm94mOeykGIR/r0Y0Y4uqOHpIznY+q5NIek0qIdirbdr5mCXK5y
-fxXVIMM14GMTyIR9A4+IZaRkFbcrVnBhOdUpTQjp88jlzDr5jdyjTEnOZyOJH9kL
-Qpd417iB4X5TxuQ2xe5EgHOCb8OfxO0a2BzlwtfUQAkz2v+h0RlVBwQFcE2NCJ3z
-hvF3AWGl+5pkfWpY6d+1EPI3+82C6uRf8be/WKHPKu3i0irrVtZdMsKNkRiD5UUK
-S4Y0WnoVu/DWSR8h9iPGSFKMkUcjFI8hgc4YQ6G4Odc=
------END CERTIFICATE-----
diff --git a/roles/linux-system-roles.network/tests/files/dh.pem b/roles/linux-system-roles.network/tests/files/dh.pem
deleted file mode 100644
index 4dfaa5a..0000000
--- a/roles/linux-system-roles.network/tests/files/dh.pem
+++ /dev/null
@@ -1,8 +0,0 @@
------BEGIN DH PARAMETERS-----
-MIIBCAKCAQEAjbYPkANn2XGqDGCzse9wAfM0I5WJpp+Xl+iNJFmaKXBguo0BPYQt
-hZOpJbKL3aNaFsRxhdAJ8UXzBP6oIzCejcGti+jw+xtVk8ietWEK6e91yi+Ak2g2
-/Xtt9hoYQkeoe5hkcv35NcJ0xdQwlSvMbY/j8HtKamx/A3zu+YPQAe/3AOe3L+JT
-iEL5Gw00NPVnyEWKX4fVchAbMUkRsQKeXtsyOyDc4/RccjfLa1toyj8PRommK5UH
-dkSqi04FTOUIx6aTwt21EehJuggLVDShoQdxGV+FzXmdtelLmerGMtVPBbf8DSkN
-MKMBEg4d28DzjXPAWUHMD+JGPzAlvf87EwIBAg==
------END DH PARAMETERS-----
diff --git a/roles/linux-system-roles.network/tests/files/server.key b/roles/linux-system-roles.network/tests/files/server.key
deleted file mode 100644
index 872ea8f..0000000
--- a/roles/linux-system-roles.network/tests/files/server.key
+++ /dev/null
@@ -1,31 +0,0 @@
-# password=test
------BEGIN RSA PRIVATE KEY-----
-Proc-Type: 4,ENCRYPTED
-DEK-Info: AES-256-CBC,ED349A8B098E2D1DB70C30F77EF599AB
-
-j1rzje2sWFk3B9kD6eE7WrqVDynFEJ3t3kdOv0iUvH5Ybll1C7Qx3EFEdoM4z2OV
-E6q3nr2DOvpMPox1DvBdIipWOQWJxkZyBHqNn4v4GR4c0uxLswsk7XSBQLUclRsn
-QBGO6x8pcEA9u/O3PSrTt+pVozWrXWmR2UHNM//9WUsRpWF4Lv0EINzsfwmD7aJQ
-nRcSfXsCggXP6wnJX5dgo5PlRm6R+bodgzePr0QRlh8TT6wnixZfWalYM5iUKlEF
-GcE+VejZuBL69byl2AcRt8I5tQ+UZxmzhKPSsYN0NKD8vbcVVnp2sre/rbdTzWz5
-laF386g1M8QBimDE/V3Bw5b9Bg1ZP3arlpugVXGVNA+HFti8PVdkaMqLgkFIC2Xu
-OwmNKffAPIItuB8leg5A76oLoIlllRqjWO9M/O+MqAlrJ96xLRiUeGkez4Pp7eFV
-30YrlOXyzwZKfXoOPIfE5Mbz4CPqR67XuqW8jOryIGOryMB17b0+vdRpDY0wxk8/
-lGmc5rglDxLFA8dNemAHDednasCuVlrbsQsZRnPkKavXiSu7QCbvm1frAXZfnyRp
-TpPmE6L4+nEy8PQnK/IxOCqRcy6e1SPezRpajRjB5ooDT8hDmDkG47NdnrB+kOKL
-5LIpATLSGS9IVk0RW/M8EqJP1kRh2JOCQT3V+gUN0ttz8bjZpivKnp76/ztg0lo0
-oC2lhuXV5HOYHw1z5jDazsYpQDYoHgYWXnzPJJp6Ecn+nkjZMKQjDV9ZqE1miPrZ
-E4V0ULNmWaAQHvwc98yR97ui1YHmw5XVMoeDhy3fhB6IOyaGGdEj9o2iQr8kp9GC
-dxBKK/xMOU6kwDF9Nsfh46veRGTbhAJdGeWqdxscdCupkO8KRtZqzL454+9GnYfe
-n1f7wxJh7aTLNjF2an5Qa9v7uU6D58+9blxG7ls5qGt4xjBNAXCc8bPpmLqeCW4G
-Xz8iwxECvwWIQ+SjUcXuP8+/NO58B14kDNP03+1gA7AHIesa2CTvHLCyMPaN2oGK
-3R4LNxQQDNygEzRj8vHjURU1FNRJ4RjCi7SbqoOsl31Hvef6j0lcW0Sz4UICcCJI
-p4NPnApoaHewL4exvlJ80qPbFscuVevXBlUC2LdxXS+9E+c0NaLauEeNYCUoaBDi
-HIpbxRKXmqLc4LAKYVuEcIBFhdXp3UC9niVd7Nrguu0lUJXC78OzpltxWrqX/u4E
-O2aCNK0Yg9U+rxm6wyccqEyptIS2GRCIpUGD/LVF3mOC16NB/JeYGrOWvDptdCeg
-9pJrakJjE1Fm3pg4Xc74bT6IDj0EKwKSvZhtlcsM9JaXWChe/ZrDPPI/NP6MuyW4
-jcqpa9HPBBSyaxKsEPXFJhdhrz8VfsU2e5VvcALaJaAOpHwZgaNUpvpsY4LPW9mi
-lHsecEBiq6re0r7TAgBE1AnlaI4ho0fKSgSub3NWUZlEaBK3X2n/Li6op6LIsvM5
-iySYaAluQy4dANww0KhQHMIh0jbuZGzmG2Hxk/poorYRf60YJlbTnHVD/FKUdFX+
-rUow0iy8Ez1uF272u5orYW2tBbkhSaieKOT8f4HFCxUsgITbd8Lf/XJ6l6Qns6SK
------END RSA PRIVATE KEY-----
diff --git a/roles/linux-system-roles.network/tests/files/server.pem b/roles/linux-system-roles.network/tests/files/server.pem
deleted file mode 100644
index 461b3ac..0000000
--- a/roles/linux-system-roles.network/tests/files/server.pem
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDrDCCApSgAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwVDELMAkGA1UEBhMCWFgx
-FTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55
-IEx0ZDEQMA4GA1UEAwwHVGVzdCBDQTAgFw0yMDA1MDMwODUxMzBaGA8yMjk0MDIx
-NTA4NTEzMFowXzELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEc
-MBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEbMBkGA1UEAwwSc2VydmVyLmV4
-YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcC44Amd
-KQBDwR67aMTPqmNu6HfjadZqsD2xZj5XMVdn4karqsVYIbKMOq+SRzgm5aZ/kzQI
-CpXJMXfj16cID6BCxNecfJVOfvPyI0kCUbMf1YZiRG2FmB2VsG8AVDGWmn4a7SmX
-yaCA0ac8dkipnlCF2nddLhcBak/Ls+hjRYN7VSLLvxO8KT42ivhuP9YgGY1K5Yta
-e90H4HBiKxbnkwOUxi9wobERSXSLgb4e+uX8WRrqxIIYmHF+Gzv5kilRFrPwKBmo
-3idVPrqjschZe0o8m/nbNo3SzWGI9fdXn0+KgZoQdG3ZixX6uOhrCqJ3iJmnHkp4
-aXKL5Y7JmX/FFQIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1P
-cGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUx7TXCxUioob7
-5r/1kMypCYy9Mj0wHwYDVR0jBBgwFoAUNQp0DAOJalSlrJSaOJYRwxxeNB0wDQYJ
-KoZIhvcNAQELBQADggEBAKtTPl4WJuxfMeut+aEw7vVRU+z5A7D35nlZPQI5nBTt
-ybgqMNIjdcYT/JwT2GhbzcObc3STNEo582clVN9gTpK7mYKzBBf69nTsWeZzPuNt
-JQbVbK4RHwFvyosJcw6NfzxE9OxeXhTcKQDQSGKP338sAWoapEZlXNrYOIJac6HX
-Xo3dQqx/8BdO9hSv1u0/zClnL5lbk1RBylS24wIe8wLoiy4ftLjL4aOYOlonj7HU
-hknTY6L30oOpG5VtH8SEv3xveH/5GNKwfoGltTzemCgVfb9IhyVTLB3tIv8OW6k1
-y3+YEzVniVB4gtJ5UniLN1V4lBf6t7MGn0ybAEbOxPI=
------END CERTIFICATE-----
diff --git a/roles/linux-system-roles.network/tests/get_coverage.sh b/roles/linux-system-roles.network/tests/get_coverage.sh
deleted file mode 100755
index 858a8cf..0000000
--- a/roles/linux-system-roles.network/tests/get_coverage.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#! /bin/bash
-# SPDX-License-Identifier: BSD-3-Clause
-
-if [ -n "${DEBUG}" ]
-then
- set -x
-fi
-set -e
-
-if [ "$#" -lt 2 ]
-then
- echo "USAGE: ${0} host playbook"
- echo "Get coverage info from host for playbook"
- exit 1
-fi
-
-host="${1}"
-shift
-playbook="${1}"
-
-coverage_data="remote-coveragedata-${host}-${playbook%.yml}"
-coverage="/root/.local/bin/coverage"
-
-echo "Getting coverage for ${playbook} on ${host}" >&2
-
-call_ansible() {
- local module="${1}"
- shift
- local args="${1}"
- shift
- ansible -m "${module}" -i "${host}", -a "${args}" all "${@}"
-}
-
-remote_coverage_dir="$(mktemp -d /tmp/remote_coverage-XXXXXX)"
-trap "rm -rf '${remote_coverage_dir}'" EXIT
-ansible-playbook -i "${host}", get_coverage.yml -e "test_playbook=${playbook} destdir=${remote_coverage_dir}"
-
-#COVERAGE_FILE=remote-coverage coverage combine remote-coverage/tests_*/*/root/.coverage
-./merge_coverage.sh coverage "${coverage_data}"-tmp $(find "${remote_coverage_dir}" -type f | tr , _)
-
-cat > tmp_merge_coveragerc <> tmp_merge_coveragerc
-done
-
-COVERAGE_FILE="${coverage_data}" coverage combine --rcfile tmp_merge_coveragerc "${coverage_data}"-tmp
-
-test -n "${DEBUG}" && cat tmp_merge_coveragerc
-rm tmp_merge_coveragerc
-
-COVERAGE_FILE="${coverage_data}" coverage report ||:
-COVERAGE_FILE="${coverage_data}" coverage html --directory "htmlcov-${coverage_data}" ||:
-
-echo "Coverage collected in: ${coverage_data}"
diff --git a/roles/linux-system-roles.network/tests/get_coverage.yml b/roles/linux-system-roles.network/tests/get_coverage.yml
deleted file mode 100644
index 14893f1..0000000
--- a/roles/linux-system-roles.network/tests/get_coverage.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-# This expects the variable test_playbook to be set from the outside
-- name: Prepare for coverage extraction
- hosts: all
- tasks:
- # Use set_fact to set variables to make them available in all plays
- # 'vars:' Would only set variables for the current play
- - name: set facts
- set_fact:
- coverage_module: network_connections
- coverage: /root/.local/bin/coverage
- destdir: "remote_coverage/{{ test_playbook }}"
-
- # This uses variables from the other set_fact task, therefore it needs to
- # be its own task
- - name: set more facts
- set_fact:
- coverage_file:
- # yamllint disable-line rule:line-length
- ansible-coverage-{{ coverage_module }}-{{ test_playbook|replace('.yml', '') }}
-
- - name: debug info
- debug:
- msg:
- # yamllint disable-line rule:line-length
- Getting coverage for '{{ coverage_module }}' with '{{ test_playbook }}'
-
- # combine data in case old data is left there
- - command: "{{ coverage }} combine"
- environment:
- COVERAGE_FILE: "{{ coverage_file }}"
- ignore_errors: yes
-
- - name: remove old data
- file:
- state: absent
- path: "{{ coverage_file }}"
-
- - name: find coverage files to delete
- find:
- path: "{{ ansible_env.HOME }}"
- patterns: ".coverage.*"
- hidden: yes
- register: files_to_delete
-
- - name: remove old data
- file:
- path: "{{ item.path }}"
- state: absent
- with_items: "{{ files_to_delete.files }}"
-
- - name: copy coveragerc
- copy:
- content: "[run]\ndisable_warnings = no-data-collected\n"
- dest: .coveragerc
-
- - name: install latest pip
- pip:
- name: coverage
- extra_args: --user --upgrade
-
-- import_playbook: "{{ test_playbook }}"
- vars:
- ansible_python_interpreter:
- # yamllint disable-line rule:line-length
- "{{ coverage }} run -p --include /*/modules/network_connections.py,/*/module_utils/network_lsr/*"
-
-- name: Gather coverage data
- hosts: all
- tasks:
- - shell: "{{ coverage }} combine .coverage.*"
- environment:
- COVERAGE_FILE: "{{ coverage_file }}"
-
-- name: Get coverage data
- hosts: all
- tasks:
- - fetch:
- src: "{{ coverage_file }}"
- dest: "{{ destdir }}"
- flat: no
diff --git a/roles/linux-system-roles.network/tests/get_total_coverage.sh b/roles/linux-system-roles.network/tests/get_total_coverage.sh
deleted file mode 100755
index ca61746..0000000
--- a/roles/linux-system-roles.network/tests/get_total_coverage.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#! /bin/bash
-# SPDX-License-Identifier: BSD-3-Clause
-
-set -e
-coverage_data=total-coveragedata
-testhost="${1}"
-
-if [ "$#" -lt 1 ]
-then
- echo "USAGE: ${0} host"
- echo "Get local and all remote coverage data for host"
- exit 1
-fi
-
-rm -f remote-coveragedata* "${coveragedata}"
-
-
-# collect pytest coverage
-tox -e py26,py27,py36,py37 -- --cov-append
-
-for test_playbook in tests_*.yml
-do
- ./get_coverage.sh "${testhost}" "${test_playbook}"
-done
-
-./merge_coverage.sh coverage "total-remote-coveragedata" remote-coveragedata-*
-./covstats .coverage remote-coveragedata-* "total-remote-coveragedata"
-
-./merge_coverage.sh coverage "${coverage_data}" .coverage remote-coveragedata-*
-echo "Total coverage:"
-COVERAGE_FILE="${coverage_data}" coverage report ||:
-COVERAGE_FILE="${coverage_data}" coverage html --directory "htmlcov-${coverage_data}" ||:
-echo "Open HTML report with:"
-echo "xdg-open htmlcov-${coverage_data}/index.html"
diff --git a/roles/linux-system-roles.network/tests/integration/conftest.py b/roles/linux-system-roles.network/tests/integration/conftest.py
deleted file mode 100644
index 1fb2c70..0000000
--- a/roles/linux-system-roles.network/tests/integration/conftest.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# -*- coding: utf-8 -*
-# SPDX-License-Identifier: BSD-3-Clause
-
-
-def pytest_addoption(parser):
- parser.addoption(
- "--provider", action="store", default="nm", help="Network provider"
- )
diff --git a/roles/linux-system-roles.network/tests/integration/test_ethernet.py b/roles/linux-system-roles.network/tests/integration/test_ethernet.py
deleted file mode 100644
index d104d23..0000000
--- a/roles/linux-system-roles.network/tests/integration/test_ethernet.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# -*- coding: utf-8 -*
-# SPDX-License-Identifier: BSD-3-Clause
-import logging
-import os
-import subprocess
-
-import pytest
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-parent_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))
-
-with mock.patch.dict(
- "sys.modules",
- {
- "ansible.module_utils.basic": mock.Mock(),
- },
-):
- import network_connections as nc
-
-
-class PytestRunEnvironment(nc.RunEnvironment):
- def log(self, connections, idx, severity, msg, **kwargs):
- if severity == nc.LogLevel.ERROR:
- logging.error("Error: {}".format(connections[idx]))
- raise RuntimeError(msg)
- else:
- logging.debug("Log: {}".format(connections[idx]))
-
- def run_command(self, argv, encoding=None):
- command = subprocess.Popen(
- argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
- )
- return_code = command.wait()
- out, err = command.communicate()
- return return_code, out.decode("utf-8"), err.decode("utf-8")
-
- def _check_mode_changed(self, *args, **kwargs):
- pass
-
-
-def _configure_network(connections, provider):
- cmd = nc.Cmd.create(
- provider,
- run_env=PytestRunEnvironment(),
- connections_unvalidated=connections,
- connection_validator=nc.ArgValidator_ListConnections(),
- )
- cmd.run()
-
-
-@pytest.fixture(scope="session")
-def provider(request):
- return request.config.getoption("--provider")
-
-
-@pytest.fixture
-def testnic1():
- veth_name = "testeth"
- try:
- subprocess.call(
- [
- "ip",
- "link",
- "add",
- veth_name,
- "type",
- "veth",
- "peer",
- "name",
- veth_name + "peer",
- ],
- close_fds=True,
- )
- yield veth_name
- finally:
- subprocess.call(["ip", "link", "delete", veth_name])
-
-
-def _get_ip_addresses(interface):
- ip_address = subprocess.check_output(["ip", "address", "show", interface])
- return ip_address.decode("UTF-8")
-
-
-@pytest.fixture
-def network_lsr_nm_mock():
- with mock.patch.dict(
- "sys.modules",
- {
- "ansible.module_utils.basic": mock.Mock(),
- },
- ):
- yield
-
-
-def test_static_ip_with_ethernet(testnic1, provider, network_lsr_nm_mock):
- ip_address = "192.0.2.127/24"
- connections = [
- {
- "name": testnic1,
- "type": "ethernet",
- "state": "up",
- "ip": {"address": [ip_address]},
- }
- ]
- _configure_network(connections, provider)
- assert ip_address in _get_ip_addresses(testnic1)
- if provider == "initscripts":
- assert os.path.exists("/etc/sysconfig/network-scripts/ifcfg-" + testnic1)
- else:
- subprocess.check_call(["nmcli", "connection", "show", testnic1])
diff --git a/roles/linux-system-roles.network/tests/merge_coverage.sh b/roles/linux-system-roles.network/tests/merge_coverage.sh
deleted file mode 100755
index a33e94d..0000000
--- a/roles/linux-system-roles.network/tests/merge_coverage.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#! /bin/bash
-# SPDX-License-Identifier: BSD-3-Clause
-
-if [ -n "${DEBUG}" ]
-then
- set -x
-fi
-set -e
-
-if [ "$#" -lt 3 ]
-then
- echo "USAGE: ${0} path_to_coverage_binary output_file input_files..."
- echo "Merges all input_files into output file without removing input_files"
- exit 1
-fi
-
-# path to coverage binary
-coverage="${1}"
-shift
-
-# read by coverage binary
-export COVERAGE_FILE="${1}"
-shift
-
-tempdir="$(mktemp -d /tmp/coverage_merge-XXXXXX)"
-trap "rm -rf '${tempdir}'" EXIT
-
-cp --backup=numbered -- "${@}" "${tempdir}"
-# FIXME: Would not work if coverage files are not hidden but they are by
-# default
-shopt -s dotglob
-"${coverage}" combine "${tempdir}/"*
-
-echo "Merged data into ${COVERAGE_FILE}"
-./covstats "${COVERAGE_FILE}"
diff --git a/roles/linux-system-roles.network/tests/module_utils b/roles/linux-system-roles.network/tests/module_utils
deleted file mode 120000
index c3ce1a4..0000000
--- a/roles/linux-system-roles.network/tests/module_utils
+++ /dev/null
@@ -1 +0,0 @@
-../module_utils/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/modules b/roles/linux-system-roles.network/tests/modules
deleted file mode 120000
index d6bf720..0000000
--- a/roles/linux-system-roles.network/tests/modules
+++ /dev/null
@@ -1 +0,0 @@
-../library/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/playbooks/down_profile.yml b/roles/linux-system-roles.network/tests/playbooks/down_profile.yml
deleted file mode 100644
index 5087240..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/down_profile.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Set {{ profile }} down
- hosts: all
- vars:
- network_connections:
- - name: "{{ profile }}"
- state: down
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/tests/playbooks/files b/roles/linux-system-roles.network/tests/playbooks/files
deleted file mode 120000
index feb1228..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/files
+++ /dev/null
@@ -1 +0,0 @@
-../files
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/playbooks/integration_pytest_python3.yml b/roles/linux-system-roles.network/tests/playbooks/integration_pytest_python3.yml
deleted file mode 100644
index 075355b..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/integration_pytest_python3.yml
+++ /dev/null
@@ -1,149 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Install dependencies for integration tests
- hosts: all
- vars:
- - rpmdependencies:
- - git
- - python3-pip
- - rsync
-
- tasks:
- - name: Install rpm dependencies
- package:
- state: present
- name: "{{ rpmdependencies }}"
-
- - name: Install Pytest
- command: "pip3 install pytest"
-
-
-# Import needed in order to install initscripts dependencies on the remote
-# system.
-- import_playbook: "../tests_default_initscripts.yml"
-
-# Import needed in order to install Network Manager dependencies on the remote
-# system.
-- import_playbook: "../tests_default_nm.yml"
-
-
-- name: Run Pytest tests
- hosts: all
- tasks:
- - block:
- - name: create tempdir for code to test
- tempfile:
- state: directory
- prefix: lsrtest_
- register: _rundir
-
- - name: get tempfile for tar
- tempfile:
- prefix: lsrtest_
- suffix: ".tar"
- register: temptar
- delegate_to: localhost
-
- - include_tasks: ../tasks/get_modules_and_utils_paths.yml
-
- - name: get tests directory
- set_fact:
- tests_directory: "{{ lookup('first_found', params) }}"
- vars:
- params:
- files:
- - tests
- - network
- paths:
- - "../.."
-
- # TODO: using tar and copying the file is a workaround for the
- # synchronize module that does not work in test-harness. Related issue:
- # https://github.com/linux-system-roles/test-harness/issues/102
- #
- - name: Create Tar file
- command: >
- tar -cvf {{ temptar.path }} --exclude "*.pyc"
- --exclude "__pycache__"
- -C {{ tests_directory | realpath | dirname }}
- {{ tests_directory | basename }}
- -C {{ modules_parent_and_dir.stdout_lines[0] }}
- {{ modules_parent_and_dir.stdout_lines[1] }}
- -C {{ module_utils_parent_and_dir.stdout_lines[0] }}
- {{ module_utils_parent_and_dir.stdout_lines[1] }}
- delegate_to: localhost
-
- - name: Copy testrepo.tar to the remote system
- copy:
- src: "{{ temptar.path }}"
- dest: "{{ _rundir.path }}"
-
- - name: Untar testrepo.tar
- command: tar xf {{ temptar.path | basename }}
- args:
- chdir: "{{ _rundir.path }}"
-
- - file:
- state: directory
- path: "{{ _rundir.path }}/ansible"
-
- - name: Move module_utils to ansible directory
- shell: |
- if [ -d {{ _rundir.path }}/module_utils ]; then
- mv {{ _rundir.path }}/module_utils {{ _rundir.path }}/ansible
- fi
-
- - name: Fake out python module directories, primarily for python2
- shell: |
- for dir in $(find {{ _rundir.path }} -type d -print); do
- if [ ! -f "$dir/__init__.py" ]; then
- touch "$dir/__init__.py"
- fi
- done
-
- - set_fact:
- _lsr_python_path: "{{
- _rundir.path ~ '/' ~
- modules_parent_and_dir.stdout_lines[1] ~ ':' ~ _rundir.path
- }}"
-
- - debug:
- msg: path {{ _lsr_python_path }}
- - command: ls -alrtFR {{ _rundir.path }}
-
- - block:
- - name: Run pytest with nm
- command: >
- pytest
- {{ _rundir.path }}/{{ tests_directory | basename }}/integration/
- --provider=nm
- register: playbook_run
- environment:
- PYTHONPATH: "{{ _lsr_python_path }}"
- always:
- - debug:
- var: playbook_run.stdout_lines
-
- - block:
- - name: Run pytest with initscripts
- command: >
- pytest
- {{ _rundir.path }}/{{ tests_directory | basename }}/integration/
- --provider=initscripts
- register: playbook_run
- environment:
- PYTHONPATH: "{{ _lsr_python_path }}"
- always:
- - debug:
- var: playbook_run.stdout_lines
- always:
- - name: remove local tar file
- file:
- state: absent
- path: "{{ temptar.path }}"
- delegate_to: localhost
-
- - name: remove tempdir
- file:
- state: absent
- path: "{{ _rundir.path }}"
diff --git a/roles/linux-system-roles.network/tests/playbooks/manual_test_ethtool_coalesce.yml b/roles/linux-system-roles.network/tests/playbooks/manual_test_ethtool_coalesce.yml
deleted file mode 100644
index 8b2c456..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/manual_test_ethtool_coalesce.yml
+++ /dev/null
@@ -1,115 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: "{{ network_interface_name1 }}"
- type: "{{ network_interface_type1 }}"
- tasks:
- - name: "INIT: Ethtool coalesce tests"
- debug:
- msg: "##################################################"
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- - include_tasks: tasks/assert_device_present.yml
- - name: Install ethtool (test dependency)
- package:
- name: ethtool
- state: present
- - block:
- - name: >-
- TEST: I can create a profile without changing the ethtool coalesce.
- debug:
- msg: "##################################################"
- - name: Get current device coalesce
- command: "ethtool --show-coalesce {{ interface }}"
- register: original_ethtool_coalesce
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- - name: Get current device coalesce
- command: "ethtool --show-coalesce {{ interface }}"
- register: ethtool_coalesce
- - name: "ASSERT: The profile does not change the ethtool coalesce"
- assert:
- that:
- - original_ethtool_coalesce.stdout == ethtool_coalesce.stdout
- - name: >-
- TEST: I can set rx-frames and adaptive-tx.
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- ethtool:
- coalesce:
- rx_frames: 1
- tx_frames: 1
- - name: Get current device coalesce
- command: "ethtool --show-coalesce {{ interface }}"
- register: ethtool_coalesce
- - name:
- debug:
- var: ethtool_coalesce.stdout_lines
- - name: Assert device coalesce
- assert:
- that:
- - >-
- 'rx-frames: 1' in
- ethtool_coalesce.stdout_lines
- - >-
- 'tx-frames: 1' in
- ethtool_coalesce.stdout_lines
- - name: "TEST: I can reset coalesce to their original value."
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- - name: Get current device coalesce
- command: "ethtool --show-coalesce {{ interface }}"
- register: ethtool_coalesce
- # Resetting the ethtools only works with NetworkManager
- - name: "ASSERT: The profile does not change the ethtool coalesce"
- assert:
- that:
- - original_ethtool_coalesce.stdout == ethtool_coalesce.stdout
- when:
- network_provider == 'nm'
- always:
- - block:
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- ignore_errors: true
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: absent
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/remove_profile.yml b/roles/linux-system-roles.network/tests/playbooks/remove_profile.yml
deleted file mode 100644
index a50e848..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/remove_profile.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Remove {{ profile }}
- hosts: all
- vars:
- network_connections:
- - name: "{{ profile }}"
- persistent_state: absent
- roles:
- - linux-system-roles.network
diff --git a/roles/linux-system-roles.network/tests/playbooks/roles b/roles/linux-system-roles.network/tests/playbooks/roles
deleted file mode 120000
index 7b9ade8..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/roles
+++ /dev/null
@@ -1 +0,0 @@
-../roles/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/playbooks/run_tasks.yml b/roles/linux-system-roles.network/tests/playbooks/run_tasks.yml
deleted file mode 100644
index ea56720..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/run_tasks.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Run the tasklist {{ task }}
- hosts: all
- tasks:
- - include_tasks: "{{ task }}"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tasks b/roles/linux-system-roles.network/tests/playbooks/tasks
deleted file mode 120000
index 93c76d6..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tasks
+++ /dev/null
@@ -1 +0,0 @@
-../tasks/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_802_1x.yml b/roles/linux-system-roles.network/tests/playbooks/tests_802_1x.yml
deleted file mode 100644
index 9cce1ae..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_802_1x.yml
+++ /dev/null
@@ -1,124 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: 802-1x-test
- tasks:
- - name: "INIT: 802.1x tests"
- debug:
- msg: "##################################################"
- - include_tasks: tasks/setup_802.1x.yml
- - block:
- - name: "TEST: 802.1x profile with private key password and ca cert"
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- interface_name: veth2
- state: up
- type: ethernet
- ip:
- address:
- - 203.0.113.2/24
- dhcp4: "no"
- auto6: "no"
- ieee802_1x:
- identity: myhost
- eap: tls
- private_key: /etc/pki/tls/client.key
- private_key_password: test
- private_key_password_flags:
- - none
- client_cert: /etc/pki/tls/client.pem
- ca_cert: /etc/pki/tls/cacert.pem
- - name: "TEST: I can ping the EAP server"
- command: ping -c1 203.0.113.1
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- - name: >-
- TEST: 802.1x profile with unencrypted private key,
- domain suffix match, and system ca certs
- debug:
- msg: "##################################################"
- - name: Copy cacert to system truststore
- copy:
- src: cacert.pem
- dest: /etc/pki/ca-trust/source/anchors/cacert.pem
- mode: 0644
- - name: Update ca trust
- command: update-ca-trust
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- interface_name: veth2
- state: up
- type: ethernet
- ip:
- address:
- - 203.0.113.2/24
- dhcp4: "no"
- auto6: "no"
- ieee802_1x:
- identity: myhost
- eap: tls
- private_key: /etc/pki/tls/client.key.nocrypt
- client_cert: /etc/pki/tls/client.pem
- private_key_password_flags:
- - not-required
- system_ca_certs: True
- domain_suffix_match: example.com
- - name: "TEST: I can ping the EAP server"
- command: ping -c1 203.0.113.1
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
-
- - include_tasks: tasks/test_802.1x_capath.yml
- always:
- - block:
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- - name: br1
- persistent_state: absent
- state: down
- ignore_errors: true
- - include_tasks: tasks/cleanup_802_1x_server.yml
- - name: Remove test certificates
- file:
- state: absent
- path: "/etc/pki/tls/{{ item }}"
- with_items:
- - client.key
- - client.key.nocrypt
- - client.pem
- - cacert.pem
- - name: Remove test CA
- file:
- state: absent
- path: "{{ item }}"
- with_items:
- - /etc/pki/tls/my_ca_certs
- - /etc/pki/ca-trust/source/anchors/cacert.pem
- - name: Update ca trust
- command: update-ca-trust
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_802_1x_updated.yml b/roles/linux-system-roles.network/tests/playbooks/tests_802_1x_updated.yml
deleted file mode 100644
index 82d5734..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_802_1x_updated.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - name: Update NetworkManager
- package:
- name: NetworkManager
- state: latest
- - name: Restart NetworkManager
- service:
- name: NetworkManager
- state: restarted
-- import_playbook: tests_802_1x.yml
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_bond.yml b/roles/linux-system-roles.network/tests/playbooks/tests_bond.yml
deleted file mode 100644
index 69f07f8..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_bond.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- controller_profile: bond0
- controller_device: nm-bond
- port1_profile: bond0.0
- dhcp_interface1: test1
- port2_profile: bond0.1
- dhcp_interface2: test2
- tasks:
- - name: "INIT Prepare setup"
- debug:
- msg: "##################################################"
- - import_tasks: tasks/create_test_interfaces_with_dhcp.yml
- - import_tasks: tasks/assert_device_present.yml
- vars:
- interface: "{{ dhcp_interface1 }}"
- - import_tasks: tasks/assert_device_present.yml
- vars:
- interface: "{{ dhcp_interface2 }}"
- - block:
- - name: "TEST Add Bond with 2 ports"
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- # Create a bond controller
- - name: "{{ controller_profile }}"
- state: up
- type: bond
- interface_name: "{{ controller_device }}"
- bond:
- mode: active-backup
- miimon: 110
- # add an ethernet to the bond
- - name: "{{ port1_profile }}"
- state: up
- type: ethernet
- interface_name: "{{ dhcp_interface1 }}"
- controller: "{{ controller_profile }}"
- # add a second ethernet to the bond
- - name: "{{ port2_profile }}"
- state: up
- type: ethernet
- interface_name: "{{ dhcp_interface2 }}"
- controller: "{{ controller_profile }}"
- - import_tasks: tasks/assert_device_present.yml
- vars:
- interface: "{{ controller_device }}"
- - include_tasks: tasks/assert_profile_present.yml
- vars:
- profile: "{{ item }}"
- loop:
- - "{{ controller_profile }}"
- - "{{ port1_profile }}"
- - "{{ port2_profile }}"
- - command: grep 'Polling Interval'
- /proc/net/bonding/{{ controller_device }}
- name: "** TEST check polling interval"
- register: result
- until: "'110' in result.stdout"
- - command: ip -4 a s {{ controller_device }}
- name: "** TEST check IPv4"
- register: result
- until: "'192.0.2' in result.stdout"
- retries: 20
- delay: 2
- - command: ip -6 a s {{ controller_device }}
- name: "** TEST check IPv6"
- register: result
- until: "'2001' in result.stdout"
- retries: 20
- delay: 2
- always:
- - block:
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ port2_profile }}"
- persistent_state: absent
- state: down
- - name: "{{ port1_profile }}"
- persistent_state: absent
- state: down
- - name: "{{ controller_profile }}"
- persistent_state: absent
- state: down
- ignore_errors: true
- - command: ip link del {{ controller_device }}
- ignore_errors: true
- - import_tasks: tasks/remove_test_interfaces_with_dhcp.yml
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_bond_deprecated.yml b/roles/linux-system-roles.network/tests/playbooks/tests_bond_deprecated.yml
deleted file mode 100644
index f37e19a..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_bond_deprecated.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- controller_profile: bond0
- controller_device: nm-bond
- port1_profile: bond0.0
- dhcp_interface1: test1
- port2_profile: bond0.1
- dhcp_interface2: test2
- tasks:
- - name: "INIT Prepare setup"
- debug:
- msg: "##################################################"
- - import_tasks: tasks/create_test_interfaces_with_dhcp.yml
- - import_tasks: tasks/assert_device_present.yml
- vars:
- interface: "{{ dhcp_interface1 }}"
- - import_tasks: tasks/assert_device_present.yml
- vars:
- interface: "{{ dhcp_interface2 }}"
- - block:
- - name: "TEST Add Bond with 2 ports using deprecated 'master' argument"
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- # Create a bond controller
- - name: "{{ controller_profile }}"
- state: up
- type: bond
- interface_name: "{{ controller_device }}"
- bond:
- mode: active-backup
- miimon: 110
- # add an ethernet to the bond
- - name: "{{ port1_profile }}"
- state: up
- type: ethernet
- interface_name: "{{ dhcp_interface1 }}"
- master: "{{ controller_profile }}"
- # add a second ethernet to the bond
- - name: "{{ port2_profile }}"
- state: up
- type: ethernet
- interface_name: "{{ dhcp_interface2 }}"
- master: "{{ controller_profile }}"
- - import_tasks: tasks/assert_device_present.yml
- vars:
- interface: "{{ controller_device }}"
- - include_tasks: tasks/assert_profile_present.yml
- vars:
- profile: "{{ item }}"
- loop:
- - "{{ controller_profile }}"
- - "{{ port1_profile }}"
- - "{{ port2_profile }}"
- - command: grep 'Polling Interval'
- /proc/net/bonding/{{ controller_device }}
- name: "** TEST check polling interval"
- register: result
- until: "'110' in result.stdout"
- - command: ip -4 a s {{ controller_device }}
- name: "** TEST check IPv4"
- register: result
- until: "'192.0.2' in result.stdout"
- retries: 20
- delay: 2
- - command: ip -6 a s {{ controller_device }}
- name: "** TEST check IPv6"
- register: result
- until: "'2001' in result.stdout"
- retries: 20
- delay: 2
- always:
- - block:
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ port2_profile }}"
- persistent_state: absent
- state: down
- - name: "{{ port1_profile }}"
- persistent_state: absent
- state: down
- - name: "{{ controller_profile }}"
- persistent_state: absent
- state: down
- ignore_errors: true
- - command: ip link del {{ controller_device }}
- ignore_errors: true
- - import_tasks: tasks/remove_test_interfaces_with_dhcp.yml
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_bridge.yml b/roles/linux-system-roles.network/tests/playbooks/tests_bridge.yml
deleted file mode 100644
index d79d6ad..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_bridge.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Test configuring bridges
- hosts: all
- vars:
- interface: LSR-TST-br31
-
- tasks:
- - name: "set interface={{ interface }}"
- set_fact:
- interface: "{{ interface }}"
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/assert_device_absent.yml
-
-- name: Add test bridge
- hosts: all
- vars:
- network_connections:
- - name: "{{ interface }}"
- interface_name: "{{ interface }}"
- state: up
- type: bridge
- ip:
- dhcp4: no
- auto6: yes
- roles:
- - linux-system-roles.network
-
-- import_playbook: run_tasks.yml
- vars:
- task: tasks/assert_device_present.yml
-
-- import_playbook: run_tasks.yml
- vars:
- profile: "{{ interface }}"
- task: tasks/assert_profile_present.yml
-
-- import_playbook: down_profile.yml
- vars:
- profile: "{{ interface }}"
-# FIXME: assert profile/device down
-
-- import_playbook: remove_profile.yml
- vars:
- profile: "{{ interface }}"
-
-- import_playbook: run_tasks.yml
- vars:
- profile: "{{ interface }}"
- task: tasks/assert_profile_absent.yml
-
-# FIXME: Devices might still be left when profile is absent
-# - import_playbook: run_tasks.yml
-# vars:
-# task: tasks/assert_device_absent.yml
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_checkpoint_cleanup.yml b/roles/linux-system-roles.network/tests/playbooks/tests_checkpoint_cleanup.yml
deleted file mode 100644
index 18e3fd7..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_checkpoint_cleanup.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This test is supposed to check that checkpoints are properly cleaned-up after
-# failures in the module. This test currently uses the initscripts provider to
-# mark a device as unmanaged for NM and then tries to activiate it using NM.
-# This failed without removing the checkpoint.
----
-- hosts: all
- vars:
- interface: cptstbr
- profile: "{{ interface }}"
- network_provider: nm
- pre_tasks:
- - debug:
- msg: Inside states tests
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/assert_device_absent.yml
- roles:
- - linux-system-roles.network
- tasks:
- - block:
- # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1832897
- - package:
- name: dbus-tools
- state: present
- # create test profile
- - include_role:
- name: linux-system-roles.network
- vars:
- network_provider: initscripts
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: bridge
- ip:
- dhcp4: false
- auto6: false
- - include_tasks: tasks/assert_device_present.yml
- - include_tasks: tasks/assert_profile_present.yml
- # Use internal module directly for speedup
- - network_connections:
- provider: nm
- connections:
- - name: "{{ interface }}"
- state: up
- type: bridge
- ip:
- dhcp4: false
- auto6: false
- ignore_errors: true
- register: error_trigger
- - assert:
- fail_msg: The module call did not fail. Therefore the test
- condition was not triggered. This test needs to be adjusted or
- dropped.
- that: error_trigger.failed
- # yamllint disable-line rule:line-length
- - command: busctl --system tree --list org.freedesktop.NetworkManager
- register: nm_dbus_objects
- - debug:
- var: nm_dbus_objects
- - name: Assert that no checkpoints are left
- assert:
- fail_msg: Checkpoints not cleaned up
- that: >
- '/org/freedesktop/NetworkManager/Checkpoint/' not in
- nm_dbus_objects.stdout_lines
- always:
- - block:
- # Use internal module directly for speedup
- - network_connections:
- provider: nm
- connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- - file:
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ interface }}"
- state: absent
- - command: ip link del "{{ interface }}"
- ignore_errors: true
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_dummy.yml b/roles/linux-system-roles.network/tests/playbooks/tests_dummy.yml
deleted file mode 100644
index 8fe8762..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_dummy.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: dummy0
- profile: "{{ interface }}"
- lsr_fail_debug:
- - __network_connections_result
- tasks:
- - debug:
- msg: "this is: playbooks/tests_dummy.yml"
- tags:
- - always
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- lsr_description: Create a dummy interface
- lsr_setup:
- - tasks/delete_interface.yml
- - tasks/assert_device_absent.yml
- lsr_test:
- - tasks/create_dummy_profile.yml
- lsr_assert:
- - tasks/assert_profile_present.yml
- - tasks/assert_device_present.yml
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::dummy:create
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_eth_dns_support.yml b/roles/linux-system-roles.network/tests/playbooks/tests_eth_dns_support.yml
deleted file mode 100644
index 0fe5c09..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_eth_dns_support.yml
+++ /dev/null
@@ -1,110 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
-
-- name: Test configuring ethernet devices
- hosts: all
- vars:
- type: veth
- interface: ethtest0
-
-
- tasks:
- - name: "set type={{ type }} and interface={{ interface }}"
- set_fact:
- type: "{{ type }}"
- interface: "{{ interface }}"
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- - include_tasks: tasks/assert_device_present.yml
-
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- interface_name: "{{ interface }}"
- state: up
- type: ethernet
- autoconnect: yes
- ip:
- route_metric4: 100
- dhcp4: no
- gateway4: 192.0.2.1
- dns:
- - 192.0.2.2
- - 198.51.100.5
- dns_search:
- - example.com
- - example.org
- dns_options:
- - rotate
- - timeout:1
-
- route_metric6: -1
- auto6: no
- gateway6: 2001:db8::1
-
- address:
- - 192.0.2.3/24
- - 198.51.100.3/26
- - 2001:db8::80/7
-
- route:
- - network: 198.51.100.128
- prefix: 26
- gateway: 198.51.100.1
- metric: 2
- - network: 198.51.100.64
- prefix: 26
- gateway: 198.51.100.6
- metric: 4
- route_append_only: no
- rule_append_only: yes
-
- - name: Verify nmcli connection DNS entry
- shell: |
- set -euxo pipefail
- nmcli connection show {{ interface }} | grep ipv4.dns
- register: ipv4_dns
- ignore_errors: yes
-
- - name: "Assert that DNS addresses are configured correctly"
- assert:
- that:
- - "'192.0.2.2' in ipv4_dns.stdout"
- - "'198.51.100.5' in ipv4_dns.stdout"
- msg: "DNS addresses are configured incorrectly"
-
- - name: "Assert that DNS search domains are configured correctly"
- assert:
- that:
- - "'example.com' in ipv4_dns.stdout"
- - "'example.org' in ipv4_dns.stdout"
- msg: "DNS search domains are configured incorrectly"
-
- - name: "Assert that DNS options are configured correctly"
- assert:
- that:
- - "'rotate' in ipv4_dns.stdout"
- - "'timeout:1' in ipv4_dns.stdout"
- msg: "DNS options are configured incorrectly"
-
-- import_playbook: down_profile.yml
- vars:
- profile: "{{ interface }}"
-# FIXME: assert profile/device down
-- import_playbook: remove_profile.yml
- vars:
- profile: "{{ interface }}"
-# FIXME: assert profile away
-- name: Remove interfaces
- hosts: all
- tasks:
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: absent
- - include_tasks: tasks/assert_device_absent.yml
-...
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_ethernet.yml b/roles/linux-system-roles.network/tests/playbooks/tests_ethernet.yml
deleted file mode 100644
index cd02579..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_ethernet.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - debug:
- msg: Inside ethernet tests
- - debug:
- var: network_provider
-
-- name: Test configuring ethernet devices
- hosts: all
- vars:
- type: veth
- interface: lsr27
-
- tasks:
- - name: "set type={{ type }} and interface={{ interface }}"
- set_fact:
- type: "{{ type }}"
- interface: "{{ interface }}"
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- - include_tasks: tasks/assert_device_present.yml
-
-- name: Test static interface up
- hosts: all
- vars:
- network_connections:
- - name: "{{ interface }}"
- interface_name: "{{ interface }}"
- state: up
- type: ethernet
- autoconnect: yes
- ip:
- address: 192.0.2.1/24
- roles:
- - linux-system-roles.network
- tasks:
- - include_tasks: tasks/assert_output_in_stderr_without_warnings.yml
-
-- hosts: all
- tasks:
- - debug:
- var: network_provider
-
-# FIXME: assert profile present
-# FIXME: assert profile/device up + IP address
-- import_playbook: down_profile.yml
- vars:
- profile: "{{ interface }}"
-# FIXME: assert profile/device down
-- import_playbook: remove_profile.yml
- vars:
- profile: "{{ interface }}"
-# FIXME: assert profile away
-- name: Remove interfaces
- hosts: all
- tasks:
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: absent
- - include_tasks: tasks/assert_device_absent.yml
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_coalesce.yml b/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_coalesce.yml
deleted file mode 100644
index 62ff0e1..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_coalesce.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: testnic1
- type: veth
- tasks:
- - debug:
- msg: "this is: playbooks/tests_ethtool_coalesce.yml"
- tags:
- - always
-
- - name: "INIT: Ethtool coalesce tests"
- debug:
- msg: "##################################################"
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- - include_tasks: tasks/assert_device_present.yml
- - name: Install ethtool (test dependency)
- package:
- name: ethtool
- state: present
-
- - block:
- - name: >-
- TEST: I can create a profile without any coalescing option.
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- type: ethernet
- autoconnect: no
- ip:
- dhcp4: no
- auto6: no
- - name: Get profile's coalescing options
- command: nmcli -g ethtool.coalesce-rx-frames c show {{ interface }}
- register: no_coalesce
- - name: "ASSERT: The profile does not contain coalescing options"
- assert:
- that: no_coalesce.stdout == ""
- - name: >-
- TEST: I can set rx-frames.
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- type: ethernet
- autoconnect: no
- ip:
- dhcp4: no
- auto6: no
- ethtool:
- coalesce:
- rx_frames: 128
- - name: Get profile's coalescing options
- command: nmcli -g ethtool.coalesce-rx-frames c show {{ interface }}
- register: with_coalesce
- - name: Assert coalesce options set in profile
- assert:
- that: with_coalesce.stdout == '128'
- - name: "TEST: I can clear coalescing options"
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- type: ethernet
- autoconnect: no
- ip:
- dhcp4: no
- auto6: no
- - name: Get profile's coalescing options
- command: nmcli -g ethtool.coalesce-rx-frames c show {{ interface }}
- register: profile
- - name: "ASSERT: The profile does reset coalescing options"
- assert:
- that: profile.stdout == ""
- always:
- - block:
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- ignore_errors: true
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: absent
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml b/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml
deleted file mode 100644
index 43fddc3..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_ethtool_features.yml
+++ /dev/null
@@ -1,200 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: testnic1
- type: veth
- tasks:
- - debug:
- msg: "this is: playbooks/tests_ethtool_features.yml"
- tags:
- - always
-
- - name: "INIT: Ethtool features tests"
- debug:
- msg: "##################################################"
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- - include_tasks: tasks/assert_device_present.yml
- - name: Install ethtool (test dependency)
- package:
- name: ethtool
- state: present
-
-
- - block:
- - name: >-
- TEST: I can create a profile without changing the ethtool features.
- debug:
- msg: "##################################################"
- - name: Get current device features
- command: "ethtool --show-features {{ interface }}"
- register: original_ethtool_features
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- - name: Get current device features
- command: "ethtool --show-features {{ interface }}"
- register: ethtool_features
- - name: "ASSERT: The profile does not change the ethtool features"
- assert:
- that:
- - original_ethtool_features.stdout == ethtool_features.stdout
-
-
- - name: >-
- TEST: I can disable gro and tx-tcp-segmentation and enable gso.
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- ethtool:
- features:
- gro: "no"
- gso: "yes"
- tx-tcp-segmentation: "no"
- - name: Get current device features
- command: "ethtool --show-features {{ interface }}"
- register: ethtool_features
- - name:
- debug:
- var: ethtool_features.stdout_lines
- - name: Assert device features
- assert:
- that:
- - >-
- 'generic-receive-offload: off' in
- ethtool_features.stdout_lines
- - >-
- 'generic-segmentation-offload: on' in
- ethtool_features.stdout_lines
- - >-
- 'tx-tcp-segmentation: off' in
- ethtool_features.stdout_lines | map('trim')
-
-
- - name: >-
- TEST: I can enable tx_tcp_segmentation (using underscores).
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- ethtool:
- features:
- tx_tcp_segmentation: "yes"
- - name: Get current device features
- command: "ethtool --show-features {{ interface }}"
- register: ethtool_features
- - name:
- debug:
- var: ethtool_features.stdout_lines
- - name: Assert device features
- assert:
- that:
- - >-
- 'tx-tcp-segmentation: on' in
- ethtool_features.stdout_lines | map('trim')
-
-
- - name: I cannot change tx_tcp_segmentation and tx-tcp-segmentation at
- the same time.
- block:
- - name: >-
- TEST: Change feature with both underscores and dashes.
- debug:
- msg: "##################################################"
- - network_connections:
- provider: "{{ network_provider | mandatory }}"
- connections:
- - name: "{{ interface }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- ethtool:
- features:
- tx_tcp_segmentation: "no"
- tx-tcp-segmentation: "no"
- register: __network_connections_result
- rescue:
- - name: Show network_connections result
- debug:
- var: __network_connections_result
- - assert:
- that:
- - '{{ "fatal error: configuration error:
- connections[0].ethtool.features: duplicate key
- ''tx_tcp_segmentation''" in
- __network_connections_result.msg }}'
- always:
- - name: Check failure
- debug:
- var: __network_connections_result
- - assert:
- that: "{{ __network_connections_result.failed == true }}"
-
-
- - name: "TEST: I can reset features to their original value."
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: ethernet
- ip:
- dhcp4: "no"
- auto6: "no"
- - name: Get current device features
- command: "ethtool --show-features {{ interface }}"
- register: ethtool_features
- # Resetting the ethtools only works with NetworkManager
- - name: "ASSERT: The profile does not change the ethtool features"
- assert:
- that:
- - original_ethtool_features.stdout == ethtool_features.stdout
- when:
- network_provider == 'nm'
- always:
- - block:
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- ignore_errors: true
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: absent
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_ipv6.yml b/roles/linux-system-roles.network/tests/playbooks/tests_ipv6.yml
deleted file mode 100644
index 98d98d1..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_ipv6.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- type: veth
- interface: veth0
- tasks:
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- - name: Set up gateway ip on veth peer
- shell: |
- ip netns add ns1
- ip link set peer{{ interface }} netns ns1
- ip netns exec ns1 ip -6 addr add 2001:db8::1/32 dev peer{{ interface }}
- ip netns exec ns1 ip link set peer{{ interface }} up
- when:
- # netns not available on RHEL/CentOS 6
- - ansible_distribution_major_version != '6'
- - block:
- - name: >-
- TEST: I can configure an interface with static ipv6 config
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- type: ethernet
- state: up
- ip:
- dhcp4: false
- auto6: false
- address:
- - "2001:db8::2/32"
- - "2001:db8::3/32"
- - "2001:db8::4/32"
- gateway6: "2001:db8::1"
- - include_tasks: tasks/assert_device_present.yml
- - include_tasks: tasks/assert_profile_present.yml
- vars:
- profile: "{{ interface }}"
- - name: Get ip address information
- command: "ip addr show {{ interface }}"
- register: ip_addr
- - name:
- debug:
- var: ip_addr.stdout
- - name: Assert ipv6 addresses are correctly set
- assert:
- that:
- - >-
- 'inet6 2001:db8::2/32' in ip_addr.stdout
- - >-
- 'inet6 2001:db8::3/32' in ip_addr.stdout
- - >-
- 'inet6 2001:db8::4/32' in ip_addr.stdout
- - name: Get ipv6 routes
- command: "ip -6 route"
- register: ipv6_route
- - name:
- debug:
- var: ipv6_route.stdout
- - name: Assert default ipv6 route is set
- assert:
- that:
- - >-
- "default via 2001:db8::1 dev {{ interface }}"
- in ipv6_route.stdout
- - name: Test gateway can be pinged
- command: ping6 -c1 2001:db8::1
- when:
- - ansible_distribution_major_version != '6'
- always:
- - name: "TEARDOWN: remove profiles."
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- ignore_errors: true
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: absent
- - name: Clean up namespace
- command: ip netns delete ns1
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_ipv6_disabled.yml b/roles/linux-system-roles.network/tests/playbooks/tests_ipv6_disabled.yml
deleted file mode 100644
index 590b346..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_ipv6_disabled.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
-
-- name: Test configuring ethernet devices
- hosts: all
- vars:
- type: veth
- interface: ethtest0
-
- tasks:
- - name: "set type={{ type }} and interface={{ interface }}"
- set_fact:
- type: "{{ type }}"
- interface: "{{ interface }}"
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- - include_tasks: tasks/assert_device_present.yml
-
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- interface_name: "{{ interface }}"
- type: ethernet
- ip:
- ipv6_disabled: true
-
- - name: Verify nmcli connection ipv6.method
- shell: |
- set -euxo pipefail
- nmcli connection show {{ interface }} | grep ipv6.method
- register: ipv6_method
- ignore_errors: yes
-
- - name: "Assert that ipv6.method disabled is configured correctly"
- assert:
- that:
- - "'disabled' in ipv6_method.stdout"
- msg: "ipv6.method disabled is configured incorrectly"
-
-- import_playbook: down_profile.yml
- vars:
- profile: "{{ interface }}"
-# FIXME: assert profile/device down
-- import_playbook: remove_profile.yml
- vars:
- profile: "{{ interface }}"
-# FIXME: assert profile away
-- name: Remove interfaces
- hosts: all
- tasks:
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: absent
- - include_tasks: tasks/assert_device_absent.yml
-...
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_provider.yml b/roles/linux-system-roles.network/tests/playbooks/tests_provider.yml
deleted file mode 100644
index 1db2d08..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_provider.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: testnic1
- profile: "{{ interface }}"
- lsr_fail_debug:
- - __network_connections_result
- tasks:
- - debug:
- msg: "this is: playbooks/tests_provider.yml"
- tags:
- - always
-
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- state: present
- lsr_description: I can manage a veth interface with NM after I
- managed it with initscripts.
- lsr_setup:
- - tasks/setup_test_interface.yml
- # run role once with defaults but nm provider to ensure that
- # NetworkManager is running
- - tasks/provider/default_with_nm.yml
- - tasks/provider/create_and_remove_with_initscripts.yml
- lsr_test:
- - tasks/provider/create_with_nm.yml
- lsr_assert:
- - tasks/assert_profile_present.yml
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::provider:initscripts_to_nm
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_reapply.yml b/roles/linux-system-roles.network/tests/playbooks/tests_reapply.yml
deleted file mode 100644
index 4b1cb09..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_reapply.yml
+++ /dev/null
@@ -1,66 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This test should check whether the NMDevice.reapply method is called by the
-# role. This is probably a good candidate to test with pytest directly instead
-# of via Ansible. Until there is better test support for this, just check the
-# log output for the respective log message.
----
-- hosts: all
- vars:
- interface: rpltstbr
- profile: "{{ interface }}"
- network_provider: nm
- pre_tasks:
- - debug:
- msg: Inside states tests
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/assert_device_absent.yml
- roles:
- - linux-system-roles.network
- tasks:
- - block:
- # create test profile
- - include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: bridge
- ip:
- dhcp4: false
- auto6: false
- - include_tasks: tasks/assert_device_present.yml
- - include_tasks: tasks/assert_profile_present.yml
- # Use internal module to get output
- - network_connections:
- provider: nm
- connections:
- - name: "{{ interface }}"
- state: up
- type: bridge
- ip:
- address:
- - 192.0.2.72/31
- dhcp4: false
- auto6: false
- ignore_errors: true
- register: test_module_run
- - debug:
- var: test_module_run
- - name: Assert that reapply is found in log output
- assert:
- fail_msg: Reapply not found in log output
- that: "{{ 'connection reapplied' in test_module_run.stderr }}"
- always:
- - block:
- # Use internal module directly for speedup
- - network_connections:
- provider: nm
- connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- - command: ip link del "{{ interface }}"
- ignore_errors: true
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_states.yml b/roles/linux-system-roles.network/tests/playbooks/tests_states.yml
deleted file mode 100644
index eec27c0..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_states.yml
+++ /dev/null
@@ -1,137 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: statebr
- profile: "{{ interface }}"
- lsr_fail_debug:
- - __network_connections_result
- tasks:
- - debug:
- msg: "this is: playbooks/tests_states.yml"
- tags:
- - always
-
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- lsr_description: I can create a profile
- lsr_setup:
- - tasks/delete_interface.yml
- - tasks/assert_device_absent.yml
- lsr_test:
- - tasks/create_bridge_profile.yml
- lsr_assert:
- - tasks/assert_profile_present.yml
- lsr_assert_when:
- # Device should be present because of autoconnect: true by
- # default for NM (this might be considered a bug)
- - what: tasks/assert_device_present.yml
- when: "{{ network_provider == 'nm' }}"
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::states:create
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- lsr_description: I can create a profile without autoconnect
- lsr_setup:
- - tasks/delete_interface.yml
- - tasks/assert_device_absent.yml
- lsr_test:
- - tasks/create_bridge_profile_no_autoconnect.yml
- lsr_assert:
- # Device should be absent because of autoconnect: false
- - tasks/assert_device_absent.yml
- - tasks/assert_profile_present.yml
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::states:create_without_autoconnect
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- lsr_description: I can activate an existing profile
- lsr_setup:
- - tasks/create_bridge_profile.yml
- lsr_test:
- - tasks/activate_profile.yml
- lsr_assert:
- - tasks/assert_device_present.yml
- - tasks/assert_profile_present.yml
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::states:activate
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- lsr_description: I can remove an existing profile without taking it
- down
- lsr_setup:
- - tasks/create_bridge_profile.yml
- - tasks/activate_profile.yml
- lsr_test:
- - tasks/remove_profile.yml
- lsr_assert:
- - tasks/assert_device_present.yml
- - tasks/assert_profile_absent.yml
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::states:remove_up
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- lsr_description: I can take a profile down that is absent
- lsr_setup:
- - tasks/create_bridge_profile.yml
- - tasks/activate_profile.yml
- - tasks/remove_profile.yml
- lsr_test:
- - tasks/remove+down_profile.yml
- lsr_assert:
- - tasks/assert_profile_absent.yml
- lsr_assert_when:
- - what: tasks/assert_device_absent.yml
- when: "{{ network_provider == 'nm' }}"
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::states:remove_down
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- lsr_description: I will not get an error when I try to
- remove an absent profile
- lsr_setup:
- - tasks/create_bridge_profile.yml
- - tasks/activate_profile.yml
- - tasks/remove+down_profile.yml
- lsr_test:
- - tasks/remove+down_profile.yml
- lsr_assert:
- - tasks/assert_profile_absent.yml
- # FIXME: This needs to be included before lsr_assert_when but
- # after the role ran to ensure that NetworkManager is actually
- # installed but it is not an assert.
- - tasks/get_NetworkManager_NVR.yml
- lsr_assert_when:
- - what: tasks/assert_device_absent.yml
- # NetworkManager 1.18.4 from CentOS does not seem to remove the
- # virtual interface in this case but it seems to work with
- # 1:NetworkManager-1.27.0-26129.d0a2eb8f05.el7
- when: "{{ network_provider == 'nm' and
- NetworkManager_NVR != 'NetworkManager-1.18.4-3.el7'
- }}"
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::states:remove_down_twice
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_team.yml b/roles/linux-system-roles.network/tests/playbooks/tests_team.yml
deleted file mode 100644
index 67a7b80..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_team.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: team0
- profile: "{{ interface }}"
- lsr_fail_debug:
- - __network_connections_result
- tasks:
- - debug:
- msg: "this is: playbooks/tests_team.yml"
- tags:
- - always
-
- - block:
- - include_tasks: tasks/run_test.yml
- vars:
- lsr_description: Create a team interface without any port attached
- lsr_setup:
- - tasks/delete_interface.yml
- - tasks/assert_device_absent.yml
- lsr_test:
- - tasks/create_team_profile.yml
- lsr_assert:
- - tasks/assert_profile_present.yml
- - tasks/assert_device_present.yml
- lsr_cleanup:
- - tasks/cleanup_profile+device.yml
- tags:
- - tests::team:create
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_team_plugin_installation.yml b/roles/linux-system-roles.network/tests/playbooks/tests_team_plugin_installation.yml
deleted file mode 100644
index 88690a3..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_team_plugin_installation.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - name: remove the NetworkManager-team package
- package:
- name: "NetworkManager-team"
- state: absent
-
- - name: "get the rpm package facts"
- package_facts:
- manager: "auto"
-
- - name: "Assert NetworkManager-team removed before team configuration"
- assert:
- that:
- - "'NetworkManager-team' not in ansible_facts.packages"
- msg: "NetworkManager-team is not removed before team configuration"
-
- - name: "Team interface configuration"
- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- # Specify the team profile
- - name: team0
- persistent_state: present
- type: team
- interface_name: team0
-
- - name: "get the rpm package facts"
- package_facts:
- manager: "auto"
-
- - name: "Assert NetworkManager-team is installed after team configuration"
- assert:
- that:
- - "'NetworkManager-team' in ansible_facts.packages"
- msg: "NetworkManager-team is not installed after team configuration"
-...
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml b/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml
deleted file mode 100644
index 029b599..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_vlan_mtu.yml
+++ /dev/null
@@ -1,67 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- type: veth
- interface: lsr101
- vlan_interface: lsr101.90
- tasks:
- - include_tasks: tasks/show_interfaces.yml
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- - include_tasks: tasks/assert_device_present.yml
- - name: >-
- TEST: I can configure the MTU for a vlan interface without autoconnect.
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- type: ethernet
- state: up
- mtu: 1492
- autoconnect: false
- ip:
- dhcp4: false
- auto6: false
-
- - name: "{{ vlan_interface }}"
- parent: "{{ interface }}"
- type: vlan
- vlan_id: 90
- mtu: 1280
- state: up
- autoconnect: false
- ip:
- dhcp4: false
- auto6: false
- - include_tasks: tasks/assert_device_present.yml
- vars:
- interface: "{{ vlan_interface }}"
- - include_tasks: tasks/assert_profile_present.yml
- vars:
- profile: "{{ item }}"
- loop:
- - "{{ interface }}"
- - "{{ vlan_interface }}"
-
- - name: "TEARDOWN: remove profiles."
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- - name: "{{ vlan_interface }}"
- persistent_state: absent
- state: down
- ignore_errors: true
- - include_tasks: tasks/manage_test_interface.yml
- vars:
- state: absent
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_wireless.yml b/roles/linux-system-roles.network/tests/playbooks/tests_wireless.yml
deleted file mode 100644
index 822a15e..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_wireless.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- vars:
- interface: wlan0
- tasks:
- - name: "INIT: wireless tests"
- debug:
- msg: "##################################################"
- - include_tasks: tasks/setup_mock_wifi.yml
- - name: Copy client certs
- copy:
- src: "{{ item }}"
- dest: "/etc/pki/tls/{{ item }}"
- mode: 0644
- with_items:
- - client.key
- - client.pem
- - cacert.pem
- - block:
- - name: "TEST: wireless connection with WPA-PSK"
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_allow_restart: true
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: wireless
- ip:
- address:
- - 203.0.113.2/24
- dhcp4: "no"
- auto6: "no"
- wireless:
- ssid: "mock_wifi"
- key_mgmt: "wpa-psk"
- password: "p@55w0rD"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- - name: "TEST: wireless connection with 802.1x TLS-EAP"
- debug:
- msg: "##################################################"
- - import_role:
- name: linux-system-roles.network
- vars:
- network_allow_restart: true
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: wireless
- ip:
- address:
- - 203.0.113.2/24
- dhcp4: "no"
- auto6: "no"
- wireless:
- ssid: "mock_wifi"
- key_mgmt: "wpa-eap"
- ieee802_1x:
- identity: myhost
- eap: tls
- private_key: /etc/pki/tls/client.key
- private_key_password: test
- private_key_password_flags:
- - none
- client_cert: /etc/pki/tls/client.pem
- ca_cert: /etc/pki/tls/cacert.pem
- always:
- - block:
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
- ignore_errors: true
- - include_tasks: tasks/cleanup_mock_wifi.yml
- tags:
- - "tests::cleanup"
diff --git a/roles/linux-system-roles.network/tests/playbooks/tests_wireless_plugin_installation.yml b/roles/linux-system-roles.network/tests/playbooks/tests_wireless_plugin_installation.yml
deleted file mode 100644
index 8288bc7..0000000
--- a/roles/linux-system-roles.network/tests/playbooks/tests_wireless_plugin_installation.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - name: remove the NetworkManager-wifi package
- package:
- name: "NetworkManager-wifi"
- state: absent
-
- - name: "get the rpm package facts"
- package_facts:
- manager: "auto"
-
- - name: "Assert NetworkManager-wifi removed before wireless configuration"
- assert:
- that:
- - "'NetworkManager-wifi' not in ansible_facts.packages"
- msg: "NetworkManager-wifi is not removed before wireless configuration"
-
- - name: "wireless configuration"
- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: wlan0
- type: wireless
- wireless:
- ssid: "My WPA2-PSK Network"
- key_mgmt: "wpa-psk"
- password: "p@55w0rD"
-
- - name: "get the rpm package facts"
- package_facts:
- manager: "auto"
-
- - name: "Assert NetworkManager-wifi installed after wireless configuration"
- assert:
- that:
- - "'NetworkManager-wifi' in ansible_facts.packages"
- msg: "NetworkManager-wifi is not installed after wireless configured"
diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/defaults b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/defaults
deleted file mode 120000
index feb6623..0000000
--- a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/defaults
+++ /dev/null
@@ -1 +0,0 @@
-../../../defaults/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/library b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/library
deleted file mode 120000
index d0b7393..0000000
--- a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/meta b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/meta
deleted file mode 120000
index a8df40c..0000000
--- a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/meta
+++ /dev/null
@@ -1 +0,0 @@
-../../../meta/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/module_utils b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/module_utils
deleted file mode 120000
index ad35115..0000000
--- a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/module_utils
+++ /dev/null
@@ -1 +0,0 @@
-../../../module_utils/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/tasks b/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/tasks
deleted file mode 120000
index f5bbba4..0000000
--- a/roles/linux-system-roles.network/tests/roles/linux-system-roles.network/tasks
+++ /dev/null
@@ -1 +0,0 @@
-../../../tasks/
\ No newline at end of file
diff --git a/roles/linux-system-roles.network/tests/setup_module_utils.sh b/roles/linux-system-roles.network/tests/setup_module_utils.sh
deleted file mode 100755
index 18d6a00..0000000
--- a/roles/linux-system-roles.network/tests/setup_module_utils.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: MIT
-
-set -euo pipefail
-
-if [ -n "${DEBUG:-}" ] ; then
- set -x
-fi
-
-if [ ! -d "${1:-}" ] ; then
- echo Either ansible is not installed, or there is no ansible/module_utils
- echo in $1 - Skipping
- exit 0
-fi
-
-if [ ! -d "${2:-}" ] ; then
- echo Role has no module_utils - Skipping
- exit 0
-fi
-
-# we need absolute path for $2
-absmoddir=$( readlink -f "$2" )
-
-# clean up old links to module_utils
-for item in "$1"/* ; do
- if lnitem=$( readlink "$item" ) && test -n "$lnitem" ; then
- case "$lnitem" in
- *"${2}"*) rm -f "$item" ;;
- esac
- fi
-done
-
-# add new links to module_utils
-for item in "$absmoddir"/* ; do
- case "$item" in
- *__pycache__) continue;;
- *.pyc) continue;;
- esac
- bnitem=$( basename "$item" )
- ln -s "$item" "$1/$bnitem"
-done
diff --git a/roles/linux-system-roles.network/tests/tasks/activate_profile.yml b/roles/linux-system-roles.network/tests/tasks/activate_profile.yml
deleted file mode 100644
index dea878c..0000000
--- a/roles/linux-system-roles.network/tests/tasks/activate_profile.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/assert_device_absent.yml b/roles/linux-system-roles.network/tests/tasks/assert_device_absent.yml
deleted file mode 100644
index 39fa30a..0000000
--- a/roles/linux-system-roles.network/tests/tasks/assert_device_absent.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include: get_interface_stat.yml
-- name: "assert that interface {{ interface }} is absent"
- assert:
- that: not interface_stat.stat.exists
- msg: "{{ interface }} exists"
diff --git a/roles/linux-system-roles.network/tests/tasks/assert_device_present.yml b/roles/linux-system-roles.network/tests/tasks/assert_device_present.yml
deleted file mode 100644
index 583b729..0000000
--- a/roles/linux-system-roles.network/tests/tasks/assert_device_present.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include: get_interface_stat.yml
-- name: "assert that interface {{ interface }} is present"
- assert:
- that: interface_stat.stat.exists
- msg: "{{ interface }} does not exist"
diff --git a/roles/linux-system-roles.network/tests/tasks/assert_output_in_stderr_without_warnings.yml b/roles/linux-system-roles.network/tests/tasks/assert_output_in_stderr_without_warnings.yml
deleted file mode 100644
index d760d3d..0000000
--- a/roles/linux-system-roles.network/tests/tasks/assert_output_in_stderr_without_warnings.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: "Assert that warnings is empty"
- assert:
- that:
- - "'warnings' not in __network_connections_result"
- msg: "There are unexpected warnings"
-- name: "Assert that there is output in stderr"
- assert:
- that:
- - "'stderr' in __network_connections_result"
- msg: "There are no messages in stderr"
diff --git a/roles/linux-system-roles.network/tests/tasks/assert_profile_absent.yml b/roles/linux-system-roles.network/tests/tasks/assert_profile_absent.yml
deleted file mode 100644
index 13f6eb1..0000000
--- a/roles/linux-system-roles.network/tests/tasks/assert_profile_absent.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include: get_profile_stat.yml
-- name: "assert that profile '{{ profile }}' is absent"
- assert:
- that: not lsr_net_profile_exists
- msg: "profile {{ profile }} does exist"
diff --git a/roles/linux-system-roles.network/tests/tasks/assert_profile_present.yml b/roles/linux-system-roles.network/tests/tasks/assert_profile_present.yml
deleted file mode 100644
index 8e3bb0b..0000000
--- a/roles/linux-system-roles.network/tests/tasks/assert_profile_present.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include: get_profile_stat.yml
-- name: "assert that profile '{{ profile }}' is present"
- assert:
- that: lsr_net_profile_exists
- msg: "profile {{ profile }} does not exist"
diff --git a/roles/linux-system-roles.network/tests/tasks/cleanup_802_1x_server.yml b/roles/linux-system-roles.network/tests/tasks/cleanup_802_1x_server.yml
deleted file mode 100644
index 2d1a888..0000000
--- a/roles/linux-system-roles.network/tests/tasks/cleanup_802_1x_server.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Remove test interfaces
- shell: |
- ip netns delete ns1
- ip link delete veth1-br
- ip link delete veth2-br
- ip link delete br1
-
-- name: Kill hostapd process
- shell: pkill hostapd
-- name: Remove certs and config
- file:
- state: absent
- path: "{{ item }}"
- with_items:
- - /etc/pki/tls/hostapd_test
- - /etc/hostapd/wired.conf
- - /etc/hostapd/hostapd.eap_user
diff --git a/roles/linux-system-roles.network/tests/tasks/cleanup_mock_wifi.yml b/roles/linux-system-roles.network/tests/tasks/cleanup_mock_wifi.yml
deleted file mode 100644
index a80f337..0000000
--- a/roles/linux-system-roles.network/tests/tasks/cleanup_mock_wifi.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Unload mac80211_hwsim module
- shell: modprobe -r mac80211_hwsim
-
-- name: Kill hostapd process
- shell: pkill hostapd
diff --git a/roles/linux-system-roles.network/tests/tasks/cleanup_profile+device.yml b/roles/linux-system-roles.network/tests/tasks/cleanup_profile+device.yml
deleted file mode 100644
index 92d1eba..0000000
--- a/roles/linux-system-roles.network/tests/tasks/cleanup_profile+device.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- shell: |
- nmcli con delete {{ interface }}
- nmcli con load /etc/sysconfig/network-scripts/ifcfg-{{ interface }}
- rm -f /etc/sysconfig/network-scripts/ifcfg-{{ interface }}
- ip link del {{ interface }}
- ignore_errors: true
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/create_and_remove_interface.yml b/roles/linux-system-roles.network/tests/tasks/create_and_remove_interface.yml
deleted file mode 100644
index 7ed96d4..0000000
--- a/roles/linux-system-roles.network/tests/tasks/create_and_remove_interface.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_tasks: show_interfaces.yml
-- include_tasks: manage_test_interface.yml
- vars:
- state: absent
-- include_tasks: show_interfaces.yml
-- include_tasks: assert_device_absent.yml
-
-- include_tasks: manage_test_interface.yml
- vars:
- state: present
-- include_tasks: show_interfaces.yml
-- include_tasks: assert_device_present.yml
-
-- include_tasks: manage_test_interface.yml
- vars:
- state: absent
-- include_tasks: show_interfaces.yml
-- include_tasks: assert_device_absent.yml
diff --git a/roles/linux-system-roles.network/tests/tasks/create_bridge_profile.yml b/roles/linux-system-roles.network/tests/tasks/create_bridge_profile.yml
deleted file mode 100644
index e846127..0000000
--- a/roles/linux-system-roles.network/tests/tasks/create_bridge_profile.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: present
- type: bridge
- ip:
- dhcp4: false
- auto6: false
-- debug:
- var: __network_connections_result
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/create_bridge_profile_no_autoconnect.yml b/roles/linux-system-roles.network/tests/tasks/create_bridge_profile_no_autoconnect.yml
deleted file mode 100644
index 308f57d..0000000
--- a/roles/linux-system-roles.network/tests/tasks/create_bridge_profile_no_autoconnect.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- autoconnect: false
- persistent_state: present
- type: bridge
- ip:
- dhcp4: false
- auto6: false
-- debug:
- var: __network_connections_result
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/create_dummy_profile.yml b/roles/linux-system-roles.network/tests/tasks/create_dummy_profile.yml
deleted file mode 100644
index 950be6b..0000000
--- a/roles/linux-system-roles.network/tests/tasks/create_dummy_profile.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- type: dummy
- ip:
- address:
- - "192.0.2.42/30"
-- debug:
- var: __network_connections_result
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/create_team_profile.yml b/roles/linux-system-roles.network/tests/tasks/create_team_profile.yml
deleted file mode 100644
index 8bd36b0..0000000
--- a/roles/linux-system-roles.network/tests/tasks/create_team_profile.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: present
- type: team
- ip:
- dhcp4: false
- auto6: false
-- debug:
- var: __network_connections_result
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/create_test_interfaces_with_dhcp.yml b/roles/linux-system-roles.network/tests/tasks/create_test_interfaces_with_dhcp.yml
deleted file mode 100644
index 97d27b3..0000000
--- a/roles/linux-system-roles.network/tests/tasks/create_test_interfaces_with_dhcp.yml
+++ /dev/null
@@ -1,73 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Install dnsmasq
- package:
- name: dnsmasq
- state: present
-
-
-- name: Create test interfaces
- shell: |
- # NM to see veth devices starting with test* as managed after ip add..
- echo 'ENV{ID_NET_DRIVER}=="veth",\
- ENV{INTERFACE}=="test*", \
- ENV{NM_UNMANAGED}="0"' >/etc/udev/rules.d/88-veth.rules
- udevadm control --reload-rules
- udevadm settle --timeout=5
-
- # Setuptwo devices with IPv4/IPv6 auto support
- ip link add {{dhcp_interface1}} type veth peer name {{dhcp_interface1}}p
- ip link set {{dhcp_interface1}}p up
- ip link add {{dhcp_interface2}} type veth peer name {{dhcp_interface2}}p
- ip link set {{dhcp_interface2}}p up
-
- # Create the 'testbr' - providing both 10.x ipv4 and 2620:52:0 ipv6 dhcp
- ip link add name testbr type bridge forward_delay 0
- ip link set testbr up
- ip addr add 192.0.2.1/24 dev testbr
- ip -6 addr add 2001:DB8::1/32 dev testbr
-
- if grep 'release 6' /etc/redhat-release; then
- # We need bridge-utils and radvd only in rhel6
- if ! rpm -q --quiet radvd; then yum -y install radvd; fi
- if ! rpm -q --quiet bridge-utils; then yum -y install bridge-utils; fi
-
- # We need to add iptables rule to allow dhcp request
- iptables -I INPUT -i testbr -p udp --dport 67:68 --sport 67:68 -j ACCEPT
-
- # Add {{dhcp_interface1}}, {{dhcp_interface2}} peers into the testbr
- brctl addif testbr {{dhcp_interface1}}p
- brctl addif testbr {{dhcp_interface2}}p
-
- # in RHEL6 /run is not present
- mkdir -p /run
-
- # and dnsmasq does not support ipv6
- dnsmasq \
- --pid-file=/run/dhcp_testbr.pid \
- --dhcp-leasefile=/run/dhcp_testbr.lease \
- --dhcp-range=192.0.2.1,192.0.2.254,240 \
- --interface=testbr --bind-interfaces
-
- # start radvd for ipv6
- echo 'interface testbr {' > /etc/radvd.conf
- echo ' AdvSendAdvert on;' >> /etc/radvd.conf
- echo ' prefix 2001:DB8::/64 { ' >> /etc/radvd.conf
- echo ' AdvOnLink on; }; ' >> /etc/radvd.conf
- echo ' }; ' >> /etc/radvd.conf
-
- # enable ipv6 forwarding
- sysctl -w net.ipv6.conf.all.forwarding=1
- service radvd restart
-
- else
- ip link set {{dhcp_interface1}}p master testbr
- ip link set {{dhcp_interface2}}p master testbr
- # Run joint DHCP4/DHCP6 server with RA enabled in veth namespace
- dnsmasq \
- --pid-file=/run/dhcp_testbr.pid \
- --dhcp-leasefile=/run/dhcp_testbr.lease \
- --dhcp-range=192.0.2.1,192.0.2.254,240 \
- --dhcp-range=2001:DB8::10,2001:DB8::1FF,slaac,64,240 \
- --enable-ra --interface=testbr --bind-interfaces
- fi
diff --git a/roles/linux-system-roles.network/tests/tasks/delete_interface.yml b/roles/linux-system-roles.network/tests/tasks/delete_interface.yml
deleted file mode 100644
index 064e17f..0000000
--- a/roles/linux-system-roles.network/tests/tasks/delete_interface.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: remove test interface if necessary
- command: "ip link del {{ interface }}"
- ignore_errors: true
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/el_repo_setup.yml b/roles/linux-system-roles.network/tests/tasks/el_repo_setup.yml
deleted file mode 100644
index 0656e8c..0000000
--- a/roles/linux-system-roles.network/tests/tasks/el_repo_setup.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-- name: Fix CentOS6 Base repo
- copy:
- dest: /etc/yum.repos.d/CentOS-Base.repo
- content: |
- [base]
- name=CentOS-$releasever - Base
- baseurl=https://vault.centos.org/6.10/os/$basearch/
- gpgcheck=1
- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
-
- [updates]
- name=CentOS-$releasever - Updates
- baseurl=https://vault.centos.org/6.10/updates/$basearch/
- gpgcheck=1
- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
-
- [extras]
- name=CentOS-$releasever - Extras
- baseurl=https://vault.centos.org/6.10/extras/$basearch/
- gpgcheck=1
- gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
- when:
- - ansible_distribution == 'CentOS'
- - ansible_distribution_major_version == '6'
-- include_tasks: enable_epel.yml
diff --git a/roles/linux-system-roles.network/tests/tasks/enable_epel.yml b/roles/linux-system-roles.network/tests/tasks/enable_epel.yml
deleted file mode 100644
index 7924bd4..0000000
--- a/roles/linux-system-roles.network/tests/tasks/enable_epel.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Enable EPEL {{ ansible_distribution_major_version }}
- # yamllint disable-line rule:line-length
- command: yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
- args:
- warn: false
- creates: /etc/yum.repos.d/epel.repo
- when:
- - ansible_distribution in ['RedHat', 'CentOS']
- - ansible_distribution_major_version in ['7', '8']
-
-- name: Enable EPEL 6
- copy:
- dest: /etc/yum.repos.d/epel.repo
- content: |
- [epel]
- name=Extra Packages for Enterprise Linux 6 - $basearch
- baseurl=https://archives.fedoraproject.org/pub/archive/epel/6/$basearch
- enabled=1
- gpgcheck=0
- when:
- - ansible_distribution in ['RedHat', 'CentOS']
- - ansible_distribution_major_version == '6'
diff --git a/roles/linux-system-roles.network/tests/tasks/get_NetworkManager_NVR.yml b/roles/linux-system-roles.network/tests/tasks/get_NetworkManager_NVR.yml
deleted file mode 100644
index 6e0b4e6..0000000
--- a/roles/linux-system-roles.network/tests/tasks/get_NetworkManager_NVR.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- block:
- - name: Get NetworkManager RPM version
- command:
- cmd: rpm -qa --qf '%{name}-%{version}-%{release}\n' NetworkManager
- warn: false
- register: __rpm_q_NetworkManager
-
- - name: Store NetworkManager version
- set_fact:
- NetworkManager_NVR: "{{ __rpm_q_NetworkManager.stdout }}"
-
- - name: Show NetworkManager version
- debug:
- var: NetworkManager_NVR
- tags:
- - always
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/get_current_interfaces.yml b/roles/linux-system-roles.network/tests/tasks/get_current_interfaces.yml
deleted file mode 100644
index 33a4a76..0000000
--- a/roles/linux-system-roles.network/tests/tasks/get_current_interfaces.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- command: ls -1
- args:
- chdir: /sys/class/net
- register: _current_interfaces
-- set_fact:
- current_interfaces: "{{ _current_interfaces.stdout_lines }}"
diff --git a/roles/linux-system-roles.network/tests/tasks/get_interface_stat.yml b/roles/linux-system-roles.network/tests/tasks/get_interface_stat.yml
deleted file mode 100644
index a8b8e5b..0000000
--- a/roles/linux-system-roles.network/tests/tasks/get_interface_stat.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: "Get stat for interface {{ interface }}"
- stat:
- get_attributes: false
- get_checksum: false
- get_mime: false
- path: "/sys/class/net/{{ interface }}"
- register: interface_stat
diff --git a/roles/linux-system-roles.network/tests/tasks/get_modules_and_utils_paths.yml b/roles/linux-system-roles.network/tests/tasks/get_modules_and_utils_paths.yml
deleted file mode 100644
index c71f169..0000000
--- a/roles/linux-system-roles.network/tests/tasks/get_modules_and_utils_paths.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: set collection paths
- set_fact:
- collection_paths: |
- {{
- (lookup("env","ANSIBLE_COLLECTIONS_PATH").split(":") +
- lookup("env","ANSIBLE_COLLECTIONS_PATHS").split(":") +
- lookup("config", "COLLECTIONS_PATHS")) |
- select | list
- }}
-
-- name: set search paths
- set_fact:
- modules_search_path: |
- {{
- (lookup("env", "ANSIBLE_LIBRARY").split(":") +
- ["../../library", "../library"] +
- lookup("config", "DEFAULT_MODULE_PATH")) |
- select | list
- }}
- module_utils_search_path: |
- {{
- (lookup("env", "ANSIBLE_MODULE_UTILS").split(":") +
- ["../../module_utils", "../module_utils"] +
- lookup("config", "DEFAULT_MODULE_UTILS_PATH")) |
- select | list
- }}
-
-# the output should be something like
-# - path to parent directory to chdir to in order to use tar
-# - relative path under parent directory to tar
-# e.g. for the local role case
-# - ../..
-# - library
-# would translate to tar -C ../.. library
-# for the collection case
-# - /home/user/.ansible/collections
-# - ansible_collections/fedora/linux_system_roles/plugins/modules
-# would translate to tar -C /home/user/.ansible/collections \
-# ansible_collections/fedora/linux_system_roles/plugins/modules
-- name: find parent directory and path of modules
- shell: |
- set -euxo pipefail
- for dir in {{ modules_search_path | join(" ") }}; do
- if [ -f "$dir/network_connections.py" ]; then
- readlink -f "$(dirname "$dir")"
- basename "$dir"
- exit 0
- fi
- done
- for dir in {{ collection_paths | join(" ") }}; do
- if [ ! -d "$dir" ]; then continue; fi
- cd "$dir"
- for subdir in ansible_collections/*/*/plugins/modules; do
- if [ -f "$subdir/network_connections.py" ]; then
- echo "$dir"
- echo "$subdir"
- exit 0
- fi
- done
- done
- echo network_connections.py not found
- exit 1
- delegate_to: localhost
- register: modules_parent_and_dir
-
-- name: find parent directory and path of module_utils
- shell: |
- set -euxo pipefail
- for dir in {{ module_utils_search_path | join(" ") }}; do
- if [ -d "$dir/network_lsr" ]; then
- readlink -f "$(dirname "$dir")"
- basename "$dir"
- exit 0
- fi
- done
- for dir in {{ collection_paths | join(" ") }}; do
- if [ ! -d "$dir" ]; then continue; fi
- cd "$dir"
- for subdir in ansible_collections/*/*/plugins/module_utils; do
- if [ -d "$subdir/network_lsr" ]; then
- echo "$dir"
- echo "$subdir"
- exit 0
- fi
- done
- done
- echo network_lsr not found
- exit 1
- delegate_to: localhost
- register: module_utils_parent_and_dir
diff --git a/roles/linux-system-roles.network/tests/tasks/get_profile_stat.yml b/roles/linux-system-roles.network/tests/tasks/get_profile_stat.yml
deleted file mode 100644
index efe3a9e..0000000
--- a/roles/linux-system-roles.network/tests/tasks/get_profile_stat.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- set_fact: lsr_net_profile_exists=false
-
-- name: stat profile file
- stat:
- get_attributes: false
- get_checksum: false
- get_mime: false
- path: /etc/sysconfig/network-scripts/ifcfg-{{ profile }}
- register: profile_stat
-
-- set_fact: lsr_net_profile_exists=true
- when: profile_stat.stat.exists
-
-# When certain profile is marked as absent but still up, the `nmcli connection`
-# still show it with FILENAME starting with /run. Only consider profile exists
-# when its FILENAME is in /etc folder
-- shell: nmcli -f NAME,FILENAME connection show |grep {{ profile }} | grep /etc
- register: nm_profile_exists
- ignore_errors: yes
-
-- set_fact: lsr_net_profile_exists=true
- when: nm_profile_exists.rc == 0
diff --git a/roles/linux-system-roles.network/tests/tasks/manage_test_interface.yml b/roles/linux-system-roles.network/tests/tasks/manage_test_interface.yml
deleted file mode 100644
index f421c27..0000000
--- a/roles/linux-system-roles.network/tests/tasks/manage_test_interface.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- fail:
- msg: "state needs to be present or absent, not '{{ state }}'"
- when: state not in ["present", "absent"]
-
-- fail:
- msg: "type needs to be dummy, tap or veth, not '{{ type }}'"
- when: type not in ["dummy", "tap", "veth"]
-
-- include: show_interfaces.yml
-
-- name: Install iproute
- package:
- name: iproute
- state: present
-
-# veth
-- name: Create veth interface {{ interface }}
- command: "{{ item }}"
- with_items:
- - ip link add {{ interface }} type veth peer name peer{{ interface }}
- - ip link set peer{{ interface }} up
- - ip link set {{ interface }} up
- when: "type == 'veth' and state == 'present' and
- interface not in current_interfaces"
-- name: Set up veth as managed by NetworkManager
- shell: nmcli d set {{ interface }} managed true
- # The varible for `network_provider` is not exists yet,
- # just ignore error for initscripts
- ignore_errors: yes
- when: "type == 'veth' and state == 'present'"
-
-- name: Delete veth interface {{ interface }}
- command: ip link del {{ interface }} type veth
- when: "type == 'veth' and state == 'absent' and
- interface in current_interfaces"
-
-# dummy
-- name: Create dummy interface {{ interface }}
- command: ip link add "{{ interface }}" type dummy
- when: "type == 'dummy' and state == 'present' and
- interface not in current_interfaces"
-
-- name: Delete dummy interface {{ interface }}
- command: ip link del "{{ interface }}" type dummy
- when: "type == 'dummy' and state == 'absent' and
- interface in current_interfaces"
-
-# tap
-- name: Create tap interface {{ interface }}
- command: ip tuntap add dev {{ interface }} mode tap
- when: "type == 'tap' and state == 'present'
- and interface not in current_interfaces"
-
-- name: Delete tap interface {{ interface }}
- command: ip tuntap del dev {{ interface }} mode tap
- when: "type == 'tap' and state == 'absent' and
- interface in current_interfaces"
diff --git a/roles/linux-system-roles.network/tests/tasks/provider/create_and_remove_with_initscripts.yml b/roles/linux-system-roles.network/tests/tasks/provider/create_and_remove_with_initscripts.yml
deleted file mode 100644
index fd011cb..0000000
--- a/roles/linux-system-roles.network/tests/tasks/provider/create_and_remove_with_initscripts.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- persistent_state: present
- type: ethernet
- autoconnect: yes
- ip:
- address: 192.0.2.1/24
- network_provider: initscripts
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: down
- persistent_state: absent
- network_provider: initscripts
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/provider/create_with_nm.yml b/roles/linux-system-roles.network/tests/tasks/provider/create_with_nm.yml
deleted file mode 100644
index 077841c..0000000
--- a/roles/linux-system-roles.network/tests/tasks/provider/create_with_nm.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- state: up
- persistent_state: present
- type: ethernet
- autoconnect: yes
- ip:
- address: 192.0.2.1/24
- network_provider: nm
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/provider/default_with_nm.yml b/roles/linux-system-roles.network/tests/tasks/provider/default_with_nm.yml
deleted file mode 100644
index 967bb7f..0000000
--- a/roles/linux-system-roles.network/tests/tasks/provider/default_with_nm.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections: []
- network_provider: nm
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/remove+down_profile.yml b/roles/linux-system-roles.network/tests/tasks/remove+down_profile.yml
deleted file mode 100644
index 99c0c8e..0000000
--- a/roles/linux-system-roles.network/tests/tasks/remove+down_profile.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
- state: down
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/remove_profile.yml b/roles/linux-system-roles.network/tests/tasks/remove_profile.yml
deleted file mode 100644
index a7dbc12..0000000
--- a/roles/linux-system-roles.network/tests/tasks/remove_profile.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface }}"
- persistent_state: absent
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/remove_test_interfaces_with_dhcp.yml b/roles/linux-system-roles.network/tests/tasks/remove_test_interfaces_with_dhcp.yml
deleted file mode 100644
index 59b13a6..0000000
--- a/roles/linux-system-roles.network/tests/tasks/remove_test_interfaces_with_dhcp.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Remove test interfaces
- shell: |
- ip link delete {{dhcp_interface1}}
- ip link delete {{dhcp_interface2}}
- ip link delete testbr
-
- # Remove udev rule for NM to see veth devices starting with test*.....
- rm -rf /etc/udev/rules.d/88-veth.rules
- udevadm control --reload-rules
- udevadm settle --timeout=5
-
-
-- name: Stop dnsmasq/radvd services
- shell: |
- pkill -F /run/dhcp_testbr.pid
- rm -rf /run/dhcp_testbr.pid
- rm -rf /run/dhcp_testbr.lease
- if grep 'release 6' /etc/redhat-release; then
- # Stop radvd server
- service radvd stop
- iptables -D INPUT -i testbr -p udp --dport 67:68 --sport 67:68 -j ACCEPT
-
- fi
diff --git a/roles/linux-system-roles.network/tests/tasks/run_test.yml b/roles/linux-system-roles.network/tests/tasks/run_test.yml
deleted file mode 100644
index cc9676e..0000000
--- a/roles/linux-system-roles.network/tests/tasks/run_test.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Run test
- block:
- - name: "TEST: {{ lsr_description }}"
- debug:
- msg: "########## {{ lsr_description }} ##########"
-
- - debug:
- var: "{{ item }}"
- loop:
- - lsr_description
- - lsr_setup
- - lsr_test
- - lsr_assert
- - lsr_assert_when
- - lsr_fail_debug
- - lsr_cleanup
-
- - include_tasks: tasks/show_interfaces.yml
-
- - name: setup
- include_tasks: "{{ item }}"
- loop: "{{ lsr_setup }}"
- tags:
- - "tests::setup"
-
- - name: test
- include_tasks: "{{ item }}"
- loop: "{{ lsr_test }}"
- tags:
- - "tests::test"
-
- - name: asserts
- include_tasks: "{{ item }}"
- loop: "{{ lsr_assert }}"
- tags:
- - "tests::assert"
-
- - name: conditional asserts
- include_tasks: "{{ item['what'] }}"
- when:
- - "{{ item['when'] }}"
- loop: "{{ lsr_assert_when|default([]) }}"
-
- - name: "Success in test '{{ lsr_description }}'"
- debug:
- msg: "+++++ Success in test '{{ lsr_description }}' +++++"
-
- rescue:
- - name: "Failure in test '{{ lsr_description }}'"
- debug:
- msg: "!!!!! Failure in test '{{ lsr_description }}' !!!!!"
-
- - debug:
- var: "{{ item }}"
- loop: "{{ lsr_fail_debug | default([]) }}"
-
- - fail:
- msg: "!!!!! Failure in test '{{ lsr_description }}' !!!!!"
-
- always:
- - name: cleanup
- include_tasks: "{{ item }}"
- loop: "{{ lsr_cleanup }}"
- tags:
- - "tests::cleanup"
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/setup_802.1x.yml b/roles/linux-system-roles.network/tests/tasks/setup_802.1x.yml
deleted file mode 100644
index e83e049..0000000
--- a/roles/linux-system-roles.network/tests/tasks/setup_802.1x.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-- include_tasks: tasks/setup_802_1x_server.yml
-- name: Copy client certs
- copy:
- src: "{{ item }}"
- dest: "/etc/pki/tls/{{ item }}"
- mode: 0644
- with_items:
- - client.key
- - client.key.nocrypt
- - client.pem
- - cacert.pem
diff --git a/roles/linux-system-roles.network/tests/tasks/setup_802_1x_server.yml b/roles/linux-system-roles.network/tests/tasks/setup_802_1x_server.yml
deleted file mode 100644
index 49d1ce1..0000000
--- a/roles/linux-system-roles.network/tests/tasks/setup_802_1x_server.yml
+++ /dev/null
@@ -1,75 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Install hostapd
- package:
- name: hostapd
- state: present
-
-- name: Create directory for test certificates
- file:
- state: directory
- path: /etc/pki/tls/hostapd_test
-- name: Copy server certificates
- copy:
- src: "{{ item }}"
- dest: "/etc/pki/tls/hostapd_test/{{ item }}"
- with_items:
- - server.key
- - dh.pem
- - server.pem
- - cacert.pem
-
-- name: Create test interfaces
- shell: |
- ip link add veth1 type veth peer name veth1-br
- ip link add veth2 type veth peer name veth2-br
-
- ip link add br1 type bridge
- ip link set br1 up
-
- ip netns add ns1
-
- ip link set veth1 netns ns1
-
- ip netns exec ns1 ip addr add 203.0.113.1/24 dev veth1
-
- ip link set veth1-br up
- ip link set veth2-br up
-
- ip link set veth1-br master br1
- ip link set veth2-br master br1
-
- ip netns exec ns1 ip link set veth1 up
- ip link set veth2 up
-
- # Enable forwarding of EAP 802.1x messages through software bridge "br1".
- echo 8 > /sys/class/net/br1/bridge/group_fwd_mask
-
-- name: Create hostapd config
- copy:
- content: |
- interface=veth1
- driver=wired
- debug=2
- ieee8021x=1
- eap_reauth_period=3600
- eap_server=1
- use_pae_group_addr=1
- eap_user_file=/etc/hostapd/hostapd.eap_user
- ca_cert=/etc/pki/tls/hostapd_test/cacert.pem
- dh_file=/etc/pki/tls/hostapd_test/dh.pem
- server_cert=/etc/pki/tls/hostapd_test/server.pem
- private_key=/etc/pki/tls/hostapd_test/server.key
- private_key_passwd=test
- logger_syslog=-1
- logger_syslog_level=0
- dest: /etc/hostapd/wired.conf
-
-- name: Create eap_user_file config
- copy:
- content: |
- * TLS
- dest: /etc/hostapd/hostapd.eap_user
-
-- name: Run hostapd in namespace
- shell: ip netns exec ns1 hostapd -B /etc/hostapd/wired.conf && sleep 5
diff --git a/roles/linux-system-roles.network/tests/tasks/setup_mock_wifi.yml b/roles/linux-system-roles.network/tests/tasks/setup_mock_wifi.yml
deleted file mode 100644
index 997b704..0000000
--- a/roles/linux-system-roles.network/tests/tasks/setup_mock_wifi.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Install packages required to set up mock wifi network
- package:
- name:
- - hostapd
- - NetworkManager
- - wpa_supplicant
- state: present
-
-- name: Ensure NetworkManager is running
- service:
- name: NetworkManager
- state: started
-
-- name: Copy server certificates
- copy:
- src: "{{ item }}"
- dest: "/etc/pki/tls/{{ item }}"
- with_items:
- - server.key
- - dh.pem
- - server.pem
- - cacert.pem
-
-- name: Create hostapd config
- copy:
- content: |
- interface=wlan1
- driver=nl80211
- ctrl_interface=/var/run/hostapd
- ctrl_interface_group=0
- ssid=mock_wifi
- country_code=EN
- hw_mode=g
- channel=7
- auth_algs=3
- wpa=3
- ieee8021x=1
- eapol_version=1
- wpa_key_mgmt=WPA-EAP WPA-PSK
- wpa_passphrase=p@55w0rD
- eap_reauth_period=3600
- eap_server=1
- use_pae_group_addr=1
- eap_user_file=/etc/hostapd/hostapd.eap_user
- ca_cert=/etc/pki/tls/cacert.pem
- dh_file=/etc/pki/tls/dh.pem
- server_cert=/etc/pki/tls/server.pem
- private_key=/etc/pki/tls/server.key
- private_key_passwd=test
- logger_syslog=-1
- logger_syslog_level=0
- dest: /etc/hostapd/wireless.conf
-
-- name: Create eap_user_file config
- copy:
- content: |
- * TLS
- dest: /etc/hostapd/hostapd.eap_user
-
-- name: Load mac80211_hwsim kernel module to mock a wifi network
- shell: modprobe mac80211_hwsim && sleep 5
-
-- name: Restart NetworkManager and wpa_supplicant
- service:
- name: "{{ item }}"
- state: restarted
- with_items:
- - NetworkManager
- - wpa_supplicant
-
-- name: Configure wlan0 and wlan1 (mock wifi interfaces)
- shell: |
- ip link set up wlan0
- ip link set up wlan1
- nmcli device set wlan1 managed off
- ip add add 203.0.113.1/24 dev wlan1
- sleep 5
-
-- name: Start hostapd
- shell: hostapd -B /etc/hostapd/wireless.conf && sleep 5
diff --git a/roles/linux-system-roles.network/tests/tasks/setup_test_interface.yml b/roles/linux-system-roles.network/tests/tasks/setup_test_interface.yml
deleted file mode 100644
index fb767f3..0000000
--- a/roles/linux-system-roles.network/tests/tasks/setup_test_interface.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include_tasks: tasks/manage_test_interface.yml
- vars:
- state: present
- type: veth
-...
diff --git a/roles/linux-system-roles.network/tests/tasks/show_interfaces.yml b/roles/linux-system-roles.network/tests/tasks/show_interfaces.yml
deleted file mode 100644
index 6c2fbec..0000000
--- a/roles/linux-system-roles.network/tests/tasks/show_interfaces.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- include: get_current_interfaces.yml
-- debug:
- msg: "current_interfaces: {{ current_interfaces }}"
diff --git a/roles/linux-system-roles.network/tests/tasks/test_802.1x_capath.yml b/roles/linux-system-roles.network/tests/tasks/test_802.1x_capath.yml
deleted file mode 100644
index bae8e27..0000000
--- a/roles/linux-system-roles.network/tests/tasks/test_802.1x_capath.yml
+++ /dev/null
@@ -1,108 +0,0 @@
----
-- name: >-
- TEST: 802.1x profile with unencrypted private key and ca_path
- debug:
- msg: "##################################################"
-- set_fact:
- # Fixed versions/NVRs:
- # 1.25.2
- # NetworkManager-1.24.2-1.fc33
- # NetworkManager-1.22.14-1.fc32
- # NetworkManager-1.20.12-1.fc31
- # 1.18.8
- __NM_capath_ignored_NVRs:
- - NetworkManager-1.18.0-5.el7.x86_64
- - NetworkManager-1.18.4-3.el7.x86_64
- - NetworkManager-1.20.0-3.el8.x86_64
- - NetworkManager-1.22.8-4.el8.x86_64
- - NetworkManager-1.20.4-1.fc31.x86_64
- - NetworkManager-1.22.10-1.fc32.x86_64
- - NetworkManager-1.22.12-1.fc32.x86_64
-- name: Create directory for ca_path test
- file:
- path: "/etc/pki/tls/my_ca_certs"
- state: directory
- mode: 0755
-- name: Copy cacert to ca_path
- copy:
- src: "cacert.pem"
- dest: "/etc/pki/tls/my_ca_certs/cacert.pem"
- mode: 0644
-- name: Install openssl (test dependency)
- package:
- name: openssl
- state: present
-- name: Hash cacert
- command: openssl x509 -hash -noout
- -in /etc/pki/tls/my_ca_certs/cacert.pem
- register: cacert_hash
-- name: Add symlink for cacert
- file:
- state: link
- path: "/etc/pki/tls/my_ca_certs/{{ cacert_hash.stdout }}.0"
- src: cacert.pem
-- name: Get NetworkManager version
- command:
- cmd: rpm -qa NetworkManager
- warn: false
- register: __network_NM_NVR
-- block:
- - import_role:
- name: linux-system-roles.network
- vars:
- network_connections:
- - name: "{{ interface | default('802-1x-test') }}"
- interface_name: veth2
- state: up
- type: ethernet
- ip:
- address:
- - 203.0.113.2/24
- dhcp4: "no"
- auto6: "no"
- ieee802_1x:
- identity: myhost_capath
- eap: tls
- private_key: /etc/pki/tls/client.key.nocrypt
- client_cert: /etc/pki/tls/client.pem
- private_key_password_flags:
- - not-required
- ca_path: /etc/pki/tls/my_ca_certs
- - name: "TEST: I can ping the EAP server"
- command: ping -c1 203.0.113.1
- - name: trigger failure in case the role did not fail
- fail:
- msg: after test
- rescue:
- - debug:
- var: "{{ item }}"
- with_items:
- - ansible_failed_result
- - ansible_failed_task
- - __network_NM_NVR.stdout
- - __NM_capath_ignored_NVRs
-
- - name: Assert role behavior
- vars:
- expected_failure: __network_NM_NVR.stdout in __NM_capath_ignored_NVRs
- failure: __network_connections_result.failed
- assert:
- that: (failure and expected_failure) or
- (not failure and not expected_failure)
- msg: "Role {{ failure and 'failed' or 'did not fail' }} but was expected
- {{ expected_failure and '' or 'not' }} to fail.
- NM NVR: {{ __network_NM_NVR.stdout }}"
- - name: Assert role failure
- assert:
- that: "
- 'ieee802_1x.ca_path specified but not supported by NetworkManager'
- in __network_connections_result.stderr"
- when:
- - __network_connections_result.failed
-
-
- - name: Assert ping succeeded
- assert:
- that:
- - "not 'cmd' in ansible_failed_result"
-...
diff --git a/roles/linux-system-roles.network/tests/tests_802_1x_nm.yml b/roles/linux-system-roles.network/tests/tests_802_1x_nm.yml
deleted file mode 100644
index a27d8ea..0000000
--- a/roles/linux-system-roles.network/tests/tests_802_1x_nm.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_802_1x.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_802_1x.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_802_1x_updated_nm.yml b/roles/linux-system-roles.network/tests/tests_802_1x_updated_nm.yml
deleted file mode 100644
index 5a25f5b..0000000
--- a/roles/linux-system-roles.network/tests/tests_802_1x_updated_nm.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_802_1x_updated.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_802_1x_updated.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_bond_deprecated_initscripts.yml b/roles/linux-system-roles.network/tests/tests_bond_deprecated_initscripts.yml
deleted file mode 100644
index 1e74bcc..0000000
--- a/roles/linux-system-roles.network/tests/tests_bond_deprecated_initscripts.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-- hosts: all
- name: Run playbook 'playbooks/tests_bond_deprecated.yml' with initscripts
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
- tags:
- - always
-
-- import_playbook: playbooks/tests_bond_deprecated.yml
diff --git a/roles/linux-system-roles.network/tests/tests_bond_deprecated_nm.yml b/roles/linux-system-roles.network/tests/tests_bond_deprecated_nm.yml
deleted file mode 100644
index 5a910a2..0000000
--- a/roles/linux-system-roles.network/tests/tests_bond_deprecated_nm.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_bond_deprecated.yml' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_bond_deprecated.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_bond_initscripts.yml b/roles/linux-system-roles.network/tests/tests_bond_initscripts.yml
deleted file mode 100644
index 32fcc32..0000000
--- a/roles/linux-system-roles.network/tests/tests_bond_initscripts.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-- hosts: all
- name: Run playbook 'playbooks/tests_bond.yml' with initscripts as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
- tags:
- - always
-
-- import_playbook: playbooks/tests_bond.yml
diff --git a/roles/linux-system-roles.network/tests/tests_bond_nm.yml b/roles/linux-system-roles.network/tests/tests_bond_nm.yml
deleted file mode 100644
index 7075d95..0000000
--- a/roles/linux-system-roles.network/tests/tests_bond_nm.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_bond.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_bond.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_bridge_initscripts.yml b/roles/linux-system-roles.network/tests/tests_bridge_initscripts.yml
deleted file mode 100644
index 8ce42e6..0000000
--- a/roles/linux-system-roles.network/tests/tests_bridge_initscripts.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-- hosts: all
- name: Run playbook 'playbooks/tests_bridge.yml' with initscripts as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
- tags:
- - always
-
-- import_playbook: playbooks/tests_bridge.yml
diff --git a/roles/linux-system-roles.network/tests/tests_bridge_nm.yml b/roles/linux-system-roles.network/tests/tests_bridge_nm.yml
deleted file mode 100644
index 3d1b53a..0000000
--- a/roles/linux-system-roles.network/tests/tests_bridge_nm.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_bridge.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_bridge.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_default.yml b/roles/linux-system-roles.network/tests/tests_default.yml
deleted file mode 100644
index e196314..0000000
--- a/roles/linux-system-roles.network/tests/tests_default.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Test executing the role with default parameters
- hosts: all
- roles:
- - linux-system-roles.network
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Test warning and info logs
- assert:
- that:
- - "'warnings' not in __network_connections_result"
- msg: "There are warnings"
diff --git a/roles/linux-system-roles.network/tests/tests_default_initscripts.yml b/roles/linux-system-roles.network/tests/tests_default_initscripts.yml
deleted file mode 100644
index 006889c..0000000
--- a/roles/linux-system-roles.network/tests/tests_default_initscripts.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
-
-- import_playbook: tests_default.yml
diff --git a/roles/linux-system-roles.network/tests/tests_default_nm.yml b/roles/linux-system-roles.network/tests/tests_default_nm.yml
deleted file mode 100644
index 54bc3e1..0000000
--- a/roles/linux-system-roles.network/tests/tests_default_nm.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
-
-# The test should run with NetworkManager, therefore it cannot run on
-# RHEL/CentOS 6
-- import_playbook: tests_default.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_dummy_nm.yml b/roles/linux-system-roles.network/tests/tests_dummy_nm.yml
deleted file mode 100644
index 63bb99b..0000000
--- a/roles/linux-system-roles.network/tests/tests_dummy_nm.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_dummy.yml' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_dummy.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_eth_dns_support_nm.yml b/roles/linux-system-roles.network/tests/tests_eth_dns_support_nm.yml
deleted file mode 100644
index b35284c..0000000
--- a/roles/linux-system-roles.network/tests/tests_eth_dns_support_nm.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_eth_dns_support.yml' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_eth_dns_support.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_ethernet_initscripts.yml b/roles/linux-system-roles.network/tests/tests_ethernet_initscripts.yml
deleted file mode 100644
index 366b052..0000000
--- a/roles/linux-system-roles.network/tests/tests_ethernet_initscripts.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-- hosts: all
- name: Run playbook 'playbooks/tests_ethernet.yml' with initscripts as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
- tags:
- - always
-
-- import_playbook: playbooks/tests_ethernet.yml
diff --git a/roles/linux-system-roles.network/tests/tests_ethernet_nm.yml b/roles/linux-system-roles.network/tests/tests_ethernet_nm.yml
deleted file mode 100644
index 238172d..0000000
--- a/roles/linux-system-roles.network/tests/tests_ethernet_nm.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_ethernet.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_ethernet.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_ethtool_coalesce_nm.yml b/roles/linux-system-roles.network/tests/tests_ethtool_coalesce_nm.yml
deleted file mode 100644
index f38294a..0000000
--- a/roles/linux-system-roles.network/tests/tests_ethtool_coalesce_nm.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_ethtool_coalesce.yml' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
- - block:
- - name: Install NetworkManager
- package:
- name: NetworkManager
- state: present
- - name: Get NetworkManager version
- command: rpm -q --qf "%{version}" NetworkManager
- args:
- warn: false
- register: NetworkManager_version
- when: true
- when:
- - ansible_distribution_major_version != '6'
- tags:
- - always
-
-
-# workaround for: https://github.com/ansible/ansible/issues/27973
-# There is no way in Ansible to abort a playbook hosts with specific OS
-# releases Therefore we include the playbook with the tests only if the hosts
-# would support it.
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_ethtool_coalesce.yml
- when:
- - ansible_distribution_major_version != '6'
-
- - NetworkManager_version.stdout is version('1.25.1', '>=')
diff --git a/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml b/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml
deleted file mode 100644
index 5bac5d3..0000000
--- a/roles/linux-system-roles.network/tests/tests_ethtool_features_initscripts.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# set network provider and gather facts
-- hosts: all
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
-
-# workaround for: https://github.com/ansible/ansible/issues/27973
-# There is no way in Ansible to abort a playbook hosts with specific OS
-# releases Therefore we include the playbook with the tests only if the hosts
-# would support it.
-- import_playbook: playbooks/tests_ethtool_features.yml
diff --git a/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml b/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml
deleted file mode 100644
index 2027862..0000000
--- a/roles/linux-system-roles.network/tests/tests_ethtool_features_nm.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_ethtool_features.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
- - block:
- - name: Install NetworkManager
- package:
- name: NetworkManager
- state: present
- - name: Get NetworkManager version
- command: rpm -q --qf "%{version}" NetworkManager
- args:
- warn: false
- register: NetworkManager_version
- when: true
- when:
- - ansible_distribution_major_version != '6'
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_ethtool_features.yml
- when:
- - ansible_distribution_major_version != '6'
-
- - NetworkManager_version.stdout is version('1.20.0', '>=')
diff --git a/roles/linux-system-roles.network/tests/tests_helpers_and_asserts.yml b/roles/linux-system-roles.network/tests/tests_helpers_and_asserts.yml
deleted file mode 100644
index 64e2875..0000000
--- a/roles/linux-system-roles.network/tests/tests_helpers_and_asserts.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- name: Check that creating and removing test devices and assertions work
- hosts: all
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: test veth interface management
- include_tasks: tasks/create_and_remove_interface.yml
- vars:
- type: veth
- interface: veth1298
-
- - name: test veth interface management
- include_tasks: tasks/create_and_remove_interface.yml
- vars:
- type: dummy
- interface: dummy1298
-
-# FIXME: when: does not seem to work with include_tasks, therefore this cannot
-# be safely tested for now
-# - name: test tap interfaces
-# include_tasks: tasks/create_and_remove_interface.yml
-# vars:
-# - type: tap
-# - interface: tap1298
-# when: ansible_distribution_major_version > 6
-# # ip tuntap does not exist on RHEL6
-# # FIXME: Maybe use some other tool to manage devices, openvpn can do
-# # this, but it is in EPEL
diff --git a/roles/linux-system-roles.network/tests/tests_integration_pytest.yml b/roles/linux-system-roles.network/tests/tests_integration_pytest.yml
deleted file mode 100644
index 9b80bd4..0000000
--- a/roles/linux-system-roles.network/tests/tests_integration_pytest.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
-- import_playbook: playbooks/integration_pytest_python3.yml
- when: (ansible_distribution in ["CentOS", "RedHat"] and
- ansible_distribution_major_version == "8") or
- ansible_distribution == "Fedora"
diff --git a/roles/linux-system-roles.network/tests/tests_ipv6_disabled_nm.yml b/roles/linux-system-roles.network/tests/tests_ipv6_disabled_nm.yml
deleted file mode 100644
index 24ee62d..0000000
--- a/roles/linux-system-roles.network/tests/tests_ipv6_disabled_nm.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_ipv6_disabled.yml' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_ipv6_disabled.yml
- when:
- - ansible_distribution_major_version != '6'
- - ansible_distribution_major_version == '8'
diff --git a/roles/linux-system-roles.network/tests/tests_ipv6_initscripts.yml b/roles/linux-system-roles.network/tests/tests_ipv6_initscripts.yml
deleted file mode 100644
index 736a8e0..0000000
--- a/roles/linux-system-roles.network/tests/tests_ipv6_initscripts.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-- hosts: all
- name: Run playbook 'playbooks/tests_ipv6.yml' with initscripts as provider
- tasks:
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
- tags:
- - always
-
-- import_playbook: playbooks/tests_ipv6.yml
diff --git a/roles/linux-system-roles.network/tests/tests_ipv6_nm.yml b/roles/linux-system-roles.network/tests/tests_ipv6_nm.yml
deleted file mode 100644
index f186912..0000000
--- a/roles/linux-system-roles.network/tests/tests_ipv6_nm.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_ipv6.yml' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_ipv6.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_provider_nm.yml b/roles/linux-system-roles.network/tests/tests_provider_nm.yml
deleted file mode 100644
index 67fcffe..0000000
--- a/roles/linux-system-roles.network/tests/tests_provider_nm.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_provider.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
- - block:
- - name: Install NetworkManager
- package:
- name: NetworkManager
- state: present
- - name: Get NetworkManager version
- command: rpm -q --qf "%{version}" NetworkManager
- args:
- warn: false
- register: NetworkManager_version
- when: true
- when:
- - ansible_distribution_major_version != '6'
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_provider.yml
- when:
- - ansible_distribution_major_version != '6'
-
- - NetworkManager_version.stdout is version('1.20.0', '>=')
diff --git a/roles/linux-system-roles.network/tests/tests_reapply_nm.yml b/roles/linux-system-roles.network/tests/tests_reapply_nm.yml
deleted file mode 100644
index eb48ddb..0000000
--- a/roles/linux-system-roles.network/tests/tests_reapply_nm.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_reapply.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_reapply.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_regression_nm.yml b/roles/linux-system-roles.network/tests/tests_regression_nm.yml
deleted file mode 100644
index b2c46e9..0000000
--- a/roles/linux-system-roles.network/tests/tests_regression_nm.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-# set network provider and gather facts
-- hosts: all
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- - name: Install NetworkManager
- package:
- name: NetworkManager
- state: present
- - name: Get NetworkManager version
- command: rpm -q --qf "%{version}" NetworkManager
- args:
- warn: "no"
- when: true
- register: NetworkManager_version
-
-# workaround for: https://github.com/ansible/ansible/issues/27973
-# There is no way in Ansible to abort a playbook hosts with specific OS
-# releases Therefore we include the playbook with the tests only if the hosts
-# would support it.
-# The test requires NetworkManager, therefore it cannot run on RHEL/CentOS 6
-- import_playbook: playbooks/tests_checkpoint_cleanup.yml
- when:
- - ansible_distribution_major_version != '6'
- # The test depends on behavior that is only visible with newer NM
- - NetworkManager_version.stdout is version('1.22.0', '>=')
diff --git a/roles/linux-system-roles.network/tests/tests_states_initscripts.yml b/roles/linux-system-roles.network/tests/tests_states_initscripts.yml
deleted file mode 100644
index fa94103..0000000
--- a/roles/linux-system-roles.network/tests/tests_states_initscripts.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-- hosts: all
- name: Run playbook 'playbooks/tests_states.yml' with initscripts as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
- tags:
- - always
-
-- import_playbook: playbooks/tests_states.yml
diff --git a/roles/linux-system-roles.network/tests/tests_states_nm.yml b/roles/linux-system-roles.network/tests/tests_states_nm.yml
deleted file mode 100644
index 34c8a24..0000000
--- a/roles/linux-system-roles.network/tests/tests_states_nm.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_states.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_states.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_team_nm.yml b/roles/linux-system-roles.network/tests/tests_team_nm.yml
deleted file mode 100644
index 8048029..0000000
--- a/roles/linux-system-roles.network/tests/tests_team_nm.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_team.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_team.yml
- when:
- - ansible_distribution_major_version != '6'
- - ansible_distribution != 'Fedora'
diff --git a/roles/linux-system-roles.network/tests/tests_team_plugin_installation_nm.yml b/roles/linux-system-roles.network/tests/tests_team_plugin_installation_nm.yml
deleted file mode 100644
index 4572736..0000000
--- a/roles/linux-system-roles.network/tests/tests_team_plugin_installation_nm.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_team_plugin_installation.yml' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_team_plugin_installation.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_unit.yml b/roles/linux-system-roles.network/tests/tests_unit.yml
deleted file mode 100644
index 44dfaec..0000000
--- a/roles/linux-system-roles.network/tests/tests_unit.yml
+++ /dev/null
@@ -1,160 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
----
-- hosts: all
- name: Setup for test running
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Install dependencies
- package:
- name: "{{ item }}"
- state: present
- # Ignore error because some package names might not be available
- ignore_errors: true
- loop:
- - NetworkManager-libnm
- - python2-gobject-base
- - python3-gobject-base
- - python-gobject-base
- - python2-mock
-
-- hosts: all
- name: execute python unit tests
- tasks:
- - block:
- - name: create tempdir for code to test
- tempfile:
- state: directory
- prefix: lsrtest_
- register: _rundir
-
- - name: get tempfile for tar
- tempfile:
- prefix: lsrtest_
- suffix: ".tar"
- register: temptar
- delegate_to: localhost
-
- - include_tasks: tasks/get_modules_and_utils_paths.yml
-
- # TODO: using tar and copying the file is a workaround for the
- # synchronize module that does not work in test-harness. Related issue:
- # https://github.com/linux-system-roles/test-harness/issues/102
- #
- - name: Create Tar file
- command: >
- tar -cvf {{ temptar.path }} --exclude "*.pyc"
- --exclude "__pycache__"
- -C {{ modules_parent_and_dir.stdout_lines[0] }}
- {{ modules_parent_and_dir.stdout_lines[1] }}
- -C {{ module_utils_parent_and_dir.stdout_lines[0] }}
- {{ module_utils_parent_and_dir.stdout_lines[1] }}
- delegate_to: localhost
-
- - name: Copy testrepo.tar to the remote system
- copy:
- src: "{{ temptar.path }}"
- dest: "{{ _rundir.path }}"
-
- - name: Untar testrepo.tar
- command: tar -xvf {{ temptar.path | basename }}
- args:
- chdir: "{{ _rundir.path }}"
-
- - file:
- state: directory
- path: "{{ item }}"
- loop:
- - "{{ _rundir.path }}/ansible"
- - "{{ _rundir.path }}/ansible/module_utils"
-
- - name: Move module_utils to ansible directory
- shell: |
- if [ -d {{ _rundir.path }}/module_utils ]; then
- mv {{ _rundir.path }}/module_utils {{ _rundir.path }}/ansible
- fi
-
- - name: Fake out python module directories, primarily for python2
- shell: |
- for dir in $(find {{ _rundir.path }} -type d -print); do
- if [ ! -f "$dir/__init__.py" ]; then
- touch "$dir/__init__.py"
- fi
- done
-
- - name: Copy unit test to remote system
- copy:
- src: unit/test_network_connections.py
- dest: "{{ _rundir.path }}"
-
- - set_fact:
- _lsr_python_path: "{{
- _rundir.path ~ '/' ~
- modules_parent_and_dir.stdout_lines[1] ~ ':' ~
- _rundir.path ~ '/' ~ 'ansible' ~ '/' ~
- module_utils_parent_and_dir.stdout_lines[1] ~ ':' ~
- _rundir.path ~ '/' ~
- module_utils_parent_and_dir.stdout_lines[1] ~ ':' ~
- _rundir.path
- }}"
-
- - command: ls -alrtFR {{ _rundir.path }}
- - debug:
- msg: path {{ _lsr_python_path }}
-
- - name: Check if python2 is available
- command: python2 --version
- ignore_errors: true
- register: python2_available
- when: true
-
- - name: Run python2 unit tests
- command: >
- python2 {{ _rundir.path }}/test_network_connections.py --verbose
- environment:
- PYTHONPATH: "{{ _lsr_python_path }}"
- when: >
- python2_available is succeeded and ansible_distribution != 'Fedora'
- register: python2_result
-
- - name: Check if python3 is available
- command: python3 --version
- ignore_errors: true
- register: python3_available
- when: true
-
- - name: Run python3 unit tests
- command: >
- python3 {{ _rundir.path }}/test_network_connections.py --verbose
- environment:
- PYTHONPATH: "{{ _lsr_python_path }}"
- when: python3_available is succeeded
- register: python3_result
-
- - name: Show python2 unit test results
- debug:
- var: python2_result.stderr_lines
- when: python2_result is succeeded
-
- - name: Show python3 unit test results
- debug:
- var: python3_result.stderr_lines
- when: python3_result is succeeded
-
- always:
- - name: remove local tar file
- file:
- state: absent
- path: "{{ temptar.path }}"
- delegate_to: localhost
-
- - name: remove tempdir
- file:
- state: absent
- path: "{{ _rundir.path }}"
-
- - name: Ensure that at least one python unit test ran
- fail:
- msg: Tests did not run with python2 or python3
- when: not python2_available is succeeded and
- not python3_available is succeeded
diff --git a/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml b/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml
deleted file mode 100644
index dcd5d74..0000000
--- a/roles/linux-system-roles.network/tests/tests_vlan_mtu_initscripts.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-- hosts: all
- name: Run playbook 'playbooks/tests_vlan_mtu.yml' with initscripts as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'initscripts'
- set_fact:
- network_provider: initscripts
- tags:
- - always
-
-- import_playbook: playbooks/tests_vlan_mtu.yml
diff --git a/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml b/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml
deleted file mode 100644
index c38263c..0000000
--- a/roles/linux-system-roles.network/tests/tests_vlan_mtu_nm.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_vlan_mtu.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_vlan_mtu.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/tests_wireless_nm.yml b/roles/linux-system-roles.network/tests/tests_wireless_nm.yml
deleted file mode 100644
index 03b5ad6..0000000
--- a/roles/linux-system-roles.network/tests/tests_wireless_nm.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_wireless.yml' with nm as provider
- tasks:
- - include_tasks: tasks/el_repo_setup.yml
-
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_wireless.yml
- when:
- - ansible_distribution_major_version != '6'
- - ansible_distribution_major_version == '7'
diff --git a/roles/linux-system-roles.network/tests/tests_wireless_plugin_installation_nm.yml b/roles/linux-system-roles.network/tests/tests_wireless_plugin_installation_nm.yml
deleted file mode 100644
index 5e55f50..0000000
--- a/roles/linux-system-roles.network/tests/tests_wireless_plugin_installation_nm.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# This file was generated by ensure_provider_tests.py
----
-# set network provider and gather facts
-- hosts: all
- name: Run playbook 'playbooks/tests_wireless_plugin_installation.yml' with nm as provider
- tasks:
- - name: Set network provider to 'nm'
- set_fact:
- network_provider: nm
- tags:
- - always
-
-
-# The test requires or should run with NetworkManager, therefore it cannot run
-# on RHEL/CentOS 6
-- import_playbook: playbooks/tests_wireless_plugin_installation.yml
- when:
- - ansible_distribution_major_version != '6'
diff --git a/roles/linux-system-roles.network/tests/unit/test_network_connections.py b/roles/linux-system-roles.network/tests/unit/test_network_connections.py
deleted file mode 100644
index b14e7b3..0000000
--- a/roles/linux-system-roles.network/tests/unit/test_network_connections.py
+++ /dev/null
@@ -1,3425 +0,0 @@
-#!/usr/bin/env python
-""" Tests for network_connections Ansible module """
-# SPDX-License-Identifier: BSD-3-Clause
-import copy
-import itertools
-import pprint as pprint_
-import socket
-import sys
-import unittest
-
-try:
- from unittest import mock
-except ImportError: # py2
- import mock
-
-sys.modules["ansible.module_utils.basic"] = mock.Mock()
-
-# pylint: disable=import-error, wrong-import-position
-
-import network_lsr
-import network_lsr.argument_validator
-from network_connections import IfcfgUtil, NMUtil, SysUtil, Util
-from network_lsr.argument_validator import ValidationError
-
-try:
- my_test_skipIf = unittest.skipIf
-except AttributeError:
- # python 2.6 workaround
- def my_test_skipIf(condition, reason):
- if condition:
- return lambda x: None
- else:
- return lambda x: x
-
-
-try:
- nmutil = NMUtil()
- assert nmutil
-except Exception:
- # NMUtil is not supported, for example on RHEL 6 or without
- # pygobject.
- nmutil = None
-
-if nmutil:
- NM = Util.NM()
- GObject = Util.GObject()
-
-
-def pprint(msg, obj):
- print("PRINT: %s\n" % (msg))
-
- p = pprint_.PrettyPrinter(indent=4)
- p.pprint(obj)
- if nmutil is not None and isinstance(obj, NM.Connection):
- obj.dump()
-
-
-ARGS_CONNECTIONS = network_lsr.argument_validator.ArgValidator_ListConnections()
-VALIDATE_ONE_MODE_INITSCRIPTS = ARGS_CONNECTIONS.VALIDATE_ONE_MODE_INITSCRIPTS
-VALIDATE_ONE_MODE_NM = ARGS_CONNECTIONS.VALIDATE_ONE_MODE_NM
-
-ETHTOOL_FEATURES_DEFAULTS = {
- "esp_hw_offload": None,
- "esp_tx_csum_hw_offload": None,
- "fcoe_mtu": None,
- "gro": None,
- "gso": None,
- "highdma": None,
- "hw_tc_offload": None,
- "l2_fwd_offload": None,
- "loopback": None,
- "lro": None,
- "ntuple": None,
- "rx": None,
- "rx_all": None,
- "rx_fcs": None,
- "rx_gro_hw": None,
- "rx_udp_tunnel_port_offload": None,
- "rx_vlan_filter": None,
- "rx_vlan_stag_filter": None,
- "rx_vlan_stag_hw_parse": None,
- "rxhash": None,
- "rxvlan": None,
- "sg": None,
- "tls_hw_record": None,
- "tls_hw_tx_offload": None,
- "tso": None,
- "tx": None,
- "tx_checksum_fcoe_crc": None,
- "tx_checksum_ip_generic": None,
- "tx_checksum_ipv4": None,
- "tx_checksum_ipv6": None,
- "tx_checksum_sctp": None,
- "tx_esp_segmentation": None,
- "tx_fcoe_segmentation": None,
- "tx_gre_csum_segmentation": None,
- "tx_gre_segmentation": None,
- "tx_gso_partial": None,
- "tx_gso_robust": None,
- "tx_ipxip4_segmentation": None,
- "tx_ipxip6_segmentation": None,
- "tx_nocache_copy": None,
- "tx_scatter_gather": None,
- "tx_scatter_gather_fraglist": None,
- "tx_sctp_segmentation": None,
- "tx_tcp_ecn_segmentation": None,
- "tx_tcp_mangleid_segmentation": None,
- "tx_tcp_segmentation": None,
- "tx_tcp6_segmentation": None,
- "tx_udp_segmentation": None,
- "tx_udp_tnl_csum_segmentation": None,
- "tx_udp_tnl_segmentation": None,
- "tx_vlan_stag_hw_insert": None,
- "txvlan": None,
-}
-
-
-ETHTOOL_COALESCE_DEFAULTS = {
- "adaptive_rx": None,
- "adaptive_tx": None,
- "pkt_rate_high": None,
- "pkt_rate_low": None,
- "rx_frames": None,
- "rx_frames_high": None,
- "rx_frames_irq": None,
- "rx_frames_low": None,
- "rx_usecs": None,
- "rx_usecs_high": None,
- "rx_usecs_irq": None,
- "rx_usecs_low": None,
- "sample_interval": None,
- "stats_block_usecs": None,
- "tx_frames": None,
- "tx_frames_high": None,
- "tx_frames_irq": None,
- "tx_frames_low": None,
- "tx_usecs": None,
- "tx_usecs_high": None,
- "tx_usecs_irq": None,
- "tx_usecs_low": None,
-}
-
-ETHTOOL_DEFAULTS = {
- "features": ETHTOOL_FEATURES_DEFAULTS,
- "coalesce": ETHTOOL_COALESCE_DEFAULTS,
-}
-
-ETHERNET_DEFAULTS = {"autoneg": None, "duplex": None, "speed": 0}
-
-
-class TestValidator(unittest.TestCase):
- def setUp(self):
- # default values when "type" is specified and state is not
- self.default_connection_settings = {
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "ignore_errors": None,
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "5",
- "parent": None,
- "port_type": None,
- "zone": None,
- }
-
- def assertValidationError(self, v, value):
- self.assertRaises(ValidationError, v.validate, value)
-
- def assert_nm_connection_routes_expected(self, connection, route_list_expected):
- parser = network_lsr.argument_validator.ArgValidatorIPRoute("route[?]")
- route_list_exp = [parser.validate(r) for r in route_list_expected]
- route_list_new = itertools.chain(
- nmutil.setting_ip_config_get_routes(
- connection.get_setting(NM.SettingIP4Config)
- ),
- nmutil.setting_ip_config_get_routes(
- connection.get_setting(NM.SettingIP6Config)
- ),
- )
- route_list_new = [
- {
- "family": r.get_family(),
- "network": r.get_dest(),
- "prefix": int(r.get_prefix()),
- "gateway": r.get_next_hop(),
- "metric": int(r.get_metric()),
- }
- for r in route_list_new
- ]
- self.assertEqual(route_list_exp, route_list_new)
-
- def do_connections_check_invalid(self, input_connections):
- self.assertValidationError(ARGS_CONNECTIONS, input_connections)
-
- def do_connections_validate_nm(self, input_connections, **kwargs):
- if not nmutil:
- return
- connections = ARGS_CONNECTIONS.validate(input_connections)
- for connection in connections:
- if "type" in connection:
- connection["nm.exists"] = False
- connection["nm.uuid"] = Util.create_uuid()
-
- mode = VALIDATE_ONE_MODE_NM
- for idx, connection in enumerate(connections):
- try:
- ARGS_CONNECTIONS.validate_connection_one(mode, connections, idx)
- except ValidationError:
- continue
- if "type" in connection:
- con_new = nmutil.connection_create(connections, idx)
- self.assertTrue(con_new)
- self.assertTrue(con_new.verify())
- if "nm_route_list_current" in kwargs:
- parser = network_lsr.argument_validator.ArgValidatorIPRoute(
- "route[?]"
- )
- s4 = con_new.get_setting(NM.SettingIP4Config)
- s6 = con_new.get_setting(NM.SettingIP6Config)
- s4.clear_routes()
- s6.clear_routes()
- for r in kwargs["nm_route_list_current"][idx]:
- r = parser.validate(r)
- r = NM.IPRoute.new(
- r["family"],
- r["network"],
- r["prefix"],
- r["gateway"],
- r["metric"],
- )
- if r.get_family() == socket.AF_INET:
- s4.add_route(r)
- else:
- s6.add_route(r)
- con_new = nmutil.connection_create(
- connections, idx, connection_current=con_new
- )
- self.assertTrue(con_new)
- self.assertTrue(con_new.verify())
- if "nm_route_list_expected" in kwargs:
- self.assert_nm_connection_routes_expected(
- con_new, kwargs["nm_route_list_expected"][idx]
- )
-
- def do_connections_validate_ifcfg(self, input_connections, **kwargs):
- mode = VALIDATE_ONE_MODE_INITSCRIPTS
- connections = ARGS_CONNECTIONS.validate(input_connections)
- for idx, connection in enumerate(connections):
- try:
- ARGS_CONNECTIONS.validate_connection_one(mode, connections, idx)
- except ValidationError:
- continue
- if "type" not in connection:
- continue
- if (
- connection["type"] in ["macvlan", "wireless"]
- or connection["ieee802_1x"]
- ):
- # initscripts do not support this type. Skip the test.
- continue
- content_current = kwargs.get("initscripts_content_current", None)
- if content_current:
- content_current = content_current[idx]
- c = IfcfgUtil.ifcfg_create(
- connections, idx, content_current=content_current
- )
- # pprint("con[%s] = \"%s\"" % (idx, connections[idx]['name']), c)
- exp = kwargs.get("initscripts_dict_expected", None)
- if exp is not None:
- self.assertEqual(exp[idx], c)
-
- def do_connections_validate(
- self, expected_connections, input_connections, **kwargs
- ):
- connections = ARGS_CONNECTIONS.validate(input_connections)
- self.assertEqual(expected_connections, connections)
- self.do_connections_validate_nm(input_connections, **kwargs)
- self.do_connections_validate_ifcfg(input_connections, **kwargs)
-
- def test_validate_str(self):
-
- v = network_lsr.argument_validator.ArgValidatorStr("state")
- self.assertEqual("a", v.validate("a"))
- self.assertValidationError(v, 1)
- self.assertValidationError(v, None)
-
- v = network_lsr.argument_validator.ArgValidatorStr("state", required=True)
- self.assertValidationError(v, None)
-
- v = network_lsr.argument_validator.ArgValidatorStr(
- "test_max_length", max_length=13
- )
- self.assertEqual("less_than_13", v.validate("less_than_13"))
- self.assertValidationError(v, "longer_than_13")
-
- v = network_lsr.argument_validator.ArgValidatorStr(
- "test_min_length", min_length=13
- )
- self.assertEqual("longer_than_13", v.validate("longer_than_13"))
- self.assertValidationError(v, "less_than_13")
-
- v = network_lsr.argument_validator.ArgValidatorStr(
- "test_min_max_length", min_length=10, max_length=15
- )
- self.assertEqual("13_characters", v.validate("13_characters"))
- self.assertValidationError(v, "too_short")
- self.assertValidationError(v, "string_is_too_long")
-
- self.assertRaises(
- ValueError,
- network_lsr.argument_validator.ArgValidatorStr,
- "non_int",
- min_length="string",
- )
- self.assertRaises(
- ValueError,
- network_lsr.argument_validator.ArgValidatorStr,
- "non_int",
- max_length="string",
- )
- self.assertRaises(
- ValueError,
- network_lsr.argument_validator.ArgValidatorStr,
- "negative_int",
- min_length=-5,
- )
- self.assertRaises(
- ValueError,
- network_lsr.argument_validator.ArgValidatorStr,
- "negative_int",
- max_length=-5,
- )
-
- def test_validate_int(self):
-
- v = network_lsr.argument_validator.ArgValidatorNum(
- "state", default_value=None, numeric_type=float
- )
- self.assertEqual(1, v.validate(1))
- self.assertEqual(1.5, v.validate(1.5))
- self.assertEqual(1.5, v.validate("1.5"))
- self.assertValidationError(v, None)
- self.assertValidationError(v, "1a")
-
- v = network_lsr.argument_validator.ArgValidatorNum("state", default_value=None)
- self.assertEqual(1, v.validate(1))
- self.assertEqual(1, v.validate(1.0))
- self.assertEqual(1, v.validate("1"))
- self.assertValidationError(v, None)
- self.assertValidationError(v, None)
- self.assertValidationError(v, 1.5)
- self.assertValidationError(v, "1.5")
-
- v = network_lsr.argument_validator.ArgValidatorNum("state", required=True)
- self.assertValidationError(v, None)
-
- def test_validate_bool(self):
-
- v = network_lsr.argument_validator.ArgValidatorBool("state")
- self.assertEqual(True, v.validate("yes"))
- self.assertEqual(True, v.validate("yeS"))
- self.assertEqual(True, v.validate("Y"))
- self.assertEqual(True, v.validate(True))
- self.assertEqual(True, v.validate("True"))
- self.assertEqual(True, v.validate("1"))
- self.assertEqual(True, v.validate(1))
-
- self.assertEqual(False, v.validate("no"))
- self.assertEqual(False, v.validate("nO"))
- self.assertEqual(False, v.validate("N"))
- self.assertEqual(False, v.validate(False))
- self.assertEqual(False, v.validate("False"))
- self.assertEqual(False, v.validate("0"))
- self.assertEqual(False, v.validate(0))
-
- self.assertValidationError(v, 2)
- self.assertValidationError(v, -1)
- self.assertValidationError(v, "Ye")
- self.assertValidationError(v, "")
- self.assertValidationError(v, None)
- v = network_lsr.argument_validator.ArgValidatorBool("state", required=True)
- self.assertValidationError(v, None)
-
- def test_validate_dict(self):
-
- v = network_lsr.argument_validator.ArgValidatorDict(
- "dict",
- nested=[
- network_lsr.argument_validator.ArgValidatorNum("i", required=True),
- network_lsr.argument_validator.ArgValidatorStr(
- "s", required=False, default_value="s_default"
- ),
- network_lsr.argument_validator.ArgValidatorStr(
- "l",
- required=False,
- default_value=network_lsr.argument_validator.ArgValidator.MISSING,
- ),
- ],
- )
-
- self.assertEqual({"i": 5, "s": "s_default"}, v.validate({"i": "5"}))
- self.assertEqual(
- {"i": 5, "s": "s_default", "l": "6"}, v.validate({"i": "5", "l": "6"})
- )
- self.assertValidationError(v, {"k": 1})
-
- def test_validate_list(self):
-
- v = network_lsr.argument_validator.ArgValidatorList(
- "list", nested=network_lsr.argument_validator.ArgValidatorNum("i")
- )
- self.assertEqual([1, 5], v.validate(["1", 5]))
- self.assertValidationError(v, [1, "s"])
-
- def test_empty(self):
- self.maxDiff = None
- self.do_connections_validate([], [])
-
- def test_ethernet_two_defaults(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "ignore_errors": None,
- "interface_name": "5",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "5",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": None,
- "type": "ethernet",
- "zone": None,
- },
- {
- "actions": ["present"],
- "ignore_errors": None,
- "name": "5",
- "persistent_state": "present",
- "state": None,
- },
- ],
- [{"name": "5", "type": "ethernet"}, {"name": "5"}],
- )
-
- def test_up_ethernet(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "5",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "5",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [{"name": "5", "state": "up", "type": "ethernet"}],
- )
-
- def test_up_ethernet_no_autoconnect(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": False,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "5",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "5",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [{"name": "5", "state": "up", "type": "ethernet", "autoconnect": "no"}],
- initscripts_dict_expected=[
- {
- "ifcfg": {
- "BOOTPROTO": "dhcp",
- "IPV6INIT": "yes",
- "IPV6_AUTOCONF": "yes",
- "NM_CONTROLLED": "no",
- "ONBOOT": "no",
- "TYPE": "Ethernet",
- "DEVICE": "5",
- },
- "keys": None,
- "route": None,
- "route6": None,
- "rule": None,
- "rule6": None,
- }
- ],
- )
-
- def test_invalid_autoconnect(self):
- self.maxDiff = None
- self.do_connections_check_invalid([{"name": "a", "autoconnect": True}])
-
- def test_absent(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["absent"],
- "ignore_errors": None,
- "name": "5",
- "persistent_state": "absent",
- "state": None,
- }
- ],
- [{"name": "5", "persistent_state": "absent"}],
- )
-
- def test_up_ethernet_mac_mtu_static_ip(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": None,
- "ip": {
- "dhcp4": False,
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dns": [],
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.174.5",
- }
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- },
- "mac": "52:54:00:44:9f:ba",
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": 1450,
- "name": "prod1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "prod1",
- "state": "up",
- "type": "ethernet",
- "autoconnect": "yes",
- "mac": "52:54:00:44:9f:ba",
- "mtu": 1450,
- "ip": {"address": "192.168.174.5/24"},
- }
- ],
- )
-
- def test_up_single_v4_dns(self):
- self.maxDiff = None
- # set single IPv4 DNS server
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "prod1",
- "ip": {
- "dhcp4": False,
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dns": [{"address": "192.168.174.1", "family": socket.AF_INET}],
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.174.5",
- }
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "prod1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "prod1",
- "state": "up",
- "type": "ethernet",
- "autoconnect": "yes",
- "ip": {"address": "192.168.174.5/24", "dns": "192.168.174.1"},
- }
- ],
- )
-
- def test_ipv6_static(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "prod1",
- "ip": {
- "gateway6": "2001:db8::1",
- "gateway4": None,
- "route_metric4": None,
- "auto6": False,
- "ipv6_disabled": False,
- "dhcp4": False,
- "address": [
- {
- "address": "2001:db8::2",
- "family": socket.AF_INET6,
- "prefix": 32,
- },
- {
- "address": "2001:db8::3",
- "family": socket.AF_INET6,
- "prefix": 32,
- },
- {
- "address": "2001:db8::4",
- "family": socket.AF_INET6,
- "prefix": 32,
- },
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "prod1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "prod1",
- "state": "up",
- "type": "ethernet",
- "ip": {
- "dhcp4": "no",
- "auto6": "no",
- "address": [
- "2001:db8::2/32",
- "2001:db8::3/32",
- "2001:db8::4/32",
- ],
- "gateway6": "2001:db8::1",
- },
- }
- ],
- initscripts_dict_expected=[
- {
- "ifcfg": {
- "BOOTPROTO": "none",
- "IPV6INIT": "yes",
- "IPV6_AUTOCONF": "no",
- "IPV6ADDR": "2001:db8::2/32",
- "IPV6ADDR_SECONDARIES": "2001:db8::3/32 2001:db8::4/32",
- "IPV6_DEFAULTGW": "2001:db8::1",
- "NM_CONTROLLED": "no",
- "ONBOOT": "yes",
- "TYPE": "Ethernet",
- "DEVICE": "prod1",
- },
- "keys": None,
- "route": None,
- "route6": None,
- "rule": None,
- "rule6": None,
- }
- ],
- )
-
- def test_routes(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": None,
- "ip": {
- "dhcp4": False,
- "auto6": True,
- "ipv6_disabled": False,
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.176.5",
- },
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.177.5",
- },
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "dns": [],
- },
- "mac": "52:54:00:44:9f:ba",
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": 1450,
- "name": "prod1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- },
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "prod.100",
- "ip": {
- "dhcp4": False,
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "auto6": False,
- "ipv6_disabled": False,
- "dns": [],
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.174.5",
- },
- {
- "prefix": 65,
- "family": socket.AF_INET6,
- "address": "a:b:c::6",
- },
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [
- {
- "family": socket.AF_INET,
- "network": "192.168.5.0",
- "prefix": 24,
- "gateway": None,
- "metric": -1,
- }
- ],
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "prod.100",
- "parent": "prod1",
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "vlan",
- "vlan": {"id": 100},
- "wait": None,
- "zone": None,
- },
- ],
- [
- {
- "name": "prod1",
- "state": "up",
- "type": "ethernet",
- "autoconnect": "yes",
- "mac": "52:54:00:44:9f:ba",
- "mtu": 1450,
- "ip": {"address": "192.168.176.5/24 192.168.177.5/24"},
- },
- {
- "name": "prod.100",
- "state": "up",
- "type": "vlan",
- "parent": "prod1",
- "vlan": {"id": "100"},
- "ip": {
- "address": [
- "192.168.174.5/24",
- {"address": "a:b:c::6", "prefix": 65},
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [{"network": "192.168.5.0"}],
- },
- },
- ],
- )
-
- def test_vlan(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": None,
- "ip": {
- "dhcp4": False,
- "auto6": True,
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.176.5",
- },
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.177.5",
- },
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "ipv6_disabled": False,
- "dns": [],
- },
- "mac": "52:54:00:44:9f:ba",
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": 1450,
- "name": "prod1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- },
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "prod.100",
- "ip": {
- "dhcp4": False,
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "ipv6_disabled": False,
- "auto6": False,
- "dns": [],
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.174.5",
- },
- {
- "prefix": 65,
- "family": socket.AF_INET6,
- "address": "a:b:c::6",
- },
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [
- {
- "family": socket.AF_INET,
- "network": "192.168.5.0",
- "prefix": 24,
- "gateway": None,
- "metric": -1,
- }
- ],
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "prod.100",
- "parent": "prod1",
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "vlan",
- "vlan": {"id": 101},
- "wait": None,
- "zone": None,
- },
- ],
- [
- {
- "name": "prod1",
- "state": "up",
- "type": "ethernet",
- "autoconnect": "yes",
- "mac": "52:54:00:44:9f:ba",
- "mtu": 1450,
- "ip": {"address": "192.168.176.5/24 192.168.177.5/24"},
- },
- {
- "name": "prod.100",
- "state": "up",
- "type": "vlan",
- "parent": "prod1",
- "vlan_id": 101,
- "ip": {
- "address": [
- "192.168.174.5/24",
- {"address": "a:b:c::6", "prefix": 65},
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [{"network": "192.168.5.0"}],
- },
- },
- ],
- )
-
- def test_macvlan(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "eth0",
- "ip": {
- "dhcp4": False,
- "auto6": False,
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.122.3",
- }
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "ipv6_disabled": False,
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "dns": [],
- },
- "mac": "33:24:10:24:2f:b9",
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": 1450,
- "name": "eth0-parent",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- },
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "veth0",
- "ip": {
- "dhcp4": False,
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "ipv6_disabled": False,
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "auto6": False,
- "dns": [],
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.244.1",
- }
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [
- {
- "family": socket.AF_INET,
- "network": "192.168.244.0",
- "prefix": 24,
- "gateway": None,
- "metric": -1,
- }
- ],
- },
- "mac": None,
- "macvlan": {"mode": "bridge", "promiscuous": True, "tap": False},
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "veth0.0",
- "parent": "eth0-parent",
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "macvlan",
- "wait": None,
- "zone": None,
- },
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "veth1",
- "ip": {
- "dhcp4": False,
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "ipv6_disabled": False,
- "auto6": False,
- "dns": [],
- "address": [
- {
- "prefix": 24,
- "family": socket.AF_INET,
- "address": "192.168.245.7",
- }
- ],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [
- {
- "family": socket.AF_INET,
- "network": "192.168.245.0",
- "prefix": 24,
- "gateway": None,
- "metric": -1,
- }
- ],
- },
- "mac": None,
- "macvlan": {"mode": "passthru", "promiscuous": False, "tap": True},
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "veth0.1",
- "parent": "eth0-parent",
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "macvlan",
- "wait": None,
- "zone": None,
- },
- ],
- [
- {
- "name": "eth0-parent",
- "state": "up",
- "type": "ethernet",
- "autoconnect": "yes",
- "interface_name": "eth0",
- "mac": "33:24:10:24:2f:b9",
- "mtu": 1450,
- "ip": {"address": "192.168.122.3/24", "auto6": False},
- },
- {
- "name": "veth0.0",
- "state": "up",
- "type": "macvlan",
- "parent": "eth0-parent",
- "interface_name": "veth0",
- "macvlan": {"mode": "bridge", "promiscuous": True, "tap": False},
- "ip": {
- "address": "192.168.244.1/24",
- "auto6": False,
- "route_append_only": False,
- "rule_append_only": False,
- "route": [{"network": "192.168.244.0"}],
- },
- },
- {
- "name": "veth0.1",
- "state": "up",
- "type": "macvlan",
- "parent": "eth0-parent",
- "interface_name": "veth1",
- "macvlan": {"mode": "passthru", "promiscuous": False, "tap": True},
- "ip": {
- "address": "192.168.245.7/24",
- "auto6": False,
- "route_append_only": False,
- "rule_append_only": False,
- "route": [{"network": "192.168.245.0"}],
- },
- },
- ],
- )
-
- def test_bridge_no_dhcp4_auto6(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "bridge2",
- "ip": {
- "address": [],
- "auto6": False,
- "dhcp4": False,
- "dhcp4_send_hostname": None,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "gateway4": None,
- "gateway6": None,
- "ipv6_disabled": False,
- "route": [],
- "route_append_only": False,
- "route_metric4": None,
- "route_metric6": None,
- "rule_append_only": False,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "prod2",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "bridge",
- "wait": None,
- "zone": None,
- },
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "eth1",
- "ip": {
- "address": [],
- "auto6": True,
- "dhcp4": True,
- "dhcp4_send_hostname": None,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "gateway4": None,
- "gateway6": None,
- "ipv6_disabled": False,
- "route": [],
- "route_append_only": False,
- "route_metric4": None,
- "route_metric6": None,
- "rule_append_only": False,
- },
- "mac": None,
- "controller": "prod2",
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "prod2-port1",
- "parent": None,
- "persistent_state": "present",
- "port_type": "bridge",
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- },
- ],
- [
- {
- "name": "prod2",
- "state": "up",
- "type": "bridge",
- "interface_name": "bridge2",
- "ip": {"dhcp4": False, "auto6": False},
- },
- {
- "name": "prod2-port1",
- "state": "up",
- "type": "ethernet",
- "interface_name": "eth1",
- "controller": "prod2",
- },
- ],
- )
-
- def test_bond(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "bond": {"mode": "balance-rr", "miimon": None},
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "bond1",
- "ip": {
- "dhcp4": True,
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dns": [],
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "bond1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "bond",
- "wait": None,
- "zone": None,
- }
- ],
- [{"name": "bond1", "state": "up", "type": "bond"}],
- )
-
- def test_bond_active_backup(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "bond": {"mode": "active-backup", "miimon": None},
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "bond1",
- "ip": {
- "dhcp4": True,
- "route_metric6": None,
- "route_metric4": None,
- "dns_options": [],
- "dns_search": [],
- "dhcp4_send_hostname": None,
- "gateway6": None,
- "gateway4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dns": [],
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "bond1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "bond",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "bond1",
- "state": "up",
- "type": "bond",
- "bond": {"mode": "active-backup"},
- }
- ],
- )
-
- def test_invalid_values(self):
- self.maxDiff = None
- self.do_connections_check_invalid([{}])
- self.do_connections_check_invalid([{"name": "b", "xxx": 5}])
-
- def test_ethernet_mac_address(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "ignore_errors": None,
- "interface_name": None,
- "ip": {
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "dhcp4_send_hostname": None,
- "gateway4": None,
- "gateway6": None,
- "route_metric4": None,
- "route_metric6": None,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- },
- "mac": "aa:bb:cc:dd:ee:ff",
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "5",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": None,
- "type": "ethernet",
- "zone": None,
- }
- ],
- [{"name": "5", "type": "ethernet", "mac": "AA:bb:cC:DD:ee:FF"}],
- )
-
- def test_ethernet_speed_settings(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": {"autoneg": False, "duplex": "half", "speed": 400},
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "5",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "5",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "5",
- "state": "up",
- "type": "ethernet",
- "ip": {},
- "ethernet": {"duplex": "half", "speed": 400},
- }
- ],
- initscripts_dict_expected=[
- {
- "ifcfg": {
- "BOOTPROTO": "dhcp",
- "ETHTOOL_OPTS": "autoneg off speed 400 duplex half",
- "IPV6INIT": "yes",
- "IPV6_AUTOCONF": "yes",
- "NM_CONTROLLED": "no",
- "ONBOOT": "yes",
- "TYPE": "Ethernet",
- "DEVICE": "5",
- },
- "keys": None,
- "route": None,
- "route6": None,
- "rule": None,
- "rule6": None,
- }
- ],
- )
-
- def test_bridge2(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "6643-controller",
- "ip": {
- "address": [],
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "dhcp4_send_hostname": None,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "gateway4": None,
- "gateway6": None,
- "route": [],
- "route_append_only": False,
- "route_metric4": None,
- "route_metric6": None,
- "rule_append_only": False,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "6643-controller",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "bridge",
- "wait": None,
- "zone": None,
- },
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "6643",
- "ip": {
- "address": [],
- "auto6": True,
- "dhcp4_send_hostname": None,
- "dhcp4": True,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "gateway4": None,
- "gateway6": None,
- "ipv6_disabled": False,
- "route": [],
- "route_append_only": False,
- "route_metric4": None,
- "route_metric6": None,
- "rule_append_only": False,
- },
- "mac": None,
- "controller": "6643-controller",
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "6643",
- "parent": None,
- "persistent_state": "present",
- "port_type": "bridge",
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- },
- ],
- [
- {"name": "6643-controller", "state": "up", "type": "bridge"},
- {
- "name": "6643",
- "state": "up",
- "type": "ethernet",
- "controller": "6643-controller",
- },
- ],
- )
-
- def test_infiniband(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "infiniband": {"p_key": -1, "transport_mode": "datagram"},
- "interface_name": None,
- "ip": {
- "address": [],
- "auto6": True,
- "dhcp4": True,
- "dhcp4_send_hostname": None,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "gateway4": None,
- "gateway6": None,
- "ipv6_disabled": False,
- "route": [],
- "route_append_only": False,
- "route_metric4": None,
- "route_metric6": None,
- "rule_append_only": False,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "infiniband.1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "infiniband",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "infiniband.1",
- "interface_name": "",
- "state": "up",
- "type": "infiniband",
- }
- ],
- initscripts_dict_expected=[
- {
- "ifcfg": {
- "BOOTPROTO": "dhcp",
- "CONNECTED_MODE": "no",
- "IPV6INIT": "yes",
- "IPV6_AUTOCONF": "yes",
- "NM_CONTROLLED": "no",
- "ONBOOT": "yes",
- "TYPE": "InfiniBand",
- },
- "keys": None,
- "route": None,
- "route6": None,
- "rule": None,
- "rule6": None,
- }
- ],
- )
-
- def test_infiniband2(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "infiniband": {"p_key": 5, "transport_mode": "datagram"},
- "interface_name": None,
- "ip": {
- "address": [],
- "auto6": True,
- "dhcp4": True,
- "dhcp4_send_hostname": None,
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "gateway4": None,
- "gateway6": None,
- "ipv6_disabled": False,
- "route": [],
- "route_append_only": False,
- "route_metric4": None,
- "route_metric6": None,
- "rule_append_only": False,
- },
- "mac": "11:22:33:44:55:66:77:88:99:00:"
- "11:22:33:44:55:66:77:88:99:00",
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "infiniband.2",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "infiniband",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "infiniband.2",
- "state": "up",
- "type": "infiniband",
- "mac": "11:22:33:44:55:66:77:88:99:00:"
- "11:22:33:44:55:66:77:88:99:00",
- "infiniband_p_key": 5,
- }
- ],
- initscripts_dict_expected=[
- {
- "ifcfg": {
- "BOOTPROTO": "dhcp",
- "CONNECTED_MODE": "no",
- "HWADDR": "11:22:33:44:55:66:77:88:99:00:"
- "11:22:33:44:55:66:77:88:99:00",
- "IPV6INIT": "yes",
- "IPV6_AUTOCONF": "yes",
- "NM_CONTROLLED": "no",
- "ONBOOT": "yes",
- "PKEY": "yes",
- "PKEY_ID": "5",
- "TYPE": "InfiniBand",
- },
- "keys": None,
- "route": None,
- "route6": None,
- "rule": None,
- "rule6": None,
- }
- ],
- )
-
- def test_route_metric_prefix(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "555",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [
- {
- "family": socket.AF_INET,
- "network": "192.168.45.0",
- "prefix": 24,
- "gateway": None,
- "metric": 545,
- },
- {
- "family": socket.AF_INET,
- "network": "192.168.46.0",
- "prefix": 30,
- "gateway": None,
- "metric": -1,
- },
- ],
- "dns": [],
- "dns_options": [],
- "dns_search": ["aa", "bb"],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "555",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "555",
- "state": "up",
- "type": "ethernet",
- "ip": {
- "dns_search": ["aa", "bb"],
- "route": [
- {"network": "192.168.45.0", "metric": 545},
- {"network": "192.168.46.0", "prefix": 30},
- ],
- },
- }
- ],
- initscripts_dict_expected=[
- {
- "ifcfg": {
- "BOOTPROTO": "dhcp",
- "DOMAIN": "aa bb",
- "IPV6INIT": "yes",
- "IPV6_AUTOCONF": "yes",
- "NM_CONTROLLED": "no",
- "ONBOOT": "yes",
- "TYPE": "Ethernet",
- "DEVICE": "555",
- },
- "keys": None,
- "route": "192.168.45.0/24 metric 545\n192.168.46.0/30\n",
- "route6": None,
- "rule": None,
- "rule6": None,
- }
- ],
- )
-
- def test_route_v6(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "e556",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": True,
- "rule_append_only": False,
- "route": [
- {
- "family": socket.AF_INET,
- "network": "192.168.45.0",
- "prefix": 24,
- "gateway": None,
- "metric": 545,
- },
- {
- "family": socket.AF_INET,
- "network": "192.168.46.0",
- "prefix": 30,
- "gateway": None,
- "metric": -1,
- },
- {
- "family": socket.AF_INET6,
- "network": "a:b:c:d::",
- "prefix": 64,
- "gateway": None,
- "metric": -1,
- },
- ],
- "dns": [],
- "dns_options": [],
- "dns_search": ["aa", "bb"],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": None,
- "mtu": None,
- "name": "e556",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": "external",
- }
- ],
- [
- {
- "name": "e556",
- "state": "up",
- "type": "ethernet",
- "zone": "external",
- "ip": {
- "dns_search": ["aa", "bb"],
- "route_append_only": True,
- "rule_append_only": False,
- "route": [
- {"network": "192.168.45.0", "metric": 545},
- {"network": "192.168.46.0", "prefix": 30},
- {"network": "a:b:c:d::"},
- ],
- },
- }
- ],
- nm_route_list_current=[
- [
- {"network": "192.168.40.0", "prefix": 24, "metric": 545},
- {"network": "192.168.46.0", "prefix": 30},
- {"network": "a:b:c:f::"},
- ]
- ],
- nm_route_list_expected=[
- [
- {"network": "192.168.40.0", "prefix": 24, "metric": 545},
- {"network": "192.168.46.0", "prefix": 30},
- {"network": "192.168.45.0", "prefix": 24, "metric": 545},
- {"network": "a:b:c:f::"},
- {"network": "a:b:c:d::"},
- ]
- ],
- initscripts_content_current=[
- {
- "ifcfg": "",
- "keys": None,
- "route": "192.168.40.0/24 metric 545\n192.168.46.0/30",
- "route6": "a:b:c:f::/64",
- "rule": None,
- "rule6": None,
- }
- ],
- initscripts_dict_expected=[
- {
- "ifcfg": {
- "BOOTPROTO": "dhcp",
- "DOMAIN": "aa bb",
- "IPV6INIT": "yes",
- "IPV6_AUTOCONF": "yes",
- "NM_CONTROLLED": "no",
- "ONBOOT": "yes",
- "TYPE": "Ethernet",
- "ZONE": "external",
- "DEVICE": "e556",
- },
- "keys": None,
- "route": "192.168.40.0/24 metric 545\n192.168.46.0/30\n"
- "192.168.45.0/24 metric 545\n",
- "route6": "a:b:c:f::/64\na:b:c:d::/64\n",
- "rule": None,
- "rule6": None,
- }
- ],
- )
-
- def test_802_1x_1(self):
- """
- Test private key with password
- """
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "eth0",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "private_key_password": "p@55w0rD",
- "private_key_password_flags": None,
- "client_cert": "/etc/pki/tls/client.pem",
- "ca_cert": "/etc/pki/tls/cacert.pem",
- "ca_path": None,
- "system_ca_certs": False,
- "domain_suffix_match": None,
- },
- "wireless": None,
- "mtu": None,
- "name": "eth0",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "eth0",
- "state": "up",
- "type": "ethernet",
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "private_key_password": "p@55w0rD",
- "client_cert": "/etc/pki/tls/client.pem",
- "ca_cert": "/etc/pki/tls/cacert.pem",
- },
- }
- ],
- )
-
- def test_802_1x_2(self):
- """
- Test 802.1x profile with unencrypted private key,
- domain suffix match, and system ca certs
- """
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "eth0",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "private_key_password": None,
- "private_key_password_flags": ["not-required"],
- "client_cert": "/etc/pki/tls/client.pem",
- "ca_cert": None,
- "ca_path": None,
- "system_ca_certs": True,
- "domain_suffix_match": "example.com",
- },
- "wireless": None,
- "mtu": None,
- "name": "eth0",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "eth0",
- "state": "up",
- "type": "ethernet",
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "client_cert": "/etc/pki/tls/client.pem",
- "private_key_password_flags": ["not-required"],
- "system_ca_certs": True,
- "domain_suffix_match": "example.com",
- },
- }
- ],
- )
-
- def test_802_1x_3(self):
- """
- Test 802.1x profile with unencrypted private key and ca_path
- """
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethernet": ETHERNET_DEFAULTS,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "eth0",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "private_key_password": None,
- "private_key_password_flags": ["not-required"],
- "client_cert": "/etc/pki/tls/client.pem",
- "ca_cert": None,
- "ca_path": "/etc/pki/tls/my_ca_certs",
- "system_ca_certs": False,
- "domain_suffix_match": None,
- },
- "wireless": None,
- "mtu": None,
- "name": "eth0",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "ethernet",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "eth0",
- "state": "up",
- "type": "ethernet",
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "client_cert": "/etc/pki/tls/client.pem",
- "private_key_password_flags": ["not-required"],
- "ca_path": "/etc/pki/tls/my_ca_certs",
- },
- }
- ],
- )
-
- def test_wireless_psk(self):
- """
- Test wireless connection with wpa-psk auth
- """
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "wireless1",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": None,
- "wireless": {
- "ssid": "test wireless network",
- "key_mgmt": "wpa-psk",
- "password": "p@55w0rD",
- },
- "mtu": None,
- "name": "wireless1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "wireless",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "wireless1",
- "state": "up",
- "type": "wireless",
- "wireless": {
- "ssid": "test wireless network",
- "key_mgmt": "wpa-psk",
- "password": "p@55w0rD",
- },
- }
- ],
- )
-
- def test_wireless_eap(self):
- """
- Test wireless connection with wpa-eap
- """
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present", "up"],
- "autoconnect": True,
- "check_iface_exists": True,
- "ethtool": ETHTOOL_DEFAULTS,
- "force_state_change": None,
- "ignore_errors": None,
- "interface_name": "wireless1",
- "ip": {
- "gateway6": None,
- "gateway4": None,
- "route_metric4": None,
- "auto6": True,
- "ipv6_disabled": False,
- "dhcp4": True,
- "address": [],
- "route_append_only": False,
- "rule_append_only": False,
- "route": [],
- "dns": [],
- "dns_options": [],
- "dns_search": [],
- "route_metric6": None,
- "dhcp4_send_hostname": None,
- },
- "mac": None,
- "controller": None,
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "private_key_password": "p@55w0rD",
- "private_key_password_flags": None,
- "client_cert": "/etc/pki/tls/client.pem",
- "ca_cert": "/etc/pki/tls/cacert.pem",
- "ca_path": None,
- "system_ca_certs": False,
- "domain_suffix_match": None,
- },
- "wireless": {
- "ssid": "test wireless network",
- "password": None,
- "key_mgmt": "wpa-eap",
- },
- "mtu": None,
- "name": "wireless1",
- "parent": None,
- "persistent_state": "present",
- "port_type": None,
- "state": "up",
- "type": "wireless",
- "wait": None,
- "zone": None,
- }
- ],
- [
- {
- "name": "wireless1",
- "state": "up",
- "type": "wireless",
- "wireless": {
- "ssid": "test wireless network",
- "key_mgmt": "wpa-eap",
- },
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "private_key_password": "p@55w0rD",
- "client_cert": "/etc/pki/tls/client.pem",
- "ca_cert": "/etc/pki/tls/cacert.pem",
- },
- }
- ],
- )
-
- def test_invalid_cert_path(self):
- """
- should fail if a relative path is used for 802.1x certs/keys
- """
- self.maxDiff = None
- self.do_connections_check_invalid(
- [
- {
- "name": "eth0",
- "state": "up",
- "type": "ethernet",
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "client.key",
- "client_cert": "client.pem",
- "private_key_password_flags": ["not-required"],
- "system_ca_certs": True,
- },
- }
- ]
- )
-
- def test_invalid_password_flag(self):
- """
- should fail if an invalid private key password flag is set
- """
- self.maxDiff = None
- self.do_connections_check_invalid(
- [
- {
- "name": "eth0",
- "state": "up",
- "type": "ethernet",
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "client_cert": "/etc/pki/tls/client.pem",
- "private_key_password_flags": ["bad-flag"],
- "system_ca_certs": True,
- },
- }
- ]
- )
-
- def test_802_1x_ca_path_and_system_ca_certs(self):
- """
- should fail if ca_path and system_ca_certs are used together
- """
- self.maxDiff = None
- self.do_connections_check_invalid(
- [
- {
- "name": "eth0",
- "state": "up",
- "type": "ethernet",
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "client_cert": "/etc/pki/tls/client.pem",
- "private_key_password_flags": ["not-required"],
- "ca_path": "/etc/pki/my_ca_certs",
- "system_ca_certs": True,
- },
- }
- ]
- )
-
- def test_802_1x_initscripts(self):
- """
- should fail to create ieee802_1x connection with initscripts
- """
- input_connections = [
- {
- "name": "eth0",
- "state": "up",
- "type": "ethernet",
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "client_cert": "/etc/pki/tls/client.pem",
- "private_key_password_flags": ["not-required"],
- "system_ca_certs": True,
- },
- }
- ]
-
- connections = ARGS_CONNECTIONS.validate(input_connections)
-
- self.assertRaises(
- ValidationError,
- ARGS_CONNECTIONS.validate_connection_one,
- VALIDATE_ONE_MODE_INITSCRIPTS,
- connections,
- 0,
- )
-
- def test_802_1x_unsupported_type(self):
- """
- should fail if a non ethernet/wireless connection has 802.1x settings defined
- """
- self.do_connections_check_invalid(
- [
- {
- "name": "bond0",
- "state": "up",
- "type": "bond",
- "ieee802_1x": {
- "identity": "myhost",
- "eap": "tls",
- "private_key": "/etc/pki/tls/client.key",
- "client_cert": "/etc/pki/tls/client.pem",
- "private_key_password_flags": ["not-required"],
- "system_ca_certs": True,
- },
- }
- ]
- )
-
- def test_wireless_initscripts(self):
- """
- should fail to create wireless connection with initscripts
- """
- input_connections = [
- {
- "name": "wireless1",
- "state": "up",
- "type": "wireless",
- "wireless": {
- "ssid": "test wireless network",
- "key_mgmt": "wpa-psk",
- "password": "p@55w0rD",
- },
- }
- ]
-
- connections = ARGS_CONNECTIONS.validate(input_connections)
-
- self.assertRaises(
- ValidationError,
- ARGS_CONNECTIONS.validate_connection_one,
- VALIDATE_ONE_MODE_INITSCRIPTS,
- connections,
- 0,
- )
-
- def test_wireless_unsupported_type(self):
- """
- should fail if a non wireless connection has wireless settings defined
- """
- self.do_connections_check_invalid(
- [
- {
- "name": "wireless-bond",
- "state": "up",
- "type": "bond",
- "wireless": {
- "ssid": "test wireless network",
- "key_mgmt": "wpa-psk",
- "password": "p@55w0rD",
- },
- }
- ]
- )
-
- def test_wireless_ssid_too_long(self):
- """
- should fail if ssid longer than 32 bytes
- """
- self.do_connections_check_invalid(
- [
- {
- "name": "wireless1",
- "state": "up",
- "type": "wireless",
- "wireless": {
- "ssid": "test wireless network with ssid too long",
- "key_mgmt": "wpa-psk",
- "password": "p@55w0rD",
- },
- }
- ]
- )
-
- def test_wireless_no_password(self):
- """
- should fail if wpa-psk is selected and no password provided
- """
- self.do_connections_check_invalid(
- [
- {
- "name": "wireless1",
- "state": "up",
- "type": "wireless",
- "wireless": {
- "ssid": "test wireless network",
- "key_mgmt": "wpa-psk",
- },
- }
- ]
- )
-
- def test_wireless_password_too_long(self):
- """
- should fail if wpa-psk is selected and no password provided
- """
- self.do_connections_check_invalid(
- [
- {
- "name": "wireless1",
- "state": "up",
- "type": "wireless",
- "wireless": {
- "ssid": "test wireless network",
- "key_mgmt": "wpa-psk",
- "password": "This password is too long and should "
- "not be able to validate properly",
- },
- }
- ]
- )
-
- def test_wireless_no_802_1x_for_wpa_eap(self):
- """
- should fail if no 802.1x parameters are defined for a wireless
- connection with key_mgmt=wpa-eap
- """
- self.do_connections_check_invalid(
- [
- {
- "name": "wireless1",
- "state": "up",
- "type": "wireless",
- "wireless": {
- "ssid": "test wireless network",
- "key_mgmt": "wpa-eap",
- },
- }
- ]
- )
-
- def test_wireless_no_options_defined(self):
- """
- should fail if a connection of type='wireless' does not
- have any 'wireless' settings defined
- """
- self.do_connections_check_invalid(
- [{"name": "wireless1", "state": "up", "type": "wireless"}]
- )
-
- def test_invalid_mac(self):
- self.maxDiff = None
- self.do_connections_check_invalid(
- [{"name": "b", "type": "ethernet", "mac": "aa:b"}]
- )
-
- def test_interface_name_ethernet_default(self):
- """ Use the profile name as interface_name for ethernet profiles """
- cons_without_interface_name = [{"name": "eth0", "type": "ethernet"}]
- connections = ARGS_CONNECTIONS.validate(cons_without_interface_name)
- self.assertTrue(connections[0]["interface_name"] == "eth0")
-
- def test_interface_name_ethernet_mac(self):
- """ Do not set interface_name when mac is specified """
- cons_without_interface_name = [
- {"name": "eth0", "type": "ethernet", "mac": "3b:0b:88:16:6d:1a"}
- ]
- connections = ARGS_CONNECTIONS.validate(cons_without_interface_name)
- self.assertTrue(connections[0]["interface_name"] is None)
-
- def test_interface_name_ethernet_empty(self):
- """ Allow not to restrict the profile to an interface """
- network_connections = [
- {"name": "internal_network", "type": "ethernet", "interface_name": ""}
- ]
- connections = ARGS_CONNECTIONS.validate(network_connections)
-
- self.assertTrue(connections[0]["interface_name"] is None)
-
- def test_interface_name_ethernet_None(self):
- """ Check that interface_name cannot be None """
- network_connections = [
- {"name": "internal_network", "type": "ethernet", "interface_name": None}
- ]
- self.assertRaises(
- ValidationError, ARGS_CONNECTIONS.validate, network_connections
- )
-
- def test_interface_name_ethernet_explicit(self):
- """ Use the explicitly provided interface name """
- network_connections = [
- {"name": "internal", "type": "ethernet", "interface_name": "eth0"}
- ]
- connections = ARGS_CONNECTIONS.validate(network_connections)
- self.assertEqual(connections[0]["interface_name"], "eth0")
-
- def test_interface_name_ethernet_invalid_profile(self):
- """Require explicit interface_name when the profile name is not a
- valid interface_name"""
- network_connections = [{"name": "internal:main", "type": "ethernet"}]
- self.assertRaises(
- ValidationError, ARGS_CONNECTIONS.validate, network_connections
- )
- network_connections = [
- {"name": "internal:main", "type": "ethernet", "interface_name": "eth0"}
- ]
- connections = ARGS_CONNECTIONS.validate(network_connections)
- self.assertTrue(connections[0]["interface_name"] == "eth0")
-
- def test_interface_name_ethernet_invalid_interface_name(self):
- network_connections = [
- {"name": "internal", "type": "ethernet", "interface_name": "invalid:name"}
- ]
- self.assertRaises(
- ValidationError, ARGS_CONNECTIONS.validate, network_connections
- )
-
- def test_interface_name_bond_empty_interface_name(self):
- network_connections = [
- {"name": "internal", "type": "bond", "interface_name": "invalid:name"}
- ]
- self.assertRaises(
- ValidationError, ARGS_CONNECTIONS.validate, network_connections
- )
-
- def test_interface_name_bond_profile_as_interface_name(self):
- network_connections = [{"name": "internal", "type": "bond"}]
- connections = ARGS_CONNECTIONS.validate(network_connections)
- self.assertEqual(connections[0]["interface_name"], "internal")
-
- def check_connection(self, connection, expected):
- reduced_connection = {}
- for setting in expected:
- reduced_connection[setting] = connection[setting]
- self.assertEqual(reduced_connection, expected)
-
- def check_partial_connection_zero(self, network_config, expected):
- connections = ARGS_CONNECTIONS.validate([network_config])
- self.check_connection(connections[0], expected)
-
- def check_one_connection_with_defaults(
- self, network_config, expected_changed_settings
- ):
- self.maxDiff = None
- expected = self.default_connection_settings
- expected.update(expected_changed_settings)
-
- self.do_connections_validate([expected], [network_config])
-
- def test_default_states(self):
- self.check_partial_connection_zero(
- {"name": "eth0"},
- {"actions": ["present"], "persistent_state": "present", "state": None},
- )
-
- def test_invalid_persistent_state_up(self):
- network_connections = [{"name": "internal", "persistent_state": "up"}]
- self.assertRaises(
- ValidationError, ARGS_CONNECTIONS.validate, network_connections
- )
-
- def test_invalid_persistent_state_down(self):
- network_connections = [{"name": "internal", "persistent_state": "down"}]
- self.assertRaises(
- ValidationError, ARGS_CONNECTIONS.validate, network_connections
- )
-
- def test_invalid_state_test(self):
- network_connections = [{"name": "internal", "state": "test"}]
- self.assertRaises(
- ValidationError, ARGS_CONNECTIONS.validate, network_connections
- )
-
- def test_default_states_type(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "type": "ethernet"},
- {"actions": ["present"], "persistent_state": "present", "state": None},
- )
-
- def test_persistent_state_present(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "persistent_state": "present", "type": "ethernet"},
- {"actions": ["present"], "persistent_state": "present", "state": None},
- )
-
- def test_state_present(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "state": "present", "type": "ethernet"},
- {"actions": ["present"], "persistent_state": "present", "state": None},
- )
-
- def test_state_absent(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "state": "absent"},
- {"actions": ["absent"], "persistent_state": "absent", "state": None},
- )
-
- def test_persistent_state_absent(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "persistent_state": "absent"},
- {"actions": ["absent"], "persistent_state": "absent", "state": None},
- )
-
- def test_state_present_up(self):
- self.check_partial_connection_zero(
- {
- "name": "eth0",
- "persistent_state": "present",
- "state": "up",
- "type": "ethernet",
- },
- {
- "actions": ["present", "up"],
- "persistent_state": "present",
- "state": "up",
- },
- )
-
- def test_state_present_down(self):
- self.check_partial_connection_zero(
- {
- "name": "eth0",
- "persistent_state": "present",
- "state": "down",
- "type": "ethernet",
- },
- {
- "actions": ["present", "down"],
- "persistent_state": "present",
- "state": "down",
- },
- )
-
- def test_state_absent_up_no_type(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "persistent_state": "absent", "state": "up"},
- {"actions": ["absent", "up"], "persistent_state": "absent", "state": "up"},
- )
-
- def test_state_absent_up_type(self):
- # if type is specified, present should happen, too
- self.check_partial_connection_zero(
- {
- "name": "eth0",
- "persistent_state": "absent",
- "state": "up",
- "type": "ethernet",
- },
- {
- "actions": ["present", "absent", "up"],
- "persistent_state": "absent",
- "state": "up",
- },
- )
-
- def test_state_absent_down(self):
- # if type is specified, present should happen, too
- self.check_partial_connection_zero(
- {"name": "eth0", "persistent_state": "absent", "state": "down"},
- {
- "actions": ["absent", "down"],
- "persistent_state": "absent",
- "state": "down",
- },
- )
-
- def test_state_up_no_type(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "state": "up"},
- {
- "actions": ["present", "up"],
- "persistent_state": "present",
- "state": "up",
- },
- )
-
- def test_state_up_type(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "state": "up", "type": "ethernet"},
- {
- "actions": ["present", "up"],
- "persistent_state": "present",
- "state": "up",
- },
- )
-
- def test_state_down_no_type(self):
- self.check_partial_connection_zero(
- {"name": "eth0", "state": "down"},
- {
- "actions": ["present", "down"],
- "persistent_state": "present",
- "state": "down",
- },
- )
-
- def test_full_state_present_no_type(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["present"],
- "ignore_errors": None,
- "name": "eth0",
- "state": None,
- "persistent_state": "present",
- }
- ],
- [{"name": "eth0", "persistent_state": "present"}],
- )
-
- def test_full_state_present_type_defaults(self):
- self.check_one_connection_with_defaults(
- {"name": "eth0", "type": "ethernet", "persistent_state": "present"},
- {
- "actions": ["present"],
- "interface_name": "eth0",
- "name": "eth0",
- "persistent_state": "present",
- "state": None,
- "type": "ethernet",
- },
- )
-
- def test_full_state_absent_no_type(self):
- self.maxDiff = None
- self.do_connections_validate(
- [
- {
- "actions": ["absent"],
- "ignore_errors": None,
- "name": "eth0",
- "state": None,
- "persistent_state": "absent",
- }
- ],
- [{"name": "eth0", "persistent_state": "absent"}],
- )
-
- def test_full_state_absent_defaults(self):
- self.maxDiff = None
- self.check_one_connection_with_defaults(
- {"name": "eth0", "persistent_state": "absent", "type": "ethernet"},
- {
- "actions": ["absent"],
- "ignore_errors": None,
- "name": "eth0",
- "state": None,
- "persistent_state": "absent",
- "type": "ethernet",
- "interface_name": "eth0",
- },
- )
-
- def _test_ethtool_changes(self, input_ethtool, expected_ethtool):
- """
- When passing a dictionary 'input_features' with each feature and their
- value to change, and a dictionary 'expected_features' with the expected
- result in the configuration, the expected and resulting connection are
- created and validated.
- """
- expected_ethtool_full = copy.deepcopy(ETHTOOL_DEFAULTS)
- for key in list(expected_ethtool_full):
- if key in expected_ethtool:
- expected_ethtool_full[key].update(expected_ethtool[key])
-
- input_connection = {
- "ethtool": input_ethtool,
- "name": "5",
- "persistent_state": "present",
- "type": "ethernet",
- }
-
- expected_connection = {
- "actions": ["present"],
- "ethtool": expected_ethtool_full,
- "interface_name": "5",
- "persistent_state": "present",
- "state": None,
- "type": "ethernet",
- }
- self.check_one_connection_with_defaults(input_connection, expected_connection)
-
- def test_set_ethtool_feature(self):
- """
- When passing the name of an non-deprecated ethtool feature, their
- current version is updated.
- """
- input_ethtool = {"features": {"tx_tcp_segmentation": "yes"}}
- expected_ethtool = {"features": {"tx_tcp_segmentation": True}}
- self._test_ethtool_changes(input_ethtool, expected_ethtool)
-
- def test_set_deprecated_ethtool_feature(self):
- """
- When passing a deprecated name, their current version is updated.
- """
- input_ethtool = {"features": {"esp-hw-offload": "yes"}}
- expected_ethtool = {"features": {"esp_hw_offload": True}}
- self._test_ethtool_changes(input_ethtool, expected_ethtool)
-
- def test_invalid_ethtool_settings(self):
- """
- When both the deprecated and current version of a feature are stated,
- a Validation Error is raised.
- """
- input_features = {"tx-tcp-segmentation": "yes", "tx_tcp_segmentation": "yes"}
- features_validator = (
- network_lsr.argument_validator.ArgValidator_DictEthtoolFeatures()
- )
- self.assertValidationError(features_validator, input_features)
-
- def test_deprecated_ethtool_names(self):
- """
- Test that for each validator in
- ArgValidator_DictEthtoolFeatures.nested there is another non-deprecated
- validator that has the name from the deprecated_by attribute"
- """
- validators = (
- network_lsr.argument_validator.ArgValidator_DictEthtoolFeatures().nested
- )
- for name, validator in validators.items():
- if isinstance(
- validator, network_lsr.argument_validator.ArgValidatorDeprecated
- ):
- assert validator.deprecated_by in validators.keys()
-
- def test_valid_persistent_state(self):
- """
- Test that when persistent_state is present and state is set to present
- or absent, a ValidationError raises.
- """
- validator = network_lsr.argument_validator.ArgValidator_DictConnection()
- input_connection = {
- "name": "test",
- "persistent_state": "present",
- "state": "present",
- "type": "ethernet",
- }
- self.assertValidationError(validator, input_connection)
- input_connection.update({"state": "absent"})
- self.assertValidationError(validator, input_connection)
-
- def test_dns_options_argvalidator(self):
- """
- Test that argvalidator for validating dns_options value is correctly defined.
- """
- validator = network_lsr.argument_validator.ArgValidator_DictIP()
-
- false_testcase_1 = {
- "dns_options": ["attempts:01"],
- }
- false_testcase_2 = {
- "dns_options": ["debug$"],
- }
- false_testcase_3 = {
- "dns_options": ["edns00"],
- }
- false_testcase_4 = {
- "dns_options": ["ndots:"],
- }
- false_testcase_5 = {
- "dns_options": ["no-check-name"],
- }
- false_testcase_6 = {
- "dns_options": ["no-rel0ad"],
- }
- false_testcase_7 = {
- "dns_options": ["bugno-tld-query"],
- }
- false_testcase_8 = {
- "dns_options": ["etator"],
- }
- false_testcase_9 = {
- "dns_options": ["singlerequest"],
- }
- false_testcase_10 = {
- "dns_options": ["single-request-reopen:2"],
- }
- false_testcase_11 = {
- "dns_options": ["timeout"],
- }
- false_testcase_12 = {
- "dns_options": ["*trust-ad*"],
- }
- false_testcase_13 = {
- "dns_options": ["use1-vc2-use-vc"],
- }
-
- self.assertValidationError(validator, false_testcase_1)
- self.assertValidationError(validator, false_testcase_2)
- self.assertValidationError(validator, false_testcase_3)
- self.assertValidationError(validator, false_testcase_4)
- self.assertValidationError(validator, false_testcase_5)
- self.assertValidationError(validator, false_testcase_6)
- self.assertValidationError(validator, false_testcase_7)
- self.assertValidationError(validator, false_testcase_8)
- self.assertValidationError(validator, false_testcase_9)
- self.assertValidationError(validator, false_testcase_10)
- self.assertValidationError(validator, false_testcase_11)
- self.assertValidationError(validator, false_testcase_12)
- self.assertValidationError(validator, false_testcase_13)
-
- true_testcase_1 = {
- "dns_options": ["attempts:3"],
- }
- true_testcase_2 = {
- "dns_options": ["debug"],
- }
- true_testcase_3 = {
- "dns_options": ["ndots:3", "single-request-reopen"],
- }
- true_testcase_4 = {
- "dns_options": ["ndots:2", "timeout:3"],
- }
- true_testcase_5 = {
- "dns_options": ["no-check-names"],
- }
- true_testcase_6 = {
- "dns_options": ["no-reload"],
- }
- true_testcase_7 = {
- "dns_options": ["no-tld-query"],
- }
- true_testcase_8 = {
- "dns_options": ["rotate"],
- }
- true_testcase_9 = {
- "dns_options": ["single-request"],
- }
- true_testcase_10 = {
- "dns_options": ["single-request-reopen"],
- }
- true_testcase_11 = {
- "dns_options": ["trust-ad"],
- }
- true_testcase_12 = {
- "dns_options": ["use-vc"],
- }
-
- self.assertEqual(
- validator.validate(true_testcase_1)["dns_options"], ["attempts:3"]
- )
- self.assertEqual(validator.validate(true_testcase_2)["dns_options"], ["debug"])
- self.assertEqual(
- validator.validate(true_testcase_3)["dns_options"],
- ["ndots:3", "single-request-reopen"],
- )
- self.assertEqual(
- validator.validate(true_testcase_4)["dns_options"], ["ndots:2", "timeout:3"]
- )
- self.assertEqual(
- validator.validate(true_testcase_5)["dns_options"], ["no-check-names"]
- )
- self.assertEqual(
- validator.validate(true_testcase_6)["dns_options"], ["no-reload"]
- )
- self.assertEqual(
- validator.validate(true_testcase_7)["dns_options"], ["no-tld-query"]
- )
- self.assertEqual(validator.validate(true_testcase_8)["dns_options"], ["rotate"])
- self.assertEqual(
- validator.validate(true_testcase_9)["dns_options"], ["single-request"]
- )
- self.assertEqual(
- validator.validate(true_testcase_10)["dns_options"],
- ["single-request-reopen"],
- )
- self.assertEqual(
- validator.validate(true_testcase_11)["dns_options"], ["trust-ad"]
- )
- self.assertEqual(
- validator.validate(true_testcase_12)["dns_options"], ["use-vc"]
- )
-
- def test_set_deprecated_master(self):
- """
- When passing the deprecated "master" it is updated to "controller".
- """
- input_connections = [
- {
- "name": "prod2",
- "state": "up",
- "type": "bridge",
- },
- {
- "name": "prod2-port1",
- "state": "up",
- "type": "ethernet",
- "interface_name": "eth1",
- "master": "prod2",
- },
- ]
- connections = ARGS_CONNECTIONS.validate(input_connections)
- self.assertTrue(len(connections) == 2)
- for connection in connections:
- self.assertTrue("controller" in connection)
- self.assertTrue("master" not in connection)
-
- def test_set_deprecated_slave_type(self):
- """
- When passing the deprecated "slave_type" it is updated to "port_type".
- """
- input_connections = [
- {
- "name": "prod2",
- "state": "up",
- "type": "bridge",
- },
- {
- "name": "prod2-port1",
- "state": "up",
- "type": "ethernet",
- "interface_name": "eth1",
- "controller": "prod2",
- "slave_type": "bridge",
- },
- ]
- connections = ARGS_CONNECTIONS.validate(input_connections)
- self.assertTrue(len(connections) == 2)
- for connection in connections:
- self.assertTrue("port_type" in connection)
- self.assertTrue("slave_type" not in connection)
-
-
-@my_test_skipIf(nmutil is None, "no support for NM (libnm via pygobject)")
-class TestNM(unittest.TestCase):
- def test_connection_ensure_setting(self):
- con = NM.SimpleConnection.new()
- self.assertIsNotNone(con)
- self.assertTrue(GObject.type_is_a(con, NM.Connection))
-
- s = nmutil.connection_ensure_setting(con, NM.SettingWired)
- self.assertIsNotNone(s)
- self.assertTrue(GObject.type_is_a(s, NM.SettingWired))
-
- s2 = nmutil.connection_ensure_setting(con, NM.SettingWired)
- self.assertIsNotNone(s2)
- self.assertIs(s, s2)
- self.assertTrue(GObject.type_is_a(s, NM.SettingWired))
-
- def test_connection_list(self):
- connections = nmutil.connection_list()
- self.assertIsNotNone(connections)
-
- def test_path_to_glib_bytes(self):
- result = Util.path_to_glib_bytes("/my/test/path")
- self.assertIsInstance(result, Util.GLib().Bytes)
- self.assertEqual(result.get_data(), b"file:///my/test/path\x00")
-
-
-class TestUtils(unittest.TestCase):
- def test_mac_ntoa(self):
- mac_bytes = b"\xaa\xbb\xcc\xdd\xee\xff"
- self.assertEqual(Util.mac_ntoa(mac_bytes), "aa:bb:cc:dd:ee:ff")
-
- def test_convert_passwd_flags_nm(self):
- test_cases = [
- ([], 0),
- (["none"], 0),
- (["agent-owned"], 1),
- (["not-saved"], 2),
- (["agent-owned", "not-saved"], 3),
- (
- ["not-required"],
- 4,
- ),
- (["agent-owned", "not-required"], 5),
- (["not-saved", "not-required"], 6),
- (["agent-owned", "not-saved", "not-required"], 7),
- ]
-
- for test_case in test_cases:
- result = Util.convert_passwd_flags_nm(test_case[0])
- self.assertEqual(result, test_case[1])
-
-
-class TestSysUtils(unittest.TestCase):
- def test_link_read_permaddress(self):
- self.assertEqual(SysUtil._link_read_permaddress("lo"), "00:00:00:00:00:00")
- self.assertEqual(SysUtil._link_read_permaddress("fakeiface"), None)
- self.assertEqual(SysUtil._link_read_permaddress("morethansixteenchars"), None)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/linux-system-roles.network/tests/unit/test_nm_provider.py b/roles/linux-system-roles.network/tests/unit/test_nm_provider.py
deleted file mode 100644
index ed8563f..0000000
--- a/roles/linux-system-roles.network/tests/unit/test_nm_provider.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-""" Tests for network_connections Ansible module """
-# SPDX-License-Identifier: BSD-3-Clause
-
-import os
-import sys
-
-TESTS_BASEDIR = os.path.dirname(os.path.abspath(__file__))
-sys.path.insert(1, os.path.join(TESTS_BASEDIR, "../..", "library"))
-sys.path.insert(1, os.path.join(TESTS_BASEDIR, "../..", "module_utils"))
-
-try:
- from unittest import mock
-except ImportError: # py2
- import mock
-
-sys.modules["ansible"] = mock.Mock()
-sys.modules["ansible.module_utils.basic"] = mock.Mock()
-sys.modules["ansible.module_utils"] = mock.Mock()
-sys.modules["ansible.module_utils.network_lsr"] = __import__("network_lsr")
-
-with mock.patch.dict("sys.modules", {"gi": mock.Mock(), "gi.repository": mock.Mock()}):
- # pylint: disable=import-error, wrong-import-position
- from network_lsr import nm_provider
-
-
-def test_get_nm_ethtool_feature():
- """ Test get_nm_ethtool_feature() """
- with mock.patch.object(nm_provider.Util, "NM") as nm_mock:
- nm_feature = nm_provider.get_nm_ethtool_feature("esp_hw_offload")
- assert nm_feature == nm_mock.return_value.ETHTOOL_OPTNAME_FEATURE_ESP_HW_OFFLOAD
-
-
-def test_get_nm_ethtool_coalesce():
- """ Test get_nm_ethtool_coalesce() """
- with mock.patch.object(nm_provider.Util, "NM") as nm_mock:
- nm_feature = nm_provider.get_nm_ethtool_coalesce("rx_frames")
- assert nm_feature == nm_mock.return_value.ETHTOOL_OPTNAME_COALESCE_RX_FRAMES
diff --git a/roles/linux-system-roles.network/tox.ini b/roles/linux-system-roles.network/tox.ini
deleted file mode 100644
index 6ff26e7..0000000
--- a/roles/linux-system-roles.network/tox.ini
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: MIT
-[lsr_config]
-lsr_enable = true
-
-[lsr_yamllint]
-configfile = .yamllint.yml
-configbasename = .yamllint.yml
-
-[lsr_ansible-lint]
-configfile = .ansible-lint
-
-[testenv]
-setenv =
- RUN_PYLINT_EXCLUDE = ^(\..*|ensure_provider_tests\.py|print_all_options\.py)$
- RUN_PYTEST_SETUP_MODULE_UTILS = true
- RUN_PYLINT_SETUP_MODULE_UTILS = true
- RUN_PYTEST_EXTRA_ARGS = -v
- RUN_FLAKE8_EXTRA_ARGS = --exclude tests/ensure_provider_tests.py,scripts/print_all_options.py,tests/network/ensure_provider_tests.py,.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg
- LSR_PUBLISH_COVERAGE = normal
-
-[testenv:shellcheck]
-commands = bash -c 'echo shellcheck is currently not enabled - please fix this'
diff --git a/roles/oatakan.ansible-role-ovirt/.travis.yml b/roles/oatakan.ansible-role-ovirt/.travis.yml
deleted file mode 100644
index 36bbf62..0000000
--- a/roles/oatakan.ansible-role-ovirt/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-language: python
-python: "2.7"
-
-# Use the new container infrastructure
-sudo: false
-
-# Install ansible
-addons:
- apt:
- packages:
- - python-pip
-
-install:
- # Install ansible
- - pip install ansible
-
- # Check ansible version
- - ansible --version
-
- # Create ansible.cfg with correct roles_path
- - printf '[defaults]\nroles_path=../' >ansible.cfg
-
-script:
- # Basic role syntax check
- - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
-
-notifications:
- webhooks: https://galaxy.ansible.com/api/v1/notifications/
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/README.md b/roles/oatakan.ansible-role-ovirt/README.md
deleted file mode 100644
index 796c7e3..0000000
--- a/roles/oatakan.ansible-role-ovirt/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the ovirt module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/oatakan.ansible-role-ovirt/defaults/main.yml b/roles/oatakan.ansible-role-ovirt/defaults/main.yml
deleted file mode 100644
index ee1404d..0000000
--- a/roles/oatakan.ansible-role-ovirt/defaults/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-
-role_action: provision
-
-ovirt:
- url: ''
- username: ''
- password: ''
-
-ansible_port: 22
-instance_wait_retry_limit: 600
-instance_wait_connection_timeout: 300
-
-ip_wait_retry_limit: 600
-
-wait_for_static_ip_assigned: yes
-
diff --git a/roles/oatakan.ansible-role-ovirt/meta/.galaxy_install_info b/roles/oatakan.ansible-role-ovirt/meta/.galaxy_install_info
deleted file mode 100644
index a037129..0000000
--- a/roles/oatakan.ansible-role-ovirt/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Fri Oct 15 18:59:19 2021
-version: ''
diff --git a/roles/oatakan.ansible-role-ovirt/meta/main.yml b/roles/oatakan.ansible-role-ovirt/meta/main.yml
deleted file mode 100644
index 15dc819..0000000
--- a/roles/oatakan.ansible-role-ovirt/meta/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-galaxy_info:
- author: Orcun Atakan
- description: Ansible galaxy role for creating virtual machines on ovirt/rhev
- company: Red Hat
-
- license: license (GPLv2, CC-BY, etc)
-
- min_ansible_version: 1.2
-
- platforms:
- - name: EL
- versions:
- - all
-
- categories:
- - all
-
- galaxy_tags:
- - ovirt
- - rhev
-
-collections:
- - ovirt.ovirt:==1.3.1
-
-dependencies: []
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/deprovision.yml b/roles/oatakan.ansible-role-ovirt/tasks/deprovision.yml
deleted file mode 100644
index 477d815..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/deprovision.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-
-- name: remove vms
- ovirt_vm:
- auth: "{{ ovirt_auth }}"
- name: "{{ item }}"
- cluster: "{{ providers.ovirt.cluster | default('Default') }}"
- storage_domain: "{{ item.storage_domain | default(omit) }}"
- state: absent
- wait: yes
- async: 7200
- poll: 0
- register: undeploy
- loop: "{{ ansible_play_hosts | intersect(nodes | map(attribute='name') | list) }}"
- when:
- - nodes is defined
- - hostvars[item].name is defined
-
-- name: wait for vms to be deleted
- async_status:
- jid: "{{ item.ansible_job_id }}"
- register: vm_remove
- until: vm_remove.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
- loop: "{{ undeploy.results }}"
- when:
- - nodes is defined
- - undeploy.results is defined
- - item.ansible_job_id is defined
-
-- name: delete additional disks
- ovirt_disk:
- auth: "{{ ovirt_auth }}"
- name: "{% if item.1.name_prefix | default(true) %}{{ item.0.name }}_{% endif %}{{ item.1.name }}"
- vm_name: "{{ item.0.name }}"
- storage_domain: "{{ item.1.storage_domain | default(omit) }}"
- state: absent
- wait: no
- async: 7200
- poll: 0
- register: delete_disks
- with_subelements:
- - "{{ nodes | json_query(query) }}"
- - disks
- - skip_missing: yes
- when:
- - nodes is defined
- - item.1 is defined
- - item.1.storage_domain is defined
- vars:
- query: "@[?contains(`{{ ansible_play_hosts }}`, name)]"
-
-- name: wait for disk deletion to complete
- async_status:
- jid: "{{ item.ansible_job_id }}"
- register: disks_deletion
- until: disks_deletion.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
- loop: "{{ delete_disks.results }}"
- when:
- - nodes is defined
- - delete_disks.results is defined
- - item.ansible_job_id is defined
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/main.yml b/roles/oatakan.ansible-role-ovirt/tasks/main.yml
deleted file mode 100644
index 8f71530..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-
-- block:
- - name: obtain SSO token with using username/password credentials
- ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
-
- - include_tasks: provision.yml
- when: role_action == 'provision'
-
- - include_tasks: deprovision.yml
- run_once: yes
- when: role_action == 'deprovision'
- always:
- - name: revoke the SSO token
- ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
- ovirt_auth: "{{ ovirt_auth }}"
- state: absent
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/preflight_check.yml b/roles/oatakan.ansible-role-ovirt/tasks/preflight_check.yml
deleted file mode 100644
index 644057e..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/preflight_check.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-
-- name: fail if cluster name is not specified
- fail:
- msg: "cluster name is not specified, please specify providers.ovirt.cluster"
- when: (providers.ovirt.cluster | default(None)) is undefined
-
-- name: get the datacenter name
- ovirt_datacenter_info:
- auth: "{{ ovirt_auth }}"
- pattern: "Clusters.name = {{ providers.ovirt.cluster }}"
- register: datacenter_info
-
-- name: fail if datacenter is not found
- fail:
- msg: "data center is not found"
- when: datacenter_info.ovirt_datacenters | length == 0
-
-- name: get storage information
- ovirt_storage_domain_info:
- auth: "{{ ovirt_auth }}"
- pattern: "datacenter={{ datacenter_info.ovirt_datacenters[0].name }}"
- register: storage_info
-
-- name: set data domain
- set_fact:
- disk_storage_domain: "{{ storage_info.ovirt_storage_domains|json_query(the_query)|list|first|default(None) }}"
- vars:
- the_query: "[?type=='data']"
-
-- name: set iso domain (deprecated as of oVirt/RHV 4.3)
- set_fact:
- iso_domain: "{{ storage_info.ovirt_storage_domains|json_query(the_query)|list|first|default(None) }}"
- vars:
- the_query: "[?type=='iso']"
-
-- include_tasks: template_check.yml
- loop: "{{ nodes }}"
- loop_control:
- loop_var: node
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/provision.yml b/roles/oatakan.ansible-role-ovirt/tasks/provision.yml
deleted file mode 100644
index 75ea7fb..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/provision.yml
+++ /dev/null
@@ -1,183 +0,0 @@
----
-
-- include_tasks: preflight_check.yml
-
-- name: clone from template
- ovirt_vm:
- auth: "{{ ovirt_auth }}"
- name: "{{ item.name }}"
- template: "{{ item.template | default(omit) }}"
- cluster: "{{ providers.ovirt.cluster | default('Default') }}"
- state: present
- wait: yes
- memory: "{{ item.memory }}MiB"
- memory_max: "{{ ((item.memory_max | string) + 'MiB') if item.memory_max is defined else omit }}"
- memory_guaranteed: "{{ ((item.memory_guaranteed | string) + 'MiB') if item.memory_guaranteed is defined else omit }}"
- cpu_mode: "{{ item.cpu_mode | default(omit) }}"
- cpu_cores: "{{ item.cpu_cores | default(omit) }}"
- cpu_sockets: "{{ item.cpu }}"
- cpu_threads: "{{ item.cpu_threads | default(omit) }}"
- cd_iso: "{{ node_iso_file[item] | default(omit) }}"
- bios_type: "{{ item.bios_type | default(omit) }}"
- ballooning_enabled: "{{ item.ballooning_enabled | default(omit) }}"
- graphical_console: "{{ item.graphical_console | default(omit) }}"
- host: "{{ item.host | default(omit) }}"
- host_devices: "{{ item.host_devices | default(omit) }}"
- placement_policy: "{{ item.placement_policy | default(omit) }}"
- storage_domain: "{{ item.storage_domain | default(omit) }}"
- type: "{{ item.type | default('server') }}"
- high_availability: true
- nics:
- - name: nic1
- profile_name: "{{ item.networks[0].profile_name | default(item.networks[0].name) }}"
- network: "{{ item.networks[0].name }}"
- custom_properties: "{{ item.custom_properties | default(omit) }}"
- operating_system: "{{ item.operating_system | default(omit) }}"
- async: 7200
- poll: 0
- register: deploy
- loop: "{{ nodes }}"
- when:
- - nodes is defined
-
-- name: wait for instance creation to complete
- async_status:
- jid: "{{ item.ansible_job_id }}"
- register: deployed_instances
- until: deployed_instances.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
- no_log: true
- loop: "{{ deploy.results }}"
- when:
- - nodes is defined
- - deploy.results is defined
- - item.ansible_job_id is defined
-
-- name: create additional disks
- ovirt_disk:
- auth: "{{ ovirt_auth }}"
- name: "{% if item.1.name_prefix | default(true) %}{{ item.0.name }}_{% endif %}{{ item.1.name }}"
- vm_name: "{{ item.0.name }}"
- size: "{{ item.1.size | default(omit) }}"
- format: "{{ item.1.format | default(omit) }}"
- interface: "{{ item.1.interface | default(omit) }}"
- bootable: "{{ item.1.bootable | default(omit) }}"
- storage_domain: "{{ item.1.storage_domain | default(omit) }}"
- activate: yes
- state: present
- wait: yes
- async: 7200
- poll: 0
- register: create_disks
- with_subelements:
- - "{{ nodes }}"
- - disks
- - skip_missing: yes
- when:
- - nodes is defined
- - item.1 is defined
-
-- name: wait for disk creation to complete
- async_status:
- jid: "{{ item.ansible_job_id }}"
- register: disks_creation
- until: disks_creation.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
- loop: "{{ create_disks.results }}"
- when:
- - nodes is defined
- - create_disks.results is defined
- - item.ansible_job_id is defined
-
-- include_tasks: wait_for_disk_pre29.yml
- when: ansible_version.full is version('2.9', '<')
-
-- include_tasks: wait_for_disk.yml
- when: ansible_version.full is version('2.9', '>=')
-
-- name: linux - start and customize
- ovirt_vm:
- auth: "{{ ovirt_auth }}"
- name: "{{ item.name }}"
- state: running
- cloud_init:
- nic_boot_protocol: "{{ 'static' if item.networks[0].ip is defined and item.networks[0].netmask is defined and item.networks[0].gateway is defined else 'dhcp' }}"
- nic_ip_address: "{{ item.networks[0].ip | default('') }}"
- nic_netmask: "{{ item.networks[0].netmask | default('') }}"
- nic_gateway: "{{ item.networks[0].gateway | default('') }}"
- nic_name: "{{ item.networks[0].nic_name | default(item.networks[0].device_name) | default('eth0') }}"
- nic_on_boot: true
- host_name: "{{ item.name }}.{{ item.domain | default('') }}"
- dns_servers: "{{ (item.dns_servers | default([])) | join(' ') }}"
- custom_script: "{{ item.custom_script | default('') }}"
- user_name: "{{ item.user_name | default('') }}"
- root_password: "{{ item.root_password | default('') }}"
- async: 7200
- poll: 0
- register: deploy_linux
- loop: "{{ nodes }}"
- when:
- - nodes is defined
- - item.sysprep is not defined
-
-- name: windows - start and customize
- ovirt_vm:
- auth: "{{ ovirt_auth }}"
- name: "{{ item.name }}"
- state: running
- sysprep:
- custom_script: "{{ lookup('template', 'templates/unattended.xml.j2') }}"
- host_name: "{{ item.name | default('') }}"
- domain: "{{ item.domain | default('') }}"
- user_name: "{{ item.user_name | default(ansible_user) }}"
- root_password: "{{ item.root_password | default(ansible_password) }}"
- async: 7200
- poll: 0
- register: deploy_windows
- loop: "{{ nodes }}"
- when:
- - nodes is defined
- - item.sysprep is defined
-
-- name: combine deployment results
- set_fact:
- deploy_results: "{{ deploy_results|default([]) + [ item ] }}"
- loop: "{{ deploy_linux.results + deploy_windows.results }}"
- when:
- - nodes is defined
- - item.ansible_job_id is defined
-
-- name: wait for vms to be started
- async_status:
- jid: "{{ item.ansible_job_id }}"
- register: instances
- until: instances.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
- loop: "{{ deploy_results }}"
- when:
- - nodes is defined
- - deploy_results is defined
- - item.ansible_job_id is defined
-
-- name: assign tags to provisioned vms
- ovirt_tag:
- name: "{{ item.1 }}_{{ item.0.item.item[item.1] }}"
- vms: ["{{ item.0.item.item.name }}"]
- state: attached
- with_nested:
- - "{{ instances.results }}"
- - [ 'app_name', 'role' ]
- when:
- - nodes is defined
- - instances.results is defined
- - item.0.vm is defined
- - item.0.item.item[item.1] is defined
-
-- include_tasks: wait_for_ip_pre29.yml
- when: ansible_version.full is version('2.9', '<')
-
-- include_tasks: wait_for_ip.yml
- when: ansible_version.full is version('2.9', '>=')
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/template_check.yml b/roles/oatakan.ansible-role-ovirt/tasks/template_check.yml
deleted file mode 100644
index 6e839d5..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/template_check.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-
-- name: fail if no template and disk specified
- fail:
- msg: "at least one disk must be specified when no template is used"
- when:
- - node.template is undefined
- - node.disks[0].size is undefined | default(False)
-
-- name: fail if both template and iso is specified
- fail:
- msg: "template and cd_iso are mutually exclusive, only define one of them"
- when:
- - node.template is defined
- - node.cd_iso is defined
-
-- block:
- - name: check if template exists
- ovirt_template_info:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ node.template }} and datacenter={{ datacenter_info.ovirt_datacenters[0].name }}"
- register: template_info
-
- - name: fail with message
- fail:
- msg: "template ({{ node.template }}) could not be found, make sure it exists"
- when: ( template_info.ovirt_templates | default([]) ) | length == 0
- when: node.template is defined
-
-- block:
- - name: check iso file on data domain
- ovirt_disk_info:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ node.cd_iso }}"
- register: ovirt_disk_main_iso
-
- - name: fail with message
- fail:
- msg: "iso file ({{ node.cd_iso }}) could not be found on the data domain and iso domain does not exists"
- when:
- - (ovirt_disk_main_iso.ovirt_disks[0].id | default(None)) is undefined
- - iso_domain is undefined or iso_domain|length == 0
- when: node.cd_iso is defined
-
-- name: set iso file
- set_fact:
- node_iso_file: '{{ node_iso_file | default({}) | combine({node.name: ovirt_disk_main_iso.ovirt_disks[0].id | default(node.cd_iso) | default(None)}) }}'
- when: (node_iso_file[node.name] | default(None)) is undefined
-
-- name: set os type
- set_fact:
- nodes_os_type: '{{ nodes_os_type | default({}) | combine({node.name: node.os_type | default(template_info.ovirt_templates[0].os.type) | default(None)}) }}'
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/wait_for_disk.yml b/roles/oatakan.ansible-role-ovirt/tasks/wait_for_disk.yml
deleted file mode 100644
index 5c4e220..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/wait_for_disk.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: wait until the image is unlocked by the oVirt engine
- ovirt_disk_info:
- auth: "{{ ovirt_auth }}"
- pattern: "name={% if item.1.name_prefix | default(true) %}{{ item.0.name }}_{% endif %}{{ item.1.name }}"
- register: ovirt_disk_info
- until: (ovirt_disk_info.ovirt_disks is defined) and (ovirt_disk_info.ovirt_disks | length > 0) and (ovirt_disk_info.ovirt_disks[0].status != "locked")
- retries: 10
- delay: 3
- with_subelements:
- - "{{ nodes }}"
- - disks
- - skip_missing: yes
- when:
- - nodes is defined
- - disks_creation.results is defined
- - item.1 is defined
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/wait_for_disk_pre29.yml b/roles/oatakan.ansible-role-ovirt/tasks/wait_for_disk_pre29.yml
deleted file mode 100644
index a47bcfc..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/wait_for_disk_pre29.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-
-- name: wait until the image is unlocked by the oVirt engine (<2.9)
- ovirt_disk_facts:
- auth: "{{ ovirt_auth }}"
- pattern: "name={% if item.1.name_prefix | default(true) %}{{ item.0.name }}_{% endif %}{{ item.1.name }}"
- until: (ovirt_disks is defined) and (ovirt_disks | length > 0) and (ovirt_disks[0].status != "locked")
- retries: 10
- delay: 3
- with_subelements:
- - "{{ nodes }}"
- - disks
- - skip_missing: yes
- when:
- - nodes is defined
- - disks_creation.results is defined
- - item.1 is defined
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/wait_for_ip.yml b/roles/oatakan.ansible-role-ovirt/tasks/wait_for_ip.yml
deleted file mode 100644
index 9c445bc..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/wait_for_ip.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-
-- name: waiting for ip address to be assigned
- ovirt_nic_info:
- auth: "{{ ovirt_auth }}"
- vm: "{{ item.vm.name }}"
- name: nic1
- fetch_nested: yes
- nested_attributes:
- - ips
- register: nics
- until:
- - nics.ovirt_nics | length > 0
- - nics.ovirt_nics[0].reported_devices | length > 0
- - reported_ipv4_addresses | length
- retries: 300
- delay: 10
- loop: "{{ instances.results }}"
- when:
- - nodes is defined
- - instances.results is defined
- - item.vm is defined
- vars:
- reported_ipv4_addresses: "{{ (nics.ovirt_nics[0].reported_devices | json_query('[*].ips[?version==`v4`].address') | flatten) | default([]) }}"
-
-- name: waiting for servers to come online on predefined ip
- wait_for:
- host: "{{ item.networks[0].ip }}"
- port: "{{ item.ansible_port | default(ansible_port) | default('22') }}"
- loop: "{{ nodes }}"
- when:
- - nodes is defined
- - item.networks is defined
- - item.networks[0].ip is defined
-
-- name: waiting for servers to come online on dhcp ip
- wait_for:
- host: "{{ (item.ovirt_nics[0].reported_devices | json_query('[*].ips[?version==`v4`].address'))[0][0] | default('') }}"
- port: "{{ item.item.item.item.ansible_port | default(ansible_port) | default('22') }}"
- loop: "{{ nics.results }}"
- when:
- - nodes is defined
- - nics.results is defined
- - item.ovirt_nics is defined
- - item.item.item.item.networks is defined
- - item.item.item.item.networks[0].ip is not defined
-
-- name: waiting for ovirt to show the predefined ip
- ovirt_nic_info:
- auth: "{{ ovirt_auth }}"
- vm: "{{ item.name }}"
- name: nic1
- fetch_nested: yes
- nested_attributes:
- - ips
- register: nics
- until:
- - nics.ovirt_nics | length > 0
- - nics.ovirt_nics[0].reported_devices | length > 0
- - item.networks[0].ip in reported_ipv4_addresses
- retries: "{{ ip_wait_retry_limit }}"
- delay: 10
- loop: "{{ nodes }}"
- when:
- - wait_for_static_ip_assigned|bool
- - nodes is defined
- - item.networks | length > 0
- - item.networks[0].ip is defined
- vars:
- reported_ipv4_addresses: "{{ (nics.ovirt_nics[0].reported_devices | json_query('[*].ips[?version==`v4`].address') | flatten) | default([]) }}"
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tasks/wait_for_ip_pre29.yml b/roles/oatakan.ansible-role-ovirt/tasks/wait_for_ip_pre29.yml
deleted file mode 100644
index 2f2ee18..0000000
--- a/roles/oatakan.ansible-role-ovirt/tasks/wait_for_ip_pre29.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-
-- name: waiting for ip address to be assigned (<2.9)
- ovirt_nic_facts:
- auth: "{{ ovirt_auth }}"
- vm: "{{ item.vm.name }}"
- name: nic1
- register: nics
- until:
- - nics.ansible_facts.ovirt_nics | length > 0
- - nics.ansible_facts.ovirt_nics[0].reported_devices | length > 0
- - reported_ipv4_addresses | length
- retries: 300
- delay: 10
- loop: "{{ instances.results }}"
- when:
- - nodes is defined
- - instances.results is defined
- - item.vm is defined
- vars:
- reported_ipv4_addresses: "{{ (nics.ovirt_nics[0].reported_devices | json_query('[*].ips[?version==`v4`].address') | flatten) | default([]) }}"
-
-- name: waiting for servers to come online on predefined ip
- wait_for:
- host: "{{ item.networks[0].ip }}"
- port: "{{ item.ansible_port | default(ansible_port) | default('22') }}"
- loop: "{{ nodes }}"
- when:
- - nodes is defined
- - item.networks is defined
- - item.networks[0].ip is defined
-
-- name: waiting for servers to come online on dhcp ip (<2.9)
- wait_for:
- host: "{{ (item.ansible_facts.ovirt_nics[0].reported_devices[0].ips | json_query('[?version==`v4`].address'))[0] }}"
- port: "{{ item.item.item.item.ansible_port | default(ansible_port) | default('22') }}"
- loop: "{{ nics.results }}"
- when:
- - nodes is defined
- - nics.results is defined
- - item.ansible_facts is defined
- - item.item.item.item.networks is defined
- - item.item.item.item.networks[0].ip is not defined
-
-- name: waiting for ovirt to show the predefined ip (<2.9)
- ovirt_nic_facts:
- auth: "{{ ovirt_auth }}"
- vm: "{{ item.name }}"
- name: nic1
- fetch_nested: yes
- nested_attributes:
- - ips
- register: nics
- until:
- - nics.ansible_facts.ovirt_nics | length > 0
- - nics.ansible_facts.ovirt_nics[0].reported_devices | length > 0
- - item.networks[0].ip in reported_ipv4_addresses
- retries: "{{ ip_wait_retry_limit }}"
- delay: 10
- loop: "{{ nodes }}"
- when:
- - wait_for_static_ip_assigned|bool
- - nodes is defined
- - item.networks | length > 0
- - item.networks[0].ip is defined
- vars:
- reported_ipv4_addresses: "{{ (nics.ovirt_nics[0].reported_devices | json_query('[*].ips[?version==`v4`].address') | flatten) | default([]) }}"
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/templates/cloud_init.yml.j2 b/roles/oatakan.ansible-role-ovirt/templates/cloud_init.yml.j2
deleted file mode 100644
index cbcf082..0000000
--- a/roles/oatakan.ansible-role-ovirt/templates/cloud_init.yml.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-cloud_init:
-{% if item.networks[0].ip is defined and item.networks[0].netmask is defined and item.networks[0].gateway is defined %}
- nic_boot_protocol: static
- nic_ip_address: "{{ item.networks[0].ip }}"
- nic_netmask: "{{ item.networks[0].netmask }}"
- nic_gateway: "{{ item.networks[0].gateway }}"
-{% else %}
- nic_boot_protocol: dhcp
-{% endif %}
- nic_name: {{ item.networks[0].nic_name | default(item.networks[0].device_name) | default('eth0') }}
- host_name: "{{ item.name }}.{{ item.domain | default('') }}"
-{% if item.dns_servers is defined %}
- dns_servers: "{{ item.dns_servers|join(' ') }}"
-{% endif %}
-{% if item.user_name is defined %}
- user_name: "{{ item.user_name }}"
-{% endif %}
-{% if item.root_password is defined %}
- root_password: "{{ item.root_password }}"
-{% endif %}
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/templates/unattended.xml.j2 b/roles/oatakan.ansible-role-ovirt/templates/unattended.xml.j2
deleted file mode 100644
index 593412d..0000000
--- a/roles/oatakan.ansible-role-ovirt/templates/unattended.xml.j2
+++ /dev/null
@@ -1,226 +0,0 @@
-
-
-
-
-
-
- {{ ansible_password | b64encode | b64decode }}
- true
-
-
-
-
- {{ ansible_password | b64encode | b64decode }}
- true
-
- {{ item.user_name | default('vagrant') }} User
- {{ item.user_name | default('vagrant') }}
- Administrators
- {{ item.user_name | default('vagrant') }}
-
-
-
-
- true
- true
- Home
- 1
-{% if not '2008' in (windows_distro_name | default(item.template)) %}
- true
- true
- true
-{% endif %}
- true
- true
-
-
-
- {{ ansible_password | b64encode | b64decode }}
- true
-
- {{ item.user_name | default('vagrant') }}
- true
-
-
-
- cmd.exe /c powershell -Command "Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Force"
- Set Execution Policy 64 Bit
- 1
- true
-
-{% if not '2008' in (windows_distro_name | default(item.template)) %}
-
- cmd.exe /c powershell -Command "Set-NetConnectionProfile -NetworkCategory Private"
- Set network connection profile to private
- 2
- true
-
-
- cmd.exe /c winrm quickconfig -q
- winrm quickconfig -q
- 4
- true
-
-
- cmd.exe /c winrm quickconfig -transport:http
- winrm quickconfig -transport:http
- 5
- true
-
-
- cmd.exe /c winrm set winrm/config @{MaxTimeoutms="1800000"}
- Win RM MaxTimoutms
- 6
- true
-
-
- cmd.exe /c winrm set winrm/config/winrs @{MaxMemoryPerShellMB="800"}
- Win RM MaxMemoryPerShellMB
- 7
- true
-
-
- cmd.exe /c winrm set winrm/config/service @{AllowUnencrypted="true"}
- Win RM AllowUnencrypted
- 8
- true
-
-
- cmd.exe /c winrm set winrm/config/service/auth @{Basic="true"}
- Win RM auth Basic
- 9
- true
-
-
- cmd.exe /c winrm set winrm/config/client/auth @{Basic="true"}
- Win RM client auth Basic
- 10
- true
-
-
- cmd.exe /c winrm set winrm/config/listener?Address=*+Transport=HTTP @{Port="5985"}
- Win RM listener Address/Port
- 11
- true
-
-
- cmd.exe /c netsh firewall add portopening TCP 5985 "Port 5985"
- Win RM port open
- 12
- true
-
-
- cmd.exe /c net stop winrm
- Stop Win RM Service
- 13
- true
-
-
- cmd.exe /c sc config winrm start= auto
- Win RM Autostart
- 14
- true
-
-
- cmd.exe /c net start winrm
- Start Win RM Service
- 15
- true
-
-{% else %}
-
- cmd.exe /c reg add "HKLM\System\CurrentControlSet\Control\Network\NewNetworkWindowOff" /f
- Network prompt
- 2
- true
-
-
- cmd.exe /c powershell -Command "{{ set_network_to_private }}"
- Set network connection profile to private
- 3
- true
-
-{% endif %}
-
- cmd.exe /c powershell -Command "& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'))) -ForceNewSSLCert -EnableCredSSP"
- Enable winrm
- 20
- true
-
-
- cmd.exe /c powershell -Command "Enable-WSManCredSSP -Role Server -Force"
- Enable winrm server role
- 21
- true
-
-
- cmd.exe /c powershell -Command "Set-Item -Path 'WSMan:\localhost\Service\Auth\CredSSP' -Value $true"
- Enable credssp authentication
- 22
- true
-
-
- cmd.exe /c powershell -Command "Resize-Partition -DriveLetter C -Size (Get-PartitionSupportedSize -DriveLetter C).Sizemax -ErrorAction SilentlyContinue"
- Extend OS disk
- 23
- true
-
-{% if item.networks is defined and item.networks[0].ip is defined and item.networks[0].gateway is defined and item.networks[0].netmask is defined %}
-{% if not '2008' in (windows_distro_name | default(item.template)) %}
-
- cmd.exe /c powershell -Command "New-NetIPAddress –IPAddress {{ item.networks[0].ip }} -DefaultGateway {{ item.networks[0].gateway }} -PrefixLength {{ (item.networks[0].ip + '/' + item.networks[0].netmask) | ipaddr('prefix') }} -InterfaceIndex (Get-NetAdapter | Where-Object { ($_.Name -like '*Ethernet*') -and ($_.Status -like 'Up') })[0].InterfaceIndex"
- Set static ip
- 50
- true
-
-{% else %}
-
- cmd.exe /c netsh int ipv4 set address "Local Area connection" static {{ item.networks[0].ip }} {{ item.networks[0].netmask }} {{ item.networks[0].gateway }}
- Set static ip
- 50
- true
-
-{% endif %}
-{% if item.networks[0].dns_servers is defined %}
-{% if not '2008' in (windows_distro_name | default(item.template)) %}
-
- cmd.exe /c powershell -Command "Set-DNSClientServerAddress –InterfaceIndex (Get-NetAdapter | Where-Object { ($_.Name -like '*Ethernet*') -and ($_.Status -like 'Up') })[0].InterfaceIndex –ServerAddresses {{ item.networks[0].dns_servers|join(',') }}"
- Set static ip
- 51
- true
-
-{% else %}
-
- cmd.exe /c netsh int ipv4 set dns "Local Area connection" static {{ item.networks[0].dns_servers[0] }}
- Set static ip
- 51
- true
-
-{% endif %}
-{% endif %}
-{% endif %}
-
- false
-
-
-
-
-
- false
-
-
- {{ item.name }}
- Central Standard Time
-
-
-
- true
-
-
-
- true
-
-
-
-
-
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tests/inventory b/roles/oatakan.ansible-role-ovirt/tests/inventory
deleted file mode 100644
index d18580b..0000000
--- a/roles/oatakan.ansible-role-ovirt/tests/inventory
+++ /dev/null
@@ -1 +0,0 @@
-localhost
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/tests/test.yml b/roles/oatakan.ansible-role-ovirt/tests/test.yml
deleted file mode 100644
index 5ec89eb..0000000
--- a/roles/oatakan.ansible-role-ovirt/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- remote_user: root
- roles:
- - ansible-role-ovirt
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-ovirt/vars/main.yml b/roles/oatakan.ansible-role-ovirt/vars/main.yml
deleted file mode 100644
index 646a4fb..0000000
--- a/roles/oatakan.ansible-role-ovirt/vars/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-
-set_network_to_private: "([Activator]::CreateInstance([Type]::GetTypeFromCLSID([Guid]'{DCB00C01-570F-4A9B-8D69-199FDBA5723B}'))).GetNetworkConnections() | % {$_.GetNetwork().SetCategory(1)}"
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-windows-ad-controller/README.md b/roles/oatakan.ansible-role-windows-ad-controller/README.md
deleted file mode 100644
index b19fadc..0000000
--- a/roles/oatakan.ansible-role-windows-ad-controller/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# ansible-role-windows-ad-controller
-Ansible role to deploy Windows AD Controller
diff --git a/roles/oatakan.ansible-role-windows-ad-controller/defaults/main.yml b/roles/oatakan.ansible-role-windows-ad-controller/defaults/main.yml
deleted file mode 100644
index 3d7836e..0000000
--- a/roles/oatakan.ansible-role-windows-ad-controller/defaults/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-
-dns_domain_name: "example.com"
-domain_admin_password: "sX{88h:_P#G:]TC#"
-domain_admin_username: Admin
-
-users_password: "PiP@ssw0rd14"
-ad_users:
- - name: user1
- username: user1
- email: user1
- - name: user2
- username: user2
- email: user2
-
-ad_groups:
- - name: Ansible Users
- scope: global
-
-child_ous:
- - name: Company OU
- description: Test organization
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-windows-ad-controller/meta/.galaxy_install_info b/roles/oatakan.ansible-role-windows-ad-controller/meta/.galaxy_install_info
deleted file mode 100644
index 4048434..0000000
--- a/roles/oatakan.ansible-role-windows-ad-controller/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Fri Oct 15 18:59:14 2021
-version: ''
diff --git a/roles/oatakan.ansible-role-windows-ad-controller/meta/main.yml b/roles/oatakan.ansible-role-windows-ad-controller/meta/main.yml
deleted file mode 100644
index b8024b5..0000000
--- a/roles/oatakan.ansible-role-windows-ad-controller/meta/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-galaxy_info:
- author: Orcun Atakan
- description: Ansible galaxy role for installing Windows AD Controller
- company: Red Hat
-
- license: license (GPLv2, CC-BY, etc)
-
- min_ansible_version: 1.2
-
- platforms:
- - name: Windows
- versions:
- - all
-
- categories:
- - all
-
- galaxy_tags:
- - windows
- - active directory
- - ad
-
-dependencies: []
diff --git a/roles/oatakan.ansible-role-windows-ad-controller/tasks/main.yml b/roles/oatakan.ansible-role-windows-ad-controller/tasks/main.yml
deleted file mode 100644
index e4f517e..0000000
--- a/roles/oatakan.ansible-role-windows-ad-controller/tasks/main.yml
+++ /dev/null
@@ -1,132 +0,0 @@
----
-
-- name: ensure required powershell module is present
- win_psmodule:
- name: xActiveDirectory
- state: present
-
-- name: enable windows features
- win_dsc:
- resource_name: WindowsFeature
- Name: "{{ item }}"
- IncludeAllSubFeature: True
- Ensure: Present
- register: install_ad
- ignore_errors: yes
- loop:
- - AD-Domain-Services
-
-- name: reboot if needed
- win_reboot:
-# when: item.reboot_required
-# loop: "{{ install_ad.results }}"
-# run_once: yes
-
-- name: add a new domain
- win_dsc:
- resource_name: xADDomain
- DomainName: "{{ dns_domain_name }}"
- DomainAdministratorCredential_username: "{{ domain_admin_username }}@{{ dns_domain_name }}"
- DomainAdministratorCredential_password: "{{ domain_admin_password }}"
- SafemodeAdministratorPassword_username: "{{ domain_admin_username }}@{{ dns_domain_name }}"
- SafemodeAdministratorPassword_password: "{{ domain_admin_password }}"
- register: add_domain
- ignore_errors: yes
-
-- name: set parent dn
- set_fact:
- parent_dn: "DC={{ dns_domain_name.split('.') | join(',DC=') }}"
-
-- name: reboot if needed
- win_reboot:
- when: add_domain.reboot_required
-
-- name: wait for AD domain
- win_dsc:
- resource_name: xWaitForADDomain
- DomainName: "{{ dns_domain_name }}"
-
-- name: adjust password policy
- win_dsc:
- resource_name: xADDomainDefaultPasswordPolicy
- DomainName: "{{ dns_domain_name }}"
- ComplexityEnabled: False
- MinPasswordLength: 8
- PasswordHistoryCount: 10
-
-- name: add child OU
- win_dsc:
- resource_name: xADOrganizationalUnit
- Name: "{{ item.name }}"
- Path: "{{ parent_dn }}"
- Description: "{{ item.description }}"
- Ensure: Present
- register: child_ou
- loop: "{{ child_ous }}"
-
-- name: add groups
- win_dsc:
- resource_name: xADGroup
- GroupName: "{{ item.name }}"
- GroupScope: "{{ item.scope }}"
- Ensure: Present
- loop: "{{ ad_groups }}"
-
-- name: add domain admin user
- win_dsc:
- resource_name: xADUser
- UserName: "{{ domain_admin_username }}"
- UserPrincipalName: "{{ domain_admin_username }}@{{ dns_domain_name }}"
- Password_username: "{{ domain_admin_username }}"
- Password_password: "{{ domain_admin_password }}"
- DomainName: "{{ dns_domain_name }}"
- Enabled: True
- GivenName: "{{ domain_admin_username }}"
- Surname: user
- Company: AnsibleByRedHat
- EmailAddress: "{{ domain_admin_username }}@{{ dns_domain_name }}"
- PasswordNeverExpires: True
- Ensure: Present
- ignore_errors: yes
-
-- name: add admin user to Domain Admins group
- win_dsc:
- resource_name: xADGroup
- GroupName: Domain Admins
- MembersToInclude: "{{ domain_admin_username }}"
- ignore_errors: yes
-
-- name: add domain users
- win_dsc:
- resource_name: xADUser
- UserName: "{{ item.username }}"
- UserPrincipalName: "{{ item.username }}@{{ dns_domain_name }}"
- Password_username: "{{ item.username }}"
- Password_password: "{{ users_password }}"
- DomainName: "{{ dns_domain_name }}"
- DomainAdministratorCredential_username: "{{ domain_admin_username }}@{{ dns_domain_name }}"
- DomainAdministratorCredential_password: "{{ domain_admin_password }}"
- Enabled: True
- GivenName: "{{ item.name }}"
- Surname: user
- Company: AnsibleByRedHat
- EmailAddress: "{{ item.username }}@{{ dns_domain_name }}"
- Ensure: Present
- loop: "{{ ad_users }}"
- ignore_errors: yes
-
-- name: add domain users to groups
- win_dsc:
- resource_name: xADGroup
- GroupName: "{{ item }}"
- MembersToInclude: "{{ ad_users | map(attribute='username') | list }}"
- loop:
- - Ansible Users
- - Remote Desktop Users
-
-- name: ensure registry service is running
- win_dsc:
- resource_name: Service
- Name: TermService
- StartupType: Automatic
- State: Running
\ No newline at end of file
diff --git a/roles/oatakan.ansible-role-windows-ad-controller/tests/inventory b/roles/oatakan.ansible-role-windows-ad-controller/tests/inventory
deleted file mode 100644
index 878877b..0000000
--- a/roles/oatakan.ansible-role-windows-ad-controller/tests/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
-localhost
-
diff --git a/roles/oatakan.ansible-role-windows-ad-controller/tests/test.yml b/roles/oatakan.ansible-role-windows-ad-controller/tests/test.yml
deleted file mode 100644
index d4f353e..0000000
--- a/roles/oatakan.ansible-role-windows-ad-controller/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- remote_user: root
- roles:
- - windows-ad-controller
\ No newline at end of file
diff --git a/roles/oatakan.rhel_ovirt_template/README.md b/roles/oatakan.rhel_ovirt_template/README.md
index 14fa258..a7e198c 100644
--- a/roles/oatakan.rhel_ovirt_template/README.md
+++ b/roles/oatakan.rhel_ovirt_template/README.md
@@ -39,12 +39,13 @@ Example Playbook
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+ ---
# import ovirt.ovirt collections
- name: create a ovirt rhel template
hosts: all
- gather_facts: False
+ gather_facts: false
connection: local
- become: no
+ become: false
vars:
template_force: yes #overwrite existing template with the same name
export_ovf: no # export the template to export domain upon creation
@@ -75,6 +76,18 @@ Including an example of how to use your role (for instance, with variables passe
roles:
- oatakan.rhel_ovirt_template
+ ---
+ # import ovirt.ovirt collections
+ - name: delete a ovirt rhel template
+ hosts: all
+ gather_facts: false
+ connection: local
+ become: false
+
+ roles:
+ - role: oatakan.rhel_ovirt_template
+ role_action: deprovision
+
License
-------
diff --git a/roles/oatakan.rhel_ovirt_template/defaults/main.yml b/roles/oatakan.rhel_ovirt_template/defaults/main.yml
index b0fb3fa..a5455d0 100644
--- a/roles/oatakan.rhel_ovirt_template/defaults/main.yml
+++ b/roles/oatakan.rhel_ovirt_template/defaults/main.yml
@@ -1,12 +1,14 @@
---
+role_action: provision #provision or deprovision
+
install_updates: yes
instance_wait_retry_limit: 300
instance_wait_connection_timeout: 600
# this will remove existing template with the same name
-template_force: no
-template_found: no
+template_force: false
+template_found: false
export_ovf: no
@@ -22,13 +24,13 @@ qemu_second_cdrom_device_bus_type: ide
qemu_second_cdrom_device_bus_id: 3
qemu_second_cdrom_device_bus_unit: 0
-local_administrator_password: Chang3MyP@ssw0rd21
+local_administrator_password: ''
local_account_username: ansible
-local_account_password: Chang3MyP@ssw0rd21
+local_account_password: ''
-distro_name: rhel8
-iso_file_name: CentOS-8.4.2105-x86_64-dvd1.iso
-linux_ks_folder: rhel8
+distro_name: rhel9
+iso_file_name: CentOS-Stream-9-latest-x86_64-dvd1.iso
+linux_ks_folder: rhel9
template_vm_name: centos84-x64-v1
template_vm_root_disk_size: 10
@@ -36,7 +38,7 @@ template_vm_root_disk_format: cow
template_vm_root_disk_interface: virtio
template_vm_memory: 4096
template_vm_cpu: 2
-template_vm_guest_id: rhel_7x64
+template_vm_guest_id: rhel_8x64
template_vm_efi: no
template_vm_network_name: ovirtmgmt
template_vm_ip_address: 192.168.10.96
@@ -51,6 +53,8 @@ template_convert_timeout: 600
template_convert_seal: no
template_selinux_enabled: no
+permit_root_login_with_password: true
+
ovirt_datacenter: mydatacenter
ovirt_cluster: production
ovirt_folder: template
@@ -64,4 +68,7 @@ os_short_names:
guest_id: rhel_7x64
rhel8:
ks_folder: rhel8
+ guest_id: rhel_8x64
+ rhel9:
+ ks_folder: rhel9
guest_id: rhel_8x64
\ No newline at end of file
diff --git a/roles/oatakan.rhel_ovirt_template/meta/.galaxy_install_info b/roles/oatakan.rhel_ovirt_template/meta/.galaxy_install_info
index 6f0d050..2e815ff 100644
--- a/roles/oatakan.rhel_ovirt_template/meta/.galaxy_install_info
+++ b/roles/oatakan.rhel_ovirt_template/meta/.galaxy_install_info
@@ -1,2 +1,2 @@
-install_date: Fri Oct 15 18:59:21 2021
+install_date: Thu 08 Feb 2024 08:54:05 PM
version: master
diff --git a/roles/oatakan.rhel_ovirt_template/tasks/main.yml b/roles/oatakan.rhel_ovirt_template/tasks/main.yml
index 347abb1..cf662f6 100644
--- a/roles/oatakan.rhel_ovirt_template/tasks/main.yml
+++ b/roles/oatakan.rhel_ovirt_template/tasks/main.yml
@@ -19,120 +19,8 @@
- include_tasks: remove_template.yml
when:
- - template_force|bool
+ - remove_template
- template_found|bool
-- block:
- - include_tasks: make_iso.yml
-
- - include_tasks: provision_vm.yml
-
- - name: refresh inventory
- meta: refresh_inventory
-
- - name: clear gathered facts
- meta: clear_facts
-
- - name: clear any host errors
- meta: clear_host_errors
-
- - name: add host
- add_host:
- hostname: template_vm
- ansible_host: '{{ template_vm_ip_address }}'
- host_key_checking: false
- ansible_user: "{{ local_account_username }}"
- ansible_password: "{{ local_account_password }}"
- ansible_port: "{{ vm_ansible_port | default('22') }}"
- ansible_ssh_common_args: '-o UserKnownHostsFile=/dev/null'
- ansible_python_interpreter: auto
-
- - name: run setup module
- setup:
- delegate_to: template_vm
- connection: ssh
-
- - block:
- - include_role:
- name: oatakan.rhn
- apply:
- delegate_to: template_vm
- connection: ssh
- become: yes
-
- - include_role:
- name: oatakan.rhel_upgrade
- apply:
- delegate_to: template_vm
- connection: ssh
- become: yes
- when: install_updates|bool
-
- - include_role:
- name: oatakan.rhel_template_build
- apply:
- delegate_to: template_vm
- connection: ssh
- become: yes
- vars:
- target_ovirt: yes
-
- always:
- - include_role:
- name: oatakan.rhn
- apply:
- delegate_to: template_vm
- connection: ssh
- become: yes
- vars:
- role_action: unregister
-
- - name: force handlers to run before stoppping the vm
- meta: flush_handlers
-
- - name: refresh SSO credentials
- ovirt.ovirt.ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
-
- - include_tasks: stop_vm.yml
-
- - include_tasks: convert_to_template.yml
-
- - include_tasks: export_ovf.yml
- when: export_ovf|bool
-
- rescue:
- - name: refresh SSO credentials
- ovirt.ovirt.ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
-
- - include_tasks: remove_template.yml
- when: remove_vm_on_error|bool
-
- always:
- - name: refresh SSO credentials
- ovirt.ovirt.ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
-
- - include_tasks: remove_vm.yml
-
- - include_tasks: datastore_iso_remove.yml
-
- - name: remove temporary directory
- file:
- path: "{{ temp_directory }}"
- state: absent
-
- - name: logout from oVirt
- ovirt.ovirt.ovirt_auth:
- state: absent
- ovirt_auth: "{{ ovirt_auth }}"
\ No newline at end of file
+- include_tasks: provision.yml
+ when: role_action == 'provision'
\ No newline at end of file
diff --git a/roles/oatakan.rhel_ovirt_template/tasks/preflight_check.yml b/roles/oatakan.rhel_ovirt_template/tasks/preflight_check.yml
index 1c74465..1bbf77c 100644
--- a/roles/oatakan.rhel_ovirt_template/tasks/preflight_check.yml
+++ b/roles/oatakan.rhel_ovirt_template/tasks/preflight_check.yml
@@ -1,5 +1,9 @@
---
+- name: set template_found to false
+ set_fact:
+ template_found: false
+
- name: get the datacenter name
ovirt.ovirt.ovirt_datacenter_info:
auth: "{{ ovirt_auth }}"
@@ -35,36 +39,38 @@
register: template_info
- block:
- - name: set template_found to yes
+ - name: set template_found to true
set_fact:
- template_found: yes
+ template_found: true
- name: fail with message
fail:
msg: "Existing template found on ovirt/rhv: {{ template.name }}"
- when: not template_force|bool
+ when: not remove_template
when:
- template_info.ovirt_templates is defined
- template_info.ovirt_templates | length > 0
-- name: check iso file on data domain
- ovirt.ovirt.ovirt_disk_info:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ iso_file_name }}"
- register: ovirt_disk_main_iso
- when: iso_file_name is defined
+- block:
+ - name: check iso file on data domain
+ ovirt.ovirt.ovirt_disk_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ iso_file_name }}"
+ register: ovirt_disk_main_iso
+ when: iso_file_name is defined
-- name: set file id of the iso file
- set_fact:
- iso_file_id: "{{ ovirt_disk_main_iso.ovirt_disks[0].id }}"
- when:
- - ovirt_disk_main_iso.ovirt_disks | length > 0
- - ovirt_disk_main_iso.ovirt_disks[0].id is defined
- - ovirt_disk_main_iso.ovirt_disks[0].content_type == 'iso'
+ - name: set file id of the iso file
+ set_fact:
+ iso_file_id: "{{ ovirt_disk_main_iso.ovirt_disks[0].id }}"
+ when:
+ - ovirt_disk_main_iso.ovirt_disks | length > 0
+ - ovirt_disk_main_iso.ovirt_disks[0].id is defined
+ - ovirt_disk_main_iso.ovirt_disks[0].content_type == 'iso'
-- name: fail with message
- fail:
- msg: "iso file ({{ iso_file_name }}) could not be found on the data domain and iso domain does not exists"
- when:
- - iso_file_id is undefined
- - iso_domain is undefined or iso_domain|length == 0
\ No newline at end of file
+ - name: fail with message
+ fail:
+ msg: "iso file ({{ iso_file_name }}) could not be found on the data domain and iso domain does not exists"
+ when:
+ - iso_file_id is undefined
+ - iso_domain is undefined or iso_domain|length == 0
+ when: role_action == 'provision'
\ No newline at end of file
diff --git a/roles/oatakan.rhel_ovirt_template/tasks/preflight_check_pre29.yml b/roles/oatakan.rhel_ovirt_template/tasks/preflight_check_pre29.yml
index 05451a2..a09772e 100644
--- a/roles/oatakan.rhel_ovirt_template/tasks/preflight_check_pre29.yml
+++ b/roles/oatakan.rhel_ovirt_template/tasks/preflight_check_pre29.yml
@@ -1,5 +1,9 @@
---
+- name: set template_found to false
+ set_fact:
+ template_found: false
+
- name: get the datacenter name (<2.9)
ovirt_datacenter_facts:
auth: "{{ ovirt_auth }}"
@@ -33,35 +37,37 @@
- block:
- - name: set template_found to yes
+ - name: set template_found to true
set_fact:
- template_found: yes
+ template_found: true
- name: fail with message
fail:
msg: "Existing template found on ovirt/rhv: {{ template.name }}"
- when: not template_force|bool
+ when: not remove_template
when:
- ovirt_templates is defined
- ovirt_templates | length > 0
-- name: check iso file on data domain
- ovirt_disk_facts:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ iso_file_name }}"
- when: iso_file_name is defined
+- block:
+ - name: check iso file on data domain
+ ovirt_disk_facts:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ iso_file_name }}"
+ when: iso_file_name is defined
-- name: set file id of the iso file
- set_fact:
- iso_file_id: "{{ ovirt_disks[0].id }}"
- when:
- - ovirt_disks | length > 0
- - ovirt_disks[0].id is defined
- - ovirt_disks[0].content_type == 'iso'
+ - name: set file id of the iso file
+ set_fact:
+ iso_file_id: "{{ ovirt_disks[0].id }}"
+ when:
+ - ovirt_disks | length > 0
+ - ovirt_disks[0].id is defined
+ - ovirt_disks[0].content_type == 'iso'
-- name: fail with message
- fail:
- msg: "iso file ({{ template.name }}) could not be found on the data domain and iso domain does not exists"
- when:
- - iso_file_id is undefined
- - iso_domain is undefined or iso_domain|length == 0
\ No newline at end of file
+ - name: fail with message
+ fail:
+ msg: "iso file ({{ template.name }}) could not be found on the data domain and iso domain does not exists"
+ when:
+ - iso_file_id is undefined
+ - iso_domain is undefined or iso_domain|length == 0
+ when: role_action == 'provision'
\ No newline at end of file
diff --git a/roles/oatakan.rhel_ovirt_template/templates/rhel8/ks.cfg.j2 b/roles/oatakan.rhel_ovirt_template/templates/rhel8/ks.cfg.j2
index 48eb85a..c2aced3 100644
--- a/roles/oatakan.rhel_ovirt_template/templates/rhel8/ks.cfg.j2
+++ b/roles/oatakan.rhel_ovirt_template/templates/rhel8/ks.cfg.j2
@@ -13,7 +13,7 @@ network --bootproto=dhcp
network --hostname=localhost.localdomain
rootpw {{ local_administrator_password }}
-authconfig --enableshadow --passalgo=sha512
+authselect --enableshadow --passalgo=sha512
{% if template_selinux_enabled is undefined or not template_selinux_enabled %}
selinux --disabled
diff --git a/roles/oatakan.rhel_ovirt_template/vars/main.yml b/roles/oatakan.rhel_ovirt_template/vars/main.yml
index 37585c5..9d06b68 100644
--- a/roles/oatakan.rhel_ovirt_template/vars/main.yml
+++ b/roles/oatakan.rhel_ovirt_template/vars/main.yml
@@ -6,6 +6,8 @@ iso_file: "linux_{{ distro_name }}_ks{{ awx_job_id | default('') }}.iso"
export_dir: "{{ playbook_dir }}/{{ temp_directory }}"
+remove_template: "{{ true if (role_action == 'deprovision' or template_force|bool) else false }}"
+
providers:
ovirt:
datacenter: "{{ ovirt_datacenter }}"
@@ -39,8 +41,8 @@ template:
dns_servers: "{{ template_vm_dns_servers }}"
cd_iso: "{{ iso_file_id | default(iso_file_name) }}" # if using data domain, file name does not work, need to use id
-linux_ks_folder: "{{ os_short_names[(distro_name|default('rhel_80'))].ks_folder | default('rhel8') }}"
-template_vm_guest_id: "{{ os_short_names[(distro_name|default('rhel_80'))].guest_id | default('rhel_8x64') }}"
+linux_ks_folder: "{{ os_short_names[(distro_name|default('rhel8'))].ks_folder | default('rhel8') }}"
+template_vm_guest_id: "{{ os_short_names[(distro_name|default('rhel8'))].guest_id | default('rhel_8x64') }}"
qemu_cmdline_second_iso:
- -device
diff --git a/roles/oatakan.rhel_template_build/README.md b/roles/oatakan.rhel_template_build/README.md
index aee6ed1..979883b 100644
--- a/roles/oatakan.rhel_template_build/README.md
+++ b/roles/oatakan.rhel_template_build/README.md
@@ -1,2 +1,39 @@
# oatakan.rhel_template_build
Ansible role to configure RHEL/CentOS via Packer Ansible provisioner
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of roles that this role utilizes, you can add them into your projects roles/requirements.txt:
+
+- oatakan.linux_parallels_tools # only required if running on a parallels VM on a desktop
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - oatakan.rhel_template_build
+
+License
+-------
+
+MIT
+
+Author Information
+------------------
+
+Orcun Atakan
+
diff --git a/roles/oatakan.rhel_template_build/defaults/main.yml b/roles/oatakan.rhel_template_build/defaults/main.yml
index 37c53ca..c2b85e3 100644
--- a/roles/oatakan.rhel_template_build/defaults/main.yml
+++ b/roles/oatakan.rhel_template_build/defaults/main.yml
@@ -1,10 +1,9 @@
---
-target_vagrant: no
-target_ovirt: no
+target_vagrant: false
+target_ovirt: false
local_account_username: ansible
-local_account_password: Chang3MyP@ssw0rd21
ovirt_guest_agent_service_name: ovirt-guest-agent
qemu_guest_agent_service_name: qemu-guest-agent
@@ -15,5 +14,9 @@ ovirt_guest_agent_package_name:
qemu_guest_agent_package_name: qemu-guest-agent
+parallels_tools_role: oatakan.linux_parallels_tools
+
the_root_vgname: vg00
-the_root_lvname: root
\ No newline at end of file
+the_root_lvname: root
+
+permit_root_login_with_password: true
diff --git a/roles/oatakan.rhel_template_build/meta/.galaxy_install_info b/roles/oatakan.rhel_template_build/meta/.galaxy_install_info
index 2ad1198..52e18a3 100644
--- a/roles/oatakan.rhel_template_build/meta/.galaxy_install_info
+++ b/roles/oatakan.rhel_template_build/meta/.galaxy_install_info
@@ -1,2 +1,2 @@
-install_date: Fri Oct 15 18:59:20 2021
+install_date: Thu 08 Feb 2024 08:54:04 PM
version: master
diff --git a/roles/oatakan.rhel_template_build/meta/main.yml b/roles/oatakan.rhel_template_build/meta/main.yml
index c9e20cc..54557e6 100644
--- a/roles/oatakan.rhel_template_build/meta/main.yml
+++ b/roles/oatakan.rhel_template_build/meta/main.yml
@@ -1,20 +1,22 @@
---
-dependencies: []
-
galaxy_info:
- author: oatakan
- description: RedHat/CentOS template build.
+ author: Orcun Atakan
+ description: Ansible role to configure RHEL/CentOS, it can be used as part of an os build process.
role_name: rhel_template_build
- company: "Red Hat"
- license: "license (BSD, MIT)"
- min_ansible_version: 2.4
+ namespace: oatakan
+ company: Red Hat
+
+ license: MIT
+
+ min_ansible_version: '2.9'
+
platforms:
- name: EL
versions:
- - 6
- - 7
- - 8
+ - all
+
galaxy_tags:
+ - rhel
- cloud
- system
- packaging
diff --git a/roles/oatakan.rhel_template_build/tasks/cloud-init.yml b/roles/oatakan.rhel_template_build/tasks/cloud-init.yml
index 8f5ed0b..03cb0eb 100644
--- a/roles/oatakan.rhel_template_build/tasks/cloud-init.yml
+++ b/roles/oatakan.rhel_template_build/tasks/cloud-init.yml
@@ -1,21 +1,56 @@
---
- name: ensure cloud-init packages are installed
- package:
+ ansible.builtin.package:
name:
- cloud-init
- cloud-utils-growpart
- gdisk
-- block:
+- name: enable cloud-init related services
+ ansible.builtin.service:
+ name: '{{ item }}'
+ enabled: true
+ loop:
+ - cloud-init
+ - cloud-init-local
+ - cloud-config
+ - cloud-final
+ when: ansible_distribution_major_version|int >= 9
+
+- name: cloud-init config
+ block:
+ - name: check for cloud.cfg file
+ ansible.builtin.stat:
+ path: /etc/cloud/cloud.cfg
+ register: check_cloud_cfg
+
+ - name: ensure root login
+ block:
+ - name: ensure root login is enabled in cloud-init config
+ ansible.builtin.replace:
+ path: /etc/cloud/cloud.cfg
+ regexp: '^(disable_root\: ).*$'
+ replace: '\1false'
+
+ - name: ensure password login is enabled in cloud-init config
+ ansible.builtin.replace:
+ path: /etc/cloud/cloud.cfg
+ regexp: '^(ssh_pwauth\: ).*$'
+ replace: '\1true'
+ when: check_cloud_cfg.stat.exists
+ when: permit_root_login_with_password|bool
+
+- name: ensure script created
+ block:
- name: ensure cloud-init scripts directory exists
- file:
+ ansible.builtin.file:
path: /var/lib/cloud/scripts/per-instance
state: directory
mode: '0755'
- name: create growpart cloud-init script to grow partition on boot
- template:
+ ansible.builtin.template:
src: grow_part.sh.j2
dest: /var/lib/cloud/scripts/per-instance/grow_part.sh
mode: u=rwx,g=rx,o=rx
@@ -28,4 +63,4 @@
# - ansible_lvm.pvs is defined
# - ansible_cmdline is defined
# - ansible_cmdline.root is defined
-# - ansible_lvm.lvs[the_root_lvname].vg in ansible_cmdline.root
\ No newline at end of file
+# - ansible_lvm.lvs[the_root_lvname].vg in ansible_cmdline.root
diff --git a/roles/oatakan.rhel_template_build/tasks/grow_part.yml b/roles/oatakan.rhel_template_build/tasks/grow_part.yml
index e5d4ba7..3fb94c3 100644
--- a/roles/oatakan.rhel_template_build/tasks/grow_part.yml
+++ b/roles/oatakan.rhel_template_build/tasks/grow_part.yml
@@ -1,25 +1,26 @@
---
- name: ensure growpart packages are installed
- package:
+ ansible.builtin.package:
name:
- cloud-utils-growpart
- gdisk
- name: create growpart cloud-init script to grow partition on boot
- template:
+ ansible.builtin.template:
src: grow_part.sh.j2
dest: /usr/local/bin/grow_part.sh
mode: u=rwx,g=rx,o=rx
- name: copy growpart service
- copy:
+ ansible.builtin.copy:
src: growpart.service
dest: /etc/systemd/system/growpart.service
+ mode: '0755'
- name: reload service
- systemd:
+ ansible.builtin.systemd:
name: growpart
- daemon_reload: yes
- enabled: yes
- state: stopped
\ No newline at end of file
+ daemon_reload: true
+ enabled: true
+ state: stopped
diff --git a/roles/oatakan.rhel_template_build/tasks/main.yml b/roles/oatakan.rhel_template_build/tasks/main.yml
index 84fca08..80e4bd9 100644
--- a/roles/oatakan.rhel_template_build/tasks/main.yml
+++ b/roles/oatakan.rhel_template_build/tasks/main.yml
@@ -1,12 +1,12 @@
---
- name: Get the current kernel release.
- command: uname -r
+ ansible.builtin.command: uname -r
changed_when: false
register: kernel_release
- name: Ensure necessary packages are installed.
- yum:
+ ansible.builtin.yum:
name:
- wget
- perl
@@ -20,23 +20,38 @@
- cifs-utils
state: present
+- name: install dbus-tools on RHEL 8+
+ ansible.builtin.yum:
+ name:
+ - dbus-tools
+ state: present
+ when: ansible_distribution_major_version|int >= 8
+
- name: Ensure libselinux-python package is installed.
- yum:
+ ansible.builtin.yum:
name:
- libselinux-python
state: present
when: ansible_distribution_major_version|int < 8
-- name: Ensure python3-libselinux package is installed.
- yum:
- name:
- - python3-libselinux
- state: present
+- name: set python
+ block:
+ - name: Ensure python3 packages are installed.
+ ansible.builtin.yum:
+ name:
+ - python3
+ - python3-libselinux
+ state: present
+
+ - name: set python
+ community.general.alternatives:
+ name: python
+ path: /usr/bin/python3
when: ansible_distribution_major_version|int == 8
# Fix slow DNS.
- name: Fix slow DNS (adapted from Bento).
- lineinfile:
+ ansible.builtin.lineinfile:
dest: /etc/sysconfig/network
regexp: '^RES_OPTIONS'
line: 'RES_OPTIONS="single-request-reopen"'
@@ -44,31 +59,32 @@
# see https://fedoraproject.org/wiki/Changes/NetworkManager_keyfile_instead_of_ifcfg_rh
- name: ensure older style network config files for greater compatibility
- copy:
+ ansible.builtin.copy:
dest: /etc/NetworkManager/conf.d/99-main-plugins.conf
+ mode: '0644'
content: |
[main]
plugins=ifcfg-rh
when: ansible_distribution_major_version|int == 8
- name: Restart network service (explicitly).
- service:
+ ansible.builtin.service:
name: network
state: restarted
when: ansible_distribution_major_version|int < 8
- name: Restart NetworkManager service (explicitly).
- service:
+ ansible.builtin.service:
name: NetworkManager
state: restarted
when: ansible_distribution_major_version|int == 8
- name: Ensure we can still connect
- wait_for_connection:
+ ansible.builtin.wait_for_connection:
# SSH daemon configuration.
- name: Configure SSH daemon.
- lineinfile:
+ ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
@@ -79,60 +95,70 @@
# Local user SSH configuration.
- name: Configure local user .ssh directory.
- file:
+ ansible.builtin.file:
path: /home/{{ local_account_username }}/.ssh
state: directory
owner: "{{ local_account_username }}"
group: "{{ local_account_username }}"
- mode: 0700
+ mode: '0700'
- name: Get Vagrant's public key.
- get_url:
- url: https://github.com/mitchellh/vagrant/raw/master/keys/vagrant.pub
+ ansible.builtin.get_url:
+ url: https://github.com/hashicorp/vagrant/raw/master/keys/vagrant.pub
dest: /home/{{ local_account_username }}/.ssh/authorized_keys
owner: "{{ local_account_username }}"
group: "{{ local_account_username }}"
- mode: 0600
- ignore_errors: yes
+ mode: '0600'
+ ignore_errors: true
when: target_vagrant | bool
- name: autolabel on boot
- command: fixfiles onboot
- changed_when: False
+ ansible.builtin.command: fixfiles onboot
+ changed_when: false
-- include_tasks: cloud-init.yml
+- name: cloud init
+ ansible.builtin.include_tasks: cloud-init.yml
when: target_ovirt | bool
-- include_tasks: grow_part.yml
+- name: grow partition
+ ansible.builtin.include_tasks: grow_part.yml
when: not (target_ovirt | bool)
-- include_tasks: ovirt.yml
+- name: ovirt agent
+ ansible.builtin.include_tasks: ovirt.yml
when: target_ovirt | bool
# VirtualBox tools installation.
- name: Check if VirtualBox is running the guest VM.
- stat:
+ ansible.builtin.stat:
path: /home/{{ local_account_username }}/.vbox_version
register: virtualbox_check
-- include_tasks: virtualbox.yml
+- name: virtualbox guest additions
+ ansible.builtin.include_tasks: virtualbox.yml
when: virtualbox_check.stat.exists
# VMware tools installation.
- name: Check if VMWare is running the guest VM.
- shell: |
+ ansible.builtin.shell: |
set -o pipefail
cat /proc/scsi/scsi | grep VMware
changed_when: false
failed_when: false
register: vmware_check
-- include_tasks: vmware.yml
+- name: vmware tools
+ ansible.builtin.include_tasks: vmware.yml
when: vmware_check.rc == 0
+- name: parallels tools
+ ansible.builtin.include_role:
+ name: "{{ parallels_tools_role }}"
+ when: ('Parallels' in (ansible_product_name | default('', true))) or (ansible_product_name == None and 'Parallels' in ansible_interfaces[0].interface_name)
+
# Cleanup tasks.
- name: Remove unneeded packages.
- yum:
+ ansible.builtin.yum:
name:
- cpp
- kernel-devel
@@ -141,57 +167,64 @@
state: absent
- name: Clean up yum.
- command: yum clean all
- args:
- warn: no
+ ansible.builtin.command: yum clean all
changed_when: false
+ no_log: true
- name: Flag the system for re-configuration
- file:
+ ansible.builtin.file:
path: /.unconfigured
state: touch
+ mode: '0644'
- name: Reset hostname to localhost.localadmin
- copy:
+ ansible.builtin.copy:
content: 'localhost.localdomain'
dest: /etc/hostname
+ mode: '0644'
- name: Remove RedHat interface persistence (step 1).
- file:
+ ansible.builtin.file:
path: /etc/udev/rules.d/70-persistent-net.rules
state: absent
+- name: Ensure NetworkManager config file is removed
+ ansible.builtin.file:
+ path: /etc/NetworkManager/system-connections/{{ ansible_default_ipv4.interface | default('eth0') }}.nmconnection
+ state: absent
+
- name: Check for network config file
- stat:
+ ansible.builtin.stat:
path: /etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4.interface | default('eth0') }}
register: network_config_file
-- name: Remove RedHat interface persistence (step 2).
- lineinfile:
- dest: "{{ network_config_file.stat.path }}"
- regexp: "{{ item }}"
- state: absent
- loop:
- - '^HWADDR'
- - '^UUID'
- - '^IPADDR'
- - '^NETMASK'
- - '^GATEWAY'
- when: network_config_file.stat.exists
+- name: Remove interface
+ block:
+ - name: Remove RedHat interface persistence (step 2).
+ ansible.builtin.lineinfile:
+ dest: "{{ network_config_file.stat.path }}"
+ regexp: "{{ item }}"
+ state: absent
+ loop:
+ - '^HWADDR'
+ - '^UUID'
+ - '^IPADDR'
+ - '^NETMASK'
+ - '^GATEWAY'
-- name: Set interface to DHCP
- lineinfile:
- dest: "{{ network_config_file.stat.path }}"
- regexp: '^BOOTPROTO='
- line: BOOTPROTO=dhcp
+ - name: Set interface to DHCP
+ ansible.builtin.lineinfile:
+ dest: "{{ network_config_file.stat.path }}"
+ regexp: '^BOOTPROTO='
+ line: BOOTPROTO=dhcp
when: network_config_file.stat.exists
- name: Force logs to rotate (step 1)
- shell: /usr/sbin/logrotate -f /etc/logrotate.conf
+ ansible.builtin.command: /usr/sbin/logrotate -f /etc/logrotate.conf
changed_when: false
- name: Find any log files to delete
- find:
+ ansible.builtin.find:
paths: /var/log
patterns:
- "*-????????"
@@ -199,21 +232,21 @@
register: find_log_files
- name: Force logs to rotate (step 2)
- file:
+ ansible.builtin.file:
path: "{{ item.path }}"
state: absent
loop: "{{ find_log_files.files }}"
+ failed_when: false
- name: Clear audit log and wtmp (step 1)
- shell: cat /dev/null > /var/log/audit/audit.log
+ ansible.builtin.shell: cat /dev/null > /var/log/audit/audit.log
changed_when: false
- name: Clear audit log and wtmp (step 2)
- shell: cat /dev/null > /var/log/wtmp
+ ansible.builtin.shell: cat /dev/null > /var/log/wtmp
changed_when: false
- name: Remove ssh-host files
- command: rm -fr /etc/ssh/ssh_host_*
+ ansible.builtin.command: rm -fr /etc/ssh/ssh_host_*
changed_when: false
- args:
- warn: false
\ No newline at end of file
+ no_log: true
diff --git a/roles/oatakan.rhel_template_build/tasks/ovirt.yml b/roles/oatakan.rhel_template_build/tasks/ovirt.yml
index 9db925d..b67aa6a 100644
--- a/roles/oatakan.rhel_template_build/tasks/ovirt.yml
+++ b/roles/oatakan.rhel_template_build/tasks/ovirt.yml
@@ -1,48 +1,47 @@
---
-- name: import epel gpg key
- rpm_key:
- state: present
- key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}
- when: ansible_distribution == 'CentOS'
+- name: epel
+ block:
+ - name: import epel gpg key
+ ansible.builtin.rpm_key:
+ state: present
+ key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}
-- name: ensure epel is installed
- yum:
- name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
- state: present
- register: install_epel
- until: '"error" not in install_epel'
- retries: 5
- delay: 10
+ - name: ensure epel is installed
+ ansible.builtin.yum:
+ name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
+ state: present
+ register: install_epel
+ until: '"error" not in install_epel'
+ retries: 5
+ delay: 10
when: ansible_distribution == 'CentOS'
# rhevm-guest-agent-common package is not yet available for RHEL 8
- name: ensure ovirt guest agent package is installed
- package:
+ ansible.builtin.package:
name: "{{ ovirt_guest_agent_package_name[ansible_distribution] }}"
register: ovirt_package_installation
when: ansible_distribution_major_version|int < 8
-# try installing qemu package on RHEL/CentOS 8 for now
- name: ensure qemu guest agent package is installed
- package:
+ ansible.builtin.package:
name: "{{ qemu_guest_agent_package_name }}"
- when: ansible_distribution_major_version|int == 8
+ when: ansible_distribution_major_version|int >= 8
register: qemu_package_installation
- ignore_errors: yes
+ ignore_errors: true
-- name: ensure ovirt guest agent is enabled
- service:
- name: "{{ ovirt_guest_agent_service_name }}"
- enabled: yes
- when:
- - ansible_distribution_major_version|int < 8
- - ovirt_package_installation is succeeded
+- name: enable ovirt guest agent
+ block:
+ - name: ensure ovirt guest agent is enabled (RHEL < 8)
+ ansible.builtin.service:
+ name: "{{ ovirt_guest_agent_service_name }}"
+ enabled: true
+ when: ansible_distribution_major_version|int < 8
-- name: ensure qemu guest agent is enabled
- service:
- name: "{{ qemu_guest_agent_service_name }}"
- enabled: yes
- when:
- - ansible_distribution_major_version|int == 8
- - qemu_package_installation is succeeded
\ No newline at end of file
+ - name: ensure qemu guest agent is enabled (RHEL >= 8)
+ ansible.builtin.service:
+ name: "{{ qemu_guest_agent_service_name }}"
+ enabled: true
+ when: ansible_distribution_major_version|int >= 8
+ when: qemu_package_installation is succeeded
diff --git a/roles/oatakan.rhel_template_build/tasks/virtualbox.yml b/roles/oatakan.rhel_template_build/tasks/virtualbox.yml
index 390cba3..dbc57cd 100644
--- a/roles/oatakan.rhel_template_build/tasks/virtualbox.yml
+++ b/roles/oatakan.rhel_template_build/tasks/virtualbox.yml
@@ -1,34 +1,37 @@
---
- name: Get VirtualBox version.
- slurp:
+ ansible.builtin.slurp:
src: /home/{{ local_account_username }}/.vbox_version
register: get_virtualbox_version
- name: Set VirtualBox version.
- set_fact:
+ ansible.builtin.set_fact:
virtualbox_version: "{{ get_virtualbox_version['content'] | b64decode }}"
-- name: Mount VirtualBox guest additions ISO.
- mount:
- name: /tmp/vbox
- src: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
- opts: loop
- state: mounted
- fstype: iso9660
+- name: install virtualbox guest additions
+ block:
+ - name: Mount VirtualBox guest additions ISO.
+ ansible.posix.mount:
+ name: /tmp/vbox
+ src: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
+ opts: loop
+ state: mounted
+ fstype: iso9660
-- name: Run VirtualBox guest additions installation.
- shell: sh /tmp/vbox/VBoxLinuxAdditions.run
- changed_when: false
- failed_when: false
+ - name: Run VirtualBox guest additions installation.
+ ansible.builtin.shell: sh /tmp/vbox/VBoxLinuxAdditions.run
+ changed_when: false
+ failed_when: false
-- name: Unmount VirtualBox guest additions ISO.
- mount:
- name: /tmp/vbox
- src: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
- state: absent
- fstype: iso9660
+ always:
+ - name: Unmount VirtualBox guest additions ISO.
+ ansible.posix.mount:
+ name: /tmp/vbox
+ src: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
+ state: absent
+ fstype: iso9660
-- name: Delete VirtualBox guest additions ISO.
- file:
- path: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
- state: absent
+ - name: Delete VirtualBox guest additions ISO.
+ ansible.builtin.file:
+ path: "/home/{{ local_account_username }}/VBoxGuestAdditions_{{ virtualbox_version }}.iso"
+ state: absent
diff --git a/roles/oatakan.rhel_template_build/tasks/vmware-tools.yml b/roles/oatakan.rhel_template_build/tasks/vmware-tools.yml
index 15a44bf..1c6728b 100644
--- a/roles/oatakan.rhel_template_build/tasks/vmware-tools.yml
+++ b/roles/oatakan.rhel_template_build/tasks/vmware-tools.yml
@@ -1,65 +1,71 @@
---
- name: Add VMWare tools repository.
- template:
+ ansible.builtin.template:
src: vmware-tools.repo.j2
dest: /etc/yum.repos.d/vmware-tools.repo
+ mode: '0644'
- name: Import VMWare tools GPG keys.
- rpm_key:
+ ansible.builtin.rpm_key:
key: "https://packages.vmware.com/tools/keys/VMWARE-PACKAGING-GPG-RSA-KEY.pub"
state: present
-- name: Create temporary directories for VMware tools.
- file:
- path: "/tmp/{{ item }}"
- state: directory
- loop:
- - vmfusion
- - vmfusion-archive
+- name: Install vmware tools
+ block:
+ - name: Create temporary directories for VMware tools.
+ ansible.builtin.file:
+ path: "/tmp/{{ item }}"
+ state: directory
+ mode: '0755'
+ loop:
+ - vmfusion
+ - vmfusion-archive
-- name: Mount VMware tools.
- mount:
- name: /tmp/vmfusion
- src: /home/{{ local_account_username }}/linux.iso
- fstype: iso9660
- opts: loop
- state: mounted
+ - name: Mount VMware tools.
+ ansible.posix.mount:
+ name: /tmp/vmfusion
+ src: /home/{{ local_account_username }}/linux.iso
+ fstype: iso9660
+ opts: loop
+ state: mounted
-- name: Find any VMwareTools file.
- find:
- paths: /tmp/vmfusion
- patterns: "^VMwareTools-*.tar.gz"
- use_regex: yes
- register: vmware_tools_files
+ - name: Find any VMwareTools file.
+ ansible.builtin.find:
+ paths: /tmp/vmfusion
+      patterns: "^VMwareTools-.*\.tar\.gz"
+ use_regex: true
+ register: vmware_tools_files
-- block:
- - name: Decompress VMware Tools installer into archive folder.
- unarchive:
- src: "{{ vmware_tools_files.files[0] }}"
- dest: /tmp/vmfusion-archive
- remote_src: yes
+    - name: Run the installer
+ block:
+ - name: Decompress VMware Tools installer into archive folder.
+ ansible.builtin.unarchive:
+        src: "{{ vmware_tools_files.files[0].path }}"
+ dest: /tmp/vmfusion-archive
+ remote_src: true
- - name: Run the VMware tools installer.
- shell: /tmp/vmfusion-archive/vmware-tools-distrib/vmware-install.pl --default
- changed_when: false
- when: vmware_tools_files.matched > 0
+ - name: Run the VMware tools installer.
+ ansible.builtin.shell: /tmp/vmfusion-archive/vmware-tools-distrib/vmware-install.pl --default
+ changed_when: false
+ when: vmware_tools_files.matched > 0
-- name: Unmount VMware tools.
- mount:
- name: /tmp/vmfusion
- src: /home/{{ local_account_username }}/linux.iso
- fstype: iso9660
- state: absent
+ always:
+ - name: Unmount VMware tools.
+ ansible.posix.mount:
+ name: /tmp/vmfusion
+ src: /home/{{ local_account_username }}/linux.iso
+ fstype: iso9660
+ state: absent
-- name: Remove temporary directories for VMware tools.
- file:
- path: "/tmp/{{ item }}"
- state: absent
- loop:
- - vmfusion
- - vmfusion-archive
+ - name: Remove temporary directories for VMware tools.
+ ansible.builtin.file:
+ path: "/tmp/{{ item }}"
+ state: absent
+ loop:
+ - vmfusion
+ - vmfusion-archive
-- name: Delete VMware Tools.
- file:
- path: /home/{{ local_account_username }}/linux.iso
- state: absent
\ No newline at end of file
+ - name: Delete VMware Tools.
+ ansible.builtin.file:
+ path: /home/{{ local_account_username }}/linux.iso
+ state: absent
diff --git a/roles/oatakan.rhel_template_build/tasks/vmware.yml b/roles/oatakan.rhel_template_build/tasks/vmware.yml
index a646260..0409137 100644
--- a/roles/oatakan.rhel_template_build/tasks/vmware.yml
+++ b/roles/oatakan.rhel_template_build/tasks/vmware.yml
@@ -1,16 +1,17 @@
---
- name: Add vmhgfs module (RHEL 6).
- template:
+ ansible.builtin.template:
src: vmhgfs.modules.j2
dest: /etc/sysconfig/modules/vmhgfs.modules
- mode: 0755
+ mode: '0755'
when: ansible_distribution_major_version|int <= 6
- name: Install open-vm-tools.
- yum:
+ ansible.builtin.yum:
name: open-vm-tools
state: present
when: ansible_distribution_major_version|int >= 7
-- include_tasks: vmware-tools.yml
+- name: Install VMware tools
+ ansible.builtin.include_tasks: vmware-tools.yml
when: ansible_distribution_major_version|int <= 6
diff --git a/roles/oatakan.rhel_template_build/templates/grow_part.sh.j2 b/roles/oatakan.rhel_template_build/templates/grow_part.sh.j2
index b74e452..bd2003a 100644
--- a/roles/oatakan.rhel_template_build/templates/grow_part.sh.j2
+++ b/roles/oatakan.rhel_template_build/templates/grow_part.sh.j2
@@ -2,9 +2,19 @@
the_root_vgname='{{ ansible_lvm.lvs[the_root_lvname].vg | default('vg00') }}'
the_root_lvname='{{ the_root_lvname | default('root') }}'
+
+error=$(vgdisplay $the_root_vgname 2>&1 >/dev/null)
+device_name=$(echo "$error" | grep -o '/dev/[^[:space:]]*')
+
+if [[ -n "$device_name" ]]; then
+ # need to remove the lvmdevice and add it back
+ lvmdevices --yes --deldev $device_name
+ lvmdevices --yes --adddev $device_name
+fi
+
the_root_pvname=$(vgdisplay -v $the_root_vgname 2> /dev/null | awk '/PV Name/ {print $3}')
the_root_pv_partnum=$(echo $the_root_pvname | grep -o '[0-9]$')
-the_root_pv_device=$(echo $the_root_pvname | grep -o '.*[^0-9]')
+the_root_pv_device="/dev/$(lsblk -ndo PKNAME $the_root_pvname)"
the_root_mount_point=$(lsblk -l -o NAME,MOUNTPOINT | grep $the_root_vgname-$the_root_lvname | awk '{print $2}')
/usr/bin/growpart $the_root_pv_device $the_root_pv_partnum
diff --git a/roles/oatakan.windows_ovirt_guest_agent/.travis.yml b/roles/oatakan.windows_ovirt_guest_agent/.travis.yml
deleted file mode 100644
index 36bbf62..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-language: python
-python: "2.7"
-
-# Use the new container infrastructure
-sudo: false
-
-# Install ansible
-addons:
- apt:
- packages:
- - python-pip
-
-install:
- # Install ansible
- - pip install ansible
-
- # Check ansible version
- - ansible --version
-
- # Create ansible.cfg with correct roles_path
- - printf '[defaults]\nroles_path=../' >ansible.cfg
-
-script:
- # Basic role syntax check
- - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
-
-notifications:
- webhooks: https://galaxy.ansible.com/api/v1/notifications/
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_guest_agent/LICENSE b/roles/oatakan.windows_ovirt_guest_agent/LICENSE
deleted file mode 100644
index dc1b6e7..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2019 Orcun Atakan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/roles/oatakan.windows_ovirt_guest_agent/README.md b/roles/oatakan.windows_ovirt_guest_agent/README.md
deleted file mode 100644
index 70effcd..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# ansible-role-windows_ovirt_guest_agent
-This repo contains an Ansible role that installs ovirt agent for Windows images.
-
-Role Name
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of roles that this role utilizes:
-
-- oatakan.windows_virtio
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - oatakan.windows_ovirt_guest_agent
-
-License
--------
-
-MIT
-
-Author Information
-------------------
-
-Orcun Atakan
-
diff --git a/roles/oatakan.windows_ovirt_guest_agent/defaults/main.yml b/roles/oatakan.windows_ovirt_guest_agent/defaults/main.yml
deleted file mode 100644
index 38d5370..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/defaults/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-
-ovirt_package: ovirt-guest-agent
-ovirt_guest_agent_service_name: ovirt-guest-agent
-ovirt_win_iso_url: https://resources.ovirt.org/pub/ovirt-4.3/iso/oVirt-toolsSetup/4.3-3/el7/oVirt-toolsSetup-4.3-3.el7.iso
-ovirt_win_iso_name: oVirt-toolsSetup.iso
-
-virtio_role: oatakan.windows_virtio
-
-windows_service_status_code:
- 1: Stopped
- 4: Running
- 9: Not Installed
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_guest_agent/handlers/main.yml b/roles/oatakan.windows_ovirt_guest_agent/handlers/main.yml
deleted file mode 100644
index 66dcf2f..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/handlers/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-- name: restart Ovirt Guest Agent
- service: name={{ ovirt_guest_agent_service_name }} state=restarted
-
-- name: enabled Ovirt Guest Agent
- service: name={{ ovirt_guest_agent_service_name }} enabled=yes
diff --git a/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info b/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info
deleted file mode 100644
index e68ade5..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Fri Oct 15 18:59:15 2021
-version: master
diff --git a/roles/oatakan.windows_ovirt_guest_agent/meta/main.yml b/roles/oatakan.windows_ovirt_guest_agent/meta/main.yml
deleted file mode 100644
index c1cd130..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/meta/main.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-galaxy_info:
- author: Orcun Atakan
- description: Ansible galaxy role for installing ovirt agent on Windows images.
- role_name: windows_ovirt_guest_agent
- company: Red Hat
-
- license: MIT
-
- min_ansible_version: 2.5
-
- platforms:
- - name: Windows
- versions:
- - all
-
- cloud_platforms:
- - ovirt
-
- galaxy_tags:
- - windows
- - ovirt
- - rhev
- - rhv
- - cloud
- - multicloud
-
-dependencies: []
diff --git a/roles/oatakan.windows_ovirt_guest_agent/tasks/Linux.yml b/roles/oatakan.windows_ovirt_guest_agent/tasks/Linux.yml
deleted file mode 100644
index df364b1..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/tasks/Linux.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- name: install Ovirt Guest Agent
- become: true
- package:
- name: "{{ item }}"
- with_items: "{{ ovirt_package }}"
- notify:
- - enabled Ovirt Guest Agent
- - restart Ovirt Guest Agent
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml b/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml
deleted file mode 100644
index 3ce671d..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/tasks/Windows.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-
-- name: "{{ ansible_distribution | lower }} | import virtio role"
- import_role:
- name: "{{ virtio_role }}"
- vars:
- virtio_win_iso_url: "{{ ovirt_win_iso_url }}"
- virtio_win_iso_name: "{{ ovirt_win_iso_name }}"
- virtio_win_ovirt: true
- virtio_win_iso_path: ''
-
-- debug:
- var: virtio_win_iso_path
-
-#- name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent"
-# win_dsc:
-# resource_name: Package
-# Path: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe'
-# ProductId: '{9B265631-958D-415B-9925-53DEEC43E31D}'
-# Name: QEMU guest agent
-# Arguments: >
-# /S
-
-- block:
- - name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent"
- win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S'
- args:
- executable: cmd
- creates: "{{ ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools"
- async: 1000
- poll: 0
-
- rescue:
- - name: "{{ ansible_distribution | lower }} | install Ovirt Guest Agent"
- win_shell: '{{ virtio_win_iso_path }}\ovirt-guest-tools-setup.exe /S'
- args:
- executable: cmd
- creates: "{{ ansible_env['ProgramFiles(x86)'] }}\\oVirt Guest Tools"
-
-- name: "{{ ansible_distribution | lower }} | wait for system to be online"
- wait_for_connection:
- connect_timeout: 10
- sleep: 5
- delay: 90
- timeout: 300
-
-- name: "{{ ansible_distribution | lower }} | get service information"
- win_shell: Get-Service OVirtGuestService | ConvertTo-Json
- register: register_service_info
- ignore_errors: yes
-
-- name: "{{ ansible_distribution | lower }} | set fact from service info"
- set_fact:
- ovirt_guest_agent_service_status: "{{ register_service_info.stdout | default('DEFAULT') | from_json }}"
- when: register_service_info is success
- ignore_errors: yes
-
-- name: "{{ ansible_distribution | lower }} | set fact as not installed"
- set_fact:
- ovirt_guest_agent_service_status:
- DisplayName: 'ovirt-guest-agent'
- Status: 9
- when: register_service_info is undefined
-
-- name: "{{ ansible_distribution | lower }} | service display status"
- debug:
- msg: "{{ ovirt_guest_agent_service_status['DisplayName'] }} is {{ windows_service_status_code[ovirt_guest_agent_service_status['Status']] | lower }}"
- when: ovirt_guest_agent_service_status is defined
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_guest_agent/tasks/main.yml b/roles/oatakan.windows_ovirt_guest_agent/tasks/main.yml
deleted file mode 100644
index 569774b..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-- name: include distribution task
- include_tasks: "{{ ansible_os_family }}.yml"
-
-- name: force all notified handlers to run here
- meta: flush_handlers
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_guest_agent/tests/inventory b/roles/oatakan.windows_ovirt_guest_agent/tests/inventory
deleted file mode 100644
index d18580b..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/tests/inventory
+++ /dev/null
@@ -1 +0,0 @@
-localhost
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_guest_agent/tests/test.yml b/roles/oatakan.windows_ovirt_guest_agent/tests/test.yml
deleted file mode 100644
index d17df2e..0000000
--- a/roles/oatakan.windows_ovirt_guest_agent/tests/test.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- hosts: localhost
- remote_user: Administrator
- vars:
- ansible_port: 5986
- ansible_connection: winrm
- ansible_winrm_transport: credssp
- ansible_winrm_server_cert_validation: ignore
- roles:
- - oatakan.windows_ovirt_guest_agent
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/.travis.yml b/roles/oatakan.windows_ovirt_template/.travis.yml
deleted file mode 100644
index 36bbf62..0000000
--- a/roles/oatakan.windows_ovirt_template/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-language: python
-python: "2.7"
-
-# Use the new container infrastructure
-sudo: false
-
-# Install ansible
-addons:
- apt:
- packages:
- - python-pip
-
-install:
- # Install ansible
- - pip install ansible
-
- # Check ansible version
- - ansible --version
-
- # Create ansible.cfg with correct roles_path
- - printf '[defaults]\nroles_path=../' >ansible.cfg
-
-script:
- # Basic role syntax check
- - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
-
-notifications:
- webhooks: https://galaxy.ansible.com/api/v1/notifications/
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/LICENSE b/roles/oatakan.windows_ovirt_template/LICENSE
deleted file mode 100644
index 8a6444e..0000000
--- a/roles/oatakan.windows_ovirt_template/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020 Orcun Atakan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/README.md b/roles/oatakan.windows_ovirt_template/README.md
deleted file mode 100644
index 9d68ecc..0000000
--- a/roles/oatakan.windows_ovirt_template/README.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# windows_ovirt_template
-This repo contains an Ansible role that builds a Windows VM template from an ISO file on Ovirt/RHV.
-You can run this role as a part of CI/CD pipeline for building Windows templates on Ovirt/RHV from an ISO file.
-
-> **_Note:_** This role is provided as an example only. Do not use this in production. You can fork/clone and add/remove steps for your environment based on your organization's security and operational requirements.
-
-Requirements
-------------
-
-You need to have the following packages installed on your ansible control machine:
-
-- mkisofs
-
-You need to enable qemu_cmdline hook on your RHV/Ovirt environment, this is required to enable attaching multiple iso files. Follow the instructions documented here:
-
-https://www.ovirt.org/develop/developer-guide/vdsm/hook/qemucmdline.html
-
-Before you can use this role, you need to make sure you have Windows install media iso file uploaded to a iso domain on your RHV/Ovirt environment.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-Import ovirt.ovirt collections.
-
-A list of roles that this role utilizes, make sure to call this out in requirements.yml file under roles directory or download manually:
-
-- oatakan.windows_template_build
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- # import ovirt.ovirt collections
- - name: create a ovirt windows template
- hosts: all
- gather_facts: False
- connection: local
- become: no
- vars:
- template_force: yes #overwrite existing template with the same name
- export_ovf: no # export the template to export domain upon creation
- local_account_password: ''
- local_administrator_password: ''
- distro_name: win2019 # this needs to be one of the standard values see 'os_short_names' var
- template_vm_name: win2019_template
- template_vm_root_disk_size: 30
- template_vm_guest_id: windows_2019x64
- template_vm_memory: 4096
- template_vm_efi: false # you need to install efi file to use this, false should be fine in most cases
- iso_file_name: '' # name of the iso file
- iso_image_index: '' # put index number here from the order inside the iso, for example 1 - standard, 2 - core etc
- iso_product_key: ''
- vm_ansible_port: 5986
- vm_ansible_winrm_transport: credssp
- vm_upgrade_powershell: false # only needed for 2008 R2
- install_updates: false # it will take longer to build with the updates, set to true if you want the updates
-
- ovirt_datacenter: '' # name of the datacenter
- ovirt_cluster: '' # name of the cluster
- ovirt_data_domain: '' # name of the data domain
- ovirt_export_domain: '' # name of the iso domain
- ovirt_iso_domain: '' # this is deprecated as of 4.3 you can omit if not used
-
- template_vm_network_name: ovirtmgmt
- template_vm_ip_address: 192.168.10.95 # static ip is required
- template_vm_netmask: 255.255.255.0
- template_vm_gateway: 192.168.10.254
- template_vm_domain: example.com
- template_vm_dns_servers:
- - 8.8.4.4
- - 8.8.8.8
-
- roles:
- - oatakan.windows_ovirt_template
-
-For disconnected environments, you can overwrite this variable to point to a local copy of a script to enable winrm:
-
-**winrm_enable_script_url:** https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1
-
-you can also localize virtio-win and update the virtio_iso_url variable to point to your local url:
-
-**virtio_iso_url:** https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.173-2/virtio-win.iso
-
-License
--------
-
-MIT
-
-Author Information
-------------------
-
-Orcun Atakan
diff --git a/roles/oatakan.windows_ovirt_template/defaults/main.yml b/roles/oatakan.windows_ovirt_template/defaults/main.yml
deleted file mode 100644
index 7be489c..0000000
--- a/roles/oatakan.windows_ovirt_template/defaults/main.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-
-install_updates: yes
-instance_wait_retry_limit: 300
-instance_wait_connection_timeout: 400
-instance_stop_retry_limit: 60
-
-# this will remove existing template with the same name
-template_force: no
-template_found: no
-
-export_ovf: no
-
-enable_auto_logon: yes
-
-remove_vm_on_error: yes
-vm_failed: no
-
-custom_efi_enabled: no
-custom_efi_path: /usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd
-
-qemu_second_cdrom_device_bus_type: ide
-qemu_second_cdrom_device_bus_id: 3 # tested with Q35 chipset, if using I440FX chipset, try setting this to 1
-qemu_second_cdrom_device_bus_unit: 0 # tested with Q35 chipset, if using I440FX chipset, try setting this to 1
-
-virtio_iso_url: https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.173-2/virtio-win.iso
-winrm_enable_script_url: https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1
-
-set_network_to_private: '([Activator]::CreateInstance([Type]::GetTypeFromCLSID([Guid]"{DCB00C01-570F-4A9B-8D69-199FDBA5723B}"))).GetNetworkConnections() | % {$_.GetNetwork().SetCategory(1)}'
-
-windows_build_role: oatakan.windows_template_build
-
-local_administrator_password: Chang3MyP@ssw0rd21
-local_account_username: ansible
-local_account_password: Chang3MyP@ssw0rd21
-
-distro_name: win2019
-iso_file_name: 17763.253.190108-0006.rs5_release_svc_refresh_SERVER_EVAL_x64FRE_en-us.iso
-windows_sysprep_template_folder: windows_server
-
-vm_ansible_port: 5986
-vm_ansible_winrm_transport: credssp
-vm_upgrade_powershell: no
-
-template_vm_name: windows-2019-standard-core-auto
-template_vm_root_disk_size: 30
-template_vm_root_disk_format: cow
-template_vm_root_disk_interface: virtio
-template_vm_memory: 4096
-template_vm_cpu: 2
-template_vm_guest_id: windows_2019x64
-template_vm_efi: no
-template_vm_network_name: ovirtmgmt
-template_vm_ip_address: 192.168.10.95
-template_vm_netmask: 255.255.255.0
-template_vm_gateway: 192.168.10.254
-template_vm_domain: home.ad
-template_vm_dns_servers:
- - 192.168.1.254
- - 8.8.8.8
-
-template_convert_timeout: 900
-template_convert_seal: no
-template_timezone: 'GMT Standard Time'
-
-ovirt_datacenter: mydatacenter
-ovirt_cluster: production
-ovirt_data_domain: data_domain
-ovirt_export_domain: export_domain
-ovirt_iso_domain: iso_domain
-
-os_short_names:
- win2008: 2k8R2
- win2012: 2k12R2
- win2016: 2k16
- win2019: 2k19
- win2022: 2k19 # 2k22 is not *yet* available on virtio iso
- win10: w10
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info b/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info
deleted file mode 100644
index cd1267d..0000000
--- a/roles/oatakan.windows_ovirt_template/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Fri Oct 15 18:59:12 2021
-version: ''
diff --git a/roles/oatakan.windows_ovirt_template/meta/main.yml b/roles/oatakan.windows_ovirt_template/meta/main.yml
deleted file mode 100644
index a62662a..0000000
--- a/roles/oatakan.windows_ovirt_template/meta/main.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-galaxy_info:
- author: Orcun Atakan
- description: Ansible galaxy role for building a Windows VM templates from an ISO file on Ovirt/RHV.
- role_name: windows_ovirt_template
- company: Red Hat
-
- license: MIT
-
- min_ansible_version: 2.5
-
- platforms:
- - name: Windows
- versions:
- - all
-
- cloud_platforms:
- - oVirt
-
- galaxy_tags:
- - windows
- - ovirt
- - rhv
- - cloud
- - multicloud
- - template
-
-dependencies: []
diff --git a/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml b/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml
deleted file mode 100644
index b184c16..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/convert_to_template.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: convert to template
- ovirt.ovirt.ovirt_template:
- auth: "{{ ovirt_auth }}"
- name: "{{ template.name }}"
- vm: "{{ template.name }}"
- cluster: "{{ providers.ovirt.cluster }}"
- timeout: "{{ template_convert_timeout }}"
- seal: "{{ template_convert_seal }}"
- when: template is defined
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml b/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml
deleted file mode 100644
index f42f425..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/datastore_iso_remove.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-
-- block:
- - name: remove iso file from data_domain
- ovirt.ovirt.ovirt_disk:
- auth: "{{ ovirt_auth }}"
- name: "{{ iso_file }}"
- storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
- state: absent
- rescue:
- - include_tasks: wait_iso_disk_unlock_pre29.yml
- when: ansible_version.full is version('2.9', '<')
-
- - include_tasks: wait_iso_disk_unlock.yml
- when: ansible_version.full is version('2.9', '>=')
-
- - name: remove iso file from data_domain
- ovirt.ovirt.ovirt_disk:
- auth: "{{ ovirt_auth }}"
- name: "{{ iso_file }}"
- storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
- state: absent
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml b/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml
deleted file mode 100644
index 5dbde3c..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/datastore_upload.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-
-- name: validate file
- stat:
- path: "{{ playbook_dir }}/{{ temp_directory }}/windows_{{ distro_name }}_autounattend_autogen.iso"
- get_checksum: no
- register: iso_file_check
-
-- name: upload iso file to data_domain
- ovirt.ovirt.ovirt_disk:
- auth: "{{ ovirt_auth }}"
- name: "{{ iso_file }}"
- upload_image_path: "{{ iso_file_check.stat.path }}"
- storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
- size: "{{ (iso_file_check.stat.size/1024/1024)|round(0, 'ceil')|int|string }}MiB"
- wait: true
- bootable: true
- format: raw
- content_type: iso
- force: yes
- register: disk_iso_file
- when: iso_file_check.stat.exists
-
-- name: set iso file disk id
- set_fact:
- ks_iso_file_disk_id: "{{ disk_iso_file.disk.id }}"
- ks_iso_file_image_id: "{{ disk_iso_file.disk.image_id }}"
- ovirt_datacenter_id: "{{ disk_iso_file.disk.quota.href | regex_replace('^/ovirt-engine/api/datacenters/(.*)/quotas.*$', '\\1') }}"
- ovirt_datastore_id: "{{ disk_iso_file.disk.storage_domains[0].id }}"
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml b/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml
deleted file mode 100644
index 8a38d6f..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/export_ovf.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-
-- name: export template to export domain
- ovirt.ovirt.ovirt_template:
- auth: "{{ ovirt_auth }}"
- state: exported
- name: "{{ template.name }}"
- export_domain: "{{ providers.ovirt.export_domain }}"
- cluster: "{{ providers.ovirt.cluster }}"
- async: 7200
- poll: 0
- register: export_ovf_file
-
-- name: wait for export to complete
- async_status:
- jid: "{{ export_ovf_file.ansible_job_id }}"
- register: ovf
- until: ovf.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/main.yml b/roles/oatakan.windows_ovirt_template/tasks/main.yml
deleted file mode 100644
index eb70636..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/main.yml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-
-- name: obtain SSO token with using username/password credentials
- ovirt.ovirt.ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
-
-- include_tasks: preflight_check_pre29.yml
- when: ansible_version.full is version('2.9', '<')
-
-- include_tasks: preflight_check.yml
- when: ansible_version.full is version('2.9', '>=')
-
-# remove existing template
-- block:
-
- - include_tasks: remove_template.yml
-
- when:
- - template_force|bool
- - template_found|bool
-
-- block:
- - include_tasks: make_iso.yml
-
- - include_tasks: provision_vm.yml
-
- - name: refresh inventory
- meta: refresh_inventory
-
- - name: clear gathered facts
- meta: clear_facts
-
- - name: clear any host errors
- meta: clear_host_errors
-
- - name: add host
- add_host:
- hostname: template_host
- ansible_host: "{{ template_vm_ip_address }}"
- ansible_user: "{{ unattend.local_accounts[0].name }}"
- ansible_password: "{{ unattend.local_accounts[0].password }}"
- ansible_port: "{{ vm_ansible_port | default('5986') }}"
- ansible_connection: winrm
- ansible_winrm_transport: "{{ vm_ansible_winrm_transport | default('credssp') }}"
- ansible_winrm_server_cert_validation: ignore
- ansible_winrm_operation_timeout_sec: 250
- ansible_winrm_read_timeout_sec: 280
- ansible_win_async_startup_timeout: 60
-
- - include_role:
- name: "{{ windows_build_role }}"
- apply:
- vars:
- target_ovirt: yes
- install_updates: yes
- remove_apps: yes
- clean_up_components: yes
- upgrade_powershell: "{{ vm_upgrade_powershell | default('no') }}"
- delegate_to: template_host
-
- - name: refresh SSO credentials
- ovirt.ovirt.ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
-
- - include_tasks: stop_vm.yml
-
- - include_tasks: convert_to_template.yml
-
- - include_tasks: export_ovf.yml
- when: export_ovf|bool
-
- rescue:
- - name: refresh SSO credentials
- ovirt.ovirt.ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
-
- - include_tasks: remove_template.yml
- when: remove_vm_on_error|bool
-
- - name: set vm_failed variable
- set_fact:
- vm_failed: yes
-
- always:
- - name: refresh SSO credentials
- ovirt.ovirt.ovirt_auth:
- url: "{{ lookup('env', 'OVIRT_URL')|default(ovirt.url, true) }}"
- username: "{{ lookup('env', 'OVIRT_USERNAME')|default(ovirt.username, true) }}"
- password: "{{ lookup('env', 'OVIRT_PASSWORD')|default(ovirt.password, true) }}"
- insecure: yes
-
- - include_tasks: remove_vm.yml
- when: remove_vm_on_error|bool or (not remove_vm_on_error|bool and not vm_failed|bool)
-
- - include_tasks: datastore_iso_remove.yml
-
- - name: remove temporary directory
- file:
- path: "{{ temp_directory }}"
- state: absent
-
- - name: logout from oVirt
- ovirt.ovirt.ovirt_auth:
- state: absent
- ovirt_auth: "{{ ovirt_auth }}"
-
-- name: fail if needed
- fail:
- msg: "fail to create a template."
- when: vm_failed|bool
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml b/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml
deleted file mode 100644
index 89242c1..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/make_iso.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- block:
- - name: create temporary directory
- file:
- path: "{{ temp_directory }}/ks_iso"
- state: directory
-
- - name: create Autounattend.xml file
- template:
- src: "{{ windows_sysprep_template_folder }}/Autounattend.xml.j2"
- dest: "{{ temp_directory }}/ks_iso/Autounattend.xml"
-
- - name: download ConfigureRemotingForAnsible.ps1 script
- get_url:
- url: "{{ winrm_enable_script_url }}"
- dest: "{{ temp_directory }}/ks_iso/ConfigureRemotingForAnsible.ps1"
- register: download_script
- until: download_script is success
- delay: 3
- retries: 5
-
- - name: include virtio drivers
- include_tasks: virtio_drivers.yml
-
- - name: create iso
- command: mkisofs -V ADDISO -r -iso-level 4 -o {{ playbook_dir }}/{{ temp_directory }}/windows_{{ distro_name }}_autounattend_autogen.iso .
- args:
- chdir: "{{ playbook_dir }}/{{ temp_directory }}/ks_iso"
-
- - include_tasks: datastore_upload.yml
- always:
- - name: remove temporary files
- file:
- path: "{{ temp_directory }}/{{ item }}"
- state: absent
- loop:
- - windows_{{ distro_name }}_autounattend_autogen.iso
- - virtio_win.iso
- - ks_iso/
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml b/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml
deleted file mode 100644
index 7f423ac..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/preflight_check.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-
-- name: get the datacenter name
- ovirt.ovirt.ovirt_datacenter_info:
- auth: "{{ ovirt_auth }}"
- pattern: "Clusters.name = {{ providers.ovirt.cluster }}"
- register: datacenter_info
-
-- name: get storage information
- ovirt.ovirt.ovirt_storage_domain_info:
- auth: "{{ ovirt_auth }}"
- pattern: "datacenter={{ datacenter_info.ovirt_datacenters[0].name }}"
- register: storage_info
- when:
- - template_disk_storage is undefined
-
-- name: get data domain
- set_fact:
- disk_storage_domain: "{{ storage_info.ovirt_storage_domains|json_query(the_query)|list|first|default(None) }}"
- when:
- - template_disk_storage is undefined
- vars:
- the_query: "[?type=='data']"
-
-- name: get iso domain (deprecated as of oVirt/RHV 4.3)
- set_fact:
- iso_domain: "{{ storage_info.ovirt_storage_domains|json_query(the_query)|list|first|default(None) }}"
- vars:
- the_query: "[?type=='iso']"
-
-- name: check if template already exists
- ovirt.ovirt.ovirt_template_info:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ template.name }} and datacenter={{ datacenter_info.ovirt_datacenters[0].name }}"
- register: template_info
-
-- block:
- - name: set template_found to yes
- set_fact:
- template_found: yes
-
- - name: fail with message
- fail:
- msg: "Existing template found on ovirt/rhv: {{ template.name }}"
- when: not template_force|bool
- when:
- - template_info.ovirt_templates is defined
- - template_info.ovirt_templates | length > 0
-
-- name: check iso file on data domain
- ovirt.ovirt.ovirt_disk_info:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ iso_file_name }}"
- register: ovirt_disk_main_iso
- when: iso_file_name is defined
-
-- debug:
- msg: "{{ ovirt_disk_main_iso }}"
-
-- name: set file id of the iso file
- set_fact:
- iso_file_id: "{{ ovirt_disk_main_iso.ovirt_disks[0].id }}"
- when:
- - ovirt_disk_main_iso.ovirt_disks | length > 0
- - ovirt_disk_main_iso.ovirt_disks[0].id is defined
- - ovirt_disk_main_iso.ovirt_disks[0].content_type == 'iso'
-
-- name: fail with message
- fail:
- msg: "iso file ({{ iso_file_name }}) could not be found on the data domain and iso domain does not exists"
- when:
- - iso_file_id is undefined
- - iso_domain is undefined or iso_domain|length == 0
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml
deleted file mode 100644
index e4cc192..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/preflight_check_pre29.yml
+++ /dev/null
@@ -1,69 +0,0 @@
----
-
-- name: get the datacenter name (<2.9)
- ovirt.ovirt.ovirt_datacenter_facts:
- auth: "{{ ovirt_auth }}"
- pattern: "Clusters.name = {{ providers.ovirt.cluster }}"
-
-- name: get storage information (<2.9)
- ovirt.ovirt.ovirt_storage_domain_facts:
- auth: "{{ ovirt_auth }}"
- pattern: "datacenter={{ ovirt_datacenters[0].name }}"
- when:
- - template_disk_storage is undefined
-
-- name: get data domain (<2.9)
- set_fact:
- disk_storage_domain: "{{ ovirt_storage_domains|json_query(the_query)|list|first }}"
- when:
- - template_disk_storage is undefined
- vars:
- the_query: "[?type=='data']"
-
-- name: get iso domain (deprecated as of oVirt/RHV 4.3) (<2.9)
- set_fact:
- iso_domain: "{{ ovirt_storage_domains|json_query(the_query)|list|first }}"
- vars:
- the_query: "[?type=='iso']"
-
-- name: check if template already exists (<2.9)
- ovirt.ovirt.ovirt_template_facts:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ template.name }} and datacenter={{ ovirt_datacenters[0].name }}"
-
-- block:
- - name: set template_found to yes
- set_fact:
- template_found: yes
-
- - name: fail with message
- fail:
- msg: "Existing template found on ovirt/rhv: {{ template.name }}"
- when: not template_force|bool
- when:
- - ovirt_templates is defined
- - ovirt_templates | length > 0
-
-- name: check iso file on data domain
- ovirt.ovirt.ovirt_disk_facts:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ iso_file_name }}"
- when: iso_file_name is defined
-
-- debug:
- msg: "{{ ovirt_disks }}"
-
-- name: set file id of the iso file
- set_fact:
- iso_file_id: "{{ ovirt_disks[0].id }}"
- when:
- - ovirt_disks | length > 0
- - ovirt_disks[0].id is defined
- - ovirt_disks[0].content_type == 'iso'
-
-- name: fail with message
- fail:
- msg: "iso file ({{ iso_file_name }}) could not be found on the data domain and iso domain does not exists"
- when:
- - iso_file_id is undefined
- - iso_domain is undefined or iso_domain|length == 0
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml b/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml
deleted file mode 100644
index 2c161d6..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/provision_vm.yml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-
-- name: provision a new vm
- ovirt.ovirt.ovirt_vm:
- auth: "{{ ovirt_auth }}"
- name: "{{ template.name }}"
- cluster: "{{ providers.ovirt.cluster|default('Default') }}"
- state: present
- wait: yes
- memory: "{{ template.memory }}MiB"
- cpu_sockets: "{{ template.cpu }}"
- bios_type: "{{ template.bios_type | default(omit) }}"
- boot_devices:
- - hd
- - cdrom
- cd_iso: "{{ template.cd_iso }}"
- type: server
- high_availability: true
- nics:
- - name: nic1
- profile_name: "{{ template.networks[0].name }}"
- network: "{{ template.networks[0].name }}"
- custom_properties: "{{ custom_properties | default(omit) }}"
- operating_system: "{{ template_vm_guest_id | default(omit) }}"
- timezone: "{{ template_timezone | default(omit) }}"
- async: 7200
- poll: 0
- register: deploy
-
-- name: wait for instance creation to complete
- async_status: jid="{{ deploy.ansible_job_id }}"
- register: instance
- until: instance.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
-
-- name: create a disk
- ovirt.ovirt.ovirt_disk:
- auth: "{{ ovirt_auth }}"
- name: "{% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}"
- vm_name: "{{ template.name }}"
- size: "{{ item.size | default(omit) }}"
- format: "{{ item.format | default(omit) }}"
- interface: "{{ item.interface | default(omit) }}"
- bootable: "{{ item.bootable | default(omit) }}"
- storage_domain: "{{ item.storage_domain | default(omit) }}"
- activate: yes
- state: present
- wait: yes
- async: 7200
- poll: 0
- register: create_disks
- loop: "{{ template.disks }}"
- when:
- - template is defined
- - template.disks is defined
-
-- name: wait for disk creation to complete
- async_status:
- jid: "{{ item.ansible_job_id }}"
- register: disks_creation
- until: disks_creation.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
- loop: "{{ create_disks.results }}"
- when:
- - template is defined
- - create_disks.results is defined
- - item.ansible_job_id is defined
-
-- include_tasks: wait_disk_unlock_pre29.yml
- when:
- - ansible_version.full is version('2.9', '<')
- - template is defined
- - template.disks is defined
- - disks_creation.results is defined
-
-- include_tasks: wait_disk_unlock.yml
- when:
- - ansible_version.full is version('2.9', '>=')
- - template is defined
- - template.disks is defined
- - disks_creation.results is defined
-
-- name: assign tags to provisioned vms
- ovirt.ovirt.ovirt_tag:
- name: "{{ item }}_{{ instance.item.item[item] }}"
- vms: ["{{ instance.item.item.name }}"]
- state: attached
- loop:
- - app_name
- - role
- when:
- - template is defined
- - instance is defined
- - instance.vm is defined
- - instance.item.item[item] is defined
-
-- name: start vm
- ovirt.ovirt.ovirt_vm:
- auth: "{{ ovirt_auth }}"
- name: "{{ template.name }}"
- cluster: "{{ providers.ovirt.cluster|default('Default') }}"
- state: running
- async: 7200
- poll: 0
- register: start
-
-- name: wait for instance creation to complete
- async_status: jid="{{ start.ansible_job_id }}"
- register: instance
- until: instance.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
-
-- name: waiting for server to come online
- wait_for:
- host: "{{ template.networks[0].ip }}"
- port: "{{ template.ansible_port | default(vm_ansible_port) | default(ansible_port) | default('5986') }}"
- timeout: "{{ instance_wait_connection_timeout }}"
- when:
- - instance is changed
- - template is defined
- ignore_errors: yes
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml b/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml
deleted file mode 100644
index bce3ff0..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/remove_template.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-
-- name: remove template
- ovirt.ovirt.ovirt_template:
- auth: "{{ ovirt_auth }}"
- cluster: "{{ providers.ovirt.cluster }}"
- name: "{{ template.name }}"
- state: absent
- async: 7200
- poll: 0
- register: undeploy
- when: template is defined
-
-- name: wait for template deletion to complete
- async_status:
- jid: "{{ undeploy.ansible_job_id }}"
- register: instance
- until: instance.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml b/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml
deleted file mode 100644
index e61487d..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/remove_vm.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-
-- name: remove vm
- ovirt.ovirt.ovirt_vm:
- auth: "{{ ovirt_auth }}"
- cluster: "{{ providers.ovirt.cluster }}"
- name: "{{ template.name }}"
- state: absent
- async: 7200
- poll: 0
- register: undeploy
- when: template is defined
-
-- name: wait for template deletion to complete
- async_status:
- jid: "{{ undeploy.ansible_job_id }}"
- register: instance
- until: instance.finished
- retries: "{{ instance_wait_retry_limit }}"
- delay: 10
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml b/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml
deleted file mode 100644
index 588458b..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/stop_vm.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-
-- block:
-
- - name: wait for server to stop responding
- wait_for:
- host: "{{ template_vm_ip_address }}"
- port: "{{ vm_ansible_port | default('5986') }}"
- timeout: 120
- state: stopped
- ignore_errors: yes
-
- - include_tasks: wait_vm_poweredoff_pre29.yml
- when: ansible_version.full is version('2.9', '<')
-
- - include_tasks: wait_vm_poweredoff.yml
- when: ansible_version.full is version('2.9', '>=')
-
- rescue:
- - name: stop vm
- ovirt.ovirt.ovirt_vm:
- auth: "{{ ovirt_auth }}"
- cluster: "{{ providers.ovirt.cluster }}"
- name: "{{ template.name }}"
- state: stopped
- when: template is defined
-
- - include_tasks: wait_vm_poweredoff_pre29.yml
- when: ansible_version.full is version('2.9', '<')
-
- - include_tasks: wait_vm_poweredoff.yml
- when: ansible_version.full is version('2.9', '>=')
-
-- name: reconfigure vm
- ovirt.ovirt.ovirt_vm:
- auth: "{{ ovirt_auth }}"
- cluster: "{{ providers.ovirt.cluster }}"
- name: "{{ template.name }}"
- boot_devices:
- - hd
- cd_iso: ""
- custom_properties: "{{ custom_properties_efi if (template_vm_efi|bool and custom_efi_enabled|bool) else ([{}]) }}"
- force: yes
- state: present
- when: template is defined
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/virtio_drivers.yml b/roles/oatakan.windows_ovirt_template/tasks/virtio_drivers.yml
deleted file mode 100644
index 6b62380..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/virtio_drivers.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-
-- name: download virtio win iso file
- get_url:
- url: "{{ virtio_iso_url }}"
- dest: "{{ temp_directory }}/virtio_win.iso"
- register: download_virtio_iso
- until: download_virtio_iso is success
- delay: 3
- retries: 5
-
-- name: set list of directories to copy
- set_fact:
- virtio_iso_list_of_directories_to_extract:
- - /viostor/{{ os_short_names[distro_name] | default('2k16') }}/amd64
- - /NetKVM/{{ os_short_names[distro_name] | default('2k16') }}/amd64
-
-- name: get a list of files from template iso
- shell: >
- set -o pipefail &&
- isoinfo -f -R -i {{ playbook_dir }}/{{ temp_directory }}/virtio_win.iso |
- grep -E "^{{ virtio_iso_list_of_directories_to_extract | join('|^') }}"
- changed_when: False
- register: virtio_iso_list_of_files
-
-- name: copy files from virtio iso to target
- shell: |
- set -o pipefail &&
- isoinfo -f -R -i {{ playbook_dir }}/{{ temp_directory }}/virtio_win.iso |\
- grep -E "^{{ virtio_iso_list_of_directories_to_extract | join('|^') }}" | while read line; do
- d=$(dirname $line)
- od=".${d}"
- [ -f $od ] && rm -f $od
- [ -d $od ] || mkdir -p $od
- [ -d ".${line}" ] || isoinfo -R -i \
- {{ playbook_dir }}/{{ temp_directory }}/virtio_win.iso -x $line > ".${line}"
- done
- changed_when: True
- args:
- chdir: "{{ playbook_dir }}/{{ temp_directory }}/ks_iso"
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml
deleted file mode 100644
index aa231d6..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-
-- name: wait until the image is unlocked by the oVirt engine
- ovirt.ovirt.ovirt_disk_info:
- auth: "{{ ovirt_auth }}"
- pattern: "name={% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}"
- register: ovirt_disk_info
- until: (ovirt_disk_info.ovirt_disks is defined) and (ovirt_disk_info.ovirt_disks | length > 0) and (ovirt_disk_info.ovirt_disks[0].status != "locked")
- retries: 10
- delay: 3
- loop: "{{ template.disks }}"
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock_pre29.yml
deleted file mode 100644
index 693d74c..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/wait_disk_unlock_pre29.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- name: wait until the image is unlocked by the oVirt engine (<2.9)
- ovirt_disk_facts:
- auth: "{{ ovirt_auth }}"
- pattern: "name={% if item.name_prefix | default(false) %}{{ template.name }}_{% endif %}{{ item.name }}"
- until: (ovirt_disks is defined) and (ovirt_disks | length > 0) and (ovirt_disks[0].status != "locked")
- retries: 10
- delay: 3
- loop: "{{ template.disks }}"
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml
deleted file mode 100644
index 7f0795e..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-
-- name: wait until the disk is unlocked by the oVirt engine
- ovirt.ovirt.ovirt_disk_info:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ iso_file }}"
- register: ovirt_disk_info
- until: (ovirt_disk_info.ovirt_disks is defined) and (ovirt_disk_info.ovirt_disks | length > 0) and (ovirt_disk_info.ovirt_disks[0].status != "locked")
- retries: 10
- delay: 3
- when: iso_file is defined
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock_pre29.yml
deleted file mode 100644
index ff7a35f..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/wait_iso_disk_unlock_pre29.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- name: wait until the disk is unlocked by the oVirt engine (<2.9)
- ovirt_disk_facts:
- auth: "{{ ovirt_auth }}"
- pattern: "name={{ iso_file }}"
- until: (ovirt_disks is defined) and (ovirt_disks | length > 0) and (ovirt_disks[0].status != "locked")
- retries: 10
- delay: 3
- when: iso_file is defined
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml
deleted file mode 100644
index 75a44c3..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-
-- name: wait for vm status to be poweredoff
- ovirt.ovirt.ovirt_vm_info:
- auth: "{{ ovirt_auth }}"
- pattern: name={{ template.name }} and cluster={{ providers.ovirt.cluster }}
- register: ovirt_vm_info_result
- until:
- - ovirt_vm_info_result.ovirt_vms is defined
- - ovirt_vm_info_result.ovirt_vms|length > 0
- - ovirt_vm_info_result.ovirt_vms[0].status == 'down'
- delay: 5
- retries: "{{ instance_stop_retry_limit }}"
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff_pre29.yml b/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff_pre29.yml
deleted file mode 100644
index 0bb61bf..0000000
--- a/roles/oatakan.windows_ovirt_template/tasks/wait_vm_poweredoff_pre29.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-
-- name: wait for vm status to be poweredoff
- ovirt_vm_facts:
- auth: "{{ ovirt_auth }}"
- pattern: name={{ template.name }} and cluster={{ providers.ovirt.cluster }}
- until:
- - ovirt_vms is defined
- - ovirt_vms|length > 0
- - ovirt_vms[0].status == 'down'
- delay: 5
- retries: "{{ instance_stop_retry_limit }}"
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2 b/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2
deleted file mode 100644
index a5a1d5c..0000000
--- a/roles/oatakan.windows_ovirt_template/templates/windows_server/Autounattend.xml.j2
+++ /dev/null
@@ -1,447 +0,0 @@
-
-
-
-
-
-
-
- E:\
-
-
-
-
-
-
- en-US
-
- en-US
- en-US
- en-US
- en-US
- en-US
-
-
-
-
-{% if template_vm_efi is undefined or not template_vm_efi|bool %}
-
-
- true
- NTFS
-
- 1
- 1
-
-
- NTFS
-
- C
- 2
- 2
-
-
- 0
- true
-
-
- Primary
- 1
- 350
-
-
- 2
- Primary
- true
-
-
-{% else %}
-
-
- 1
- NTFS
- 1
-
- de94bba4-06d1-4d40-a16a-bfd50179d6ac
-
-
- 2
- FAT32
- 2
-
-
-
- 3
- 3
-
-
- NTFS
- C
- 4
- 4
-
-
-
- 0
- true
-
-
- 1
- Primary
- 300
-
-
- 2
- EFI
- 100
-
-
- 3
- MSR
- 128
-
-
- 4
- Primary
- true
-
-
-{% endif %}
-
-
-
-
-
-
- /IMAGE/INDEX
- {{ iso_image_index }}
-
-
-
- 0
-{% if template_vm_efi is undefined or not template_vm_efi|bool %}
- 2
-{% else %}
- 4
-{% endif %}
-
-
-
-
- true
- Ansible
- Your Org.
-
-{% if unattend.product_key is defined and unattend.product_key|length %}
- {{ unattend.product_key | trim }}
-{% endif %}
- Never
-
-
-
-
-
-
- 1
-
-
- false
- false
-
-
-
-
- en-US
- en-US
- en-US
- en-US
-
-
-
- true
-{% if not '2008' in distro_name %}
- true
- true
- true
-{% endif %}
- true
- Home
- 1
-
- {{ settings.time_zone | default('UTC') }}
-
-{% if unattend.administrator_password is defined %}
-
- {{ unattend.administrator_password }}
- true
-
-{% endif %}
-{% if unattend.local_accounts is defined %}
-
-{% for local_account in unattend.local_accounts %}
-
-{% if local_account.password is defined %}
-
- {{ local_account.password }}
- true
-
-{% endif %}
-{% if local_account.description is defined %}
- {{ local_account.description }}
-{% endif %}
-{% if local_account.display_name is defined %}
- {{ local_account.display_name }}
-{% endif %}
-{% if local_account.group is defined %}
- {{ local_account.group }}
-{% endif %}
-{% if local_account.name is defined %}
- {{ local_account.name }}
-{% endif %}
-
-{% endfor %}
-
-{% endif %}
-
-{% if enable_auto_logon and unattend.local_accounts and unattend.local_accounts[0].name and unattend.local_accounts[0].password %}
-
-
- {{ unattend.local_accounts[0].password }}
- true
-
- {{ unattend.local_accounts[0].name }}
- true
-
-{% endif %}
-
-
- cmd.exe /c powershell -Command "Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Force"
- Set Execution Policy 64 Bit
- 1
- true
-
-{% if not '2008' in distro_name %}
-
- cmd.exe /c powershell -Command "Set-NetConnectionProfile -NetworkCategory Private"
- Set network connection profile to private
- 2
- true
-
-{% else %}
-
- cmd.exe /c powershell –EncodedCommand {{ set_network_to_private | b64encode(encoding='utf-16-le') }}
- Set network connection profile to private
- 2
- true
-
-
- cmd.exe /c winrm quickconfig -q
- winrm quickconfig -q
- 4
- true
-
-
- cmd.exe /c winrm quickconfig -transport:http
- winrm quickconfig -transport:http
- 5
- true
-
-
- cmd.exe /c winrm set winrm/config @{MaxTimeoutms="1800000"}
- Win RM MaxTimoutms
- 6
- true
-
-
- cmd.exe /c winrm set winrm/config/winrs @{MaxMemoryPerShellMB="800"}
- Win RM MaxMemoryPerShellMB
- 7
- true
-
-
- cmd.exe /c winrm set winrm/config/service @{AllowUnencrypted="true"}
- Win RM AllowUnencrypted
- 8
- true
-
-
- cmd.exe /c winrm set winrm/config/service/auth @{Basic="true"}
- Win RM auth Basic
- 9
- true
-
-
- cmd.exe /c winrm set winrm/config/client/auth @{Basic="true"}
- Win RM client auth Basic
- 10
- true
-
-
- cmd.exe /c winrm set winrm/config/listener?Address=*+Transport=HTTP @{Port="5985"}
- Win RM listener Address/Port
- 11
- true
-
-
- cmd.exe /c netsh firewall add portopening TCP 5985 "Port 5985"
- Win RM port open
- 12
- true
-
-
- cmd.exe /c net stop winrm
- Stop Win RM Service
- 13
- true
-
-
- cmd.exe /c sc config winrm start= auto
- Win RM Autostart
- 14
- true
-
-
- cmd.exe /c net start winrm
- Start Win RM Service
- 15
- true
-
-
- cmd.exe /c dism /online /enable-feature /featurename:NetFx2-ServerCore
- Enable NetFx2-ServerCore feature
- 16
- true
-
-
- cmd.exe /c dism /online /enable-feature /featurename:NetFx2-ServerCore-WOW64
- Enable NetFx2-ServerCore feature
- 17
- true
-
-
- cmd.exe /c dism /online /enable-feature /featurename:MicrosoftWindowsPowerShell
- Enable MicrosoftWindowsPowerShell feature
- 18
- true
-
-
- cmd.exe /c dism /online /enable-feature /featurename:ServerManager-PSH-Cmdlets
- Enable ServerManager-PSH-Cmdlets feature
- 19
- true
-
-{% endif %}
-{% if '2016' in distro_name %}
-
- cmd.exe /c reg add HKLM\SOFTWARE\Microsoft\.NETFramework\v4.0.30319 /v SchUseStrongCrypto /t REG_DWORD /d 1 /reg:64 /f
- Configure security protocol
- 19
- true
-
-{% endif %}
-
- cmd.exe /c powershell -ExecutionPolicy ByPass -File E:\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
- Enable winrm
- 20
- true
-
-
- cmd.exe /c powershell -Command "Enable-WSManCredSSP -Role Server -Force"
- Enable winrm server role
- 21
- true
-
-
- cmd.exe /c powershell -Command "Set-Item -Path 'WSMan:\localhost\Service\Auth\CredSSP' -Value $true"
- Enable credssp authentication
- 22
- true
-
-{% if template.networks is defined and template.networks[0].ip is defined and template.networks[0].gateway is defined and template.networks[0].netmask is defined %}
-{% if not '2008' in distro_name %}
-
- cmd.exe /c powershell -Command "New-NetIPAddress –IPAddress {{ template.networks[0].ip }} -DefaultGateway {{ template.networks[0].gateway }} -PrefixLength {{ (template.networks[0].ip + '/' + template.networks[0].netmask) | ipaddr('prefix') }} -InterfaceIndex (Get-NetAdapter).InterfaceIndex"
- Set static ip
- 50
- true
-
-{% else %}
-
- cmd.exe /c netsh int ipv4 set address "Local Area connection" static {{ template.networks[0].ip }} {{ template.networks[0].netmask }} {{ template.networks[0].gateway }}
- Set static ip
- 50
- true
-
-{% endif %}
-{% if template.networks[0].dns_servers is defined %}
-{% if not '2008' in distro_name %}
-
- cmd.exe /c powershell -Command "Set-DNSClientServerAddress –InterfaceIndex (Get-NetAdapter).InterfaceIndex –ServerAddresses {{ template.networks[0].dns_servers|join(',') }}"
- Set static ip
- 51
- true
-
-{% else %}
-
- cmd.exe /c netsh int ipv4 set dns "Local Area connection" static {{ template.networks[0].dns_servers[0] }}
- Set static ip
- 51
- true
-
-{% endif %}
-{% endif %}
-{% endif %}
-
-
-
-
-
- true
-
-
- false
- false
-
-
-
-
- true
- Google
- Google
- http://www.google.com/search?q={searchTerms}
-
-
- true
- true
- about:blank
-
-
- false
-
-
- 0
-
-
-
-
- true
- Remote Desktop
- all
-
-
-
-
- true
-
-
- {{ settings.skip_auto_activation | default('true') }}
-
-
- *
-
-
-
-
- false
-
-
-
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tests/inventory b/roles/oatakan.windows_ovirt_template/tests/inventory
deleted file mode 100644
index d18580b..0000000
--- a/roles/oatakan.windows_ovirt_template/tests/inventory
+++ /dev/null
@@ -1 +0,0 @@
-localhost
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/tests/test.yml b/roles/oatakan.windows_ovirt_template/tests/test.yml
deleted file mode 100644
index 2c0141f..0000000
--- a/roles/oatakan.windows_ovirt_template/tests/test.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- hosts: localhost
- gather_facts: False
- connection: local
- become: no
- roles:
- - ../.
\ No newline at end of file
diff --git a/roles/oatakan.windows_ovirt_template/vars/main.yml b/roles/oatakan.windows_ovirt_template/vars/main.yml
deleted file mode 100644
index 2464930..0000000
--- a/roles/oatakan.windows_ovirt_template/vars/main.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-
-temp_directory: tmp{{ awx_job_id | default('') }}
-
-iso_file: "windows_{{ distro_name }}_autounattend{{ awx_job_id | default('') }}.iso"
-
-export_dir: "{{ playbook_dir }}/{{ temp_directory }}"
-
-unattend:
- administrator_password: "{{ local_administrator_password }}"
- local_accounts:
- - name: "{{ local_account_username }}"
- display_name: "{{ local_account_username }}"
- description: "{{ local_account_username }} user"
- group: Administrators
- password: "{{ local_account_password }}"
- settings:
- computer_name: wintemp
- time_zone: UTC
- skip_auto_activation: true
- product_key: "{{ iso_product_key | default('') }}"
-
-providers:
- ovirt:
- datacenter: "{{ ovirt_datacenter }}"
- cluster: "{{ ovirt_cluster }}"
- data_domain: "{{ ovirt_data_domain }}"
- export_domain: "{{ ovirt_export_domain }}"
- iso_domain: "{{ ovirt_iso_domain }}"
-
-template:
- name: "{{ template_vm_name }}"
- role: windows_template
- app_name: windows_template_generate
- domain: "{{ template_vm_domain }}"
- disks:
- - name: "{{ template_vm_name }}"
- size: "{{ template_vm_root_disk_size }}GiB"
- format: "{{ template_vm_root_disk_format }}"
- interface: "{{ template_vm_root_disk_interface | default('virtio') }}"
- bootable: yes
- storage_domain: "{{ providers.ovirt.data_domain | default('data_domain') }}"
- memory: "{{ template_vm_memory }}"
- cpu: "{{ template_vm_cpu }}"
- bios_type: "{{ ('q35_ovmf') if (template_vm_efi|bool and not custom_efi_enabled|bool) else (omit) }}"
- networks:
- - name: "{{ template_vm_network_name }}"
- ip: "{{ template_vm_ip_address }}"
- netmask: "{{ template_vm_netmask }}"
- gateway: "{{ template_vm_gateway }}"
- domain: "{{ template_vm_domain }}"
- device_type: e1000
- dns_servers: "{{ template_vm_dns_servers }}"
- cd_iso: "{{ iso_file_id | default(iso_file_name) }}" # if using data domain, file name does not work, need to use id
-
-qemu_cmdline_second_iso:
- - -device
- - ide-cd,bus={{ qemu_second_cdrom_device_bus_type }}.{{ qemu_second_cdrom_device_bus_id }},unit={{ qemu_second_cdrom_device_bus_unit }},drive=drive-ua-0001,id=ua-0001,bootindex=3
- - -drive
- - format=raw,if=none,id=drive-ua-0001,werror=report,rerror=report,readonly=on,file=/rhev/data-center/{{ ovirt_datacenter_id }}/{{ ovirt_datastore_id }}/images/{{ ks_iso_file_disk_id }}/{{ ks_iso_file_image_id }}
-
-qemu_cmdline_efi:
- - -drive
- - if=pflash,format=raw,readonly,file={{ custom_efi_path }}
-
-custom_properties:
- - name: qemu_cmdline
- value: "{{ ((qemu_cmdline_second_iso + qemu_cmdline_efi) | to_json) if (template_vm_efi|bool and custom_efi_enabled|bool) else (qemu_cmdline_second_iso | to_json) }}"
-
-custom_properties_efi:
- - name: qemu_cmdline
- value: "{{ (qemu_cmdline_efi | to_json) if (template_vm_efi|bool and custom_efi_enabled|bool) else ('[]') }}"
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/README.md b/roles/oatakan.windows_template_build/README.md
index 904aeb6..c384cd5 100644
--- a/roles/oatakan.windows_template_build/README.md
+++ b/roles/oatakan.windows_template_build/README.md
@@ -22,10 +22,13 @@ A list of roles that this role utilizes:
- oatakan.windows_ec2_ena_driver
- oatakan.windows_ovirt_guest_agent
- oatakan.windows_powershell_upgrade
+- oatakan.windows_configure_update
- oatakan.windows_update
- oatakan.windows_virtio
- oatakan.windows_vmware_tools
- oatakan.windows_virtualbox_guest_additions
+- oatakan.windows_parallels_tools
+- oatakan.windows_hotfix
Example Playbook
----------------
diff --git a/roles/oatakan.windows_template_build/defaults/main.yml b/roles/oatakan.windows_template_build/defaults/main.yml
index 4c5e86d..eb54832 100644
--- a/roles/oatakan.windows_template_build/defaults/main.yml
+++ b/roles/oatakan.windows_template_build/defaults/main.yml
@@ -1,46 +1,76 @@
---
-install_updates: yes
-remove_apps: no
-clean_up_components: yes
-upgrade_powershell: no
+install_updates: true
+remove_apps: false
+clean_up_components: true
+upgrade_powershell: false
powershell_target_version: 3.0
default_temp_directory: 'C:\Windows\Temp'
update_retry_limit: 10
upgrade_wait_timeout: 600
-set_network_to_private: '([Activator]::CreateInstance([Type]::GetTypeFromCLSID([Guid]"{DCB00C01-570F-4A9B-8D69-199FDBA5723B}"))).GetNetworkConnections() | % {$_.GetNetwork().SetCategory(1)}'
+win_update_server: '' # wsus server ip/hostname
-enable_tlsv12_hotfix_download_location: "{{ ansible_env.TEMP }}"
-enable_tlsv12_hotfix:
- kb: KB3080079
- file: Windows6.1-KB3080079-x64.msu
- url: https://download.microsoft.com/download/F/4/1/F4154AD2-2119-48B4-BF99-CC15F68E110D/Windows6.1-KB3080079-x64.msu
+set_network_to_private: "([Activator]::CreateInstance([Type]::GetTypeFromCLSID([Guid]'{DCB00C01-570F-4A9B-8D69-199FDBA5723B}'))).GetNetworkConnections() | % {$_.GetNetwork().SetCategory(1)}"
+expand_disk: !unsafe "$i=(gwmi -n root/cimv2 Win32_DiskPartition|?{$_.BootPartition }).Index;'sel dis 0',\\\"sel par $($i*2+2)\\\",'extend'|& diskpart *>$null"
-enable_tls_support_hotfix_download_location: 'C:\Windows\Temp'
-# no longer available
-#enable_tls_support_hotfix:
-# kb: kb3154518
-# file: windows6.1-kb3154518-x64.msu
-# url: http://download.microsoft.com/download/6/8/0/680ee424-358c-4fdf-a0de-b45dee07b711/windows6.1-kb3154518-x64.msu
+win2008_hotfixes:
+ # this update is needed to enable SSL support on Windows Server 2008 R2
+ - kb: KB4474419
+ file: windows6.1-kb4474419-v3-x64_b5614c6cea5cb4e198717789633dca16308ef79c.msu
+ url: http://catalog.s.download.windowsupdate.com/c/msdownload/update/software/secu/2019/09/windows6.1-kb4474419-v3-x64_b5614c6cea5cb4e198717789633dca16308ef79c.msu
+ # this is a servicing stack update to enable any recent updates
+ - kb: KB3080079
+ file: Windows6.1-KB3080079-x64.msu
+ url: https://download.microsoft.com/download/F/4/1/F4154AD2-2119-48B4-BF99-CC15F68E110D/Windows6.1-KB3080079-x64.msu
-# fix: https://support.microsoft.com/en-us/topic/security-and-quality-rollup-for-net-framework-3-5-1-for-windows-7-sp1-and-windows-server-2008-r2-sp1-kb-4040980-71f9f600-4878-a9d4-6b36-93cafad2eefe
-enable_tls_support_hotfix:
- kb: kb4040980
- file: windows6.1-kb4040980-x64_83282fb5210091802984ead0d4175879056d602c.msu
- url: http://download.windowsupdate.com/c/msdownload/update/software/secu/2017/09/windows6.1-kb4040980-x64_83282fb5210091802984ead0d4175879056d602c.msu
+win2012_hotfixes:
+ os_6_2:
+ - kb: KB2901982
+ file: windows8-rt-kb2901982-x64_21dae8200edae3339a8c8580e516e00d7dacdfe3.msu
+ url: http://catalog.s.download.windowsupdate.com/d/msdownload/update/software/ftpk/2015/01/windows8-rt-kb2901982-x64_21dae8200edae3339a8c8580e516e00d7dacdfe3.msu
+ os_6_3:
+ # this update is needed to enable .NET clients to use https (tlsv12) on Windows 8.1 and Windows Server 2012 R2
+ # see https://www.microsoft.com/en-us/download/confirmation.aspx?id=42883
+ - kb: KB2978041
+ file: windows8.1-kb2978041-x64_93d7dd68c7487670c0ab4d5eb154a0ef5e40a306.msu
+ url: http://download.windowsupdate.com/c/msdownload/update/software/secu/2014/09/windows8.1-kb2978041-x64_93d7dd68c7487670c0ab4d5eb154a0ef5e40a306.msu
+ # this is a servicing stack update to enable any recent updates
+ - kb: KB5018922
+ file: windows8.1-kb5018922-x64_3aa7832b7586e11304f8fee5e09b6829b32d1833.msu
+ url: https://catalog.s.download.windowsupdate.com/c/msdownload/update/software/secu/2022/10/windows8.1-kb5018922-x64_3aa7832b7586e11304f8fee5e09b6829b32d1833.msu
+ # this is a security update; it updates the cipher suite for TLS, which prevents 'SSL: DH_KEY_TOO_SMALL' error with credssp
+ - kb: KB3042058
+ file: windows8.1-kb3042058-x64_c73bfac2ad93aed131627e7482bacbd89d0a0850.msu
+ url: https://catalog.s.download.windowsupdate.com/d/msdownload/update/software/secu/2015/09/windows8.1-kb3042058-x64_c73bfac2ad93aed131627e7482bacbd89d0a0850.msu
+ enable_winrm: true
-dot_net_security_hotfix_download_location: 'C:\Windows\Temp'
-# no longer available
-#dot_net_security_hotfix:
-# kb: KB2898850
-# file: Windows8.1-KB2898850-x64.msu
-# url: http://download.microsoft.com/download/C/6/9/C690CC33-18F7-405D-B18A-0A8E199E531C/Windows8.1-KB2898850-x64.msu
+win2008_hotfixes_archived:
+ # no longer available
+ # enable tls support hotfix:
+ - kb: kb3154518
+ file: windows6.1-kb3154518-x64.msu
+ url: http://download.microsoft.com/download/6/8/0/680ee424-358c-4fdf-a0de-b45dee07b711/windows6.1-kb3154518-x64.msu
+ # fix: https://support.microsoft.com/en-us/topic/security-and-quality-rollup-for-net-framework-3-5-1-for-windows-7-sp1-and-windows-server-2008-r2-sp1-kb-4040980-71f9f600-4878-a9d4-6b36-93cafad2eefe
+ # enable tls support hotfix:
+ - kb: kb4040980
+ file: windows6.1-kb4040980-x64_83282fb5210091802984ead0d4175879056d602c.msu
+ url: http://download.windowsupdate.com/c/msdownload/update/software/secu/2017/09/windows6.1-kb4040980-x64_83282fb5210091802984ead0d4175879056d602c.msu
-dot_net_security_hotfix:
- kb: KB2898850
- file: windows8.1-kb2898850-x64_9ffdfdeac9011569d1b14cf2dbf926257c50186d.msu
- url: http://download.windowsupdate.com/d/msdownload/update/software/secu/2014/04/windows8.1-kb2898850-x64_9ffdfdeac9011569d1b14cf2dbf926257c50186d.msu
+win2012_hotfixes_archived:
+ # no longer available
+ # dot net security hotfix:
+ - kb: KB2898850
+ file: Windows8.1-KB2898850-x64.msu
+ url: http://download.microsoft.com/download/C/6/9/C690CC33-18F7-405D-B18A-0A8E199E531C/Windows8.1-KB2898850-x64.msu
+ # superseded
+ # dot net security hotfix:
+ - kb: KB2898850
+ file: windows8.1-kb2898850-x64_9ffdfdeac9011569d1b14cf2dbf926257c50186d.msu
+ url: http://download.windowsupdate.com/d/msdownload/update/software/secu/2014/04/windows8.1-kb2898850-x64_9ffdfdeac9011569d1b14cf2dbf926257c50186d.msu
+
+winrm_enable_script_url: https://raw.githubusercontent.com/ansible/ansible-documentation/devel/examples/scripts/ConfigureRemotingForAnsible.ps1
+enable_winrm_command: "& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString('{{ winrm_enable_script_url }}'))) -ForceNewSSLCert -EnableCredSSP"
windows_update_agent_url: http://download.windowsupdate.com/windowsupdate/redist/standalone/7.6.7600.320/windowsupdateagent-7.6-x64.exe
@@ -49,32 +79,40 @@ bleachbit_download_url: https://download.bleachbit.org/BleachBit-4.0.0-portable.
sdelete_download_url: https://download.sysinternals.com/files/SDelete.zip
ultradefrag_download_url: https://downloads.sourceforge.net/project/ultradefrag/stable-release/7.1.4/ultradefrag-portable-7.1.4.bin.amd64.zip
-enable_auto_logon: yes
+enable_auto_logon: true
-target_ovirt: no
-target_qemu: no
-target_ec2: no
-target_vagrant: no
+target_ovirt: false
+target_qemu: false
+target_ec2: false
+target_vagrant: false
+target_openstack: false
-bleachbit_clean: yes
-bleachbit_free_disk_space: yes
+bleachbit_clean: true
+bleachbit_free_disk_space: true
ec2_ena_driver_role: oatakan.windows_ec2_ena_driver
ovirt_guest_agent_role: oatakan.windows_ovirt_guest_agent
virtio_role: oatakan.windows_virtio
vmware_tools_role: oatakan.windows_vmware_tools
virtualbox_guest_additions_role: oatakan.windows_virtualbox_guest_additions
+parallels_tools_role: oatakan.windows_parallels_tools
+windows_configure_update_role: oatakan.windows_configure_update
windows_update_role: oatakan.windows_update
windows_powershell_upgrade_role: oatakan.windows_powershell_upgrade
+windows_hotfix_role: oatakan.windows_hotfix
policy:
- allow_unauthenticated_guest_access: no
+ allow_unauthenticated_guest_access: false
+ disable_eos_reminder: true
+ install_webclient_service: false # installed on workstation by default, only applies to server
+
+webclient_maximum_file_size: 0xffffffff # 4 GB; the Windows default value is 50 MB
local_administrator_password: Chang3MyP@ssw0rd21
local_account_username: ansible
local_account_password: Chang3MyP@ssw0rd21
-shutdown_instance: yes
+shutdown_instance: true
winsxs_cleanmgr_file:
2008r2: '{{ ansible_env.windir }}\winsxs\amd64_microsoft-windows-cleanmgr_31bf3856ad364e35_6.1.7600.16385_none_c9392808773cd7da\cleanmgr.exe'
diff --git a/roles/oatakan.windows_template_build/handlers/main.yml b/roles/oatakan.windows_template_build/handlers/main.yml
index 66372ec..3427044 100644
--- a/roles/oatakan.windows_template_build/handlers/main.yml
+++ b/roles/oatakan.windows_template_build/handlers/main.yml
@@ -1,5 +1,17 @@
---
+- name: get Windows ADK uninstall command
+ win_reg_stat:
+ path: HKLM:\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\{d794748d-72e9-45d7-9ab7-83d6c4c80f7f}
+ name: QuietUninstallString
+ register: windows_adk_uninstall_string
+
+- name: uninstall Windows ADK
+ win_shell: "{{ windows_adk_uninstall_string.value }}"
+ args:
+ executable: cmd
+ when: windows_adk_uninstall_string.value is defined
+
- name: ensure Windows ADK with DISM is removed
win_chocolatey:
name: windows-adk-deploy
diff --git a/roles/oatakan.windows_template_build/meta/.galaxy_install_info b/roles/oatakan.windows_template_build/meta/.galaxy_install_info
index b8e9ec4..868dc4e 100644
--- a/roles/oatakan.windows_template_build/meta/.galaxy_install_info
+++ b/roles/oatakan.windows_template_build/meta/.galaxy_install_info
@@ -1,2 +1,2 @@
-install_date: Fri Oct 15 18:59:14 2021
+install_date: Thu 08 Feb 2024 08:54:01 PM
version: master
diff --git a/roles/oatakan.windows_template_build/meta/main.yml b/roles/oatakan.windows_template_build/meta/main.yml
index 64b84ad..a498600 100644
--- a/roles/oatakan.windows_template_build/meta/main.yml
+++ b/roles/oatakan.windows_template_build/meta/main.yml
@@ -3,6 +3,7 @@ galaxy_info:
author: Orcun Atakan
description: Ansible galaxy role for building a Windows template on any cloud platform(ovirt/rhev, VMware, EC2, Azure etc.)
role_name: windows_template_build
+ namespace: oatakan
company: Red Hat
license: MIT
@@ -14,14 +15,6 @@ galaxy_info:
versions:
- all
- cloud_platforms:
- - amazon
- - google
- - azure
- - azure
- - vmware
- - ovirt
-
galaxy_tags:
- windows
- ec2
@@ -35,5 +28,3 @@ galaxy_info:
- cloud
- multicloud
- template
-
-dependencies: []
diff --git a/roles/oatakan.windows_template_build/tasks/clean-up-components.yml b/roles/oatakan.windows_template_build/tasks/clean-up-components.yml
deleted file mode 100644
index 38d22bb..0000000
--- a/roles/oatakan.windows_template_build/tasks/clean-up-components.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-
-- name: clean up components and update files
- win_shell: Dism.exe /online /Cleanup-Image /StartComponentCleanup /ResetBase
- when: "'Windows Server 2008' not in ansible_distribution"
- ignore_errors: yes
-
-- include_tasks: clean-up-with-cleanmgr.yml
- when: "'Windows Server 2008' in ansible_distribution"
-
-- name: clean up components and update files
- win_shell: Dism.exe /online /Cleanup-Image /SpSuperseded
- when: "'Windows Server 2008' in ansible_distribution"
- ignore_errors: yes
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/clean-up-with-cleanmgr.yml b/roles/oatakan.windows_template_build/tasks/clean-up-with-cleanmgr.yml
deleted file mode 100644
index 14358f1..0000000
--- a/roles/oatakan.windows_template_build/tasks/clean-up-with-cleanmgr.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-
-- block:
-
- - name: check for cleanmgr executable
- win_stat:
- path: '{{ ansible_env.windir }}\System32\cleanmgr.exe'
- register: check_cleanmgr_file
-
- - include_tasks: copy_cleanmgr.yml
- vars:
- os_short_name: 2008r2
- when:
- - not check_cleanmgr_file.stat.exists
- - ('Windows Server 2008 R2' in ansible_distribution)
-
- - include_tasks: copy_cleanmgr.yml
- vars:
- os_short_name: 2012
- when:
- - not check_cleanmgr_file.stat.exists
- - ('Windows Server 2012' in ansible_distribution)
- - (not 'Windows Server 2012 R2' in ansible_distribution)
-
- - name: get free space
- win_shell: Get-PSDrive C | Select-Object Free | ConvertTo-Json
- register: free_space_before_cleanup
-
- - name: ensure cleanup registry paths exist
- win_regedit:
- path: HKLM:\Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\{{ item }}
- loop: "{{ cleanup_registry_keys }}"
-
- - name: set cleanup registry keys
- win_regedit:
- path: HKLM:\Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\{{ item }}
- name: StateFlags0012
- data: 2
- type: dword
- loop: "{{ cleanup_registry_keys }}"
-
- - name: run cleanmgr
- win_shell: cleanmgr /sagerun:12
-
- - name: wait for cleanmgr to finish
- win_shell: (get-wmiobject win32_process | where-object {$_.processname -eq 'cleanmgr.exe'} | measure).count
- register: check_cleanmgr_process
- until: check_cleanmgr_process.stdout is defined and check_cleanmgr_process.stdout|int == 0
- delay: 5
- retries: 300
-
- - name: get free space
- win_shell: Get-PSDrive C | Select-Object Free | ConvertTo-Json
- register: free_space_after_cleanup
-
- - debug:
- msg:
- - "Free space before cleanup: {{ ((free_space_before_cleanup.stdout | from_json)['Free']|int / (1024*1024*1024)) | round(2, 'floor') }} GB"
- - "Free space after cleanup: {{ ((free_space_after_cleanup.stdout | from_json)['Free']|int / (1024*1024*1024)) | round(2, 'floor') }} GB"
-
- rescue:
- - name: ignore any errors
- debug:
- msg: "ignoring any error with clean up with cleanmgr"
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/clean-up.yml b/roles/oatakan.windows_template_build/tasks/clean-up.yml
deleted file mode 100644
index 1af40d1..0000000
--- a/roles/oatakan.windows_template_build/tasks/clean-up.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-
-- name: remove page file
- win_regedit:
- path: HKLM:\System\CurrentControlSet\Control\Session Manager\Memory Management
- name: PagingFiles
- data: ""
- state: present
- register: cleanup_pagefile_removal
-
-- name: reboot server after clearing page file
- win_reboot:
- when: cleanup_pagefile_removal is changed
-
-- name: cleanup the temp folders
- win_file:
- path: '{{ item }}'
- state: absent
- ignore_errors: yes
- loop:
- - C:\Temp
- - C:\Windows\Panther
- - C:\Windows\Temp
-
-- name: cleanup the C:\Recovery folder
- win_shell: Remove-Item -Path C:\Recovery -Force -Recurse
- ignore_errors: yes
-
-- name: check to see if WinSXS ManifestCache folder exist
- win_stat:
- path: '{{ ansible_env.windir }}\winsxs\ManifestCache'
- register: winsxs_dir
-
-- name: clear out the WinSXS ManifestCache folder
- win_shell: |
- &cmd.exe /c Takeown /f %windir%\winsxs\ManifestCache\*
- &cmd.exe /c Icacls %windir%\winsxs\ManifestCache\* /GRANT administrators:F
- &cmd.exe /c Del /q %windir%\winsxs\ManifestCache\*
- when:
- - winsxs_dir.stat is defined
- - winsxs_dir.stat.exists
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/cloudbase-init.yml b/roles/oatakan.windows_template_build/tasks/cloudbase-init.yml
index 3ec21d2..00ecd0a 100644
--- a/roles/oatakan.windows_template_build/tasks/cloudbase-init.yml
+++ b/roles/oatakan.windows_template_build/tasks/cloudbase-init.yml
@@ -7,3 +7,7 @@
arguments:
- /qn
state: present
+ register: install_cloudbase_init
+ until: install_cloudbase_init is success
+ delay: 3
+ retries: 5
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/compact.yml b/roles/oatakan.windows_template_build/tasks/compact.yml
index 1259414..32e601a 100644
--- a/roles/oatakan.windows_template_build/tasks/compact.yml
+++ b/roles/oatakan.windows_template_build/tasks/compact.yml
@@ -33,17 +33,11 @@
retries: 60
when: "'Windows Server 2008' in ansible_distribution"
-- name: stop windows update service
- win_service:
- name: wuauserv
- state: stopped
- ignore_errors: yes
-
- name: delete update directory
win_file:
path: C:\Windows\SoftwareDistribution\Download
state: absent
- ignore_errors: yes
+ ignore_errors: true
- name: remove windows update settings
win_regedit:
@@ -55,21 +49,15 @@
- PingID
- AccountDomainSid
-- name: start windows update service
- win_service:
- name: wuauserv
- state: started
- ignore_errors: yes
-
- name: create update directory
win_file:
path: C:\Windows\SoftwareDistribution\Download
state: directory
- ignore_errors: yes
+ ignore_errors: true
- name: reset windows update
win_shell: wuauclt /resetauthorization /detectnow
- ignore_errors: yes
+ ignore_errors: true
- name: clean with bleachbit
win_shell: >
@@ -85,7 +73,7 @@
when:
- bleachbit_clean|bool
- download_bleachbit is success
- ignore_errors: yes
+ ignore_errors: true
- name: create temp directory
win_file:
@@ -101,20 +89,23 @@
until: download_ultradefrag is success
delay: 3
retries: 5
+ ignore_errors: true
-- name: unzip ultradefrag
- win_unzip:
- src: '{{ temp_directory }}\win_build\ultradefrag.zip'
- dest: '{{ temp_directory }}\win_build'
+- block:
+ - name: unzip ultradefrag
+ win_unzip:
+ src: '{{ temp_directory }}\win_build\ultradefrag.zip'
+ dest: '{{ temp_directory }}\win_build'
-- name: set udefrag extract directory
- set_fact:
- udefrag_dir: '{{ temp_directory }}\win_build\ultradefrag-portable-7.1.4.amd64'
+ - name: set udefrag extract directory
+ set_fact:
+ udefrag_dir: '{{ temp_directory }}\win_build\ultradefrag-portable-7.1.4.amd64'
-- name: defrag with ultradefrag
- win_shell: '{{ udefrag_dir }}\udefrag.exe --optimize --repeat C:'
- args:
- executable: cmd
+ - name: defrag with ultradefrag
+ win_shell: '{{ udefrag_dir }}\udefrag.exe --optimize --repeat C:'
+ args:
+ executable: cmd
+ when: download_ultradefrag is success
- name: download sdelete
win_get_url:
@@ -161,7 +152,7 @@
when:
- bleachbit_free_disk_space|bool
- download_bleachbit is success
- ignore_errors: yes
+ ignore_errors: true
- name: remove bleachbit files
win_file:
diff --git a/roles/oatakan.windows_template_build/tasks/copy_cleanmgr.yml b/roles/oatakan.windows_template_build/tasks/copy_cleanmgr.yml
index e4d79df..5a2471f 100644
--- a/roles/oatakan.windows_template_build/tasks/copy_cleanmgr.yml
+++ b/roles/oatakan.windows_template_build/tasks/copy_cleanmgr.yml
@@ -14,7 +14,7 @@
win_copy:
src: "{{ winsxs_cleanmgr_file[os_short_name] }}"
dest: '{{ ansible_env.windir }}\System32\cleanmgr.exe'
- remote_src: yes
+ remote_src: true
when:
- check_winsxs_cleanmgr_file.stat.exists
- check_winsxs_cleanmgr_mui_file.stat.exists
@@ -23,7 +23,7 @@
win_copy:
src: "{{ winsxs_cleanmgr_mui_file[os_short_name] }}"
dest: '{{ ansible_env.windir }}\System32\en-US\cleanmgr.exe.mui'
- remote_src: yes
+ remote_src: true
when:
- check_winsxs_cleanmgr_file.stat.exists
- check_winsxs_cleanmgr_mui_file.stat.exists
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/disable-auto-logon.yml b/roles/oatakan.windows_template_build/tasks/disable-auto-logon.yml
deleted file mode 100644
index 952846e..0000000
--- a/roles/oatakan.windows_template_build/tasks/disable-auto-logon.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-- name: disable auto login
- win_regedit:
- path: HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon
- name: "{{ item.name }}"
- state: absent
- loop: "{{ autologin_registry }}"
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/enable-rdp.yml b/roles/oatakan.windows_template_build/tasks/enable-rdp.yml
deleted file mode 100644
index 922bec9..0000000
--- a/roles/oatakan.windows_template_build/tasks/enable-rdp.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: enable RDP port
- win_firewall_rule:
- name: Remote Desktop
- localport: 3389
- action: allow
- direction: in
- protocol: tcp
- state: present
- enabled: yes
-
-- name: enable RDP
- win_regedit:
- path: HKLM:\System\CurrentControlSet\Control\Terminal Server
- name: fDenyTSConnections
- data: 0
- type: dword
diff --git a/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml b/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml
deleted file mode 100644
index bab6ac1..0000000
--- a/roles/oatakan.windows_template_build/tasks/enable-tlsv12.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-
-- block:
- - name: test SSL connection
- win_shell: "[System.Net.WebRequest]::Create('https://github.com').GetResponse()"
-
- rescue:
- - name: enable TLSv1.2 support
- win_regedit:
- path: HKLM:\SYSTEM\CurrentControlSet\Control\SecurityProviders\SCHANNEL\Protocols\TLS 1.2\{{ item.type }}
- name: '{{ item.property }}'
- data: '{{ item.value }}'
- type: dword
- state: present
- register: enable_tls12
- loop:
- - type: Server
- property: Enabled
- value: 1
- - type: Server
- property: DisabledByDefault
- value: 0
- - type: Client
- property: Enabled
- value: 1
- - type: Client
- property: DisabledByDefault
- value: 0
-
- - name: enable strong crypto
- win_regedit:
- path: HKLM:\{{ item }}
- name: SchUseStrongCrypto
- data: 1
- type: dword
- state: present
- loop:
- - 'SOFTWARE\Microsoft\.NETFramework\v4.0.30319'
- - 'SOFTWARE\WOW6432Node\Microsoft\.NETFramework\v4.0.30319'
-
- - name: reboot if TLS config was applied
- win_reboot:
- when: enable_tls12 is changed
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/hotfix-tlsv12.yml b/roles/oatakan.windows_template_build/tasks/hotfix-tlsv12.yml
deleted file mode 100644
index a694a08..0000000
--- a/roles/oatakan.windows_template_build/tasks/hotfix-tlsv12.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-
-- name: ensure Windows ADK with DISM is installed
- win_chocolatey:
- name: windows-adk-deploy
- state: present
- version: 10.0.17134.0
- register: install_windows_adk_deploy
- notify: ensure Windows ADK with DISM is removed
-
-- name: ensure PATH contains Windows ADK
- win_path:
- scope: machine
- state: present
- elements: "C:\\Program Files (x86)\\Windows Kits\\10\\Assessment and Deployment Kit\\Deployment Tools\\amd64\\DISM"
-
-- pause:
- seconds: 10
-
-- name: download hotfix
- win_get_url:
- url: '{{ enable_tlsv12_hotfix.url }}'
- dest: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }}'
- register: download_hotfix
- until: download_hotfix is success
- delay: 3
- retries: 5
-
-- block:
- - name: install hotfix (PS >= 4)
- win_hotfix:
- source: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }}'
- state: present
- register: hotfix_install
- when: ansible_powershell_version is version('4', '>=')
- rescue:
- - name: install hotfix using shell
- win_shell: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }} /quiet /norestart'
- register: hotfix_install
-
-- name: install hotfix (PS == 3)
- win_shell: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }} /quiet /norestart'
- register: hotfix_install
- when: ansible_powershell_version is version('3', '==')
-
-- name: ensure hotfix file is removed
- win_file:
- path: '{{ enable_tlsv12_hotfix_download_location }}\{{ enable_tlsv12_hotfix.file }}'
- state: absent
-
-- name: reboot if needed
- win_reboot:
- when: hotfix_install.reboot_required | default(False)
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/main.yml b/roles/oatakan.windows_template_build/tasks/main.yml
index e65720f..672df27 100644
--- a/roles/oatakan.windows_template_build/tasks/main.yml
+++ b/roles/oatakan.windows_template_build/tasks/main.yml
@@ -7,38 +7,91 @@
- name: run setup module
setup:
-- include_tasks: hotfix-tlsv12.yml
+- block:
+ # This is needed when many tasks and polls run against the Windows target and it reaches the limit
+ # Default value is 1500
+ - name: increase MaxConcurrentOperationsPerUser
+ ansible.windows.win_shell: |
+ winrm set winrm/config/service @{MaxConcurrentOperationsPerUser="20000"}
+ args:
+ executable: cmd
+
+ # first we need to fix SSL connections with the hotfix
+ - include_role:
+ name: "{{ windows_hotfix_role }}"
+ vars:
+ hotfix: "{{ win2008_hotfixes[0] }}"
+
+ - include_tasks: install_dism.yml
+
+ # enable TLS 1.2 with a hotfix
+ - include_role:
+ name: "{{ windows_hotfix_role }}"
+ vars:
+ hotfix: "{{ win2008_hotfixes[1] }}"
when: "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution"
-- include_tasks: enable-tlsv12.yml
+- include_tasks: enable_tlsv12.yml
-- include_tasks: update-agent-win2008.yml
+- include_tasks: update_agent_win2008.yml
when: "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution"
-- include_tasks: security-update-win2012.yml
- when: "'Windows Server 2012' in ansible_distribution or 'Windows 8' in ansible_distribution"
-
-- include_tasks: disable-auto-logon.yml
-
- include_role:
- name: "{{ windows_update_role }}"
+ name: "{{ windows_hotfix_role }}"
+ loop: "{{ win2012_hotfixes[os_version_name | default('os_6_3')] }}"
+ loop_control:
+ loop_var: hotfix
+ when: "'Windows Server 2012' in ansible_distribution or 'Windows 8' in ansible_distribution"
+
+- include_tasks: disable_auto_logon.yml
+
+- block:
+ - include_role:
+ name: "{{ windows_configure_update_role }}"
+ vars:
+ role_action: register
+ wsus_server: "{{ win_update_server }}"
+ register_with_wsus: true
+ when: win_update_server | length > 0
+
+ - include_role:
+ name: "{{ windows_update_role }}"
+ vars:
+ win_update_server_selection: "{{ 'managed_server' if (win_update_server | length > 0) else 'default' }}"
+ always:
+ - include_role:
+ name: "{{ windows_configure_update_role }}"
+ vars:
+ role_action: unregister
+ when: win_update_server | length > 0
when: install_updates | bool
+- name: ensure windows update service stopped and disabled
+ ansible.windows.win_service:
+ name: wuauserv
+ state: stopped
+ start_mode: disabled
+ ignore_errors: true
+
- include_role:
name: "{{ ovirt_guest_agent_role }}"
when: target_ovirt | bool and not target_qemu | bool
- include_role:
name: "{{ virtio_role }}"
- when: target_qemu | bool or ('KubeVirt' in ansible_system_vendor | default(''))
+ when: target_qemu | bool or ('KubeVirt' in (ansible_system_vendor | default('', true)))
- include_role:
name: "{{ virtualbox_guest_additions_role }}"
- when: "'VirtualBox' in ansible_product_name"
+ when: ('VirtualBox' in (ansible_product_name | default('', true)))
- include_role:
name: "{{ vmware_tools_role }}"
- when: "'VMware' in ansible_product_name"
+ when: ('VMware' in (ansible_product_name | default('', true)))
+
+- include_role:
+ name: "{{ parallels_tools_role }}"
+ when: ('Parallels' in (ansible_product_name | default('', true))) or (ansible_product_name == None and 'Parallels' in ansible_interfaces[0].interface_name)
- include_tasks: startup.yml
@@ -47,21 +100,21 @@
- include_tasks: power.yml
when: (ansible_os_product_type == 'workstation') | default(False)
-- include_tasks: enable-rdp.yml
+- include_tasks: enable_rdp.yml
- include_tasks: cloudbase-init.yml
when:
- - "'VMware' not in ansible_product_name"
- - "'VirtualBox' not in ansible_product_name"
- - ('KubeVirt' not in ansible_system_vendor | default(False))
- - ('Red Hat' not in ansible_system_vendor | default(False))
+ - ('VMware' not in (ansible_product_name | default('', true)))
+ - ('VirtualBox' not in (ansible_product_name | default('', true)))
+ - ('KubeVirt' not in (ansible_system_vendor | default('', true)))
+ - ('Red Hat' not in (ansible_system_vendor | default('', true))) or target_openstack | bool
- not target_ovirt | bool
- not target_vagrant | bool
- block:
- - include_tasks: remove-apps-alt-2.yml
+ - include_tasks: remove_apps-alt-2.yml
- - include_tasks: remove-onedrive.yml
+ - include_tasks: remove_onedrive.yml
when:
- remove_apps | bool
- (ansible_os_product_type == 'workstation') | default(False)
@@ -73,14 +126,21 @@
- name: run all handlers here
meta: flush_handlers
-- include_tasks: clean-up-components.yml
+- include_tasks: clean_up_components.yml
when: clean_up_components | bool
-- include_tasks: clean-up.yml
+- include_tasks: clean_up.yml
- include_tasks: sysprep.yml
- include_tasks: compact.yml
+- name: ensure windows update service is enabled
+ ansible.windows.win_service:
+ name: wuauserv
+ state: stopped
+ start_mode: auto
+ ignore_errors: true
+
- include_tasks: shutdown.yml
- when: shutdown_instance | bool
\ No newline at end of file
+ when: shutdown_instance | bool
diff --git a/roles/oatakan.windows_template_build/tasks/policy.yml b/roles/oatakan.windows_template_build/tasks/policy.yml
index 536634e..da97374 100644
--- a/roles/oatakan.windows_template_build/tasks/policy.yml
+++ b/roles/oatakan.windows_template_build/tasks/policy.yml
@@ -9,17 +9,68 @@
type: dword
when: policy.allow_unauthenticated_guest_access|bool
+# webdav support policy
+- block:
+ - block:
+ - name: enable WebDAV-Redirector feature on Server (2016+)
+ win_feature:
+ name: WebDAV-Redirector
+ state: present
+ register: enable_webdav_redirector
+ when:
+ - ('Windows Server 2008' not in ansible_distribution)
+ - ('Windows Server 2012' not in ansible_distribution)
+
+ - name: enable Desktop-Experience feature on Server (2008-2012)
+ win_feature:
+ name: Desktop-Experience
+ state: present
+ register: enable_desktop_experience
+ when: ('Windows Server 2008' in ansible_distribution or 'Windows Server 2012' in ansible_distribution)
+
+ - name: reboot if needed
+ win_reboot:
+ when: (enable_webdav_redirector is changed and enable_webdav_redirector.reboot_required) or (enable_desktop_experience is changed and enable_desktop_experience.reboot_required)
+
+ when: (ansible_os_product_type | default('server')) == 'server'
+
+ - name: set webclient maximum file size
+ win_regedit:
+ path: HKLM:\SYSTEM\CurrentControlSet\Services\WebClient\Parameters
+ name: FileSizeLimitinBytes
+ data: "{{ webclient_maximum_file_size }}"
+ type: dword
+
+ - name: ensure webclient service is started in auto mode
+ win_service:
+ name: webclient
+ start_mode: auto
+ state: started
+
+ when:
+ - policy.install_webclient_service|bool
+ - ansible_os_installation_type | default('server') | lower != 'server core'
+
- name: set connection profile to private (Windows 10)
win_shell: Set-NetConnectionProfile -NetworkCategory Private
when:
- "'Windows 10' in ansible_distribution"
- name: set connection profile to private (Windows 7)
- win_shell: '{{ set_network_to_private }}'
+ win_shell: "{{ set_network_to_private }}"
+ when: "'Windows 7' in ansible_distribution"
+
+- name: disable end of support notification (Windows 7,8)
+ win_regedit:
+ path: HKCU:\Software\Microsoft\Windows\CurrentVersion\EOSNotify
+ name: DiscontinueEOS
+ data: 1
+ type: dword
when:
- - "'Windows 7' in ansible_distribution"
+ - (policy.disable_eos_reminder | default(true))|bool
+ - ('Windows 7' in ansible_distribution) or ('Windows 8' in ansible_distribution)
- name: Ensure local account password doesn't expire
win_user:
name: "{{ ansible_user }}"
- password_never_expires: yes
\ No newline at end of file
+ password_never_expires: true
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/power.yml b/roles/oatakan.windows_template_build/tasks/power.yml
index 9a01f68..5c09fb4 100644
--- a/roles/oatakan.windows_template_build/tasks/power.yml
+++ b/roles/oatakan.windows_template_build/tasks/power.yml
@@ -1,6 +1,16 @@
---
-- name: change power plan to high performance
- win_power_plan:
- name: high performance
- ignore_errors: yes
\ No newline at end of file
+- block:
+ - name: change power plan to high performance
+ win_power_plan:
+ name: high performance
+
+ rescue:
+ - name: use powershell to change plan to high performance
+ win_shell: |
+ powercfg -setactive 8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c
+ #powercfg /change monitor-timeout-ac 0
+ powercfg /change disk-timeout-ac 0
+ powercfg /change standby-timeout-ac 0
+ powercfg /change hibernate-timeout-ac 0
+ ignore_errors: true
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/remove-apps-alt-2.yml b/roles/oatakan.windows_template_build/tasks/remove-apps-alt-2.yml
deleted file mode 100644
index d425737..0000000
--- a/roles/oatakan.windows_template_build/tasks/remove-apps-alt-2.yml
+++ /dev/null
@@ -1,96 +0,0 @@
----
-
-- name: remove default apps
- win_shell: |
- $ErrorActionPreference = "Stop"
- $apps = @(
- "Microsoft.3DBuilder",
- "Microsoft.Appconnector",
- "Microsoft.BingFinance",
- "Microsoft.BingNews",
- "Microsoft.BingSports",
- "Microsoft.BingWeather",
- "Microsoft.FreshPaint",
- "Microsoft.Getstarted",
- "Microsoft.MicrosoftOfficeHub",
- "Microsoft.MicrosoftSolitaireCollection",
- "Microsoft.MicrosoftStickyNotes",
- "Microsoft.Office.OneNote",
- "Microsoft.OneConnect",
- "Microsoft.People",
- "Microsoft.SkypeApp",
- "Microsoft.Windows.Photos",
- "Microsoft.WindowsAlarms",
- "Microsoft.WindowsCalculator",
- "Microsoft.WindowsCamera",
- "Microsoft.WindowsMaps",
- "Microsoft.WindowsPhone",
- "Microsoft.WindowsSoundRecorder",
- "Microsoft.XboxApp",
- "Microsoft.ZuneMusic",
- "Microsoft.ZuneVideo",
- "Microsoft.WindowsCommunicationsApps",
- "Microsoft.MinecraftUWP",
- "Microsoft.MicrosoftPowerBIForWindows",
- "Microsoft.NetworkSpeedTest",
- "Microsoft.CommsPhone",
- "Microsoft.ConnectivityStore",
- "Microsoft.Messaging",
- "Microsoft.Office.Sway",
- "Microsoft.OneConnect",
- "Microsoft.WindowsFeedbackHub",
- "Microsoft.BingFoodAndDrink",
- "Microsoft.BingTravel",
- "Microsoft.BingHealthAndFitness",
- "Microsoft.WindowsReadingList",
- "Microsoft.MSPaint",
- "Microsoft.Microsoft3DViewer",
- "Microsoft.Print3D",
- "9E2F88E3.Twitter",
- "PandoraMediaInc.29680B314EFC2",
- "Flipboard.Flipboard",
- "ShazamEntertainmentLtd.Shazam",
- "king.com.CandyCrushSaga",
- "king.com.CandyCrushSodaSaga",
- "king.com.*",
- "ClearChannelRadioDigital.iHeartRadio",
- "4DF9E0F8.Netflix",
- "6Wunderkinder.Wunderlist",
- "Drawboard.DrawboardPDF",
- "2FE3CB00.PicsArt-PhotoStudio",
- "D52A8D61.FarmVille2CountryEscape",
- "TuneIn.TuneInRadio",
- "GAMELOFTSA.Asphalt8Airborne",
- "TheNewYorkTimes.NYTCrossword",
- "DB6EA5DB.CyberLinkMediaSuiteEssentials",
- "Facebook.Facebook",
- "flaregamesGmbH.RoyalRevolt2",
- "Playtika.CaesarsSlotsFreeCasino",
- "A278AB0D.MarchofEmpires",
- "KeeperSecurityInc.Keeper",
- "ThumbmunkeysLtd.PhototasticCollage",
- "XINGAG.XING",
- "89006A2E.AutodeskSketchBook",
- "D5EA27B7.Duolingo-LearnLanguagesforFree",
- "46928bounde.EclipseManager",
- "ActiproSoftwareLLC.562882FEEB491"
- )
- foreach ($app in $apps) {
- Get-AppxPackage -Name $app -AllUsers | Remove-AppxPackage -AllUsers
- Get-AppxProvisionedPackage -Online | Where-Object { $_.DisplayName -like $app } | Remove-AppxProvisionedPackage -Online
- }
- register: cleanup_win10_remove
- until: cleanup_win10_remove is successful
- retries: 5
- delay: 1
- ignore_errors: yes
-
-- name: prevent suggested applications from returning
- win_regedit:
- path: HKLM:\SOFTWARE\Policies\Microsoft\Windows\Cloud Content
- name: DisableWindowsConsumerFeatures
- data: 1
- datatype: dword
-
-- name: reboot to effect pending changes
- win_reboot:
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/remove-apps-alt.yml b/roles/oatakan.windows_template_build/tasks/remove-apps-alt.yml
deleted file mode 100644
index f891088..0000000
--- a/roles/oatakan.windows_template_build/tasks/remove-apps-alt.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-
-- name: remove user apps
- script: RemoveUserApps.ps1
- register: cleanup_win10_remove
- until: cleanup_win10_remove is successful
- retries: 3
- delay: 1
- ignore_errors: yes
-
-#- name: disable windows store
-# win_regedit:
-# path: HKLM:\Software\Policies\Microsoft\WindowsStore
-# name: AutoDownload
-# data: 00000002
-# type: dword
-#
-#- name: disable content delivery manager
-# win_regedit:
-# path: HKCU:\Software\Microsoft\Windows\CurrentVersion\ContentDeliveryManager
-# name: SilentInstalledAppsEnabled
-# data: 00000000
-# type: dword
-#
-#- name: disable windows store
-# win_regedit:
-# path: HKLM:\Software\Policies\Microsoft\Windows\CloudContent
-# name: DisableWindowsConsumerFeatures
-# data: 00000001
-# type: dword
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/remove-apps.yml b/roles/oatakan.windows_template_build/tasks/remove-apps.yml
deleted file mode 100644
index 3857d13..0000000
--- a/roles/oatakan.windows_template_build/tasks/remove-apps.yml
+++ /dev/null
@@ -1,97 +0,0 @@
----
-
-- name: Setup the xWebAdministration module
- win_psmodule:
- name: DSCR_AppxPackage
- state: present
-
-- name: remove packages
- win_dsc:
- resource_name: cAppxProvisionedPackageSet
- Ensure: Absent
- PackageName:
- - Microsoft.3DBuilder
- - Microsoft.Appconnector
- - Microsoft.BingFinance
- - Microsoft.BingNews
- - Microsoft.BingSports
- - Microsoft.BingWeather
- - Microsoft.FreshPaint
- - Microsoft.Getstarted
- - Microsoft.MicrosoftOfficeHub
- - Microsoft.MicrosoftSolitaireCollection
- - Microsoft.MicrosoftStickyNotes
- - Microsoft.Office.OneNote
- - Microsoft.OneConnect
- - Microsoft.People
- - Microsoft.SkypeApp
- - Microsoft.Windows.Photos
- - Microsoft.WindowsAlarms
- - Microsoft.WindowsCalculator
- - Microsoft.WindowsCamera
- - Microsoft.WindowsMaps
- - Microsoft.WindowsPhone
- - Microsoft.WindowsSoundRecorder
- - Microsoft.XboxApp
- - Microsoft.ZuneMusic
- - Microsoft.ZuneVideo
- - Microsoft.WindowsCommunicationsApps
- - Microsoft.MinecraftUWP
- - Microsoft.MicrosoftPowerBIForWindows
- - Microsoft.NetworkSpeedTest
- - Microsoft.CommsPhone
- - Microsoft.ConnectivityStore
- - Microsoft.Messaging
- - Microsoft.Office.Sway
- - Microsoft.OneConnect
- - Microsoft.WindowsFeedbackHub
- - Microsoft.BingFoodAndDrink
- - Microsoft.BingTravel
- - Microsoft.BingHealthAndFitness
- - Microsoft.WindowsReadingList
- - Microsoft.MSPaint
- - Microsoft.Microsoft3DViewer
- - Microsoft.Print3D
- - 9E2F88E3.Twitter
- - PandoraMediaInc.29680B314EFC2
- - Flipboard.Flipboard
- - ShazamEntertainmentLtd.Shazam
- - king.com.CandyCrushSaga
- - king.com.CandyCrushSodaSaga
- - king.com.*
- - ClearChannelRadioDigital.iHeartRadio
- - 4DF9E0F8.Netflix
- - 6Wunderkinder.Wunderlist
- - Drawboard.DrawboardPDF
- - 2FE3CB00.PicsArt-PhotoStudio
- - D52A8D61.FarmVille2CountryEscape
- - TuneIn.TuneInRadio
- - GAMELOFTSA.Asphalt8Airborne
- - TheNewYorkTimes.NYTCrossword
- - DB6EA5DB.CyberLinkMediaSuiteEssentials
- - Facebook.Facebook
- - flaregamesGmbH.RoyalRevolt2
- - Playtika.CaesarsSlotsFreeCasino
- - A278AB0D.MarchofEmpires
- - KeeperSecurityInc.Keeper
- - ThumbmunkeysLtd.PhototasticCollage
- - XINGAG.XING
- - 89006A2E.AutodeskSketchBook
- - D5EA27B7.Duolingo-LearnLanguagesforFree
- - 46928bounde.EclipseManager
- - ActiproSoftwareLLC.562882FEEB491-
- register: cleanup_win10_remove
- until: cleanup_win10_remove is successful
- retries: 3
- delay: 1
- ignore_errors: yes
-
-- name: prevent suggested applications from returning
- win_regedit:
- path: HKLM:\SOFTWARE\Policies\Microsoft\Windows\Cloud Content
- name: DisableWindowsConsumerFeatures
- data: 1
- datatype: dword
-
-- name: reboot to effect pending changes
- win_reboot:
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/remove-onedrive.yml b/roles/oatakan.windows_template_build/tasks/remove-onedrive.yml
deleted file mode 100644
index 9f88618..0000000
--- a/roles/oatakan.windows_template_build/tasks/remove-onedrive.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-
-- name: kill onedrive process
- win_shell: Stop-Process -Name OneDrive
- ignore_errors: yes
-
-- name: uninstall onedrive
- win_shell: '{{ ansible_env.SystemRoot }}\SysWOW64\OneDriveSetup.exe /uninstall'
- ignore_errors: yes
-
-- name: remove onedrivesync package
- win_shell: get-appxpackage *Microsoft.OneDriveSync* | remove-appxpackage -AllUsers
- ignore_errors: yes
-
-- name: remove onedrive directories
- win_file:
- path: '{{ item }}'
- state: absent
- ignore_errors: yes
- loop:
- - '{{ ansible_env.USERPROFILE }}\OneDrive'
- - '{{ ansible_env.LOCALAPPDATA }}\Microsoft\OneDrive'
- - '{{ ansible_env.ProgramData }}\Microsoft OneDrive'
- - C:\OneDriveTemp
-
-- name: delete registry keys
- win_regedit:
- path: '{{ item }}'
- state: absent
- delete_key: yes
- loop:
- - HKCR:\CLSID\{018D5C66-4533-4307-9B53-224DE2ED1FE6}
- - HKCR:\Wow6432Node\CLSID\{018D5C66-4533-4307-9B53-224DE2ED1FE6}
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/security-update-win2012.yml b/roles/oatakan.windows_template_build/tasks/security-update-win2012.yml
deleted file mode 100644
index 6f826cb..0000000
--- a/roles/oatakan.windows_template_build/tasks/security-update-win2012.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# this update is needed to enable .NET clients to use https (tslv12) on Windows 8.1 and Windows Server 2012 R2
-# see https://www.microsoft.com/en-us/download/confirmation.aspx?id=42883
-
-- name: download hotfix
- win_get_url:
- url: '{{ dot_net_security_hotfix.url }}'
- dest: '{{ dot_net_security_hotfix_download_location }}\{{ dot_net_security_hotfix.file }}'
- register: download_hotfix
- until: download_hotfix is success
- delay: 3
- retries: 5
-
-- block:
- - name: install hotfix (PS >= 4)
- win_hotfix:
- source: '{{ dot_net_security_hotfix_download_location }}\{{ dot_net_security_hotfix.file }}'
- state: present
- register: hotfix_install
- when: ansible_powershell_version is version('4', '>=')
- rescue:
- - name: install hotfix using shell
- win_shell: '{{ dot_net_security_hotfix_download_location }}\{{ dot_net_security_hotfix.file }} /quiet /norestart'
- register: hotfix_install
-
-- name: install hotfix (PS == 3)
- win_shell: '{{ dot_net_security_hotfix_download_location }}\{{ dot_net_security_hotfix.file }} /quiet /norestart'
- register: hotfix_install
- when: ansible_powershell_version is version('3', '==')
-
-- name: ensure hotfix file is removed
- win_file:
- path: '{{ dot_net_security_hotfix_download_location }}\{{ dot_net_security_hotfix.file }}'
- state: absent
-
-- name: reboot if needed
- win_reboot:
- when: hotfix_install.reboot_required | default(False)
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/tasks/shutdown.yml b/roles/oatakan.windows_template_build/tasks/shutdown.yml
index ba0da79..ac46394 100644
--- a/roles/oatakan.windows_template_build/tasks/shutdown.yml
+++ b/roles/oatakan.windows_template_build/tasks/shutdown.yml
@@ -2,4 +2,4 @@
- name: run sysprep-shutdown scheduled task
win_shell: schtasks.exe /Run /TN "sysprep-shutdown"
- ignore_errors: yes
+ ignore_errors: true
diff --git a/roles/oatakan.windows_template_build/tasks/sysprep.yml b/roles/oatakan.windows_template_build/tasks/sysprep.yml
index f66fd0e..3a3659f 100644
--- a/roles/oatakan.windows_template_build/tasks/sysprep.yml
+++ b/roles/oatakan.windows_template_build/tasks/sysprep.yml
@@ -16,8 +16,8 @@
state: directory
- name: enable winrm
- win_shell: '& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString("https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1"))) -ForceNewSSLCert -EnableCredSSP'
- ignore_errors: yes
+ win_shell: '{{ enable_winrm_command }}'
+ ignore_errors: true
when: "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution"
- name: copy unattend.xml
@@ -25,9 +25,9 @@
src: unattend.xml.j2
dest: C:\Windows\system32\sysprep\unattend.xml
when:
- - ('VMware' not in ansible_product_name) or ('VMware' in ansible_product_name and target_vagrant | bool)
+ - ('VMware' not in (ansible_product_name | default('', true))) or ('VMware' in (ansible_product_name | default('', true)) and target_vagrant | bool)
- not target_ovirt | bool
- - not ('KubeVirt' in ansible_system_vendor | default(''))
+ - not ('KubeVirt' in ansible_system_vendor | default('', true))
#- name: run sysprep
# win_shell: C:\Windows\system32\sysprep\sysprep.exe /generalize /shutdown /oobe /quiet
@@ -40,32 +40,32 @@
win_scheduled_task:
name: sysprep-shutdown
username: SYSTEM
- disallow_start_if_on_batteries: no
- stop_if_going_on_batteries: no
+ disallow_start_if_on_batteries: false
+ stop_if_going_on_batteries: false
actions:
- path: powershell.exe
arguments: Remove-Item -Path WSMan:\localhost\Listener\* -Recurse -Force
- path: C:\windows\system32\sysprep\sysprep.exe
arguments: /generalize /oobe /quiet /shutdown
when:
- - ('VMware' not in ansible_product_name) or ('VMware' in ansible_product_name and target_vagrant | bool) or (target_ovirt | bool) or ('KubeVirt' in ansible_system_vendor | default(''))
+ - ('VMware' not in (ansible_product_name | default('', true))) or ('VMware' in (ansible_product_name | default('', true)) and target_vagrant | bool) or (target_ovirt | bool) or ('KubeVirt' in ansible_system_vendor | default(''))
- name: create scheduled task to delete WinRM listeners and shutdown
win_scheduled_task:
name: sysprep-shutdown
username: SYSTEM
- disallow_start_if_on_batteries: no
- stop_if_going_on_batteries: no
+ disallow_start_if_on_batteries: false
+ stop_if_going_on_batteries: false
actions:
- path: powershell.exe
arguments: Remove-Item -Path WSMan:\localhost\Listener\* -Recurse -Force
- path: shutdown.exe
arguments: /s /t 10 /f /d p:4:1 /c "Ansible Shutdown"
when:
- - "'VMware' in ansible_product_name"
+ - ('VMware' in (ansible_product_name | default('', true)))
- not target_vagrant | bool
- not target_ovirt | bool
- - not ('KubeVirt' in ansible_system_vendor | default(''))
+ - not ('KubeVirt' in (ansible_system_vendor | default('', true)))
- name: set flag to recreate pagefile after next sysprep
win_shell: |
diff --git a/roles/oatakan.windows_template_build/tasks/update-agent-win2008.yml b/roles/oatakan.windows_template_build/tasks/update-agent-win2008.yml
deleted file mode 100644
index 5b47060..0000000
--- a/roles/oatakan.windows_template_build/tasks/update-agent-win2008.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# this updates windows update which is needed to install further updates
-# see https://docs.microsoft.com/en-US/troubleshoot/windows-client/deployment/update-windows-update-agent
-
-- name: ensure Windows Update Agent on 2008 is installed
- win_package:
- path: "{{ windows_update_agent_url }}"
- arguments:
- - /quiet
- - /norestart
- - /wuforce
- creates_path: C:\Windows\System32\wuaueng.dll
- creates_version: 7.6.7600.320
\ No newline at end of file
diff --git a/roles/oatakan.windows_template_build/templates/unattend.xml.j2 b/roles/oatakan.windows_template_build/templates/unattend.xml.j2
index b271c88..07346ef 100644
--- a/roles/oatakan.windows_template_build/templates/unattend.xml.j2
+++ b/roles/oatakan.windows_template_build/templates/unattend.xml.j2
@@ -1,7 +1,7 @@
-
+
{% if unattend.administrator_password is defined %}
@@ -41,7 +41,7 @@
true
Home
1
-{% if not '2008' in ansible_distribution or not 'Windows 7' in ansible_distribution %}
+{% if not 'Windows Server 2008' in ansible_distribution and not 'Windows 7' in ansible_distribution %}
true
true
true
@@ -60,22 +60,49 @@
{% endif %}
+{% if not 'Windows Server 2008' in ansible_distribution and not 'Windows 7' in ansible_distribution %}
- cmd.exe /c powershell -Command "& $([scriptblock]::Create((New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'))) -ForceNewSSLCert -EnableCredSSP"
- Enable winrm
+ cmd.exe /c powershell -Command "Resize-Partition -DriveLetter C -Size (Get-PartitionSupportedSize -DriveLetter C).Sizemax -ErrorAction SilentlyContinue"
+ Resize partition
1
true
+
+ cmd.exe /c powershell -Command "Set-NetConnectionProfile -NetworkCategory Private"
+ Set network connection profile to private
+ 2
+ true
+
+{% else %}
+
+ cmd.exe /c powershell -Command "{{ expand_disk }}"
+ Resize partition
+ 1
+ true
+
+
+ cmd.exe /c powershell -Command "{{ set_network_to_private }}"
+ Set network connection profile to private
+ 2
+ true
+
+{% endif %}
+
+ cmd.exe /c powershell -Command "{{ enable_winrm_command }}"
+ Enable winrm
+ 3
+ true
+
cmd.exe /c powershell -Command "Enable-WSManCredSSP -Role Server -Force"
Enable winrm server role
- 2
+ 4
true
cmd.exe /c powershell -Command "Set-Item -Path 'WSMan:\localhost\Service\Auth\CredSSP' -Value $true"
Enable credssp authentication
- 3
+ 5
true
@@ -83,7 +110,7 @@
-
+
false
@@ -92,7 +119,7 @@
{{ settings.time_zone | default('Central Standard Time') }}
-
+
{{ settings.skip_auto_activation | default('true') }}
diff --git a/roles/oatakan.windows_template_build/vars/main.yml b/roles/oatakan.windows_template_build/vars/main.yml
index e14b634..40ece9d 100644
--- a/roles/oatakan.windows_template_build/vars/main.yml
+++ b/roles/oatakan.windows_template_build/vars/main.yml
@@ -21,4 +21,15 @@ autologin_registry:
- name: DefaultUserName
data: "{{ unattend.local_accounts[0].name }}"
- name: DefaultPassword
- data: "{{ unattend.local_accounts[0].password }}"
\ No newline at end of file
+ data: "{{ unattend.local_accounts[0].password }}"
+
+win_architecture_list:
+ arm_64_bit_processor: arm64
+ arm_32_bit_processor: arm
+ 64_bit: amd64
+ 32_bit: x86
+
+win_architecture: "{{ win_architecture_list[(ansible_architecture | default('64-bit'))|replace('-','_')|replace(' ','_')|lower] }}"
+
+os_version: "{{ ansible_kernel.split('.')[0] }}.{{ ansible_kernel.split('.')[1] }}"
+os_version_name: "os_{{ ansible_kernel.split('.')[0] }}_{{ ansible_kernel.split('.')[1] }}"
diff --git a/roles/oatakan.windows_update/README.md b/roles/oatakan.windows_update/README.md
index 4ed01a1..58a567b 100644
--- a/roles/oatakan.windows_update/README.md
+++ b/roles/oatakan.windows_update/README.md
@@ -14,6 +14,10 @@ Role Variables
Dependencies
------------
+A list of roles that this role utilizes:
+
+- oatakan.windows_hotfix
+
Example Playbook
----------------
@@ -36,7 +40,7 @@ Including an example of how to use your role (for instance, with variables passe
For disconnected environments, you can overwrite this variable to point to a local copy of a script to enable winrm:
-**winrm_enable_script_url:** https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1
+**winrm_enable_script_url:** https://raw.githubusercontent.com/ansible/ansible-documentation/devel/examples/scripts/ConfigureRemotingForAnsible.ps1
you can also localize virtio-win and update the virtio_iso_url variable to point to your local url:
diff --git a/roles/oatakan.windows_update/defaults/main.yml b/roles/oatakan.windows_update/defaults/main.yml
index b8d1381..5dcef91 100644
--- a/roles/oatakan.windows_update/defaults/main.yml
+++ b/roles/oatakan.windows_update/defaults/main.yml
@@ -1,17 +1,31 @@
---
+install_updates_retry_limit: 300
update_retry_count: 0
update_retry_limit: 10
-win_update_category_names:
- - CriticalUpdates
- - DefinitionUpdates
- - SecurityUpdates
- - UpdateRollups
- - Updates
-hotfix_download_location: "{{ ansible_env.TEMP }}"
+win_update_category_names: '*'
+#win_update_category_names:
+# - CriticalUpdates
+# - DefinitionUpdates
+# - FeaturePacks
+# - SecurityUpdates
+# - UpdateRollups
+# - Updates
-hotfixes_group_1:
+win_update_reject_list: []
+win_update_accept_list: []
+win_update_server_selection: default
+win_update_disable_firewall: true
+failed_kb: []
+
+windows_update_disable_firewall: true
+
+temp_directory: "{{ ansible_env.TEMP }}"
+
+windows_hotfix_role: oatakan.windows_hotfix
+
+hotfixes:
- kb: KB3020369
file: Windows6.1-KB3020369-x64.msu
url: https://download.microsoft.com/download/F/D/3/FD3728D5-0D2F-44A6-B7DA-1215CC0C9B75/Windows6.1-KB3020369-x64.msu
diff --git a/roles/oatakan.windows_update/handlers/main.yml b/roles/oatakan.windows_update/handlers/main.yml
index 66372ec..3427044 100644
--- a/roles/oatakan.windows_update/handlers/main.yml
+++ b/roles/oatakan.windows_update/handlers/main.yml
@@ -1,5 +1,17 @@
---
+- name: get Windows ADK uninstall command
+ win_reg_stat:
+ path: HKLM:\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\{d794748d-72e9-45d7-9ab7-83d6c4c80f7f}
+ name: QuietUninstallString
+ register: windows_adk_uninstall_string
+
+- name: uninstall Windows ADK
+ win_shell: "{{ windows_adk_uninstall_string.value }}"
+ args:
+ executable: cmd
+ when: windows_adk_uninstall_string.value is defined
+
- name: ensure Windows ADK with DISM is removed
win_chocolatey:
name: windows-adk-deploy
diff --git a/roles/oatakan.windows_update/meta/.galaxy_install_info b/roles/oatakan.windows_update/meta/.galaxy_install_info
index 507346c..63b0e3f 100644
--- a/roles/oatakan.windows_update/meta/.galaxy_install_info
+++ b/roles/oatakan.windows_update/meta/.galaxy_install_info
@@ -1,2 +1,2 @@
-install_date: Fri Oct 15 18:59:19 2021
+install_date: Thu 08 Feb 2024 08:54:03 PM
version: master
diff --git a/roles/oatakan.windows_update/tasks/main.yml b/roles/oatakan.windows_update/tasks/main.yml
index 5f1aa06..cdac57e 100644
--- a/roles/oatakan.windows_update/tasks/main.yml
+++ b/roles/oatakan.windows_update/tasks/main.yml
@@ -1,18 +1,33 @@
---
+- debug:
+ msg: "win update server: {{ win_update_server }}"
+ when: win_update_server is defined
+
- name: disable firewall for Domain, Public and Private profiles
win_shell: Set-NetFirewallProfile -Profile Domain,Public,Private -Enabled False
- when: "'Windows Server 2012' in ansible_distribution"
+ when:
+ - "'Windows Server 2012' in ansible_distribution or 'Windows 8' in ansible_distribution"
+ - windows_update_disable_firewall | bool
- name: disable firewall for Domain, Public and Private profiles
win_shell: netsh advfirewall set allprofiles state off
- when: "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution"
+ when:
+ - "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution"
+ - windows_update_disable_firewall | bool
- name: get used space before update
win_shell: Get-PSDrive C | Select-Object Used | ConvertTo-Json
register: used_space_before_update
ignore_errors: yes
+- name: reset some facts
+ set_fact:
+ update_retry_count: 0
+ missing_hotfixes: []
+ failed_kb: []
+ _reject_list: []
+
- include_tasks: updates-all.yml
when:
- "'Windows Server 2008' not in ansible_distribution"
@@ -42,8 +57,12 @@
- name: enabled firewall for Domain, Public and Private profiles
win_shell: Set-NetFirewallProfile -Profile Domain,Public,Private -Enabled True
- when: "'Windows Server 2012' in ansible_distribution"
+ when:
+ - "'Windows Server 2012' in ansible_distribution or 'Windows 8' in ansible_distribution"
+ - windows_update_disable_firewall | bool
- name: enable firewall for Domain, Public and Private profiles
- win_shell: netsh advfirewall set allprofiles state on
- when: "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution"
\ No newline at end of file
+ win_shell: netsh advfirewall set allprofiles state on
+ when:
+ - "'Windows Server 2008' in ansible_distribution or 'Windows 7' in ansible_distribution"
+ - windows_update_disable_firewall | bool
\ No newline at end of file
diff --git a/roles/oatakan.windows_update/tasks/updates-all.yml b/roles/oatakan.windows_update/tasks/updates-all.yml
index d85c8ad..1e130cf 100644
--- a/roles/oatakan.windows_update/tasks/updates-all.yml
+++ b/roles/oatakan.windows_update/tasks/updates-all.yml
@@ -1,19 +1,32 @@
---
+- block:
+ - name: check for available updates
+ win_updates:
+ category_names: "{{ win_update_category_names }}"
+ reject_list: "{{ win_update_reject_list | default(omit) }}"
+ server_selection: "{{ win_update_server_selection }}"
+ state: searched
+ register: available_updates
+ rescue:
+ - name: ensure we have connection
+ wait_for_connection:
-- name: check for available updates
- win_updates:
- category_names: "{{ win_update_category_names }}"
- blacklist: "{{ win_update_blacklist | default(omit) }}"
- state: searched
- register: available_updates
+ - name: check for available updates (retry)
+ win_updates:
+ category_names: "{{ win_update_category_names }}"
+ reject_list: "{{ win_update_reject_list | default(omit) }}"
+ server_selection: "{{ win_update_server_selection }}"
+ state: searched
+ register: available_updates
- debug:
- msg: |
+ msg: "{{ _msg.split('\n')[:-1] }}"
+ vars:
+ _msg: |
{{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available.
{% for update in updates %}
- {{ update.title }}
{% endfor %}
- vars:
updates: "{{ (available_updates.updates.values() | list) if (available_updates.updates is mapping) else (available_updates.updates) }}"
when: available_updates.updates is defined
@@ -22,8 +35,20 @@
- available_updates.updates is defined
- available_updates.found_update_count > 0
+# see https://learn.microsoft.com/en-us/sharepoint/troubleshoot/administration/800703fa-illegal-operation-error
+# error code 0x800703FA happens with some updates when user is not logged in
+# remove the registry key if added during the update
+- include_tasks: force_user_registry.yml
+ vars:
+ task_state: absent
+ when:
+ - disable_force_unload_registry is defined
+ - disable_force_unload_registry is not skipped
+ - disable_force_unload_registry is changed
+
- name: check for missing updates
win_updates:
+ server_selection: "{{ win_update_server_selection }}"
state: searched
register: available_updates
diff --git a/roles/oatakan.windows_update/tasks/updates-powershell.yml b/roles/oatakan.windows_update/tasks/updates-powershell.yml
index a80612b..d72c10b 100644
--- a/roles/oatakan.windows_update/tasks/updates-powershell.yml
+++ b/roles/oatakan.windows_update/tasks/updates-powershell.yml
@@ -4,6 +4,7 @@
block:
- name: check for available updates
win_updates:
+ server_selection: "{{ win_update_server_selection }}"
category_names:
- CriticalUpdates
- DefinitionUpdates
@@ -14,16 +15,23 @@
register: available_updates
- debug:
- msg: |
+ msg: "{{ _msg.split('\n')[:-1] }}"
+ vars:
+ _msg: |
{{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available.
{% for update in updates %}
- {{ update.title }}
{% endfor %}
- vars:
updates: "{{ (available_updates.updates.values() | list) if (available_updates.updates is mapping) else (available_updates.updates) }}"
when: available_updates.updates is defined
- block:
+ - name: ensure there is connection
+ wait_for_connection:
+ delay: 60
+ sleep: 10
+ timeout: 600
+
- name: install windows updates using powershell script
script: win-updates.ps1
become: yes
@@ -56,6 +64,7 @@
when: update_reboot_required_key.exists
- name: check for missing updates
win_updates:
+ server_selection: "{{ win_update_server_selection }}"
category_names:
- CriticalUpdates
@@ -67,12 +76,13 @@
register: missing_updates
- debug:
- msg: |
+ msg: "{{ _msg.split('\n')[:-1] }}"
+ vars:
+ _msg: |
{{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing.
{% for update in updates %}
- {{ update.title }}
{% endfor %}
- vars:
updates: "{{ (missing_updates.updates.values() | list) if (missing_updates.updates is mapping) else (missing_updates.updates) }}"
when: missing_updates.updates is defined
diff --git a/roles/oatakan.windows_update/tasks/updates-win2008r2.yml b/roles/oatakan.windows_update/tasks/updates-win2008r2.yml
index e2fe4a3..9f9630c 100644
--- a/roles/oatakan.windows_update/tasks/updates-win2008r2.yml
+++ b/roles/oatakan.windows_update/tasks/updates-win2008r2.yml
@@ -1,72 +1,40 @@
---
-- name: ensure Windows ADK with DISM is installed
- win_chocolatey:
- name: windows-adk-deploy
- state: present
- version: 10.0.17134.0
- register: install_windows_adk_deploy
- notify: ensure Windows ADK with DISM is removed
+- include_tasks: install_dism.yml
-- name: ensure PATH contains Windows ADK
- win_path:
- scope: machine
- state: present
- elements: "C:\\Program Files (x86)\\Windows Kits\\10\\Assessment and Deployment Kit\\Deployment Tools\\amd64\\DISM"
+- name: check installed kbs
+ win_shell: wmic qfe | ConvertTo-Json
+ register: installed_kbs
-- name: download hotfix group 1
- win_get_url:
- url: '{{ item.url }}'
- dest: '{{ hotfix_download_location }}\{{ item.file }}'
- loop: "{{ hotfixes_group_1 }}"
+- name: missing hotfixes
+ set_fact:
+ missing_hotfixes: "{{ hotfixes | json_query(query) }}"
+ vars:
+ set_installed_kbs: "{{ (installed_kbs.stdout | from_json) | reject('match', '^$') | reject('match', '^Caption *') | map('regex_replace', '^.* (KB[0-9]+) .*', '\\1') | list }}"
+ query: "[?!contains(`{{ set_installed_kbs }}`, kb)]"
-- block:
- - name: install hotfix group 1 (PS >= 4)
- win_hotfix:
- source: '{{ hotfix_download_location }}\{{ item.file }}'
- state: present
- register: hotfix_install_group_1
- loop: "{{ hotfixes_group_1 }}"
- when: ansible_powershell_version is version('4', '>=')
- rescue:
- - name: install hotfix using shell
- win_shell: '{{ hotfix_download_location }}\{{ item.file }} /quiet /norestart'
- register: hotfix_install_group_1
- loop: "{{ hotfixes_group_1 }}"
-
-- name: install hotfix (PS == 3)
- win_shell: '{{ hotfix_download_location }}\{{ item.file }} /quiet /norestart'
- register: hotfix_install_group_1
- loop: "{{ hotfixes_group_1 }}"
- when: ansible_powershell_version is version('3', '==')
-
-- name: debug hotfix installation result
- debug:
- var: hotfix_install_group_1
-
-- name: ensure hotfix file is removed (group 1)
- win_file:
- path: '{{ hotfix_download_location }}\{{ item.file }}'
- state: absent
- loop: "{{ hotfixes_group_1 }}"
-
-- name: reboot from starting update
- win_reboot:
+- include_role:
+ name: "{{ windows_hotfix_role }}"
+ loop: "{{ missing_hotfixes }}"
+ loop_control:
+ loop_var: hotfix
- name: check for available updates
win_updates:
+ server_selection: "{{ win_update_server_selection }}"
category_names: "{{ win_update_category_names }}"
- blacklist: "{{ win_update_blacklist | default(omit) }}"
+ reject_list: "{{ win_update_reject_list | default(omit) }}"
state: searched
register: available_updates
- debug:
- msg: |
+ msg: "{{ _msg.split('\n')[:-1] }}"
+ vars:
+ _msg: |
{{ inventory_hostname }} has {{ available_updates.found_update_count }} updates available.
{% for update in updates %}
- {{ update.title }}
{% endfor %}
- vars:
updates: "{{ (available_updates.updates.values() | list) if (available_updates.updates is mapping) else (available_updates.updates) }}"
when: available_updates.updates is defined
@@ -77,6 +45,7 @@
- name: check for missing updates.
win_updates:
+ server_selection: "{{ win_update_server_selection }}"
state: searched
register: available_updates
diff --git a/roles/oatakan.windows_update/tasks/updates-with-retry.yml b/roles/oatakan.windows_update/tasks/updates-with-retry.yml
index 2c40d7c..9e814a3 100644
--- a/roles/oatakan.windows_update/tasks/updates-with-retry.yml
+++ b/roles/oatakan.windows_update/tasks/updates-with-retry.yml
@@ -3,15 +3,54 @@
- name: update over multiple reboots
block:
- block:
+ - name: set reject list
+ set_fact:
+ _reject_list: "{{ (win_update_reject_list | default([])) + (failed_kb | default([])) }}"
+ when: (win_update_reject_list | length) or (failed_kb | length)
+
- name: install all windows updates
win_updates:
+ server_selection: "{{ win_update_server_selection }}"
category_names: "{{ win_update_category_names }}"
- blacklist: "{{ (win_update_blacklist | default([])) + (failed_kb | default([])) }}"
- whitelist: "{{ win_update_whitelist | default(omit) }}"
- reboot: yes
+ reject_list: "{{ _reject_list | default(omit) }}"
+ accept_list: "{{ win_update_accept_list | default(omit) }}"
+ reboot: false
+ async: 7200
+ poll: 0
+ register: installed_updates_async
+
+ - name: wait for updates to finish
+ async_status:
+ jid: "{{ installed_updates_async.ansible_job_id }}"
register: installed_updates
+ until: installed_updates.finished
+ retries: "{{ install_updates_retry_limit }}"
+ delay: 30
+
+ - name: reboot the system if required
+ win_reboot:
+ reboot_timeout: 7200
+ when: installed_updates.reboot_required
rescue:
+ - name: ensure there is connection
+ wait_for_connection:
+ delay: 60
+ sleep: 10
+ timeout: 600
+
+ # see https://learn.microsoft.com/en-us/sharepoint/troubleshoot/administration/800703fa-illegal-operation-error
+ # error code 0x800703FA happens with some updates when user is not logged in
+ # add the registry key to disable forcefully unloading users registry at user logoff
+ - include_tasks: force_user_registry.yml
+ vars:
+ task_state: present
+ when:
+ - installed_updates is defined
+ - installed_updates is failed
+ - installed_updates.msg is defined
+ - ('0x800703FA' in installed_updates.msg)
+
- name: reboot the system to recover from a failed update
win_reboot:
reboot_timeout: 7200
@@ -36,32 +75,50 @@
delay: 60
sleep: 10
timeout: 600
+ - block:
+ - name: work on any skipped KB
+ win_updates:
+ server_selection: "{{ win_update_server_selection }}"
+ category_names: "{{ win_update_category_names }}"
+ reject_list: "{{ win_update_reject_list | default(omit) }}"
+ accept_list: "{{ failed_kb | default(omit) }}"
+ reboot: false
+ async: 7200
+ poll: 0
+ register: installed_updates_retry_skipped_async
- - name: work on any skipped KB
- win_updates:
- category_names: "{{ win_update_category_names }}"
- blacklist: "{{ win_update_blacklist | default(omit) }}"
- whitelist: "{{ failed_kb | default([]) }}"
- reboot: yes
- register: installed_updates_retry_skipped
+ - name: wait for updates to finish
+ async_status:
+ jid: "{{ installed_updates_retry_skipped_async.ansible_job_id }}"
+ register: installed_updates_retry_skipped
+ until: installed_updates_retry_skipped.finished
+ retries: "{{ install_updates_retry_limit }}"
+ delay: 30
+
+ - name: reboot the system if required
+ win_reboot:
+ reboot_timeout: 7200
+ when: installed_updates_retry_skipped.reboot_required
when:
- failed_kb is defined
- failed_kb | length > 0
- name: check for missing updates
win_updates:
+ server_selection: "{{ win_update_server_selection }}"
category_names: "{{ win_update_category_names }}"
- blacklist: "{{ win_update_blacklist | default(omit) }}"
+ reject_list: "{{ win_update_reject_list | default(omit) }}"
state: searched
register: missing_updates
- debug:
- msg: |
+ msg: "{{ _msg.split('\n')[:-1] }}"
+ vars:
+ _msg: |
{{ inventory_hostname }} has {{ missing_updates.found_update_count }} updates still missing.
{% for update in updates %}
- {{ update.title }}
{% endfor %}
- vars:
updates: "{{ (missing_updates.updates.values() | list) if (missing_updates.updates is mapping) else (missing_updates.updates) }}"
when: missing_updates.updates is defined
@@ -72,7 +129,7 @@
{{ (update_retry_limit | int) - (update_retry_count | int) }} more retries left'
when:
- missing_updates.found_update_count > 0
- - ((update_retry_limit | int) - (update_retry_count | int) >= 0)
+ - ((update_retry_limit | int) - (update_retry_count | int) > 0)
rescue:
- name: set update count
@@ -80,7 +137,14 @@
update_retry_count: '{{ update_retry_count | default(0) | int + 1 }}'
- debug:
- msg: "Still more updates remaining - retrying... ({{ update_retry_count }}/{{ update_retry_limit }})"
+ msg: "Still more updates ({{ current_update_count }}) remaining - retrying... ({{ update_retry_count }}/{{ update_retry_limit }})"
+ vars:
+ current_update_count: "{{ missing_updates.found_update_count | default(installed_updates.found_update_count) | default('-') }}"
+
+ - name: ensure system is reachable
+ wait_for_connection:
+ sleep: 10
+ timeout: 600
- include_tasks: updates-with-retry.yml
- when: ((update_retry_limit | int) - (update_retry_count | int) >= 0)
\ No newline at end of file
+ when: ((update_retry_limit | int) - (update_retry_count | int) > 0)
\ No newline at end of file
diff --git a/roles/oatakan.windows_virtio/meta/.galaxy_install_info b/roles/oatakan.windows_virtio/meta/.galaxy_install_info
index 9260faa..b91aad1 100644
--- a/roles/oatakan.windows_virtio/meta/.galaxy_install_info
+++ b/roles/oatakan.windows_virtio/meta/.galaxy_install_info
@@ -1,2 +1,2 @@
-install_date: Fri Oct 15 18:59:17 2021
+install_date: Thu 08 Feb 2024 08:54:02 PM
version: master
diff --git a/roles/requirements.yml b/roles/requirements.yml
index 45490e0..5944844 100644
--- a/roles/requirements.yml
+++ b/roles/requirements.yml
@@ -1,21 +1,9 @@
# Java
- name: geerlingguy.java
-# Node.js (Using this repo temporarily, as it fixes a package naming bug (See #95))
-# - src: https://github.com/halkeye/ansible-role-nodejs
-# version: halkeye-patch-1
-# Gitlab
-- name: geerlingguy.gitlab
# Windows Ovirt Template
-- name: oatakan.windows_ovirt_template
- src: git+https://github.com/oatakan/ansible-role-windows_ovirt_template.git
- name: oatakan.windows_template_build
-- name: oatakan.ansible-role-windows-ad-controller
- src: git+https://github.com/oatakan/ansible-role-windows-ad-controller.git
-- name: oatakan.windows_ovirt_guest_agent
- name: oatakan.windows_virtio
- name: oatakan.windows_update
-- name: oatakan.ansible-role-ovirt
- src: git+https://github.com/oatakan/ansible-role-ovirt.git
- name: oatakan.rhel_template_build
- name: oatakan.rhel_ovirt_template
- name: ikke_t.podman_container_systemd
diff --git a/roles/systemli.apt_repositories/.editorconfig b/roles/systemli.apt_repositories/.editorconfig
deleted file mode 100644
index 73c5657..0000000
--- a/roles/systemli.apt_repositories/.editorconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-# http://editorconfig.org
-
-root = true
-
-[*]
-charset = utf-8
-end_of_line = lf
-insert_final_newline = true
-trim_trailing_whitespace = true
-
-[*.{py,rst,ini}]
-indent_style = space
-indent_size = 4
-
-[*.{yml,yaml}]
-indent_style = space
-indent_size = 2
diff --git a/roles/systemli.apt_repositories/.github/dependabot.yml b/roles/systemli.apt_repositories/.github/dependabot.yml
deleted file mode 100644
index 900df32..0000000
--- a/roles/systemli.apt_repositories/.github/dependabot.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-version: 2
-updates:
- - package-ecosystem: "github-actions"
- directory: "/"
- schedule:
- interval: "daily"
diff --git a/roles/systemli.apt_repositories/.github/workflows/main.yml b/roles/systemli.apt_repositories/.github/workflows/main.yml
deleted file mode 100644
index 0fef583..0000000
--- a/roles/systemli.apt_repositories/.github/workflows/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-name: Integration
-on:
- pull_request:
- push:
- schedule:
- # * is a special character in YAML so you have to quote this string
- # first of each month
- - cron: '0 0 1 * *'
-
-jobs:
-
- test:
- name: Molecule
- runs-on: ubuntu-latest
- strategy:
- matrix:
- distro:
- - debian10
- - debian9
- - ubuntu2004
- - ubuntu1804
-
- steps:
- - name: Check out the codebase.
- uses: actions/checkout@v2
-
- - name: Set up Python 3.
- uses: actions/setup-python@v2
- with:
- python-version: '3.x'
-
- - name: Install test dependencies.
- run: pip3 install ansible-lint==4.3.7 molecule[docker] molecule-goss yamllint
-
- - name: Run Molecule tests.
- run: molecule test -s docker
- env:
- PY_COLORS: '1'
- ANSIBLE_FORCE_COLOR: '1'
- MOLECULE_DISTRO: ${{ matrix.distro }}
-
- release:
- name: Ansible Galaxy
- needs:
- - test
- runs-on: ubuntu-latest
- steps:
- - name: Publish to Ansible Galaxy
- uses: robertdebock/galaxy-action@1.1.1
- with:
- galaxy_api_key: ${{ secrets.galaxy_api_key }}
- if: github.ref == 'refs/heads/master'
diff --git a/roles/systemli.apt_repositories/.yamllint b/roles/systemli.apt_repositories/.yamllint
deleted file mode 100644
index 8827676..0000000
--- a/roles/systemli.apt_repositories/.yamllint
+++ /dev/null
@@ -1,33 +0,0 @@
----
-# Based on ansible-lint config
-extends: default
-
-rules:
- braces:
- max-spaces-inside: 1
- level: error
- brackets:
- max-spaces-inside: 1
- level: error
- colons:
- max-spaces-after: -1
- level: error
- commas:
- max-spaces-after: -1
- level: error
- comments: disable
- comments-indentation: disable
- document-start: disable
- empty-lines:
- max: 3
- level: error
- hyphens:
- level: error
- indentation: disable
- key-duplicates: enable
- line-length: disable
- new-line-at-end-of-file: disable
- new-lines:
- type: unix
- trailing-spaces: disable
- truthy: disable
diff --git a/roles/systemli.apt_repositories/LICENSE b/roles/systemli.apt_repositories/LICENSE
deleted file mode 100644
index 733c072..0000000
--- a/roles/systemli.apt_repositories/LICENSE
+++ /dev/null
@@ -1,675 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- {one line to give the program's name and a brief idea of what it does.}
- Copyright (C) {year} {name of author}
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- {project} Copyright (C) {year} {fullname}
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-.
-
diff --git a/roles/systemli.apt_repositories/README.md b/roles/systemli.apt_repositories/README.md
deleted file mode 100644
index 6ca73db..0000000
--- a/roles/systemli.apt_repositories/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
-apt_repositories
-================
-
-[](https://github.com/systemli/ansible-role-apt_repositories/actions?query=workflow%3AIntegration)
-
-Add third-party repositories in Debian and derivates and pin their packages.
-Follows guide from [Debian wiki](https://wiki.debian.org/DebianRepository/UseThirdParty).
-
-It defaults to deb822, but also allows single line style ([manpage](https://manpages.debian.org/buster/apt/sources.list.5.en.html#THE_DEB_AND_DEB-SRC_TYPES:_GENERAL_FORMAT)).
-
-Requirements
-------------
-
-Debian 9+ or Ubuntu 18.04+. Other versions of Debian/Ubuntu might be supported as well, but aren't tested.
-
-Role Variables
---------------
-
-```
-apt_repositories:
- - url: https://...
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- MY_ARMORED_KEY
- ...
-```
-
-Further possible variables (and their defaults) are:
-
-```
-apt_repositories:
- - url: https://...
- name: "{{ item.url|urlsplit('hostname') }}"
- types: deb
- suites: "{{ ansible_distribution_release }}"
- components: main
- packages: []
- key_path: # a file path in the role `files` dir instead of `key`
-```
-
-Furthermore, it supports `preset` values. For an example see `vars/gitlab.yml`.
-Presets can be partially overridden.
-
-Current presets:
-
- - gitlab
- - grafana
- - jitsi
- - kubic
- - prosody
- - sury
-
-PRs welcome!
-
-Example Playbook
-----------------
-
-```
-- hosts: server
- roles:
- - systemli.apt_repositories
- vars:
- apt_repositories:
- - name: packages.gitlab.com
- url: https://packages.gitlab.com/gitlab/gitlab-ce/debian/
- key: "{{ gitlab_ce_key }}"
- packages:
- - gitlab-ce
-```
-
-or
-
-```
-- hosts: server
- roles:
- - systemli.apt_repositories
- vars:
- apt_repositories:
- - preset: gitlab
-```
-
-or just add it as a dependency for `ansible-galaxy`:
-
-```
-# meta/main.yml
-...
-dependencies:
- - role: systemli.apt_repositories
- vars:
- apt_repositories:
- - name: download.jitsi.org
- url: https://download.jitsi.org/
- key_path: jitsi-archive-keyring.gpg
- suites: stable/
- components: ''
- packages: "{{ jitsi_meet_packages }}"
-```
-
-License
--------
-
-GPLv3
-
-Author Information
-------------------
-
-systemli.org
diff --git a/roles/systemli.apt_repositories/defaults/main.yml b/roles/systemli.apt_repositories/defaults/main.yml
deleted file mode 100644
index 74e0ea6..0000000
--- a/roles/systemli.apt_repositories/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-
-apt_repositories_dependencies:
- - ca-certificates
- - file
- - gnupg
-
-apt_repositories_absent: []
diff --git a/roles/systemli.apt_repositories/files/gitlab.asc b/roles/systemli.apt_repositories/files/gitlab.asc
deleted file mode 100644
index d7b27ff..0000000
--- a/roles/systemli.apt_repositories/files/gitlab.asc
+++ /dev/null
@@ -1,52 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQINBF5dI2sBEACyGx5isuXqEV2zJGIx8rlJFCGw6A9g5Zk/9Hj50UpXNuOXlvQl
-7vq91m2CAh88Jad7OiMHIJJhX3ZJEOf/pUx/16QKumsaEyBk9CegxUG9jAQXsjL3
-WLyP0/l27UzNrOAFB+IUGjsoP+32gsSPiF5P485mirIJNojIAFzDQl3Uo4FbvqYU
-9AIRk5kV4nEYz1aKXAovIUsyqrztMtwlAG2xqdwVpGD2A4/w8I143qPGjjhEQmf4
-/EeS4CP9ztyLAx+01t2Acwa7Bygsb5KQPuT25UlevuxdDy/Rd5Zn/Lzwr2GQqjUs
-6GbM0t1HYjh57e4V+p0qMf6jxXfrDCbehgzFvGS0cx/d7hWHm5sXZIt3gxpjBQU2
-8MQWtrR8Y3nTBkCHwOKsXdsdD+YHxTq/yuvxl1Bcyshp29cGWv1es3wn2Z6i9tWe
-asGfVewJZiXFSEqSBGguEmLyCAZcWgXvHOV2kc66wG4d4TGIxmoo9GBqEtBftCVH
-MGDHt7zeg2hg6EIsx8/nj1duO5nBnbnik5iG8Xv46e/aw2p4DfTdfxHpjvyJudyN
-+UI5eSuuuXhyTZWedd5K1Q3+0CmACJ39t/NA6g7cZaw3boFKw3fTWIgOVTvC3y5v
-d7wsuyGUk9xNhHLcu6HjB4VPGzcTwQWMFf6+I4qGAUykU5mjTJchQeqmQwARAQAB
-tEJHaXRMYWIgQi5WLiAocGFja2FnZSByZXBvc2l0b3J5IHNpZ25pbmcga2V5KSA8
-cGFja2FnZXNAZ2l0bGFiLmNvbT6JAlQEEwEKAD4WIQT2QD9lRKOIY9qgtuA/AWGK
-UTEvPwUCXl0jawIbAwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRA/
-AWGKUTEvP3/+EACEpR4JwFz7fAZxKoTzq1xkv7JiVC1jDnfZ6U6tumMDSIBLqlZX
-Jv/lixuXC/GCnyiduqqpO14RCkHrCECzNeu7+lt+eiBUpOKvDgkNid6FLMoulu1w
-hDhQWss6+40dIWwa5i8maIFg6WOwIiI24PW9T+ywrf2Gfv9mB1YP3ob+8Cx1EVb/
-sf5mu1SGHvq2PqNvPeyY3W5vU7rB0Ax5Kcn3e0Z+tUSC8fV7TCg9hm9o2Ou928K4
-hmvdFfR0t47cXt1wmZ/pjrWcezVqeIrMJyWtje4hgcO3TSXsfvedEdYn8Q/BgVRw
-9KL4DkR1HSemSsPB4YyOwLscjV6p5OCPm0PhPPXUGIdImcQH7jYuEXNi5nnN5dX4
-197ooTB2UCk8r0QtnhcQUE2ph46mylcksbR0nKhGh5bYW3jfd0X+MP36reo+EFQ7
-Sw35f7P7QvZqnEE8rd5fX3GImKm38xJi+9bGb4IH8WuslUZUMapgQqqBfw1k5+mP
-BBqKWSdEsP1i7LBv9jVOaauMYQPLZcodx5prgjrB89V1hCKu+ZQl/hzoCwmeSruD
-LUqX/RFeleZO2VeKXh1a/VQ69ThqZ7gyXcrvHopPPGTr9IESoV9/qcZWplEccP9b
-FuY9t6HuSpcL7SlbsRVQ0NBQrsQeZR2J0YgvRc3JWgZAfcE5MK2jcoWKCLkCDQRe
-XSNrARAApHc0R4tfPntr5bhTuXU/iVLyxlAlzdEv1XsdDC8YBYehT72Jpvpphtq7
-sKVsuC59l8szojgO/gW//yKSuc3Gm5h58+HpIthjviGcvZXf/JcN7Pps0UGkLeQN
-2+IRZgbA6CAAPh2njE60v5iXgS91bxlSJi8GVHq1h28kbKQeqUYthu9yA2+8J4Fz
-ivYV2VImKLSxbQlc86tl6rMKKIIOph+N4WujJgd5HZ80n2qp1608X3+9CXvtBasX
-VCI2ZqCuWjffVCOQzsqRbJ6LQyMbgti/23F4Yqjqp+8eyiDNL6MyWJCBbtkW3Imi
-FHfR0sQIM6I7fk0hvt9ljx9SG6az/s3qWK5ceQ7XbJgCAVS4yVixfgIjWvNE5ggE
-QNOmeF9r76t0+0xsdMYJR6lxdaQI8AAYaoMXTkCXX2DrASOjjEP65Oq/d42xpSf9
-tG6XIq+xtRQyFWSMc+HfTlEHbfGReAEBlJBZhNoAwpuDckOC08vw7v2ybS5PYjJ4
-5Kzdwej0ga03Wg9hrAFd/lVa5eO4pzMLuexLplhpIbJjYwCUGS4cc/LQ2jq4fue5
-oxDpWPN+JrBH8oyqy91b10e70ohHppN8dQoCa79ySgMxDim92oHCkGnaVyULYDqJ
-zy0zqbi3tJu639c4pbcggxtAAr0I3ot8HPhKiNJRA6u8HTm//xEAEQEAAYkCPAQY
-AQoAJhYhBPZAP2VEo4hj2qC24D8BYYpRMS8/BQJeXSNrAhsMBQkDwmcAAAoJED8B
-YYpRMS8/vzQP/iO0poRR9ZYjonP5GGIARRnF+xpWCRTZVSHLcAfS0ujZ7ekXoeeS
-JNMJ/7T4Yk1EJ9MTFZ83Jj4UybKO3Rw+/iPmcPpqUQGaEReYLlx7SyxmsOBXf+Q9
-PtyUmGO47tL+eAPInYyxsWGib/EeOw4KQrfByAIPWu0aeNeXadzxBLIkqD863H5q
-nTDrXOw6SLprlGt2zlc+XQKDv3DZez6wTcp205xdaNs55Bfk9pmKUS/ey3ZP7GvC
-CDEGxuWulVSKL2DYtq0sEZD7pQYSy8gBTqXLQAyfmPDcxe9Lczhk3UYrUUomN1/w
-+VE09q75yNqkaHdckVt+aYAHMgQ0ilmwTg6+OlEK+ZQkUT94viB6YW7B0M4uzols
-9FSDxXea/uKn75jTSkA3GAXf7O5hqbkDDctJbtO2pPdLDxbXN95iZ9xpgRE3exGl
-ucjgV5XGpLO4XXf0GTzug/TJAtNljJ/44+6meO0WwOwLMMhAJVxcp1fpbtgRmrcJ
-8bAsCkV5EO8SeQZDu2C8I9tMGlJ1VLTAfv6Lv2Z89B1AOOweGz4I48i9lux+HdXd
-HewnA37zx0XNjNQmqiG85UWUusnDxF0Je2jEhGIpHK/KdyI1BfNzX3d5HVoM1VE3
-THtRZHnetoMek8L5x/ciYQNIt40rQ6MHtPEo1ZC4346DP6eJmeX1DGGI
-=91uZ
------END PGP PUBLIC KEY BLOCK-----
diff --git a/roles/systemli.apt_repositories/files/gitlab.gpg b/roles/systemli.apt_repositories/files/gitlab.gpg
deleted file mode 100644
index e38045d..0000000
Binary files a/roles/systemli.apt_repositories/files/gitlab.gpg and /dev/null differ
diff --git a/roles/systemli.apt_repositories/files/grafana.gpg b/roles/systemli.apt_repositories/files/grafana.gpg
deleted file mode 100644
index c74f292..0000000
--- a/roles/systemli.apt_repositories/files/grafana.gpg
+++ /dev/null
@@ -1,30 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mQENBFiHXVIBCADr3VDEAGpq9Sg/xrPVu1GGqWGXdbnTbbNKeveCtFHZz7/GSATW
-iwiY1skvlAOBiIKCqJEji0rZZgd8WxuhdfugiCBk1hDTMWCpjI0P+YymV77jHjYB
-jHrKNlhb+aLjEd9Gf2EtbKUT1fvGUkzlVrcRGSX/XR9MBZlgja7NIyuVbn3uwZQ4
-jflWSNSlvMpohNxTFkrBFTRrCJXhbDLfCS46+so22CP3+1VQyqJ7/6RWK9v9KYdS
-AVNgILXMggSrMqha4WA1a/ktczVQXNtP8IuPxTdp9pNYsklOTmrFVeq3mXsvWh9Q
-lIhpYHIZlTZ5wVBq4wTRchsXC5MubIhz+ASDABEBAAG0GkdyYWZhbmEgPGluZm9A
-Z3JhZmFuYS5jb20+iQE4BBMBAgAiBQJYh11SAhsDBgsJCAcDAgYVCAIJCgsEFgID
-AQIeAQIXgAAKCRCMjDTFJAmMthxJB/9Id6JrwqRkJW+eSBb71FGQmRsJvNFR8J+3
-NPVhJNkTFFOM7TnjAMUIv+LYEURqGcceTNAN1aHq/7n/8ybXucCS0CnDYyNYpyVs
-tWJ3FOQK3jPrmziDCWPQATqMM/Z2auXVFWrDFqfh2xKZNjuix0w2nyuWB8U0CG2U
-89w+ksPJblGGU5xLPPzDQoAqyZXY3gpGGTkCuohMq2RWYbp/QJSQagYhQkKZoJhr
-XJlnw4At6R1A5UUPzDw6WJqMRkGrkieE6ApIgf1vZSmnLRpXkqquRTAEyGT8Pugg
-ee6YkD19/LK6ED6gn32StY770U9ti560U7oRjrOPK/Kjp4+qBtkQuQENBFiHXVIB
-CACz4hO1g/4fKO9QWLcbSWpB75lbNgt1kHXP0UcW8TE0DIgqrifod09lC85adIz0
-zdhs+00lLqckM5wNbp2r+pd5rRaxOsMw2V+c/y1Pt3qZxupmPc5l5lL6jzbEVR9g
-ygPaE+iabTk9Np2OZQ7Qv5gIDzivqK2mRHXaHTzoQn2dA/3xpFcxnen9dvu7LCpA
-CdScSj9/UIRKk9PHIgr2RJhcjzLx0u1PxN9MEqfIsIJUUgZOoDsr8oCs44PGGIMm
-cK1CKALLLiC4ZM58B56jRyXo18MqB6VYsC1X9wkcIs72thL3tThXO70oDGcoXzoo
-ywAHBH63EzEyduInOhecDIKlABEBAAGJAR8EGAECAAkFAliHXVICGwwACgkQjIw0
-xSQJjLbWSwf/VIM5wEFBY4QLGUAfqfjDyfGXpcha58Y24Vv3n6MwJqnCIbTAaeWf
-30CZ/wHg3NNIMB7I31vgmMOEbHQdv0LPTi9TG205VQeehcpNtZRZQ0D8TIetbxyi
-Emmn9osig9U3/7jaAWBabE/9bGx4TF3eLlEH9wmFrNYeXvgRqmyqVoqhIMCNAAOY
-REYyHyy9mzr9ywkwl0aroBqhzKIPyFlatZy9oRKllY/CCKO9RJy4DZidLphuwzqU
-ymdQ1sqe5nKvwG5GvcncPc3O7LMevDBWnpNNkgERnVxCqpm90TuE3ONbirnU4+/S
-tUsVU1DERc1fjOCnAm4pKIlNYphISIE7OQ==
-=0pMC
------END PGP PUBLIC KEY BLOCK-----
diff --git a/roles/systemli.apt_repositories/files/jitsi-archive-keyring.gpg b/roles/systemli.apt_repositories/files/jitsi-archive-keyring.gpg
deleted file mode 100644
index cf70caa..0000000
Binary files a/roles/systemli.apt_repositories/files/jitsi-archive-keyring.gpg and /dev/null differ
diff --git a/roles/systemli.apt_repositories/files/kubic.asc b/roles/systemli.apt_repositories/files/kubic.asc
deleted file mode 100644
index 704c04b..0000000
--- a/roles/systemli.apt_repositories/files/kubic.asc
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.4.5 (GNU/Linux)
-
-mQENBFtkV0cBCADStSTCG5qgYtzmWfymHZqxxhfwfS6fdHJcbGUeXsI5dxjeCWhs
-XarZm6rWZOd5WfSmpXhbKOyM6Ll+6bpSl5ICHLa6fcpizYWEPa8fpg9EGl0cF12G
-GgVLnnOZ6NIbsoW0LHt2YN0jn8xKVwyPp7KLHB2paZh+KuURERG406GXY/DgCxUx
-Ffgdelym/gfmt3DSq6GAQRRGHyucMvPYm53r+jVcKsf2Bp6E1XAfqBrD5r0maaCU
-Wvd7bi0B2Q0hIX0rfDCBpl4rFqvyaMPgn+Bkl6IW37zCkWIXqf1E5eDm/XzP881s
-+yAvi+JfDwt7AE+Hd2dSf273o3WUdYJGRwyZABEBAAG0OGRldmVsOmt1YmljIE9C
-UyBQcm9qZWN0IDxkZXZlbDprdWJpY0BidWlsZC5vcGVuc3VzZS5vcmc+iQE+BBMB
-CAAoBQJfcJJOAhsDBQkIKusHBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRBN
-ZDkDdQYKpB0xCACmtCT6ruPiQa4l0DEptZ+u3NNbZfSVGH4fE4hyTjLbzrCxqcoh
-xJvDKxspuJ85wWFWMtl57+lFFE1KP0AX2XTT+/v2vN1PIfwgOSw3yp2sgWuIXFAi
-89YSjSh8G0SGAH90A9YFMnTbllzGoGURjSX03iasW3A408ljbDehA6rpS3t3FD7P
-PnUF6204orYu00Qvc54an/xVJzxupb69MKW5EeK7x8MJnIToT8hIdOdGVD6axsis
-x+1U71oMK1gBke7p4QPUdhJFpSUd6kT8bcO+7rYouoljFNYkUfwnqtUn7525fkfg
-uDqqXvOJMpJ/sK1ajHOeehp5T4Q45L/qUCb3iEYEExECAAYFAltkV0cACgkQOzAR
-t2udZSOoswCdF44NTN09DwhPFbNYhEMb9juP5ykAn0bcELvuKmgDwEwZMrPQkG8t
-Pu9n
-=42uC
------END PGP PUBLIC KEY BLOCK-----
diff --git a/roles/systemli.apt_repositories/files/prosody-debian-packages.gpg b/roles/systemli.apt_repositories/files/prosody-debian-packages.gpg
deleted file mode 100644
index 117d429..0000000
Binary files a/roles/systemli.apt_repositories/files/prosody-debian-packages.gpg and /dev/null differ
diff --git a/roles/systemli.apt_repositories/files/sury.gpg b/roles/systemli.apt_repositories/files/sury.gpg
deleted file mode 100644
index d244923..0000000
Binary files a/roles/systemli.apt_repositories/files/sury.gpg and /dev/null differ
diff --git a/roles/systemli.apt_repositories/meta/.galaxy_install_info b/roles/systemli.apt_repositories/meta/.galaxy_install_info
deleted file mode 100644
index 9abccc9..0000000
--- a/roles/systemli.apt_repositories/meta/.galaxy_install_info
+++ /dev/null
@@ -1,2 +0,0 @@
-install_date: Tue Apr 20 16:13:57 2021
-version: master
diff --git a/roles/systemli.apt_repositories/meta/main.yml b/roles/systemli.apt_repositories/meta/main.yml
deleted file mode 100644
index 3f8011f..0000000
--- a/roles/systemli.apt_repositories/meta/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-galaxy_info:
- role_name: apt_repositories
- author: systemli.org
- description: Add third-party repositories on Debian in a secure way
- license:
- - GPL-3.0-only
- min_ansible_version: 2.4
- platforms:
- - name: Debian
- versions:
- - stretch
- - buster
- - name: Ubuntu
- versions:
- - bionic
- - focal
-
- galaxy_tags:
- - apt
- - https
- - secure
-
-dependencies: []
diff --git a/roles/systemli.apt_repositories/molecule/default/INSTALL.rst b/roles/systemli.apt_repositories/molecule/default/INSTALL.rst
deleted file mode 100644
index 6a44bde..0000000
--- a/roles/systemli.apt_repositories/molecule/default/INSTALL.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-*******
-Docker driver installation guide
-*******
-
-Requirements
-============
-
-* Docker Engine
-
-Install
-=======
-
-Please refer to the `Virtual environment`_ documentation for installation best
-practices. If not using a virtual environment, please consider passing the
-widely recommended `'--user' flag`_ when invoking ``pip``.
-
-.. _Virtual environment: https://virtualenv.pypa.io/en/latest/
-.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site
-
-.. code-block:: bash
-
- $ pip install 'molecule[docker]'
diff --git a/roles/systemli.apt_repositories/molecule/default/converge.yml b/roles/systemli.apt_repositories/molecule/default/converge.yml
deleted file mode 100644
index b605da4..0000000
--- a/roles/systemli.apt_repositories/molecule/default/converge.yml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-- name: Converge
- hosts: all
- become: true
- roles:
- - ansible-role-apt_repositories
- vars:
- apt_repositories:
- # all vars given
- - name: packages.gitlab.com
- url: https://packages.gitlab.com/gitlab/gitlab-ce/{{ ansible_distribution | lower }}/
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
-
- mQINBF5dI2sBEACyGx5isuXqEV2zJGIx8rlJFCGw6A9g5Zk/9Hj50UpXNuOXlvQl
- 7vq91m2CAh88Jad7OiMHIJJhX3ZJEOf/pUx/16QKumsaEyBk9CegxUG9jAQXsjL3
- WLyP0/l27UzNrOAFB+IUGjsoP+32gsSPiF5P485mirIJNojIAFzDQl3Uo4FbvqYU
- 9AIRk5kV4nEYz1aKXAovIUsyqrztMtwlAG2xqdwVpGD2A4/w8I143qPGjjhEQmf4
- /EeS4CP9ztyLAx+01t2Acwa7Bygsb5KQPuT25UlevuxdDy/Rd5Zn/Lzwr2GQqjUs
- 6GbM0t1HYjh57e4V+p0qMf6jxXfrDCbehgzFvGS0cx/d7hWHm5sXZIt3gxpjBQU2
- 8MQWtrR8Y3nTBkCHwOKsXdsdD+YHxTq/yuvxl1Bcyshp29cGWv1es3wn2Z6i9tWe
- asGfVewJZiXFSEqSBGguEmLyCAZcWgXvHOV2kc66wG4d4TGIxmoo9GBqEtBftCVH
- MGDHt7zeg2hg6EIsx8/nj1duO5nBnbnik5iG8Xv46e/aw2p4DfTdfxHpjvyJudyN
- +UI5eSuuuXhyTZWedd5K1Q3+0CmACJ39t/NA6g7cZaw3boFKw3fTWIgOVTvC3y5v
- d7wsuyGUk9xNhHLcu6HjB4VPGzcTwQWMFf6+I4qGAUykU5mjTJchQeqmQwARAQAB
- tEJHaXRMYWIgQi5WLiAocGFja2FnZSByZXBvc2l0b3J5IHNpZ25pbmcga2V5KSA8
- cGFja2FnZXNAZ2l0bGFiLmNvbT6JAlQEEwEKAD4WIQT2QD9lRKOIY9qgtuA/AWGK
- UTEvPwUCXl0jawIbAwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRA/
- AWGKUTEvP3/+EACEpR4JwFz7fAZxKoTzq1xkv7JiVC1jDnfZ6U6tumMDSIBLqlZX
- Jv/lixuXC/GCnyiduqqpO14RCkHrCECzNeu7+lt+eiBUpOKvDgkNid6FLMoulu1w
- hDhQWss6+40dIWwa5i8maIFg6WOwIiI24PW9T+ywrf2Gfv9mB1YP3ob+8Cx1EVb/
- sf5mu1SGHvq2PqNvPeyY3W5vU7rB0Ax5Kcn3e0Z+tUSC8fV7TCg9hm9o2Ou928K4
- hmvdFfR0t47cXt1wmZ/pjrWcezVqeIrMJyWtje4hgcO3TSXsfvedEdYn8Q/BgVRw
- 9KL4DkR1HSemSsPB4YyOwLscjV6p5OCPm0PhPPXUGIdImcQH7jYuEXNi5nnN5dX4
- 197ooTB2UCk8r0QtnhcQUE2ph46mylcksbR0nKhGh5bYW3jfd0X+MP36reo+EFQ7
- Sw35f7P7QvZqnEE8rd5fX3GImKm38xJi+9bGb4IH8WuslUZUMapgQqqBfw1k5+mP
- BBqKWSdEsP1i7LBv9jVOaauMYQPLZcodx5prgjrB89V1hCKu+ZQl/hzoCwmeSruD
- LUqX/RFeleZO2VeKXh1a/VQ69ThqZ7gyXcrvHopPPGTr9IESoV9/qcZWplEccP9b
- FuY9t6HuSpcL7SlbsRVQ0NBQrsQeZR2J0YgvRc3JWgZAfcE5MK2jcoWKCLkCDQRe
- XSNrARAApHc0R4tfPntr5bhTuXU/iVLyxlAlzdEv1XsdDC8YBYehT72Jpvpphtq7
- sKVsuC59l8szojgO/gW//yKSuc3Gm5h58+HpIthjviGcvZXf/JcN7Pps0UGkLeQN
- 2+IRZgbA6CAAPh2njE60v5iXgS91bxlSJi8GVHq1h28kbKQeqUYthu9yA2+8J4Fz
- ivYV2VImKLSxbQlc86tl6rMKKIIOph+N4WujJgd5HZ80n2qp1608X3+9CXvtBasX
- VCI2ZqCuWjffVCOQzsqRbJ6LQyMbgti/23F4Yqjqp+8eyiDNL6MyWJCBbtkW3Imi
- FHfR0sQIM6I7fk0hvt9ljx9SG6az/s3qWK5ceQ7XbJgCAVS4yVixfgIjWvNE5ggE
- QNOmeF9r76t0+0xsdMYJR6lxdaQI8AAYaoMXTkCXX2DrASOjjEP65Oq/d42xpSf9
- tG6XIq+xtRQyFWSMc+HfTlEHbfGReAEBlJBZhNoAwpuDckOC08vw7v2ybS5PYjJ4
- 5Kzdwej0ga03Wg9hrAFd/lVa5eO4pzMLuexLplhpIbJjYwCUGS4cc/LQ2jq4fue5
- oxDpWPN+JrBH8oyqy91b10e70ohHppN8dQoCa79ySgMxDim92oHCkGnaVyULYDqJ
- zy0zqbi3tJu639c4pbcggxtAAr0I3ot8HPhKiNJRA6u8HTm//xEAEQEAAYkCPAQY
- AQoAJhYhBPZAP2VEo4hj2qC24D8BYYpRMS8/BQJeXSNrAhsMBQkDwmcAAAoJED8B
- YYpRMS8/vzQP/iO0poRR9ZYjonP5GGIARRnF+xpWCRTZVSHLcAfS0ujZ7ekXoeeS
- JNMJ/7T4Yk1EJ9MTFZ83Jj4UybKO3Rw+/iPmcPpqUQGaEReYLlx7SyxmsOBXf+Q9
- PtyUmGO47tL+eAPInYyxsWGib/EeOw4KQrfByAIPWu0aeNeXadzxBLIkqD863H5q
- nTDrXOw6SLprlGt2zlc+XQKDv3DZez6wTcp205xdaNs55Bfk9pmKUS/ey3ZP7GvC
- CDEGxuWulVSKL2DYtq0sEZD7pQYSy8gBTqXLQAyfmPDcxe9Lczhk3UYrUUomN1/w
- +VE09q75yNqkaHdckVt+aYAHMgQ0ilmwTg6+OlEK+ZQkUT94viB6YW7B0M4uzols
- 9FSDxXea/uKn75jTSkA3GAXf7O5hqbkDDctJbtO2pPdLDxbXN95iZ9xpgRE3exGl
- ucjgV5XGpLO4XXf0GTzug/TJAtNljJ/44+6meO0WwOwLMMhAJVxcp1fpbtgRmrcJ
- 8bAsCkV5EO8SeQZDu2C8I9tMGlJ1VLTAfv6Lv2Z89B1AOOweGz4I48i9lux+HdXd
- HewnA37zx0XNjNQmqiG85UWUusnDxF0Je2jEhGIpHK/KdyI1BfNzX3d5HVoM1VE3
- THtRZHnetoMek8L5x/ciYQNIt40rQ6MHtPEo1ZC4346DP6eJmeX1DGGI
- =91uZ
- -----END PGP PUBLIC KEY BLOCK-----
- packages:
- - gitlab-ce
- # full preset
- - preset: gitlab
- # override preset partially (binary key)
- - preset: gitlab
- key_path: gitlab.gpg
diff --git a/roles/systemli.apt_repositories/molecule/default/molecule.yml b/roles/systemli.apt_repositories/molecule/default/molecule.yml
deleted file mode 100644
index 98254ca..0000000
--- a/roles/systemli.apt_repositories/molecule/default/molecule.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-driver:
- name: vagrant
- provider:
- name: virtualbox
-platforms:
- - name: bionic64-repo
- box: ubuntu/bionic64
-lint: |
- set -e
- yamllint .
- ansible-lint
-provisioner:
- name: ansible
- become: True
diff --git a/roles/systemli.apt_repositories/molecule/default/prepare.yml b/roles/systemli.apt_repositories/molecule/default/prepare.yml
deleted file mode 100644
index 4b18d48..0000000
--- a/roles/systemli.apt_repositories/molecule/default/prepare.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Prepare
- hosts: all
- gather_facts: false
- tasks:
- - name: Install python for Ansible
- raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
- become: true
- changed_when: false
diff --git a/roles/systemli.apt_repositories/molecule/default/roles/ansible-role-apt_repositories b/roles/systemli.apt_repositories/molecule/default/roles/ansible-role-apt_repositories
deleted file mode 120000
index a8a4f8c..0000000
--- a/roles/systemli.apt_repositories/molecule/default/roles/ansible-role-apt_repositories
+++ /dev/null
@@ -1 +0,0 @@
-../../..
\ No newline at end of file
diff --git a/roles/systemli.apt_repositories/molecule/default/tests/test_default.py b/roles/systemli.apt_repositories/molecule/default/tests/test_default.py
deleted file mode 100644
index 9e0e189..0000000
--- a/roles/systemli.apt_repositories/molecule/default/tests/test_default.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-
-import testinfra.utils.ansible_runner
-
-testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
- os.environ['MOLECULE_INVENTORY_FILE']
-).get_hosts('all')
-
-
-def test_hosts_file(host):
- f = host.file('/etc/hosts')
-
- assert f.exists
- assert f.user == 'root'
- assert f.group == 'root'
diff --git a/roles/systemli.apt_repositories/molecule/default/tests/test_default.yml b/roles/systemli.apt_repositories/molecule/default/tests/test_default.yml
deleted file mode 100644
index 7f40386..0000000
--- a/roles/systemli.apt_repositories/molecule/default/tests/test_default.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Molecule managed
-
----
-file:
- /etc/hosts:
- exists: true
- owner: root
- group: root
diff --git a/roles/systemli.apt_repositories/molecule/default/verify.yml b/roles/systemli.apt_repositories/molecule/default/verify.yml
deleted file mode 100644
index a82dd6f..0000000
--- a/roles/systemli.apt_repositories/molecule/default/verify.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# This is an example playbook to execute Ansible tests.
-
-- name: Verify
- hosts: all
- tasks:
- - name: Example assertion
- assert:
- that: true
diff --git a/roles/systemli.apt_repositories/molecule/docker/INSTALL.rst b/roles/systemli.apt_repositories/molecule/docker/INSTALL.rst
deleted file mode 100644
index 1a063f0..0000000
--- a/roles/systemli.apt_repositories/molecule/docker/INSTALL.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-*******
-Install
-*******
-
-This set of playbooks have specific dependencies on Ansible due to the modules
-being used.
-
-Requirements
-============
-
-* Ansible 2.2 or higher
-* Docker Engine
-* docker-py
-
-Install OS dependencies on Debian 9 (Stretch)
-
-.. code-block:: bash
-
- # apt-get update
- # apt-get install -y python-pip libssl-dev python-docker
- ## If installing Molecule from source.
- # apt-get install -y libffi-dev git
-
-Install OS dependencies on Ubuntu 16.x
-
-.. code-block:: bash
-
- $ sudo apt-get update
- $ sudo apt-get install -y python-pip libssl-dev docker-engine
- # If installing Molecule from source.
- $ sudo apt-get install -y libffi-dev git
-
-Install using pip:
-
-.. code-block:: bash
-
- $ sudo pip install ansible
- $ sudo pip install docker-py
- $ sudo pip install molecule --pre
diff --git a/roles/systemli.apt_repositories/molecule/docker/molecule.yml b/roles/systemli.apt_repositories/molecule/docker/molecule.yml
deleted file mode 100644
index 4ce557b..0000000
--- a/roles/systemli.apt_repositories/molecule/docker/molecule.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-driver:
- name: docker
-platforms:
- - name: instance
- image: "geerlingguy/docker-${MOLECULE_DISTRO:-debian10}-ansible:latest"
- command: ${MOLECULE_DOCKER_COMMAND:-""}
- volumes:
- - /sys/fs/cgroup:/sys/fs/cgroup:ro
- privileged: true
- pre_build_image: true
-lint: |
- set -e
- yamllint .
- ansible-lint
-provisioner:
- name: ansible
- playbooks:
- converge: ../default/converge.yml
diff --git a/roles/systemli.apt_repositories/tasks/key_path.yml b/roles/systemli.apt_repositories/tasks/key_path.yml
deleted file mode 100644
index 6b3ccab..0000000
--- a/roles/systemli.apt_repositories/tasks/key_path.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-
-- name: Copy key to remote machine
- copy:
- src: "{{ _config.key_path }}"
- dest: /tmp/{{ _name }}.gpg
- owner: root
- group: root
- mode: 0644
- check_mode: false
- changed_when: false
-
-- name: Find out whether {{ _config.key_path }} is armored
- stat:
- path: /tmp/{{ _name }}.gpg
- register: _file
-
-- name: Add armored key for {{ _name }} by path
- apt_key:
- file: /tmp/{{ _name }}.gpg
- keyring: /usr/share/keyrings/{{ _name }}.gpg
- when: "'ascii' in _file.stat.charset"
-
-- name: Add binary key for {{ _name }} by path
- copy:
- src: "{{ _config.key_path }}"
- dest: /usr/share/keyrings/{{ _name }}.gpg
- owner: root
- group: root
- mode: 0644
- when: _file.stat.charset == 'binary'
diff --git a/roles/systemli.apt_repositories/tasks/main.yml b/roles/systemli.apt_repositories/tasks/main.yml
deleted file mode 100644
index dad3d3d..0000000
--- a/roles/systemli.apt_repositories/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-
-- name: Remove old repositories
- file:
- path: "/etc/apt/sources.list.d/{{ item }}"
- state: absent
- with_items: "{{ apt_repositories_absent }}"
-
-- name: Update cache
- apt:
- update_cache: true
- changed_when: false
-
-- name: Install dependecies
- apt:
- pkg: "{{ apt_repositories_dependencies }}"
-
-- name: Ensure we can transport via https
- apt:
- pkg: apt-transport-https
- when: ((ansible_distribution == 'Debian' and ansible_distribution_major_version|int < 10) or
- (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version|int < 18))
-
-- include_tasks: repo.yml
- loop: "{{ apt_repositories }}"
-
-- name: Update cache
- apt:
- update_cache: true
- changed_when: false
diff --git a/roles/systemli.apt_repositories/tasks/repo.yml b/roles/systemli.apt_repositories/tasks/repo.yml
deleted file mode 100644
index 9374ac2..0000000
--- a/roles/systemli.apt_repositories/tasks/repo.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-
-- name: Clear preset
- include_vars: null.yml
-
-- name: Read in preset
- include_vars:
- file: "{{ item.preset }}.yml"
- name: _preset
- when: item.preset is defined
-
-# - debug:
-# var: _preset
-
-- name: combine preset with given config
- set_fact:
- _config: "{{ _preset|default({})|combine(item) }}"
-
-# - debug:
-# var: _config
-
-- name: ensure we have a repo name 1
- set_fact:
- _name: "{{ _config.name }}"
- when: _config.name is defined and _config.name
-
-- name: ensure we have a repo name 2
- set_fact:
- _name: "{{ _config.url|urlsplit('hostname') }}"
- when: _config.name is undefined or not _config.name
-
-- name: Add key for {{ _name }} by content
- apt_key:
- data: "{{ _config.key }}"
- keyring: /usr/share/keyrings/{{ _name }}.gpg
- when: _config.key is defined and _config.key
-
-- import_tasks: key_path.yml
- when: _config.key is undefined or not _config.key
-
-- name: Register final keyring stats
- stat:
- path: /usr/share/keyrings/{{ _name }}.gpg
- register: _f
-
-# - debug:
-# var: _f
-
-- name: Ensure we have binary key
- assert:
- that:
- - _f.stat.exists
- - _f.stat.charset == 'binary'
- when: not ansible_check_mode
-
-- name: Add repository {{ _name }}
- template:
- src: repo.sources.j2
- dest: /etc/apt/sources.list.d/{{ _name }}.sources
- owner: root
- group: root
- mode: 0644
- when: _config.style is undefined or _config.style == "deb822"
-
-- name: Add repository {{ _name }} in single line style
- template:
- src: repo.list.j2
- dest: /etc/apt/sources.list.d/{{ _name }}.list
- owner: root
- group: root
- mode: 0644
- when: _config.style is defined and _config.style == "line"
-
-# apt currently only support meaningful pinning by hostname
-# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=858406
-
-- name: Add pinning preferences for {{ _name }}
- template:
- src: pref.j2
- dest: /etc/apt/preferences.d/{{ _name }}.pref
- owner: root
- group: root
- mode: 0644
diff --git a/roles/systemli.apt_repositories/templates/pref.j2 b/roles/systemli.apt_repositories/templates/pref.j2
deleted file mode 100644
index 64e33cf..0000000
--- a/roles/systemli.apt_repositories/templates/pref.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-# Packages with pin priority = 100
-# causes a version to be installed unless there is a version available
-# belonging to some other distribution or the installed version is more
-# recent
-
-Package: *
-Pin: origin "{{ _config.url|urlsplit('hostname') }}"
-Pin-Priority: 100
-{% if _config.packages is defined %}
-{% for pkg in _config.packages %}
-
-Package: {{ pkg }}
-Pin: origin "{{ _config.url|urlsplit('hostname') }}"
-Pin-Priority: 500
-{% endfor %}
-{% endif %}
diff --git a/roles/systemli.apt_repositories/templates/repo.list.j2 b/roles/systemli.apt_repositories/templates/repo.list.j2
deleted file mode 100644
index d8deb33..0000000
--- a/roles/systemli.apt_repositories/templates/repo.list.j2
+++ /dev/null
@@ -1 +0,0 @@
-deb [signed-by=/usr/share/keyrings/{{ _name }}.gpg] {{ _config.url }} {{ _config.suites }}
diff --git a/roles/systemli.apt_repositories/templates/repo.sources.j2 b/roles/systemli.apt_repositories/templates/repo.sources.j2
deleted file mode 100644
index 059e186..0000000
--- a/roles/systemli.apt_repositories/templates/repo.sources.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-Types: {{ _config.types|default('deb') }}
-URIs: {{ _config.url }}
-Suites: {{ _config.suites|default(ansible_distribution_release) }}
-Components: {{ _config.components|default('main') }}
-Signed-by: /usr/share/keyrings/{{ _name }}.gpg
-{% if _config.arch is defined %}
-Architecture: {{ _config.arch }}
-{% endif %}
diff --git a/roles/systemli.apt_repositories/vars/gitlab.yml b/roles/systemli.apt_repositories/vars/gitlab.yml
deleted file mode 100644
index 362828c..0000000
--- a/roles/systemli.apt_repositories/vars/gitlab.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-name: packages.gitlab.com
-url: https://packages.gitlab.com/gitlab/gitlab-ce/{{ ansible_distribution|lower }}/
-key_path: gitlab.asc
-packages:
- - gitlab-ce
diff --git a/roles/systemli.apt_repositories/vars/grafana.yml b/roles/systemli.apt_repositories/vars/grafana.yml
deleted file mode 100644
index e6dad99..0000000
--- a/roles/systemli.apt_repositories/vars/grafana.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Name of the sources file
-name: packages.grafana.com
-# Url of the Debian Repo
-url: https://packages.grafana.com/oss/deb
-# Name of the key file under files
-key_path: grafana.gpg
-# Name of the suite
-suites: stable
-# List of the components
-components: 'main'
diff --git a/roles/systemli.apt_repositories/vars/jitsi.yml b/roles/systemli.apt_repositories/vars/jitsi.yml
deleted file mode 100644
index 56f1efc..0000000
--- a/roles/systemli.apt_repositories/vars/jitsi.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-name: download.jitsi.org
-url: https://download.jitsi.org/
-key_path: jitsi-archive-keyring.gpg
-suites: stable/
-components: ''
diff --git a/roles/systemli.apt_repositories/vars/kubic.yml b/roles/systemli.apt_repositories/vars/kubic.yml
deleted file mode 100644
index 2e7f66e..0000000
--- a/roles/systemli.apt_repositories/vars/kubic.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-name: kubic
-style: line
-url: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{ 'x' if ansible_distribution == 'Ubuntu' else ''}}{{ ansible_distribution }}_{{ ansible_distribution_version }}/
-suites: /
-key_path: kubic.asc
-packages:
- - fuse-overlayfs
- - podman
- - slirp4netns
- - libseccomp2
diff --git a/roles/systemli.apt_repositories/vars/null.yml b/roles/systemli.apt_repositories/vars/null.yml
deleted file mode 100644
index b96b380..0000000
--- a/roles/systemli.apt_repositories/vars/null.yml
+++ /dev/null
@@ -1 +0,0 @@
-_preset: {}
diff --git a/roles/systemli.apt_repositories/vars/prosody.yml b/roles/systemli.apt_repositories/vars/prosody.yml
deleted file mode 100644
index 12e802f..0000000
--- a/roles/systemli.apt_repositories/vars/prosody.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-url: https://packages.prosody.im/debian
-key_path: prosody-debian-packages.gpg
-packages:
- - prosody
- - "lua-*"
diff --git a/roles/systemli.apt_repositories/vars/sury.yml b/roles/systemli.apt_repositories/vars/sury.yml
deleted file mode 100644
index 2bbae96..0000000
--- a/roles/systemli.apt_repositories/vars/sury.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-url: https://packages.sury.org/php/
-key_path: sury.asc