# toallab-openstack/files/openstack_user_config.yml
cidr_networks:
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

used_ips:
  - "172.29.236.1,172.29.236.20"
  - "172.29.240.1,172.29.240.20"
  - "172.29.244.1,172.29.244.20"

global_overrides:
  internal_lb_vip_address: 172.29.236.11
  external_lb_vip_address: 172.29.236.10
  management_bridge: "br-mgmt"
  tunnel_bridge: "br-tunnel"
  neutron_provider_networks:
    network_types: "geneve"
    network_geneve_ranges: "1:1000"
    network_vlan_ranges: "public"
    network_mappings: "public:br-publicnet"
    network_interface_mappings: "br-publicnet:bond1"
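  # The mappings above wire the "public" physical network to the
  # br-publicnet bridge, which is in turn attached to bond1 on the hosts;
  # tenant networks use Geneve IDs 1-1000.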
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_management_address: true
        is_container_address: true
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        container_mtu: "9000"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          - repo_container
    - network:
        container_bridge: "br-tunnel"
        container_type: "veth"
        container_interface: "eth10"
        container_mtu: "9000"
        ip_from_q: "tunnel"
        net_name: "tunnel"
        type: "geneve"
        range: "1:1000"
        group_binds:
          - network_hosts
          - compute_hosts
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        container_mtu: "9000"
        type: "vlan"
        range: "700:899"
        net_name: "vlan"
        host_bind_override: "ens2f1"
        group_binds:
          - compute_hosts
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        container_mtu: "9000"
        type: "vlan"
        range: "700:899"
        net_name: "vlan"
        host_bind_override: "ens3"
        group_binds:
          - network_hosts
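  # The two br-vlan entries above differ only in host_bind_override: compute
  # hosts carry VLANs 700-899 on ens2f1, network hosts on ens3.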
  # Workaround for https://github.com/eventlet/eventlet/issues/781
  # venv_default_pip_packages:
  #   - eventlet==0.33.3
  neutron_plugin_type: ml2.ovn
  neutron_plugin_base:
    - ovn-router
  neutron_ml2_drivers_type: "vlan,local,geneve"
  # neutron_plugin_type: ml2.ovs
  # neutron_ml2_drivers_type: "flat,vlan,vxlan"
  # neutron_plugin_base:
  #   - router
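  # The commented ml2.ovs values above are the legacy ML2/OVS equivalents,
  # kept for reference; with ml2.ovn, geneve carries tenant traffic and the
  # "vlan" type serves the provider networks defined above.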

###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  infra1:
    ip: 172.29.236.11

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  infra1:
    ip: 172.29.236.11

# load balancer
haproxy_hosts:
  infra1:
    ip: 172.29.236.11
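
# NOTE: every control-plane group in this file points at the single infra1
# host (172.29.236.11, which is also the internal_lb_vip_address), so this
# is a one-controller lab deployment with no control-plane HA.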

###
### OpenStack
###

# keystone
identity_hosts:
  infra1:
    ip: 172.29.236.11

# cinder api services
storage-infra_hosts:
  infra1:
    ip: 172.29.236.11

# glance
image_hosts:
  infra1:
    ip: 172.29.236.11

# placement
placement-infra_hosts:
  infra1:
    ip: 172.29.236.11

# nova api, conductor, etc services
compute-infra_hosts:
  infra1:
    ip: 172.29.236.11

# heat
orchestration_hosts:
  infra1:
    ip: 172.29.236.11

# horizon
dashboard_hosts:
  infra1:
    ip: 172.29.236.11

# neutron server, agents (L3, etc)
network_hosts:
  infra1:
    ip: 172.29.236.11

network-northd_hosts:
  infra1:
    ip: 172.29.236.11

neutron_ovn_northd:
  infra1:
    ip: 172.29.236.11

network-gateway_hosts:
  infra1:
    ip: 172.29.236.11

neutron_ovn_controller:
  infra1:
    ip: 172.29.236.11

neutron_ovn_gateway:
  infra1:
    ip: 172.29.236.11
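# NOTE: the network-northd_hosts/network-gateway_hosts groups and the
# neutron_ovn_* groups above overlap; both naming schemes are defined here,
# presumably so the file works across OpenStack-Ansible releases that
# expect either set.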

# nova hypervisors
compute_hosts:
  compute1:
    ip: 172.29.236.12
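# The "tunnel" and "vlan" provider networks above bind to compute_hosts, so
# compute1 must have br-tunnel and br-vlan (backed by ens2f1) configured.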

# cinder storage host (NFS-backed)
storage_hosts:
  infra1:
    ip: 172.29.236.11
    container_vars:
      cinder_backends:
        cinder_nfs_storage10_evo2tb:
          volume_backend_name: storage10_evo2tb
          nfs_shares_config: /etc/cinder/nfs_shares_storage10_evo2tb
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          shares:
            - { ip: 172.29.244.2, share: "/mnt/EVO2TB/stack_evo2tb" }
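          # The "shares" list above is rendered by the os_cinder role into
          # the nfs_shares_config file named above; the result should contain
          # one "<host>:<export>" entry per line, for example:
          #   172.29.244.2:/mnt/EVO2TB/stack_evo2tb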
# --------
#
# Level: haproxy_hosts (optional)
# List of target hosts on which to deploy HAProxy. Recommend at least one
# target host for this service if hardware load balancers are not being
# used.
#
# Level: <value> (required, string)
# Hostname of a target host.
#
# Option: ip (required, string)
# IP address of this target host, typically the IP address assigned to
# the management bridge.
#
#
# Example:
#
# Define a virtual load balancer (HAProxy):
#
# While HAProxy can be used as a virtual load balancer, it is recommended to use
# a physical load balancer in a production environment.
#
# haproxy_hosts:
# lb1:
# ip: 172.29.236.100
# lb2:
# ip: 172.29.236.101
#
# In the scenario above (multiple hosts), HAProxy can be deployed in a
# highly-available manner by installing keepalived.
#
# To make keepalived work, edit at least the following variables
# in ``user_variables.yml``:
# haproxy_keepalived_external_vip_cidr: 192.168.0.4/25
# haproxy_keepalived_internal_vip_cidr: 172.29.236.54/16
# haproxy_keepalived_external_interface: br-flat
# haproxy_keepalived_internal_interface: br-mgmt
#
# To always deploy (or upgrade to) the latest stable version of keepalived,
# edit ``/etc/openstack_deploy/user_variables.yml``:
# keepalived_package_state: latest
#
# The group_vars/all/keepalived.yml file contains the keepalived variables
# that are fed into the keepalived role during the haproxy playbook. You can
# change keepalived's behavior for your deployment; refer to the
# ``user_variables.yml`` file for more information.
#
# Keepalived can ping a public and private IP address to check its status. To
# enable this feature, set the ``keepalived_external_ping_address`` and
# ``keepalived_internal_ping_address`` variables in the ``user_variables.yml``
# file.
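#
# Example (illustrative values):
# keepalived_external_ping_address: 8.8.8.8
# keepalived_internal_ping_address: 172.29.236.1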