Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add libvirt ipv6 support scenario in vagrant (backport #7434) #7437

Merged
merged 1 commit into from
Jun 29, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
216 changes: 167 additions & 49 deletions Vagrantfile

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion roles/ceph-facts/tasks/grafana.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@

- name: set grafana_server_addrs fact - ipv6
set_fact:
grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ipwrap]) | unique }}"
grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ansible.utils.ipwrap]) | unique }}"
with_items: "{{ groups.get(monitoring_group_name, []) }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
Expand Down
1 change: 1 addition & 0 deletions tests/functional/all_daemons_ipv6/Vagrantfile
40 changes: 40 additions & 0 deletions tests/functional/all_daemons_ipv6/ceph-override.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
{
"ceph_conf_overrides": {
"global": {
"auth_allow_insecure_global_id_reclaim": false,
"osd_pool_default_pg_num": 12,
"osd_pool_default_size": 1,
"mon_allow_pool_size_one": true,
"mon_warn_on_pool_no_redundancy": false,
"mon_max_pg_per_osd": 300
}
},
"cephfs_pools": [
{
"name": "cephfs_data",
"pg_num": 8,
"pgp_num": 8,
"rule_name": "replicated_rule",
"type": 1,
"erasure_profile": "",
"expected_num_objects": "",
"application": "cephfs",
"size": 2,
"min_size": 0
},
{
"name": "cephfs_metadata",
"pg_num": 8,
"pgp_num": 8,
"rule_name": "replicated_rule",
"type": 1,
"erasure_profile": "",
"expected_num_objects": "",
"application": "cephfs",
"size": 2,
"min_size": 0
}
],
"ceph_mon_docker_memory_limit": "2g",
"radosgw_num_instances": 2
}
1 change: 1 addition & 0 deletions tests/functional/all_daemons_ipv6/container/Vagrantfile
46 changes: 46 additions & 0 deletions tests/functional/all_daemons_ipv6/container/group_vars/all
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True

containerized_deployment: True
# CentOS test boxes expose the second NIC as eth1; the other boxes
# (presumably the non-CentOS cloud images) expose it as ens6.
monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
# IPv6 scenario: both networks are ULA /64 prefixes and must match the
# public_subnet / cluster_subnet values in vagrant_variables.yml.
ip_version: ipv6
public_network: "fdec:f1fb:29cd:6940::/64"
cluster_network: "fdec:f1fb:29cd:7120::/64"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
# Relaxed mon/osd settings so a single-replica CI cluster stays healthy.
ceph_conf_overrides:
  global:
    auth_allow_insecure_global_id_reclaim: false
    mon_allow_pool_size_one: true
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
openstack_config: True
openstack_glance_pool:
  name: "images"
  size: 1
  target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  rule_name: "HDD"
  size: 1
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
docker_pull_timeout: 600s
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
# Throwaway CI-only credentials — not real secrets.
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
ceph_docker_image_tag: latest-main
# Monitoring-stack images pinned to fixed tags for reproducible CI runs.
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
13 changes: 13 additions & 0 deletions tests/functional/all_daemons_ipv6/container/group_vars/clients
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
---
# Client-node settings: create two test pools, both single-replica
# (size 1) to keep the CI cluster small. "test" uses the HDD CRUSH rule
# defined in group_vars/mons.
user_config: True
copy_admin_key: True
test:
  name: "test"
  rule_name: "HDD"
  size: 1
test2:
  name: "test2"
  size: 1
pools:
  - "{{ test }}"
  - "{{ test2 }}"
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
---
# Generate a (self-signed) certificate for the iSCSI gateway service.
generate_crt: True
11 changes: 11 additions & 0 deletions tests/functional/all_daemons_ipv6/container/group_vars/mons
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
---
# Build a custom CRUSH tree and an hdd-class replicated rule; the "HDD"
# rule name is referenced by the pool definitions in the clients and
# openstack pool settings.
create_crush_tree: True
crush_rule_config: True
crush_rule_hdd:
  name: HDD
  root: default
  type: host
  class: hdd
  default: true
crush_rules:
  - "{{ crush_rule_hdd }}"
8 changes: 8 additions & 0 deletions tests/functional/all_daemons_ipv6/container/group_vars/osds
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
---
# ceph-volume lvm layout: two data LVs in the test_group VG; the second
# OSD also gets a separate RocksDB LV on the journals VG.
lvm_volumes:
  - data: data-lv1
    data_vg: test_group
  - data: data-lv2
    data_vg: test_group
    db: journal1
    db_vg: journals
8 changes: 8 additions & 0 deletions tests/functional/all_daemons_ipv6/container/group_vars/rgws
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
---
copy_admin_key: True
# Pools created for RGW at deploy time; both get 16 PGs, "foo" is
# explicitly replicated, "bar" relies on the default pool type.
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
36 changes: 36 additions & 0 deletions tests/functional/all_daemons_ipv6/container/hosts
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# Ansible inventory for the containerized all_daemons_ipv6 scenario.
# Monitor addresses live in the public_network ULA range fdec:f1fb:29cd:6940::/64.
[mons]
# mon0/mon2 pin explicit IPv6 monitor addresses; mon1 derives its address
# from the distro-dependent interface name (eth1 on CentOS, ens6 otherwise).
mon0 monitor_address="fdec:f1fb:29cd:6940::10"
mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address="fdec:f1fb:29cd:6940::12"

[mgrs]
mgr0

[osds]
# osd0 exercises a custom CRUSH location; osd2 overrides the group-level
# lvm_volumes with raw devices plus a dedicated db device.
osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"

[mdss]
mds0
mds1
mds2

[rgws]
rgw0

# NFS gateway intentionally disabled in this scenario.
#[nfss]
#nfs0

[clients]
client0
client1

[rbdmirrors]
rbd-mirror0

[iscsigws]
iscsi-gw0

[monitoring]
mon0
61 changes: 61 additions & 0 deletions tests/functional/all_daemons_ipv6/container/vagrant_variables.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
---
# Vagrant topology for the containerized all_daemons_ipv6 CI scenario.

# DEPLOY CONTAINERIZED DAEMONS
docker: True

# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
osd_vms: 3
mds_vms: 3
rgw_vms: 1
nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
iscsi_gw_vms: 1
mgr_vms: 1

# SUBNETS TO USE FOR THE VMS
# NOTE(review): IPv6 ULA prefixes — must stay in sync with public_network /
# cluster_network in group_vars/all.
public_subnet: "fdec:f1fb:29cd:6940::"
cluster_subnet: "fdec:f1fb:29cd:7120::"

# MEMORY
# set 1024 for CentOS
memory: 1024

# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"

# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
# NOTE(review): centos/atomic-host is end-of-life upstream — confirm the box
# is still downloadable before relying on this scenario.
vagrant_box: centos/atomic-host
#client_vagrant_box: centos/stream8
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
39 changes: 39 additions & 0 deletions tests/functional/all_daemons_ipv6/group_vars/all
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
---
# Non-containerized all_daemons_ipv6 scenario: install packages built from
# the dev repository.
ceph_origin: repository
ceph_repository: dev
# IPv6 scenario: both networks are ULA /64 prefixes matching the Vagrant
# subnets in vagrant_variables.yml.
ip_version: ipv6
public_network: "fdec:f1fb:29cd:6940::/64"
cluster_network: "fdec:f1fb:29cd:7120::/64"
# CentOS boxes expose the second NIC as eth1; other images expose ens6.
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
# Relaxed mon/osd settings so a single-replica CI cluster stays healthy.
ceph_conf_overrides:
  global:
    auth_allow_insecure_global_id_reclaim: false
    mon_allow_pool_size_one: true
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
    mon_max_pg_per_osd: 300
openstack_config: True
openstack_glance_pool:
  name: "images"
  size: 1
  application: rbd
  target_size_ratio: 0.2
openstack_cinder_pool:
  name: "volumes"
  rule_name: "HDD"
  size: 1
  application: rbd
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
# Throwaway CI-only credentials — not real secrets.
dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n
ceph_docker_registry: quay.io
# Monitoring-stack images pinned to fixed tags for reproducible CI runs.
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
# Monitoring hosts live in the [ceph_monitoring] inventory group.
grafana_server_group_name: ceph_monitoring
13 changes: 13 additions & 0 deletions tests/functional/all_daemons_ipv6/group_vars/clients
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
---
# Client-node settings: create two test pools, both single-replica
# (size 1) to keep the CI cluster small. "test" uses the HDD CRUSH rule
# defined in group_vars/mons.
copy_admin_key: True
user_config: True
test:
  name: "test"
  rule_name: "HDD"
  size: 1
test2:
  name: "test2"
  size: 1
pools:
  - "{{ test }}"
  - "{{ test2 }}"
2 changes: 2 additions & 0 deletions tests/functional/all_daemons_ipv6/group_vars/iscsigws
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
---
# Generate a (self-signed) certificate for the iSCSI gateway service.
generate_crt: True
11 changes: 11 additions & 0 deletions tests/functional/all_daemons_ipv6/group_vars/mons
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
---
# Build a custom CRUSH tree and an hdd-class replicated rule; the "HDD"
# rule name is referenced by the pool definitions in the clients and
# openstack pool settings.
create_crush_tree: True
crush_rule_config: True
crush_rule_hdd:
  name: HDD
  root: default
  type: host
  class: hdd
  default: true
crush_rules:
  - "{{ crush_rule_hdd }}"
10 changes: 10 additions & 0 deletions tests/functional/all_daemons_ipv6/group_vars/nfss
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# NFS Ganesha gateway settings for the all_daemons_ipv6 scenario.
copy_admin_key: true
# Expose object (RGW) access through Ganesha; CephFS export disabled.
nfs_file_gw: false
nfs_obj_gw: true
# Raw ganesha.conf snippet appended verbatim (literal block scalar keeps
# newlines; content must be indented deeper than the key).
ganesha_conf_overrides: |
  CACHEINODE {
    Entries_HWMark = 100000;
  }
# Use the stable Ganesha repository rather than dev builds.
nfs_ganesha_stable: true
nfs_ganesha_dev: false
nfs_ganesha_flavor: "ceph_main"
10 changes: 10 additions & 0 deletions tests/functional/all_daemons_ipv6/group_vars/osds
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# Raise the system-wide open-file limit on OSD hosts.
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
# ceph-volume lvm layout: two data LVs in the test_group VG; the second
# OSD also gets a separate RocksDB LV on the journals VG.
lvm_volumes:
  - data: data-lv1
    data_vg: test_group
  - data: data-lv2
    data_vg: test_group
    db: journal1
    db_vg: journals
9 changes: 9 additions & 0 deletions tests/functional/all_daemons_ipv6/group_vars/rgws
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
# RGW host settings for the all_daemons_ipv6 scenario.
copy_admin_key: true
# Pools created for RGW at deploy time; both get 16 PGs, "foo" is
# explicitly replicated, "bar" relies on the default pool type.
rgw_create_pools:
  foo:
    pg_num: 16
    type: replicated
  bar:
    pg_num: 16
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
36 changes: 36 additions & 0 deletions tests/functional/all_daemons_ipv6/hosts
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# Ansible inventory for the non-containerized all_daemons_ipv6 scenario.
# Monitor addresses live in the public_network ULA range fdec:f1fb:29cd:6940::/64.
[mons]
# mon0/mon2 pin explicit IPv6 monitor addresses; mon1 derives its address
# from the distro-dependent interface name (eth1 on CentOS, ens6 otherwise).
mon0 monitor_address="fdec:f1fb:29cd:6940::10"
mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address="fdec:f1fb:29cd:6940::12"

[mgrs]
mgr0

[osds]
# osd0 exercises a custom CRUSH location; osd2 overrides the group-level
# lvm_volumes with raw devices plus a dedicated db device.
osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"

[mdss]
mds0
mds1
mds2

[rgws]
rgw0

[clients]
client0
client1

# NFS gateway intentionally disabled in this scenario.
#[nfss]
#nfs0

[rbdmirrors]
rbd-mirror0

[iscsigws]
iscsi-gw0

# Group name matches grafana_server_group_name in group_vars/all.
[ceph_monitoring]
mon0
Loading