ceph-pool: add support for updating the crush rule of the pool
Support updating the crush rule of a pool via the crush rule name.

Signed-off-by: Seena Fallah <seenafallah@gmail.com>
clwluvw authored and guits committed Aug 13, 2024
1 parent be9b458 commit d6ac0cc
Showing 2 changed files with 77 additions and 2 deletions.
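The new helper added below wraps "ceph osd pool get <pool> crush_rule -f json", and get_pool_details() parses the rule name out of its JSON output. A minimal sketch of that parsing step, using a hypothetical command output (only the 'crush_rule' key is relied on by the module):

import json

# Hypothetical output of `ceph osd pool get rbd crush_rule -f json`;
# the module only depends on the 'crush_rule' key being present.
crush_rule_output = '{"pool": "rbd", "crush_rule": "replicated_rule"}'

# Same parsing as performed in get_pool_details() below.
crush_rule_name = json.loads(crush_rule_output.strip())['crush_rule']
assert crush_rule_name == 'replicated_rule'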
43 changes: 41 additions & 2 deletions library/ceph_pool.py
@@ -219,6 +219,28 @@ def get_application_pool(cluster,
    return cmd


def get_crush_rule_pool(cluster,
                        name,
                        user,
                        user_key,
                        output_format='json',
                        container_image=None):
    '''
    Get crush rule type on a given pool
    '''

    args = ['get', name, 'crush_rule', '-f', output_format]

    cmd = generate_cmd(sub_cmd=['osd', 'pool'],
                       args=args,
                       cluster=cluster,
                       user=user,
                       user_key=user_key,
                       container_image=container_image)

    return cmd


def enable_application_pool(cluster,
                            name,
                            application,
@@ -317,6 +339,12 @@ def get_pool_details(module,
                                                                          user,  # noqa: E501
                                                                          user_key,  # noqa: E501
                                                                          container_image=container_image))  # noqa: E501
    _rc, _cmd, crush_rule, _err = exec_command(module,
                                               get_crush_rule_pool(cluster,  # noqa: E501
                                                                   name,  # noqa: E501
                                                                   user,  # noqa: E501
                                                                   user_key,  # noqa: E501
                                                                   container_image=container_image))  # noqa: E501

    # This is a trick because "target_size_ratio" isn't present at the same
    # level in the dict
@@ -343,6 +371,8 @@ def get_pool_details(module,
    else:
        out['application'] = application[0]

    out['crush_rule'] = json.loads(crush_rule.strip())['crush_rule']

    return rc, cmd, out, err


@@ -353,7 +383,8 @@ def compare_pool_config(user_pool_config, running_pool_details):

    delta = {}
    filter_keys = ['pg_num', 'pg_placement_num', 'size',
-                  'pg_autoscale_mode', 'target_size_ratio']
+                  'pg_autoscale_mode', 'target_size_ratio',
+                  'crush_rule']
    for key in filter_keys:
        if (str(running_pool_details[key]) != user_pool_config[key]['value'] and  # noqa: E501
                user_pool_config[key]['value']):
@@ -599,6 +630,8 @@ def run_module():
    keyring_filename = cluster + '.' + user + '.keyring'
    user_key = os.path.join("/etc/ceph/", keyring_filename)

    diff = dict(before="", after="")

    if state == "present":
        rc, cmd, out, err = exec_command(module,
                                         check_pool_exist(cluster,
@@ -625,6 +658,12 @@
                if details['pg_autoscale_mode'] == 'on':
                    delta.pop('pg_num', None)
                    delta.pop('pgp_num', None)
                if not module.params.get('rule_name'):
                    delta.pop('crush_rule', None)

                for key in delta.keys():
                    diff['before'] += "{}: {}\n".format(key, details[key])
                    diff['after'] += "{}: {}\n".format(key, delta[key]['value'])

                changed = len(delta) > 0
                if changed and not module.check_mode:
@@ -687,7 +726,7 @@ def run_module():
container_image=container_image)) # noqa: E501

    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd,
-               changed=changed)
+               changed=changed, diff=diff)


def main():
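To illustrate the check-mode reporting added to run_module() above: the before/after diff is built from the running pool details and the computed delta. A small sketch with hypothetical values, showing the resulting diff when only the crush rule differs:

# Hypothetical running pool details and computed delta.
details = {'crush_rule': 'replicated_rule'}
delta = {'crush_rule': {'value': 'ssd_rule'}}

diff = dict(before="", after="")
for key in delta.keys():
    diff['before'] += "{}: {}\n".format(key, details[key])
    diff['after'] += "{}: {}\n".format(key, delta[key]['value'])

# diff == {'before': 'crush_rule: replicated_rule\n',
#          'after': 'crush_rule: ssd_rule\n'}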
36 changes: 36 additions & 0 deletions tests/library/test_ceph_pool.py
@@ -248,6 +248,42 @@ def test_get_application_pool(self):

        assert cmd == expected_command

    def test_get_crush_rule_pool(self):
        expected_command = [
            'podman',
            'run',
            '--rm',
            '--net=host',
            '-v',
            '/etc/ceph:/etc/ceph:z',
            '-v',
            '/var/lib/ceph/:/var/lib/ceph/:z',
            '-v',
            '/var/log/ceph/:/var/log/ceph/:z',
            '--entrypoint=ceph',
            fake_container_image_name,
            '-n',
            'client.admin',
            '-k',
            '/etc/ceph/ceph.client.admin.keyring',
            '--cluster',
            'ceph',
            'osd',
            'pool',
            'get',
            self.fake_user_pool_config['pool_name']['value'],
            'crush_rule',
            '-f',
            'json'
        ]

        cmd = ceph_pool.get_crush_rule_pool(fake_cluster_name,
                                            self.fake_user_pool_config['pool_name']['value'],
                                            fake_user, fake_user_key, 'json',
                                            container_image=fake_container_image_name)

        assert cmd == expected_command

    def test_enable_application_pool(self):
        expected_command = [
            'podman',
