diff --git a/docker/provider_ccm_up.py b/docker/cluster_manager_up.py
similarity index 67%
rename from docker/provider_ccm_up.py
rename to docker/cluster_manager_up.py
index f82d5739b32..e8f1a22b9b5 100755
--- a/docker/provider_ccm_up.py
+++ b/docker/cluster_manager_up.py
@@ -4,7 +4,7 @@
 Copyright (C) 2015 ACK CYFRONET AGH
 This software is released under the MIT license cited in 'LICENSE.txt'
 
-A script to bring up a set of oneprovider ccm nodes. They can create separate
+A script to bring up a set of oneprovider cm nodes. They can create separate
 clusters.
 Run the script with -h flag to learn about script's running options.
 """
@@ -12,10 +12,10 @@
 from __future__ import print_function
 
 import json
-from environment import common, provider_ccm
+from environment import common, cluster_manager
 
-parser = common.standard_arg_parser('Bring up op_ccm nodes.')
+parser = common.standard_arg_parser('Bring up cluster_manager nodes.')
 parser.add_argument(
     '-l', '--logdir',
     action='store',
@@ -24,7 +24,7 @@
     dest='logdir')
 
 args = parser.parse_args()
-output = provider_ccm.up(args.image, args.bin, args.dns, args.uid,
+output = cluster_manager.up(args.image, args.bin, args.dns, args.uid,
                          args.config_path, args.logdir)
 
 print(json.dumps(output))
diff --git a/docker/cluster_up.py b/docker/cluster_up.py
index ca8e373d97e..98a0a3b2c3c 100755
--- a/docker/cluster_up.py
+++ b/docker/cluster_up.py
@@ -12,10 +12,10 @@
 from __future__ import print_function
 
 import json
 import os
-from environment import common, cluster_worker, provider_ccm, dns
+from environment import common, cluster_worker, cluster_manager, dns
 
 parser = common.standard_arg_parser(
-    'Bring up bare cluster nodes (workers and ccms).')
+    'Bring up bare cluster nodes (workers and cms).')
 parser.add_argument(
     '-l', '--logdir',
     action='store',
@@ -29,17 +29,17 @@
     help='the path to cluster-worker repository (precompiled)',
     dest='bin_op_worker')
 parser.add_argument(
-    '-bccm', '--bin-ccm',
+    '-bcm', '--bin-cm',
     action='store',
-    default=os.getcwd() + '/op_ccm',
-    help='the path to op_ccm repository (precompiled)',
-    dest='bin_op_ccm')
+    default=os.getcwd() + '/cluster_manager',
+    help='the path to cluster_manager repository (precompiled)',
+    dest='bin_cluster_manager')
 
 # Prepare config
 args = parser.parse_args()
 config = common.parse_json_file(args.config_path)
 output = {
-    'op_ccm_nodes': [],
+    'cluster_manager_nodes': [],
     'cluster_worker_nodes': [],
 }
 uid = common.generate_uid()
@@ -48,9 +48,9 @@
 [dns_server], dns_output = dns.maybe_start('auto', uid)
 common.merge(output, dns_output)
 
-# Start ccms
-ccm_output = provider_ccm.up(args.image, args.bin_op_ccm, dns_server, uid, args.config_path, args.logdir)
-common.merge(output, ccm_output)
+# Start cms
+cm_output = cluster_manager.up(args.image, args.bin_cluster_manager, dns_server, uid, args.config_path, args.logdir)
+common.merge(output, cm_output)
 
 # Start workers
 worker_output = cluster_worker.up(args.image, args.bin_op_worker, dns_server, uid, args.config_path, args.logdir)
diff --git a/docker/ct_run.py b/docker/ct_run.py
index 44bd3875981..b4acb31c78f 100755
--- a/docker/ct_run.py
+++ b/docker/ct_run.py
@@ -173,10 +173,10 @@
             configs_to_change.extend(
                 data['provider_domains'][provider][
                     'op_worker'].values())
-            if 'op_ccm' in data['provider_domains'][provider]:
+            if 'cluster_manager' in data['provider_domains'][provider]:
                 configs_to_change.extend(
                     data['provider_domains'][provider][
-                        'op_ccm'].values())
+                        'cluster_manager'].values())
     if 'globalregistry_domains' in data:
         for globalregistry in data['globalregistry_domains']:
             configs_to_change.extend(
diff --git a/docker/env_up.py b/docker/env_up.py
index ddf3e529232..c2fd6557aa9 100755
--- a/docker/env_up.py
+++ b/docker/env_up.py
@@ -43,11 +43,11 @@
     dest='bin_cluster_worker')
 
 parser.add_argument(
-    '-bccm', '--bin-ccm',
+    '-bcm', '--bin-cm',
     action='store',
-    default=env.default('bin_op_ccm'),
-    help='the path to op_ccm repository (precompiled)',
-    dest='bin_op_ccm')
+    default=env.default('bin_cluster_manager'),
+    help='the path to cluster_manager repository (precompiled)',
+    dest='bin_cluster_manager')
 
 parser.add_argument(
     '-bg', '--bin-gr',
@@ -85,7 +85,7 @@
 args = parser.parse_args()
 
 output = env.up(args.config_path, image=args.image, bin_am=args.bin_am,
-                bin_gr=args.bin_gr, bin_op_ccm=args.bin_op_ccm,
+                bin_gr=args.bin_gr, bin_cluster_manager=args.bin_cluster_manager,
                 bin_op_worker=args.bin_op_worker,
                 bin_cluster_worker=args.bin_cluster_worker,
                 bin_oc=args.bin_oc, logdir=args.logdir)
diff --git a/docker/environment/appmock.py b/docker/environment/appmock.py
index 324bcdec23f..a0f0719e987 100644
--- a/docker/environment/appmock.py
+++ b/docker/environment/appmock.py
@@ -12,7 +12,7 @@
 import random
 import string
 
-from . import common, docker, dns, provider_ccm, worker, globalregistry
+from . import common, docker, dns, cluster_manager, worker, globalregistry
 
 APPMOCK_WAIT_FOR_NAGIOS_SECONDS = 60 * 2
 
@@ -48,7 +48,7 @@ def _tweak_config(config, appmock_node, appmock_instance, uid):
     # Node name depends on mocked app, if none is specified,
     # default appmock_erl_node_name will be used.
     node_name = {
-        'op_ccm': provider_ccm.ccm_erl_node_name(appmock_node,
+        'cluster_manager': cluster_manager.cm_erl_node_name(appmock_node,
                                                  appmock_instance, uid),
         'op_worker': worker.worker_erl_node_name(appmock_node,
                                                  appmock_instance,
diff --git a/docker/environment/provider_ccm.py b/docker/environment/cluster_manager.py
similarity index 65%
rename from docker/environment/provider_ccm.py
rename to docker/environment/cluster_manager.py
index 07bba550fad..8bd99b021b8 100644
--- a/docker/environment/provider_ccm.py
+++ b/docker/environment/cluster_manager.py
@@ -2,7 +2,7 @@
 Copyright (C) 2015 ACK CYFRONET AGH
 This software is released under the MIT license cited in 'LICENSE.txt'
 
-Brings up a set of oneprovider ccm nodes. They can create separate clusters.
+Brings up a set of oneprovider cm nodes. They can create separate clusters.
 """
 
 from __future__ import print_function
@@ -14,32 +14,32 @@
 from . import common, docker, dns
 
 
-def ccm_hostname(node_name, op_instance, uid):
-    """Formats hostname for a docker hosting op_ccm.
+def cm_hostname(node_name, op_instance, uid):
+    """Formats hostname for a docker hosting cluster_manager.
     NOTE: Hostnames are also used as docker names!
     """
     return common.format_hostname([node_name, op_instance], uid)
 
 
-def ccm_erl_node_name(node_name, op_instance, uid):
-    """Formats erlang node name for a vm on op_ccm docker.
+def cm_erl_node_name(node_name, op_instance, uid):
+    """Formats erlang node name for a vm on cluster_manager docker.
     """
-    hostname = ccm_hostname(node_name, op_instance, uid)
-    return common.format_erl_node_name('ccm', hostname)
+    hostname = cm_hostname(node_name, op_instance, uid)
+    return common.format_erl_node_name('cm', hostname)
 
 
-def _tweak_config(config, ccm_node, op_instance, uid):
+def _tweak_config(config, cm_node, op_instance, uid):
     cfg = copy.deepcopy(config)
-    cfg['nodes'] = {'node': cfg['nodes'][ccm_node]}
+    cfg['nodes'] = {'node': cfg['nodes'][cm_node]}
 
     sys_config = cfg['nodes']['node']['sys.config']
-    sys_config['ccm_nodes'] = [ccm_erl_node_name(n, op_instance, uid)
-                               for n in sys_config['ccm_nodes']]
+    sys_config['cm_nodes'] = [cm_erl_node_name(n, op_instance, uid)
+                              for n in sys_config['cm_nodes']]
 
     if 'vm.args' not in cfg['nodes']['node']:
         cfg['nodes']['node']['vm.args'] = {}
     vm_args = cfg['nodes']['node']['vm.args']
-    vm_args['name'] = ccm_erl_node_name(ccm_node, op_instance, uid)
+    vm_args['name'] = cm_erl_node_name(cm_node, op_instance, uid)
 
     return cfg
@@ -58,9 +58,9 @@ def _node_up(image, bindir, config, dns_servers, logdir):
 EOF
 set -e
 escript bamboos/gen_dev/gen_dev.escript /tmp/gen_dev_args.json
-/root/bin/node/bin/op_ccm console'''
+/root/bin/node/bin/cluster_manager console'''
 
     command = command.format(
-        gen_dev_args=json.dumps({'op_ccm': config}),
+        gen_dev_args=json.dumps({'cluster_manager': config}),
         uid=os.geteuid(),
         gid=os.getegid())
@@ -84,7 +84,7 @@ def _node_up(image, bindir, config, dns_servers, logdir):
 
     return {
         'docker_ids': [container],
-        'op_ccm_nodes': [node_name]
+        'cluster_manager_nodes': [node_name]
     }
 
 
@@ -94,29 +94,29 @@ def _ready(container):
 def up(image, bindir, dns_server, uid, config_path, logdir=None,
        domains_name='provider_domains'):
     config = common.parse_json_file(config_path)
-    input_dir = config['dirs_config']['op_ccm']['input_dir']
+    input_dir = config['dirs_config']['cluster_manager']['input_dir']
     dns_servers, output = dns.maybe_start(dns_server, uid)
 
-    # CCMs of every provider are started together
+    # CMs of every provider are started together
     for op_instance in config[domains_name]:
         gen_dev_cfg = {
             'config': {
                 'input_dir': input_dir,
                 'target_dir': '/root/bin'
             },
-            'nodes': config[domains_name][op_instance]['op_ccm']
+            'nodes': config[domains_name][op_instance]['cluster_manager']
         }
 
         tweaked_configs = [
-            _tweak_config(gen_dev_cfg, ccm_node, op_instance, uid)
-            for ccm_node in gen_dev_cfg['nodes']]
+            _tweak_config(gen_dev_cfg, cm_node, op_instance, uid)
+            for cm_node in gen_dev_cfg['nodes']]
 
-        ccms = []
+        cms = []
         for cfg in tweaked_configs:
             node_out = _node_up(image, bindir, cfg, dns_servers, logdir)
-            ccms.extend(node_out['docker_ids'])
+            cms.extend(node_out['docker_ids'])
             common.merge(output, node_out)
 
-        common.wait_until(_ready, ccms, 0)
+        common.wait_until(_ready, cms, 0)
 
     return output
diff --git a/docker/environment/cluster_worker.py b/docker/environment/cluster_worker.py
index d3940ef0b5f..2491ce68e1c 100644
--- a/docker/environment/cluster_worker.py
+++ b/docker/environment/cluster_worker.py
@@ -20,7 +20,7 @@ class ClusterWorkerConfigurator:
     def tweak_config(self, cfg, uid):
         return cfg
 
-    def configure_started_instance(self, bindir, instance, config, os_config, output):
+    def configure_started_instance(self, bindir, instance, config, output):
         pass
 
     def extra_volumes(self, config):
diff --git a/docker/environment/common.py b/docker/environment/common.py
index e2092a2f973..d92b06da768 100644
--- a/docker/environment/common.py
+++ b/docker/environment/common.py
@@ -147,7 +147,7 @@ def format_hostname(domain_parts, uid):
     """Formats hostname for a docker based on domain parts and uid.
     NOTE: Hostnames are also used as docker names!
    domain_parts - a single or a list of consecutive domain parts that constitute a unique name
-        within environment e.g.: ['worker1', 'prov1'], ['ccm1', 'prov1'], 'client1'
+        within environment e.g.: ['worker1', 'prov1'], ['cm1', 'prov1'], 'client1'
     uid - timestamp
     """
     if isinstance(domain_parts, (str, unicode)):
@@ -161,7 +161,7 @@ def format_hostname(domain_parts, uid):
 def format_erl_node_name(app_name, hostname):
     """Formats full node name for an erlang VM hosted on docker based on app_name and hostname.
     NOTE: Hostnames are also used as docker names!
-    app_name - application name, e.g.: 'op_ccm', 'globalregistry'
+    app_name - application name, e.g.: 'cluster_manager', 'globalregistry'
     hostname - hostname aquired by format_*_hostname
     """
     return '{0}@{1}'.format(app_name, hostname)
diff --git a/docker/environment/env.py b/docker/environment/env.py
index 4436d6a562c..84d33cea5f0 100644
--- a/docker/environment/env.py
+++ b/docker/environment/env.py
@@ -10,7 +10,7 @@
 import copy
 import subprocess
 import json
-from . import appmock, client, common, globalregistry, provider_ccm, \
+from . import appmock, client, common, globalregistry, cluster_manager, \
     provider_worker, cluster_worker, docker, dns
 
 
@@ -20,13 +20,13 @@ def default(key):
         'bin_gr': '{0}/globalregistry'.format(os.getcwd()),
         'bin_op_worker': '{0}/op_worker'.format(os.getcwd()),
         'bin_cluster_worker': '{0}/cluster_worker'.format(os.getcwd()),
-        'bin_op_ccm': '{0}/op_ccm'.format(os.getcwd()),
+        'bin_cluster_manager': '{0}/cluster_manager'.format(os.getcwd()),
         'bin_oc': '{0}/oneclient'.format(os.getcwd()),
         'logdir': None}[key]
 
 
 def up(config_path, image=default('image'), bin_am=default('bin_am'),
-       bin_gr=default('bin_gr'), bin_op_ccm=default('bin_op_ccm'),
+       bin_gr=default('bin_gr'), bin_cluster_manager=default('bin_cluster_manager'),
        bin_op_worker=default('bin_op_worker'), bin_cluster_worker=default('bin_cluster_worker'),
        bin_oc=default('bin_oc'), logdir=default('logdir')):
     config = common.parse_json_file(config_path)
@@ -36,7 +36,7 @@ def up(config_path, image=default('image'), bin_am=default('bin_am'),
         'docker_ids': [],
         'gr_nodes': [],
         'gr_db_nodes': [],
-        'op_ccm_nodes': [],
+        'cluster_manager_nodes': [],
         'op_worker_nodes': [],
         'cluster_worker_nodes': [],
         'appmock_nodes': [],
@@ -68,34 +68,12 @@ def up(config_path, image=default('image'), bin_am=default('bin_am'),
         dns.maybe_restart_with_configuration('auto', uid, output)
 
     # Start provider cluster instances
-    if 'provider_domains' in config:
-        # Start op_ccm instances
-        op_ccm_output = provider_ccm.up(image, bin_op_ccm, dns_server,
-                                        uid, config_path, logdir)
-        common.merge(output, op_ccm_output)
+    setup_worker(provider_worker, bin_op_worker, 'provider_domains',
+                 bin_cluster_manager, config, config_path, dns_server, image, logdir, output, uid)
 
-        # Start op_worker instances
-        op_worker_output = provider_worker.up(image, bin_op_worker, dns_server, uid, config_path, logdir)
-        common.merge(output, op_worker_output)
-        # Make sure OP domains are added to the dns server.
-        # Setting first arg to 'auto' will force the restart and this is needed
-        # so that dockers that start after can immediately see the domains.
-        dns.maybe_restart_with_configuration('auto', uid, output)
-
-    # Start provider cluster instances
-    if 'cluster_domains' in config:
-        # Start op_ccm instances
-        op_ccm_output = provider_ccm.up(image, bin_op_ccm, dns_server,
-                                        uid, config_path, logdir, domains_name='cluster_domains')
-        common.merge(output, op_ccm_output)
-
-        # Start op_worker instances
-        cluster_worker_output = cluster_worker.up(image, bin_cluster_worker, dns_server, uid, config_path, logdir)
-        common.merge(output, cluster_worker_output)
-        # Make sure OP domains are added to the dns server.
-        # Setting first arg to 'auto' will force the restart and this is needed
-        # so that dockers that start after can immediately see the domains.
-        dns.maybe_restart_with_configuration('auto', uid, output)
+    # Start stock cluster worker instances
+    setup_worker(cluster_worker, bin_cluster_worker, 'cluster_domains',
+                 bin_cluster_manager, config, config_path, dns_server, image, logdir, output, uid)
 
     # Start oneclient instances
     if 'oneclient' in config:
@@ -153,3 +131,19 @@ def up(config_path, image=default('image'), bin_am=default('bin_am'),
     )
 
     return output
+
+
+def setup_worker(worker, bin_worker, domains_name, bin_cm, config, config_path, dns_server, image, logdir, output, uid):
+    if domains_name in config:
+        # Start cluster_manager instances
+        cluster_manager_output = cluster_manager.up(image, bin_cm, dns_server, uid, config_path, logdir,
+                                                    domains_name=domains_name)
+        common.merge(output, cluster_manager_output)
+
+        # Start op_worker instances
+        cluster_worker_output = worker.up(image, bin_worker, dns_server, uid, config_path, logdir)
+        common.merge(output, cluster_worker_output)
+        # Make sure OP domains are added to the dns server.
+        # Setting first arg to 'auto' will force the restart and this is needed
+        # so that dockers that start after can immediately see the domains.
+        dns.maybe_restart_with_configuration('auto', uid, output)
diff --git a/docker/environment/provider_worker.py b/docker/environment/provider_worker.py
index 76a9faa8554..36061b56487 100644
--- a/docker/environment/provider_worker.py
+++ b/docker/environment/provider_worker.py
@@ -25,13 +25,15 @@ def tweak_config(self, cfg, uid):
         sys_config['global_registry_domain'] = gr_hostname
         return cfg
 
-    def configure_started_instance(self, bindir, instance, config, os_config, output):
-        create_storages(config['os_configs'][os_config]['storages'],
-                        output[self.nodes_list_attribute()],
-                        config[self.domains_attribute()][instance][self.app_name()], bindir)
+    def configure_started_instance(self, bindir, instance, config, output):
+        if 'os_config' in config[self.domains_attribute()][instance]:
+            os_config = config[self.domains_attribute()][instance]['os_config']
+            create_storages(config['os_configs'][os_config]['storages'],
+                            output[self.nodes_list_attribute()],
+                            config[self.domains_attribute()][instance][self.app_name()], bindir)
 
     def extra_volumes(self, config):
-        return [common.volume_for_storage(s) for s in config['os_config']['storages']]
+        return [common.volume_for_storage(s) for s in config['os_config']['storages']] if 'os_config' in config else []
 
     def app_name(self):
         return "op_worker"
diff --git a/docker/environment/riak.py b/docker/environment/riak.py
index 9e7c42a1430..bf29f0b098c 100644
--- a/docker/environment/riak.py
+++ b/docker/environment/riak.py
@@ -17,7 +17,7 @@
 def riak_hostname(node_num, op_instance, uid):
-    """Formats hostname for a docker hosting op_ccm.
+    """Formats hostname for a docker hosting riak.
     NOTE: Hostnames are also used as docker names!
     """
     node_name = 'riak{0}'.format(node_num)
@@ -25,7 +25,7 @@ def riak_hostname(node_num, op_instance, uid):
 def riak_erl_node_name(node_name, op_instance, uid):
-    """Formats erlang node name for a vm on op_ccm docker.
+    """Formats erlang node name for a vm on cluster_manager docker.
     """
     hostname = riak_hostname(node_name, op_instance, uid)
     return common.format_erl_node_name('riak', hostname)
diff --git a/docker/environment/worker.py b/docker/environment/worker.py
index 7c2064f1431..57dd8855473 100644
--- a/docker/environment/worker.py
+++ b/docker/environment/worker.py
@@ -11,7 +11,7 @@
 import os
 import subprocess
 import sys
-from . import common, docker, riak, couchbase, dns, provider_ccm
+from . import common, docker, riak, couchbase, dns, cluster_manager
 
 CLUSTER_WAIT_FOR_NAGIOS_SECONDS = 60 * 2
 # mounting point for op-worker-node docker
@@ -42,9 +42,9 @@ def _tweak_config(config, name, instance, uid, configurator):
     cfg['nodes'] = {'node': cfg['nodes'][name]}
 
     sys_config = cfg['nodes']['node']['sys.config']
-    sys_config['ccm_nodes'] = [
-        provider_ccm.ccm_erl_node_name(n, instance, uid) for n in
-        sys_config['ccm_nodes']]
+    sys_config['cm_nodes'] = [
+        cluster_manager.cm_erl_node_name(n, instance, uid) for n in
+        sys_config['cm_nodes']]
 
     # Set the cluster domain (needed for nodes to start)
     sys_config[configurator.domain_env_name()] = cluster_domain(instance, uid)
@@ -102,9 +102,10 @@ def _node_up(image, bindir, config, dns_servers, db_node_mappings, logdir, confi
         dns_list=dns_servers,
         command=command)
 
-    # create system users and grous
-    common.create_users(container, config['os_config']['users'])
-    common.create_groups(container, config['os_config']['groups'])
+    # create system users and groups (if specified)
+    if 'os_config' in config:
+        common.create_users(container, config['os_config']['users'])
+        common.create_groups(container, config['os_config']['groups'])
 
     return container, {
         'docker_ids': [container],
@@ -168,17 +169,20 @@ def up(image, bindir, dns_server, uid, config_path, configurator, logdir=None):
 
     # Workers of every cluster are started together
     for instance in config[configurator.domains_attribute()]:
-        os_config = config[configurator.domains_attribute()][instance]['os_config']
         gen_dev_cfg = {
             'config': {
                 'input_dir': input_dir,
                 'target_dir': '/root/bin'
             },
             'nodes': config[configurator.domains_attribute()][instance][configurator.app_name()],
-            'db_driver': _db_driver(config[configurator.domains_attribute()][instance]),
-            'os_config': config['os_configs'][os_config]
+            'db_driver': _db_driver(config[configurator.domains_attribute()][instance])
         }
 
+        # If present, include os_config
+        if 'os_config' in config[configurator.domains_attribute()][instance]:
+            os_config = config[configurator.domains_attribute()][instance]['os_config']
+            gen_dev_cfg['os_config'] = config['os_configs'][os_config]
+
         # Tweak configs, retrieve lis of riak nodes to start
         configs = []
         all_db_nodes = []
@@ -224,8 +228,8 @@ def up(image, bindir, dns_server, uid, config_path, configurator, logdir=None):
             }
         }
         common.merge(output, domains)
-        configurator.configure_started_instance(bindir, instance, config, os_config, output)
+        configurator.configure_started_instance(bindir, instance, config, output)
 
         # Make sure domains are added to the dns server.
         dns.maybe_restart_with_configuration(dns_server, uid, output)
-    return output
\ No newline at end of file
+    return output
diff --git a/docker/provider_up.py b/docker/provider_up.py
index 40e1f1665f2..f1aa75faa7f 100755
--- a/docker/provider_up.py
+++ b/docker/provider_up.py
@@ -13,10 +13,10 @@
 import json
 import os
 
-from environment import common, provider_worker, provider_ccm, dns
+from environment import common, provider_worker, cluster_manager, dns
 
 parser = common.standard_arg_parser(
-    'Bring up oneprovider nodes (workers and ccms).')
+    'Bring up oneprovider nodes (workers and cms).')
 parser.add_argument(
     '-l', '--logdir',
     action='store',
@@ -30,17 +30,17 @@
     help='the path to oneprovider repository (precompiled)',
     dest='bin_op_worker')
 parser.add_argument(
-    '-bccm', '--bin-ccm',
+    '-bcm', '--bin-cm',
    action='store',
-    default=os.getcwd() + '/op_ccm',
-    help='the path to op_ccm repository (precompiled)',
-    dest='bin_op_ccm')
+    default=os.getcwd() + '/cluster_manager',
+    help='the path to cluster_manager repository (precompiled)',
+    dest='bin_cluster_manager')
 
 # Prepare config
 args = parser.parse_args()
 config = common.parse_json_file(args.config_path)
 output = {
-    'op_ccm_nodes': [],
+    'cluster_manager_nodes': [],
     'op_worker_nodes': [],
 }
 uid = common.generate_uid()
@@ -49,10 +49,10 @@
 [dns_server], dns_output = dns.maybe_start('auto', uid)
 common.merge(output, dns_output)
 
-# Start ccms
-ccm_output = provider_ccm.up(args.image, args.bin_op_ccm,
+# Start cms
+cm_output = cluster_manager.up(args.image, args.bin_cluster_manager,
                              dns_server, uid, args.config_path, args.logdir)
-common.merge(output, ccm_output)
+common.merge(output, cm_output)
 
 # Start workers
 worker_output = provider_worker.up(args.image, args.bin_op_worker, dns_server,
diff --git a/example_env/appmock_gr.json b/example_env/appmock_gr.json
index 071f192a431..0ed273af2ca 100644
--- a/example_env/appmock_gr.json
+++ b/example_env/appmock_gr.json
@@ -1,7 +1,7 @@
 {
     "dirs_config": {
-        "op_ccm": {
-            "input_dir": "rel/op_ccm",
+        "cluster_manager": {
+            "input_dir": "rel/cluster_manager",
             "target_dir": "rel/test_cluster"
         },
         "op_worker": {
@@ -40,14 +40,14 @@
         "p1": {
             "db_driver": "couchbase",
             "os_config": "cfg1",
-            "op_ccm": {
-                "ccm1": {
+            "cluster_manager": {
+                "cm1": {
                     "vm.args": {
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "worker_num": 1
                     }
@@ -59,8 +59,8 @@
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "db_nodes": [
                             "riaknode1"
diff --git a/example_env/appmock_worker.json b/example_env/appmock_worker.json
index f0eb101597c..b8fec21cd57 100644
--- a/example_env/appmock_worker.json
+++ b/example_env/appmock_worker.json
@@ -1,7 +1,7 @@
 {
     "dirs_config": {
-        "op_ccm": {
-            "input_dir": "rel/op_ccm",
+        "cluster_manager": {
+            "input_dir": "rel/cluster_manager",
             "target_dir": "rel/test_cluster"
         },
         "op_worker": {
@@ -40,14 +40,14 @@
         "p1": {
             "db_driver": "couchbase",
             "os_config": "cfg1",
-            "op_ccm": {
-                "ccm1": {
+            "cluster_manager": {
+                "cm1": {
                     "vm.args": {
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                        ],
                         "worker_num": 1
                     }
diff --git a/example_env/cluster_small.json b/example_env/cluster_small.json
index 22afa389ca0..23e637c560a 100644
--- a/example_env/cluster_small.json
+++ b/example_env/cluster_small.json
@@ -1,7 +1,7 @@
 {
     "dirs_config": {
-        "op_ccm": {
-            "input_dir": "rel/op_ccm",
+        "cluster_manager": {
+            "input_dir": "rel/cluster_manager",
             "target_dir": "rel/test_cluster"
         },
         "cluster_worker": {
@@ -28,14 +28,14 @@
         "p1": {
             "db_driver": "couchbase",
             "os_config": "cfg1",
-            "op_ccm": {
-                "ccm1": {
+            "cluster_manager": {
+                "cm1": {
                     "vm.args": {
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "worker_num": 1
                     }
@@ -47,8 +47,8 @@
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "db_nodes": [
                             "riaknode1"
diff --git a/example_env/example_env.json b/example_env/example_env.json
index 311bae706fd..9bea1b77e65 100644
--- a/example_env/example_env.json
+++ b/example_env/example_env.json
@@ -1,7 +1,7 @@
 {
     "dirs_config": {
-        "op_ccm": {
-            "input_dir": "rel/op_ccm",
+        "cluster_manager": {
+            "input_dir": "rel/cluster_manager",
             "target_dir": "rel/test_cluster"
         },
         "op_worker": {
@@ -69,14 +69,14 @@
         "p1": {
             "db_driver": "couchbase",
             "os_config": "cfg1",
-            "op_ccm": {
-                "ccm1": {
+            "cluster_manager": {
+                "cm1": {
                     "vm.args": {
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "worker_num": 1
                     }
@@ -88,8 +88,8 @@
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "db_nodes": [
                             "dbnode1",
@@ -104,14 +104,14 @@
         "p2.gr": {
             "db_driver": "riak",
             "os_config": "cfg2",
-            "op_ccm": {
-                "ccm1": {
+            "cluster_manager": {
+                "cm1": {
                     "vm.args": {
                         "setcookie": "cookie2"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "worker_num": 2
                     }
@@ -123,8 +123,8 @@
                         "setcookie": "cookie2"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "db_nodes": [
                             "riaknode1"
@@ -138,8 +138,8 @@
                         "setcookie": "cookie2"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "db_nodes": [
                             "riaknode1"
@@ -155,14 +155,14 @@
         "p1": {
             "db_driver": "couchbase",
             "os_config": "cfg1",
-            "op_ccm": {
-                "ccm1": {
+            "cluster_manager": {
+                "cm1": {
                     "vm.args": {
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "worker_num": 1
                     }
@@ -174,8 +174,8 @@
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "db_nodes": [
                             "dbnode1",
diff --git a/example_env/provider_small.json b/example_env/provider_small.json
index 9afc594487a..1ccf53bcb58 100644
--- a/example_env/provider_small.json
+++ b/example_env/provider_small.json
@@ -1,7 +1,7 @@
 {
     "dirs_config": {
-        "op_ccm": {
-            "input_dir": "rel/op_ccm",
+        "cluster_manager": {
+            "input_dir": "rel/cluster_manager",
             "target_dir": "rel/test_cluster"
         },
         "op_worker": {
@@ -28,14 +28,14 @@
         "p1": {
             "db_driver": "couchbase",
             "os_config": "cfg1",
-            "op_ccm": {
-                "ccm1": {
+            "cluster_manager": {
+                "cm1": {
                     "vm.args": {
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "worker_num": 1
                     }
@@ -47,8 +47,8 @@
                         "setcookie": "cookie1"
                     },
                     "sys.config": {
-                        "ccm_nodes": [
-                            "ccm1"
+                        "cm_nodes": [
+                            "cm1"
                         ],
                         "db_nodes": [
                             "riaknode1"
diff --git a/gen_dev/src/release_configurator.erl b/gen_dev/src/release_configurator.erl
index 868c761b3f3..0505dd514f8 100644
--- a/gen_dev/src/release_configurator.erl
+++ b/gen_dev/src/release_configurator.erl
@@ -14,7 +14,7 @@
 -author("Tomasz Lichon").
 
 % oneprovider specific config
--define(ONEPROVIDER_CCM_APP_NAME, op_ccm).
+-define(ONEPROVIDER_CCM_APP_NAME, cluster_manager).
 -define(DIST_APP_FAILOVER_TIMEOUT, timer:seconds(5)).
 -define(SYNC_NODES_TIMEOUT, timer:minutes(1)).
 
@@ -45,18 +45,18 @@ configure_release(?ONEPROVIDER_CCM_APP_NAME, ReleaseRootPath, SysConfig, VmArgs)
     % configure kernel distributed erlang app
     NodeName = proplists:get_value(name, VmArgs),
-    CcmNodes = proplists:get_value(ccm_nodes, SysConfig),
-    case length(CcmNodes) > 1 of
+    CmNodes = proplists:get_value(cm_nodes, SysConfig),
+    case length(CmNodes) > 1 of
         true ->
-            OptCcms = CcmNodes -- [list_to_atom(NodeName)],
+            OptCms = CmNodes -- [list_to_atom(NodeName)],
             replace_application_config(SysConfigPath, kernel, [
                 {distributed, [{
                     ?ONEPROVIDER_CCM_APP_NAME,
                     ?DIST_APP_FAILOVER_TIMEOUT,
-                    [list_to_atom(NodeName), list_to_tuple(OptCcms)]
+                    [list_to_atom(NodeName), list_to_tuple(OptCms)]
                 }]},
-                {sync_nodes_mandatory, OptCcms},
+                {sync_nodes_mandatory, OptCms},
                 {sync_nodes_timeout, ?SYNC_NODES_TIMEOUT}
             ]);
         false -> ok
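Usage stays analogous after the rename; the sketch below (not part of the change set) shows how the renamed `environment.cluster_manager` module is driven, mirroring the flow of `docker/provider_up.py` above. The docker image name is a placeholder, and `example_env/provider_small.json` is simply one of the example configs updated by this change.

```python
# Hypothetical usage sketch for the post-rename API: cluster_manager.up()
# replaces provider_ccm.up(), and its nodes are reported under
# 'cluster_manager_nodes' instead of 'op_ccm_nodes'.
import json
import os

from environment import common, cluster_manager, dns

output = {
    'cluster_manager_nodes': [],
}
uid = common.generate_uid()

# Start DNS first, exactly as the *_up.py scripts do.
[dns_server], dns_output = dns.maybe_start('auto', uid)
common.merge(output, dns_output)

# Bring up the cluster_manager nodes described in the example config.
# 'my-worker-image' is a placeholder; pass whatever --image you normally use.
cm_output = cluster_manager.up('my-worker-image', os.getcwd() + '/cluster_manager',
                               dns_server, uid, 'example_env/provider_small.json')
common.merge(output, cm_output)

print(json.dumps(output))
```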