diff --git a/clear/main.py b/clear/main.py index 5cbaee9d62..c436f6c1d8 100755 --- a/clear/main.py +++ b/clear/main.py @@ -484,8 +484,7 @@ def remap_keys(dict): # Load plugins and register them helper = util_base.UtilHelper() -for plugin in helper.load_plugins(plugins): - helper.register_plugin(plugin, cli) +helper.load_and_register_plugins(plugins, cli) if __name__ == '__main__': diff --git a/clear/plugins/auto/__init__.py b/clear/plugins/auto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/config/main.py b/config/main.py index 41ef47b34a..997398aaa3 100644 --- a/config/main.py +++ b/config/main.py @@ -6103,8 +6103,7 @@ def smoothing_interval(interval, rates_type): # Load plugins and register them helper = util_base.UtilHelper() -for plugin in helper.load_plugins(plugins): - helper.register_plugin(plugin, config) +helper.load_and_register_plugins(plugins, config) if __name__ == '__main__': diff --git a/config/plugins/auto/__init__.py b/config/plugins/auto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/config/plugins/auto_techsupport.py b/config/plugins/auto_techsupport.py new file mode 100644 index 0000000000..c2960646d4 --- /dev/null +++ b/config/plugins/auto_techsupport.py @@ -0,0 +1,350 @@ +""" +Autogenerated config CLI plugin. +""" + +import click +import utilities_common.cli as clicommon +import utilities_common.general as general +from config import config_mgmt + + +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = general.load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + + +def exit_with_error(*args, **kwargs): + """ Print a message and abort CLI. """ + + click.secho(*args, **kwargs) + raise click.Abort() + + +def validate_config_or_raise(cfg): + """ Validate config db data using ConfigMgmt """ + + try: + cfg = sonic_cfggen.FormatConverter.to_serialized(cfg) + config_mgmt.ConfigMgmt().loadData(cfg) + except Exception as err: + raise Exception('Failed to validate configuration: {}'.format(err)) + + +def add_entry_validated(db, table, key, data): + """ Add new entry in table and validate configuration """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key in cfg[table]: + raise Exception(f"{key} already exists") + + cfg[table][key] = data + + validate_config_or_raise(cfg) + db.set_entry(table, key, data) + + +def update_entry_validated(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. 
+ """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + for attr, value in data.items(): + if value is None and attr in cfg[table][key]: + cfg[table][key].pop(attr) + else: + cfg[table][key][attr] = value + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_entry_validated(db, table, key): + """ Delete entry in table and validate configuration """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + cfg[table].pop(key) + + validate_config_or_raise(cfg) + db.set_entry(table, key, None) + + +def add_list_entry_validated(db, table, key, attr, data): + """ Add new entry into list in table and validate configuration""" + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + cfg[table][key].setdefault(attr, []) + for entry in data: + if entry in cfg[table][key][attr]: + raise Exception(f"{entry} already exists") + cfg[table][key][attr].append(entry) + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_list_entry_validated(db, table, key, attr, data): + """ Delete entry from list in table and validate configuration""" + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + cfg[table][key].setdefault(attr, []) + for entry in data: + if entry not in cfg[table][key][attr]: + raise Exception(f"{entry} does not exist") + cfg[table][key][attr].remove(entry) + if not cfg[table][key][attr]: + cfg[table][key].pop(attr) + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def clear_list_entry_validated(db, table, key, attr): + """ Clear list in object and validate configuration""" + + update_entry_validated(db, table, key, {attr: None}) + + +@click.group(name="auto-techsupport", + cls=clicommon.AliasedGroup) +def AUTO_TECHSUPPORT(): + """ AUTO_TECHSUPPORT part of config_db.json """ + + pass + + +@AUTO_TECHSUPPORT.group(name="global", + cls=clicommon.AliasedGroup) +@clicommon.pass_db +def AUTO_TECHSUPPORT_GLOBAL(db): + """ """ + + pass + + +@AUTO_TECHSUPPORT_GLOBAL.command(name="state") +@click.argument( + "state", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_GLOBAL_state(db, state): + """ Knob to make techsupport invocation event-driven based on core-dump generation """ + + table = "AUTO_TECHSUPPORT" + key = "GLOBAL" + data = { + "state": state, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@AUTO_TECHSUPPORT_GLOBAL.command(name="rate-limit-interval") +@click.argument( + "rate-limit-interval", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_GLOBAL_rate_limit_interval(db, rate_limit_interval): + """ Minimum time in seconds between two successive techsupport invocations. 
Configure 0 to explicitly disable """
+
+    table = "AUTO_TECHSUPPORT"
+    key = "GLOBAL"
+    data = {
+        "rate_limit_interval": rate_limit_interval,
+    }
+    try:
+        update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True)
+    except Exception as err:
+        exit_with_error(f"Error: {err}", fg="red")
+
+
+@AUTO_TECHSUPPORT_GLOBAL.command(name="max-techsupport-limit")
+@click.argument(
+    "max-techsupport-limit",
+    nargs=1,
+    required=True,
+)
+@clicommon.pass_db
+def AUTO_TECHSUPPORT_GLOBAL_max_techsupport_limit(db, max_techsupport_limit):
+    """ Max Limit in percentage for the cumulative size of ts dumps.
+    No cleanup is performed if the value isn't configured or is 0.0
+    """
+
+    table = "AUTO_TECHSUPPORT"
+    key = "GLOBAL"
+    data = {
+        "max_techsupport_limit": max_techsupport_limit,
+    }
+    try:
+        update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True)
+    except Exception as err:
+        exit_with_error(f"Error: {err}", fg="red")
+
+
+@AUTO_TECHSUPPORT_GLOBAL.command(name="max-core-limit")
+@click.argument(
+    "max-core-limit",
+    nargs=1,
+    required=True,
+)
+@clicommon.pass_db
+def AUTO_TECHSUPPORT_GLOBAL_max_core_limit(db, max_core_limit):
+    """ Max Limit in percentage for the cumulative size of core dumps.
+    No cleanup is performed if the value isn't configured or is 0.0
+    """
+
+    table = "AUTO_TECHSUPPORT"
+    key = "GLOBAL"
+    data = {
+        "max_core_limit": max_core_limit,
+    }
+    try:
+        update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True)
+    except Exception as err:
+        exit_with_error(f"Error: {err}", fg="red")
+
+
+@AUTO_TECHSUPPORT_GLOBAL.command(name="since")
+@click.argument(
+    "since",
+    nargs=1,
+    required=True,
+)
+@clicommon.pass_db
+def AUTO_TECHSUPPORT_GLOBAL_since(db, since):
+    """ Only collect the logs & core-dumps generated since the time provided.
+    A default value of '2 days ago' is used if this value is not set explicitly or a non-valid string is provided """
+
+    table = "AUTO_TECHSUPPORT"
+    key = "GLOBAL"
+    data = {
+        "since": since,
+    }
+    try:
+        update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True)
+    except Exception as err:
+        exit_with_error(f"Error: {err}", fg="red")
+
+
+@click.group(name="auto-techsupport-feature",
+             cls=clicommon.AliasedGroup)
+def AUTO_TECHSUPPORT_FEATURE():
+    """ AUTO_TECHSUPPORT_FEATURE part of config_db.json """
+    pass
+
+
+@AUTO_TECHSUPPORT_FEATURE.command(name="add")
+@click.argument(
+    "feature-name",
+    nargs=1,
+    required=True,
+)
+@click.option(
+    "--state",
+    help="Enable auto techsupport invocation on the processes running inside this feature",
+)
+@click.option(
+    "--rate-limit-interval",
+    help="Rate limit interval for the corresponding feature. Configure 0 to explicitly disable",
+)
+@clicommon.pass_db
+def AUTO_TECHSUPPORT_FEATURE_add(db, feature_name, state, rate_limit_interval):
+    """ Add object in AUTO_TECHSUPPORT_FEATURE. 
""" + + table = "AUTO_TECHSUPPORT_FEATURE" + key = feature_name + data = {} + if state is not None: + data["state"] = state + if rate_limit_interval is not None: + data["rate_limit_interval"] = rate_limit_interval + + try: + add_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@AUTO_TECHSUPPORT_FEATURE.command(name="update") +@click.argument( + "feature-name", + nargs=1, + required=True, +) +@click.option( + "--state", + help="Enable auto techsupport invocation on the processes running inside this feature", +) +@click.option( + "--rate-limit-interval", + help="Rate limit interval for the corresponding feature. Configure 0 to explicitly disable", +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_FEATURE_update(db, feature_name, state, rate_limit_interval): + """ Add object in AUTO_TECHSUPPORT_FEATURE. """ + + table = "AUTO_TECHSUPPORT_FEATURE" + key = feature_name + data = {} + if state is not None: + data["state"] = state + if rate_limit_interval is not None: + data["rate_limit_interval"] = rate_limit_interval + + try: + update_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@AUTO_TECHSUPPORT_FEATURE.command(name="delete") +@click.argument( + "feature-name", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_FEATURE_delete(db, feature_name): + """ Delete object in AUTO_TECHSUPPORT_FEATURE. """ + + table = "AUTO_TECHSUPPORT_FEATURE" + key = feature_name + try: + del_entry_validated(db.cfgdb, table, key) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +def register(cli): + cli_node = AUTO_TECHSUPPORT + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(AUTO_TECHSUPPORT) + cli_node = AUTO_TECHSUPPORT_FEATURE + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(AUTO_TECHSUPPORT_FEATURE) diff --git a/counterpoll/main.py b/counterpoll/main.py index e04575d225..27557c170d 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -6,6 +6,7 @@ BUFFER_POOL_WATERMARK = "BUFFER_POOL_WATERMARK" PORT_BUFFER_DROP = "PORT_BUFFER_DROP" PG_DROP = "PG_DROP" +ACL = "ACL" DISABLE = "disable" ENABLE = "enable" DEFLT_60_SEC= "default (60000)" @@ -241,6 +242,45 @@ def disable(): configdb.mod_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_info) configdb.mod_entry("FLEX_COUNTER_TABLE", BUFFER_POOL_WATERMARK, fc_info) +# ACL counter commands +@cli.group() +@click.pass_context +def acl(ctx): + """ ACL counter commands """ + ctx.obj = ConfigDBConnector() + ctx.obj.connect() + +@acl.command() +@click.argument('poll_interval', type=click.IntRange(1000, 30000)) +@click.pass_context +def interval(ctx, poll_interval): + """ + Set ACL counters query interval + interval is between 1s and 30s. 
+ """ + + fc_group_cfg = {} + fc_group_cfg['POLL_INTERVAL'] = poll_interval + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ACL, fc_group_cfg) + +@acl.command() +@click.pass_context +def enable(ctx): + """ Enable ACL counter query """ + + fc_group_cfg = {} + fc_group_cfg['FLEX_COUNTER_STATUS'] = ENABLE + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ACL, fc_group_cfg) + +@acl.command() +@click.pass_context +def disable(ctx): + """ Disable ACL counter query """ + + fc_group_cfg = {} + fc_group_cfg['FLEX_COUNTER_STATUS'] = DISABLE + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ACL, fc_group_cfg) + # Tunnel counter commands @cli.group() def tunnel(): @@ -287,8 +327,9 @@ def show(): pg_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PG_WATERMARK') pg_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PG_DROP) buffer_pool_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', BUFFER_POOL_WATERMARK) + acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL) tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL') - + header = ("Type", "Interval (in ms)", "Status") data = [] if queue_info: @@ -307,6 +348,8 @@ def show(): data.append(['PG_DROP_STAT', pg_drop_info.get("POLL_INTERVAL", DEFLT_10_SEC), pg_drop_info.get("FLEX_COUNTER_STATUS", DISABLE)]) if buffer_pool_wm_info: data.append(["BUFFER_POOL_WATERMARK_STAT", buffer_pool_wm_info.get("POLL_INTERVAL", DEFLT_10_SEC), buffer_pool_wm_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + if acl_info: + data.append([ACL, pg_drop_info.get("POLL_INTERVAL", DEFLT_10_SEC), acl_info.get("FLEX_COUNTER_STATUS", DISABLE)]) if tunnel_info: data.append(["TUNNEL_STAT", rif_info.get("POLL_INTERVAL", DEFLT_10_SEC), rif_info.get("FLEX_COUNTER_STATUS", DISABLE)]) diff --git a/scripts/aclshow b/scripts/aclshow index 046cb72efe..1e7bfe4fcd 100755 --- a/scripts/aclshow +++ b/scripts/aclshow @@ -29,13 +29,15 @@ from tabulate import tabulate ### if we could have a SAI command to clear counters will be better, so no need to maintain ### counters in temp loaction for clear conter action COUNTER_POSITION = '/tmp/.counters_acl.p' +COUNTERS = "COUNTERS" +ACL_COUNTER_RULE_MAP = "ACL_COUNTER_RULE_MAP" ### acl display header ACL_HEADER = ["RULE NAME", "TABLE NAME", "PRIO", "PACKETS COUNT", "BYTES COUNT"] -# some constants for rule properties -PACKETS_COUNTER = "packets counter" -BYTES_COUNTER = "bytes counter" +COUNTER_PACKETS_ATTR = "SAI_ACL_COUNTER_ATTR_PACKETS" +COUNTER_BYTES_ATTR = "SAI_ACL_COUNTER_ATTR_BYTES" + class AclStat(object): """ @@ -91,8 +93,13 @@ class AclStat(object): read redis database for acl counters """ - def lowercase_keys(dictionary): - return dict((k.lower(), v) for k, v in dictionary.items()) if dictionary else None + def get_acl_rule_counter_map(): + """ + Return ACL_COUNTER_RULE_MAP + """ + if self.db.exists(self.db.COUNTERS_DB, ACL_COUNTER_RULE_MAP): + return self.db.get_all(self.db.COUNTERS_DB, ACL_COUNTER_RULE_MAP) + return {} def fetch_acl_tables(): """ @@ -124,8 +131,18 @@ class AclStat(object): """ Get ACL counters from the DB """ + counters_db_separator = self.db.get_db_separator(self.db.COUNTERS_DB) + rule_to_counter_map = get_acl_rule_counter_map() for table, rule in self.acl_rules: - cnt_props = lowercase_keys(self.db.get_all(self.db.COUNTERS_DB, "COUNTERS:%s:%s" % (table, rule))) + self.acl_counters[table, rule] = {} + rule_identifier = table + counters_db_separator + rule + if not rule_to_counter_map: + continue + counter_oid = rule_to_counter_map.get(rule_identifier) + if not counter_oid: + continue + counters_db_key = COUNTERS + counters_db_separator + 
counter_oid + cnt_props = self.db.get_all(self.db.COUNTERS_DB, counters_db_key) self.acl_counters[table, rule] = cnt_props if verboseflag: @@ -164,8 +181,8 @@ class AclStat(object): header = ACL_HEADER aclstat = [] for rule_key in self.acl_rules: - if not display_all and (self.get_counter_value(rule_key, 'packets') == '0' or \ - self.get_counter_value(rule_key, 'packets') == 'N/A'): + if not display_all and (self.get_counter_value(rule_key, COUNTER_PACKETS_ATTR) == '0' or \ + self.get_counter_value(rule_key, COUNTER_PACKETS_ATTR) == 'N/A'): continue rule = self.acl_rules[rule_key] rule_priority = -1 @@ -174,8 +191,8 @@ class AclStat(object): rule_priority = val line = [rule_key[1], rule_key[0], rule_priority, - self.get_counter_value(rule_key, 'packets'), - self.get_counter_value(rule_key, 'bytes')] + self.get_counter_value(rule_key, COUNTER_PACKETS_ATTR), + self.get_counter_value(rule_key, COUNTER_BYTES_ATTR)] aclstat.append(line) # sort the list with table name first and then descending priority diff --git a/scripts/coredump-compress b/scripts/coredump-compress index 53381fc00e..667d5f8a58 100755 --- a/scripts/coredump-compress +++ b/scripts/coredump-compress @@ -7,7 +7,9 @@ while [[ $# > 1 ]]; do shift done +CONTAINER_ID="" if [ $# > 0 ]; then + CONTAINER_ID=$(xargs -0 -L1 -a /proc/${1}/cgroup | grep -oP "pids:/docker/\K\w+") ns=`xargs -0 -L1 -a /proc/${1}/environ | grep -e "^NAMESPACE_ID" | cut -f2 -d'='` if [ ! -z ${ns} ]; then PREFIX=${PREFIX}${ns}. @@ -15,3 +17,18 @@ if [ $# > 0 ]; then fi /bin/gzip -1 - > /var/core/${PREFIX}core.gz + +if [[ ! -z $CONTAINER_ID ]]; then + CONTAINER_NAME=$(docker inspect --format='{{.Name}}' ${CONTAINER_ID} | cut -c2-) + if [[ ! -z ${CONTAINER_NAME} ]]; then + # coredump_gen_handler invokes techsupport if all the other required conditions are met + # explicitly passing in the env vars because coredump-compress's namespace doesn't have these set by default + for path in $(find /usr/local/lib/python3*/dist-packages -maxdepth 0); do + PYTHONPATH=$PYTHONPATH:$path + done + setsid $(echo > /tmp/coredump_gen_handler.log; + export PYTHONPATH=$PYTHONPATH; + python3 /usr/local/bin/coredump_gen_handler.py ${PREFIX}core.gz ${CONTAINER_NAME} &>> /tmp/coredump_gen_handler.log) & + fi +fi + diff --git a/scripts/coredump_gen_handler.py b/scripts/coredump_gen_handler.py new file mode 100644 index 0000000000..895c22146a --- /dev/null +++ b/scripts/coredump_gen_handler.py @@ -0,0 +1,185 @@ +""" +coredump_gen_handler script. + This script is invoked by the coredump-compress script + for auto techsupport invocation and cleanup core dumps. + For more info, refer to the Event Driven TechSupport & CoreDump Mgmt HLD +""" +import os +import time +import argparse +import syslog +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.auto_techsupport_helper import * + +# Explicity Pass this to the subprocess invoking techsupport +ENV_VAR = os.environ +PATH_PREV = ENV_VAR["PATH"] if "PATH" in ENV_VAR else "" +ENV_VAR["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:" + PATH_PREV + + +def handle_coredump_cleanup(dump_name, db): + file_path = os.path.join(CORE_DUMP_DIR, dump_name) + if not verify_recent_file_creation(file_path): + return + + _, num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) + + if db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + msg = "coredump_cleanup is disabled. No cleanup is performed. 
current size occupied: {}"
+        syslog.syslog(syslog.LOG_NOTICE, msg.format(pretty_size(num_bytes)))
+        return
+
+    core_usage = db.get(CFG_DB, AUTO_TS, CFG_CORE_USAGE)
+    try:
+        core_usage = float(core_usage)
+    except ValueError:
+        core_usage = 0.0
+
+    if not core_usage:
+        msg = "core-usage argument is not set. No cleanup is performed, current size occupied: {}"
+        syslog.syslog(syslog.LOG_NOTICE, msg.format(pretty_size(num_bytes)))
+        return
+
+    cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR)
+
+
+class CriticalProcCoreDumpHandle():
+    """
+    Class to handle coredump creation event for critical processes
+    """
+
+    def __init__(self, core_name, container_name, db):
+        self.core_name = core_name
+        self.container = container_name
+        self.db = db
+        self.proc_mp = {}
+        self.core_ts_map = {}
+        self.curr_ts_list = []
+
+    def handle_core_dump_creation_event(self):
+        file_path = os.path.join(CORE_DUMP_DIR, self.core_name)
+        if not verify_recent_file_creation(file_path):
+            syslog.syslog(syslog.LOG_INFO, "Spurious Invocation. {} is not created within last {} sec".format(file_path, TIME_BUF))
+            return
+
+        if self.db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled":
+            syslog.syslog(syslog.LOG_NOTICE, "auto_invoke_ts is disabled. No cleanup is performed: core {}".format(self.core_name))
+            return
+
+        # Config made for the default instance applies to all the multi-ASIC instances
+        self.container = trim_masic_suffix(self.container)
+
+        FEATURE_KEY = FEATURE.format(self.container)
+        if self.db.get(CFG_DB, FEATURE_KEY, CFG_STATE) != "enabled":
+            msg = "auto-techsupport feature for {} is not enabled. Techsupport Invocation is skipped. core: {}"
+            syslog.syslog(syslog.LOG_NOTICE, msg.format(self.container, self.core_name))
+            return
+
+        global_cooloff = self.db.get(CFG_DB, AUTO_TS, COOLOFF)
+        container_cooloff = self.db.get(CFG_DB, FEATURE_KEY, COOLOFF)
+
+        try:
+            global_cooloff = float(global_cooloff)
+        except ValueError:
+            global_cooloff = 0.0
+
+        try:
+            container_cooloff = float(container_cooloff)
+        except ValueError:
+            container_cooloff = 0.0
+
+        cooloff_passed = self.verify_rate_limit_intervals(global_cooloff, container_cooloff)
+        if cooloff_passed:
+            since_cfg = self.get_since_arg()
+            new_file = self.invoke_ts_cmd(since_cfg)
+            if new_file:
+                self.write_to_state_db(int(time.time()), new_file[0])
+
+    def write_to_state_db(self, timestamp, ts_dump):
+        name = strip_ts_ext(ts_dump)
+        key = TS_MAP + "|" + name
+        self.db.set(STATE_DB, key, CORE_DUMP, self.core_name)
+        self.db.set(STATE_DB, key, TIMESTAMP, str(timestamp))
+        self.db.set(STATE_DB, key, CONTAINER, self.container)
+
+    def get_since_arg(self):
+        since_cfg = self.db.get(CFG_DB, AUTO_TS, CFG_SINCE)
+        if not since_cfg:
+            return SINCE_DEFAULT
+        rc, _, stderr = subprocess_exec(["date", "--date='{}'".format(since_cfg)], env=ENV_VAR)
+        if rc == 0:
+            return since_cfg
+        return SINCE_DEFAULT
+
+    def invoke_ts_cmd(self, since_cfg):
+        since_cfg = "'" + since_cfg + "'"
+        cmd = " ".join(["show", "techsupport", "--since", since_cfg])
+        rc, _, stderr = subprocess_exec(["show", "techsupport", "--since", since_cfg], env=ENV_VAR)
+        if rc != 0:
+            syslog.syslog(syslog.LOG_ERR, "show techsupport failed with exit code {}, stderr:{}".format(rc, stderr))
+        new_list = get_ts_dumps(True)
+        diff = list(set(new_list).difference(set(self.curr_ts_list)))
+        self.curr_ts_list = new_list
+        if not diff:
+            syslog.syslog(syslog.LOG_ERR, "{} was run, but no techsupport dump is found".format(cmd))
+        else:
+            syslog.syslog(syslog.LOG_INFO, "{} is successful, {} is created".format(cmd, diff))
+        return 
diff + + def verify_rate_limit_intervals(self, global_cooloff, container_cooloff): + """Verify both the global and per-proc rate_limit_intervals have passed""" + self.curr_ts_list = get_ts_dumps(True) + if global_cooloff and self.curr_ts_list: + last_ts_dump_creation = os.path.getmtime(self.curr_ts_list[-1]) + if time.time() - last_ts_dump_creation < global_cooloff: + msg = "Global rate_limit_interval period has not passed. Techsupport Invocation is skipped. Core: {}" + syslog.syslog(syslog.LOG_INFO, msg.format(self.core_name)) + return False + + self.parse_ts_map() + if container_cooloff and self.container in self.core_ts_map: + last_creation_time = self.core_ts_map[self.container][0][0] + if time.time() - last_creation_time < container_cooloff: + msg = "Per Container rate_limit_interval for {} has not passed. Techsupport Invocation is skipped. Core: {}" + syslog.syslog(syslog.LOG_INFO, msg.format(self.container, self.core_name)) + return False + return True + + def parse_ts_map(self): + """Create proc_name, ts_dump & creation_time map""" + ts_keys = self.db.keys(STATE_DB, TS_MAP+"*") + if not ts_keys: + return + for ts_key in ts_keys: + data = self.db.get_all(STATE_DB, ts_key) + if not data: + continue + container_name = data.get(CONTAINER, "") + creation_time = data.get(TIMESTAMP, "") + try: + creation_time = int(creation_time) + except Exception: + continue # if the creation time is invalid, skip the entry + ts_dump = ts_key.split("|")[-1] + if container_name and container_name not in self.core_ts_map: + self.core_ts_map[container_name] = [] + self.core_ts_map[container_name].append((int(creation_time), ts_dump)) + for container_name in self.core_ts_map: + self.core_ts_map[container_name].sort() + +def main(): + parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') + parser.add_argument('name', type=str, help='Core Dump Name') + parser.add_argument('container', type=str, help='Container Name') + args = parser.parse_args() + syslog.openlog(logoption=syslog.LOG_PID) + db = SonicV2Connector(use_unix_socket_path=True) + db.connect(CFG_DB) + db.connect(STATE_DB) + cls = CriticalProcCoreDumpHandle(args.name, args.container, db) + cls.handle_core_dump_creation_event() + handle_coredump_cleanup(args.name, db) + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_dump b/scripts/generate_dump index 63cf6e52cf..d5f3bb6731 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1338,6 +1338,10 @@ main() { fi fi + # Invoke the TechSupport Cleanup Hook + setsid $(echo > /tmp/techsupport_cleanup.log; + python3 /usr/local/bin/techsupport_cleanup.py ${TARFILE} &>> /tmp/techsupport_cleanup.log) & + echo ${TARFILE} if ! $SAVE_STDERR diff --git a/scripts/techsupport_cleanup.py b/scripts/techsupport_cleanup.py new file mode 100644 index 0000000000..53a10562e7 --- /dev/null +++ b/scripts/techsupport_cleanup.py @@ -0,0 +1,59 @@ +""" +techsupport_cleanup script. 
+ This script is invoked by the generate_dump script for techsupport cleanup + For more info, refer to the Event Driven TechSupport & CoreDump Mgmt HLD +""" +import os +import argparse +import syslog +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.auto_techsupport_helper import * + + +def clean_state_db_entries(removed_files, db): + if not removed_files: + return + for file in removed_files: + name = strip_ts_ext(file) + db.delete(STATE_DB, TS_MAP + "|" + name) + + +def handle_techsupport_creation_event(dump_name, db): + file_path = os.path.join(TS_DIR, dump_name) + if not verify_recent_file_creation(file_path): + return + _ , num_bytes = get_stats(os.path.join(TS_DIR, TS_PTRN)) + + if db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + msg = "techsupport_cleanup is disabled. No cleanup is performed. current size occupied : {}" + syslog.syslog(syslog.LOG_NOTICE, msg.format(pretty_size(num_bytes))) + return + + max_ts = db.get(CFG_DB, AUTO_TS, CFG_MAX_TS) + try: + max_ts = float(max_ts) + except ValueError: + max_ts = 0.0 + + if not max_ts: + msg = "max-techsupport-limit argument is not set. No cleanup is performed, current size occupied: {}" + syslog.syslog(syslog.LOG_NOTICE, msg.format(pretty_size(num_bytes))) + return + + removed_files = cleanup_process(max_ts, TS_PTRN, TS_DIR) + clean_state_db_entries(removed_files, db) + + +def main(): + parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') + parser.add_argument('name', type=str, help='TechSupport Dump Name') + args = parser.parse_args() + syslog.openlog(logoption=syslog.LOG_PID) + db = SonicV2Connector(use_unix_socket_path=True) + db.connect(CFG_DB) + db.connect(STATE_DB) + handle_techsupport_creation_event(args.name, db) + + +if __name__ == "__main__": + main() diff --git a/setup.py b/setup.py index ae20fdae06..5c586058f9 100644 --- a/setup.py +++ b/setup.py @@ -23,8 +23,10 @@ 'acl_loader', 'clear', 'clear.plugins', + 'clear.plugins.auto', 'config', 'config.plugins', + 'config.plugins.auto', 'connect', 'consutil', 'counterpoll', @@ -48,6 +50,7 @@ 'show', 'show.interfaces', 'show.plugins', + 'show.plugins.auto', 'sonic_installer', 'sonic_installer.bootloader', 'sonic_package_manager', @@ -56,6 +59,7 @@ 'undebug', 'utilities_common', 'watchdogutil', + 'sonic_cli_gen', ], package_data={ 'generic_config_updater': ['generic_updater_config.conf.json'], @@ -138,8 +142,10 @@ 'scripts/sonic-kdump-config', 'scripts/centralize_database', 'scripts/null_route_helper', - 'scripts/check_db_integrity.py', - 'scripts/storm_control.py' + 'scripts/coredump_gen_handler.py', + 'scripts/techsupport_cleanup.py', + 'scripts/storm_control.py', + 'scripts/check_db_integrity.py' ], entry_points={ 'console_scripts': [ @@ -171,6 +177,7 @@ 'spm = sonic_package_manager.main:cli', 'undebug = undebug.main:cli', 'watchdogutil = watchdogutil.main:watchdogutil', + 'sonic-cli-gen = sonic_cli_gen.main:cli', ] }, install_requires=[ @@ -210,6 +217,7 @@ ], tests_require = [ 'pyfakefs', + 'responses', 'pytest', 'mockredispy>=2.9.3', 'deepdiff==5.2.3' diff --git a/show/main.py b/show/main.py index 37315400e8..c09bab77a9 100755 --- a/show/main.py +++ b/show/main.py @@ -1765,8 +1765,7 @@ def ztp(status, verbose): # Load plugins and register them helper = util_base.UtilHelper() -for plugin in helper.load_plugins(plugins): - helper.register_plugin(plugin, cli) +helper.load_and_register_plugins(plugins, cli) if __name__ == '__main__': cli() diff --git a/show/platform.py b/show/platform.py index 
c713472080..1916e10d84 100644
--- a/show/platform.py
+++ b/show/platform.py
@@ -64,6 +64,9 @@ def summary(json):
     click.echo("Serial Number: {}".format(chassis_info['serial']))
     click.echo("Model Number: {}".format(chassis_info['model']))
     click.echo("Hardware Revision: {}".format(chassis_info['revision']))
+    switch_type = platform_info.get('switch_type')
+    if switch_type:
+        click.echo("Switch Type: {}".format(switch_type))
 
 
 # 'syseeprom' subcommand ("show platform syseeprom")
diff --git a/show/plugins/auto/__init__.py b/show/plugins/auto/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/show/plugins/auto_techsupport.py b/show/plugins/auto_techsupport.py
new file mode 100644
index 0000000000..9bcda1b04c
--- /dev/null
+++ b/show/plugins/auto_techsupport.py
@@ -0,0 +1,159 @@
+"""
+Auto-generated show CLI plugin.
+Manually Edited to add show cli for "show auto_techsupport history"
+"""
+
+import click
+import tabulate
+import natsort
+import utilities_common.cli as clicommon
+
+
+def format_attr_value(entry, attr):
+    """ Helper that formats attribute to be presented in the table output.
+
+    Args:
+        entry (Dict[str, str]): CONFIG DB entry configuration.
+        attr (Dict): Attribute metadata.
+
+    Returns:
+        str: formatted attribute value.
+    """
+
+    if attr["is-leaf-list"]:
+        return "\n".join(entry.get(attr["name"], []))
+    return entry.get(attr["name"], "N/A")
+
+
+def format_group_value(entry, attrs):
+    """ Helper that formats grouped attribute to be presented in the table output.
+
+    Args:
+        entry (Dict[str, str]): CONFIG DB entry configuration.
+        attrs (List[Dict]): Attributes metadata that belongs to the same group.
+
+    Returns:
+        str: formatted group attributes.
+    """
+
+    data = []
+    for attr in attrs:
+        if entry.get(attr["name"]):
+            data.append((attr["name"] + ":", format_attr_value(entry, attr)))
+    return tabulate.tabulate(data, tablefmt="plain")
+
+
+@click.group(name="auto-techsupport",
+             cls=clicommon.AliasedGroup)
+def AUTO_TECHSUPPORT():
+    """ AUTO_TECHSUPPORT part of config_db.json """
+
+    pass
+
+
+@AUTO_TECHSUPPORT.command(name="global")
+@clicommon.pass_db
+def AUTO_TECHSUPPORT_GLOBAL(db):
+    """ """
+
+    header = [
+        "STATE",
+        "RATE LIMIT INTERVAL (sec)",
+        "MAX TECHSUPPORT LIMIT (%)",
+        "MAX CORE LIMIT (%)",
+        "SINCE",
+    ]
+
+    body = []
+    table = db.cfgdb.get_table("AUTO_TECHSUPPORT")
+    entry = table.get("GLOBAL", {})
+    row = [
+        format_attr_value(
+            entry,
+            {'name': 'state', 'description': 'Knob to make techsupport invocation event-driven based on core-dump generation', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'rate_limit_interval', 'description': 'Minimum time in seconds between two successive techsupport invocations. Configure 0 to explicitly disable', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'max_techsupport_limit', 'description': 'Max Limit in percentage for the cumulative size of ts dumps. No cleanup is performed if the value isn\'t configured or is 0.0', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'max_core_limit', 'description': 'Max Limit in percentage for the cumulative size of core dumps. No cleanup is performed if the value isn\'t configured or is 0.0', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'since', 'description': "Only collect the logs & core-dumps generated since the time provided. 
A default value of '2 days ago' is used if this value is not set explicitly or a non-valid string is provided", 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + ] + + body.append(row) + click.echo(tabulate.tabulate(body, header, numalign="left")) + + +@AUTO_TECHSUPPORT.command(name="history") +@clicommon.pass_db +def AUTO_TECHSUPPORT_history(db): + keys = db.db.keys("STATE_DB", "AUTO_TECHSUPPORT_DUMP_INFO|*") + header = ["TECHSUPPORT DUMP", "TRIGGERED BY", "CORE DUMP"] + body = [] + for key in keys: + dump = key.split("|")[-1] + fv_pairs = db.db.get_all("STATE_DB", key) + core_dump = fv_pairs.get("core_dump", "") + container = fv_pairs.get("container_name", "") + body.append([dump, container, core_dump]) + click.echo(tabulate.tabulate(body, header, numalign="left")) + + +@click.group(name="auto-techsupport-feature", + cls=clicommon.AliasedGroup, + invoke_without_command=True) +@clicommon.pass_db +def AUTO_TECHSUPPORT_FEATURE(db): + """ [Callable command group] """ + + header = [ + "FEATURE NAME", + "STATE", + "RATE LIMIT INTERVAL (sec)", + ] + + body = [] + + table = db.cfgdb.get_table("AUTO_TECHSUPPORT_FEATURE") + for key in natsort.natsorted(table): + entry = table[key] + if not isinstance(key, tuple): + key = (key,) + + row = [*key] + [ + format_attr_value( + entry, + {'name': 'state', 'description': 'Enable auto techsupport invocation on the processes running inside this feature', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'rate_limit_interval', 'description': 'Rate limit interval for the corresponding feature. Configure 0 to explicitly disable', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + ] + body.append(row) + click.echo(tabulate.tabulate(body, header, numalign="left")) + + +def register(cli): + cli_node = AUTO_TECHSUPPORT + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(AUTO_TECHSUPPORT) + cli_node = AUTO_TECHSUPPORT_FEATURE + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(AUTO_TECHSUPPORT_FEATURE) + cli_node = AUTO_TECHSUPPORT_history + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(AUTO_TECHSUPPORT_history) diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py index d7cd929e02..95115d976d 100644 --- a/show/plugins/pbh.py +++ b/show/plugins/pbh.py @@ -16,6 +16,12 @@ PBH_COUNTERS_LOCATION = '/tmp/.pbh_counters.txt' +COUNTER_PACKETS_ATTR = "SAI_ACL_COUNTER_ATTR_PACKETS" +COUNTER_BYTES_ATTR = "SAI_ACL_COUNTER_ATTR_BYTES" + +COUNTERS = "COUNTERS" +ACL_COUNTER_RULE_MAP = "ACL_COUNTER_RULE_MAP" + pbh_hash_field_tbl_name = 'PBH_HASH_FIELD' pbh_hash_tbl_name = 'PBH_HASH' pbh_table_tbl_name = 'PBH_TABLE' @@ -377,8 +383,8 @@ def PBH_STATISTICS(db): row = [ key[0], key[1], - get_counter_value(pbh_counters, saved_pbh_counters, key, 'packets'), - get_counter_value(pbh_counters, saved_pbh_counters, key, 'bytes'), + get_counter_value(pbh_counters, saved_pbh_counters, key, COUNTER_PACKETS_ATTR), + get_counter_value(pbh_counters, saved_pbh_counters, key, COUNTER_BYTES_ATTR), ] body.append(row) @@ -415,14 +421,30 @@ def read_saved_pbh_counters(): return {} +def read_acl_rule_counter_map(db_connector): + if db_connector.exists(db_connector.COUNTERS_DB, ACL_COUNTER_RULE_MAP): + return db_connector.get_all(db_connector.COUNTERS_DB, ACL_COUNTER_RULE_MAP) + return {} + + def read_pbh_counters(pbh_rules) -> dict: 
pbh_counters = {} db_connector = SonicV2Connector(use_unix_socket_path=False) db_connector.connect(db_connector.COUNTERS_DB) + counters_db_separator = db_connector.get_db_separator(db_connector.COUNTERS_DB) + rule_to_counter_map = read_acl_rule_counter_map(db_connector) for table, rule in natsort.natsorted(pbh_rules): - counter_props = lowercase_keys(db_connector.get_all(db_connector.COUNTERS_DB, "COUNTERS:%s:%s" % (table, rule))) + pbh_counters[table, rule] = {} + rule_identifier = table + counters_db_separator + rule + if not rule_to_counter_map: + continue + counter_oid = rule_to_counter_map.get(rule_identifier) + if not counter_oid: + continue + counters_db_key = COUNTERS + counters_db_separator + counter_oid + counter_props = db_connector.get_all(db_connector.COUNTERS_DB, counters_db_key) if counter_props: pbh_counters[table, rule] = counter_props @@ -457,10 +479,6 @@ def inject_symmetric_field(obj_list): counter = 0 -def lowercase_keys(dictionary): - return dict((k.lower(), v) for k, v in dictionary.items()) if dictionary else None - - def register(cli): cli_node = PBH if cli_node.name in cli.commands: diff --git a/sonic-utilities-data/bash_completion.d/sonic-cli-gen b/sonic-utilities-data/bash_completion.d/sonic-cli-gen new file mode 100644 index 0000000000..3327f9c513 --- /dev/null +++ b/sonic-utilities-data/bash_completion.d/sonic-cli-gen @@ -0,0 +1,8 @@ +_sonic_cli_gen_completion() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _SONIC_CLI_GEN_COMPLETE=complete $1 ) ) + return 0 +} + +complete -F _sonic_cli_gen_completion -o default sonic-cli-gen; diff --git a/sonic-utilities-data/debian/install b/sonic-utilities-data/debian/install index 82d087d54d..1f67b78c20 100644 --- a/sonic-utilities-data/debian/install +++ b/sonic-utilities-data/debian/install @@ -1,2 +1,3 @@ -bash_completion.d/ /etc/ -templates/*.j2 /usr/share/sonic/templates/ +bash_completion.d/ /etc/ +templates/*.j2 /usr/share/sonic/templates/ +templates/sonic-cli-gen/*.j2 /usr/share/sonic/templates/sonic-cli-gen/ diff --git a/sonic-utilities-data/templates/sonic-cli-gen/common.j2 b/sonic-utilities-data/templates/sonic-cli-gen/common.j2 new file mode 100644 index 0000000000..3b83ee5635 --- /dev/null +++ b/sonic-utilities-data/templates/sonic-cli-gen/common.j2 @@ -0,0 +1,3 @@ +{% macro cli_name(name) -%} +{{ name|lower|replace("_", "-") }} +{%- endmacro %} diff --git a/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 b/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 new file mode 100644 index 0000000000..7706ae3940 --- /dev/null +++ b/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 @@ -0,0 +1,570 @@ +{%- from "common.j2" import cli_name -%} +""" +Autogenerated config CLI plugin. +{% if source_template is defined %} +Source template: {{ source_template }} +{% endif %} +{% if source_yang_module is defined %} +Source YANG module: {{ source_yang_module }} +{% endif %} +""" + +import copy +import click +import utilities_common.cli as clicommon +import utilities_common.general as general +from config import config_mgmt + + +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = general.load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + + +def exit_with_error(*args, **kwargs): + """ Print a message with click.secho and abort CLI. 
+
+    Args:
+        args: Positional arguments to pass to click.secho
+        kwargs: Keyword arguments to pass to click.secho
+    """
+
+    click.secho(*args, **kwargs)
+    raise click.Abort()
+
+
+def validate_config_or_raise(cfg):
+    """ Validate config db data using ConfigMgmt.
+
+    Args:
+        cfg (Dict): Config DB data to validate.
+    Raises:
+        Exception: when cfg does not satisfy YANG schema.
+    """
+
+    try:
+        cfg = sonic_cfggen.FormatConverter.to_serialized(copy.deepcopy(cfg))
+        config_mgmt.ConfigMgmt().loadData(cfg)
+    except Exception as err:
+        raise Exception('Failed to validate configuration: {}'.format(err))
+
+
+def add_entry_validated(db, table, key, data):
+    """ Add new entry in table and validate configuration.
+
+    Args:
+        db (swsscommon.ConfigDBConnector): Config DB connector object.
+        table (str): Table name to add new entry to.
+        key (Union[str, Tuple]): Key name in the table.
+        data (Dict): Entry data.
+    Raises:
+        Exception: when cfg does not satisfy YANG schema.
+    """
+
+    cfg = db.get_config()
+    cfg.setdefault(table, {})
+    if key in cfg[table]:
+        raise Exception(f"{key} already exists")
+
+    cfg[table][key] = data
+
+    validate_config_or_raise(cfg)
+    db.set_entry(table, key, data)
+
+
+def update_entry_validated(db, table, key, data, create_if_not_exists=False):
+    """ Update entry in table and validate configuration.
+        If attribute value in data is None, the attribute is deleted.
+
+    Args:
+        db (swsscommon.ConfigDBConnector): Config DB connector object.
+        table (str): Table name to add new entry to.
+        key (Union[str, Tuple]): Key name in the table.
+        data (Dict): Entry data.
+        create_if_not_exists (bool):
+            If True, create a new entry when it does not exist;
+            if False, raise an exception instead.
+    Raises:
+        Exception: when cfg does not satisfy YANG schema.
+    """
+
+    cfg = db.get_config()
+    cfg.setdefault(table, {})
+
+    if not data:
+        raise Exception(f"No field/values to update {key}")
+
+    if create_if_not_exists:
+        cfg[table].setdefault(key, {})
+
+    if key not in cfg[table]:
+        raise Exception(f"{key} does not exist")
+
+    entry_changed = False
+    for attr, value in data.items():
+        if value == cfg[table][key].get(attr):
+            continue
+        entry_changed = True
+        if value is None:
+            cfg[table][key].pop(attr, None)
+        else:
+            cfg[table][key][attr] = value
+
+    if not entry_changed:
+        return
+
+    validate_config_or_raise(cfg)
+    db.set_entry(table, key, cfg[table][key])
+
+
+def del_entry_validated(db, table, key):
+    """ Delete entry in table and validate configuration.
+
+    Args:
+        db (swsscommon.ConfigDBConnector): Config DB connector object.
+        table (str): Table name to delete entry from.
+        key (Union[str, Tuple]): Key name in the table.
+    Raises:
+        Exception: when cfg does not satisfy YANG schema.
+    """
+
+    cfg = db.get_config()
+    cfg.setdefault(table, {})
+    if key not in cfg[table]:
+        raise Exception(f"{key} does not exist")
+
+    cfg[table].pop(key)
+
+    validate_config_or_raise(cfg)
+    db.set_entry(table, key, None)
+
+
+def add_list_entry_validated(db, table, key, attr, data):
+    """ Add new entry into list in table and validate configuration.
+
+    Args:
+        db (swsscommon.ConfigDBConnector): Config DB connector object.
+        table (str): Table name to add data to.
+        key (Union[str, Tuple]): Key name in the table.
+        attr (str): Attribute name which represents a list the data needs to be added to.
+        data (List): Data list to add to config DB.
+    Raises:
+        Exception: when cfg does not satisfy YANG schema. 
+ """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + cfg[table][key].setdefault(attr, []) + for entry in data: + if entry in cfg[table][key][attr]: + raise Exception(f"{entry} already exists") + cfg[table][key][attr].append(entry) + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_list_entry_validated(db, table, key, attr, data): + """ Delete entry from list in table and validate configuration. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector obect. + table (str): Table name to remove data from. + key (Union[str, Tuple]): Key name in the table. + attr (str): Attribute name which represents a list the data needs to be removed from. + data (Dict): Data list to remove from config DB. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + cfg[table][key].setdefault(attr, []) + for entry in data: + if entry not in cfg[table][key][attr]: + raise Exception(f"{entry} does not exist") + cfg[table][key][attr].remove(entry) + if not cfg[table][key][attr]: + cfg[table][key].pop(attr) + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def clear_list_entry_validated(db, table, key, attr): + """ Clear list in object and validate configuration. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector obect. + table (str): Table name to remove the list attribute from. + key (Union[str, Tuple]): Key name in the table. + attr (str): Attribute name which represents a list that needs to be removed. + Raises: + Exception: when cfg does not satisfy YANG schema. 
+ """ + + update_entry_validated(db, table, key, {attr: None}) + + +{# Generate click arguments macro +Jinja2 Call: + {{ gen_click_arguments([{"name": "leaf1", "is-leaf-list": False}, + {"name": "leaf2", "is-leaf-list": Talse}) }} +Result: +@click.argument( + "leaf1", + nargs=1, + required=True, +) +@click.argument( + "leaf2", + nargs=-1, + required=True, +) +#} +{%- macro gen_click_arguments(attrs) -%} +{%- for attr in attrs %} +@click.argument( + "{{ cli_name(attr.name) }}", + nargs={% if attr["is-leaf-list"] %}-1{% else %}1{% endif %}, + required=True, +) +{%- endfor %} +{%- endmacro %} + + +{# Generate click options macro +Jinja2 Call: + {{ gen_click_arguments([{"name": "leaf1", "is-mandatory": True, "description": "leaf1-desc"}, + {"name": "leaf2", "is-mandatory": False, "description": "leaf2-desc"}) }} +Result: +@click.option( + "--leaf1", + help="leaf1-desc [mandatory]", +) +@click.option( + "--leaf2", + help="leaf2-desc", +) +#} +{%- macro gen_click_options(attrs) -%} +{%- for attr in attrs %} +@click.option( + "--{{ cli_name(attr.name) }}", + help="{{ attr.description }}{% if attr['is-mandatory'] %}[mandatory]{% endif %}", +) +{%- endfor %} +{%- endmacro %} + +{# Generate valid python identifier from input names #} +{% macro pythonize(attrs) -%} +{{ attrs|map(attribute="name")|map("lower")|map("replace", "-", "_")|join(", ") }} +{%- endmacro %} + +{% macro gen_cfg_obj_list_update(group, table, object, attr) %} +{% set list_update_group = group + "_" + attr.name %} + +@{{ group }}.group(name="{{ cli_name(attr.name) }}", + cls=clicommon.AliasedGroup) +def {{ list_update_group }}(): + """ Add/Delete {{ attr.name }} in {{ table.name }} """ + + pass + +{# Add entries to list attribute config CLI generation +E.g: + @TABLE_object.command(name="add") + @click.argument("key1", nargs=1) + @click.argument("key2", nargs=1) + @click.argument("attribute", nargs=-1) + def TABLE_object_attribute_add(db, key1, key2, attribute): +#} +@{{ list_update_group }}.command(name="add") +{{ gen_click_arguments(object["keys"] + [attr]) }} +@clicommon.pass_db +def {{ list_update_group }}_add( + db, + {{ pythonize(object["keys"] + [attr]) }} +): + """ Add {{ attr.name }} in {{ table.name }} """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + attr = "{{ attr.name }}" + data = {{ pythonize([attr]) }} + + try: + add_list_entry_validated(db.cfgdb, table, key, attr, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +{# Delete entries from list attribute config CLI generation +E.g: + @TABLE_object.command(name="delete") + @click.argument("key1", nargs=1) + @click.argument("key2", nargs=1) + @click.argument("attribute", nargs=-1) + def TABLE_object_attribute_delete(db, key1, key2, attribute): +#} +@{{ list_update_group }}.command(name="delete") +{{ gen_click_arguments(object["keys"] + [attr]) }} +@clicommon.pass_db +def {{ list_update_group }}_delete( + db, + {{ pythonize(object["keys"] + [attr]) }} +): + """ Delete {{ attr.name }} in {{ table.name }} """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + attr = "{{ attr.name }}" + data = {{ pythonize([attr]) }} + + try: + del_list_entry_validated(db.cfgdb, table, key, attr, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +{# Clear entries from list attribute config CLI generation +E.g: + @TABLE_object.command(name="delete") + @click.argument("key1", nargs=1) + @click.argument("key2", nargs=1) + def TABLE_object_attribute_clear(db, key1, key2): +#} +@{{ 
list_update_group }}.command(name="clear") +{{ gen_click_arguments(object["keys"]) }} +@clicommon.pass_db +def {{ list_update_group }}_clear( + db, + {{ pythonize(object["keys"]) }} +): + """ Clear {{ attr.name }} in {{ table.name }} """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + attr = "{{ attr.name }}" + + try: + clear_list_entry_validated(db.cfgdb, table, key, attr) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + +{% endmacro %} + + +{% macro gen_cfg_obj_list_update_all(group, table, object) %} +{% for attr in object.attrs %} +{% if attr["is-leaf-list"] %} +{{ gen_cfg_obj_list_update(group, table, object, attr) }} +{% endif %} +{% endfor %} +{% endmacro %} + + +{% macro gen_cfg_static_obj_attr(table, object, attr) %} +@{{ table.name }}_{{ object.name }}.command(name="{{ cli_name(attr.name) }}") +{{ gen_click_arguments([attr]) }} +@clicommon.pass_db +def {{ table.name }}_{{ object.name }}_{{ attr.name }}(db, {{ pythonize([attr]) }}): + """ {{ attr.description }} """ + + table = "{{ table.name }}" + key = "{{ object.name }}" + data = { + "{{ attr.name }}": {{ pythonize([attr]) }}, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") +{% endmacro %} + + +{# Static objects config CLI generation +E.g: + @TABLE.group(name="object") + def TABLE_object(db): +#} +{% macro gen_cfg_static_obj(table, object) %} +@{{ table.name }}.group(name="{{ cli_name(object.name) }}", + cls=clicommon.AliasedGroup) +@clicommon.pass_db +def {{ table.name }}_{{ object.name }}(db): + """ {{ object.description }} """ + + pass + +{# Static objects attributes config CLI generation +E.g: + @TABLE_object.command(name="attribute") + def TABLE_object_attribute(db, attribute): +#} +{% for attr in object.attrs %} +{{ gen_cfg_static_obj_attr(table, object, attr) }} +{% endfor %} + +{{ gen_cfg_obj_list_update_all(table.name + "_" + object.name, table, object) }} +{% endmacro %} + +{# Dynamic objects config CLI generation #} + +{# Dynamic objects add command +E.g: + @TABLE.command(name="add") + @click.argument("key1") + @click.argument("key2") + @click.option("--attr1") + @click.option("--attr2") + @click.option("--attr3") + def TABLE_TABLE_LIST_add(db, key1, key2, attr1, attr2, attr3): +#} +{% macro gen_cfg_dyn_obj_add(group, table, object) %} +@{{ group }}.command(name="add") +{{ gen_click_arguments(object["keys"]) }} +{{ gen_click_options(object.attrs) }} +@clicommon.pass_db +def {{ group }}_add(db, {{ pythonize(object["keys"] + object.attrs) }}): + """ Add object in {{ table.name }}. 
""" + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + data = {} +{%- for attr in object.attrs %} + if {{ pythonize([attr]) }} is not None: +{%- if not attr["is-leaf-list"] %} + data["{{ attr.name }}"] = {{ pythonize([attr]) }} +{%- else %} + data["{{ attr.name }}"] = {{ pythonize([attr]) }}.split(",") +{%- endif %} +{%- endfor %} + + try: + add_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") +{% endmacro %} + +{# Dynamic objects update command +E.g: + @TABLE.command(name="update") + @click.argument("key1") + @click.argument("key2") + @click.option("--attr1") + @click.option("--attr2") + @click.option("--attr3") + def TABLE_TABLE_LIST_update(db, key1, key2, attr1, attr2, attr3): +#} +{% macro gen_cfg_dyn_obj_update(group, table, object) %} +@{{ group }}.command(name="update") +{{ gen_click_arguments(object["keys"]) }} +{{ gen_click_options(object.attrs) }} +@clicommon.pass_db +def {{ group }}_update(db, {{ pythonize(object["keys"] + object.attrs) }}): + """ Add object in {{ table.name }}. """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + data = {} +{%- for attr in object.attrs %} + if {{ pythonize([attr]) }} is not None: +{%- if not attr["is-leaf-list"] %} + data["{{ attr.name }}"] = {{ pythonize([attr]) }} +{%- else %} + data["{{ attr.name }}"] = {{ pythonize([attr]) }}.split(",") +{%- endif %} +{%- endfor %} + + try: + update_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") +{% endmacro %} + +{# Dynamic objects delete command +E.g: + @TABLE.command(name="delete") + @click.argument("key1") + @click.argument("key2") + def TABLE_TABLE_LIST_delete(db, key1, key2): +#} +{% macro gen_cfg_dyn_obj_delete(group, table, object) %} +@{{ group }}.command(name="delete") +{{ gen_click_arguments(object["keys"]) }} +@clicommon.pass_db +def {{ group }}_delete(db, {{ pythonize(object["keys"]) }}): + """ Delete object in {{ table.name }}. """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + try: + del_entry_validated(db.cfgdb, table, key) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") +{% endmacro %} + +{% macro gen_cfg_dyn_obj(table, object) %} +{# Generate another nested group in case table holds two types of objects #} +{% if table["dynamic-objects"]|length > 1 %} +{% set group = table.name + "_" + object.name %} +@{{ table.name }}.group(name="{{ cli_name(object.name) }}", + cls=clicommon.AliasedGroup) +def {{ group }}(): + """ {{ object.description }} """ + + pass +{% else %} +{% set group = table.name %} +{% endif %} + +{{ gen_cfg_dyn_obj_add(group, table, object) }} +{{ gen_cfg_dyn_obj_update(group, table, object) }} +{{ gen_cfg_dyn_obj_delete(group, table, object) }} +{{ gen_cfg_obj_list_update_all(group, table, object) }} +{% endmacro %} + + +{% for table in tables %} +@click.group(name="{{ cli_name(table.name) }}", + cls=clicommon.AliasedGroup) +def {{ table.name }}(): + """ {{ table.description }} """ + + pass + +{% if "static-objects" in table %} +{% for object in table["static-objects"] %} +{{ gen_cfg_static_obj(table, object) }} +{% endfor %} +{% endif %} + +{% if "dynamic-objects" in table %} +{% for object in table["dynamic-objects"] %} +{{ gen_cfg_dyn_obj(table, object) }} +{% endfor %} +{% endif %} + +{% endfor %} + +def register(cli): + """ Register new CLI nodes in root CLI. + + Args: + cli: Root CLI node. 
+    Raises:
+        Exception: when root CLI already has a command
+                   we are trying to register.
+    """
+
+{%- for table in tables %}
+    cli_node = {{ table.name }}
+    if cli_node.name in cli.commands:
+        raise Exception(f"{cli_node.name} already exists in CLI")
+    cli.add_command({{ table.name }})
+{%- endfor %}
diff --git a/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 b/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2
new file mode 100644
index 0000000000..2a3d065fdf
--- /dev/null
+++ b/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2
@@ -0,0 +1,254 @@
+{% from "common.j2" import cli_name -%}
+"""
+Auto-generated show CLI plugin.
+{% if source_template is defined %}
+Source template: {{ source_template }}
+{% endif %}
+{% if source_yang_module is defined %}
+Source YANG module: {{ source_yang_module }}
+{% endif %}
+"""
+
+import click
+import tabulate
+import natsort
+import utilities_common.cli as clicommon
+
+
+{% macro column_name(name) -%}
+{{ name|upper|replace("_", " ")|replace("-", " ") }}
+{%- endmacro %}
+
+
+def format_attr_value(entry, attr):
+    """ Helper that formats attribute to be presented in the table output.
+
+    Args:
+        entry (Dict[str, str]): CONFIG DB entry configuration.
+        attr (Dict): Attribute metadata.
+
+    Returns:
+        str: formatted attribute value.
+    """
+
+    if attr["is-leaf-list"]:
+        return "\n".join(entry.get(attr["name"], []))
+    return entry.get(attr["name"], "N/A")
+
+
+def format_group_value(entry, attrs):
+    """ Helper that formats grouped attribute to be presented in the table output.
+
+    Args:
+        entry (Dict[str, str]): CONFIG DB entry configuration.
+        attrs (List[Dict]): Attributes metadata that belongs to the same group.
+
+    Returns:
+        str: formatted group attributes.
+    """
+
+    data = []
+    for attr in attrs:
+        if entry.get(attr["name"]):
+            data.append((attr["name"] + ":", format_attr_value(entry, attr)))
+    return tabulate.tabulate(data, tablefmt="plain")
+
+
+{# Generates a python list that represents a row in the table view.
+E.g:
+Jinja2:
+{{
+    gen_row("entry", [
+        {"name": "leaf1"},
+        {"name": "leaf_1"},
+        {"name": "leaf_2"},
+        {"name": "leaf_3", "group": "group_0"}
+    ])
+}}
+Result:
+[
+    format_attr_value(
+        entry,
+        {'name': 'leaf1'}
+    ),
+    format_attr_value(
+        entry,
+        {'name': 'leaf_1'}
+    ),
+    format_attr_value(
+        entry,
+        {'name': 'leaf_2'}
+    ),
+    format_group_value(
+        entry,
+        [{'name': 'leaf_3', 'group': 'group_0'}]
+    ),
+]
+#}
+{% macro gen_row(entry, attrs) -%}
+[
+{%- for attr in attrs|rejectattr("group", "defined") %}
+    format_attr_value(
+        {{ entry }},
+        {{ attr }}
+    ),
+{%- endfor %}
+{%- for group, attrs in attrs|selectattr("group", "defined")|groupby("group") %}
+{%- if group == "" %}
+{%- for attr in attrs %}
+    format_attr_value(
+        {{ entry }},
+        {{ attr }}
+    ),
+{%- endfor %}
+{%- else %}
+    format_group_value(
+        {{ entry }},
+        {{ attrs }}
+    ),
+{%- endif %}
+{%- endfor %}
+]
+{% endmacro %}
+
+{# Generates a list that represents a header in table view. 
+E.g: +Jinja2: {{ + gen_header([ + {"name": "key"}, + {"name": "leaf_1"}, + {"name": "leaf_2"}, + {"name": "leaf_3", "group": "group_0"} + ]) + }} + +Result: +[ + "KEY", + "LEAF 1", + "LEAF 2", + "GROUP 0", +] + +#} +{% macro gen_header(attrs) -%} +[ +{% for attr in attrs|rejectattr("group", "defined") -%} + "{{ column_name(attr.name) }}", +{% endfor -%} +{% for group, attrs in attrs|selectattr("group", "defined")|groupby("group") -%} +{%- if group == "" %} +{% for attr in attrs -%} + "{{ column_name(attr.name) }}", +{% endfor -%} +{%- else %} + "{{ column_name(group) }}", +{%- endif %} +{% endfor -%} +] +{% endmacro %} + + +{% for table in tables %} +{% if "static-objects" in table %} +{# For static objects generate a command group called against table name. +E.g: +@click.group(name="table-name", + cls=clicommon.AliasedGroup) +def TABLE_NAME(): + """ TABLE DESCRIPTION """ + + pass +#} +@click.group(name="{{ cli_name(table.name) }}", + cls=clicommon.AliasedGroup) +def {{ table.name }}(): + """ {{ table.description }} """ + + pass + +{% for object in table["static-objects"] %} +{# For every object in static table generate a command +in the group to show individual object configuration. +CLI command is named against the object key in DB. +E.g: +@TABLE_NAME.command(name="object-name") +@clicommon.pass_db +def TABLE_NAME_object_name(db): + ... +#} +@{{ table.name }}.command(name="{{ cli_name(object.name) }}") +@clicommon.pass_db +def {{ table.name }}_{{ object.name }}(db): + """ {{ object.description }} """ + + header = {{ gen_header(object.attrs) }} + body = [] + + table = db.cfgdb.get_table("{{ table.name }}") + entry = table.get("{{ object.name }}", {}) + row = {{ gen_row("entry", object.attrs) }} + body.append(row) + click.echo(tabulate.tabulate(body, header)) + +{% endfor %} +{% elif "dynamic-objects" in table %} +{% if table["dynamic-objects"]|length > 1 %} +@click.group(name="{{ cli_name(table.name) }}", + cls=clicommon.AliasedGroup) +def {{ table.name }}(): + """ {{ table.description }} """ + + pass +{% endif %} +{% for object in table["dynamic-objects"] %} +{# Generate another nesting group in case table holds two types of objects #} +{% if table["dynamic-objects"]|length > 1 %} +{% set group = table.name %} +{% set name = object.name %} +{% else %} +{% set group = "click" %} +{% set name = table.name %} +{% endif %} + +{# Generate an implementation to display table. #} +@{{ group }}.group(name="{{ cli_name(name) }}", + cls=clicommon.AliasedGroup, + invoke_without_command=True) +@clicommon.pass_db +def {{ name }}(db): + """ {{ object.description }} [Callable command group] """ + + header = {{ gen_header(object["keys"] + object.attrs) }} + body = [] + + table = db.cfgdb.get_table("{{ table.name }}") + for key in natsort.natsorted(table): + entry = table[key] + if not isinstance(key, tuple): + key = (key,) + + row = [*key] + {{ gen_row("entry", object.attrs) }} + body.append(row) + + click.echo(tabulate.tabulate(body, header)) +{% endfor %} +{% endif %} +{% endfor %} + +def register(cli): + """ Register new CLI nodes in root CLI. + + Args: + cli (click.core.Command): Root CLI node. + Raises: + Exception: when root CLI already has a command + we are trying to register. 
+ """ + +{%- for table in tables %} + cli_node = {{ table.name }} + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command({{ table.name }}) +{%- endfor %} diff --git a/sonic_cli_gen/__init__.py b/sonic_cli_gen/__init__.py new file mode 100644 index 0000000000..e7e775c0fb --- /dev/null +++ b/sonic_cli_gen/__init__.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python + +from sonic_cli_gen.generator import CliGenerator + +__all__ = ['CliGenerator'] + diff --git a/sonic_cli_gen/generator.py b/sonic_cli_gen/generator.py new file mode 100644 index 0000000000..9d5bac6008 --- /dev/null +++ b/sonic_cli_gen/generator.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +import os +import pkgutil +import jinja2 + +from sonic_cli_gen.yang_parser import YangParser + +templates_path_switch = '/usr/share/sonic/templates/sonic-cli-gen/' + + +class CliGenerator: + """ SONiC CLI generator. This class provides public API + for sonic-cli-gen python library. It can generate config, + show CLI plugins. + + Attributes: + logger: logger + """ + + def __init__(self, logger): + """ Initialize CliGenerator. """ + + self.logger = logger + + + def generate_cli_plugin( + self, + cli_group, + plugin_name, + config_db_path='configDB', + templates_path='/usr/share/sonic/templates/sonic-cli-gen/' + ): + """ Generate click CLI plugin and put it to: + /usr/local/lib//dist-packages//plugins/auto/ + """ + + parser = YangParser( + yang_model_name=plugin_name, + config_db_path=config_db_path, + allow_tbl_without_yang=True, + debug=False + ) + # yang_dict will be used as an input for templates located in + # /usr/share/sonic/templates/sonic-cli-gen/ + yang_dict = parser.parse_yang_model() + + loader = jinja2.FileSystemLoader(templates_path) + j2_env = jinja2.Environment(loader=loader) + try: + template = j2_env.get_template(cli_group + '.py.j2') + except jinja2.exceptions.TemplateNotFound: + self.logger.error(' Templates for auto-generation does NOT exist in folder {}'.format(templates_path)) + + plugin_path = get_cli_plugin_path(cli_group, plugin_name + '_yang.py') + + with open(plugin_path, 'w') as plugin_py: + plugin_py.write(template.render(yang_dict)) + self.logger.info(' Auto-generation successful! 
+
+
+    def remove_cli_plugin(self, cli_group, plugin_name):
+        """ Remove a CLI plugin from the directory:
+            /usr/local/lib//dist-packages//plugins/auto/
+        """
+
+        plugin_path = get_cli_plugin_path(cli_group, plugin_name + '_yang.py')
+
+        if os.path.exists(plugin_path):
+            os.remove(plugin_path)
+            self.logger.info(' {} was removed.'.format(plugin_path))
+        else:
+            self.logger.warning(' Path {} does NOT exist!'.format(plugin_path))
+
+
+def get_cli_plugin_path(command, plugin_name):
+    pkg_loader = pkgutil.get_loader(f'{command}.plugins.auto')
+    if pkg_loader is None:
+        raise Exception(f'Failed to get plugins path for {command} CLI')
+    plugins_pkg_path = os.path.dirname(pkg_loader.path)
+
+    return os.path.join(plugins_pkg_path, plugin_name)
+
diff --git a/sonic_cli_gen/main.py b/sonic_cli_gen/main.py
new file mode 100644
index 0000000000..bfcd301aed
--- /dev/null
+++ b/sonic_cli_gen/main.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+import sys
+import click
+import logging
+from sonic_cli_gen.generator import CliGenerator
+
+logger = logging.getLogger('sonic-cli-gen')
+logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+
+
+@click.group()
+@click.pass_context
+def cli(ctx):
+    """ SONiC CLI Auto-generator tool.\r
+    Generate click CLI plugin for 'config' or 'show' CLI groups.\r
+    CLI plugin will be generated from the YANG model, which should be in:\r\n
+    /usr/local/yang-models/ \n
+    Generated CLI plugin will be placed in: \r\n
+    /usr/local/lib/python3.7/dist-packages//plugins/auto/
+    """
+
+    context = {
+        'gen': CliGenerator(logger)
+    }
+    ctx.obj = context
+
+
+@cli.command()
+@click.argument('cli_group', type=click.Choice(['config', 'show']))
+@click.argument('yang_model_name', type=click.STRING)
+@click.pass_context
+def generate(ctx, cli_group, yang_model_name):
+    """ Generate click CLI plugin. """
+
+    ctx.obj['gen'].generate_cli_plugin(cli_group, yang_model_name)
+
+
+@cli.command()
+@click.argument('cli_group', type=click.Choice(['config', 'show']))
+@click.argument('yang_model_name', type=click.STRING)
+@click.pass_context
+def remove(ctx, cli_group, yang_model_name):
+    """ Remove a generated click CLI plugin. """
+
+    ctx.obj['gen'].remove_cli_plugin(cli_group, yang_model_name)
+
+
+if __name__ == '__main__':
+    cli()
+
diff --git a/sonic_cli_gen/yang_parser.py b/sonic_cli_gen/yang_parser.py
new file mode 100644
index 0000000000..df0382536f
--- /dev/null
+++ b/sonic_cli_gen/yang_parser.py
@@ -0,0 +1,679 @@
+#!/usr/bin/env python
+
+from collections import OrderedDict
+from config.config_mgmt import ConfigMgmt
+from typing import List, Dict
+
+yang_guidelines_link = 'https://github.com/Azure/SONiC/blob/master/doc/mgmt/SONiC_YANG_Model_Guidelines.md'
+
+
+class YangParser:
+    """ YANG model parser
+
+    Attributes:
+        yang_model_name: Name of the YANG model file
+        conf_mgmt: Instance of Config Mgmt class to
+                   help parse YANG models
+        y_module: Reference to 'module' entity
+                  from YANG model file
+        y_top_level_container: Reference to top level 'container'
+                               entity from YANG model file
+        y_table_containers: Reference to 'container' entities
+                            from YANG model file that represent Config DB tables
+        yang_2_dict: dictionary created from YANG model file that
+                     represents the Config DB schema.
+
+    Below is the 'yang_2_dict' object in case the YANG model has a 'list' entity:
+    {
+        'tables': [{
+            'name': 'value',
+            'description': 'value',
+            'dynamic-objects': [{
+                'name': 'value',
+                'description': 'value',
+                'attrs': [
+                    {
+                        'name': 'value',
+                        'description': 'value',
+                        'is-leaf-list': False,
+                        'is-mandatory': False,
+                        'group': 'value'
+                    }
+                    ...
+                ],
+                'keys': [
+                    {
+                        'name': 'ACL_TABLE_NAME',
+                        'description': 'value'
+                    }
+                    ...
+                ]
+            }],
+        }]
+    }
+    If the YANG model does NOT have a 'list' entity, the structure
+    is the same as above, but 'dynamic-objects' is replaced by
+    'static-objects' and there is no 'keys' section
+    """
+
+    def __init__(self,
+                 yang_model_name,
+                 config_db_path,
+                 allow_tbl_without_yang,
+                 debug):
+        self.yang_model_name = yang_model_name
+        self.conf_mgmt = None
+        self.y_module = None
+        self.y_top_level_container = None
+        self.y_table_containers = None
+        self.yang_2_dict = dict()
+
+        try:
+            self.conf_mgmt = ConfigMgmt(config_db_path,
+                                        debug,
+                                        allow_tbl_without_yang)
+        except Exception as e:
+            raise Exception("Failed to load the {} class".format(str(e)))
+
+    def _init_yang_module_and_containers(self):
+        """ Initialize inner class variables:
+            self.y_module
+            self.y_top_level_container
+            self.y_table_containers
+
+            Raises:
+                Exception: if the YANG model is invalid or does NOT exist
+        """
+
+        self.y_module = self._find_yang_model_in_yjson_obj()
+
+        if self.y_module is None:
+            raise Exception('The YANG model {} does NOT exist'.format(self.yang_model_name))
+
+        if self.y_module.get('container') is None:
+            raise Exception('The YANG model {} does NOT have\
+                            "top level container" element\
+                            Please follow the SONiC YANG model guidelines:\
+                            \n{}'.format(self.yang_model_name, yang_guidelines_link))
+        self.y_top_level_container = self.y_module.get('container')
+
+        if self.y_top_level_container.get('container') is None:
+            raise Exception('The YANG model {} does NOT have "container"\
+                            element after "top level container"\
+                            Please follow the SONiC YANG model guidelines:\
+                            \n{}'.format(self.yang_model_name, yang_guidelines_link))
+        self.y_table_containers = self.y_top_level_container.get('container')
+
+    def _find_yang_model_in_yjson_obj(self) -> OrderedDict:
+        """ Find the provided YANG model inside the yJson object;
+            the yJson object contains all YANG models
+            parsed from the directory /usr/local/yang-models
+
+            Returns:
+                reference to yang_model_name
+        """
+
+        for yang_model in self.conf_mgmt.sy.yJson:
+            if yang_model.get('module').get('@name') == self.yang_model_name:
+                return yang_model.get('module')
+
+    def parse_yang_model(self) -> dict:
+        """ Parse the provided YANG model and save
+            the output to the self.yang_2_dict object
+
+            Returns:
+                parsed YANG model in dictionary format
+        """
+
+        self._init_yang_module_and_containers()
+        self.yang_2_dict['tables'] = list()
+
+        # determine how many (1 or more) containers a YANG model
+        # has after the 'top level' container;
+        # 'table' containers go after the 'top level' container
+        self.yang_2_dict['tables'] += list_handler(self.y_table_containers,
+                                                   lambda e: on_table_container(self.y_module, e, self.conf_mgmt))
+
+        return self.yang_2_dict
+
+
+# ------------------------------HANDLERS-------------------------------- #
+
+def list_handler(y_entity, callback) -> List[Dict]:
+    """ Determine if the type of entity is a list;
+        if so, call the callback for every list element
+    """
+
+    if isinstance(y_entity, list):
+        return [callback(e) for e in y_entity]
+    else:
+        return [callback(y_entity)]
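+
+
+# For a concrete picture of the parser output: for the minimal test model
+# sonic-1-list.yang (added under tests/cli_autogen_input/yang_parser_test/ in
+# this change), parse_yang_model() is expected to return the 'one_list'
+# dictionary from assert_dictionaries.py:
+#
+#     parser = YangParser(yang_model_name='sonic-1-list',
+#                         config_db_path='configDB',
+#                         allow_tbl_without_yang=True,
+#                         debug=False)
+#     parser.parse_yang_model() == {
+#         'tables': [{
+#             'name': 'TABLE_1',
+#             'description': 'TABLE_1 description',
+#             'dynamic-objects': [{
+#                 'name': 'TABLE_1_LIST',
+#                 'description': 'TABLE_1_LIST description',
+#                 'keys': [{'name': 'key_name', 'description': ''}],
+#                 'attrs': []
+#             }]
+#         }]
+#     }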
+
+
+def on_table_container(y_module: OrderedDict,
+                       tbl_container: OrderedDict,
+                       conf_mgmt: ConfigMgmt) -> dict:
+    """ Parse a 'table' container;
+        'table' containers go after the 'top level' container
+
+        Args:
+            y_module: reference to 'module'
+            tbl_container: reference to 'table' container
+            conf_mgmt: reference to a ConfigMgmt class instance;
+                       it has a yJson object which contains all parsed YANG models
+        Returns:
+            element for self.yang_2_dict['tables']
+    """
+    y2d_elem = {
+        'name': tbl_container.get('@name'),
+        'description': get_description(tbl_container)
+    }
+
+    # determine if the 'table' container has a 'list' entity
+    if tbl_container.get('list') is None:
+        y2d_elem['static-objects'] = list()
+
+        # the 'object' container goes after the 'table' container;
+        # 'object' containers have 2 types - list (like sonic-flex_counter.yang)
+        # and NOT list (like sonic-device_metadata.yang)
+        y2d_elem['static-objects'] += list_handler(tbl_container.get('container'),
+                                                   lambda e: on_object_entity(y_module, e, conf_mgmt, is_list=False))
+    else:
+        y2d_elem['dynamic-objects'] = list()
+
+        # a 'container' can have more than 1 'list' entity
+        y2d_elem['dynamic-objects'] += list_handler(tbl_container.get('list'),
+                                                    lambda e: on_object_entity(y_module, e, conf_mgmt, is_list=True))
+
+        # move 'keys' elements from 'attrs' to 'keys'
+        change_dyn_obj_struct(y2d_elem['dynamic-objects'])
+
+    return y2d_elem
+
+
+def on_object_entity(y_module: OrderedDict,
+                     y_entity: OrderedDict,
+                     conf_mgmt: ConfigMgmt,
+                     is_list: bool) -> dict:
+    """ Parse an 'object' entity; it can be a 'container' or a 'list'.
+        An 'object' entity represents an OBJECT in the Config DB schema:
+        {
+            "TABLE": {
+                "OBJECT": {
+                    "attr": "value"
+                }
+            }
+        }
+
+        Args:
+            y_module: reference to 'module'
+            y_entity: reference to 'object' entity
+            conf_mgmt: reference to a ConfigMgmt class instance;
+                       it has a yJson object which contains all parsed YANG models
+            is_list: boolean flag to determine if a 'list' was passed
+        Returns:
+            element for y2d_elem['static-objects'] OR y2d_elem['dynamic-objects']
+    """
+
+    if y_entity is None:
+        return {}
+
+    obj_elem = {
+        'name': y_entity.get('@name'),
+        'description': get_description(y_entity),
+        'attrs': list()
+    }
+
+    if is_list:
+        obj_elem['keys'] = get_list_keys(y_entity)
+
+    attrs_list = list()
+    # grouping_name is empty because 'grouping' is not used so far
+    attrs_list.extend(get_leafs(y_entity, grouping_name=''))
+    attrs_list.extend(get_leaf_lists(y_entity, grouping_name=''))
+    attrs_list.extend(get_choices(y_module, y_entity, conf_mgmt, grouping_name=''))
+    attrs_list.extend(get_uses(y_module, y_entity, conf_mgmt))
+
+    obj_elem['attrs'] = attrs_list
+
+    return obj_elem
+
+
+def on_uses(y_module: OrderedDict,
+            y_uses,
+            conf_mgmt: ConfigMgmt) -> list:
+    """ Parse YANG 'uses' entities;
+        'uses' refers to a 'grouping' YANG entity
+
+        Args:
+            y_module: reference to 'module'
+            y_uses: reference to 'uses'
+            conf_mgmt: reference to a ConfigMgmt class instance;
+                       it has a yJson object which contains all parsed YANG models
+        Returns:
+            element for obj_elem['attrs']; 'attrs' contains the parsed 'leafs'
+    """
+
+    ret_attrs = list()
+    y_grouping = get_all_grouping(y_module, y_uses, conf_mgmt)
+    # trim prefixes in order to make the next checks work
+    trim_uses_prefixes(y_uses)
+
+    # TODO: 'refine' support
+    for group in y_grouping:
+        if isinstance(y_uses, list):
+            for use in y_uses:
+                if group.get('@name') == use.get('@name'):
+                    ret_attrs.extend(get_leafs(group, group.get('@name')))
+                    ret_attrs.extend(get_leaf_lists(group, group.get('@name')))
+                    ret_attrs.extend(get_choices(y_module, group, conf_mgmt, group.get('@name')))
+        else:
+            if group.get('@name') == y_uses.get('@name'):
+                ret_attrs.extend(get_leafs(group, group.get('@name')))
+                ret_attrs.extend(get_leaf_lists(group, group.get('@name')))
+                ret_attrs.extend(get_choices(y_module, group, conf_mgmt, group.get('@name')))
+
+    return ret_attrs
+
+
+def on_choices(y_module: OrderedDict,
+               y_choices,
+               conf_mgmt: ConfigMgmt,
+               grouping_name: str) -> list:
+    """ Parse YANG 'choice' entities
+
+        Args:
+            y_module: reference to 'module'
+            y_choices: reference to a 'choice' element
+            conf_mgmt: reference to a ConfigMgmt class instance;
+                       it has a yJson object which contains all parsed YANG models
+            grouping_name: if the YANG entity contains 'uses', this arg represents the 'grouping' name
+        Returns:
+            element for obj_elem['attrs']; 'attrs' contains the parsed 'leafs'
+    """
+
+    ret_attrs = list()
+
+    # the YANG model can have multiple 'choice' entities
+    # inside a 'container' or 'list'
+    if isinstance(y_choices, list):
+        for choice in y_choices:
+            attrs = on_choice_cases(y_module, choice.get('case'),
+                                    conf_mgmt, grouping_name)
+            ret_attrs.extend(attrs)
+    else:
+        ret_attrs = on_choice_cases(y_module, y_choices.get('case'),
+                                    conf_mgmt, grouping_name)
+
+    return ret_attrs
+
+
+def on_choice_cases(y_module: OrderedDict,
+                    y_cases,
+                    conf_mgmt: ConfigMgmt,
+                    grouping_name: str) -> list:
+    """ Parse a single YANG 'case' entity from the 'choice' entity.
+        A 'case' element can contain 'leaf', 'leaf-list' and 'uses' entities
+
+        Args:
+            y_module: reference to 'module'
+            y_cases: reference to 'case'
+            conf_mgmt: reference to a ConfigMgmt class instance;
+                       it has a yJson object which contains all
+                       parsed YANG models
+            grouping_name: if the YANG entity contains 'uses',
+                           this argument represents the 'grouping' name
+        Returns:
+            element for the obj_elem['attrs']; the 'attrs'
+            contains the parsed 'leafs'
+    """
+
+    ret_attrs = list()
+
+    if isinstance(y_cases, list):
+        for case in y_cases:
+            ret_attrs.extend(get_leafs(case, grouping_name))
+            ret_attrs.extend(get_leaf_lists(case, grouping_name))
+            ret_attrs.extend(get_uses(y_module, case, conf_mgmt))
+    else:
+        ret_attrs.extend(get_leafs(y_cases, grouping_name))
+        ret_attrs.extend(get_leaf_lists(y_cases, grouping_name))
+        ret_attrs.extend(get_uses(y_module, y_cases, conf_mgmt))
+
+    return ret_attrs
+
+
+def on_leafs(y_leafs,
+             grouping_name: str,
+             is_leaf_list: bool) -> list:
+    """ Parse all the 'leaf' or 'leaf-list' elements
+
+        Args:
+            y_leafs: reference to all 'leaf' elements
+            grouping_name: if the YANG entity contains 'uses',
+                           this argument represents the 'grouping' name
+            is_leaf_list: boolean to determine if a 'leaf-list'
+                          was passed as the 'y_leafs' argument
+        Returns:
+            list of parsed 'leaf' elements
+    """
+
+    ret_attrs = list()
+    # The YANG 'container' entity may have only 1 'leaf'
+    # element OR a list of 'leaf' elements
+    ret_attrs += list_handler(y_leafs, lambda e: on_leaf(e, grouping_name, is_leaf_list))
+
+    return ret_attrs
+
+
+def on_leaf(leaf: OrderedDict,
+            grouping_name: str,
+            is_leaf_list: bool) -> dict:
+    """ Parse a single 'leaf' element
+
+        Args:
+            leaf: reference to a 'leaf' entity
+            grouping_name: if the YANG entity contains 'uses',
+                           this argument represents the 'grouping' name
+            is_leaf_list: boolean to determine if a 'leaf-list'
+                          was passed in the 'y_leafs' argument
+        Returns:
+            parsed 'leaf' element
+    """
+
+    attr = {'name': leaf.get('@name'),
+            'description': get_description(leaf),
+            'is-leaf-list': is_leaf_list,
+            'is-mandatory': get_mandatory(leaf),
+            'group': grouping_name}
+
+    return attr
+
+
+# ----------------------GETTERS------------------------- #
+
+def get_mandatory(y_leaf: OrderedDict) -> bool:
+    """ Parse the 'mandatory' statement for a 'leaf'
+
+        Args:
+            y_leaf: reference to a 'leaf' entity
+        Returns:
+            the 'leaf' 'mandatory' value
+    """
+
+    if y_leaf.get('mandatory') is not None:
+        return True
+
+    return False
+
+
+def get_description(y_entity: OrderedDict) -> str:
+    """ Parse the 'description' entity from any YANG element
+
+        Args:
+            y_entity: reference to a YANG 'container' OR 'list' OR 'leaf' ...
+        Returns:
+            text of the 'description'
+    """
+
+    if y_entity.get('description') is not None:
+        return y_entity.get('description').get('text')
+    else:
+        return ''
+
+
+def get_leafs(y_entity: OrderedDict,
+              grouping_name: str) -> list:
+    """ Check if the YANG entity has 'leaf' elements; if so, call the handler
+
+        Args:
+            y_entity: reference to a YANG 'container' or 'list'
+                      or 'choice' or 'uses'
+            grouping_name: if the YANG entity contains 'uses',
+                           this argument represents the 'grouping' name
+        Returns:
+            list of parsed 'leaf' elements
+    """
+
+    if y_entity.get('leaf') is not None:
+        return on_leafs(y_entity.get('leaf'), grouping_name, is_leaf_list=False)
+
+    return []
+
+
+def get_leaf_lists(y_entity: OrderedDict,
+                   grouping_name: str) -> list:
+    """ Check if the YANG entity has 'leaf-list' elements; if so, call the handler
+
+        Args:
+            y_entity: reference to a YANG 'container' or 'list'
+                      or 'choice' or 'uses'
+            grouping_name: if the YANG entity contains 'uses',
+                           this argument represents the 'grouping' name
+        Returns:
+            list of parsed 'leaf-list' elements
+    """
+
+    if y_entity.get('leaf-list') is not None:
+        return on_leafs(y_entity.get('leaf-list'), grouping_name, is_leaf_list=True)
+
+    return []
+
+
+def get_choices(y_module: OrderedDict,
+                y_entity: OrderedDict,
+                conf_mgmt: ConfigMgmt,
+                grouping_name: str) -> list:
+    """ Check if the YANG entity has a 'choice'; if so, call the handler
+
+        Args:
+            y_module: reference to 'module'
+            y_entity: reference to a YANG 'container' or 'list'
+                      or 'choice' or 'uses'
+            conf_mgmt: reference to a ConfigMgmt class instance;
+                       it has a yJson object which contains all parsed YANG models
+            grouping_name: if the YANG entity contains 'uses',
+                           this argument represents the 'grouping' name
+        Returns:
+            list of parsed elements inside the 'choice'
+    """
+
+    if y_entity.get('choice') is not None:
+        return on_choices(y_module, y_entity.get('choice'), conf_mgmt, grouping_name)
+
+    return []
+
+
+def get_uses(y_module: OrderedDict,
+             y_entity: OrderedDict,
+             conf_mgmt: ConfigMgmt) -> list:
+    """ Check if the YANG entity has 'uses'; if so, call the handler
+
+        Args:
+            y_module: reference to 'module'
+            y_entity: reference to a YANG 'container' or 'list'
+                      or 'choice' or 'uses'
+            conf_mgmt: reference to a ConfigMgmt class instance;
+                       it has a yJson object which contains all parsed YANG models
+        Returns:
+            list of parsed elements inside the 'grouping'
+            that is referenced by 'uses'
+    """
+
+    if y_entity.get('uses') is not None:
+        return on_uses(y_module, y_entity.get('uses'), conf_mgmt)
+
+    return []
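+
+
+# Illustrative sketch (hand-written, not from the test suite) of the attr dict
+# produced by on_leaf() above; the OrderedDict shape mirrors how the yJson
+# models are assumed to be parsed:
+#
+#     leaf = OrderedDict([
+#         ('@name', 'hostname'),
+#         ('description', OrderedDict([('text', 'Device hostname')])),
+#         ('mandatory', OrderedDict([('@value', 'true')])),
+#     ])
+#     on_leaf(leaf, grouping_name='', is_leaf_list=False) == {
+#         'name': 'hostname',
+#         'description': 'Device hostname',
+#         'is-leaf-list': False,
+#         'is-mandatory': True,
+#         'group': ''
+#     }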
+
+
+def get_all_grouping(y_module: OrderedDict,
+                     y_uses: OrderedDict,
+                     conf_mgmt: ConfigMgmt) -> list:
+    """ Get all the 'grouping' entities that are referenced
+        by 'uses' in the current YANG model
+
+        Args:
+            y_module: reference to 'module'
+            y_uses: reference to 'uses'
+            conf_mgmt: reference to a ConfigMgmt class instance;
+                       it has a yJson object which contains all parsed YANG models
+        Returns:
+            list of 'grouping' elements
+    """
+
+    ret_grouping = list()
+    # prefix_list is needed to find which YANG model was imported
+    prefix_list = get_import_prefixes(y_uses)
+
+    # in case the 'grouping' is located in the same YANG model
+    local_grouping = y_module.get('grouping')
+    if local_grouping is not None:
+        if isinstance(local_grouping, list):
+            ret_grouping.extend(local_grouping)
+        else:
+            ret_grouping.append(local_grouping)
+
+    # if prefix_list is NOT empty, the 'grouping'
+    # was imported from another YANG model
+    if prefix_list != []:
+        for prefix in prefix_list:
+            y_import = y_module.get('import')
+            if isinstance(y_import, list):
+                for _import in y_import:
+                    if _import.get('prefix').get('@value') == prefix:
+                        ret_grouping.extend(get_grouping_from_another_yang_model(_import.get('@module'), conf_mgmt))
+            else:
+                if y_import.get('prefix').get('@value') == prefix:
+                    ret_grouping.extend(get_grouping_from_another_yang_model(y_import.get('@module'), conf_mgmt))
+
+    return ret_grouping
+
+
+def get_grouping_from_another_yang_model(yang_model_name: str,
+                                         conf_mgmt) -> list:
+    """ Get the YANG 'grouping' entity
+
+        Args:
+            yang_model_name - the YANG model to search
+            conf_mgmt - reference to a ConfigMgmt class instance;
+                        it has a yJson object which contains all parsed YANG models
+
+        Returns:
+            list of 'grouping' entities
+    """
+
+    ret_grouping = list()
+
+    for yang_model in conf_mgmt.sy.yJson:
+        if (yang_model.get('module').get('@name') == yang_model_name):
+            grouping = yang_model.get('module').get('grouping')
+            if isinstance(grouping, list):
+                ret_grouping.extend(grouping)
+            else:
+                ret_grouping.append(grouping)
+
+    return ret_grouping
+
+
+def get_import_prefixes(y_uses: OrderedDict) -> list:
+    """ Parse the 'import prefix' of a YANG 'uses' entity
+        Example:
+        {
+            uses stypes:endpoint;
+        }
+        'stypes' - prefix of the imported YANG module.
+        'endpoint' - YANG 'grouping' entity name
+
+        Args:
+            y_uses: reference to YANG 'uses'
+        Returns:
+            list of parsed prefixes
+    """
+
+    ret_prefixes = list()
+
+    if isinstance(y_uses, list):
+        for use in y_uses:
+            prefix = use.get('@name').split(':')[0]
+            if prefix != use.get('@name'):
+                ret_prefixes.append(prefix)
+    else:
+        prefix = y_uses.get('@name').split(':')[0]
+        if prefix != y_uses.get('@name'):
+            ret_prefixes.append(prefix)
+
+    return ret_prefixes
+
+
+def trim_uses_prefixes(y_uses) -> list:
+    """ Trim prefixes from the 'uses' YANG entities.
+        If the YANG 'grouping' was imported from another
+        YANG file, it uses the 'prefix' before the 'grouping' name:
+        {
+            uses sgrop:endpoint;
+        }
+        Where 'sgrop' = 'prefix'; 'endpoint' = 'grouping' name.
+
+        Args:
+            y_uses: reference to 'uses'
+
+        Returns:
+            list of 'uses' without 'prefixes'
+    """
+
+    prefixes = get_import_prefixes(y_uses)
+
+    for prefix in prefixes:
+        if isinstance(y_uses, list):
+            for use in y_uses:
+                if prefix in use.get('@name'):
+                    use['@name'] = use.get('@name').split(':')[1]
+        else:
+            if prefix in y_uses.get('@name'):
+                y_uses['@name'] = y_uses.get('@name').split(':')[1]
+
+
+def get_list_keys(y_list: OrderedDict) -> list:
+    """ Parse the YANG 'key' entity.
+        If the YANG model has a 'list' entity, inside the 'list'
+        there is a 'key' entity. The 'key' is a whitespace
+        separated list of 'leafs'
+
+        Args:
+            y_list: reference to the 'list'
+        Returns:
+            list of parsed keys
+    """
+
+    ret_list = list()
+
+    keys = y_list.get('key').get('@value').split()
+    for k in keys:
+        key = {'name': k}
+        ret_list.append(key)
+
+    return ret_list
+
+
+def change_dyn_obj_struct(dynamic_objects: list):
+    """ Rearrange the self.yang_2_dict['dynamic_objects'] structure.
+        If the YANG model has a 'list' entity - inside the 'list'
+        it has a 'key' entity.
The 'key' entity it is whitespace + separeted list of 'leafs', those 'leafs' was parsed by + 'on_leaf()' function and placed under 'attrs' in + self.yang_2_dict['dynamic_objects'] need to move 'leafs' + from 'attrs' and put them into 'keys' section of + self.yang_2_dict['dynamic_objects'] + + Args: + dynamic_objects: reference to self.yang_2_dict['dynamic_objects'] + """ + + for obj in dynamic_objects: + for key in obj.get('keys'): + for attr in obj.get('attrs'): + if key.get('name') == attr.get('name'): + key['description'] = attr.get('description') + obj['attrs'].remove(attr) + break + diff --git a/sonic_package_manager/registry.py b/sonic_package_manager/registry.py index 8c03b078d2..5cac5469bf 100644 --- a/sonic_package_manager/registry.py +++ b/sonic_package_manager/registry.py @@ -23,20 +23,21 @@ class AuthenticationService: """ AuthenticationService provides an authentication tokens. """ @staticmethod - def get_token(realm, service, scope) -> str: + def get_token(bearer: Dict) -> str: """ Retrieve an authentication token. Args: - realm: Realm: url to request token. - service: service to request token for. - scope: scope to requests token for. + bearer: Bearer token. Returns: token value as a string. """ - log.debug(f'getting authentication token: realm={realm} service={service} scope={scope}') + log.debug(f'getting authentication token {bearer}') + if 'realm' not in bearer: + raise AuthenticationServiceError(f'Realm is required in bearer') - response = requests.get(f'{realm}?scope={scope}&service={service}') + url = bearer.pop('realm') + response = requests.get(url, params=bearer) if response.status_code != requests.codes.ok: raise AuthenticationServiceError('Failed to retrieve token') @@ -44,7 +45,7 @@ def get_token(realm, service, scope) -> str: token = content['token'] expires_in = content['expires_in'] - log.debug(f'authentication token for realm={realm} service={service} scope={scope}: ' + log.debug(f'authentication token for bearer={bearer}: ' f'token={token} expires_in={expires_in}') return token @@ -85,7 +86,7 @@ def _execute_get_request(url, headers): log.debug(f'unauthorized: retrieving authentication details ' f'from response headers {www_authenticate_details}') bearer = www_authenticate.parse(www_authenticate_details)['bearer'] - token = AuthenticationService.get_token(**bearer) + token = AuthenticationService.get_token(bearer) headers['Authorization'] = f'Bearer {token}' # Repeat request response = requests.get(url, headers=headers) diff --git a/tests/cli_autogen_input/autogen_test/show_cmd_output.py b/tests/cli_autogen_input/autogen_test/show_cmd_output.py new file mode 100644 index 0000000000..19c02c7783 --- /dev/null +++ b/tests/cli_autogen_input/autogen_test/show_cmd_output.py @@ -0,0 +1,81 @@ +""" +This module are holding correct output for the show command for cli_autogen_test.py +""" + + +show_device_metadata_localhost="""\ +HWSKU DEFAULT BGP STATUS DOCKER ROUTING CONFIG MODE HOSTNAME PLATFORM MAC DEFAULT PFCWD STATUS BGP ASN DEPLOYMENT ID TYPE BUFFER MODEL FRR MGMT FRAMEWORK CONFIG +----------- -------------------- ---------------------------- ---------- ---------------------- ----------------- ---------------------- --------- --------------- --------- -------------- --------------------------- +ACS-MSN2100 up N/A r-sonic-01 x86_64-mlnx_msn2100-r0 ff:ff:ff:ff:ff:00 disable N/A N/A ToRRouter traditional N/A +""" + + +show_device_metadata_localhost_changed_buffer_model="""\ +HWSKU DEFAULT BGP STATUS DOCKER ROUTING CONFIG MODE HOSTNAME PLATFORM MAC DEFAULT PFCWD 
STATUS BGP ASN DEPLOYMENT ID TYPE BUFFER MODEL FRR MGMT FRAMEWORK CONFIG +----------- -------------------- ---------------------------- ---------- ---------------------- ----------------- ---------------------- --------- --------------- --------- -------------- --------------------------- +ACS-MSN2100 up N/A r-sonic-01 x86_64-mlnx_msn2100-r0 ff:ff:ff:ff:ff:00 disable N/A N/A ToRRouter dynamic N/A +""" + + +show_device_neighbor="""\ +PEER NAME NAME MGMT ADDR LOCAL PORT PORT TYPE +----------- -------- ----------- ------------ ------ ------ +Ethernet0 Servers 10.217.0.1 Ethernet0 eth0 type +Ethernet4 Servers0 10.217.0.2 Ethernet4 eth1 type +""" + + +show_device_neighbor_added="""\ +PEER NAME NAME MGMT ADDR LOCAL PORT PORT TYPE +----------- -------- ----------- ------------ ------ ------ +Ethernet0 Servers 10.217.0.1 Ethernet0 eth0 type +Ethernet4 Servers0 10.217.0.2 Ethernet4 eth1 type +Ethernet8 Servers1 10.217.0.3 Ethernet8 eth2 type +""" + + +show_device_neighbor_deleted="""\ +PEER NAME NAME MGMT ADDR LOCAL PORT PORT TYPE +----------- -------- ----------- ------------ ------ ------ +Ethernet4 Servers0 10.217.0.2 Ethernet4 eth1 type +""" + + +show_device_neighbor_updated_mgmt_addr="""\ +PEER NAME NAME MGMT ADDR LOCAL PORT PORT TYPE +----------- -------- ----------- ------------ ------ ------ +Ethernet0 Servers 10.217.0.5 Ethernet0 eth0 type +Ethernet4 Servers0 10.217.0.2 Ethernet4 eth1 type +""" + + +show_device_neighbor_updated_name="""\ +PEER NAME NAME MGMT ADDR LOCAL PORT PORT TYPE +----------- -------- ----------- ------------ ------ ------ +Ethernet0 Servers1 10.217.0.1 Ethernet0 eth0 type +Ethernet4 Servers0 10.217.0.2 Ethernet4 eth1 type +""" + + +show_device_neighbor_updated_local_port="""\ +PEER NAME NAME MGMT ADDR LOCAL PORT PORT TYPE +----------- -------- ----------- ------------ ------ ------ +Ethernet0 Servers 10.217.0.1 Ethernet12 eth0 type +Ethernet4 Servers0 10.217.0.2 Ethernet4 eth1 type +""" + + +show_device_neighbor_updated_port="""\ +PEER NAME NAME MGMT ADDR LOCAL PORT PORT TYPE +----------- -------- ----------- ------------ ------ ------ +Ethernet0 Servers 10.217.0.1 Ethernet0 eth2 type +Ethernet4 Servers0 10.217.0.2 Ethernet4 eth1 type +""" + + +show_device_neighbor_updated_type="""\ +PEER NAME NAME MGMT ADDR LOCAL PORT PORT TYPE +----------- -------- ----------- ------------ ------ ------ +Ethernet0 Servers 10.217.0.1 Ethernet0 eth0 type2 +Ethernet4 Servers0 10.217.0.2 Ethernet4 eth1 type +""" diff --git a/tests/cli_autogen_input/autogen_test/sonic-device_metadata.yang b/tests/cli_autogen_input/autogen_test/sonic-device_metadata.yang new file mode 100644 index 0000000000..400cbf3bcd --- /dev/null +++ b/tests/cli_autogen_input/autogen_test/sonic-device_metadata.yang @@ -0,0 +1,123 @@ +module sonic-device_metadata { + + yang-version 1.1; + + namespace "http://github.com/Azure/sonic-device_metadata"; + prefix device_metadata; + + import ietf-yang-types { + prefix yang; + } + + import ietf-inet-types { + prefix inet; + } + + import sonic-types { + prefix stypes; + revision-date 2019-07-01; + } + + description "DEVICE_METADATA YANG Module for SONiC OS"; + + revision 2021-02-27 { + description "Added frr_mgmt_framework_config field to handle BGP + config DB schema events to configure FRR protocols."; + } + + revision 2020-04-10 { + description "First Revision"; + } + + container sonic-device_metadata { + + container DEVICE_METADATA { + + description "DEVICE_METADATA part of config_db.json"; + + container localhost{ + + leaf hwsku { + type stypes:hwsku; + } + + leaf 
default_bgp_status { + type enumeration { + enum up; + enum down; + } + default up; + } + + leaf docker_routing_config_mode { + type string { + pattern "unified|split|separated"; + } + default "unified"; + } + + leaf hostname { + type string { + length 1..255; + } + } + + leaf platform { + type string { + length 1..255; + } + } + + leaf mac { + type yang:mac-address; + } + + leaf default_pfcwd_status { + type enumeration { + enum disable; + enum enable; + } + default disable; + } + + leaf bgp_asn { + type inet:as-number; + } + + leaf deployment_id { + type uint32; + } + + leaf type { + type string { + length 1..255; + pattern "ToRRouter|LeafRouter|SpineChassisFrontendRouter|ChassisBackendRouter|ASIC"; + } + } + + leaf buffer_model { + description "This leaf is added for dynamic buffer calculation. + The dynamic model represents the model in which the buffer configurations, + like the headroom sizes and buffer pool sizes, are dynamically calculated based + on the ports' speed, cable length, and MTU. This model is used by Mellanox so far. + The traditional model represents the model in which all the buffer configurations + are statically configured in CONFIG_DB tables. This is the default model used by all other vendors"; + type string { + pattern "dynamic|traditional"; + } + } + + leaf frr_mgmt_framework_config { + type boolean; + description "FRR configurations are handled by sonic-frr-mgmt-framework module when set to true, + otherwise, sonic-bgpcfgd handles the FRR configurations based on the predefined templates."; + default "false"; + } + } + /* end of container localhost */ + } + /* end of container DEVICE_METADATA */ + } + /* end of top level container */ +} +/* end of module sonic-device_metadata */ diff --git a/tests/cli_autogen_input/autogen_test/sonic-device_neighbor.yang b/tests/cli_autogen_input/autogen_test/sonic-device_neighbor.yang new file mode 100644 index 0000000000..e1c745dd9a --- /dev/null +++ b/tests/cli_autogen_input/autogen_test/sonic-device_neighbor.yang @@ -0,0 +1,78 @@ +module sonic-device_neighbor { + + yang-version 1.1; + + namespace "http://github.com/Azure/sonic-device_neighbor"; + prefix device_neighbor; + + import ietf-inet-types { + prefix inet; + } + + import sonic-extension { + prefix ext; + revision-date 2019-07-01; + } + + import sonic-port { + prefix port; + revision-date 2019-07-01; + } + + description "DEVICE_NEIGHBOR YANG Module for SONiC OS"; + + revision 2020-04-10 { + description "First Revision"; + } + + container sonic-device_neighbor { + + container DEVICE_NEIGHBOR { + + description "DEVICE_NEIGHBOR part of config_db.json"; + + list DEVICE_NEIGHBOR_LIST { + + key "peer_name"; + + leaf peer_name { + type string { + length 1..255; + } + } + + leaf name { + type string { + length 1..255; + } + } + + leaf mgmt_addr { + type inet:ip-address; + } + + leaf local_port { + type leafref { + path /port:sonic-port/port:PORT/port:PORT_LIST/port:name; + } + } + + leaf port { + type string { + length 1..255; + } + } + + leaf type { + type string { + length 1..255; + } + } + } + /* end of list DEVICE_NEIGHBOR_LIST */ + } + /* end of container DEVICE_NEIGHBOR */ + } + /* end of top level container */ +} +/* end of module sonic-device_neighbor */ diff --git a/tests/cli_autogen_input/cli_autogen_common.py b/tests/cli_autogen_input/cli_autogen_common.py new file mode 100644 index 0000000000..141bceed9c --- /dev/null +++ b/tests/cli_autogen_input/cli_autogen_common.py @@ -0,0 +1,38 @@ +import os + +yang_models_path = '/usr/local/yang-models' + + +def 
move_yang_models(test_path, test_name, test_yang_models): + """ Move a test YANG models to known location """ + + for yang_model in test_yang_models: + src_path = os.path.join( + test_path, + 'cli_autogen_input', + test_name, + yang_model + ) + os.system('sudo cp {} {}'.format(src_path, yang_models_path)) + + +def remove_yang_models(test_yang_models): + """ Remove a test YANG models to known location """ + + for yang_model in test_yang_models: + yang_model_path = os.path.join(yang_models_path, yang_model) + os.system('sudo rm {}'.format(yang_model_path)) + + +def backup_yang_models(): + """ Make a copy of existing YANG models """ + + os.system('sudo cp -R {} {}'.format(yang_models_path, yang_models_path + '_backup')) + + +def restore_backup_yang_models(): + """ Restore existing YANG models from backup """ + + os.system('sudo cp {} {}'.format(yang_models_path + '_backup/*', yang_models_path)) + os.system('sudo rm -rf {}'.format(yang_models_path + '_backup')) + \ No newline at end of file diff --git a/tests/cli_autogen_input/config_db.json b/tests/cli_autogen_input/config_db.json new file mode 100644 index 0000000000..5d8c863cec --- /dev/null +++ b/tests/cli_autogen_input/config_db.json @@ -0,0 +1,65 @@ +{ + "DEVICE_METADATA|localhost": { + "buffer_model": "traditional", + "default_bgp_status": "up", + "default_pfcwd_status": "disable", + "hostname": "r-sonic-01", + "hwsku": "ACS-MSN2100", + "mac": "ff:ff:ff:ff:ff:00", + "platform": "x86_64-mlnx_msn2100-r0", + "type": "ToRRouter" + }, + "PORT|Ethernet0": { + "alias": "etp1", + "description": "etp1", + "index": "0", + "lanes": "0, 1, 2, 3", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000" + }, + "PORT|Ethernet4": { + "admin_status": "up", + "alias": "etp2", + "description": "Servers0:eth0", + "index": "1", + "lanes": "4, 5, 6, 7", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000" + }, + "PORT|Ethernet8": { + "admin_status": "up", + "alias": "etp3", + "description": "Servers0:eth2", + "index": "2", + "lanes": "8, 9, 10, 11", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000" + }, + "PORT|Ethernet12": { + "admin_status": "up", + "alias": "etp4", + "description": "Servers0:eth4", + "index": "3", + "lanes": "12, 13, 14, 15", + "mtu": "9100", + "pfc_asym": "off", + "speed": "100000" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "Servers", + "port": "eth0", + "mgmt_addr": "10.217.0.1", + "local_port": "Ethernet0", + "type": "type" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "Servers0", + "port": "eth1", + "mgmt_addr": "10.217.0.2", + "local_port": "Ethernet4", + "type": "type" + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/yang_parser_test/assert_dictionaries.py b/tests/cli_autogen_input/yang_parser_test/assert_dictionaries.py new file mode 100644 index 0000000000..bed2a4a06a --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/assert_dictionaries.py @@ -0,0 +1,626 @@ +""" +Module holding correct dictionaries for test YANG models +""" + +one_table_container = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + } + ] + } + ] +} + +two_table_containers = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + + } + ] + }, + { + "description":"TABLE_2 description", + "name":"TABLE_2", + "static-objects":[ + { + + } + ] + } + ] +} + +one_object_container = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + 
"description":"OBJECT_1 description", + "attrs":[ + ] + } + ] + } + ] +} + +two_object_containers = { + "tables":[ + { + "description":"FIRST_TABLE description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + ] + }, + { + "name":"OBJECT_2", + "description":"OBJECT_2 description", + "attrs":[ + ] + } + ] + } + ] +} + +one_list = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "dynamic-objects":[ + { + "name":"TABLE_1_LIST", + "description":"TABLE_1_LIST description", + "keys":[ + { + "name": "key_name", + "description": "", + } + ], + "attrs":[ + ] + } + ] + } + ] +} + +two_lists = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "dynamic-objects":[ + { + "name":"TABLE_1_LIST_1", + "description":"TABLE_1_LIST_1 description", + "keys":[ + { + "name": "key_name1", + "description": "", + } + ], + "attrs":[ + ] + }, + { + "name":"TABLE_1_LIST_2", + "description":"TABLE_1_LIST_2 description", + "keys":[ + { + "name": "key_name2", + "description": "", + } + ], + "attrs":[ + ] + } + ] + } + ] +} + +static_object_complex_1 = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + { + "name":"OBJ_1_LEAF_1", + "description": "OBJ_1_LEAF_1 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + } + ] + } + ] + } + ] +} + +static_object_complex_2 = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + { + "name":"OBJ_1_LEAF_1", + "description": "OBJ_1_LEAF_1 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_2", + "description": "OBJ_1_LEAF_2 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_2_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_2_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + ] + } + ] + } + ] +} + +dynamic_object_complex_1 = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "dynamic-objects":[ + { + "name":"OBJECT_1_LIST", + "description":"OBJECT_1_LIST description", + "attrs":[ + { + "name":"OBJ_1_LEAF_1", + "description": "OBJ_1_LEAF_1 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + 
"name":"OBJ_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + } + ], + "keys":[ + { + "name": "KEY_LEAF_1", + "description": "KEY_LEAF_1 description", + } + ] + } + ] + } + ] +} + +dynamic_object_complex_2 = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "dynamic-objects":[ + { + "name":"OBJECT_1_LIST", + "description":"OBJECT_1_LIST description", + "attrs":[ + { + "name":"OBJ_1_LEAF_1", + "description": "OBJ_1_LEAF_1 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_2", + "description": "OBJ_1_LEAF_2 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_2_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_2_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + } + ], + "keys":[ + { + "name": "KEY_LEAF_1", + "description": "KEY_LEAF_1 description", + }, + { + "name": "KEY_LEAF_2", + "description": "KEY_LEAF_2 description", + } + ] + } + ] + } + ] +} + +choice_complex = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + { + "name":"LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"GR_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_1", + }, + { + "name":"GR_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_1', + }, + { + "name":"LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"LEAF_3", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"LEAF_LIST_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"LEAF_LIST_3", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"GR_5_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_5', + }, + { + "name":"GR_5_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_5', + }, + { + "name":"GR_2_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_2', + }, + { + "name":"GR_2_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + 
"group": 'GR_2', + }, + { + "name":"GR_3_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_3', + }, + { + "name":"GR_3_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_3', + }, + ] + } + ] + } + ] +} + +grouping_complex = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + { + "name":"GR_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_1", + }, + { + "name":"GR_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_1', + }, + ] + }, + { + "name":"OBJECT_2", + "description":"OBJECT_2 description", + "attrs":[ + { + "name":"GR_5_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_5", + }, + { + "name":"GR_5_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": "GR_5", + }, + { + "name":"GR_6_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_2_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_2_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_2_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_2_LEAF_LIST_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": "GR_6", + }, + { + "name":"GR_4_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_4", + }, + { + "name":"GR_4_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_4", + }, + ] + } + ] + } + ] +} + diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-1-list.yang b/tests/cli_autogen_input/yang_parser_test/sonic-1-list.yang new file mode 100644 index 0000000000..bc8603add4 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-1-list.yang @@ -0,0 +1,29 @@ +module sonic-1-list { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-1-list"; + prefix s-1-list; + + container sonic-1-list { + /* sonic-1-list - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + list TABLE_1_LIST { + /* TABLE_1 - object container */ + + description "TABLE_1_LIST description"; + + key "key_name"; + + leaf key_name { + type string; + } + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-1-object-container.yang b/tests/cli_autogen_input/yang_parser_test/sonic-1-object-container.yang new file mode 100644 index 0000000000..8d19979157 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-1-object-container.yang @@ -0,0 +1,23 @@ +module sonic-1-object-container { + + yang-version 1.1; + + namespace 
"http://github.com/Azure/s-1-object"; + prefix s-1-object; + + container sonic-1-object-container { + /* sonic-1-object-container - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container */ + + description "OBJECT_1 description"; + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-1-table-container.yang b/tests/cli_autogen_input/yang_parser_test/sonic-1-table-container.yang new file mode 100644 index 0000000000..36b98415e5 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-1-table-container.yang @@ -0,0 +1,17 @@ +module sonic-1-table-container { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-1-table"; + prefix s-1-table; + + container sonic-1-table-container { + /* sonic-1-table-container - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-2-lists.yang b/tests/cli_autogen_input/yang_parser_test/sonic-2-lists.yang new file mode 100644 index 0000000000..fce9704f00 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-2-lists.yang @@ -0,0 +1,42 @@ +module sonic-2-lists { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-2-lists"; + prefix s-2-lists; + + container sonic-2-lists { + /* sonic-2-lists - top level container */ + + container TABLE_1 { + /* TALBE_1 - table container */ + + + description "TABLE_1 description"; + + list TABLE_1_LIST_1 { + /* TALBE_1_LIST_1 - object container */ + + description "TABLE_1_LIST_1 description"; + + key "key_name1"; + + leaf key_name1 { + type string; + } + } + + list TABLE_1_LIST_2 { + /* TALBE_1_LIST_2 - object container */ + + description "TABLE_1_LIST_2 description"; + + key "key_name2"; + + leaf key_name2 { + type string; + } + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-2-object-containers.yang b/tests/cli_autogen_input/yang_parser_test/sonic-2-object-containers.yang new file mode 100644 index 0000000000..e633b66246 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-2-object-containers.yang @@ -0,0 +1,29 @@ +module sonic-2-object-containers { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-2-object"; + prefix s-2-object; + + container sonic-2-object-containers { + /* sonic-2-object-containers - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "FIRST_TABLE description"; + + container OBJECT_1 { + /* OBJECT_1 - object container */ + + description "OBJECT_1 description"; + } + + container OBJECT_2 { + /* OBJECT_2 - object container */ + + description "OBJECT_2 description"; + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-2-table-containers.yang b/tests/cli_autogen_input/yang_parser_test/sonic-2-table-containers.yang new file mode 100644 index 0000000000..f5284c67ee --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-2-table-containers.yang @@ -0,0 +1,23 @@ +module sonic-2-table-containers { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-2-table"; + prefix s-2-table; + + container sonic-2-table-containers { + /* sonic-2-table-containers - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + } + + container TABLE_2 { + /* TABLE_2 - table 
container */ + + description "TABLE_2 description"; + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-choice-complex.yang b/tests/cli_autogen_input/yang_parser_test/sonic-choice-complex.yang new file mode 100644 index 0000000000..9d6e0de9ee --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-choice-complex.yang @@ -0,0 +1,91 @@ +module sonic-choice-complex { + + yang-version 1.1; + + namespace "http://github.com/Azure/choice-complex"; + prefix choice-complex; + + import sonic-grouping-1 { + prefix sgroup1; + } + + import sonic-grouping-2 { + prefix sgroup2; + } + + grouping GR_5 { + leaf GR_5_LEAF_1 { + type string; + } + + leaf GR_5_LEAF_2 { + type string; + } + } + + grouping GR_6 { + leaf GR_6_LEAF_1 { + type string; + } + + leaf GR_6_LEAF_2 { + type string; + } + } + + container sonic-choice-complex { + /* sonic-choice-complex - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container, it have + * 1 choice, which have 2 cases. + * first case have: 1 leaf, 1 leaf-list, 1 uses + * second case have: 2 leafs, 2 leaf-lists, 2 uses + */ + + description "OBJECT_1 description"; + + choice CHOICE_1 { + case CHOICE_1_CASE_1 { + leaf LEAF_1 { + type uint16; + } + + leaf-list LEAF_LIST_1 { + type string; + } + + uses sgroup1:GR_1; + } + + case CHOICE_1_CASE_2 { + leaf LEAF_2 { + type string; + } + + leaf LEAF_3 { + type string; + } + + leaf-list LEAF_LIST_2 { + type string; + } + + leaf-list LEAF_LIST_3 { + type string; + } + + uses GR_5; + uses sgroup1:GR_2; + uses sgroup2:GR_3; + } + } + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-dynamic-object-complex-1.yang b/tests/cli_autogen_input/yang_parser_test/sonic-dynamic-object-complex-1.yang new file mode 100644 index 0000000000..383e94fb43 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-dynamic-object-complex-1.yang @@ -0,0 +1,57 @@ +module sonic-dynamic-object-complex-1 { + + yang-version 1.1; + + namespace "http://github.com/Azure/dynamic-complex-1"; + prefix dynamic-complex-1; + + container sonic-dynamic-object-complex-1 { + /* sonic-dynamic-object-complex-1 - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + list OBJECT_1_LIST { + /* OBJECT_1_LIST - dynamic object container, it have: + * 1 key, + * 1 leaf, + * 1 leaf-list + * 1 choice + */ + + description "OBJECT_1_LIST description"; + + key "KEY_LEAF_1"; + + leaf KEY_LEAF_1 { + description "KEY_LEAF_1 description"; + type string; + } + + leaf OBJ_1_LEAF_1 { + description "OBJ_1_LEAF_1 description"; + type string; + } + + leaf-list OBJ_1_LEAF_LIST_1 { + type string; + } + + choice OBJ_1_CHOICE_1 { + case OBJ_1_CHOICE_1_CASE_1 { + leaf OBJ_1_CHOICE_1_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_1_CASE_2 { + leaf OBJ_1_CHOICE_1_LEAF_2 { + type string; + } + } + } + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-dynamic-object-complex-2.yang b/tests/cli_autogen_input/yang_parser_test/sonic-dynamic-object-complex-2.yang new file mode 100644 index 0000000000..a365b014ad --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-dynamic-object-complex-2.yang @@ -0,0 +1,84 @@ +module sonic-dynamic-object-complex-2 { + + yang-version 1.1; + + namespace "http://github.com/Azure/dynamic-complex-2"; + prefix dynamic-complex-2; + + container 
sonic-dynamic-object-complex-2 { + /* sonic-dynamic-object-complex-2 - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + list OBJECT_1_LIST { + /* OBJECT_1_LIST - dynamic object container, it has: + * 2 keys, + * 2 leaves, + * 2 leaf-lists + * 2 choices + */ + + description "OBJECT_1_LIST description"; + + key "KEY_LEAF_1 KEY_LEAF_2"; + + leaf KEY_LEAF_1 { + description "KEY_LEAF_1 description"; + type string; + } + + leaf KEY_LEAF_2 { + description "KEY_LEAF_2 description"; + type string; + } + + leaf OBJ_1_LEAF_1 { + description "OBJ_1_LEAF_1 description"; + type string; + } + + leaf OBJ_1_LEAF_2 { + description "OBJ_1_LEAF_2 description"; + type string; + } + + leaf-list OBJ_1_LEAF_LIST_1 { + type string; + } + + leaf-list OBJ_1_LEAF_LIST_2 { + type string; + } + + choice OBJ_1_CHOICE_1 { + case OBJ_1_CHOICE_1_CASE_1 { + leaf OBJ_1_CHOICE_1_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_1_CASE_2 { + leaf OBJ_1_CHOICE_1_LEAF_2 { + type string; + } + } + } + + choice OBJ_1_CHOICE_2 { + case OBJ_1_CHOICE_2_CASE_1 { + leaf OBJ_1_CHOICE_2_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_2_CASE_2 { + leaf OBJ_1_CHOICE_2_LEAF_2 { + type string; + } + } + } + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-grouping-1.yang b/tests/cli_autogen_input/yang_parser_test/sonic-grouping-1.yang new file mode 100644 index 0000000000..bf0be792f5 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-grouping-1.yang @@ -0,0 +1,25 @@ +module sonic-grouping-1{ + + yang-version 1.1; + + namespace "http://github.com/Azure/s-grouping-1"; + prefix s-grouping-1; + + grouping GR_1 { + leaf GR_1_LEAF_1 { + type string; + } + leaf GR_1_LEAF_2 { + type string; + } + } + + grouping GR_2 { + leaf GR_2_LEAF_1 { + type string; + } + leaf GR_2_LEAF_2 { + type string; + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-grouping-2.yang b/tests/cli_autogen_input/yang_parser_test/sonic-grouping-2.yang new file mode 100644 index 0000000000..58e9df6621 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-grouping-2.yang @@ -0,0 +1,25 @@ +module sonic-grouping-2 { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-grouping-2"; + prefix s-grouping-2; + + grouping GR_3 { + leaf GR_3_LEAF_1 { + type string; + } + leaf GR_3_LEAF_2 { + type string; + } + } + + grouping GR_4 { + leaf GR_4_LEAF_1 { + type string; + } + leaf GR_4_LEAF_2 { + type string; + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-grouping-complex.yang b/tests/cli_autogen_input/yang_parser_test/sonic-grouping-complex.yang new file mode 100644 index 0000000000..22956789b0 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-grouping-complex.yang @@ -0,0 +1,96 @@ +module sonic-grouping-complex { + + yang-version 1.1; + + namespace "http://github.com/Azure/grouping-complex"; + prefix grouping-complex; + + import sonic-grouping-1 { + prefix sgroup1; + } + + import sonic-grouping-2 { + prefix sgroup2; + } + + grouping GR_5 { + leaf GR_5_LEAF_1 { + type string; + } + + leaf-list GR_5_LEAF_LIST_1 { + type string; + } + } + + grouping GR_6 { + leaf GR_6_LEAF_1 { + type string; + } + + leaf GR_6_LEAF_2 { + type string; + } + + choice GR_6_CHOICE_1 { + case CHOICE_1_CASE_1 { + leaf GR_6_CASE_1_LEAF_1 { + type uint16; + } + + leaf-list GR_6_CASE_1_LEAF_LIST_1 { + type string; + } + } + + case CHOICE_1_CASE_2 { + leaf GR_6_CASE_2_LEAF_1 { + type uint16; + } +
+ leaf GR_6_CASE_2_LEAF_2 { + type uint16; + } + + leaf-list GR_6_CASE_2_LEAF_LIST_1 { + type string; + } + + leaf-list GR_6_CASE_2_LEAF_LIST_2 { + type string; + } + } + } + } + + container sonic-grouping-complex { + /* sonic-grouping-complex - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container that + * uses a single grouping (GR_1) + * imported from another module + * via the 'uses' statement + */ + + description "OBJECT_1 description"; + + uses sgroup1:GR_1; + } + + container OBJECT_2 { + + description "OBJECT_2 description"; + + uses GR_5; + uses GR_6; + uses sgroup2:GR_4; + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-static-object-complex-1.yang b/tests/cli_autogen_input/yang_parser_test/sonic-static-object-complex-1.yang new file mode 100644 index 0000000000..fa082d3b25 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-static-object-complex-1.yang @@ -0,0 +1,49 @@ +module sonic-static-object-complex-1 { + + yang-version 1.1; + + namespace "http://github.com/Azure/static-complex-1"; + prefix static-complex-1; + + container sonic-static-object-complex-1 { + /* sonic-static-object-complex-1 - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container, it has: + * 1 leaf, + * 1 leaf-list + * 1 choice + */ + + description "OBJECT_1 description"; + + leaf OBJ_1_LEAF_1 { + description "OBJ_1_LEAF_1 description"; + type string; + } + + leaf-list OBJ_1_LEAF_LIST_1 { + type string; + } + + choice OBJ_1_CHOICE_1 { + case OBJ_1_CHOICE_1_CASE_1 { + leaf OBJ_1_CHOICE_1_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_1_CASE_2 { + leaf OBJ_1_CHOICE_1_LEAF_2 { + type string; + } + } + } + } + } + } +} diff --git a/tests/cli_autogen_input/yang_parser_test/sonic-static-object-complex-2.yang b/tests/cli_autogen_input/yang_parser_test/sonic-static-object-complex-2.yang new file mode 100644 index 0000000000..4e53b2e1b1 --- /dev/null +++ b/tests/cli_autogen_input/yang_parser_test/sonic-static-object-complex-2.yang @@ -0,0 +1,71 @@ +module sonic-static-object-complex-2 { + + yang-version 1.1; + + namespace "http://github.com/Azure/static-complex-2"; + prefix static-complex-2; + + container sonic-static-object-complex-2 { + /* sonic-static-object-complex-2 - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container, it has: + * 2 leaves, + * 2 leaf-lists, + * 2 choices + */ + + description "OBJECT_1 description"; + + leaf OBJ_1_LEAF_1 { + description "OBJ_1_LEAF_1 description"; + type string; + } + + leaf OBJ_1_LEAF_2 { + description "OBJ_1_LEAF_2 description"; + type string; + } + + leaf-list OBJ_1_LEAF_LIST_1 { + type string; + } + + leaf-list OBJ_1_LEAF_LIST_2 { + type string; + } + + choice OBJ_1_CHOICE_1 { + case OBJ_1_CHOICE_1_CASE_1 { + leaf OBJ_1_CHOICE_1_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_1_CASE_2 { + leaf OBJ_1_CHOICE_1_LEAF_2 { + type string; + } + } + } + + choice OBJ_1_CHOICE_2 { + case OBJ_1_CHOICE_2_CASE_1 { + leaf OBJ_1_CHOICE_2_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_2_CASE_2 { + leaf OBJ_1_CHOICE_2_LEAF_2 { + type string; + } + } + } + } + } + } +} diff --git a/tests/cli_autogen_test.py
b/tests/cli_autogen_test.py new file mode 100644 index 0000000000..13407d1c13 --- /dev/null +++ b/tests/cli_autogen_test.py @@ -0,0 +1,243 @@ +import os +import logging +import pytest + +import show.plugins as show_plugins +import show.main as show_main +import config.plugins as config_plugins +import config.main as config_main +from .cli_autogen_input.autogen_test import show_cmd_output +from .cli_autogen_input.cli_autogen_common import backup_yang_models, restore_backup_yang_models, move_yang_models, remove_yang_models + +from utilities_common import util_base +from sonic_cli_gen.generator import CliGenerator +from .mock_tables import dbconnector +from utilities_common.db import Db +from click.testing import CliRunner + +logger = logging.getLogger(__name__) +gen = CliGenerator(logger) + +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, 'cli_autogen_input', 'config_db') +config_db_path = os.path.join(test_path, 'cli_autogen_input', 'config_db.json') +templates_path = os.path.join(test_path, '../', 'sonic-utilities-data', 'templates', 'sonic-cli-gen') + +SUCCESS = 0 +ERROR = 1 +INVALID_VALUE = 'INVALID' + +test_yang_models = [ + 'sonic-device_metadata.yang', + 'sonic-device_neighbor.yang', +] + + +class TestCliAutogen: + @classmethod + def setup_class(cls): + logger.info('SETUP') + os.environ['UTILITIES_UNIT_TESTING'] = '2' + + backup_yang_models() + move_yang_models(test_path, 'autogen_test', test_yang_models) + + for yang_model in test_yang_models: + gen.generate_cli_plugin( + cli_group='show', + plugin_name=yang_model.split('.')[0], + config_db_path=config_db_path, + templates_path=templates_path + ) + gen.generate_cli_plugin( + cli_group='config', + plugin_name=yang_model.split('.')[0], + config_db_path=config_db_path, + templates_path=templates_path + ) + + helper = util_base.UtilHelper() + helper.load_and_register_plugins(show_plugins, show_main.cli) + helper.load_and_register_plugins(config_plugins, config_main.config) + + + @classmethod + def teardown_class(cls): + logger.info('TEARDOWN') + + for yang_model in test_yang_models: + gen.remove_cli_plugin('show', yang_model.split('.')[0]) + gen.remove_cli_plugin('config', yang_model.split('.')[0]) + + restore_backup_yang_models() + + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + os.environ['UTILITIES_UNIT_TESTING'] = '0' + + + def test_show_device_metadata(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + show_main.cli.commands['device-metadata'].commands['localhost'], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == show_cmd_output.show_device_metadata_localhost + + + def test_config_device_metadata(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + config_main.config.commands['device-metadata'].commands['localhost'].commands['buffer-model'], ['dynamic'], obj=db + ) + + result = runner.invoke( + show_main.cli.commands['device-metadata'].commands['localhost'], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == show_cmd_output.show_device_metadata_localhost_changed_buffer_model + + + @pytest.mark.parametrize("parameter,value", [ + ('default-bgp-status', INVALID_VALUE), + ('docker-routing-config-mode', INVALID_VALUE), + ('mac', 
INVALID_VALUE), + ('default-pfcwd-status', INVALID_VALUE), + ('bgp-asn', INVALID_VALUE), + ('type', INVALID_VALUE), + ('buffer-model', INVALID_VALUE), + ('frr-mgmt-framework-config', INVALID_VALUE) + ]) + def test_config_device_metadata_invalid(self, parameter, value): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + config_main.config.commands['device-metadata'].commands['localhost'].commands[parameter], [value], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + + + def test_show_device_neighbor(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + show_main.cli.commands['device-neighbor'], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert show_cmd_output.show_device_neighbor + assert result.exit_code == SUCCESS + + + def test_config_device_neighbor_add(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + config_main.config.commands['device-neighbor'].commands['add'], + ['Ethernet8', '--name', 'Servers1', '--mgmt-addr', '10.217.0.3', + '--local-port', 'Ethernet8', '--port', 'eth2', '--type', 'type'], + obj=db + ) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + show_main.cli.commands['device-neighbor'], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == show_cmd_output.show_device_neighbor_added + + + def test_config_device_neighbor_delete(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + config_main.config.commands['device-neighbor'].commands['delete'], + ['Ethernet0'], obj=db + ) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + show_main.cli.commands['device-neighbor'], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == show_cmd_output.show_device_neighbor_deleted + + + @pytest.mark.parametrize("parameter,value,output", [ + ('--mgmt-addr', '10.217.0.5', show_cmd_output.show_device_neighbor_updated_mgmt_addr), + ('--name', 'Servers1', show_cmd_output.show_device_neighbor_updated_name), + ('--local-port', 'Ethernet12', show_cmd_output.show_device_neighbor_updated_local_port), + ('--port', 'eth2', show_cmd_output.show_device_neighbor_updated_port), + ('--type', 'type2', show_cmd_output.show_device_neighbor_updated_type), + ]) + def test_config_device_neighbor_update(self, parameter, value, output): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + config_main.config.commands['device-neighbor'].commands['update'], + ['Ethernet0', parameter, value], obj=db + ) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + show_main.cli.commands['device-neighbor'], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == output + + + @pytest.mark.parametrize("parameter,value", [ + ('--mgmt-addr', INVALID_VALUE), + ('--local-port', INVALID_VALUE) + ]) + def 
test_config_device_neighbor_update_invalid(self, parameter, value): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + config_main.config.commands['device-neighbor'].commands['update'], + ['Ethernet0', parameter, value], obj=db + ) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == ERROR + diff --git a/tests/cli_autogen_yang_parser_test.py b/tests/cli_autogen_yang_parser_test.py new file mode 100644 index 0000000000..ed82693e91 --- /dev/null +++ b/tests/cli_autogen_yang_parser_test.py @@ -0,0 +1,172 @@ +import os +import logging +import pprint + +from sonic_cli_gen.yang_parser import YangParser +from .cli_autogen_input.yang_parser_test import assert_dictionaries +from .cli_autogen_input.cli_autogen_common import move_yang_models, remove_yang_models + +logger = logging.getLogger(__name__) + +test_path = os.path.dirname(os.path.abspath(__file__)) + +test_yang_models = [ + 'sonic-1-table-container.yang', + 'sonic-2-table-containers.yang', + 'sonic-1-object-container.yang', + 'sonic-2-object-containers.yang', + 'sonic-1-list.yang', + 'sonic-2-lists.yang', + 'sonic-static-object-complex-1.yang', + 'sonic-static-object-complex-2.yang', + 'sonic-dynamic-object-complex-1.yang', + 'sonic-dynamic-object-complex-2.yang', + 'sonic-choice-complex.yang', + 'sonic-grouping-complex.yang', + 'sonic-grouping-1.yang', + 'sonic-grouping-2.yang', +] + + +class TestYangParser: + @classmethod + def setup_class(cls): + logger.info("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + move_yang_models(test_path, 'yang_parser_test', test_yang_models) + + @classmethod + def teardown_class(cls): + logger.info("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + remove_yang_models(test_yang_models) + + def test_1_table_container(self): + """ Test for 1 'table' container + 'table' container represents TABLE in Config DB schema: + { + "TABLE": { + "OBJECT": { + "attr": "value" + ... + } + } + } + """ + + base_test('sonic-1-table-container', + assert_dictionaries.one_table_container) + + def test_2_table_containers(self): + """ Test for 2 'table' containers """ + + base_test('sonic-2-table-containers', + assert_dictionaries.two_table_containers) + + def test_1_object_container(self): + """ Test for 1 'object' container + 'object' container represents OBJECT in Config DB schema: + { + "TABLE": { + "OBJECT": { + "attr": "value" + ... + } + } + } + """ + + base_test('sonic-1-object-container', + assert_dictionaries.one_object_container) + + def test_2_object_containers(self): + """ Test for 2 'object' containers """ + + base_test('sonic-2-object-containers', + assert_dictionaries.two_object_containers) + + def test_1_list(self): + """ Test for 1 container that contains + the YANG 'list' entity + """ + + base_test('sonic-1-list', assert_dictionaries.one_list) + + def test_2_lists(self): + """ Test for 2 containers that contain + the YANG 'list' entity + """ + + base_test('sonic-2-lists', assert_dictionaries.two_lists) + + def test_static_object_complex_1(self): + """ Test for the object container with: + 1 leaf, 1 leaf-list, 1 choice. + """ + + base_test('sonic-static-object-complex-1', + assert_dictionaries.static_object_complex_1) + + def test_static_object_complex_2(self): + """ Test for object container with: + 2 leaves, 2 leaf-lists, 2 choices.
+ """ + + base_test('sonic-static-object-complex-2', + assert_dictionaries.static_object_complex_2) + + def test_dynamic_object_complex_1(self): + """ Test for object container with: + 1 key, 1 leaf, 1 leaf-list, 1 choice. + """ + + base_test('sonic-dynamic-object-complex-1', + assert_dictionaries.dynamic_object_complex_1) + + def test_dynamic_object_complex_2(self): + """ Test for object container with: + 2 keys, 2 leafs, 2 leaf-list, 2 choice. + """ + + base_test('sonic-dynamic-object-complex-2', + assert_dictionaries.dynamic_object_complex_2) + + def test_choice_complex(self): + """ Test for object container with the 'choice' + that have complex strucutre: + leafs, leaf-lists, multiple 'uses' from different files + """ + + base_test('sonic-choice-complex', + assert_dictionaries.choice_complex) + + def test_grouping_complex(self): + """ Test for object container with multitple 'uses' that using 'grouping' + from different files. The used 'grouping' have a complex structure: + leafs, leaf-lists, choices + """ + + base_test('sonic-grouping-complex', + assert_dictionaries.grouping_complex) + + +def base_test(yang_model_name, correct_dict): + """ General logic for each test case """ + + config_db_path = os.path.join(test_path, + 'mock_tables/config_db.json') + parser = YangParser(yang_model_name=yang_model_name, + config_db_path=config_db_path, + allow_tbl_without_yang=True, + debug=False) + yang_dict = parser.parse_yang_model() + pretty_log_debug(yang_dict) + assert yang_dict == correct_dict + + +def pretty_log_debug(dictionary): + """ Pretty print of parsed dictionary """ + + for line in pprint.pformat(dictionary).split('\n'): + logging.debug(line) + diff --git a/tests/coredump_gen_handler_test.py b/tests/coredump_gen_handler_test.py new file mode 100644 index 0000000000..c742f09d06 --- /dev/null +++ b/tests/coredump_gen_handler_test.py @@ -0,0 +1,383 @@ +import os +import time +import sys +import pyfakefs +import unittest +from pyfakefs.fake_filesystem_unittest import Patcher +from swsscommon import swsscommon +from utilities_common.general import load_module_from_source +from utilities_common.db import Db +from .mock_tables import dbconnector + +sys.path.append("scripts") +import coredump_gen_handler as cdump_mod + + +def set_auto_ts_cfg(redis_mock, state="disabled", + rate_limit_interval="0", + max_core_size="0.0", + since_cfg="None"): + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_STATE, state) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.COOLOFF, rate_limit_interval) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_CORE_USAGE, max_core_size) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_SINCE, since_cfg) + + +def set_feature_table_cfg(redis_mock, state="disabled", rate_limit_interval="0", container_name="swss"): + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.FEATURE.format(container_name), cdump_mod.CFG_STATE, state) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.FEATURE.format(container_name), cdump_mod.COOLOFF, rate_limit_interval) + + +def set_auto_ts_dump_info(redis_mock, ts_dump, core_dump, timestamp, container): + key = cdump_mod.TS_MAP + "|" + ts_dump + redis_mock.set(cdump_mod.STATE_DB, key, cdump_mod.CORE_DUMP, core_dump) + redis_mock.set(cdump_mod.STATE_DB, key, cdump_mod.TIMESTAMP, timestamp) + redis_mock.set(cdump_mod.STATE_DB, key, cdump_mod.CONTAINER, container) + + +def verify_post_exec_state(redis_mock, cdump_expect=[], cdumps_not_expect=[], container_mp={}): + final_state = 
redis_mock.keys(cdump_mod.STATE_DB, cdump_mod.TS_MAP+"*") + print(final_state) + for dump in cdump_expect: + assert cdump_mod.TS_MAP+"|"+dump in final_state + for dump in cdumps_not_expect: + assert cdump_mod.TS_MAP+"|"+dump not in final_state + for dump, container in container_mp.items(): + key = cdump_mod.TS_MAP+"|"+dump + assert container in redis_mock.get(cdump_mod.STATE_DB, key, cdump_mod.CONTAINER) + + +def populate_state_db(redis_mock, + ts_map={"sonic_dump_random1": "orchagent;1575985;swss", + "sonic_dump_random2": "syncd;1575988;syncd"}): + for dump, value in ts_map.items(): + core_dump, timestamp, container_name = value.split(";") + set_auto_ts_dump_info(redis_mock, dump, core_dump, timestamp, container_name) + print(redis_mock.keys(cdump_mod.STATE_DB, cdump_mod.TS_MAP+"*")) + + +class TestCoreDumpCreationEvent(unittest.TestCase): + + def setUp(self): + cdump_mod.TIME_BUF = 1 + cdump_mod.WAIT_BUFFER = 1 + + def test_invoc_ts_state_db_update(self): + """ + Scenario: CFG_STATE is enabled. CFG_CORE_CLEANUP is disabled and no rate_limit_interval is provided. + Check that techsupport is invoked, a file is created and State DB is updated + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled") + set_feature_table_cfg(redis_mock, state="enabled") + populate_state_db(redis_mock) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + cdump_expect = ["sonic_dump_random1", "sonic_dump_random2", "sonic_dump_random3"] + verify_post_exec_state(redis_mock, cdump_expect) + + def test_global_rate_limit_interval(self): + """ + Scenario: CFG_STATE is enabled. + Global rate_limit_interval has not elapsed yet. Check that techsupport isn't invoked.
+ """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", rate_limit_interval="1") + set_feature_table_cfg(redis_mock, state="enabled") + populate_state_db(redis_mock) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + cdump_expect = ["sonic_dump_random1", "sonic_dump_random2"] + cdump_not_expect = ["sonic_dump_random3"] + verify_post_exec_state(redis_mock, cdump_expect, cdump_not_expect) + + def test_per_container_rate_limit_interval(self): + """ + Scenario: CFG_STATE is enabled. Global rate_limit_interval is passed + But Per container rate_limit_interval is not passed yet. Check if techsupport isn't invoked + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", rate_limit_interval="0.25") + set_feature_table_cfg(redis_mock, state="enabled", rate_limit_interval="10") + populate_state_db(redis_mock, ts_map={"sonic_dump_random1": + "orchagent;{};swss".format(int(time.time()))}) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + time.sleep(0.25) # wait for global rate_limit_interval to pass + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + verify_post_exec_state(redis_mock, ["sonic_dump_random1"], ["sonic_dump_random3"]) + + def test_invoc_ts_after_rate_limit_interval(self): + """ + Scenario: CFG_STATE is enabled. + All the rate_limit_interval's are passed. 
Check that techsupport is invoked + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", rate_limit_interval="0.1") + set_feature_table_cfg(redis_mock, state="enabled", rate_limit_interval="0.25") + populate_state_db(redis_mock, ts_map={"sonic_dump_random1": + "orchagent;{};swss".format(int(time.time()))}) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + time.sleep(0.25) # wait for all the rate_limit_intervals to elapse + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + ts_mp = {"sonic_dump_random3": "swss"} + verify_post_exec_state(redis_mock, ["sonic_dump_random1", "sonic_dump_random3"], [], ts_mp) + + def test_core_dump_with_invalid_container_name(self): + """ + Scenario: CFG_STATE is enabled. + Core Dump is found but no relevant exit_event entry is found in STATE_DB. + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled") + set_feature_table_cfg(redis_mock, state="enabled", container_name="snmp") + populate_state_db(redis_mock, {}) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/core/snmpd.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("snmpd.12345.123.core.gz", "whatevver", redis_mock) + cls.handle_core_dump_creation_event() + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + final_state = redis_mock.keys(cdump_mod.STATE_DB, cdump_mod.TS_MAP+"*") + assert not final_state + + def test_feature_table_not_set(self): + """ + Scenario: CFG_STATE is enabled.
+ The auto-techsupport in Feature table is not enabled for the generated core dump. + Check that techsupport is not invoked + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled") + set_feature_table_cfg(redis_mock, state="disabled", container_name="snmp") + populate_state_db(redis_mock, {}) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/core/python3.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("python3.12345.123.core.gz", "snmp", redis_mock) + cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("python3.12345.123.core.gz", redis_mock) + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + + def test_since_argument(self): + """ + Scenario: CFG_STATE is enabled. + Check that techsupport is invoked and the since argument is properly applied + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", since_cfg="4 days ago") + set_feature_table_cfg(redis_mock, state="enabled") + populate_state_db(redis_mock) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport --since '4 days ago'" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + return 0, "", "" + elif "date --date='4 days ago'" in cmd_str: + return 0, "", "" + else: + return 1, "", "Invalid Command" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + expect = ["sonic_dump_random1", "sonic_dump_random2", "sonic_dump_random3"] + ts_mp = {"sonic_dump_random3": "swss"} + verify_post_exec_state(redis_mock, expect, [], ts_mp) + + def test_masic_core_dump(self): + """ + Scenario: Dump is generated from swss12 container.
Config specified for swss should be applied + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled") + set_feature_table_cfg(redis_mock, state="enabled") + populate_state_db(redis_mock) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss12", redis_mock) + cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + cdump_expect = ["sonic_dump_random1", "sonic_dump_random2", "sonic_dump_random3"] + verify_post_exec_state(redis_mock, cdump_expect) + + def test_invalid_since_argument(self): + """ + Scenario: CFG_STATE is enabled. + Check that techsupport is invoked and an invalid since argument is identified + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", since_cfg="whatever") + set_feature_table_cfg(redis_mock, state="enabled") + populate_state_db(redis_mock) + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport --since '2 days ago'" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + return 0, "", "" + elif "date --date='whatever'" in cmd_str: + return 1, "", "Invalid Date Format" + else: + return 1, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + expect = ["sonic_dump_random1", "sonic_dump_random2", "sonic_dump_random3"] + ts_mp = {"sonic_dump_random3": "swss"} + verify_post_exec_state(redis_mock, expect, [], ts_mp) + + def test_core_dump_cleanup(self): + """ + Scenario: CFG_STATE is enabled.
core-dump limit is crossed. + Verify whether cleanup is performed + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", max_core_size="6.0") + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/core/") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz", st_size=25) + patcher.fs.create_file("/var/core/lldpmgrd.12345.22.core.gz", st_size=25) + patcher.fs.create_file("/var/core/python3.12345.21.core.gz", st_size=25) + cdump_mod.handle_coredump_cleanup("python3.12345.21.core.gz", redis_mock) + current_fs = os.listdir(cdump_mod.CORE_DUMP_DIR) + assert len(current_fs) == 2 + assert "orchagent.12345.123.core.gz" not in current_fs + assert "lldpmgrd.12345.22.core.gz" in current_fs + assert "python3.12345.21.core.gz" in current_fs + + def test_max_core_size_limit_not_crossed(self): + """ + Scenario: CFG_STATE is enabled. core-dump limit is not crossed + Verify that cleanup is not performed + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled", max_core_size="5.0") + with Patcher() as patcher: + def mock_cmd(cmd, env): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + return 0, "", "" + patcher.fs.set_disk_usage(2000, path="/var/core/") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz", st_size=25) + patcher.fs.create_file("/var/core/lldpmgrd.12345.22.core.gz", st_size=25) + patcher.fs.create_file("/var/core/python3.12345.21.core.gz", st_size=25) + cdump_mod.handle_coredump_cleanup("python3.12345.21.core.gz", redis_mock) + current_fs = os.listdir(cdump_mod.CORE_DUMP_DIR) + assert len(current_fs) == 3 + assert "orchagent.12345.123.core.gz" in current_fs + assert "lldpmgrd.12345.22.core.gz" in current_fs + assert "python3.12345.21.core.gz" in current_fs diff --git a/tests/counterpoll_test.py b/tests/counterpoll_test.py index 371b984472..f1a4cdaaa1 100644 --- a/tests/counterpoll_test.py +++ b/tests/counterpoll_test.py @@ -26,6 +26,7 @@ QUEUE_WATERMARK_STAT 10000 enable PG_WATERMARK_STAT 10000 enable PG_DROP_STAT 10000 enable +ACL 10000 enable """ class TestCounterpoll(object): @@ -63,6 +64,15 @@ def test_pg_drop_interval_too_long(self): assert result.exit_code == 2 assert expected in result.output + @pytest.mark.parametrize("interval", [100, 50000]) + def test_acl_interval_range(self, interval): + runner = CliRunner() + result = runner.invoke(counterpoll.cli.commands["acl"].commands["interval"], [str(interval)]) + print(result.output) + expected = "Invalid value for \"POLL_INTERVAL\": {} is not in the valid range of 1000 to 30000.".format(interval) + assert result.exit_code == 2 + assert expected in result.output + @pytest.fixture(scope='class') def _get_config_db_file(self): + sample_config_db_file = os.path.join(test_path, "counterpoll_input", "config_db.json") @@ -109,6 +119,30 @@ def test_update_pg_drop_interval(self): table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') assert test_interval == table["PG_DROP"]["POLL_INTERVAL"] + @pytest.mark.parametrize("status", ["disable", "enable"]) + def test_update_acl_status(self, status): + runner = CliRunner() + db = Db() + + result = runner.invoke(counterpoll.cli.commands["acl"].commands[status], [], obj=db.cfgdb) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + table = db.cfgdb.get_table("FLEX_COUNTER_TABLE") + assert status == table["ACL"]["FLEX_COUNTER_STATUS"] + + def test_update_acl_interval(self): + runner = CliRunner() +
db = Db() + test_interval = "20000" + + result = runner.invoke(counterpoll.cli.commands["acl"].commands["interval"], [test_interval], obj=db.cfgdb) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + table = db.cfgdb.get_table("FLEX_COUNTER_TABLE") + assert test_interval == table["ACL"]["POLL_INTERVAL"] + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/mock_tables/asic0/counters_db.json b/tests/mock_tables/asic0/counters_db.json index 44b78d823b..05b956ffa6 100644 --- a/tests/mock_tables/asic0/counters_db.json +++ b/tests/mock_tables/asic0/counters_db.json @@ -1419,49 +1419,62 @@ "oid:0x600000000063d": "SAI_ROUTER_INTERFACE_TYPE_PORT", "oid:0x600000000065f": "SAI_ROUTER_INTERFACE_TYPE_PORT" }, - "COUNTERS:DATAACL:DEFAULT_RULE": { - "Bytes": "1", - "Packets": "2" - }, - "COUNTERS:DATAACL:RULE_1": { - "Bytes": "100", - "Packets": "101" - }, - "COUNTERS:DATAACL:RULE_2": { - "Bytes": "200", - "Packets": "201" - }, - "COUNTERS:DATAACL:RULE_3": { - "Bytes": "300", - "Packets": "301" - }, - "COUNTERS:DATAACL:RULE_4": { - "Bytes": "400", - "Packets": "401" - }, - "COUNTERS:DATAACL:RULE_05": { - "Bytes": "0", - "Packets": "0" - }, - "COUNTERS:EVERFLOW:RULE_6": { - "Bytes": "600", - "Packets": "601" - }, - "COUNTERS:DATAACL:RULE_7":{ - "Bytes": "700", - "Packets": "701" - }, - "COUNTERS:EVERFLOW:RULE_08": { - "Bytes": "0", - "Packets": "0" - }, - "COUNTERS:DATAACL:RULE_9": { - "Bytes": "900", - "Packets": "901" - }, - "COUNTERS:DATAACL:RULE_10": { - "Bytes": "1000", - "Packets": "1001" + "COUNTERS:oid:0x9000000000000": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "2", + "SAI_ACL_COUNTER_ATTR_BYTES": "1" + }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "101", + "SAI_ACL_COUNTER_ATTR_BYTES": "100" + }, + "COUNTERS:oid:0x9000000000002": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "201", + "SAI_ACL_COUNTER_ATTR_BYTES": "200" + }, + "COUNTERS:oid:0x9000000000003": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "301", + "SAI_ACL_COUNTER_ATTR_BYTES": "300" + }, + "COUNTERS:oid:0x9000000000004": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "401", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "COUNTERS:oid:0x9000000000005": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "0", + "SAI_ACL_COUNTER_ATTR_BYTES": "0" + }, + "COUNTERS:oid:0x9000000000006": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "601", + "SAI_ACL_COUNTER_ATTR_BYTES": "600" + }, + "COUNTERS:oid:0x9000000000007":{ + "SAI_ACL_COUNTER_ATTR_PACKETS": "701", + "SAI_ACL_COUNTER_ATTR_BYTES": "700" + }, + "COUNTERS:oid:0x9000000000008": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "0", + "SAI_ACL_COUNTER_ATTR_BYTES": "0" + }, + "COUNTERS:oid:0x9000000000009": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "901", + "SAI_ACL_COUNTER_ATTR_BYTES": "900" + }, + "COUNTERS:oid:0x900000000000a": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "1001", + "SAI_ACL_COUNTER_ATTR_BYTES": "1000" + }, + "ACL_COUNTER_RULE_MAP": { + "DATAACL:DEFAULT_RULE": "oid:0x9000000000000", + "DATAACL:RULE_1": "oid:0x9000000000001", + "DATAACL:RULE_2": "oid:0x9000000000002", + "DATAACL:RULE_3": "oid:0x9000000000003", + "DATAACL:RULE_4": "oid:0x9000000000004", + "DATAACL:RULE_05": "oid:0x9000000000005", + "EVERFLOW:RULE_6": "oid:0x9000000000006", + "DATAACL:RULE_7": "oid:0x9000000000007", + "EVERFLOW:RULE_08": "oid:0x9000000000008", + "DATAACL:RULE_9": "oid:0x9000000000009", + "DATAACL:RULE_10": "oid:0x900000000000a" }, "COUNTERS:oid:0x1000000000002": { "SAI_PORT_STAT_IF_IN_UCAST_PKTS": "8", diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index 
606ebd3fe8..26e2ac033f 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -75,49 +75,62 @@ "oid:0x600000000063d": "SAI_ROUTER_INTERFACE_TYPE_PORT", "oid:0x600000000065f": "SAI_ROUTER_INTERFACE_TYPE_PORT" }, - "COUNTERS:DATAACL:DEFAULT_RULE": { - "Bytes": "1", - "Packets": "2" - }, - "COUNTERS:DATAACL:RULE_1": { - "Bytes": "100", - "Packets": "101" - }, - "COUNTERS:DATAACL:RULE_2": { - "Bytes": "200", - "Packets": "201" - }, - "COUNTERS:DATAACL:RULE_3": { - "Bytes": "300", - "Packets": "301" - }, - "COUNTERS:DATAACL:RULE_4": { - "Bytes": "400", - "Packets": "401" - }, - "COUNTERS:DATAACL:RULE_05": { - "Bytes": "0", - "Packets": "0" - }, - "COUNTERS:EVERFLOW:RULE_6": { - "Bytes": "600", - "Packets": "601" - }, - "COUNTERS:DATAACL:RULE_7":{ - "Bytes": "700", - "Packets": "701" - }, - "COUNTERS:EVERFLOW:RULE_08": { - "Bytes": "0", - "Packets": "0" - }, - "COUNTERS:DATAACL:RULE_9": { - "Bytes": "900", - "Packets": "901" - }, - "COUNTERS:DATAACL:RULE_10": { - "Bytes": "1000", - "Packets": "1001" + "COUNTERS:oid:0x9000000000000": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "2", + "SAI_ACL_COUNTER_ATTR_BYTES": "1" + }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "101", + "SAI_ACL_COUNTER_ATTR_BYTES": "100" + }, + "COUNTERS:oid:0x9000000000002": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "201", + "SAI_ACL_COUNTER_ATTR_BYTES": "200" + }, + "COUNTERS:oid:0x9000000000003": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "301", + "SAI_ACL_COUNTER_ATTR_BYTES": "300" + }, + "COUNTERS:oid:0x9000000000004": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "401", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "COUNTERS:oid:0x9000000000005": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "0", + "SAI_ACL_COUNTER_ATTR_BYTES": "0" + }, + "COUNTERS:oid:0x9000000000006": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "601", + "SAI_ACL_COUNTER_ATTR_BYTES": "600" + }, + "COUNTERS:oid:0x9000000000007":{ + "SAI_ACL_COUNTER_ATTR_PACKETS": "701", + "SAI_ACL_COUNTER_ATTR_BYTES": "700" + }, + "COUNTERS:oid:0x9000000000008": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "0", + "SAI_ACL_COUNTER_ATTR_BYTES": "0" + }, + "COUNTERS:oid:0x9000000000009": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "901", + "SAI_ACL_COUNTER_ATTR_BYTES": "900" + }, + "COUNTERS:oid:0x900000000000a": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "1001", + "SAI_ACL_COUNTER_ATTR_BYTES": "1000" + }, + "ACL_COUNTER_RULE_MAP": { + "DATAACL:DEFAULT_RULE": "oid:0x9000000000000", + "DATAACL:RULE_1": "oid:0x9000000000001", + "DATAACL:RULE_2": "oid:0x9000000000002", + "DATAACL:RULE_3": "oid:0x9000000000003", + "DATAACL:RULE_4": "oid:0x9000000000004", + "DATAACL:RULE_05": "oid:0x9000000000005", + "EVERFLOW:RULE_6": "oid:0x9000000000006", + "DATAACL:RULE_7": "oid:0x9000000000007", + "EVERFLOW:RULE_08": "oid:0x9000000000008", + "DATAACL:RULE_9": "oid:0x9000000000009", + "DATAACL:RULE_10": "oid:0x900000000000a" }, "COUNTERS:oid:0x1000000000b06": { "SAI_PORT_STAT_IF_IN_UCAST_PKTS": "8", diff --git a/tests/mock_tables/asic2/counters_db.json b/tests/mock_tables/asic2/counters_db.json index 09343d784b..66875f8245 100644 --- a/tests/mock_tables/asic2/counters_db.json +++ b/tests/mock_tables/asic2/counters_db.json @@ -1419,49 +1419,62 @@ "oid:0x600000000063d": "SAI_ROUTER_INTERFACE_TYPE_PORT", "oid:0x600000000065f": "SAI_ROUTER_INTERFACE_TYPE_PORT" }, - "COUNTERS:DATAACL:DEFAULT_RULE": { - "Bytes": "1", - "Packets": "2" - }, - "COUNTERS:DATAACL:RULE_1": { - "Bytes": "100", - "Packets": "101" - }, - "COUNTERS:DATAACL:RULE_2": { - "Bytes": "200", - "Packets": "201" - }, - 
"COUNTERS:DATAACL:RULE_3": { - "Bytes": "300", - "Packets": "301" - }, - "COUNTERS:DATAACL:RULE_4": { - "Bytes": "400", - "Packets": "401" - }, - "COUNTERS:DATAACL:RULE_05": { - "Bytes": "0", - "Packets": "0" - }, - "COUNTERS:EVERFLOW:RULE_6": { - "Bytes": "600", - "Packets": "601" - }, - "COUNTERS:DATAACL:RULE_7":{ - "Bytes": "700", - "Packets": "701" - }, - "COUNTERS:EVERFLOW:RULE_08": { - "Bytes": "0", - "Packets": "0" - }, - "COUNTERS:DATAACL:RULE_9": { - "Bytes": "900", - "Packets": "901" - }, - "COUNTERS:DATAACL:RULE_10": { - "Bytes": "1000", - "Packets": "1001" + "COUNTERS:oid:0x9000000000000": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "2", + "SAI_ACL_COUNTER_ATTR_BYTES": "1" + }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "101", + "SAI_ACL_COUNTER_ATTR_BYTES": "100" + }, + "COUNTERS:oid:0x9000000000002": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "201", + "SAI_ACL_COUNTER_ATTR_BYTES": "200" + }, + "COUNTERS:oid:0x9000000000003": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "301", + "SAI_ACL_COUNTER_ATTR_BYTES": "300" + }, + "COUNTERS:oid:0x9000000000004": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "401", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "COUNTERS:oid:0x9000000000005": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "0", + "SAI_ACL_COUNTER_ATTR_BYTES": "0" + }, + "COUNTERS:oid:0x9000000000006": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "601", + "SAI_ACL_COUNTER_ATTR_BYTES": "600" + }, + "COUNTERS:oid:0x9000000000007":{ + "SAI_ACL_COUNTER_ATTR_PACKETS": "701", + "SAI_ACL_COUNTER_ATTR_BYTES": "700" + }, + "COUNTERS:oid:0x9000000000008": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "0", + "SAI_ACL_COUNTER_ATTR_BYTES": "0" + }, + "COUNTERS:oid:0x9000000000009": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "901", + "SAI_ACL_COUNTER_ATTR_BYTES": "900" + }, + "COUNTERS:oid:0x900000000000a": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "1001", + "SAI_ACL_COUNTER_ATTR_BYTES": "1000" + }, + "ACL_COUNTER_RULE_MAP": { + "DATAACL:DEFAULT_RULE": "oid:0x9000000000000", + "DATAACL:RULE_1": "oid:0x9000000000001", + "DATAACL:RULE_2": "oid:0x9000000000002", + "DATAACL:RULE_3": "0x9000000000003", + "DATAACL:RULE_4": "0x9000000000004", + "DATAACL:RULE_05": "0x9000000000005", + "EVERFLOW:RULE_6": "0x9000000000006", + "DATAACL:RULE_7": "0x9000000000007", + "EVERFLOW:RULE_08": "0x9000000000008", + "DATAACL:RULE_9": "0x9000000000009", + "DATAACL:RULE_10": "0x900000000000a" }, "COUNTERS:oid:0x1000000000002": { "SAI_PORT_STAT_IF_IN_UCAST_PKTS": "8", diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 6ba9c3aa86..f438d7f207 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1573,6 +1573,10 @@ "POLL_INTERVAL": "10000", "FLEX_COUNTER_STATUS": "enable" }, + "FLEX_COUNTER_TABLE|ACL": { + "POLL_INTERVAL": "10000", + "FLEX_COUNTER_STATUS": "enable" + }, "PFC_WD|Ethernet0": { "action": "drop", "detection_time": "600", diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index 0ed15d8ca1..603a8c7f8e 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -542,6 +542,63 @@ "oid:0x600000000063d": "SAI_ROUTER_INTERFACE_TYPE_PORT", "oid:0x600000000065f": "SAI_ROUTER_INTERFACE_TYPE_PORT" }, + "COUNTERS:oid:0x9000000000000": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "2", + "SAI_ACL_COUNTER_ATTR_BYTES": "1" + }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "101", + "SAI_ACL_COUNTER_ATTR_BYTES": "100" + }, + "COUNTERS:oid:0x9000000000002": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "201", + 
"SAI_ACL_COUNTER_ATTR_BYTES": "200" + }, + "COUNTERS:oid:0x9000000000003": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "301", + "SAI_ACL_COUNTER_ATTR_BYTES": "300" + }, + "COUNTERS:oid:0x9000000000004": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "401", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "COUNTERS:oid:0x9000000000005": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "0", + "SAI_ACL_COUNTER_ATTR_BYTES": "0" + }, + "COUNTERS:oid:0x9000000000006": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "601", + "SAI_ACL_COUNTER_ATTR_BYTES": "600" + }, + "COUNTERS:oid:0x9000000000007":{ + "SAI_ACL_COUNTER_ATTR_PACKETS": "701", + "SAI_ACL_COUNTER_ATTR_BYTES": "700" + }, + "COUNTERS:oid:0x9000000000008": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "0", + "SAI_ACL_COUNTER_ATTR_BYTES": "0" + }, + "COUNTERS:oid:0x9000000000009": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "901", + "SAI_ACL_COUNTER_ATTR_BYTES": "900" + }, + "COUNTERS:oid:0x900000000000a": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "1001", + "SAI_ACL_COUNTER_ATTR_BYTES": "1000" + }, + "ACL_COUNTER_RULE_MAP": { + "DATAACL:DEFAULT_RULE": "oid:0x9000000000000", + "DATAACL:RULE_1": "oid:0x9000000000001", + "DATAACL:RULE_2": "oid:0x9000000000002", + "DATAACL:RULE_3": "oid:0x9000000000003", + "DATAACL:RULE_4": "oid:0x9000000000004", + "DATAACL:RULE_05": "oid:0x9000000000005", + "EVERFLOW:RULE_6": "oid:0x9000000000006", + "DATAACL:RULE_7": "oid:0x9000000000007", + "EVERFLOW:RULE_08": "oid:0x9000000000008", + "DATAACL:RULE_9": "oid:0x9000000000009", + "DATAACL:RULE_10": "oid:0x900000000000a" + }, "COUNTERS_TUNNEL_NAME_MAP": { "vtep1": "oid:0x2a0000000035e" }, diff --git a/tests/pbh_input/counters_db.json b/tests/pbh_input/counters_db.json index 1f764f32db..6e3c6e5dfc 100644 --- a/tests/pbh_input/counters_db.json +++ b/tests/pbh_input/counters_db.json @@ -1,10 +1,14 @@ { - "COUNTERS:pbh_table1:nvgre": { - "Packets": "100", - "Bytes": "200" + "COUNTERS:oid:0x9000000000000": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "100", + "SAI_ACL_COUNTER_ATTR_BYTES": "200" }, - "COUNTERS:pbh_table2:vxlan": { - "Packets": "300", - "Bytes": "400" + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "300", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "ACL_COUNTER_RULE_MAP": { + "pbh_table1:nvgre": "oid:0x9000000000000", + "pbh_table2:vxlan": "oid:0x9000000000001" } } diff --git a/tests/pbh_input/counters_db_updated.json b/tests/pbh_input/counters_db_updated.json index c1771ba3ff..fa0caa86ea 100644 --- a/tests/pbh_input/counters_db_updated.json +++ b/tests/pbh_input/counters_db_updated.json @@ -1,10 +1,14 @@ { - "COUNTERS:pbh_table1:nvgre": { - "Packets": "500", - "Bytes": "600" + "COUNTERS:oid:0x9000000000000": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "500", + "SAI_ACL_COUNTER_ATTR_BYTES": "600" }, - "COUNTERS:pbh_table2:vxlan": { - "Packets": "700", - "Bytes": "800" + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "700", + "SAI_ACL_COUNTER_ATTR_BYTES": "800" + }, + "ACL_COUNTER_RULE_MAP": { + "pbh_table1:nvgre": "oid:0x9000000000000", + "pbh_table2:vxlan": "oid:0x9000000000001" } } diff --git a/tests/sonic_package_manager/test_registry.py b/tests/sonic_package_manager/test_registry.py index 0d82499df3..0b6072a693 100644 --- a/tests/sonic_package_manager/test_registry.py +++ b/tests/sonic_package_manager/test_registry.py @@ -1,5 +1,7 @@ #!/usr/bin/env python +import requests +import responses from sonic_package_manager.registry import RegistryResolver @@ -13,3 +15,22 @@ def test_get_registry_for(): assert registry.url == 'https://registry-server:5000' registry = 
resolver.get_registry_for('registry-server.com/docker') assert registry.url == 'https://registry-server.com' + + +@responses.activate +def test_registry_auth(): + resolver = RegistryResolver() + registry = resolver.get_registry_for('registry-server:5000/docker') + responses.add(responses.GET, registry.url + '/v2/docker/tags/list', + headers={ + 'www-authenticate': 'Bearer realm="https://auth.docker.io/token",scope="repository:library/docker:pull,push"' + }, + status=requests.codes.unauthorized) + responses.add(responses.GET, + 'https://auth.docker.io/token?scope=repository:library/docker:pull,push', + json={'token': 'a', 'expires_in': '100'}, + status=requests.codes.ok) + responses.add(responses.GET, registry.url + '/v2/docker/tags/list', + json={'tags': ['a', 'b']}, + status=requests.codes.ok) + assert registry.tags('registry-server:5000/docker') == ['a', 'b'] diff --git a/tests/techsupport_cleanup_test.py b/tests/techsupport_cleanup_test.py new file mode 100644 index 0000000000..da1e2a772c --- /dev/null +++ b/tests/techsupport_cleanup_test.py @@ -0,0 +1,115 @@ +import os +import sys +import pyfakefs +import unittest +from pyfakefs.fake_filesystem_unittest import Patcher +from swsscommon import swsscommon +from utilities_common.general import load_module_from_source +from utilities_common.db import Db +from .mock_tables import dbconnector + +sys.path.append("scripts") +import techsupport_cleanup as ts_mod + + +def set_auto_ts_cfg(redis_mock, auto_ts_state="disabled", max_ts="0"): + redis_mock.set(ts_mod.CFG_DB, ts_mod.AUTO_TS, ts_mod.CFG_STATE, auto_ts_state) + redis_mock.set(ts_mod.CFG_DB, ts_mod.AUTO_TS, ts_mod.CFG_MAX_TS, max_ts) + + +def set_auto_ts_dump_info(redis_mock, ts_dump, core_dump, timestamp, container_name): + key = ts_mod.TS_MAP + "|" + ts_dump + redis_mock.set(ts_mod.STATE_DB, key, ts_mod.CORE_DUMP, core_dump) + redis_mock.set(ts_mod.STATE_DB, key, ts_mod.TIMESTAMP, timestamp) + redis_mock.set(ts_mod.STATE_DB, key, ts_mod.CONTAINER, container_name) + + +class TestTechsupportCreationEvent(unittest.TestCase): + + def test_no_cleanup_state_disabled(self): + """ + Scenario: TS_CLEANUP is disabled. Check no cleanup is performed, + even though the techsupport limit is already crossed + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, max_ts="5") + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/dump/") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz", redis_mock) + current_fs = os.listdir(ts_mod.TS_DIR) + print(current_fs) + assert len(current_fs) == 3 + assert "sonic_dump_random1.tar.gz" in current_fs + assert "sonic_dump_random2.tar.gz" in current_fs + assert "sonic_dump_random3.tar.gz" in current_fs + + def test_no_cleanup_state_enabled(self): + """ + Scenario: TS_CLEANUP is enabled. 
+ Verify no cleanup is performed, as the techsupport limit hasn't been crossed yet + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_ts_state="enabled", max_ts="10") + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/dump/") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz", redis_mock) + current_fs = os.listdir(ts_mod.TS_DIR) + print(current_fs) + assert len(current_fs) == 3 + assert "sonic_dump_random1.tar.gz" in current_fs + assert "sonic_dump_random2.tar.gz" in current_fs + assert "sonic_dump_random3.tar.gz" in current_fs + + def test_dump_cleanup(self): + """ + Scenario: TS_CLEANUP is enabled. techsupport size limit is crossed. + Verify whether cleanup is performed + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_ts_state="enabled", max_ts="5") + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/dump/") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=25) + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=25) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz", redis_mock) + current_fs = os.listdir(ts_mod.TS_DIR) + assert len(current_fs) == 2 + assert "sonic_dump_random1.tar.gz" not in current_fs + assert "sonic_dump_random2.tar.gz" in current_fs + assert "sonic_dump_random3.tar.gz" in current_fs + + def test_state_db_update(self): + """ + Scenario: TS_CLEANUP is enabled.
diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py
new file mode 100644
index 0000000000..b94b782897
--- /dev/null
+++ b/utilities_common/auto_techsupport_helper.py
@@ -0,0 +1,184 @@
+import os
+import glob
+import time
+import subprocess
+import shutil
+import math
+import syslog
+from os.path import basename, splitext
+
+__all__ = [  # Constants
+    "CORE_DUMP_DIR", "CORE_DUMP_PTRN", "TS_DIR", "TS_PTRN",
+    "CFG_DB", "AUTO_TS", "CFG_STATE", "CFG_MAX_TS", "COOLOFF",
+    "CFG_CORE_USAGE", "CFG_SINCE", "FEATURE", "STATE_DB",
+    "TS_MAP", "CORE_DUMP", "TIMESTAMP", "CONTAINER",
+    "TIME_BUF", "SINCE_DEFAULT"
+] + [  # Methods
+    "verify_recent_file_creation",
+    "get_ts_dumps",
+    "strip_ts_ext",
+    "get_stats",
+    "pretty_size",
+    "cleanup_process",
+    "subprocess_exec",
+    "trim_masic_suffix"
+]
+
+
+# MISC
+CORE_DUMP_DIR = "/var/core"
+CORE_DUMP_PTRN = "*.core.gz"
+
+TS_DIR = "/var/dump"
+TS_PTRN = "sonic_dump_*.tar*"
+
+# CONFIG_DB attributes
+CFG_DB = "CONFIG_DB"
+
+# AUTO_TECHSUPPORT|GLOBAL table attributes
+AUTO_TS = "AUTO_TECHSUPPORT|GLOBAL"
+CFG_STATE = "state"
+CFG_MAX_TS = "max_techsupport_limit"
+COOLOFF = "rate_limit_interval"
+CFG_CORE_USAGE = "max_core_limit"
+CFG_SINCE = "since"
+
+# AUTO_TECHSUPPORT_FEATURE table
+FEATURE = "AUTO_TECHSUPPORT_FEATURE|{}"
+
+# STATE_DB attributes
+STATE_DB = "STATE_DB"
+
+# AUTO_TECHSUPPORT_DUMP_INFO table info
+TS_MAP = "AUTO_TECHSUPPORT_DUMP_INFO"
+CORE_DUMP = "core_dump"
+TIMESTAMP = "timestamp"
+CONTAINER = "container_name"
+
+TIME_BUF = 20
+SINCE_DEFAULT = "2 days ago"
+
+
+# Helper methods
+def subprocess_exec(cmd, env=None):
+    output = subprocess.run(
+        cmd,
+        capture_output=True,
+        text=True,
+        env=env
+    )
+    return output.returncode, output.stdout, output.stderr
+
+
+def strip_ts_ext(ts_path):
+    """ Return the basename of the techsupport dump, stripped of its extensions """
+    base_name = basename(ts_path)
+    name, _ = splitext(splitext(base_name)[0])  # handles *.tar.gz
+    return name
+
+
+def get_ts_dumps(full_path=False):
+    """ Get the list of TS dumps in TS_DIR, sorted by creation time """
+    curr_list = glob.glob(os.path.join(TS_DIR, TS_PTRN))
+    curr_list.sort(key=os.path.getmtime)
+    if full_path:
+        return curr_list
+    return [os.path.basename(name) for name in curr_list]
+
+
+def verify_recent_file_creation(file_path, in_last_sec=TIME_BUF):
+    """ Verify that the file exists and was created within the last in_last_sec seconds """
+    curr = time.time()
+    try:
+        was_created_on = os.path.getmtime(file_path)
+    except Exception:
+        return False
+    return curr - was_created_on < in_last_sec
+
+
+def get_stats(ptrn, collect_stats=True):
+    """
+    Return the total size occupied by the files matching ptrn.
+    When collect_stats is True, also return the list of files sorted by
+    descending creation time, with ties broken by ascending size and name.
+    """
+    files = glob.glob(ptrn)
+    file_stats = []
+    total_size = 0
+    for file in files:
+        file_size = os.path.getsize(file)
+        if collect_stats:
+            file_stats.append((os.path.getmtime(file), file_size, file))
+        total_size += file_size
+    if collect_stats:
+        # Sort by descending creation time; ties broken by ascending size, then name
+        file_stats = sorted(file_stats, key=lambda sub: (-sub[0], sub[1], sub[2]))
+    return (file_stats, total_size)
+
+
+def pretty_size(bytes):
+    """ Convert a byte count into a human-readable size string """
+    UNITS_MAPPING = [
+        (1 << 50, ' PB'),
+        (1 << 40, ' TB'),
+        (1 << 30, ' GB'),
+        (1 << 20, ' MB'),
+        (1 << 10, ' KB'),
+        (1, (' byte', ' bytes')),
+    ]
+    for factor, suffix in UNITS_MAPPING:
+        if bytes >= factor:
+            break
+    amount = int(bytes / factor)
+
+    if isinstance(suffix, tuple):
+        singular, multiple = suffix
+        suffix = singular if amount == 1 else multiple
+    return str(amount) + suffix
+
+
+def cleanup_process(limit, file_ptrn, dir):
+    """ Delete the oldest files incrementally until the total size drops under the limit """
+    if not (0 < limit < 100):
+        syslog.syslog(syslog.LOG_ERR,
+                      "the limit has to lie within the range (0, 100), "
+                      "whereas the configured value is: {}".format(limit))
+        return
+
+    fs_stats, curr_size = get_stats(os.path.join(dir, file_ptrn))
+    disk_stats = shutil.disk_usage(dir)
+    max_limit_bytes = math.floor(limit * disk_stats.total / 100)
+
+    if curr_size <= max_limit_bytes:
+        return
+
+    num_bytes_to_del = curr_size - max_limit_bytes
+    num_deleted = 0
+    removed_files = []
+    # Always preserve the most recently created file
+    while num_deleted < num_bytes_to_del and len(fs_stats) > 1:
+        stat = fs_stats.pop()
+        try:
+            os.remove(stat[2])
+            removed_files.append(stat[2])
+        except OSError:
+            continue
+        num_deleted += stat[1]
+    syslog.syslog(syslog.LOG_INFO, "{} deleted from {}".format(pretty_size(num_deleted), dir))
+    return removed_files
+
+
+def trim_masic_suffix(container_name):
+    """ Trim any multi-ASIC instance suffix, e.g. swss0 -> swss """
+    arr = list(container_name)
+    while arr and arr[-1].isdigit():
+        arr.pop()
+    return "".join(arr)
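Since the helpers above are small and pure, a few concrete input/output pairs (checked against the implementations as written) may help when reviewing:

    from utilities_common.auto_techsupport_helper import (
        pretty_size, strip_ts_ext, trim_masic_suffix
    )

    # strip_ts_ext peels at most two extensions off the basename
    assert strip_ts_ext("/var/dump/sonic_dump_random1.tar.gz") == "sonic_dump_random1"
    assert strip_ts_ext("sonic_dump_random1.tar") == "sonic_dump_random1"

    # trim_masic_suffix drops a trailing numeric instance id, if any
    assert trim_masic_suffix("swss0") == "swss"
    assert trim_masic_suffix("syncd12") == "syncd"
    assert trim_masic_suffix("pmon") == "pmon"

    # pretty_size picks the largest binary unit that fits
    assert pretty_size(1) == "1 byte"
    assert pretty_size(4096) == "4 KB"
    assert pretty_size(3 * (1 << 30)) == "3 GB"

For cleanup_process itself, the arithmetic matches the tests: with a 1000-byte partition and limit=5, max_limit_bytes is floor(5 * 1000 / 100) = 50, so three 25-byte dumps (75 bytes total) trigger removal of the single oldest file, bringing usage down to exactly the 50-byte limit.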
diff --git a/utilities_common/util_base.py b/utilities_common/util_base.py
index ff5570735c..98fc230629 100644
--- a/utilities_common/util_base.py
+++ b/utilities_common/util_base.py
@@ -24,6 +24,7 @@ def iter_namespace(ns_pkg):
         for _, module_name, ispkg in iter_namespace(plugins_namespace):
             if ispkg:
+                yield from self.load_plugins(importlib.import_module(module_name))
                 continue
             log.log_debug('importing plugin: {}'.format(module_name))
             try:
@@ -82,3 +83,9 @@ def check_pddf_mode(self):
             return True
         else:
             return False
+
+    def load_and_register_plugins(self, plugins, cli):
+        """ Load plugins and register them """
+
+        for plugin in self.load_plugins(plugins):
+            self.register_plugin(plugin, cli)
\ No newline at end of file
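A note on the util_base.py hunk: sub-packages (ispkg) under the plugins namespace were previously skipped outright; the added yield from makes discovery recurse into them, which is what lets modules placed in nested auto/ packages be loaded too. A self-contained sketch of the pattern (standalone function names, illustrative only; the real code lives in UtilHelper.load_plugins):

    import importlib
    import pkgutil


    def iter_namespace(ns_pkg):
        # The dotted prefix yields fully qualified module names that
        # importlib.import_module can resolve without path manipulation.
        return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")


    def load_plugins(pkg):
        for _, module_name, ispkg in iter_namespace(pkg):
            if ispkg:
                # Descend into sub-packages (e.g. config.plugins.auto)
                # instead of skipping them, so their modules load too.
                yield from load_plugins(importlib.import_module(module_name))
                continue
            yield importlib.import_module(module_name)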