diff --git a/acl_loader/main.py b/acl_loader/main.py
index c50efec032..2eab089c21 100644
--- a/acl_loader/main.py
+++ b/acl_loader/main.py
@@ -72,6 +72,10 @@ class AclLoader(object):
     ACL_TABLE = "ACL_TABLE"
     ACL_RULE = "ACL_RULE"
+    CFG_ACL_TABLE = "ACL_TABLE"
+    STATE_ACL_TABLE = "ACL_TABLE_TABLE"
+    CFG_ACL_RULE = "ACL_RULE"
+    STATE_ACL_RULE = "ACL_RULE_TABLE"
     ACL_TABLE_TYPE_MIRROR = "MIRROR"
     ACL_TABLE_TYPE_CTRLPLANE = "CTRLPLANE"
     CFG_MIRROR_SESSION_TABLE = "MIRROR_SESSION"
@@ -117,11 +121,16 @@ def __init__(self):
         self.tables_db_info = {}
         self.rules_db_info = {}
         self.rules_info = {}
+        self.tables_state_info = None
+        self.rules_state_info = None
 
         # Load database config files
         load_db_config()
 
         self.sessions_db_info = {}
+        self.acl_table_status = {}
+        self.acl_rule_status = {}
+
         self.configdb = ConfigDBConnector()
         self.configdb.connect()
         self.statedb = SonicV2Connector(host="127.0.0.1")
@@ -156,6 +165,8 @@ def __init__(self):
         self.read_rules_info()
         self.read_sessions_info()
         self.read_policers_info()
+        self.acl_table_status = self.read_acl_object_status_info(self.CFG_ACL_TABLE, self.STATE_ACL_TABLE)
+        self.acl_rule_status = self.read_acl_object_status_info(self.CFG_ACL_RULE, self.STATE_ACL_RULE)
 
     def read_tables_info(self):
         """
@@ -210,7 +221,7 @@ def read_sessions_info(self):
         for key in self.sessions_db_info:
             if self.per_npu_statedb:
                 # For multi-npu platforms we will read from all front asic name space
-                # statedb as the monitor port will be differnt for each asic
+                # statedb as the monitor port will be different for each asic
                 # and it's status also might be different (ideally should not happen)
                 # We will store them as dict of 'asic' : value
                 self.sessions_db_info[key]["status"] = {}
@@ -224,6 +235,35 @@ def read_sessions_info(self):
                 self.sessions_db_info[key]["status"] = state_db_info.get("status", "inactive") if state_db_info else "error"
                 self.sessions_db_info[key]["monitor_port"] = state_db_info.get("monitor_port", "") if state_db_info else ""
 
+    def read_acl_object_status_info(self, cfg_db_table_name, state_db_table_name):
+        """
+        Read ACL_TABLE status or ACL_RULE status from STATE_DB
+        """
+        if self.per_npu_configdb:
+            namespace_configdb = list(self.per_npu_configdb.values())[0]
+            keys = namespace_configdb.get_table(cfg_db_table_name).keys()
+        else:
+            keys = self.configdb.get_table(cfg_db_table_name).keys()
+
+        status = {}
+        for key in keys:
+            # For ACL_RULE, the key is (acl_table_name, acl_rule_name)
+            if isinstance(key, tuple):
+                state_db_key = key[0] + "|" + key[1]
+            else:
+                state_db_key = key
+            status[key] = {}
+            if self.per_npu_statedb:
+                status[key]['status'] = {}
+                for namespace_key, namespace_statedb in self.per_npu_statedb.items():
+                    state_db_info = namespace_statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(state_db_table_name, state_db_key))
+                    status[key]['status'][namespace_key] = state_db_info.get("status", "N/A") if state_db_info else "N/A"
+            else:
+                state_db_info = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(state_db_table_name, state_db_key))
+                status[key]['status'] = state_db_info.get("status", "N/A") if state_db_info else "N/A"
+
+        return status
+
     def get_sessions_db_info(self):
         return self.sessions_db_info
 
@@ -786,32 +826,36 @@ def show_table(self, table_name):
         :param table_name: Optional. ACL table name. Filter tables by specified name.
         :return:
         """
-        header = ("Name", "Type", "Binding", "Description", "Stage")
+        header = ("Name", "Type", "Binding", "Description", "Stage", "Status")
 
         data = []
         for key, val in self.get_tables_db_info().items():
             if table_name and key != table_name:
                 continue
-
+
             stage = val.get("stage", Stage.INGRESS).lower()
-
+            # Get ACL table status from STATE_DB
+            if key in self.acl_table_status:
+                status = self.acl_table_status[key]['status']
+            else:
+                status = 'N/A'
             if val["type"] == AclLoader.ACL_TABLE_TYPE_CTRLPLANE:
                 services = natsorted(val["services"])
-                data.append([key, val["type"], services[0], val["policy_desc"], stage])
+                data.append([key, val["type"], services[0], val["policy_desc"], stage, status])
 
                 if len(services) > 1:
                     for service in services[1:]:
-                        data.append(["", "", service, "", ""])
+                        data.append(["", "", service, "", "", ""])
             else:
                 if not val["ports"]:
-                    data.append([key, val["type"], "", val["policy_desc"], stage])
+                    data.append([key, val["type"], "", val["policy_desc"], stage, status])
                 else:
                     ports = natsorted(val["ports"])
-                    data.append([key, val["type"], ports[0], val["policy_desc"], stage])
+                    data.append([key, val["type"], ports[0], val["policy_desc"], stage, status])
 
                     if len(ports) > 1:
                         for port in ports[1:]:
-                            data.append(["", "", port, "", ""])
+                            data.append(["", "", port, "", "", ""])
 
         print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval=""))
 
@@ -873,7 +917,7 @@ def show_rule(self, table_name, rule_id):
         :param rule_id: Optional. ACL rule name. Filter rule by specified rule name.
         :return:
         """
-        header = ("Table", "Rule", "Priority", "Action", "Match")
+        header = ("Table", "Rule", "Priority", "Action", "Match", "Status")
 
         def pop_priority(val):
             priority = "N/A"
@@ -919,11 +963,16 @@ def pop_matches(val):
             priority = pop_priority(val)
             action = pop_action(val)
             matches = pop_matches(val)
-
-            rule_data = [[tname, rid, priority, action, matches[0]]]
+            # Get ACL rule status from STATE_DB
+            status_key = (tname, rid)
+            if status_key in self.acl_rule_status:
+                status = self.acl_rule_status[status_key]['status']
+            else:
+                status = "N/A"
+            rule_data = [[tname, rid, priority, action, matches[0], status]]
 
             if len(matches) > 1:
                 for m in matches[1:]:
-                    rule_data.append(["", "", "", "", m])
+                    rule_data.append(["", "", "", "", m, ""])
 
             raw_data.append([priority, rule_data])
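The loader changes above boil down to one lookup structure: read_acl_object_status_info() builds a dict keyed exactly like CONFIG_DB (plain strings for ACL_TABLE, (table, rule) tuples for ACL_RULE), with values taken from STATE_DB. A minimal standalone sketch of how show_table()/show_rule() consume it — the data below is hypothetical, no SONiC connectors involved:

    # Shape of the status mapping produced by read_acl_object_status_info().
    acl_table_status = {"DATAACL_5": {"status": "Active"}}
    acl_rule_status = {("DATAACL_5", "RULE_1"): {"status": "Active"}}

    def table_status(name):
        # Same "N/A" fallback the loader uses when STATE_DB has no entry yet
        return acl_table_status.get(name, {}).get("status", "N/A")

    def rule_status(table, rule):
        return acl_rule_status.get((table, rule), {}).get("status", "N/A")

    print(table_status("DATAACL_5"))            # Active
    print(rule_status("DATAACL_5", "RULE_1"))   # Active
    print(table_status("UNKNOWN"))              # N/A

On multi-NPU platforms the 'status' value is itself a dict of namespace to status, which is why the multi-ASIC test below expects a literal "{'asic0': 'Active', 'asic2': 'Active'}" cell.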
diff --git a/config/main.py b/config/main.py
index f2576ac74d..384e6f9f68 100644
--- a/config/main.py
+++ b/config/main.py
@@ -2354,25 +2354,35 @@ def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer
         session_info['gre_type'] = gre_type
 
     session_info = gather_session_info(session_info, policer, queue, src_port, direction)
+    ctx = click.get_current_context()
 
     """
     For multi-npu platforms we need to program all front asic namespaces
    """
     namespaces = multi_asic.get_all_namespaces()
     if not namespaces['front_ns']:
-        config_db = ConfigDBConnector()
+        config_db = ValidatedConfigDBConnector(ConfigDBConnector())
         config_db.connect()
-        if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False:
-            return
-        config_db.set_entry("MIRROR_SESSION", session_name, session_info)
+        if ADHOC_VALIDATION:
+            if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False:
+                return
+        try:
+            config_db.set_entry("MIRROR_SESSION", session_name, session_info)
+        except ValueError as e:
+            ctx.fail("Invalid ConfigDB. Error: {}".format(e))
     else:
         per_npu_configdb = {}
         for front_asic_namespaces in namespaces['front_ns']:
-            per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
+            per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces))
             per_npu_configdb[front_asic_namespaces].connect()
-            if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False:
-                return
-            per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
+            if ADHOC_VALIDATION:
+                if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False:
+                    return
+            try:
+                per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
+            except ValueError as e:
+                ctx.fail("Invalid ConfigDB. Error: {}".format(e))
 
 @mirror_session.group(cls=clicommon.AbbreviationGroup, name='span')
 @click.pass_context
@@ -2404,25 +2414,34 @@ def add_span(session_name, dst_port, src_port, direction, queue, policer):
         }
 
     session_info = gather_session_info(session_info, policer, queue, src_port, direction)
+    ctx = click.get_current_context()
 
     """
     For multi-npu platforms we need to program all front asic namespaces
     """
     namespaces = multi_asic.get_all_namespaces()
     if not namespaces['front_ns']:
-        config_db = ConfigDBConnector()
+        config_db = ValidatedConfigDBConnector(ConfigDBConnector())
         config_db.connect()
-        if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False:
-            return
-        config_db.set_entry("MIRROR_SESSION", session_name, session_info)
+        if ADHOC_VALIDATION:
+            if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False:
+                return
+        try:
+            config_db.set_entry("MIRROR_SESSION", session_name, session_info)
+        except ValueError as e:
+            ctx.fail("Invalid ConfigDB. Error: {}".format(e))
     else:
         per_npu_configdb = {}
         for front_asic_namespaces in namespaces['front_ns']:
-            per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
+            per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces))
             per_npu_configdb[front_asic_namespaces].connect()
-            if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False:
-                return
-            per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
+            if ADHOC_VALIDATION:
+                if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False:
+                    return
+            try:
+                per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
+            except ValueError as e:
+                ctx.fail("Invalid ConfigDB. Error: {}".format(e))
 
 @mirror_session.command()
@@ -2434,16 +2453,23 @@ def remove(session_name):
     For multi-npu platforms we need to program all front asic namespaces
     """
     namespaces = multi_asic.get_all_namespaces()
+    ctx = click.get_current_context()
     if not namespaces['front_ns']:
-        config_db = ConfigDBConnector()
+        config_db = ValidatedConfigDBConnector(ConfigDBConnector())
         config_db.connect()
-        config_db.set_entry("MIRROR_SESSION", session_name, None)
+        try:
+            config_db.set_entry("MIRROR_SESSION", session_name, None)
+        except JsonPatchConflict as e:
+            ctx.fail("Invalid ConfigDB. Error: {}".format(e))
     else:
         per_npu_configdb = {}
         for front_asic_namespaces in namespaces['front_ns']:
-            per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
+            per_npu_configdb[front_asic_namespaces] = ValidatedConfigDBConnector(ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces))
             per_npu_configdb[front_asic_namespaces].connect()
-            per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None)
+            try:
+                per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None)
+            except JsonPatchConflict as e:
+                ctx.fail("Invalid ConfigDB. Error: {}".format(e))
 
 #
 # 'pfcwd' group ('config pfcwd ...')
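Every handler above follows the same control flow: wrap the raw connector in ValidatedConfigDBConnector, keep the legacy pre-checks behind the ADHOC_VALIDATION switch, and convert the ValueError (or JsonPatchConflict on delete) raised by the wrapped set_entry() into a click failure. A minimal sketch of that pattern — the connector class here is a stand-in for illustration, not the real SONiC implementation:

    import click

    ADHOC_VALIDATION = True  # module-level switch, as in config/main.py

    class FakeValidatedConnector:
        """Stand-in for ValidatedConfigDBConnector: set_entry() raises
        ValueError when YANG validation rejects the payload."""
        def set_entry(self, table, key, data):
            if data is not None and "bad" in data:
                raise ValueError("YANG validation failed")

    def write_entry(ctx, db, table, key, data):
        if ADHOC_VALIDATION:
            # legacy ad-hoc checks would run here and return early on failure
            pass
        try:
            db.set_entry(table, key, data)
        except ValueError as e:
            ctx.fail("Invalid ConfigDB. Error: {}".format(e))

    @click.command()
    def demo():
        write_entry(click.get_current_context(), FakeValidatedConnector(),
                    "MIRROR_SESSION", "mrr_sample", {"bad": "value"})

    if __name__ == "__main__":
        demo()  # exits with "Invalid ConfigDB. Error: YANG validation failed"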
diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py
index f5a365d59f..d0818172f8 100644
--- a/generic_config_updater/change_applier.py
+++ b/generic_config_updater/change_applier.py
@@ -9,7 +9,7 @@ from .gu_common import genericUpdaterLogging
 
 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-UPDATER_CONF_FILE = f"{SCRIPT_DIR}/generic_config_updater.conf.json"
+UPDATER_CONF_FILE = f"{SCRIPT_DIR}/gcu_services_validator.conf.json"
 
 logger = genericUpdaterLogging.get_logger(title="Change Applier")
 print_to_console = False
diff --git a/generic_config_updater/field_operation_validators.py b/generic_config_updater/field_operation_validators.py
new file mode 100644
index 0000000000..befd4b8749
--- /dev/null
+++ b/generic_config_updater/field_operation_validators.py
@@ -0,0 +1,26 @@
+from sonic_py_common import device_info
+import re
+
+def rdma_config_update_validator():
+    version_info = device_info.get_sonic_version_info()
+    build_version = version_info.get('build_version')
+    asic_type = version_info.get('asic_type')
+
+    if (asic_type != 'mellanox' and asic_type != 'broadcom' and asic_type != 'cisco-8000'):
+        return False
+
+    version_substrings = build_version.split('.')
+    branch_version = None
+
+    for substring in version_substrings:
+        if substring.isdigit() and re.match(r'^\d{8}$', substring):
+            branch_version = substring
+            break
+
+    if branch_version is None:
+        return False
+
+    if asic_type == 'cisco-8000':
+        return branch_version >= "20201200"
+    else:
+        return branch_version >= "20181100"
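The validator keys off the 8-digit branch token embedded in the build version string (e.g. SONiC.20181131 carries branch 20181131); because every candidate is exactly eight digits, the string comparisons against "20181100"/"20201200" behave like numeric ones. A small standalone check of that parsing logic (the version strings below are made up):

    import re

    def extract_branch_version(build_version):
        # Mirror of the loop in rdma_config_update_validator(): take the first
        # dot-separated token that is exactly eight digits.
        for substring in build_version.split('.'):
            if re.match(r'^\d{8}$', substring):
                return substring
        return None

    assert extract_branch_version("SONiC.20181131") == "20181131"
    assert extract_branch_version("SONiC.master.0-abcdef") is None
    # Fixed-width digit strings compare consistently as strings or ints
    assert ("20191130" >= "20181100") == (20191130 >= 20181100)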
diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json
new file mode 100644
index 0000000000..f12a14d8eb
--- /dev/null
+++ b/generic_config_updater/gcu_field_operation_validators.conf.json
@@ -0,0 +1,20 @@
+{
+    "README": [
+        "field_operation_validators provides, module & method name as ",
+        "<module name>.<method name>",
+        "NOTE: module name could have '.'",
+        " ",
+        "The last element separated by '.' is considered as ",
+        "method name",
+        "",
+        "e.g. 'show.acl.test_acl'",
+        "",
+        "field_operation_validators for a given table defines a list of validators that all must pass for modification to the specified field and table to be allowed",
+        ""
+    ],
+    "tables": {
+        "PFC_WD": {
+            "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ]
+        }
+    }
+}
diff --git a/generic_config_updater/generic_config_updater.conf.json b/generic_config_updater/gcu_services_validator.conf.json
similarity index 91%
rename from generic_config_updater/generic_config_updater.conf.json
rename to generic_config_updater/gcu_services_validator.conf.json
index 907b5a6863..852b587286 100644
--- a/generic_config_updater/generic_config_updater.conf.json
+++ b/generic_config_updater/gcu_services_validator.conf.json
@@ -48,6 +48,9 @@
         },
         "NTP_SERVER": {
             "services_to_validate": [ "ntp-service" ]
+        },
+        "VLAN_INTERFACE": {
+            "services_to_validate": [ "vlanintf-service" ]
         }
     },
     "services": {
@@ -71,6 +74,9 @@
         },
         "ntp-service": {
             "validate_commands": [ "generic_config_updater.services_validator.ntp_validator" ]
+        },
+        "vlanintf-service": {
+            "validate_commands": [ "generic_config_updater.services_validator.vlanintf_validator" ]
         }
     }
 }
diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py
index 0d7a5281bb..e8c66fcbbe 100644
--- a/generic_config_updater/gu_common.py
+++ b/generic_config_updater/gu_common.py
@@ -1,5 +1,6 @@
 import json
 import jsonpatch
+import importlib
 from jsonpointer import JsonPointer
 import sonic_yang
 import sonic_yang_ext
@@ -7,11 +8,14 @@ import yang as ly
 import copy
 import re
+import os
 from sonic_py_common import logger
 from enum import Enum
 
 YANG_DIR = "/usr/local/yang-models"
 SYSLOG_IDENTIFIER = "GenericConfigUpdater"
+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+GCU_FIELD_OP_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json"
 
 class GenericConfigUpdaterError(Exception):
     pass
@@ -162,6 +166,38 @@ def validate_field_operation(self, old_config, target_config):
                 if any(op['op'] == operation and field == op['path'] for op in patch):
                     raise IllegalPatchOperationError("Given patch operation is invalid. Operation: {} is illegal on field: {}".format(operation, field))
 
+        def _invoke_validating_function(cmd):
+            # cmd is in the format as <module name>.<method name>
+            method_name = cmd.split(".")[-1]
+            module_name = ".".join(cmd.split(".")[0:-1])
+            if module_name != "generic_config_updater.field_operation_validators" or "validator" not in method_name:
+                raise GenericConfigUpdaterError("Attempting to call invalid method {} in module {}. Module must be generic_config_updater.field_operation_validators, and method must be a defined validator".format(method_name, module_name))
+            module = importlib.import_module(module_name, package=None)
+            method_to_call = getattr(module, method_name)
+            return method_to_call()
+
+        if os.path.exists(GCU_FIELD_OP_CONF_FILE):
+            with open(GCU_FIELD_OP_CONF_FILE, "r") as s:
+                gcu_field_operation_conf = json.load(s)
+        else:
+            raise GenericConfigUpdaterError("GCU field operation validators config file not found")
+
+        for element in patch:
+            path = element["path"]
+            match = re.search(r'\/([^\/]+)(\/|$)', path)  # This matches the table name in the path, eg if path is /PFC_WD/GLOBAL, the match would be PFC_WD
+            if match is not None:
+                table = match.group(1)
+            else:
+                raise GenericConfigUpdaterError("Invalid jsonpatch path: {}".format(path))
+            validating_functions = set()
+            tables = gcu_field_operation_conf["tables"]
+            validating_functions.update(tables.get(table, {}).get("field_operation_validators", []))
+
+            for function in validating_functions:
+                if not _invoke_validating_function(function):
+                    raise IllegalPatchOperationError("Modification of {} table is illegal- validating function {} returned False".format(table, function))
+
+
     def validate_lanes(self, config_db):
         if "PORT" not in config_db:
             return True, None
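Two small pieces of logic carry that hunk: the dotted validator path from the JSON config is split into module and method before importlib resolves it, and the table name is pulled from the JSON-patch path with a regex. Both can be exercised standalone (the paths below are illustrative):

    import re

    def split_validator_path(cmd):
        # Same split as _invoke_validating_function(): everything before the
        # last '.' is the module, the final component is the method.
        method_name = cmd.split(".")[-1]
        module_name = ".".join(cmd.split(".")[0:-1])
        return module_name, method_name

    print(split_validator_path(
        "generic_config_updater.field_operation_validators.rdma_config_update_validator"))
    # ('generic_config_updater.field_operation_validators', 'rdma_config_update_validator')

    # Table-name extraction from a JSON-patch path, as in validate_field_operation()
    for path in ["/PFC_WD/GLOBAL/POLL_INTERVAL", "/PFC_WD"]:
        match = re.search(r'\/([^\/]+)(\/|$)', path)
        print(match.group(1))  # PFC_WD in both cases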
diff --git a/generic_config_updater/services_validator.py b/generic_config_updater/services_validator.py
index 44a9e095eb..5d8c1f0d51 100644
--- a/generic_config_updater/services_validator.py
+++ b/generic_config_updater/services_validator.py
@@ -101,3 +101,24 @@ def caclmgrd_validator(old_config, upd_config, keys):
 
 def ntp_validator(old_config, upd_config, keys):
     return _service_restart("ntp-config")
+
+def vlanintf_validator(old_config, upd_config, keys):
+    old_vlan_intf = old_config.get("VLAN_INTERFACE", {})
+    upd_vlan_intf = upd_config.get("VLAN_INTERFACE", {})
+
+    # Get the tuple with format (iface, iface_ip) then check deleted tuple
+    # Example:
+    #   old_keys = [("Vlan1000", "192.168.0.1")]
+    #   upd_keys = [("Vlan1000", "192.168.0.2")]
+    old_keys = [ tuple(key.split("|"))
+            for key in old_vlan_intf if len(key.split("|")) == 2 ]
+    upd_keys = [ tuple(key.split("|"))
+            for key in upd_vlan_intf if len(key.split("|")) == 2 ]
+
+    deleted_keys = list(set(old_keys) - set(upd_keys))
+    for key in deleted_keys:
+        iface, iface_ip = key
+        rc = os.system(f"ip neigh flush dev {iface} {iface_ip}")
+        if rc:
+            return False
+    return True
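The validator flushes stale neighbor entries only for (interface, IP) pairs that disappeared between the old and updated config; plain "Vlan1000" keys without an IP part are skipped by the len() == 2 filter. A quick illustration of the key diffing, pure data with no ip invocation:

    old_config = {"VLAN_INTERFACE": {
        "Vlan1000": {},
        "Vlan1000|192.168.0.1/21": {}}}
    upd_config = {"VLAN_INTERFACE": {
        "Vlan1000": {},
        "Vlan1000|192.168.0.2/21": {}}}

    def deleted_pairs(old, upd):
        # Same filtering as vlanintf_validator(): keep only "iface|ip" keys.
        old_keys = {tuple(k.split("|")) for k in old.get("VLAN_INTERFACE", {})
                    if len(k.split("|")) == 2}
        upd_keys = {tuple(k.split("|")) for k in upd.get("VLAN_INTERFACE", {})
                    if len(k.split("|")) == 2}
        return old_keys - upd_keys

    print(deleted_pairs(old_config, upd_config))
    # {('Vlan1000', '192.168.0.1/21')} -> triggers
    # "ip neigh flush dev Vlan1000 192.168.0.1/21"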
diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index 5e7cc34bc9..426c7b2727 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -23,6 +23,7 @@ PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform)
 PLATFORM_PLUGIN="${REBOOT_TYPE}_plugin"
 LOG_SSD_HEALTH="/usr/local/bin/log_ssd_health"
 PLATFORM_FWUTIL_AU_REBOOT_HANDLE="platform_fw_au_reboot_handle"
+PLATFORM_REBOOT_PRE_CHECK="platform_reboot_pre_check"
 SSD_FW_UPDATE="ssd-fw-upgrade"
 SSD_FW_UPDATE_BOOT_OPTION=no
 TAG_LATEST=yes
@@ -179,6 +180,10 @@ function initialize_pre_shutdown()
 
 function request_pre_shutdown()
 {
+    if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ]; then
+        debug "Requesting platform reboot pre-check ..."
+        ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ${REBOOT_TYPE}
+    fi
     debug "Requesting pre-shutdown ..."
     STATE=$(timeout 5s docker exec syncd /usr/bin/syncd_request_shutdown --pre &> /dev/null; if [[ $? == 124 ]]; then echo "timed out"; fi)
     if [[ x"${STATE}" == x"timed out" ]]; then
@@ -614,11 +619,14 @@ if is_secureboot && grep -q aboot_machine= /host/machine.conf; then
     load_aboot_secureboot_kernel
 else
     # check if secure boot is enable in UEFI
-    SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled")
-    if [ ${SECURE_UPGRADE_ENABLED} -eq 1 ]; then
-        load_kernel_secure
-    else
+    CHECK_SECURE_UPGRADE_ENABLED=0
+    SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") || CHECK_SECURE_UPGRADE_ENABLED=$?
+    if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then
+        debug "Loading kernel without secure boot"
         load_kernel
+    else
+        debug "Loading kernel with secure boot"
+        load_kernel_secure
     fi
 fi
@@ -806,6 +814,17 @@ fi
 
 # Reboot: explicitly call Linux native reboot under sbin
 debug "Rebooting with ${REBOOT_METHOD} to ${NEXT_SONIC_IMAGE} ..."
+
+LOGS_ON_TMPFS=0
+df --output=fstype /var/log* | grep -c 'tmpfs' || LOGS_ON_TMPFS=$?
+if [[ LOGS_ON_TMPFS -eq 0 ]]; then
+    debug "Backup shutdown logs to /host/logs_before_reboot"
+    mkdir -p /host/logs_before_reboot || /bin/true
+    # maxdepth 2: find files within 2 nested directories (eg. /var/log/ and /var/log/swss/)
+    # mmin 30: find files written in past 30 minutes
+    find /var/log -maxdepth 2 -mmin -30 -type f | xargs -I {} cp {} /host/logs_before_reboot/ || /bin/true
+fi
+
 exec ${REBOOT_METHOD}
 
 # Should never reach here
diff --git a/scripts/route_check.py b/scripts/route_check.py
index 705025bbd4..c832b2c6ea 100755
--- a/scripts/route_check.py
+++ b/scripts/route_check.py
@@ -11,11 +11,11 @@ How:
     NOTE: The flow from APPL-DB to ASIC-DB takes non zero milliseconds.
     1) Initiate subscribe for ASIC-DB updates.
-    2) Read APPL-DB & ASIC-DB 
+    2) Read APPL-DB & ASIC-DB
     3) Get the diff.
-    4) If any diff, 
+    4) If any diff,
         4.1) Collect subscribe messages for a second
-        4.2) check diff against the subscribe messages 
+        4.2) check diff against the subscribe messages
     5) Rule out local interfaces & default routes
     6) If still outstanding diffs, report failure.
@@ -29,7 +29,7 @@
     down to ensure failure.
     Analyze the reported failures to match expected.
     You may use the exit code to verify the result as success or not.
-    
+
 """
 
@@ -45,6 +45,7 @@ import time
 import signal
 import traceback
+import subprocess
 from ipaddress import ip_network
 from swsscommon import swsscommon
@@ -72,6 +73,9 @@
 PRINT_MSG_LEN_MAX = 1000
 
+FRR_CHECK_RETRIES = 3
+FRR_WAIT_TIME = 15
+
 class Level(Enum):
     ERR = 'ERR'
     INFO = 'INFO'
@@ -294,7 +298,7 @@ def get_routes():
 
 def get_route_entries():
     """
-    helper to read present route entries from ASIC-DB and 
+    helper to read present route entries from ASIC-DB and
     as well initiate selector for ASIC-DB:ASIC-state updates.
     :return (selector, subscriber, <sorted list of route entries>)
     """
@@ -310,7 +314,7 @@ def get_route_entries():
         res, e = checkout_rt_entry(k)
         if res:
             rt.append(e)
-    
+
     print_message(syslog.LOG_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": sorted(rt)}, indent=4))
 
     selector = swsscommon.Select()
@@ -318,6 +322,31 @@ def get_route_entries():
     return (selector, subs, sorted(rt))
 
 
+def is_suppress_fib_pending_enabled():
+    """
+    Returns True if FIB suppression is enabled, False otherwise
+    """
+    cfg_db = swsscommon.ConfigDBConnector()
+    cfg_db.connect()
+
+    state = cfg_db.get_entry('DEVICE_METADATA', 'localhost').get('suppress-fib-pending')
+
+    return state == 'enabled'
+
+
+def get_frr_routes():
+    """
+    Read routes from zebra through CLI command
+    :return frr routes dictionary
+    """
+
+    output = subprocess.check_output('show ip route json', shell=True)
+    routes = json.loads(output)
+    output = subprocess.check_output('show ipv6 route json', shell=True)
+    routes.update(json.loads(output))
+    return routes
+
+
 def get_interfaces():
     """
     helper to read interface table from APPL-DB.
@@ -355,7 +384,7 @@ def filter_out_local_interfaces(keys):
         chassis_local_intfs = chassis.get_chassis_local_interfaces()
         local_if_lst.update(set(chassis_local_intfs))
-    
+
     db = swsscommon.DBConnector(APPL_DB_NAME, 0)
     tbl = swsscommon.Table(db, 'ROUTE_TABLE')
 
@@ -494,6 +523,61 @@ def filter_out_standalone_tunnel_routes(routes):
     return updated_routes
 
 
+def check_frr_pending_routes():
+    """
+    Check FRR routes for offload flag presence by executing "show ip route json"
+    Returns a list of routes that have no offload flag.
+    """
+
+    missed_rt = []
+
+    retries = FRR_CHECK_RETRIES
+    for i in range(retries):
+        missed_rt = []
+        frr_routes = get_frr_routes()
+
+        for _, entries in frr_routes.items():
+            for entry in entries:
+                if entry['protocol'] != 'bgp':
+                    continue
+
+                # TODO: Also handle VRF routes. Currently this script does not check for VRF routes so it would be incorrect for us
+                # to assume they are installed in ASIC_DB, so we don't handle them.
+                if entry['vrfName'] != 'default':
+                    continue
+
+                if not entry.get('offloaded', False):
+                    missed_rt.append(entry)
+
+        if not missed_rt:
+            break
+
+        time.sleep(FRR_WAIT_TIME)
+
+    return missed_rt
+
+
+def mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl):
+    """
+    Mitigate installed but not offloaded FRR routes.
+
+    In case route exists in APPL_DB, this function will manually send a notification to fpmsyncd
+    to trigger the flow that sends offload flag to zebra.
+
+    It is designed to mitigate a problem when orchagent fails to send notification about installed route to fpmsyncd
+    or fpmsyncd not being able to read the notification or in case zebra fails to receive offload update due to variety of reasons.
+    All of the above mentioned cases must be considered as a bug, but even in that case we will report an error in the log but
+    given that this script ensures the route is installed in the hardware it will automitigate such a bug.
+    """
+    db = swsscommon.DBConnector('APPL_STATE_DB', 0)
+    response_producer = swsscommon.NotificationProducer(db, f'{APPL_DB_NAME}_{swsscommon.APP_ROUTE_TABLE_NAME}_RESPONSE_CHANNEL')
+    for entry in [entry for entry in missed_frr_rt if entry['prefix'] in rt_appl]:
+        fvs = swsscommon.FieldValuePairs([('err_str', 'SWSS_RC_SUCCESS'), ('protocol', entry['protocol'])])
+        response_producer.send('SWSS_RC_SUCCESS', entry['prefix'], fvs)
+
+        print_message(syslog.LOG_ERR, f'Mitigated route {entry["prefix"]}')
+
+
 def get_soc_ips(config_db):
     mux_table = config_db.get_table('MUX_CABLE')
     soc_ips = []
@@ -537,7 +621,7 @@ def check_routes():
     """
     The heart of this script which runs the checks.
     Read APPL-DB & ASIC-DB, the relevant tables for route checking.
-    Checkout routes in ASIC-DB to match APPL-DB, discounting local & 
+    Checkout routes in ASIC-DB to match APPL-DB, discounting local &
     default routes. In case of missed / unexpected entries in ASIC,
     it might be due to update latency between APPL & ASIC DBs. So collect
     ASIC-DB subscribe updates for a second, and checkout if you see SET
@@ -546,12 +630,16 @@ def check_routes():
     If there are still some unjustifiable diffs, between APPL & ASIC DB,
     related to routes report failure, else all good.
 
+    If there are FRR routes that aren't marked offloaded but all APPL & ASIC DB
+    routes are in sync report failure and perform a mitigation action.
+
     :return (0, None) on success, else (-1, results) where results holds
     the unjustifiable entries.
     """
     intf_appl_miss = []
     rt_appl_miss = []
     rt_asic_miss = []
+    rt_frr_miss = []
 
     results = {}
     adds = []
@@ -600,11 +688,22 @@ def check_routes():
     if rt_asic_miss:
         results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss
 
+    rt_frr_miss = check_frr_pending_routes()
+
+    if rt_frr_miss:
+        results["missed_FRR_routes"] = rt_frr_miss
+
     if results:
         print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}")
         print_message(syslog.LOG_WARNING, "Failed. Look at reported mismatches above")
         print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4))
         print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4))
+
+        if rt_frr_miss and not rt_appl_miss and not rt_asic_miss:
+            print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR but all routes in APPL_DB and ASIC_DB are in sync")
+            if is_suppress_fib_pending_enabled():
+                mitigate_installed_not_offloaded_frr_routes(rt_frr_miss, rt_appl)
+
         return -1, results
     else:
         print_message(syslog.LOG_INFO, "All good!")
@@ -650,7 +749,7 @@ def main():
             return ret, res
         else:
             return ret, res
-        
+
 
 if __name__ == "__main__":
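check_frr_pending_routes() treats a BGP route in the default VRF as pending until zebra reports it offloaded. A distilled version of that filter over a hypothetical zebra JSON payload (trimmed to the fields the check reads):

    import json

    frr_routes = json.loads("""{
        "0.0.0.0/0": [{"prefix": "0.0.0.0/0", "vrfName": "default",
                       "protocol": "bgp", "offloaded": true}],
        "10.10.196.12/31": [{"prefix": "10.10.196.12/31", "vrfName": "default",
                             "protocol": "bgp"}],
        "10.10.196.24/31": [{"protocol": "connected"}]
    }""")

    missed = [entry
              for entries in frr_routes.values()
              for entry in entries
              if entry.get('protocol') == 'bgp'
              and entry.get('vrfName') == 'default'
              and not entry.get('offloaded', False)]
    print([e['prefix'] for e in missed])  # ['10.10.196.12/31']

Routes that survive this filter are exactly the ones mitigate_installed_not_offloaded_frr_routes() re-acknowledges toward fpmsyncd, and only when suppress-fib-pending is enabled in DEVICE_METADATA.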
diff --git a/setup.py b/setup.py
index 70d7473bd7..f071797280 100644
--- a/setup.py
+++ b/setup.py
@@ -5,9 +5,34 @@
 # under scripts/. Consider stop using scripts and use console_scripts instead
 #
 # https://stackoverflow.com/questions/18787036/difference-between-entry-points-console-scripts-and-scripts-in-setup-py
+from __future__ import print_function
+import sys
 import fastentrypoints
 
 from setuptools import setup
+import pkg_resources
+from packaging import version
+
+# sonic_dependencies, version requirement only supports '>='
+sonic_dependencies = [
+    'sonic-config-engine',
+    'sonic-platform-common',
+    'sonic-py-common',
+    'sonic-yang-mgmt',
+]
+
+for package in sonic_dependencies:
+    try:
+        package_dist = pkg_resources.get_distribution(package.split(">=")[0])
+    except pkg_resources.DistributionNotFound:
+        print(package + " is not found!", file=sys.stderr)
+        print("Please build and install SONiC python wheels dependencies from sonic-buildimage", file=sys.stderr)
+        exit(1)
+    if ">=" in package:
+        if version.parse(package_dist.version) >= version.parse(package.split(">=")[1]):
+            continue
+        print(package + " version not match!", file=sys.stderr)
+        exit(1)
 
 setup(
     name='sonic-utilities',
@@ -64,7 +89,7 @@
         'sonic_cli_gen',
     ],
     package_data={
-        'generic_config_updater': ['generic_config_updater.conf.json'],
+        'generic_config_updater': ['gcu_services_validator.conf.json', 'gcu_field_operation_validators.conf.json'],
         'show': ['aliases.ini'],
         'sonic_installer': ['aliases.ini'],
         'tests': ['acl_input/*',
@@ -211,16 +236,12 @@
         'prettyprinter>=0.18.0',
         'pyroute2>=0.5.14, <0.6.1',
         'requests>=2.25.0',
-        'sonic-config-engine',
-        'sonic-platform-common',
-        'sonic-py-common',
-        'sonic-yang-mgmt',
         'tabulate==0.8.2',
         'toposort==1.6',
         'www-authenticate==0.9.2',
         'xmltodict==0.12.0',
         'lazy-object-proxy',
-    ],
+    ] + sonic_dependencies,
     setup_requires= [
         'pytest-runner',
         'wheel'
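The pre-flight loop only understands '>=' pins: it strips the operator to locate the installed distribution, then compares parsed versions, failing the build early instead of at runtime. A compact standalone equivalent (the requirement string below is a placeholder):

    import sys
    import pkg_resources
    from packaging import version

    def check_dependency(requirement):
        """Return True if 'name' or 'name>=X.Y' is satisfied, mirroring the
        setup.py pre-flight loop (only '>=' is supported)."""
        name = requirement.split(">=")[0]
        try:
            dist = pkg_resources.get_distribution(name)
        except pkg_resources.DistributionNotFound:
            print(name + " is not found!", file=sys.stderr)
            return False
        if ">=" in requirement:
            return version.parse(dist.version) >= version.parse(requirement.split(">=")[1])
        return True

    # 'packaging' is importable here by construction, so this passes
    print(check_dependency("packaging>=1.0"))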
diff --git a/show/muxcable.py b/show/muxcable.py
index 837e362789..5df4bd8c2a 100644
--- a/show/muxcable.py
+++ b/show/muxcable.py
@@ -20,7 +20,7 @@
 REDIS_TIMEOUT_MSECS = 0
 
 SELECT_TIMEOUT = 1000
-HWMODE_MUXDIRECTION_TIMEOUT = 0.1
+HWMODE_MUXDIRECTION_TIMEOUT = 0.5
 
 # The empty namespace refers to linux host namespace.
 EMPTY_NAMESPACE = ''
diff --git a/tests/aclshow_test.py b/tests/aclshow_test.py
index 90fe46f683..0abe509aad 100644
--- a/tests/aclshow_test.py
+++ b/tests/aclshow_test.py
@@ -46,6 +46,7 @@
 RULE_9           DATAACL             9991              901            900
 RULE_10          DATAACL             9989             1001           1000
 DEFAULT_RULE     DATAACL                1                2              1
+RULE_1           DATAACL_5           9999              N/A            N/A
 RULE_NO_COUNTER  DATAACL_NO_COUNTER  9995              N/A            N/A
 RULE_6           EVERFLOW            9994              601            600
 RULE_08          EVERFLOW            9992                0              0
@@ -89,8 +90,8 @@
 # Expected output for aclshow -r RULE_4,RULE_6 -vv
 rule4_rule6_verbose_output = '' + \
     """Reading ACL info...
-Total number of ACL Tables: 11
-Total number of ACL Rules: 20
+Total number of ACL Tables: 12
+Total number of ACL Rules: 21
 
 RULE NAME    TABLE NAME      PRIO    PACKETS COUNT    BYTES COUNT
 -----------  ------------  ------  ---------------  -------------
@@ -136,6 +137,7 @@
 RULE_9           DATAACL             9991                0              0
 RULE_10          DATAACL             9989                0              0
 DEFAULT_RULE     DATAACL                1                0              0
+RULE_1           DATAACL_5           9999              N/A            N/A
 RULE_NO_COUNTER  DATAACL_NO_COUNTER  9995              N/A            N/A
 RULE_6           EVERFLOW            9994                0              0
 RULE_08          EVERFLOW            9992                0              0
@@ -161,6 +163,7 @@
 RULE_9           DATAACL             9991                0              0
 RULE_10          DATAACL             9989                0              0
 DEFAULT_RULE     DATAACL                1                0              0
+RULE_1           DATAACL_5           9999              N/A            N/A
 RULE_NO_COUNTER  DATAACL_NO_COUNTER  9995              100            100
 RULE_6           EVERFLOW            9994                0              0
 RULE_08          EVERFLOW            9992                0              0
diff --git a/tests/config_mirror_session_test.py b/tests/config_mirror_session_test.py
index 5585cab87a..ccbc196b50 100644
--- a/tests/config_mirror_session_test.py
+++ b/tests/config_mirror_session_test.py
@@ -1,7 +1,11 @@
 import pytest
 import config.main as config
+import jsonpatch
 
 from unittest import mock
 from click.testing import CliRunner
+from mock import patch
+from jsonpatch import JsonPatchConflict
+from sonic_py_common import multi_asic
 
 ERR_MSG_IP_FAILURE = "does not appear to be an IPv4 or IPv6 network"
 ERR_MSG_IP_VERSION_FAILURE = "not a valid IPv4 address"
@@ -172,7 +176,34 @@ def test_mirror_session_erspan_add():
         mocked.assert_called_with("test_session", "100.1.1.1", "2.2.2.2", 8, 63, 0, 0, None, None, None)
 
 
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError))
+def test_mirror_session_erspan_add_invalid_yang_validation():
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["erspan"].commands["add"],
+        ["test_session", "100.1.1.1", "2.2.2.2", "8", "63", "10", "100"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
+
+
+@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock())
+@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'}))
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError))
+def test_mirror_session_erspan_add_multi_asic_invalid_yang_validation(mock_db_connector):
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["erspan"].commands["add"],
+        ["test_session", "100.1.1.1", "2.2.2.2", "8", "63", "10", "100"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
+
+
 def test_mirror_session_span_add():
+    config.ADHOC_VALIDATION = True
     runner = CliRunner()
 
     # Verify invalid queue
@@ -273,3 +304,54 @@ def test_mirror_session_span_add():
         mocked.assert_called_with("test_session", "Ethernet0", "Ethernet4", "rx", 0, None)
+
+
+@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock())
+@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'}))
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError))
+def test_mirror_session_span_add_multi_asic_invalid_yang_validation(mock_db_connector):
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["span"].commands["add"],
+        ["test_session", "Ethernet0", "Ethernet4", "rx", "0"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
+
+
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError))
+def test_mirror_session_span_add_invalid_yang_validation():
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["span"].commands["add"],
+        ["test_session", "Ethernet0", "Ethernet4", "rx", "0"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
+
+
+@patch("config.main.multi_asic.get_all_namespaces", mock.Mock(return_value={'front_ns': 'sample_ns'}))
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.main.ConfigDBConnector", spec=True, connect=mock.Mock())
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict))
+def test_mirror_session_remove_multi_asic_invalid_yang_validation(mock_db_connector):
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["remove"],
+        ["mrr_sample"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
+
+
+@patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True))
+@patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict))
+def test_mirror_session_remove_invalid_yang_validation():
+    config.ADHOC_VALIDATION = False
+    runner = CliRunner()
+    result = runner.invoke(
+        config.config.commands["mirror_session"].commands["remove"],
+        ["mrr_sample"])
+    print(result.output)
+    assert "Invalid ConfigDB. Error" in result.output
diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py
index 096f21cb80..76f5675690 100644
--- a/tests/config_snmp_test.py
+++ b/tests/config_snmp_test.py
@@ -118,6 +118,7 @@ def setup_class(cls):
 
     # Add snmp community tests
     def test_config_snmp_community_add_new_community_ro(self):
+        config.ADHOC_VALIDATION = True
         db = Db()
         runner = CliRunner()
         with mock.patch('utilities_common.cli.run_command') as mock_run_command:
diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py
index 7fa471ee3b..a319a25ead 100644
--- a/tests/generic_config_updater/gu_common_test.py
+++ b/tests/generic_config_updater/gu_common_test.py
@@ -3,8 +3,10 @@ import jsonpatch
 import sonic_yang
 import unittest
-from unittest.mock import MagicMock, Mock, patch
+import mock
 
+from unittest.mock import MagicMock, Mock
+from mock import patch
 from .gutest_helpers import create_side_effect_dict, Files
 import generic_config_updater.gu_common as gu_common
 
@@ -69,11 +71,25 @@ def setUp(self):
         self.config_wrapper_mock = gu_common.ConfigWrapper()
         self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON)
 
+    @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "mellanox", "build_version": "SONiC.20181131"}))
     def test_validate_field_operation_legal__pfcwd(self):
         old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}}
         target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "40"}}}
         config_wrapper = gu_common.ConfigWrapper()
         config_wrapper.validate_field_operation(old_config, target_config)
+
+    def test_validate_field_operation_illegal__pfcwd(self):
+        old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}}
+        target_config = {"PFC_WD": {"GLOBAL": {}}}
+        config_wrapper = gu_common.ConfigWrapper()
+        self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config)
+
+    @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "invalid-asic", "build_version": "SONiC.20181131"}))
+    def test_validate_field_modification_illegal__pfcwd(self):
+        old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}}
+        target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "80"}}}
+        config_wrapper = gu_common.ConfigWrapper()
+        self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config)
 
     def test_validate_field_operation_legal__rm_loopback1(self):
         old_config = {
@@ -92,13 +108,7 @@ def test_validate_field_operation_legal__rm_loopback1(self):
         }
         config_wrapper = gu_common.ConfigWrapper()
         config_wrapper.validate_field_operation(old_config, target_config)
-
-    def test_validate_field_operation_illegal__pfcwd(self):
-        old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": 60}}}
-        target_config = {"PFC_WD": {"GLOBAL": {}}}
-        config_wrapper = gu_common.ConfigWrapper()
-        self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config)
-
+
     def test_validate_field_operation_illegal__rm_loopback0(self):
         old_config = {
diff --git a/tests/generic_config_updater/service_validator_test.py b/tests/generic_config_updater/service_validator_test.py
index 2f51771d33..f14a3ad7b0 100644
--- a/tests/generic_config_updater/service_validator_test.py
+++ b/tests/generic_config_updater/service_validator_test.py
@@ -6,7 +6,7 @@ import os
 from collections import defaultdict
 from unittest.mock import patch
 
-from generic_config_updater.services_validator import vlan_validator, rsyslog_validator, caclmgrd_validator
+from generic_config_updater.services_validator import vlan_validator, rsyslog_validator, caclmgrd_validator, vlanintf_validator
 import generic_config_updater.gu_common
 
@@ -152,6 +152,46 @@ def mock_time_sleep_call(sleep_time):
     { "cmd": "systemctl restart rsyslog", "rc": 1 },    # restart again; fails
 ]
 
+test_vlanintf_data = [
+        { "old": {}, "upd": {}, "cmd": "" },
+        {
+            "old": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "upd": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "cmd": ""
+        },
+        {
+            "old": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "upd": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.2/21": {} } },
+            "cmd": "ip neigh flush dev Vlan1000 192.168.0.1/21"
+        },
+        {
+            "old": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "upd": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {},
+                "Vlan1000|192.168.0.2/21": {} } },
+            "cmd": ""
+        },
+        {
+            "old": { "VLAN_INTERFACE": {
+                "Vlan1000": {},
+                "Vlan1000|192.168.0.1/21": {} } },
+            "upd": {},
+            "cmd": "ip neigh flush dev Vlan1000 192.168.0.1/21"
+        }
+    ]
+
+
 class TestServiceValidator(unittest.TestCase):
 
     @patch("generic_config_updater.change_applier.os.system")
@@ -177,6 +217,15 @@ def test_change_apply_os_system(self, mock_os_sys):
         rc = rsyslog_validator("", "", "")
         assert not rc, "rsyslog_validator expected to fail"
 
+        os_system_calls = []
+        os_system_call_index = 0
+        for entry in test_vlanintf_data:
+            if entry["cmd"]:
+                os_system_calls.append({"cmd": entry["cmd"], "rc": 0 })
+            msg = "case failed: {}".format(str(entry))
+
+            vlanintf_validator(entry["old"], entry["upd"], None)
+
     @patch("generic_config_updater.services_validator.time.sleep")
     def test_change_apply_time_sleep(self, mock_time_sleep):
         global time_sleep_calls, time_sleep_call_index
diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json
index 66b51f4ccb..de20194a64 100644
--- a/tests/mock_tables/asic0/config_db.json
+++ b/tests/mock_tables/asic0/config_db.json
@@ -246,5 +246,16 @@
         "holdtime": "10",
         "asn": "65200",
         "keepalive": "3"
+    },
+    "ACL_RULE|DATAACL_5|RULE_1": {
+        "IP_PROTOCOL": "126",
+        "PACKET_ACTION": "FORWARD",
+        "PRIORITY": "9999"
+    },
+    "ACL_TABLE|DATAACL_5": {
+        "policy_desc": "DATAACL_5",
+        "ports@": "Ethernet124",
+        "type": "L3",
+        "stage": "ingress"
     }
 }
diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json
index 2756404971..559af04826 100644
--- a/tests/mock_tables/asic0/state_db.json
+++ b/tests/mock_tables/asic0/state_db.json
@@ -286,5 +286,11 @@
         "STATUS": "up",
         "REMOTE_MOD": "0",
         "REMOTE_PORT": "93"
+    },
+    "ACL_TABLE_TABLE|DATAACL_5" : {
+        "status": "Active"
+    },
+    "ACL_RULE_TABLE|DATAACL_5|RULE_1" : {
+        "status": "Active"
     }
 }
diff --git a/tests/mock_tables/asic2/config_db.json b/tests/mock_tables/asic2/config_db.json
index 532d85bcbb..bfda10a0d5 100644
--- a/tests/mock_tables/asic2/config_db.json
+++ b/tests/mock_tables/asic2/config_db.json
@@ -124,5 +124,16 @@
         "state": "disabled",
         "auto_restart": "disabled",
         "high_mem_alert": "disabled"
+    },
+    "ACL_RULE|DATAACL_5|RULE_1": {
+        "IP_PROTOCOL": "126",
+        "PACKET_ACTION": "FORWARD",
+        "PRIORITY": "9999"
+    },
+    "ACL_TABLE|DATAACL_5": {
+        "policy_desc": "DATAACL_5",
+        "ports@": "Ethernet124",
+        "type": "L3",
+        "stage": "ingress"
     }
 }
diff --git a/tests/mock_tables/asic2/state_db.json b/tests/mock_tables/asic2/state_db.json
index f6e3eee4cf..c6c8c88898 100644
--- a/tests/mock_tables/asic2/state_db.json
+++ b/tests/mock_tables/asic2/state_db.json
@@ -207,5 +207,11 @@
         "speed_target": "50",
         "led_status": "green",
         "timestamp": "20200813 01:32:30"
+    },
+    "ACL_TABLE_TABLE|DATAACL_5" : {
+        "status": "Active"
+    },
+    "ACL_RULE_TABLE|DATAACL_5|RULE_1" : {
+        "status": "Active"
     }
 }
diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json
index 899dada260..3a2b681a6e 100644
--- a/tests/mock_tables/config_db.json
+++ b/tests/mock_tables/config_db.json
@@ -496,6 +496,11 @@
         "PACKET_ACTION": "FORWARD",
         "PRIORITY": "9995"
     },
+    "ACL_RULE|DATAACL_5|RULE_1": {
+        "IP_PROTOCOL": "126",
+        "PACKET_ACTION": "FORWARD",
+        "PRIORITY": "9999"
+    },
     "ACL_TABLE|NULL_ROUTE_V4": {
         "policy_desc": "DATAACL",
         "ports@": "PortChannel0002,PortChannel0005,PortChannel0008,PortChannel0011,PortChannel0014,PortChannel0017,PortChannel0020,PortChannel0023",
@@ -533,6 +538,12 @@
         "type": "L3V6",
         "stage": "egress"
     },
+    "ACL_TABLE|DATAACL_5": {
+        "policy_desc": "DATAACL_5",
+        "ports@": "Ethernet124",
+        "type": "L3",
+        "stage": "ingress"
+    },
     "ACL_TABLE|EVERFLOW": {
         "policy_desc": "EVERFLOW",
         "ports@": "PortChannel0002,PortChannel0005,PortChannel0008,PortChannel0011,PortChannel0014,PortChannel0017,PortChannel0020,PortChannel0023,Ethernet100,Ethernet104,Ethernet92,Ethernet96,Ethernet84,Ethernet88,Ethernet76,Ethernet80,Ethernet108,Ethernet112,Ethernet64,Ethernet120,Ethernet116,Ethernet124,Ethernet72,Ethernet68",
@@ -831,7 +842,8 @@
         "mac": "1d:34:db:16:a6:00",
         "platform": "x86_64-mlnx_msn3800-r0",
         "peer_switch": "sonic-switch",
-        "type": "ToRRouter"
+        "type": "ToRRouter",
+        "suppress-fib-pending": "enabled"
     },
     "SNMP_COMMUNITY|msft": {
         "TYPE": "RO"
diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json
index 4cdda56bc8..cd1a194ba8 100644
--- a/tests/mock_tables/state_db.json
+++ b/tests/mock_tables/state_db.json
@@ -1210,5 +1210,11 @@
         "STATUS": "up",
         "REMOTE_MOD": "0",
         "REMOTE_PORT": "93"
+    },
+    "ACL_TABLE_TABLE|DATAACL_5" : {
+        "status": "Active"
+    },
+    "ACL_RULE_TABLE|DATAACL_5|RULE_1" : {
+        "status": "Active"
     }
 }
diff --git a/tests/route_check_test.py b/tests/route_check_test.py
index 85e6a64a95..4d93c74e2d 100644
--- a/tests/route_check_test.py
+++ b/tests/route_check_test.py
@@ -7,7 +7,7 @@ import time
 from sonic_py_common import device_info
 from unittest.mock import MagicMock, patch
 
-from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD
+from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD, FRR_ROUTES
 
 import pytest
@@ -239,6 +239,7 @@ def setup(self):
 
     def init(self):
         route_check.UNIT_TESTING = 1
+        route_check.FRR_WAIT_TIME = 0
 
     @pytest.fixture
     def force_hang(self):
@@ -258,7 +259,8 @@ def mock_dbs(self):
              patch("route_check.swsscommon.Table") as mock_table, \
              patch("route_check.swsscommon.Select") as mock_sel, \
              patch("route_check.swsscommon.SubscriberStateTable") as mock_subs, \
-             patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db):
+             patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db), \
+             patch("route_check.swsscommon.NotificationProducer"):
             device_info.get_platform = MagicMock(return_value='unittest')
             set_mock(mock_table, mock_conn, mock_sel, mock_subs, mock_config_db)
             yield
@@ -272,7 +274,21 @@ def test_route_check(self, mock_dbs, test_num):
         set_test_case_data(ct_data)
         logger.info("Running test case {}: {}".format(test_num, ct_data[DESCR]))
 
-        with patch('sys.argv', ct_data[ARGS].split()):
+        with patch('sys.argv', ct_data[ARGS].split()), \
+                patch('route_check.subprocess.check_output') as mock_check_output:
+
+            check_frr_patch = patch('route_check.check_frr_pending_routes', lambda: [])
+
+            if FRR_ROUTES in ct_data:
+                routes = ct_data[FRR_ROUTES]
+
+                def side_effect(*args, **kwargs):
+                    return json.dumps(routes)
+
+                mock_check_output.side_effect = side_effect
+            else:
+                check_frr_patch.start()
+
             ret, res = route_check.main()
             expect_ret = ct_data[RET] if RET in ct_data else 0
             expect_res = ct_data[RESULT] if RESULT in ct_data else None
@@ -283,6 +299,8 @@ def test_route_check(self, mock_dbs, test_num):
             assert ret == expect_ret
             assert res == expect_res
 
+            check_frr_patch.stop()
+
     def test_timeout(self, mock_dbs, force_hang):
         # Test timeout
         ex_raised = False
diff --git a/tests/route_check_test_data.py b/tests/route_check_test_data.py
index a0b7a97e30..7ed1eee41f 100644
--- a/tests/route_check_test_data.py
+++ b/tests/route_check_test_data.py
@@ -6,6 +6,7 @@ CONFIG_DB = 4
 PRE = "pre-value"
 UPD = "update"
+FRR_ROUTES = "frr-routes"
 RESULT = "res"
 
 OP_SET = "SET"
@@ -360,6 +361,107 @@
             }
         }
     },
+    "10": {
+        DESCR: "basic good one, check FRR routes",
+        ARGS: "route_check -m INFO -i 1000",
+        PRE: {
+            APPL_DB: {
+                ROUTE_TABLE: {
+                    "0.0.0.0/0" : { "ifname": "portchannel0" },
+                    "10.10.196.12/31" : { "ifname": "portchannel0" },
+                },
+                INTF_TABLE: {
+                    "PortChannel1013:10.10.196.24/31": {},
+                    "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                    "PortChannel1024": {}
+                }
+            },
+            ASIC_DB: {
+                RT_ENTRY_TABLE: {
+                    RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
+                    RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                    RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                    RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+                }
+            },
+        },
+        FRR_ROUTES: {
+            "0.0.0.0/0": [
+                {
+                    "prefix": "0.0.0.0/0",
+                    "vrfName": "default",
+                    "protocol": "bgp",
+                    "offloaded": "true",
+                },
+            ],
+            "10.10.196.12/31": [
+                {
+                    "prefix": "10.10.196.12/31",
+                    "vrfName": "default",
+                    "protocol": "bgp",
+                    "offloaded": "true",
+                },
+            ],
+            "10.10.196.24/31": [
+                {
+                    "protocol": "connected",
+                },
+            ],
+        },
+    },
+    "11": {
+        DESCR: "failure test case, missing FRR routes",
+        ARGS: "route_check -m INFO -i 1000",
+        PRE: {
+            APPL_DB: {
+                ROUTE_TABLE: {
+                    "0.0.0.0/0" : { "ifname": "portchannel0" },
+                    "10.10.196.12/31" : { "ifname": "portchannel0" },
+                },
+                INTF_TABLE: {
+                    "PortChannel1013:10.10.196.24/31": {},
+                    "PortChannel1023:2603:10b0:503:df4::5d/126": {},
+                    "PortChannel1024": {}
+                }
+            },
+            ASIC_DB: {
+                RT_ENTRY_TABLE: {
+                    RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {},
+                    RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {},
+                    RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {},
+                    RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}
+                }
+            },
+        },
+        FRR_ROUTES: {
+            "0.0.0.0/0": [
+                {
+                    "prefix": "0.0.0.0/0",
+                    "vrfName": "default",
+                    "protocol": "bgp",
+                    "offloaded": "true",
+                },
+            ],
+            "10.10.196.12/31": [
+                {
+                    "prefix": "10.10.196.12/31",
+                    "vrfName": "default",
+                    "protocol": "bgp",
+                },
+            ],
+            "10.10.196.24/31": [
+                {
+                    "protocol": "connected",
+                },
+            ],
+        },
+        RESULT: {
+            "missed_FRR_routes": [
+                {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp"}
+            ],
+        },
+        RET: -1,
+    },
     "10": {
         DESCR: "basic good one with IPv6 address",
         ARGS: "route_check -m INFO -i 1000",
diff --git a/tests/show_acl_test.py b/tests/show_acl_test.py
new file mode 100644
index 0000000000..1b2cdc60a9
--- /dev/null
+++ b/tests/show_acl_test.py
@@ -0,0 +1,95 @@
+import os
+import pytest
+from click.testing import CliRunner
+
+import acl_loader.main as acl_loader_show
+from acl_loader import *
+from acl_loader.main import *
+from importlib import reload
+
+root_path = os.path.dirname(os.path.abspath(__file__))
+modules_path = os.path.dirname(root_path)
+scripts_path = os.path.join(modules_path, "scripts")
+
+
+@pytest.fixture()
+def setup_teardown_single_asic():
+    os.environ["PATH"] += os.pathsep + scripts_path
+    os.environ["UTILITIES_UNIT_TESTING"] = "2"
+    os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = ""
+    yield
+    os.environ["UTILITIES_UNIT_TESTING"] = "0"
+
+
+@pytest.fixture(scope="class")
+def setup_teardown_multi_asic():
+    os.environ["PATH"] += os.pathsep + scripts_path
+    os.environ["UTILITIES_UNIT_TESTING"] = "2"
+    os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic"
+    from .mock_tables import mock_multi_asic_3_asics
+    reload(mock_multi_asic_3_asics)
+    from .mock_tables import dbconnector
+    dbconnector.load_namespace_config()
+    yield
+    os.environ["UTILITIES_UNIT_TESTING"] = "0"
+    os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = ""
+    from .mock_tables import mock_single_asic
+    reload(mock_single_asic)
+
+
+class TestShowACLSingleASIC(object):
+    def test_show_acl_table(self, setup_teardown_single_asic):
+        runner = CliRunner()
+        aclloader = AclLoader()
+        context = {
+            "acl_loader": aclloader
+        }
+        result = runner.invoke(acl_loader_show.cli.commands['show'].commands['table'], ['DATAACL_5'], obj=context)
+        assert result.exit_code == 0
+        # We only care about the third line, which contains the 'Active'
+        result_top = result.output.split('\n')[2]
+        expected_output = "DATAACL_5 L3 Ethernet124 DATAACL_5 ingress Active"
+        assert result_top == expected_output
+
+    def test_show_acl_rule(self, setup_teardown_single_asic):
+        runner = CliRunner()
+        aclloader = AclLoader()
+        context = {
+            "acl_loader": aclloader
+        }
+        result = runner.invoke(acl_loader_show.cli.commands['show'].commands['rule'], ['DATAACL_5'], obj=context)
+        assert result.exit_code == 0
+        # We only care about the third line, which contains the 'Active'
+        result_top = result.output.split('\n')[2]
+        expected_output = "DATAACL_5 RULE_1 9999 FORWARD IP_PROTOCOL: 126 Active"
+        assert result_top == expected_output
+
+
+class TestShowACLMultiASIC(object):
+    def test_show_acl_table(self, setup_teardown_multi_asic):
+        runner = CliRunner()
+        aclloader = AclLoader()
+        context = {
+            "acl_loader": aclloader
+        }
+        result = runner.invoke(acl_loader_show.cli.commands['show'].commands['table'], ['DATAACL_5'], obj=context)
+        assert result.exit_code == 0
+        # We only care about the third line, which contains the 'Active'
+        result_top = result.output.split('\n')[2]
+        expected_output = "DATAACL_5 L3 Ethernet124 DATAACL_5 ingress {'asic0': 'Active', 'asic2': 'Active'}"
+        assert result_top == expected_output
+
+    def test_show_acl_rule(self, setup_teardown_multi_asic):
+        runner = CliRunner()
+        aclloader = AclLoader()
+        context = {
+            "acl_loader": aclloader
+        }
+        result = runner.invoke(acl_loader_show.cli.commands['show'].commands['rule'], ['DATAACL_5'], obj=context)
+        assert result.exit_code == 0
+        # We only care about the third line, which contains the 'Active'
+        result_top = result.output.split('\n')[2]
+        expected_output = "DATAACL_5 RULE_1 9999 FORWARD IP_PROTOCOL: 126 {'asic0': 'Active', 'asic2': 'Active'}"
+        assert result_top == expected_output