import copy
import json
import jsondiff
import importlib
import os
import tempfile
from collections import defaultdict
from swsscommon.swsscommon import ConfigDBConnector
from .gu_common import genericUpdaterLogging


UPDATER_CONF_FILE = "/etc/sonic/generic_config_updater.conf"
logger = genericUpdaterLogging.get_logger(title="Change Applier")

print_to_console = False
print_to_stdout = False


def set_print_options(to_console=False, to_stdout=False):
    """Select where log_debug/log_error messages are mirrored.

    to_console -- also forward messages to the syslog console stream
    to_stdout  -- also print messages to stdout (used by tests)
    """
    global print_to_console, print_to_stdout

    print_to_console = to_console
    print_to_stdout = to_stdout


def log_debug(m):
    """Log a debug message, mirroring it per set_print_options."""
    logger.log_debug(m, print_to_console)
    if print_to_stdout:
        print(m)


def log_error(m):
    """Log an error message, mirroring it per set_print_options."""
    logger.log_error(m, print_to_console)
    if print_to_stdout:
        print(m)


def get_config_db():
    """Return a connected ConfigDBConnector.

    Kept as a module-level function so tests can patch it out.
    """
    config_db = ConfigDBConnector()
    config_db.connect()
    return config_db


def set_config(config_db, tbl, key, data):
    """Write one CONFIG_DB entry; data=None deletes the key.

    Kept as a module-level function so tests can patch it out.
    """
    config_db.set_entry(tbl, key, data)


class ChangeApplier:
    """Applies a JsonChange onto the running CONFIG_DB and runs the
    service validators configured for every affected table.
    """

    # Validator configuration loaded from UPDATER_CONF_FILE once per
    # process; expected shape: {"tables": {...}, "services": {...}}.
    updater_conf = None

    def __init__(self):
        self.config_db = get_config_db()
        if (not ChangeApplier.updater_conf) and os.path.exists(UPDATER_CONF_FILE):
            with open(UPDATER_CONF_FILE, "r") as s:
                ChangeApplier.updater_conf = json.load(s)


    def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys):
        # cmd is in the format <module name>.<method name>; note the
        # module name itself may contain dots, so only the last
        # dot-separated element is the method name.
        #
        method_name = cmd.split(".")[-1]
        module_name = ".".join(cmd.split(".")[0:-1])

        module = importlib.import_module(module_name, package=None)
        method_to_call = getattr(module, method_name)

        # Validators are called as method(old_config, updated_config,
        # affected_keys) and return a falsy value on success.
        return method_to_call(old_cfg, upd_cfg, keys)


    def _services_validate(self, old_cfg, upd_cfg, keys):
        """Run the validate_commands of every service mapped to an
        affected table; return 0 on success or the first failing
        validator's non-zero return value.
        """
        lst_svcs = set()
        lst_cmds = set()
        if not keys:
            # calling apply with no config would invoke
            # default validation, if any
            #
            keys[""] = {}

        # BUGFIX: tolerate a missing/empty updater conf file; the
        # original subscripted updater_conf directly and crashed with
        # TypeError when the file did not exist (updater_conf is None).
        conf = ChangeApplier.updater_conf or {}

        tables = conf.get("tables", {})
        for tbl in keys:
            lst_svcs.update(tables.get(tbl, {}).get("services_to_validate", []))

        services = conf.get("services", {})
        for svc in lst_svcs:
            lst_cmds.update(services.get(svc, {}).get("validate_commands", []))

        for cmd in lst_cmds:
            ret = self._invoke_cmd(cmd, old_cfg, upd_cfg, keys)
            if ret:
                log_error("service invoked: {} failed with ret={}".format(cmd, ret))
                return ret
            log_debug("service invoked: {}".format(cmd))
        return 0


    def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys):
        """Diff one table between running and updated config; push every
        changed key to redis and record it in upd_keys[tbl].
        """
        for key in set(run_tbl.keys()).union(set(upd_tbl.keys())):
            run_data = run_tbl.get(key, None)
            upd_data = upd_tbl.get(key, None)

            if run_data != upd_data:
                # upd_data is None for a removed key, which makes
                # set_entry delete it from CONFIG_DB.
                set_config(self.config_db, tbl, key, upd_data)
                upd_keys[tbl][key] = {}
                log_debug("Patch affected tbl={} key={}".format(tbl, key))


    def _report_mismatch(self, run_data, upd_data):
        """Log a truncated diff between observed and expected config."""
        log_error("run_data vs expected_data: {}".format(
            str(jsondiff.diff(run_data, upd_data))[0:40]))


    def apply(self, change):
        """Apply a JsonChange to the running config.

        Computes the updated config, writes every differing table/key to
        CONFIG_DB, runs the configured service validators, and finally
        re-reads the running config to confirm it matches the expected
        result. Returns 0 on success, non-zero on any failure.
        """
        run_data = self._get_running_config()
        upd_data = change.apply(copy.deepcopy(run_data))
        upd_keys = defaultdict(dict)

        for tbl in sorted(set(run_data.keys()).union(set(upd_data.keys()))):
            self._upd_data(tbl, run_data.get(tbl, {}),
                    upd_data.get(tbl, {}), upd_keys)

        ret = self._services_validate(run_data, upd_data, upd_keys)
        if not ret:
            # Re-read and verify: validators may restart services that
            # rewrite config; the final state must equal upd_data.
            run_data = self._get_running_config()
            if upd_data != run_data:
                self._report_mismatch(run_data, upd_data)
                ret = -1
        if ret:
            log_error("Failed to apply Json change")
        return ret


    def _get_running_config(self):
        """Dump the running config via sonic-cfggen into a temp file and
        load it back as a dict.
        """
        (fd, fname) = tempfile.mkstemp(suffix="_changeApplier")
        # BUGFIX: mkstemp returns an OPEN file descriptor; the original
        # discarded it without closing, leaking one fd per call.
        os.close(fd)
        try:
            os.system("sonic-cfggen -d --print-data > {}".format(fname))
            with open(fname, "r") as s:
                run_data = json.load(s)
        finally:
            # Exception-safe cleanup (original skipped removal on error).
            if os.path.isfile(fname):
                os.remove(fname)
        return run_data
a/generic_config_updater/generic_updater_config.conf.json b/generic_config_updater/generic_updater_config.conf.json new file mode 100644 index 0000000000..f86844cca5 --- /dev/null +++ b/generic_config_updater/generic_updater_config.conf.json @@ -0,0 +1,59 @@ +{ + "tables": { + "": { + "services_to_validate": [ "system_health" ] + }, + "PORT": { + "services_to_validate": [ "port_service" ] + }, + "SYSLOG_SERVER":{ + "services_to_validate": [ "rsyslog" ] + }, + "DHCP_RELAY": { + "services_to_validate": [ "dhcp-relay" ] + }, + "DHCP_SERVER": { + "services_to_validate": [ "dhcp-relay" ] + } + }, + "README": [ + "Validate_commands provides, module & method name as ", + " .", + "NOTE: module name could have '.'", + " ", + "The last element separated by '.' is considered as ", + "method name", + "", + "e.g. 'show.acl.test_acl'", + "", + "Here we load 'show.acl' and call 'test_acl' method on it.", + "", + "called as:", + " .>(, ", + " , )", + " config is in JSON format as in config_db.json", + " affected_keys in same format, but w/o value", + " { 'ACL_TABLE': { 'SNMP_ACL': {} ... 
}, ...}", + " The affected keys has 'added', 'updated' & 'deleted'", + "", + "Multiple validate commands may be provided.", + "", + "Note: The commands may be called in any order", + "" + ], + "services": { + "system_health": { + "validate_commands": [ ] + }, + "port_service": { + "validate_commands": [ ] + }, + "rsyslog": { + "validate_commands": [ "services_validator.ryslog_validator" ] + }, + "dhcp-relay": { + "validate_commands": [ "services_validator.dhcp_validator" ] + } + } +} + diff --git a/generic_config_updater/services_validator.py b/generic_config_updater/services_validator.py new file mode 100644 index 0000000000..525afe971b --- /dev/null +++ b/generic_config_updater/services_validator.py @@ -0,0 +1,17 @@ +import os +from .gu_common import genericUpdaterLogging + +logger = genericUpdaterLogging.get_logger(title="Service Validator") + +def _service_restart(svc_name): + os.system(f"systemctl restart {svc_name}") + logger.log_notice(f"Restarted {svc_name}") + + +def ryslog_validator(old_config, upd_config, keys): + _service_restart("rsyslog-config") + + +def dhcp_validator(old_config, upd_config, keys): + _service_restart("dhcp_relay") + diff --git a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py new file mode 100644 index 0000000000..5d5b0908dc --- /dev/null +++ b/tests/generic_config_updater/change_applier_test.py @@ -0,0 +1,270 @@ +import copy +import json +import jsondiff +import os +import unittest +from collections import defaultdict +from unittest.mock import patch + +import generic_config_updater.change_applier +import generic_config_updater.gu_common + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +DATA_FILE = os.path.join(SCRIPT_DIR, "files", "change_applier_test.data.json") +CONF_FILE = os.path.join(SCRIPT_DIR, "files", "change_applier_test.conf.json") +# +# Datafile is structured as +# "running_config": {....} +# "json_changes": [ +# { +# "notes": , +# "update": { : {: {}, 
...}...}, +# "remove": { : { : {}, ..}, ...}, +# "services_to_validate": [ , ...] +# }, +# ... +# ] +# +# The json_changes is read into global json_changes +# The ChangeApplier.apply is called with each change +# The mocked JsonChange.apply applies this diff on given config +# The ChangeApplier.apply calls set_entry to update redis +# But we mock set_entry, and that instead: +# remove the corresponding changes from json_changes. +# Updates the global running_config +# +# At the end of application of all changes, expect global json-changes to +# be empty, which assures that set_entry is called for all expected keys. +# The global running config would reflect the final config +# +# The changes are written in such a way, upon the last change, the config +# will be same as the original config that we started with or as read from +# data file. +# +# So compare global running_config with read_data for running config +# from the file. +# This compares the integrity of final o/p + +# Data read from file +read_data = {} + +# Keep a copy of running_config before calling apply +# This is used by service_validate call to verify the args +# Args from change applier: ( +# +start_running_config = {} + +# The mock_set_entry (otherwise redis update) reflects the final config +# service_validate calls will verify against this +# +running_config = {} + +# Copy of changes read. 
Used by mock JsonChange.apply +# Cleared by mocked set_entry +json_changes = {} + +# The index into list of i/p json changes for mock code to use +json_change_index = 0 + +DB_HANDLE = "config_db" + +def debug_print(msg): + print(msg) + + +# Mimics os.system call for sonic-cfggen -d --print-data > filename +# +def os_system_cfggen(cmd): + global running_config + + fname = cmd.split(">")[-1].strip() + with open(fname, "w") as s: + s.write(json.dumps(running_config, indent=4)) + debug_print("File created {} type={} cfg={}".format(fname, + type(running_config), json.dumps(running_config)[1:40])) + return 0 + + +# mimics config_db.set_entry +# +def set_entry(config_db, tbl, key, data): + global running_config, json_changes, json_change_index + + assert config_db == DB_HANDLE + debug_print("set_entry: {} {} {}".format(tbl, key, str(data))) + + json_change = json_changes[json_change_index] + change_data = json_change["update"] if data != None else json_change["remove"] + + assert tbl in change_data + assert key in change_data[tbl] + + if data != None: + if tbl not in running_config: + running_config[tbl] = {} + running_config[tbl][key] = data + else: + assert tbl in running_config + assert key in running_config[tbl] + running_config[tbl].pop(key) + if not running_config[tbl]: + running_config.pop(tbl) + + change_data[tbl].pop(key) + if not change_data[tbl]: + change_data.pop(tbl) + + +# mimics JsonChange.apply +# +class mock_obj: + def apply(self, config): + json_change = json_changes[json_change_index] + + update = copy.deepcopy(json_change["update"]) + for tbl in update: + if tbl not in config: + config[tbl] = {} + for key in update[tbl]: + debug_print("apply: tbl={} key={} ".format(tbl, key)) + if key in config[tbl]: + config[tbl][key].update(update[tbl][key]) + else: + config[tbl][key] = update[tbl][key] + + remove = json_change["remove"] + for tbl in remove: + if tbl in config: + for key in remove[tbl]: + config[tbl].pop(key, None) + debug_print("apply: popped tbl={} 
key={}".format(tbl, key)) + if not config[tbl]: + config.pop(tbl, None) + debug_print("apply: popped EMPTY tbl={}".format(tbl)) + return config + + +# Test validators +# +def system_health(old_cfg, new_cfg, keys): + debug_print("system_health called") + svc_name = "system_health" + if old_cfg != new_cfg: + debug_print("system_health: diff={}".format(str( + jsondiff.diff(old_cfg, new_cfg)))) + assert False, "No change expected" + svcs = json_changes[json_change_index].get("services_validated", None) + if svcs != None: + assert svc_name in svcs + svcs.remove(svc_name) + + +def _validate_keys(keys): + # validate keys against original change as read from data file + # + change = read_data["json_changes"][json_change_index] + change_data = copy.deepcopy(change["update"]) + change_data.update(change["remove"]) + + for tbl in set(change_data.keys()).union(set(keys.keys())): + assert tbl in change_data + assert tbl in keys + chg_tbl = change_data[tbl] + keys_tbl = keys[tbl] + for key in set(chg_tbl.keys()).union(set(keys_tbl.keys())): + assert key in chg_tbl + assert key in keys_tbl + + +def _validate_svc(svc_name, old_cfg, new_cfg, keys): + if old_cfg != start_running_config: + debug_print("validate svc {}: old diff={}".format(svc_name, str( + jsondiff.diff(old_cfg, start_running_config)))) + assert False, "_validate_svc: old config mismatch" + + if new_cfg != running_config: + debug_print("validate svc {}: new diff={}".format(svc_name, str( + jsondiff.diff(new_cfg, running_config)))) + assert False, "_validate_svc: running config mismatch" + + _validate_keys(keys) + + # None provides a chance for test data to skip services_validated + # verification + svcs = json_changes[json_change_index].get("services_validated", None) + if svcs != None: + assert svc_name in svcs + svcs.remove(svc_name) + + +def acl_validate(old_cfg, new_cfg, keys): + debug_print("acl_validate called") + _validate_svc("acl_validate", old_cfg, new_cfg, keys) + + +def vlan_validate(old_cfg, new_cfg, 
keys): + debug_print("vlan_validate called") + _validate_svc("vlan_validate", old_cfg, new_cfg, keys) + + +class TestChangeApplier(unittest.TestCase): + + @patch("generic_config_updater.change_applier.os.system") + @patch("generic_config_updater.change_applier.get_config_db") + @patch("generic_config_updater.change_applier.set_config") + def test_change_apply(self, mock_set, mock_db, mock_os_sys): + global read_data, running_config, json_changes, json_change_index + global start_running_config + + mock_os_sys.side_effect = os_system_cfggen + mock_db.return_value = DB_HANDLE + mock_set.side_effect = set_entry + + with open(DATA_FILE, "r") as s: + read_data = json.load(s) + + running_config = copy.deepcopy(read_data["running_data"]) + json_changes = copy.deepcopy(read_data["json_changes"]) + + generic_config_updater.change_applier.UPDATER_CONF_FILE = CONF_FILE + generic_config_updater.change_applier.set_print_options(to_stdout=True) + + applier = generic_config_updater.change_applier.ChangeApplier() + debug_print("invoked applier") + + for i in range(len(json_changes)): + json_change_index = i + + # Take copy for comparison + start_running_config = copy.deepcopy(running_config) + + debug_print("main: json_change_index={}".format(json_change_index)) + + applier.apply(mock_obj()) + + debug_print(f"Testing json_change {json_change_index}") + + debug_print("Checking: index={} update:{} remove:{} svcs:{}".format(i, + json.dumps(json_changes[i]["update"])[0:20], + json.dumps(json_changes[i]["remove"])[0:20], + json.dumps(json_changes[i].get("services_validated", []))[0:20])) + assert not json_changes[i]["update"] + assert not json_changes[i]["remove"] + assert not json_changes[i].get("services_validated", []) + debug_print(f"----------------------------- DONE {i} ---------------------------------") + + debug_print("All changes applied & tested") + + # Test data is set up in such a way the multiple changes + # finally brings it back to original config. 
+ # + if read_data["running_data"] != running_config: + debug_print("final config mismatch: {}".format(str( + jsondiff.diff(read_data["running_data"], running_config)))) + + assert read_data["running_data"] == running_config + + debug_print("all good for applier") + + + diff --git a/tests/generic_config_updater/files/change_applier_test.conf.json b/tests/generic_config_updater/files/change_applier_test.conf.json new file mode 100644 index 0000000000..9b0f552a43 --- /dev/null +++ b/tests/generic_config_updater/files/change_applier_test.conf.json @@ -0,0 +1,24 @@ +{ + "tables": { + "": { + "services_to_validate": [ "system_health" ] + }, + "ACL_TABLE": { + "services_to_validate": [ "acl_service" ] + }, + "VLAN_INTERFACE": { + "services_to_validate": [ "acl_service", "vlan_service" ] + } + }, + "services": { + "system_health": { + "validate_commands": [ "tests.generic_config_updater.change_applier_test.system_health" ] + }, + "acl_service": { + "validate_commands": [ "tests.generic_config_updater.change_applier_test.acl_validate" ] + }, + "vlan_service": { + "validate_commands": [ "tests.generic_config_updater.change_applier_test.vlan_validate" ] + } + } +} diff --git a/tests/generic_config_updater/files/change_applier_test.data.json b/tests/generic_config_updater/files/change_applier_test.data.json new file mode 100644 index 0000000000..d75541a5de --- /dev/null +++ b/tests/generic_config_updater/files/change_applier_test.data.json @@ -0,0 +1,308 @@ +{ + "running_data": { + "ACL_TABLE": { + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0007" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0004", + "Ethernet96" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "PortChannel0001", + 
"PortChannel0002", + "PortChannel0003", + "PortChannel0004", + "Ethernet96" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "BGP_NEIGHBOR": { + "10.0.0.57": { + "asn": "64600", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.56", + "name": "ARISTA01T1", + "nhopself": "0", + "rrclient": "0" + }, + "10.0.0.59": { + "asn": "64600", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.58", + "name": "ARISTA02T1", + "nhopself": "0", + "rrclient": "0" + } + }, + "BGP_PEER_RANGE": { + "BGPSLBPassive": { + "ip_range": [ + "10.255.0.0/25" + ], + "name": "BGPSLBPassive", + "src_address": "10.1.0.32" + }, + "BGPVac": { + "ip_range": [ + "192.168.0.0/21" + ], + "name": "BGPVac", + "src_address": "10.1.0.32" + } + }, + "BUFFER_PG": { + "Ethernet0|3-4": { + "profile": "[BUFFER_PROFILE|pg_lossless_40000_300m_profile]" + }, + "Ethernet100|3-4": { + "profile": "[BUFFER_PROFILE|pg_lossless_40000_300m_profile]" + }, + "Ethernet104|3-4": { + "profile": "[BUFFER_PROFILE|pg_lossless_40000_300m_profile]" + }, + "Ethernet108|3-4": { + "profile": "[BUFFER_PROFILE|pg_lossless_40000_300m_profile]" + }, + "Ethernet112|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet112|3-4": { + "profile": "[BUFFER_PROFILE|pg_lossless_40000_300m_profile]" + } + }, + "DEVICE_METADATA": { + "localhost": { + "bgp_asn": "65100", + "buffer_model": "traditional", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "type": "ToRRouter" + } + }, + "DEVICE_NEIGHBOR_METADATA": { + "ARISTA01T1": { + "hwsku": "Arista-VM", + "lo_addr": "None", + "mgmt_addr": "10.64.246.220", + "type": "LeafRouter" + }, + "ARISTA02T1": { + "hwsku": "Arista-VM", + "lo_addr": "None", + "mgmt_addr": "10.64.246.221", + "type": "LeafRouter" + } + }, + "PORT": { + "Ethernet0": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": 
"29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet100": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet104": { + "alias": "fortyGigE0/104", + "description": "fortyGigE0/104", + "index": "26", + "lanes": "85,86,87,88", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet108": { + "alias": "fortyGigE0/108", + "description": "fortyGigE0/108", + "index": "27", + "lanes": "81,82,83,84", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + }, + "PORTCHANNEL": { + "PortChannel0001": { + "admin_status": "up", + "members": [ + "Ethernet112" + ], + "min_links": "1", + "mtu": "9100" + }, + "PortChannel0002": { + "admin_status": "up", + "members": [ + "Ethernet116" + ], + "min_links": "1", + "mtu": "9100" + } + }, + "PORTCHANNEL_INTERFACE": { + "PortChannel0001": {}, + "PortChannel0002": {}, + "PortChannel0001|10.0.0.56/31": {}, + "PortChannel0001|FC00::71/126": {}, + "PortChannel0002|10.0.0.58/31": {}, + "PortChannel0002|FC00::75/126": {} + }, + "PORTCHANNEL_MEMBER": { + "PortChannel0001|Ethernet112": {}, + "PortChannel0002|Ethernet116": {}, + "PortChannel0003|Ethernet120": {}, + "PortChannel0004|Ethernet124": {} + }, + "QUEUE": { + "Ethernet112|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + } + }, + "VLAN": { + "Vlan1000": { + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.48" + ], + "members": [ + "Ethernet4", + "Ethernet8", + "Ethernet96" + ], + "vlanid": "1000" + } + }, + "VLAN_INTERFACE": { + "Vlan1000": {}, + "Vlan1000|192.168.0.1/21": {}, + "Vlan1000|2603:10b0:b13:c70::1/64": {} + }, + "VLAN_MEMBER": { + "Vlan1000|Ethernet12": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet16": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet28": { + } + }, + "WRED_PROFILE": { + 
"AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "yellow_drop_probability": "5", + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" + } + } + }, + "json_changes": [ + { + "name": "change_0", + "update": {}, + "remove": {}, + "services_validated": ["system_health"] + }, + { + "name": "change_1", + "update": { + "VLAN_INTERFACE": { "Vlan2000": {}, "Vlan2000|192.168.0.2/21": {} }, + "WRED_PROFILE": { "AZURE_LOSSLESS": { "green_drop_probability": "99", "ecn": "88" }}, + "ACL_TABLE": {"DATAACL": {"type": "test_data" }} + }, + "remove": { + "BGP_NEIGHBOR": { "10.0.0.57": {} }, + "BGP_PEER_RANGE": { "BGPSLBPassive": {} } + }, + "services_validated": [ "vlan_validate", "acl_validate" ] + }, + { + "name": "change_2", + "update": { + "BGP_NEIGHBOR": { "10.0.0.57": { + "asn": "64600", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.56", + "name": "ARISTA01T1", + "nhopself": "0", + "rrclient": "0" }}, + "WRED_PROFILE": { "AZURE_LOSSLESS": { "ecn": "ecn_all" } }, + "ACL_TABLE": {"DATAACL": {"type": "test_data11" }}, + "TEST_ONLY" : { "TEST_SUB" : {"foo": "88" } } + }, + "remove": { + "VLAN_INTERFACE": { "Vlan2000": {} } + } + }, + { + "name": "change_3", + "update": { + "WRED_PROFILE": { "AZURE_LOSSLESS": { "green_drop_probability": "5" } }, + "ACL_TABLE": {"DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0007" + ], + "stage": "ingress", + "type": "L3" } + }, + "BGP_PEER_RANGE": { + "BGPSLBPassive": { + "ip_range": ["10.255.0.0/25"], + "name": "BGPSLBPassive", + "src_address": "10.1.0.32" + } + } + }, + "remove": { + "VLAN_INTERFACE": { "Vlan2000|192.168.0.2/21": {} }, + "TEST_ONLY": { "TEST_SUB": {} } + } + } + ] +}