diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..87273e4 --- /dev/null +++ b/.flake8 @@ -0,0 +1,9 @@ +[flake8] +exclude=.git,__pycache__,docs/source/conf.py,old,build,dist,venv,__init__.py,./lib/* +inline-quotes=' +max-line-length=120 +select=E,F,W,C4,Q0,N80,ABS,W504 +ignore=W503,Q003,C417 +extend-ignore=E203 +per-file-ignores= + bin/yang_figures.py:F405,F403 \ No newline at end of file diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c0edfc9..0698e2f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -23,9 +23,19 @@ jobs: run: | python -m pip install --upgrade pip pip install -r requirements.txt + pip install -r lint_requirements.txt cd bin/resources/HTML python setup.py install + - name: Check isort + uses: jamescurtin/isort-action@master + + - name: Check flake8 + run: flake8 . + + - name: Check black + uses: psf/black@stable + - name: Test with pytest run: | export YANGCATALOG_CONFIG_PATH=$PWD/tests/resources/test.conf diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..59f04ff --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,24 @@ +repos: + - repo: https://github.com/asottile/add-trailing-comma + rev: v2.3.0 + hooks: + - id: add-trailing-comma + - repo: https://github.com/psf/black + rev: 22.10.0 + hooks: + - id: black + - repo: https://github.com/pycqa/isort + rev: 5.10.1 + hooks: + - id: isort + name: isort (python) + - repo: https://github.com/pycqa/flake8 + rev: '5.0.4' + hooks: + - id: flake8 + args: + - '--config=.flake8' + additional_dependencies: [ + flake8-docstrings, flake8-quotes, flake8-print, pep8-naming, flake8-absolute-import, flake8-comprehensions, + flake8-commas, flake8-multiline-containers, + ] \ No newline at end of file diff --git a/bin/check_archived_drafts.py b/bin/check_archived_drafts.py index 48b6f06..d56dca1 100644 --- a/bin/check_archived_drafts.py +++ b/bin/check_archived_drafts.py @@ -26,7 +26,6 @@ import time import requests - from create_config import create_config from extractors.draft_extractor import DraftExtractor from job_log import job_log @@ -51,10 +50,7 @@ def main(): yang_path = os.path.join(ietf_directory, 'archived-drafts-modules') parser = argparse.ArgumentParser(description='Check if modules from all the Drafts are populated in YANG Catalog') - parser.add_argument('--debug', - help='Debug level - default is 0', - type=int, - default=0) + parser.add_argument('--debug', help='Debug level - default is 0', type=int, default=0) args = parser.parse_args() custom_print('Starting {} script'.format(os.path.basename(__file__))) @@ -66,7 +62,7 @@ def main(): 'draft_path': archived_draft_path, 'yang_path': yang_path, 'all_yang_draft_path_strict': all_yang_drafts_strict, - 'all_yang_path': all_yang_path + 'all_yang_path': all_yang_path, } try: @@ -75,9 +71,14 @@ def main(): remove_directory_content(all_yang_drafts_strict, args.debug) custom_print('Extracting modules from drafts stored in {}'.format(archived_draft_path)) - draftExtractor = DraftExtractor(draft_extractor_paths, args.debug, - extract_elements=False, extract_examples=False, copy_drafts=False) - draftExtractor.extract() + draft_extractor = DraftExtractor( + draft_extractor_paths, + args.debug, + extract_elements=False, + extract_examples=False, + copy_drafts=False, + ) + draft_extractor.extract() except Exception as err: custom_print('Error occured while extracting modules') end = int(time.time()) @@ -110,7 +111,7 @@ def main(): 
missing_modules = [] incorrect_revision_modules = [] - for yang_file in draftExtractor.inverted_draft_yang_dict: + for yang_file in draft_extractor.inverted_draft_yang_dict: name_revision = yang_file.split('.yang')[0] if any(yang_file in module for module in (old_modules, unparsable_modules)) or yang_file.startswith('example'): continue diff --git a/bin/compilation_status.py b/bin/compilation_status.py index 2a2366e..42b73c3 100644 --- a/bin/compilation_status.py +++ b/bin/compilation_status.py @@ -84,7 +84,10 @@ def yanglint_compilation_status(compilation_result: str) -> str: ret = 'UNKNOWN' # 'err : Input data contains submodule which cannot be parsed directly without its main module.' error message # => still print the message, but doesn't report it as FAILED - if 'err : Input data contains submodule which cannot be parsed directly without its main module.' in compilation_result: + if ( + 'err : Input data contains submodule which cannot be parsed directly without its main module.' + in compilation_result + ): ret = 'PASSED' return ret diff --git a/bin/compile_modules.py b/bin/compile_modules.py index c6fda30..20b7045 100755 --- a/bin/compile_modules.py +++ b/bin/compile_modules.py @@ -20,11 +20,10 @@ from configparser import ConfigParser import requests -from filelock import FileLock - from compilation_status import combined_compilation, pyang_compilation_status from create_config import create_config from file_hasher import FileHasher +from filelock import FileLock from files_generator import FilesGenerator from metadata_generators.base_metadata_generator import BaseMetadataGenerator from metadata_generators.draft_metadata_generator import ArchivedMetadataGenerator, DraftMetadataGenerator @@ -34,7 +33,7 @@ from parsers.pyang_parser import PyangParser from parsers.yangdump_pro_parser import YangdumpProParser from parsers.yanglint_parser import YanglintParser -from utility.utility import check_yangcatalog_data, IETF, module_or_submodule, number_that_passed_compilation +from utility.utility import IETF, check_yangcatalog_data, module_or_submodule, number_that_passed_compilation __author__ = 'Benoit Claise' __copyright__ = 'Copyright(c) 2015-2018, Cisco Systems, Inc., Copyright The IETF Trust 2022, All Rights Reserved' @@ -107,17 +106,17 @@ def get_name_with_revision(yang_file: str) -> str: print( f'Name of the YANG file {yang_file_base} is wrong changing to correct one into ' f'{new_yang_file_base_with_revision}', - flush=True + flush=True, ) yang_file_base = new_yang_file_base_with_revision if ( - new_yang_file_base_with_revision.split('@')[1].split('.')[0] != - yang_file_base.split('@')[1].split('.')[0] + new_yang_file_base_with_revision.split('@')[1].split('.')[0] + != yang_file_base.split('@')[1].split('.')[0] ): print( f'Revision of the YANG file {yang_file_base} is wrong changing to correct as ' f'{new_yang_file_base_with_revision}', - flush=True + flush=True, ) yang_file_base = new_yang_file_base_with_revision @@ -127,7 +126,7 @@ def get_name_with_revision(yang_file: str) -> str: if debug_level > 0: print( f'DEBUG: Adding the revision to YANG module because xym can\'t get revision ' - f'(missing from the YANG module): {yang_file}' + f'(missing from the YANG module): {yang_file}', ) print(f'DEBUG: out: {new_yang_file_base_with_revision}') @@ -160,7 +159,7 @@ def parse_module(parsers: dict, yang_file: str, root_directory: str, lint: bool, 'pyang': result_no_pyang_param, 'confdrc': result_confd, 'yumadump': result_yuma, - 'yanglint': result_yanglint + 'yanglint': result_yanglint, } 
compilation_status = combined_compilation(os.path.basename(yang_file), module_compilation_results) return compilation_status, module_compilation_results @@ -169,23 +168,25 @@ def parse_module(parsers: dict, yang_file: str, root_directory: str, lint: bool, def parse_example_module(parsers: dict, yang_file: str, root_directory: str, lint: bool, allinclusive: bool): result_pyang = parsers['pyang'].run_pyang(root_directory, yang_file, lint, allinclusive, True) result_no_pyang_param = parsers['pyang'].run_pyang(root_directory, yang_file, lint, allinclusive, False) - module_compilation_results = { - 'pyang_lint': result_pyang, - 'pyang': result_no_pyang_param - } + module_compilation_results = {'pyang_lint': result_pyang, 'pyang': result_no_pyang_param} compilation_status = pyang_compilation_status(result_pyang) return compilation_status, module_compilation_results def validate( - prefix: str, modules: dict, yang_list: list, parser_args: dict, document_dict: dict, config: ConfigParser, + prefix: str, + modules: dict, + yang_list: list, + parser_args: dict, + document_dict: dict, + config: ConfigParser, ) -> dict: agregate_results = {'all': {}, 'no_submodules': {}} parsers = { 'pyang': PyangParser(debug_level, config=config), 'confdc': ConfdcParser(debug_level), 'yangdumppro': YangdumpProParser(debug_level), - 'yanglint': YanglintParser(debug_level) + 'yanglint': YanglintParser(debug_level), } all_yang_catalog_metadata = {} for module in modules['module']: @@ -215,13 +216,21 @@ def validate( compilation_status, module_compilation_results = parse_module(parsers, yang_file, **parser_args) metadata_generator = metadata_generator_cls( - module_compilation_results, compilation_status, yang_file, document_dict, + module_compilation_results, + compilation_status, + yang_file, + document_dict, ) confd_metadata = metadata_generator.get_confd_metadata() yang_file_compilation = metadata_generator.get_file_compilation() check_yangcatalog_data( - config, yang_file, confd_metadata, module_compilation_results, all_yang_catalog_metadata, ietf, + config, + yang_file, + confd_metadata, + module_compilation_results, + all_yang_catalog_metadata, + ietf, ) # Revert to previous hash if compilation status is 'UNKNOWN' -> try to parse model again next time @@ -273,7 +282,8 @@ def main(): ietf_directory = config.get('Directory-Section', 'ietf-directory') parser = argparse.ArgumentParser( - description='YANG Document Processor: generate tables with compilation errors/warnings') + description='YANG Document Processor: generate tables with compilation errors/warnings', + ) parser.add_argument( '--rootdir', help='Root directory where to find the source YANG models. Default is "."', @@ -283,70 +293,61 @@ def main(): parser.add_argument( '--metadata', help='Metadata text (such as SDOs, Github location, etc.) to be displayed on the generated HTML page. ' - 'Default is ""', + 'Default is ""', type=str, - default='' + default='', ) parser.add_argument( '--lint', help='Optional flag that determines pyang syntax enforcement; ' - 'If set, pyang --lint is run. ' - 'Otherwise, pyang --ietf is run. ' - 'Default is False', + 'If set, pyang --lint is run. ' + 'Otherwise, pyang --ietf is run. ' + 'Default is False', action='store_true', - default=False + default=False, ) parser.add_argument( '--allinclusive', help='Optional flag that determines whether the rootdir directory ' - 'contains all imported YANG modules; ' - 'If set, the YANG validators will only look in the rootdir directory. 
' - f'Otherwise, the YANG validators look in {modules_directory}. ' - 'Default is False', + 'contains all imported YANG modules; ' + 'If set, the YANG validators will only look in the rootdir directory. ' + f'Otherwise, the YANG validators look in {modules_directory}. ' + 'Default is False', action='store_true', - default=False + default=False, ) parser.add_argument( '--prefix', help='Prefix for generating HTML file names. Example: MEF, IEEEStandard, IEEEExperimental. Default is ""', - default='' - ) - parser.add_argument( - '--debug', - help='Debug level - default is 0', - type=int, - default=0 + default='', ) + parser.add_argument('--debug', help='Debug level - default is 0', type=int, default=0) parser.add_argument( '--forcecompilation', help='Optional flag that determines wheter compilation should be run ' - 'for all files even if they have not been changed ' - 'or even if the validators versions have not been changed.', + 'for all files even if they have not been changed ' + 'or even if the validators versions have not been changed.', action='store_true', - default=False + default=False, ) group = parser.add_mutually_exclusive_group() - group.add_argument( - '--rfc', - help='Set specific options for compiling RFCs.', - action='store_true' - ) + group.add_argument('--rfc', help='Set specific options for compiling RFCs.', action='store_true') group.add_argument( '--draft', help='Include extra metadata in the compilation results when compiling drafts. ' - 'Does not include archived drafts.', - action='store_true' + 'Does not include archived drafts.', + action='store_true', ) group.add_argument( '--draft-archive', help='Include extra metadata in the compilation results when compiling drafts. Includes archived drafts.', - action='store_true' + action='store_true', ) group.add_argument( '--example', help='Include extra metadata in the compilation results when compiling examples,' - ' only compile examples with pyang.', - action='store_true' + ' only compile examples with pyang.', + action='store_true', ) args = parser.parse_args() @@ -395,11 +396,7 @@ def main(): yang_list = list_of_yang_modules_in_subdir(args.rootdir, args.debug) - parser_args = { - 'root_directory': args.rootdir, - 'lint': args.lint, - 'allinclusive': args.allinclusive - } + parser_args = {'root_directory': args.rootdir, 'lint': args.lint, 'allinclusive': args.allinclusive} if debug_level > 0: print(f'yang_list content:\n{yang_list}') @@ -408,12 +405,12 @@ def main(): custom_print('all modules compiled/validated') # Generate HTML and JSON files - filesGenerator = FilesGenerator(web_private) + files_generator = FilesGenerator(web_private) if ietf == IETF.DRAFT: # Generate json and html files with compilation results of modules extracted from IETF Drafts with Cisco authors - filesGenerator.write_dictionary(agregate_results['all'], 'IETFCiscoAuthors') - headers = filesGenerator.getIETFCiscoAuthorsYANGPageCompilationHeaders() - filesGenerator.generateYANGPageCompilationHTML(agregate_results['all'], headers, 'IETFCiscoAuthors') + files_generator.write_dictionary(agregate_results['all'], 'IETFCiscoAuthors') + headers = files_generator.get_ietf_cisco_authors_yang_page_compilation_headers() + files_generator.generate_yang_page_compilation_html(agregate_results['all'], headers, 'IETFCiscoAuthors') # Update draft archive cache path = os.path.join(web_private, 'IETFDraftArchive.json') @@ -423,17 +420,17 @@ def main(): except FileNotFoundError: old_draft_archive_results = {} draft_archive_results = old_draft_archive_results | 
agregate_results['all'] - filesGenerator.write_dictionary(draft_archive_results, 'IETFDraftArchive') + files_generator.write_dictionary(draft_archive_results, 'IETFDraftArchive') # Strip cisco authors out agregate_results['all'] = {k: v[:2] + v[3:] for k, v in agregate_results['all'].items()} # Generate json and html files with compilation results of modules extracted from IETF Drafts - filesGenerator.write_dictionary(agregate_results['all'], args.prefix) - headers = filesGenerator.getIETFDraftYANGPageCompilationHeaders() - filesGenerator.generateYANGPageCompilationHTML(agregate_results['all'], headers, args.prefix) + files_generator.write_dictionary(agregate_results['all'], args.prefix) + headers = files_generator.get_ietf_draft_yang_page_compilation_headers() + files_generator.generate_yang_page_compilation_html(agregate_results['all'], headers, args.prefix) elif ietf == IETF.DRAFT_ARCHIVE: - filesGenerator.write_dictionary(agregate_results['all'], args.prefix) + files_generator.write_dictionary(agregate_results['all'], args.prefix) # Update draft cache path = os.path.join(web_private, 'IETFCiscoAuthors.json') @@ -443,12 +440,12 @@ def main(): except FileNotFoundError: draft_keys = set() draft_results = {key: agregate_results['all'] for key in draft_keys} - filesGenerator.write_dictionary(draft_results, 'IETFCiscoAuthors') + files_generator.write_dictionary(draft_results, 'IETFCiscoAuthors') elif ietf == IETF.EXAMPLE: - filesGenerator.write_dictionary(agregate_results['all'], args.prefix) - headers = filesGenerator.getIETFDraftExampleYANGPageCompilationHeaders() - filesGenerator.generateYANGPageCompilationHTML(agregate_results['no_submodules'], headers, args.prefix) + files_generator.write_dictionary(agregate_results['all'], args.prefix) + headers = files_generator.get_ietf_draft_example_yang_page_compilation_headers() + files_generator.generate_yang_page_compilation_html(agregate_results['no_submodules'], headers, args.prefix) else: if ietf == IETF.RFC: # Create yang module reference table @@ -459,14 +456,17 @@ def main(): rfc_url_anchor = f'{rfc_name}' module_to_rfc_anchor[yang_module] = rfc_url_anchor - filesGenerator.write_dictionary(module_to_rfc_anchor, 'IETFYANGRFC') + files_generator.write_dictionary(module_to_rfc_anchor, 'IETFYANGRFC') headers = ['YANG Model (and submodel)', 'RFC'] - filesGenerator.generateHTMLTable(module_to_rfc_anchor, headers) - - filesGenerator.write_dictionary(agregate_results['all'], args.prefix) - headers = filesGenerator.getYANGPageCompilationHeaders(args.lint) - filesGenerator.generateYANGPageCompilationHTML( - agregate_results['no_submodules'], headers, args.prefix, args.metadata, + files_generator.generate_html_table(module_to_rfc_anchor, headers) + + files_generator.write_dictionary(agregate_results['all'], args.prefix) + headers = files_generator.get_yang_page_compilation_headers(args.lint) + files_generator.generate_yang_page_compilation_html( + agregate_results['no_submodules'], + headers, + args.prefix, + args.metadata, ) # Generate modules compilation results statistics HTML page @@ -482,25 +482,24 @@ def main(): 'draft-passed': number_that_passed_compilation(agregate_results['all'], 3, 'PASSED'), 'draft-warnings': number_that_passed_compilation(agregate_results['all'], 3, 'PASSED WITH WARNINGS'), 'all-ietf-drafts': len( - [f for f in os.listdir(all_yang_path) if os.path.isfile(os.path.join(all_yang_path, f))]) + [f for f in os.listdir(all_yang_path) if os.path.isfile(os.path.join(all_yang_path, f))], + ), } merged_stats = 
write_page_main('ietf-yang', compilation_stats) - filesGenerator.generateIETFYANGPageMainHTML(merged_stats) + files_generator.generate_ietfyang_page_main_html(merged_stats) elif ietf == IETF.EXAMPLE: - compilation_stats = { - 'example-drafts': len(document_dict.keys()) - } + compilation_stats = {'example-drafts': len(document_dict.keys())} merged_stats = write_page_main('ietf-yang', compilation_stats) - filesGenerator.generateIETFYANGPageMainHTML(merged_stats) + files_generator.generate_ietfyang_page_main_html(merged_stats) else: compilation_stats = { 'passed': passed, 'warnings': passed_with_warnings, 'total': total_number, - 'failed': failed + 'failed': failed, } write_page_main(args.prefix, compilation_stats) - filesGenerator.generateYANGPageMainHTML(args.prefix, compilation_stats) + files_generator.generate_yang_page_main_html(args.prefix, compilation_stats) # Print the summary of the compilation results print('--------------------------') @@ -508,35 +507,35 @@ def main(): print(f'Number of correctly extracted YANG models from IETF drafts: {compilation_stats["total-drafts"]}') print( 'Number of YANG models in IETF drafts that passed compilation: ' - f'{compilation_stats["draft-passed"]}/{compilation_stats.get("total-drafts")}' + f'{compilation_stats["draft-passed"]}/{compilation_stats.get("total-drafts")}', ) print( 'Number of YANG models in IETF drafts that passed compilation with warnings: ' - f'{compilation_stats["draft-warnings"]}/{compilation_stats.get("total-drafts")}' + f'{compilation_stats["draft-warnings"]}/{compilation_stats.get("total-drafts")}', ), print( 'Number of all YANG models in IETF drafts (examples, badly formatted, etc. ): ' - f'{compilation_stats["all-ietf-drafts"]}' + f'{compilation_stats["all-ietf-drafts"]}', ) elif ietf == IETF.EXAMPLE: print( 'Number of correctly extracted example YANG models from IETF drafts: ' f'{compilation_stats["example-drafts"]}', - flush=True + flush=True, ) else: print(f'Number of YANG data models from {args.prefix}: {compilation_stats["total"]}') print( f'Number of YANG data models from {args.prefix} that passed compilation: ' - f'{compilation_stats["passed"]}/{compilation_stats["total"]}' + f'{compilation_stats["passed"]}/{compilation_stats["total"]}', ) print( f'Number of YANG data models from {args.prefix} that passed compilation with warnings: ' - f'{compilation_stats["warnings"]}/{compilation_stats["total"]}' + f'{compilation_stats["warnings"]}/{compilation_stats["total"]}', ) print( f'Number of YANG data models from {args.prefix} that failed compilation: ' - f'{compilation_stats["failed"]}/{compilation_stats["total"]}' + f'{compilation_stats["failed"]}/{compilation_stats["total"]}', ) custom_print(f'end of {os.path.basename(__file__)} job for {args.prefix}') diff --git a/bin/create_config.py b/bin/create_config.py index c6a0f28..9061254 100644 --- a/bin/create_config.py +++ b/bin/create_config.py @@ -1,5 +1,5 @@ -import os import configparser +import os def create_config(config_path=os.environ['YANGCATALOG_CONFIG_PATH']): diff --git a/bin/extract_elem.py b/bin/extract_elem.py index d5288ac..9ec6e3f 100755 --- a/bin/extract_elem.py +++ b/bin/extract_elem.py @@ -14,55 +14,61 @@ # either express or implied. 
__author__ = 'Eric Vyncke' -__copyright__ = "Copyright(c) 2019, Cisco Systems, Inc., Copyright The IETF Trust 2019, All Rights Reserved" -__email__ = "evyncke@cisco.com" +__copyright__ = 'Copyright(c) 2019, Cisco Systems, Inc., Copyright The IETF Trust 2019, All Rights Reserved' +__email__ = 'evyncke@cisco.com' import re + def extract_elem(module_fname, extract_dir, elem_type): - # Let's parse the module, we will create files when seeing the keywords such as 'identity-networking-instance-type.txt' + # Let's parse the module, we will create files when seeing the keywords such as + # 'identity-networking-instance-type.txt' open_bracket_count = 0 in_comment = False found_keyword = False file_out = None with open(module_fname, 'r', encoding='utf-8', errors='ignore') as ym: for line in ym: - if not found_keyword: # Still looking for keyword + if not found_keyword: # Still looking for keyword comment_start = line.find('//') if comment_start >= 0: - line = line[:comment_start] # Get rid of the one-line comment + line = line[:comment_start] # Get rid of the one-line comment comment_start = line.find('/*') comment_end = line.find('*/') - if comment_start >=0 and comment_start < comment_end: # Another one-line comment -# print("Before cut off line:", comment_start, comment_end, line) - line = line[:comment_start] + line[comment_end+2:] -# print("After cut off line:", comment_start, comment_end, line) + if comment_start >= 0 and comment_start < comment_end: # Another one-line comment + # print("Before cut off line:", comment_start, comment_end, line) + line = line[:comment_start] + line[comment_end + 2 :] + # print("After cut off line:", comment_start, comment_end, line) else: - if comment_start >=0: + if comment_start >= 0: in_comment = True -# print("Before cut off line:", comment_start, comment_end, line) + # print("Before cut off line:", comment_start, comment_end, line) line = line[:comment_start] -# print("After cut off line:", comment_start, comment_end, line) + # print("After cut off line:", comment_start, comment_end, line) else: - if comment_end >=0: + if comment_end >= 0: in_comment = False -# print("Before cut off line:", comment_start, comment_end, line) - line = line[comment_end+2:] -# print("After cut off line:", comment_start, comment_end, line) + # print("Before cut off line:", comment_start, comment_end, line) + line = line[comment_end + 2 :] + # print("After cut off line:", comment_start, comment_end, line) # If we are in a multiple-line comment, let's skip this line if in_comment: continue # Search after the keyword which MUST be the first word in the line (no " for example before) - #keyword_start = line.lstrip().find(elem_type) + # keyword_start = line.lstrip().find(elem_type) match = re.match(r'^\s*' + elem_type + r'\s+([-_\.\w]+)' + r'\s*{', line) if match: found_keyword = True identifier = match.group(1) # Let's open the output file if not yet done - if file_out == None: - file_out = open(extract_dir + '/' + elem_type + '-' + identifier + '.txt', 'w', encoding = 'utf-8') -# print("Creating file: " + extract_dir + '/' + elem_type + '-' + identifier + '.txt') - if found_keyword and file_out: # Processing the keyword + if file_out is None: + file_out = open( + extract_dir + '/' + elem_type + '-' + identifier + '.txt', + 'w', + encoding='utf-8', + ) + # print("Creating file: " + extract_dir + '/' + elem_type + '-' + identifier + '.txt') + if found_keyword and file_out: # Processing the keyword file_out.write(line) if line.find('{') >= 0: open_bracket_count = open_bracket_count + 
1 @@ -75,7 +81,8 @@ def extract_elem(module_fname, extract_dir, elem_type): found_keyword = False file_out = None -if __name__ == "__main__": + +if __name__ == '__main__': file = '/var/www/html/YANG-modules/ietf-gen-rpc.yang' extract_elem(file, '/tmp/extract', 'grouping') extract_elem(file, '/tmp/extract', 'typedef') diff --git a/bin/extract_emails.py b/bin/extract_emails.py index 288e120..0dbfc42 100644 --- a/bin/extract_emails.py +++ b/bin/extract_emails.py @@ -28,7 +28,8 @@ # Functions # ---------------------------------------------------------------------- def list_of_ietf_drafts(directory: str): - """ Returns a list of all the drafts in a directory. + """ + Returns a list of all the drafts in a directory. Arguments: :param directory (str) Directory to search for drafts @@ -42,8 +43,8 @@ def list_of_ietf_drafts(directory: str): def extract_email_string(draft_path: str, email_domain: str, debug_level: int = 0): - """ Returns a string, comma separated, of all the email addresses for the company email domain, - within an IETF draft. + """ + Returns a string, comma separated, of all the email addresses for the company email domain within an IETF draft. Arguments: :param draft_path (str) Full path to the draft @@ -88,10 +89,7 @@ def extract_email_string(draft_path: str, email_domain: str, debug_level: int = draft_path = config.get('Directory-Section', 'ietf-drafts') parser = argparse.ArgumentParser(description='Extract comma-separated list of email addresses') - parser.add_argument('--debug', - help='Debug level - default is 0', - type=int, - default=0) + parser.add_argument('--debug', help='Debug level - default is 0', type=int, default=0) args = parser.parse_args() debug_level = args.debug diff --git a/bin/extract_ietf_modules.py b/bin/extract_ietf_modules.py index 5006527..db9edc2 100755 --- a/bin/extract_ietf_modules.py +++ b/bin/extract_ietf_modules.py @@ -47,76 +47,89 @@ def main(): rfc_path = config.get('Directory-Section', 'ietf-rfcs') cache_directory = config.get('Directory-Section', 'cache') public_directory = config.get('Web-Section', 'public-directory') - send_emails_about_problematic_drafts = config.get( - 'General-Section', 'send_emails_about_problematic_drafts', fallback='False' - ) == 'True' + send_emails_about_problematic_drafts = ( + config.get('General-Section', 'send_emails_about_problematic_drafts', fallback='False') == 'True' + ) parser = argparse.ArgumentParser(description='YANG RFC/Draft Processor') - parser.add_argument('--archived', - help='Extract expired drafts as well', - action='store_true', - default=False) - parser.add_argument('--yangpath', - help='Path to the directory where to extract models (only correct). ' - f'Default is "{ietf_directory}/YANG/"', - type=str, - default=f'{ietf_directory}/YANG/') - parser.add_argument('--allyangpath', - help='Path to the directory where to extract models (including bad ones). ' - f'Default is "{ietf_directory}/YANG-all/"', - type=str, - default=f'{ietf_directory}/YANG-all/') - parser.add_argument('--allyangexamplepath', - help='Path to the directory where to extract example models ' - '(starting with example- and not with CODE BEGINS/END). ' - f'Default is "{ietf_directory}/YANG-example/"', - type=str, - default=f'{ietf_directory}/YANG-example/') - parser.add_argument('--yangexampleoldrfcpath', - help='Path to the directory where to extract ' - 'the hardcoded YANG module example models from old RFCs (not starting with example-). 
' - f'Default is "{ietf_directory}/YANG-example-old-rfc/"', - type=str, - default=f'{ietf_directory}/YANG-example-old-rfc/') - parser.add_argument('--draftpathstrict', - help='Path to the directory where to extract the drafts containing the YANG model(s) - ' - 'with xym flag strict=True. ' - f'Default is "{ietf_directory}/draft-with-YANG-strict/"', - type=str, - default=f'{ietf_directory}/draft-with-YANG-strict/') - parser.add_argument('--draftpathnostrict', - help='Path to the directory where to extract the drafts containing the YANG model(s) - ' - 'with xym flag strict=False. ' - f'Default is "{ietf_directory}/draft-with-YANG-no-strict/"', - type=str, - default=f'{ietf_directory}/draft-with-YANG-no-strict/') - parser.add_argument('--draftpathonlyexample', - help='Path to the directory where to extract the drafts containing examples -' - 'with xym flags strict=False and strict_examples=True. ' - f'Default is "{ietf_directory}/draft-with-YANG-example/"', - type=str, - default=f'{ietf_directory}/draft-with-YANG-example/') - parser.add_argument('--rfcyangpath', - help='Path to the directory where to extract the data models extracted from RFCs. ' - f'Default is "{ietf_directory}/YANG-rfc/"', - type=str, - default=f'{ietf_directory}/YANG-rfc/') - parser.add_argument('--rfcextractionyangpath', - help='Path to the directory where to extract ' - 'the typedef, grouping, identity from data models extracted from RFCs. ' - f'Default is "{ietf_directory}/YANG-rfc-extraction/"', - type=str, - default=f'{ietf_directory}/YANG-rfc-extraction/') - parser.add_argument('--draftelementspath', - help='Path to the directory where to extract ' - 'the typedef, grouping, identity from data models correctely extracted from drafts. ' - f'Default is "{ietf_directory}/draft-elements/"', - type=str, - default=f'{ietf_directory}/draft-elements/') - parser.add_argument('--debug', - help='Debug level - default is 0', - type=int, - default=0) + parser.add_argument('--archived', help='Extract expired drafts as well', action='store_true', default=False) + parser.add_argument( + '--yangpath', + help='Path to the directory where to extract models (only correct). ' f'Default is "{ietf_directory}/YANG/"', + type=str, + default=f'{ietf_directory}/YANG/', + ) + parser.add_argument( + '--allyangpath', + help='Path to the directory where to extract models (including bad ones). ' + f'Default is "{ietf_directory}/YANG-all/"', + type=str, + default=f'{ietf_directory}/YANG-all/', + ) + parser.add_argument( + '--allyangexamplepath', + help='Path to the directory where to extract example models ' + '(starting with example- and not with CODE BEGINS/END). ' + f'Default is "{ietf_directory}/YANG-example/"', + type=str, + default=f'{ietf_directory}/YANG-example/', + ) + parser.add_argument( + '--yangexampleoldrfcpath', + help='Path to the directory where to extract ' + 'the hardcoded YANG module example models from old RFCs (not starting with example-). ' + f'Default is "{ietf_directory}/YANG-example-old-rfc/"', + type=str, + default=f'{ietf_directory}/YANG-example-old-rfc/', + ) + parser.add_argument( + '--draftpathstrict', + help='Path to the directory where to extract the drafts containing the YANG model(s) - ' + 'with xym flag strict=True. ' + f'Default is "{ietf_directory}/draft-with-YANG-strict/"', + type=str, + default=f'{ietf_directory}/draft-with-YANG-strict/', + ) + parser.add_argument( + '--draftpathnostrict', + help='Path to the directory where to extract the drafts containing the YANG model(s) - ' + 'with xym flag strict=False. 
' + f'Default is "{ietf_directory}/draft-with-YANG-no-strict/"', + type=str, + default=f'{ietf_directory}/draft-with-YANG-no-strict/', + ) + parser.add_argument( + '--draftpathonlyexample', + help='Path to the directory where to extract the drafts containing examples -' + 'with xym flags strict=False and strict_examples=True. ' + f'Default is "{ietf_directory}/draft-with-YANG-example/"', + type=str, + default=f'{ietf_directory}/draft-with-YANG-example/', + ) + parser.add_argument( + '--rfcyangpath', + help='Path to the directory where to extract the data models extracted from RFCs. ' + f'Default is "{ietf_directory}/YANG-rfc/"', + type=str, + default=f'{ietf_directory}/YANG-rfc/', + ) + parser.add_argument( + '--rfcextractionyangpath', + help='Path to the directory where to extract ' + 'the typedef, grouping, identity from data models extracted from RFCs. ' + f'Default is "{ietf_directory}/YANG-rfc-extraction/"', + type=str, + default=f'{ietf_directory}/YANG-rfc-extraction/', + ) + parser.add_argument( + '--draftelementspath', + help='Path to the directory where to extract ' + 'the typedef, grouping, identity from data models correctely extracted from drafts. ' + f'Default is "{ietf_directory}/draft-elements/"', + type=str, + default=f'{ietf_directory}/draft-elements/', + ) + parser.add_argument('--debug', help='Debug level - default is 0', type=int, default=0) args = parser.parse_args() if args.archived: @@ -132,7 +145,7 @@ def main(): 'all_yang_example_path': args.allyangexamplepath, 'draft_path_only_example': args.draftpathonlyexample, 'all_yang_path': args.allyangpath, - 'draft_path_no_strict': args.draftpathnostrict + 'draft_path_no_strict': args.draftpathnostrict, } # ---------------------------------------------------------------------- @@ -149,14 +162,14 @@ def main(): args.draftpathnostrict, args.draftpathonlyexample, args.rfcextractionyangpath, - args.draftelementspath + args.draftelementspath, ]: remove_directory_content(dir, debug_level) # Extract YANG models from IETF RFCs files rfc_extractor = RFCExtractor(rfc_path, args.rfcyangpath, args.rfcextractionyangpath, debug_level) rfc_extractor.extract() - rfc_extractor.clean_old_RFC_YANG_modules(args.rfcyangpath, args.yangexampleoldrfcpath) + rfc_extractor.clean_old_rfc_yang_modules(args.rfcyangpath, args.yangexampleoldrfcpath) custom_print('Old examples YANG modules moved') custom_print('All IETF RFCs pre-processed') @@ -164,7 +177,8 @@ def main(): draft_extractor = DraftExtractor(draft_extractor_paths, debug_level) draft_extractor.extract() draft_extractor.dump_incorrect_drafts( - public_directory, send_emails_about_problematic_drafts=send_emails_about_problematic_drafts + public_directory, + send_emails_about_problematic_drafts=send_emails_about_problematic_drafts, ) custom_print('All IETF Drafts pre-processed') diff --git a/bin/extractors/draft_extractor.py b/bin/extractors/draft_extractor.py index 9035b51..2752a08 100755 --- a/bin/extractors/draft_extractor.py +++ b/bin/extractors/draft_extractor.py @@ -25,22 +25,21 @@ import typing as t from io import StringIO -from xym import xym - from extract_elem import extract_elem from extractors.helper import check_after_xym_extraction, invert_yang_modules_dict, remove_invalid_files from message_factory.message_factory import MessageFactory +from xym import xym class DraftExtractor: def __init__( - self, - draft_extractor_paths: dict, - debug_level: int, - extract_elements: bool = True, - extract_examples: bool = True, - copy_drafts: bool = True, - message_factory: 
t.Optional[MessageFactory] = None, + self, + draft_extractor_paths: dict, + debug_level: int, + extract_elements: bool = True, + extract_examples: bool = True, + copy_drafts: bool = True, + message_factory: t.Optional[MessageFactory] = None, ): self.draft_path = draft_extractor_paths.get('draft_path', '') self.yang_path = draft_extractor_paths.get('yang_path', '') @@ -87,7 +86,7 @@ def _create_ietf_drafts_list(self): if '' in line: self.ietf_drafts.append(filename) break - except: + except Exception: continue self.ietf_drafts.sort() print('Drafts list created') @@ -102,7 +101,12 @@ def extract_drafts(self): draft_file_path = os.path.join(self.draft_path, draft_file) # Extract the correctly formatted YANG Models into yang_path - extracted_yang_models = self.extract_from_draft_file(draft_file, self.draft_path, self.yang_path, strict=True) + extracted_yang_models = self.extract_from_draft_file( + draft_file, + self.draft_path, + self.yang_path, + strict=True, + ) if extracted_yang_models: correct = check_after_xym_extraction(draft_file, extracted_yang_models) @@ -124,8 +128,13 @@ def extract_drafts(self): # Extract the correctly formatted example YANG Models into all_yang_example_path if self.extract_examples: - extracted_yang_models = self.extract_from_draft_file(draft_file, self.draft_path, self.all_yang_example_path, - strict=True, strict_examples=True) + extracted_yang_models = self.extract_from_draft_file( + draft_file, + self.draft_path, + self.all_yang_example_path, + strict=True, + strict_examples=True, + ) if extracted_yang_models: correct = check_after_xym_extraction(draft_file, extracted_yang_models) if not correct: @@ -158,7 +167,12 @@ def extract_drafts(self): shutil.copy2(draft_file_path, self.draft_path_no_strict) def extract_from_draft_file( - self, draft_file: str, srcdir: str, dstdir: str, strict: bool = False, strict_examples: bool = False, + self, + draft_file: str, + srcdir: str, + dstdir: str, + strict: bool = False, + strict_examples: bool = False, ): extracted = [] @@ -167,9 +181,17 @@ def extract_from_draft_file( old_stderr = sys.stderr result = StringIO() sys.stderr = result - extracted = xym.xym(draft_file, srcdir, dstdir, strict=strict, strict_examples=strict_examples, - debug_level=self.debug_level, add_line_refs=False, force_revision_pyang=False, - force_revision_regexp=True) + extracted = xym.xym( + draft_file, + srcdir, + dstdir, + strict=strict, + strict_examples=strict_examples, + debug_level=self.debug_level, + add_line_refs=False, + force_revision_pyang=False, + force_revision_regexp=True, + ) result_string = result.getvalue() finally: sys.stderr = old_stderr @@ -191,7 +213,7 @@ def remove_invalid_files(self): remove_invalid_files(self.all_yang_path, self.inverted_draft_yang_all_dict) def extract_all_elements(self, extracted_yang_models: list): - """ Extract typedefs, groupings and identities from data models into .txt files. + """Extract typedefs, groupings and identities from data models into .txt files. These elements are not extracted from example models. 
""" for extracted_model in extracted_yang_models: @@ -226,7 +248,8 @@ def _send_email_about_new_problematic_drafts(self, old_incorrect_drafts: t.Itera draft_name_without_revision = re.sub(r'-\d+', '', draft_filename.split('.')[0]) author_email = f'{draft_name_without_revision}@ietf.org' self.message_factory.send_problematic_draft( - [author_email], draft_filename, errors_string, - draft_name_without_revision=draft_name_without_revision + [author_email], + draft_filename, + errors_string, + draft_name_without_revision=draft_name_without_revision, ) - diff --git a/bin/extractors/helper.py b/bin/extractors/helper.py index 1a88ce3..ae4f3ce 100755 --- a/bin/extractors/helper.py +++ b/bin/extractors/helper.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -__author__ = "Slavomir Mazur" -__copyright__ = "Copyright The IETF Trust 2021, All Rights Reserved" -__license__ = "Apache License, Version 2.0" -__email__ = "slavomir.mazur@pantheon.tech" +__author__ = 'Slavomir Mazur' +__copyright__ = 'Copyright The IETF Trust 2021, All Rights Reserved' +__license__ = 'Apache License, Version 2.0' +__email__ = 'slavomir.mazur@pantheon.tech' import glob diff --git a/bin/extractors/rfc_extractor.py b/bin/extractors/rfc_extractor.py index f5b5722..05fcd60 100755 --- a/bin/extractors/rfc_extractor.py +++ b/bin/extractors/rfc_extractor.py @@ -12,21 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -__author__ = "Slavomir Mazur" -__copyright__ = "Copyright The IETF Trust 2021, All Rights Reserved" -__license__ = "Apache License, Version 2.0" -__email__ = "slavomir.mazur@pantheon.tech" +__author__ = 'Slavomir Mazur' +__copyright__ = 'Copyright The IETF Trust 2021, All Rights Reserved' +__license__ = 'Apache License, Version 2.0' +__email__ = 'slavomir.mazur@pantheon.tech' import json import os import shutil from extract_elem import extract_elem +from extractors.helper import check_after_xym_extraction, invert_yang_modules_dict, remove_invalid_files from xym import xym -from extractors.helper import (check_after_xym_extraction, - invert_yang_modules_dict, remove_invalid_files) - class RFCExtractor: def __init__(self, rfc_path: str, rfc_yang_path: str, rfc_extraction_yang_path: str, debug_level: int): @@ -73,9 +71,17 @@ def extract_rfcs(self): self.rfc_yang_dict[rfc_file] = extracted_yang_models def extract_from_rfc_file(self, rfc_file: str): - return xym.xym(rfc_file, self.rfc_path, self.rfc_yang_path, strict=True, strict_examples=False, - debug_level=self.debug_level, add_line_refs=False, force_revision_pyang=False, - force_revision_regexp=True) + return xym.xym( + rfc_file, + self.rfc_path, + self.rfc_yang_path, + strict=True, + strict_examples=False, + debug_level=self.debug_level, + add_line_refs=False, + force_revision_pyang=False, + force_revision_regexp=True, + ) def invert_dict(self): self.inverted_rfc_yang_dict = invert_yang_modules_dict(self.rfc_yang_dict, self.debug_level) @@ -83,9 +89,10 @@ def invert_dict(self): def remove_invalid_files(self): remove_invalid_files(self.rfc_yang_path, self.inverted_rfc_yang_dict) - def clean_old_RFC_YANG_modules(self, srcdir: str, dstdir: str): + def clean_old_rfc_yang_modules(self, srcdir: str, dstdir: str): """ - Move some YANG modules, which are documented at http://www.claise.be/IETFYANGOutOfRFCNonStrictToBeCorrected.html: + Move some YANG modules, which are documented at + 
http://www.claise.be/IETFYANGOutOfRFCNonStrictToBeCorrected.html: ietf-foo@2010-01-18.yang, hw.yang, hardware-entities.yang, udmcore.yang, and ct-ipfix-psamp-example.yang Those YANG modules, from old RFCs, don't follow the example- conventions diff --git a/bin/file_hasher.py b/bin/file_hasher.py index e6b1b53..60990b9 100644 --- a/bin/file_hasher.py +++ b/bin/file_hasher.py @@ -30,7 +30,6 @@ import json import filelock - from create_config import create_config from versions import ValidatorsVersions @@ -48,7 +47,7 @@ def __init__(self, dst_dir: str = '', force_compilation: bool = False): self.updated_hashes = {} def hash_file(self, path: str): - """ Create hash from content of the given file and validators versions. + """Create hash from content of the given file and validators versions. Each time either the content of the file or the validator version change, the resulting hash will be different. @@ -68,7 +67,7 @@ def hash_file(self, path: str): return file_hash.hexdigest() def _load_hashed_files_list(self, dst_dir: str = ''): - """ Load dumped list of files content hashes from .json file. + """Load dumped list of files content hashes from .json file. Several threads can access this file at once, so locking the file while accessing is necessary. """ @@ -86,7 +85,7 @@ def _load_hashed_files_list(self, dst_dir: str = ''): return hashed_files_list def dump_hashed_files_list(self, dst_dir: str = ''): - """ Dumped updated list of files content hashes into .json file. + """Dumped updated list of files content hashes into .json file. Several threads can access this file at once, so locking the file while accessing is necessary. """ @@ -111,14 +110,13 @@ def dump_hashed_files_list(self, dst_dir: str = ''): print('Dictionary of {} hashes successfully dumped into .json file'.format(len(hash_cache))) def _get_versions(self): - """ Return encoded validators versions dictionary. - """ + """Return encoded validators versions dictionary.""" validators_versions = ValidatorsVersions() actual_versions = validators_versions.get_versions() return json.dumps(actual_versions).encode('utf-8') def should_parse(self, path: str): - """ Decide whether module at the given path should be parsed or not. + """Decide whether module at the given path should be parsed or not. Check whether file content hash has changed and keep it for the future use. Argument: diff --git a/bin/files_generator.py b/bin/files_generator.py index ec61df8..f6e0267 100755 --- a/bin/files_generator.py +++ b/bin/files_generator.py @@ -23,7 +23,6 @@ import time import HTML - from utility.utility import dict_to_list, list_br_html_addition from versions import ValidatorsVersions @@ -51,14 +50,19 @@ def write_dictionary(self, dictionary_data: dict, file_name: str): self._custom_print('{} file generated'.format(file_name)) - def generateYANGPageCompilationHTML(self, dictionary_data: dict, headers: list, file_name: str, metadata: str = ''): + def generate_yang_page_compilation_html( + self, + dictionary_data: dict, + headers: list, + file_name: str, + metadata: str = '', + ): """ Create YANGPageCompilation HTML table out of the modules compilation messages and generate a HTML file. 
Arguments: :param modules_results (list) List of the values to generate the HTML table :param headers (list) Headers list to generate the HTML table - :param HTML_filename (str) Full path to the HTML file which will be created :param metadata (str) Extra metadata text to be inserted in the generated message :return: None """ @@ -67,16 +71,16 @@ def generateYANGPageCompilationHTML(self, dictionary_data: dict, headers: list, message_html = HTML.list([generated_message]) table_html = HTML.table(modules_results, header_row=headers) file_name += 'YANGPageCompilation.html' - HTML_filename = os.path.join(self._htmlpath, file_name) + html_filename = os.path.join(self._htmlpath, file_name) - with open(HTML_filename, 'w', encoding='utf-8') as f: + with open(html_filename, 'w', encoding='utf-8') as f: f.write(message_html) f.write(table_html) - os.chmod(HTML_filename, 0o664) + os.chmod(html_filename, 0o664) self._custom_print('{} HTML page generated in directory {}'.format(file_name, self._htmlpath)) - def generateYANGPageMainHTML(self, file_name: str, stats: dict): + def generate_yang_page_main_html(self, file_name: str, stats: dict): """ Create YANGPageMain HTML with compilation results statistics and generate a HTML file. @@ -87,57 +91,73 @@ def generateYANGPageMainHTML(self, file_name: str, stats: dict): generated_message = 'Generated on {} by the YANG Catalog.'.format(time.strftime('%d/%m/%Y')) content = [ '{} YANG MODELS'.format(file_name), - 'Number of YANG data models from {} that passed compilation: {}/{}'.format(file_name, stats['passed'], stats['total']), + 'Number of YANG data models from {} that passed compilation: {}/{}'.format( + file_name, + stats['passed'], + stats['total'], + ), 'Number of YANG data models from {} that passed compilation with warnings: {}/{}'.format( - file_name, stats['warnings'], stats['total']), - 'Number of YANG data models from {} that failed compilation: {}/{}'.format(file_name, stats['failed'], stats['total']) + file_name, + stats['warnings'], + stats['total'], + ), + 'Number of YANG data models from {} that failed compilation: {}/{}'.format( + file_name, + stats['failed'], + stats['total'], + ), ] message_html = HTML.list([generated_message]) content_html = HTML.list(content) file_name += 'YANGPageMain.html' - HTML_filename = os.path.join(self._htmlpath, file_name) + html_filename = os.path.join(self._htmlpath, file_name) - with open(HTML_filename, 'w', encoding='utf-8') as f: + with open(html_filename, 'w', encoding='utf-8') as f: f.write(message_html) f.write(content_html) - os.chmod(HTML_filename, 0o664) + os.chmod(html_filename, 0o664) self._custom_print('{} HTML page generated in directory {}'.format(file_name, self._htmlpath)) - def generateIETFYANGPageMainHTML(self, drafts_stats: dict): + def generate_ietfyang_page_main_html(self, drafts_stats: dict): """ Create IETFYANGPageMain HTML with compilation results statistics of IETF YANG draft modules and generate a HTML file. Argument: - :param drafts_stats (dict) Dictionary containing number of passed, failed and total number of draft modules + :param drafts_stats (dict) Dictionary containing number of passed, failed and total number of draft modules """ generated_message = 'Generated on {} by the YANG Catalog.'.format(time.strftime('%d/%m/%Y')) - content = ['
IETF YANG MODELS
', - 'Number of correctly extracted YANG models from IETF drafts: {}'.format(drafts_stats.get('total-drafts')), - 'Number of YANG models in IETF drafts that passed compilation: {}/{}'.format( - drafts_stats.get('draft-passed'), - drafts_stats.get('total-drafts')), - 'Number of YANG models in IETF drafts that passed compilation with warnings: {}/{}'.format( - drafts_stats.get('draft-warnings'), - drafts_stats.get('total-drafts')), - 'Number of all YANG models in IETF drafts (examples, badly formatted, etc. ): {}'.format( - drafts_stats.get('all-ietf-drafts')), - 'Number of correctly extracted example YANG models from IETF drafts: {}'.format( - drafts_stats.get('example-drafts')) - ] + content = [ + '
IETF YANG MODELS
', + 'Number of correctly extracted YANG models from IETF drafts: {}'.format(drafts_stats.get('total-drafts')), + 'Number of YANG models in IETF drafts that passed compilation: {}/{}'.format( + drafts_stats.get('draft-passed'), + drafts_stats.get('total-drafts'), + ), + 'Number of YANG models in IETF drafts that passed compilation with warnings: {}/{}'.format( + drafts_stats.get('draft-warnings'), + drafts_stats.get('total-drafts'), + ), + 'Number of all YANG models in IETF drafts (examples, badly formatted, etc. ): {}'.format( + drafts_stats.get('all-ietf-drafts'), + ), + 'Number of correctly extracted example YANG models from IETF drafts: {}'.format( + drafts_stats.get('example-drafts'), + ), + ] message_html = HTML.list([generated_message]) content_html = HTML.list(content) - HTML_filename = os.path.join(self._htmlpath, 'IETFYANGPageMain.html') + html_filename = os.path.join(self._htmlpath, 'IETFYANGPageMain.html') - with open(HTML_filename, 'w', encoding='utf-8') as f: + with open(html_filename, 'w', encoding='utf-8') as f: f.write(message_html) f.write(content_html) - os.chmod(HTML_filename, 0o664) + os.chmod(html_filename, 0o664) self._custom_print('IETFYANGPageMain.html HTML page generated in directory {}'.format(self._htmlpath)) - def generateHTMLTable(self, dictionary_data: dict, headers: list): + def generate_html_table(self, dictionary_data: dict, headers: list): """ Create IETFYANGRFC HTML with links to RFC documents. @@ -150,18 +170,18 @@ def generateHTMLTable(self, dictionary_data: dict, headers: list): htmlcode = HTML.list([generated_message]) htmlcode1 = HTML.table(rfcs_list, header_row=headers) - HTML_filename = os.path.join(self._htmlpath, 'IETFYANGRFC.html') - with open(HTML_filename, 'w', encoding='utf-8') as f: + html_filename = os.path.join(self._htmlpath, 'IETFYANGRFC.html') + with open(html_filename, 'w', encoding='utf-8') as f: f.write(htmlcode) f.write(htmlcode1) - os.chmod(HTML_filename, 0o664) - self._custom_print('{} HTML page generated in directory {}'.format(HTML_filename, self._htmlpath)) + os.chmod(html_filename, 0o664) + self._custom_print('{} HTML page generated in directory {}'.format(html_filename, self._htmlpath)) # # HEADERS # - def getYANGPageCompilationHeaders(self, lint: bool): + def get_yang_page_compilation_headers(self, lint: bool): """ Create headers for YANGPageCompilation HTML table. """ @@ -170,55 +190,81 @@ def getYANGPageCompilationHeaders(self, lint: bool): else: pyang_flag = '--ietf' - return ['YANG Model', - 'Compilation', - 'Compilation Results (pyang {}). {}'.format(pyang_flag, self._versions.get('pyang_version')), - 'Compilation Results (pyang). {} {}'.format(self.__imported_note, self._versions.get('pyang_version')), - 'Compilation Results (confdc). {} {}'.format(self.__imported_note, self._versions.get('confd_version')), - 'Compilation Results (yangdump-pro). {} {}'.format(self.__imported_note, self._versions.get('yangdump_version')), - 'Compilation Results (yanglint -i). {} {}'.format(self.__imported_note, self._versions.get('yanglint_version'))] + return [ + 'YANG Model', + 'Compilation', + 'Compilation Results (pyang {}). {}'.format(pyang_flag, self._versions.get('pyang_version')), + 'Compilation Results (pyang). {} {}'.format(self.__imported_note, self._versions.get('pyang_version')), + 'Compilation Results (confdc). {} {}'.format(self.__imported_note, self._versions.get('confd_version')), + 'Compilation Results (yangdump-pro). 
{} {}'.format( + self.__imported_note, + self._versions.get('yangdump_version'), + ), + 'Compilation Results (yanglint -i). {} {}'.format( + self.__imported_note, + self._versions.get('yanglint_version'), + ), + ] - def getIETFDraftYANGPageCompilationHeaders(self): + def get_ietf_draft_yang_page_compilation_headers(self): """ Create headers for IETFDraftYANGPageCompilation HTML table. """ - return ['YANG Model', - 'Draft Name', - 'Email', - 'Download the YANG model', - 'Compilation', - 'Compilation Results (pyang --ietf). {}'.format(self._versions.get('pyang_version')), - 'Compilation Results (pyang). {} {}'.format(self.__imported_note, self._versions.get('pyang_version')), - 'Compilation Results (confdc). {} {}'.format(self.__imported_note, self._versions.get('confd_version')), - 'Compilation Results (yangdump-pro). {} {}'.format(self.__imported_note, self._versions.get('yangdump_version')), - 'Compilation Results (yanglint -i). {} {}'.format(self.__imported_note, self._versions.get('yanglint_version'))] + return [ + 'YANG Model', + 'Draft Name', + 'Email', + 'Download the YANG model', + 'Compilation', + 'Compilation Results (pyang --ietf). {}'.format(self._versions.get('pyang_version')), + 'Compilation Results (pyang). {} {}'.format(self.__imported_note, self._versions.get('pyang_version')), + 'Compilation Results (confdc). {} {}'.format(self.__imported_note, self._versions.get('confd_version')), + 'Compilation Results (yangdump-pro). {} {}'.format( + self.__imported_note, + self._versions.get('yangdump_version'), + ), + 'Compilation Results (yanglint -i). {} {}'.format( + self.__imported_note, + self._versions.get('yanglint_version'), + ), + ] - def getIETFDraftExampleYANGPageCompilationHeaders(self): + def get_ietf_draft_example_yang_page_compilation_headers(self): """ Create headers for IETFDraftExampleYANGPageCompilation HTML table. """ - return ['YANG Model', - 'Draft Name', - 'Email', - 'Compilation', - 'Compilation Results (pyang --ietf). {}'.format(self._versions.get('pyang_version')), - 'Compilation Results (pyang). {} {}'.format(self.__imported_note, self._versions.get('pyang_version'))] + return [ + 'YANG Model', + 'Draft Name', + 'Email', + 'Compilation', + 'Compilation Results (pyang --ietf). {}'.format(self._versions.get('pyang_version')), + 'Compilation Results (pyang). {} {}'.format(self.__imported_note, self._versions.get('pyang_version')), + ] - def getIETFCiscoAuthorsYANGPageCompilationHeaders(self): + def get_ietf_cisco_authors_yang_page_compilation_headers(self): """ Create headers for IETFCiscoAuthorsYANGPageCompilation HTML table. """ - return ['YANG Model', - 'Draft Name', - 'All Authors Email', - 'Only Cisco Email', - 'Download the YANG model', - 'Compilation', - 'Compilation Results (pyang --ietf). {}'.format(self._versions.get('pyang_version')), - 'Compilation Results (pyang). {} {}'.format(self.__imported_note, self._versions.get('pyang_version')), - 'Compilation Results (confdc). {} {}'.format(self.__imported_note, self._versions.get('confd_version')), - 'Compilation Results (yangdump-pro). {} {}'.format(self.__imported_note, self._versions.get('yangdump_version')), - 'Compilation Results (yanglint -i). {} {}'.format(self.__imported_note, self._versions.get('yanglint_version'))] + return [ + 'YANG Model', + 'Draft Name', + 'All Authors Email', + 'Only Cisco Email', + 'Download the YANG model', + 'Compilation', + 'Compilation Results (pyang --ietf). {}'.format(self._versions.get('pyang_version')), + 'Compilation Results (pyang). 
{} {}'.format(self.__imported_note, self._versions.get('pyang_version')), + 'Compilation Results (confdc). {} {}'.format(self.__imported_note, self._versions.get('confd_version')), + 'Compilation Results (yangdump-pro). {} {}'.format( + self.__imported_note, + self._versions.get('yangdump_version'), + ), + 'Compilation Results (yanglint -i). {} {}'.format( + self.__imported_note, + self._versions.get('yanglint_version'), + ), + ] # # HELPERS diff --git a/bin/gather_ietf_dependent_modules.py b/bin/gather_ietf_dependent_modules.py index 8512111..a5b9ba7 100644 --- a/bin/gather_ietf_dependent_modules.py +++ b/bin/gather_ietf_dependent_modules.py @@ -22,14 +22,13 @@ from typing import Set import requests - from create_config import create_config ORGANIZATIONS = ['ieee', 'ietf'] def copy_modules(api_prefix: str, src_dir: str, dst_dir: str) -> Set[str]: - """ Get the list of ietf modules from API + """Get the list of ietf modules from API and copy them from 'src_dir' to 'dst_dir' directory. Arguments: diff --git a/bin/get_config.py b/bin/get_config.py index 1245c15..41f9dd4 100755 --- a/bin/get_config.py +++ b/bin/get_config.py @@ -13,9 +13,9 @@ # either express or implied. __author__ = 'Eric Vyncke' -__copyright__ = "Copyright(c) 2018, Cisco Systems, Inc., Copyright The IETF Trust 2019, All Rights Reserved" -__license__ = "Apache V2.0" -__email__ = "evyncke@cisco.com" +__copyright__ = 'Copyright(c) 2018, Cisco Systems, Inc., Copyright The IETF Trust 2019, All Rights Reserved' +__license__ = 'Apache V2.0' +__email__ = 'evyncke@cisco.com' """ Extract a single value out of the main /etc/yangcatalog/yangcatalog.conf file @@ -27,21 +27,17 @@ if __name__ == '__main__': parser = argparse.ArgumentParser(description='Extract the value for a single key from a configuration file') - parser.add_argument('--config', - help='Path to the config file ' - 'Default is {}'.format(os.environ['YANGCATALOG_CONFIG_PATH']), - type=str, - default=os.environ['YANGCATALOG_CONFIG_PATH']) - parser.add_argument('--section', - help='Mandatory configuration section.', - type=str) - parser.add_argument('--key', - help='Mandatory key to search.', - type=str) + parser.add_argument( + '--config', + help='Path to the config file ' 'Default is {}'.format(os.environ['YANGCATALOG_CONFIG_PATH']), + type=str, + default=os.environ['YANGCATALOG_CONFIG_PATH'], + ) + parser.add_argument('--section', help='Mandatory configuration section.', type=str) + parser.add_argument('--key', help='Mandatory key to search.', type=str) args = parser.parse_args() config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation()) config.read(args.config) print(config.get(args.section, args.key)) - diff --git a/bin/job_log.py b/bin/job_log.py index 3301e86..ec2f1eb 100644 --- a/bin/job_log.py +++ b/bin/job_log.py @@ -22,8 +22,15 @@ from create_config import create_config -def job_log(start_time: int, end_time: int, temp_dir: str, filename: str, - messages: list = [], error: str = '', status: str = ''): +def job_log( + start_time: int, + end_time: int, + temp_dir: str, + filename: str, + messages: list = [], + error: str = '', + status: str = '', +): result = {} result['start'] = start_time result['end'] = end_time @@ -46,7 +53,7 @@ def job_log(start_time: int, end_time: int, temp_dir: str, filename: str, try: previous_state = file_content[filename] last_successfull = previous_state.get('last_successfull') - except: + except KeyError: last_successfull = None result['last_successfull'] = last_successfull @@ -60,26 +67,10 @@ def 
job_log(start_time: int, end_time: int, temp_dir: str, filename: str, config = create_config() temp_dir = config.get('Directory-Section', 'temp') parser = argparse.ArgumentParser() - parser.add_argument('--start', - help='Cronjob start time', - type=int, - default=0, - required=True) - parser.add_argument('--end', - help='Cronjob end time', - type=int, - default=0, - required=True) - parser.add_argument('--status', - help='Result of cronjob run', - type=str, - default='Fail', - required=True) - parser.add_argument('--filename', - help='Name of job', - type=str, - default='', - required=True) + parser.add_argument('--start', help='Cronjob start time', type=int, default=0, required=True) + parser.add_argument('--end', help='Cronjob end time', type=int, default=0, required=True) + parser.add_argument('--status', help='Result of cronjob run', type=str, default='Fail', required=True) + parser.add_argument('--filename', help='Name of job', type=str, default='', required=True) args = parser.parse_args() job_log(int(args.start), int(args.end), temp_dir, args.filename, status=args.status) diff --git a/bin/message_factory/message_factory.py b/bin/message_factory/message_factory.py index bb62484..2a2bf29 100644 --- a/bin/message_factory/message_factory.py +++ b/bin/message_factory/message_factory.py @@ -33,10 +33,10 @@ class MessageFactory: """This class serves to automatically email a group of admin/developers.""" def __init__( - self, - config_path=os.environ['YANGCATALOG_CONFIG_PATH'], - close_connection_after_message_sending: bool = True, - redis_user_notifications_connection: t.Optional[RedisUserNotificationsConnection] = None, + self, + config_path=os.environ['YANGCATALOG_CONFIG_PATH'], + close_connection_after_message_sending: bool = True, + redis_user_notifications_connection: t.Optional[RedisUserNotificationsConnection] = None, ): config = create_config(config_path) self._email_from = config.get('Message-Section', 'email-from') @@ -50,7 +50,7 @@ def __init__( self._smtp = smtplib.SMTP('localhost') self._close_connection_after_message_sending = close_connection_after_message_sending self._redis_user_notifications_connection = ( - redis_user_notifications_connection or RedisUserNotificationsConnection(config=config) + redis_user_notifications_connection or RedisUserNotificationsConnection(config=config) ) def __del__(self): @@ -60,7 +60,8 @@ def __del__(self): def send_missing_modules(self, modules_list: list, incorrect_revision_modules: list): message = 'Following modules extracted from drafts are missing in YANG Catalog:\n' path = os.path.join( - self._temp_dir, 'drafts-missing-modules/yangmodels/yang/experimental/ietf-extracted-YANG-modules', + self._temp_dir, + 'drafts-missing-modules/yangmodels/yang/experimental/ietf-extracted-YANG-modules', ) for module in modules_list: message += f'{module}\n' @@ -74,22 +75,23 @@ def send_missing_modules(self, modules_list: list, incorrect_revision_modules: l self._post_to_email(message, self._developers_email) def send_problematic_draft( - self, - email_to: list[str], - draft_filename: str, - errors: str, - draft_name_without_revision: t.Optional[str] = None + self, + email_to: list[str], + draft_filename: str, + errors: str, + draft_name_without_revision: t.Optional[str] = None, ): subject = f'{GREETINGS}, "{draft_filename}" had errors during an extraction' errors = errors.replace('\n', '
<br>') message = f'During a daily check of IETF drafts, some errors were found in "{draft_filename}":<br><br>
{errors}' draft_filename_without_format = draft_filename.split('.')[0] draft_name_without_revision = ( - draft_name_without_revision if draft_name_without_revision else - re.sub(r'-\d+', '', draft_filename_without_format) + draft_name_without_revision + if draft_name_without_revision + else re.sub(r'-\d+', '', draft_filename_without_format) ) unsubscribed_emails = self._redis_user_notifications_connection.get_unsubscribed_emails( - draft_name_without_revision + draft_name_without_revision, ) email_to = [email for email in email_to if email not in unsubscribed_emails] message_subtype = 'html' @@ -106,17 +108,17 @@ def send_problematic_draft( self._post_to_email(message, email_to=[email], subject=subject, subtype=message_subtype) def _post_to_email( - self, - message: str, - email_to: t.Optional[list] = None, - subject: t.Optional[str] = None, - subtype: str = 'plain', + self, + message: str, + email_to: t.Optional[list] = None, + subject: t.Optional[str] = None, + subtype: str = 'plain', ): """Send message to the list of e-mails. - Arguments: - :param message (str) message to send - :param email_to (list) list of emails to send the message to + Arguments: + :param message (str) message to send + :param email_to (list) list of emails to send the message to """ send_to = email_to or self._email_to newline_character = '
' if subtype == 'html' else '\n' diff --git a/bin/metadata_generators/__init__.py b/bin/metadata_generators/__init__.py index 5039380..325ad61 100644 --- a/bin/metadata_generators/__init__.py +++ b/bin/metadata_generators/__init__.py @@ -15,4 +15,4 @@ __author__ = 'Richard Zilincik' __copyright__ = 'Copyright The IETF Trust 2022, All Rights Reserved' __license__ = 'Apache License, Version 2.0' -__email__ = 'richard.zilincik@pantheon.tech' \ No newline at end of file +__email__ = 'richard.zilincik@pantheon.tech' diff --git a/bin/metadata_generators/base_metadata_generator.py b/bin/metadata_generators/base_metadata_generator.py index 8e89fd4..6044c67 100644 --- a/bin/metadata_generators/base_metadata_generator.py +++ b/bin/metadata_generators/base_metadata_generator.py @@ -2,8 +2,7 @@ class BaseMetadataGenerator: - - def __init__(self, compilation_results: dict, compilation_status:str, yang_file: str, document_dict: dict): + def __init__(self, compilation_results: dict, compilation_status: str, yang_file: str, document_dict: dict): self.compilation_results = compilation_results self.compilation_status = compilation_status self.yang_file_name = os.path.basename(yang_file) @@ -13,4 +12,4 @@ def get_confd_metadata(self): return {'compilation-status': self.compilation_status} def get_file_compilation(self): - return [self.compilation_status, *[result for result in self.compilation_results.values()]] + return [self.compilation_status, *list(self.compilation_results.values())] diff --git a/bin/metadata_generators/draft_metadata_generator.py b/bin/metadata_generators/draft_metadata_generator.py index 106a6d4..5beafad 100644 --- a/bin/metadata_generators/draft_metadata_generator.py +++ b/bin/metadata_generators/draft_metadata_generator.py @@ -4,7 +4,6 @@ from extract_emails import extract_email_string from metadata_generators.base_metadata_generator import BaseMetadataGenerator - config = create_config() ietf_directory = config.get('Directory-Section', 'ietf-directory') @@ -14,7 +13,7 @@ class DraftMetadataGenerator(BaseMetadataGenerator): draft_path = config.get('Directory-Section', 'ietf-drafts') web_url = config.get('Web-Section', 'my-uri') - def __init__(self, compilation_results: dict, compilation_status:str, yang_file: str, document_dict: dict): + def __init__(self, compilation_results: dict, compilation_status: str, yang_file: str, document_dict: dict): super().__init__(compilation_results, compilation_status, yang_file, document_dict) self.document_name = self.document_dict[self.yang_file_name] draft_name = self.document_name.split('.')[0] @@ -25,13 +24,12 @@ def __init__(self, compilation_results: dict, compilation_status:str, yang_file: self.draft_url_anchor = '{}'.format(self.datatracker_url, self.document_name) self.email_anchor = 'Email Authors'.format(self.mailto) - def get_confd_metadata(self): return { 'compilation-status': self.compilation_status, 'reference': self.datatracker_url, 'document-name': self.document_name, - 'author-email': self.mailto + 'author-email': self.mailto, } def get_file_compilation(self): @@ -43,8 +41,14 @@ def get_file_compilation(self): cisco_email_anchor = 'Email Cisco Authors Only'.format(draft_emails) yang_model_url = '{}/YANG-modules/{}'.format(self.web_url, self.yang_file_name) yang_model_anchor = 'Download the YANG model'.format(yang_model_url) - return [self.draft_url_anchor, self.email_anchor, cisco_email_anchor, yang_model_anchor, self.compilation_status, - *[result for result in self.compilation_results.values()]] + return [ + 
self.draft_url_anchor, + self.email_anchor, + cisco_email_anchor, + yang_model_anchor, + self.compilation_status, + *list(self.compilation_results.values()), + ] class ArchivedMetadataGenerator(DraftMetadataGenerator): diff --git a/bin/metadata_generators/example_metadata_generator.py b/bin/metadata_generators/example_metadata_generator.py index 78d4ca4..9816924 100644 --- a/bin/metadata_generators/example_metadata_generator.py +++ b/bin/metadata_generators/example_metadata_generator.py @@ -2,10 +2,13 @@ class ExampleMetadataGenerator(DraftMetadataGenerator): - def get_confd_metadata(self): return {} def get_file_compilation(self): - return [self.draft_url_anchor, self.email_anchor, self.compilation_status, - *[result for result in self.compilation_results.values()]] + return [ + self.draft_url_anchor, + self.email_anchor, + self.compilation_status, + *list(self.compilation_results.values()), + ] diff --git a/bin/metadata_generators/rfc_metadata_generator.py b/bin/metadata_generators/rfc_metadata_generator.py index 1f0bdd7..e81ee2f 100644 --- a/bin/metadata_generators/rfc_metadata_generator.py +++ b/bin/metadata_generators/rfc_metadata_generator.py @@ -2,7 +2,6 @@ class RfcMetadataGenerator(BaseMetadataGenerator): - def get_confd_metadata(self): document_name = self.document_dict[self.yang_file_name] rfc_name = document_name.split('.')[0] @@ -11,5 +10,5 @@ def get_confd_metadata(self): 'compilation-status': self.compilation_status, 'reference': datatracker_url, 'document-name': document_name, - 'author-email': None + 'author-email': None, } diff --git a/bin/parsers/confdc_parser.py b/bin/parsers/confdc_parser.py index fc6d46c..512efb1 100755 --- a/bin/parsers/confdc_parser.py +++ b/bin/parsers/confdc_parser.py @@ -31,7 +31,9 @@ def __init__(self, debug_level: int = 0, config: ConfigParser = create_config()) self._debug_level = debug_level self._symlink_paths = self.get_symlink_paths() - self._tail_warning = '-w TAILF_MUST_NEED_DEPENDENCY' # Treat ErrorCode as a warning, even if --fail-onwarnings is given + self._tail_warning = ( + '-w TAILF_MUST_NEED_DEPENDENCY' # Treat ErrorCode as a warning, even if --fail-onwarnings is given + ) def run_confdc(self, yang_file_path: str, rootdir: str, allinclusive: bool = False): """ diff --git a/bin/parsers/pyang_parser.py b/bin/parsers/pyang_parser.py index 9a09572..fd240e5 100755 --- a/bin/parsers/pyang_parser.py +++ b/bin/parsers/pyang_parser.py @@ -34,7 +34,12 @@ def __init__(self, debug_level: int = 0, config: ConfigParser = create_config()) ] def run_pyang( - self, rootdir: str, yang_file_path: str, lint: bool, allinclusive: bool, use_pyang_params: bool = True + self, + rootdir: str, + yang_file_path: str, + lint: bool, + allinclusive: bool, + use_pyang_params: bool = True, ) -> str: """ Run PYANG parser on the YANG model, with or without the --lint flag. 
diff --git a/bin/parsers/yang_parser.py b/bin/parsers/yang_parser.py index 2c9a413..987c504 100644 --- a/bin/parsers/yang_parser.py +++ b/bin/parsers/yang_parser.py @@ -24,14 +24,13 @@ import os import typing as t +from create_config import create_config from pyang.context import Context from pyang.error import error_codes from pyang.repository import FileRepository from pyang.statements import Statement from pyang.yang_parser import YangParser -from create_config import create_config - DEFAULT_OPTIONS = { 'path': [], 'deviations': [], @@ -64,7 +63,7 @@ """copy options to pyang context options""" -class objectify(object): # pylint: disable=invalid-name +class Objectify(object): # pylint: disable=invalid-name """Utility for providing object access syntax (.attr) to dicts""" features: list @@ -83,7 +82,7 @@ def __setattr__(self, attr, value): class OptsContext(Context): - opts: objectify + opts: Objectify def _parse_features_string(feature_str: str) -> t.Tuple[str, t.List[str]]: @@ -146,7 +145,7 @@ def create_context(path: str = '.') -> OptsContext: """ # deviations (list): Deviation module (NOT CURRENTLY WORKING). - opts = objectify(DEFAULT_OPTIONS) + opts = Objectify(DEFAULT_OPTIONS) repo = FileRepository(path, no_path_recurse=opts.no_path_recurse) ctx = OptsContext(repo) @@ -171,7 +170,6 @@ def create_context(path: str = '.') -> OptsContext: class ParseException(Exception): - def __init__(self, path: t.Optional[str]): if path is not None: config = create_config() diff --git a/bin/parsers/yangdump_pro_parser.py b/bin/parsers/yangdump_pro_parser.py index a816c22..e17c462 100755 --- a/bin/parsers/yangdump_pro_parser.py +++ b/bin/parsers/yangdump_pro_parser.py @@ -21,7 +21,7 @@ def _remove_duplicate_messages(result: str, module_name: str) -> str: - """ Same result messages are often found in the compilation result multiple times. + """Same result messages are often found in the compilation result multiple times. This method filter out duplicate messages. """ splitted_result = result.split('\n\n') @@ -29,7 +29,7 @@ def _remove_duplicate_messages(result: str, module_name: str) -> str: # NOTE - WORKAROUND: remove 'iana-if-type@2021-06-21.yang:128.3: warning(1054): Revision date has already been used' # from most compilation results - # This can be removed in the future with the release of 'iana-if-type' revision + # This can be removed in the future with the release of 'iana-if-type' revision # that will PASS the compilation. final_result = [] for result in unique_results_list: @@ -66,7 +66,7 @@ def run_yumadumppro(self, yang_file_path: str, workdir: str, allinclusive: bool if self._debug_level > 0: print('DEBUG: running command {}'.format(' '.join(bash_command))) - # Modify command output + # Modify command output try: result_yumadump = os.popen(' '.join(bash_command)).read() result_yumadump = result_yumadump.strip() diff --git a/bin/parsers/yanglint_parser.py b/bin/parsers/yanglint_parser.py index 3df1a90..ba60721 100755 --- a/bin/parsers/yanglint_parser.py +++ b/bin/parsers/yanglint_parser.py @@ -24,7 +24,7 @@ def _remove_duplicate_messages(result: str) -> str: - """ Same result messages are often found in the compilation result multiple times. + """Same result messages are often found in the compilation result multiple times. This method filter out duplicate messages. 
""" splitted_result = result.split('\n\n') diff --git a/bin/private_page.py b/bin/private_page.py index a607572..016976b 100755 --- a/bin/private_page.py +++ b/bin/private_page.py @@ -22,7 +22,6 @@ import typing as t import jinja2 - from create_config import create_config @@ -30,10 +29,12 @@ def alnum(s: str): return ''.join(c for c in s if c.isalnum()) -def get_vendor_context(directory: str, - get_alphaNumeric: t.Callable[[str, str], str], - get_allCharacters: t.Callable[[str, str], str], - separate: bool = False): +def get_vendor_context( + directory: str, + get_alpha_numeric: t.Callable[[str, str], str], + get_all_characters: t.Callable[[str, str], str], + separate: bool = False, +): operating_systems = [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))] separate_contexts = {} vendor_context = [] @@ -41,12 +42,16 @@ def get_vendor_context(directory: str, os_dir = os.path.join(directory, operating_system) os_specific_dirs = [name for name in os.listdir(os_dir) if os.path.isdir(os.path.join(os_dir, name))] for os_specific_dir in os_specific_dirs: - vendor_context.append({'alphaNumeric': get_alphaNumeric(operating_system, os_specific_dir), - 'allCharacters': get_allCharacters(operating_system, os_specific_dir)}) + vendor_context.append( + { + 'alphaNumeric': get_alpha_numeric(operating_system, os_specific_dir), + 'allCharacters': get_all_characters(operating_system, os_specific_dir), + }, + ) if separate: separate_contexts[operating_system.upper()] = sorted(vendor_context, key=lambda i: i['alphaNumeric']) vendor_context.clear() - + if separate: return separate_contexts return sorted(vendor_context, key=lambda i: i['alphaNumeric']) @@ -57,14 +62,18 @@ def get_etsi_context(etsi_dir): etsi_context = [] for etsi_version in etsi_all_versions: etsi_context.append( - {'alphaNumeric': alnum(etsi_version.strip('NFV-SOL006-v')), - 'allCharacters': etsi_version.strip('NFV-SOL006-v')}) + { + 'alphaNumeric': alnum(etsi_version.strip('NFV-SOL006-v')), + 'allCharacters': etsi_version.strip('NFV-SOL006-v'), + }, + ) return sorted(etsi_context, key=lambda i: i['alphaNumeric']) def get_openroadm_context(openroadm_files): - return [{'alphaNumeric': specific_version, 'allCharacters': specific_version} - for specific_version in openroadm_files] + return [ + {'alphaNumeric': specific_version, 'allCharacters': specific_version} for specific_version in openroadm_files + ] def render(tpl_path: str, context: dict): @@ -77,18 +86,12 @@ def render(tpl_path: str, context: dict): """ path, filename = os.path.split(tpl_path) - return jinja2.Environment( - loader=jinja2.FileSystemLoader(path or './') - ).get_template(filename).render(context) + return jinja2.Environment(loader=jinja2.FileSystemLoader(path or './')).get_template(filename).render(context) def main(): parser = argparse.ArgumentParser(description='Generate yangcatalog main private page.') - parser.add_argument('--openRoadM', - help='List of openRoadM files', - type=str, - nargs='*', - default=[]) + parser.add_argument('--openRoadM', help='List of openRoadM files', type=str, nargs='*', default=[]) args = parser.parse_args() config = create_config() @@ -108,32 +111,32 @@ def main(): cisco_dir, lambda _, os_specific_dir: alnum(os_specific_dir), lambda _, os_specific_dir: os_specific_dir, - separate=True + separate=True, ) context.update(cisco_contexts) context['juniper'] = get_vendor_context( juniper_dir, lambda _, os_specific_dir: alnum(os_specific_dir), - lambda _, os_specific_dir: os_specific_dir + lambda _, 
os_specific_dir: os_specific_dir, ) context['huawei'] = get_vendor_context( huawei_dir, lambda os_name, os_specific_dir: alnum('{}{}'.format(os_name, os_specific_dir)), - lambda os_name, os_specific_dir: '{} {}'.format(os_name, os_specific_dir) + lambda os_name, os_specific_dir: '{} {}'.format(os_name, os_specific_dir), ) context['fujitsu'] = get_vendor_context( fujitsu_dir, lambda os_name, os_specific_dir: alnum('{}{}'.format(os_name, os_specific_dir)), - lambda os_name, os_specific_dir: '{}{}'.format(os_name, os_specific_dir) + lambda os_name, os_specific_dir: '{}{}'.format(os_name, os_specific_dir), ) context['nokia'] = get_vendor_context( nokia_dir, lambda _, os_specific_dir: alnum(os_specific_dir.strip('latest_sros_')), - lambda _, os_specific_dir: os_specific_dir.strip('latest_sros_') + lambda _, os_specific_dir: os_specific_dir.strip('latest_sros_'), ) context['etsi'] = get_etsi_context(etsi_dir) @@ -146,17 +149,35 @@ def main(): with open(os.path.join(private_dir, 'index.html'), 'w') as writer: writer.write(result) with open(os.path.join(private_dir, 'private.json'), 'w') as writer: - context['graphs-cisco-authors'] = ['IETFCiscoAuthorsYANGPageCompilation.json', - 'figures/IETFCiscoAuthorsYANGPageCompilation.png', - 'figures/IETFYANGOutOfRFC.png', - 'figures/IETFYANGPageCompilation.png'] - context['sdo-stats'] = ['IETFDraft.json', 'IETFDraftExample.json', 'IETFYANGRFC.json', 'RFCStandard.json', - 'BBF.json', 'MEFStandard.json', 'MEFExperimental.json', 'IEEEStandard.json', - 'IEEEStandardDraft.json', 'IANAStandard.json', 'SysrepoInternal.json', - 'SysrepoApplication.json', 'ONFOpenTransport.json', 'Openconfig.json'] - context['dependency-graph'] = ['figures/modules-ietf.png', 'figures/modules-all.png', - 'figures/ietf-interfaces.png', 'figures/ietf-interfaces-all.png', - 'figures/ietf-routing.png'] + context['graphs-cisco-authors'] = [ + 'IETFCiscoAuthorsYANGPageCompilation.json', + 'figures/IETFCiscoAuthorsYANGPageCompilation.png', + 'figures/IETFYANGOutOfRFC.png', + 'figures/IETFYANGPageCompilation.png', + ] + context['sdo-stats'] = [ + 'IETFDraft.json', + 'IETFDraftExample.json', + 'IETFYANGRFC.json', + 'RFCStandard.json', + 'BBF.json', + 'MEFStandard.json', + 'MEFExperimental.json', + 'IEEEStandard.json', + 'IEEEStandardDraft.json', + 'IANAStandard.json', + 'SysrepoInternal.json', + 'SysrepoApplication.json', + 'ONFOpenTransport.json', + 'Openconfig.json', + ] + context['dependency-graph'] = [ + 'figures/modules-ietf.png', + 'figures/modules-all.png', + 'figures/ietf-interfaces.png', + 'figures/ietf-interfaces-all.png', + 'figures/ietf-routing.png', + ] json.dump(context, writer) diff --git a/bin/redis_connections/redis_connection.py b/bin/redis_connections/redis_connection.py index 85f9472..43c79b4 100644 --- a/bin/redis_connections/redis_connection.py +++ b/bin/redis_connections/redis_connection.py @@ -25,7 +25,6 @@ class RedisConnection: - def __init__(self, modules_db: int = 1): config = create_config() self._redis_host = config.get('DB-Section', 'redis-host') diff --git a/bin/redis_connections/redis_user_notifications_connection.py b/bin/redis_connections/redis_user_notifications_connection.py index 7c11d58..57b6a05 100644 --- a/bin/redis_connections/redis_user_notifications_connection.py +++ b/bin/redis_connections/redis_user_notifications_connection.py @@ -1,9 +1,8 @@ import typing as t from configparser import ConfigParser -from redis import Redis - from create_config import create_config +from redis import Redis class RedisUserNotificationsConnection: diff --git 
a/bin/remove_directory_content.py b/bin/remove_directory_content.py index 877593b..0c9df5f 100755 --- a/bin/remove_directory_content.py +++ b/bin/remove_directory_content.py @@ -50,14 +50,8 @@ def remove_directory_content(directory: str, debug_level: int = 0) -> None: if __name__ == '__main__': parser = argparse.ArgumentParser(description='Remove directory content') - parser.add_argument('--dir', - help='Directory the content of which to remove', - type=str, - default='') - parser.add_argument('--debug', - help='Debug level - default is 0', - type=int, - default=0) + parser.add_argument('--dir', help='Directory the content of which to remove', type=str, default='') + parser.add_argument('--debug', help='Debug level - default is 0', type=int, default=0) args = parser.parse_args() if args.debug > 0: diff --git a/bin/rename_file_backup.py b/bin/rename_file_backup.py index 48f11f9..ceca3d1 100755 --- a/bin/rename_file_backup.py +++ b/bin/rename_file_backup.py @@ -27,7 +27,7 @@ def rename_file_backup(src_dir: str, backup_dir: str, debug_level: int = 0) -> None: - """ Backup each of the files by renaming them with the current timestamp appended to the file name. + """Backup each of the files by renaming them with the current timestamp appended to the file name. Arguments: :param src_dir (str) Directory where the files to backup are stored @@ -44,11 +44,18 @@ def rename_file_backup(src_dir: str, backup_dir: str, debug_level: int = 0) -> N print('Unable to create directory: {}'.format(backup_dir)) return - files_to_backup = ['IETFYANGPageMain.html', 'IETFCiscoAuthorsYANGPageCompilation.html', - 'IETFYANGOutOfRFC.html', 'IETFDraftYANGPageCompilation.html', - 'IEEEStandardYANGPageCompilation.html', 'IEEEStandardDraftYANGPageCompilation.html', - 'IANAStandardYANGPageCompilation.html', 'IEEEExperimentalYANGPageCompilation.html', - 'YANGPageMain.html', 'IETFYANGRFC.html'] + files_to_backup = [ + 'IETFYANGPageMain.html', + 'IETFCiscoAuthorsYANGPageCompilation.html', + 'IETFYANGOutOfRFC.html', + 'IETFDraftYANGPageCompilation.html', + 'IEEEStandardYANGPageCompilation.html', + 'IEEEStandardDraftYANGPageCompilation.html', + 'IANAStandardYANGPageCompilation.html', + 'IEEEExperimentalYANGPageCompilation.html', + 'YANGPageMain.html', + 'IETFYANGRFC.html', + ] for filename in files_to_backup: name, extension = filename.split('.') full_path_file = os.path.join(src_dir, filename) @@ -56,7 +63,7 @@ def rename_file_backup(src_dir: str, backup_dir: str, debug_level: int = 0) -> N print('*** file {} not present!'.format(full_path_file)) continue modified_time = os.path.getmtime(full_path_file) - timestamp = (datetime.fromtimestamp(modified_time).strftime("%Y_%m_%d")) + timestamp = datetime.fromtimestamp(modified_time).strftime('%Y_%m_%d') if name == 'IETFYANGRFC': name = 'IETFYANGOutOfRFC' new_filename = '{}_{}.{}'.format(name, timestamp, extension) @@ -73,18 +80,14 @@ def rename_file_backup(src_dir: str, backup_dir: str, debug_level: int = 0) -> N web_private = config.get('Web-Section', 'private-directory') backup_directory = config.get('Directory-Section', 'backup') parser = argparse.ArgumentParser(description='Append creation timestamps to filenames') - parser.add_argument('--srcdir', - help='Directory the content of which to remove', - type=str, - default=web_private) - parser.add_argument('--backupdir', - help='Directory the content of which to remove', - type=str, - default=backup_directory) - parser.add_argument('--debug', - help='Debug level - default is 0', - type=int, - default=0) + 
parser.add_argument('--srcdir', help='Directory the content of which to remove', type=str, default=web_private) + parser.add_argument( + '--backupdir', + help='Directory the content of which to remove', + type=str, + default=backup_directory, + ) + parser.add_argument('--debug', help='Debug level - default is 0', type=int, default=0) args = parser.parse_args() rename_file_backup(args.srcdir, args.backupdir, args.debug) diff --git a/bin/resources/HTML/HTML.py b/bin/resources/HTML/HTML.py index 8dcec9c..2f2717a 100644 --- a/bin/resources/HTML/HTML.py +++ b/bin/resources/HTML/HTML.py @@ -13,10 +13,10 @@ """ __version__ = '0.04' -__date__ = '2009-07-28' -__author__ = 'Philippe Lagadec' +__date__ = '2009-07-28' +__author__ = 'Philippe Lagadec' -#--- LICENSE ------------------------------------------------------------------ +# --- LICENSE ------------------------------------------------------------------ # Copyright Philippe Lagadec - see http://www.decalage.info/contact for contact info # @@ -52,7 +52,7 @@ # knowledge of the CeCILL license and that you accept its terms. -#--- CHANGES ------------------------------------------------------------------ +# --- CHANGES ------------------------------------------------------------------ # 2008-10-06 v0.01 PL: - First version # 2008-10-13 v0.02 PL: - added cellspacing and cellpadding to table @@ -63,8 +63,8 @@ # 2009-07-28 v0.04 PL: - improved column styles, workaround for Mozilla -#------------------------------------------------------------------------------- -#TODO: +# ------------------------------------------------------------------------------- +# TODO: # - method to return a generator (yield each row) instead of a single string # - unicode support (input and output) # - escape text in cells (optional) @@ -73,11 +73,11 @@ # - add classes/functions to generate a HTML page, paragraphs, headings, etc... -#--- THANKS -------------------------------------------------------------------- +# --- THANKS -------------------------------------------------------------------- # - Michal Cernoevic, for the idea of column styles. -#--- REFERENCES ---------------------------------------------------------------- +# --- REFERENCES ---------------------------------------------------------------- # HTML 4.01 specs: http://www.w3.org/TR/html4/struct/tables.html @@ -89,16 +89,19 @@ # pyright: reportGeneralTypeIssues=false, reportOptionalMemberAccess=false # pyright: reportUndefinedVariable=false -#--- CONSTANTS ----------------------------------------------------------------- +# --- CONSTANTS ----------------------------------------------------------------- # Table style to get thin black lines in Mozilla/Firefox instead of 3D borders -TABLE_STYLE_THINBORDER = "border: 1px solid #000000; border-collapse: collapse;" -#TABLE_STYLE_THINBORDER = "border: 1px solid #000000;" +TABLE_STYLE_THINBORDER = 'border: 1px solid #000000; border-collapse: collapse;' -#=== CLASSES =================================================================== +# TABLE_STYLE_THINBORDER = 'border: 1px solid #000000;" -class TableCell (object): + +# === CLASSES =================================================================== + + +class TableCell(object): """ a TableCell object is used to create a cell in a HTML table. 
(TD or TH) @@ -118,33 +121,50 @@ class TableCell (object): Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.6 """ - def __init__(self, text="", bgcolor=None, header=False, width=None, - align=None, char=None, charoff=None, valign=None, style=None, - attribs=None): + def __init__( + self, + text='', + bgcolor=None, + header=False, + width=None, + align=None, + char=None, + charoff=None, + valign=None, + style=None, + attribs=None, + ): """TableCell constructor""" - self.text = text + self.text = text self.bgcolor = bgcolor - self.header = header - self.width = width - self.align = align - self.char = char + self.header = header + self.width = width + self.align = align + self.char = char self.charoff = charoff - self.valign = valign - self.style = style + self.valign = valign + self.style = style self.attribs = attribs - if attribs==None: + if attribs is None: self.attribs = {} def __str__(self): """return the HTML code for the table cell as a string""" - attribs_str = "" - if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor - if self.width: self.attribs['width'] = self.width - if self.align: self.attribs['align'] = self.align - if self.char: self.attribs['char'] = self.char - if self.charoff: self.attribs['charoff'] = self.charoff - if self.valign: self.attribs['valign'] = self.valign - if self.style: self.attribs['style'] = self.style + attribs_str = '' + if self.bgcolor: + self.attribs['bgcolor'] = self.bgcolor + if self.width: + self.attribs['width'] = self.width + if self.align: + self.attribs['align'] = self.align + if self.char: + self.attribs['char'] = self.char + if self.charoff: + self.attribs['charoff'] = self.charoff + if self.valign: + self.attribs['valign'] = self.valign + if self.style: + self.attribs['style'] = self.style for attr in self.attribs: attribs_str += ' %s="%s"' % (attr, self.attribs[attr]) if self.text: @@ -157,9 +177,11 @@ def __str__(self): else: return ' %s\n' % (attribs_str, text) -#------------------------------------------------------------------------------- -class TableRow (object): +# ------------------------------------------------------------------------------- + + +class TableRow(object): """ a TableRow object is used to create a row in a HTML table. 
(TR tag) @@ -174,52 +196,64 @@ class TableRow (object): Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.5 """ - def __init__(self, cells=None, bgcolor=None, header=False, attribs=None, - col_align=None, col_valign=None, col_char=None, - col_charoff=None, col_styles=None): + def __init__( + self, + cells=None, + bgcolor=None, + header=False, + attribs=None, + col_align=None, + col_valign=None, + col_char=None, + col_charoff=None, + col_styles=None, + ): """TableCell constructor""" - self.bgcolor = bgcolor - self.cells = cells - self.header = header - self.col_align = col_align - self.col_valign = col_valign - self.col_char = col_char + self.bgcolor = bgcolor + self.cells = cells + self.header = header + self.col_align = col_align + self.col_valign = col_valign + self.col_char = col_char self.col_charoff = col_charoff - self.col_styles = col_styles - self.attribs = attribs - if attribs==None: + self.col_styles = col_styles + self.attribs = attribs + if attribs is None: self.attribs = {} def __str__(self): """return the HTML code for the table row as a string""" - attribs_str = "" - if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor + attribs_str = '' + if self.bgcolor: + self.attribs['bgcolor'] = self.bgcolor for attr in self.attribs: attribs_str += ' %s="%s"' % (attr, self.attribs[attr]) result = ' \n' % attribs_str for cell in self.cells: - col = self.cells.index(cell) # cell column index + col = self.cells.index(cell) # cell column index if not isinstance(cell, TableCell): cell = TableCell(cell, header=self.header) # apply column alignment if specified: - if self.col_align and cell.align==None: + if self.col_align and cell.align is None: cell.align = self.col_align[col] - if self.col_char and cell.char==None: + if self.col_char and cell.char is None: cell.char = self.col_char[col] - if self.col_charoff and cell.charoff==None: + if self.col_charoff and cell.charoff is None: cell.charoff = self.col_charoff[col] - if self.col_valign and cell.valign==None: + if self.col_valign and cell.valign is None: cell.valign = self.col_valign[col] # apply column style if specified: - if self.col_styles and cell.style==None: + if self.col_styles and cell.style is None: cell.style = self.col_styles[col] result += str(cell) result += ' \n' return result -#------------------------------------------------------------------------------- -class Table (object): +# ------------------------------------------------------------------------------- + + +class Table(object): """ a Table object is used to create a HTML table. 
(TABLE tag) @@ -241,38 +275,59 @@ class Table (object): Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.1 """ - def __init__(self, rows=None, border='1', style=None, width=None, - cellspacing=None, cellpadding=4, attribs=None, header_row=None, - col_width=None, col_align=None, col_valign=None, - col_char=None, col_charoff=None, col_styles=None): + def __init__( + self, + rows=None, + border='1', + style=None, + width=None, + cellspacing=None, + cellpadding=4, + attribs=None, + header_row=None, + col_width=None, + col_align=None, + col_valign=None, + col_char=None, + col_charoff=None, + col_styles=None, + ): """TableCell constructor""" self.border = border self.style = style # style for thin borders by default - if style == None: self.style = TABLE_STYLE_THINBORDER - self.width = width + if style is None: + self.style = TABLE_STYLE_THINBORDER + self.width = width self.cellspacing = cellspacing self.cellpadding = cellpadding - self.header_row = header_row - self.rows = rows - if not rows: self.rows = [] - self.attribs = attribs - if not attribs: self.attribs = {} - self.col_width = col_width - self.col_align = col_align - self.col_char = col_char + self.header_row = header_row + self.rows = rows + if not rows: + self.rows = [] + self.attribs = attribs + if not attribs: + self.attribs = {} + self.col_width = col_width + self.col_align = col_align + self.col_char = col_char self.col_charoff = col_charoff - self.col_valign = col_valign - self.col_styles = col_styles + self.col_valign = col_valign + self.col_styles = col_styles def __str__(self): """return the HTML code for the table as a string""" - attribs_str = "" - if self.border: self.attribs['border'] = self.border - if self.style: self.attribs['style'] = self.style - if self.width: self.attribs['width'] = self.width - if self.cellspacing: self.attribs['cellspacing'] = self.cellspacing - if self.cellpadding: self.attribs['cellpadding'] = self.cellpadding + attribs_str = '' + if self.border: + self.attribs['border'] = self.border + if self.style: + self.attribs['style'] = self.style + if self.width: + self.attribs['width'] = self.width + if self.cellspacing: + self.attribs['cellspacing'] = self.cellspacing + if self.cellpadding: + self.attribs['cellpadding'] = self.cellpadding for attr in self.attribs: attribs_str += ' %s="%s"' % (attr, self.attribs[attr]) result = '\n' % attribs_str @@ -284,27 +339,27 @@ def __str__(self): # and alignement according to HTML4 specs, # BUT it is not supported completely (only width) on Mozilla Firefox: # see https://bugzilla.mozilla.org/show_bug.cgi?id=915 -## n_cols = max(len(self.col_styles), len(self.col_width), -## len(self.col_align), len(self.col_valign)) -## for i in range(n_cols): -## col = '' -## try: -## if self.col_styles[i]: -## col += ' style="%s"' % self.col_styles[i] -## except: pass -## try: -## if self.col_width[i]: -## col += ' width="%s"' % self.col_width[i] -## except: pass -## try: -## if self.col_align[i]: -## col += ' align="%s"' % self.col_align[i] -## except: pass -## try: -## if self.col_valign[i]: -## col += ' valign="%s"' % self.col_valign[i] -## except: pass -## result += '\n' % col + # n_cols = max(len(self.col_styles), len(self.col_width), + # len(self.col_align), len(self.col_valign)) + # for i in range(n_cols): + # col = '' + # try: + # if self.col_styles[i]: + # col += ' style="%s"' % self.col_styles[i] + # except: pass + # try: + # if self.col_width[i]: + # col += ' width="%s"' % self.col_width[i] + # except: pass + # try: + # if self.col_align[i]: + # col 
+= ' align="%s"' % self.col_align[i] + # except: pass + # try: + # if self.col_valign[i]: + # col += ' valign="%s"' % self.col_valign[i] + # except: pass + # result += '\n' % col # First insert a header row if specified: if self.header_row: if not isinstance(self.header_row, TableRow): @@ -332,9 +387,10 @@ def __str__(self): return result -#------------------------------------------------------------------------------- +# ------------------------------------------------------------------------------- + -class List (object): +class List(object): """ a List object is used to create an ordered or unordered list in HTML. (UL/OL tag) @@ -362,12 +418,15 @@ def __init__(self, lines=None, ordered=False, start=None, attribs=None): def __str__(self): """return the HTML code for the list as a string""" - attribs_str = "" - if self.start: self.attribs['start'] = self.start + attribs_str = '' + if self.start: + self.attribs['start'] = self.start for attr in self.attribs: attribs_str += ' %s="%s"' % (attr, self.attribs[attr]) - if self.ordered: tag = 'OL' - else: tag = 'UL' + if self.ordered: + tag = 'OL' + else: + tag = 'UL' result = '<%s%s>\n' % (tag, attribs_str) for line in self.lines: result += '
  • %s\n' % str(line) @@ -375,55 +434,56 @@ def __str__(self): return result -##class Link (object): -## """ -## a Link object is used to create link in HTML. ( tag) -## -## Attributes: -## - text: str, text of the link -## - url: str, URL of the link -## - attribs: dict, additional attributes for the A tag -## -## Reference: http://www.w3.org/TR/html4 -## """ -## -## def __init__(self, text, url=None, attribs=None): -## """Link constructor""" -## self.text = text -## self.url = url -## if attribs: -## self.attribs = attribs -## else: -## self.attribs = {} -## -## def __str__(self): -## """return the HTML code for the link as a string""" -## attribs_str = "" -## if self.url: self.attribs['href'] = self.url -## for attr in self.attribs: -## attribs_str += ' %s="%s"' % (attr, self.attribs[attr]) -## return '%s' % (attribs_str, text) - - -#=== FUNCTIONS ================================================================ +# class Link (object): +# """ +# a Link object is used to create link in HTML. ( tag) +# +# Attributes: +# - text: str, text of the link +# - url: str, URL of the link +# - attribs: dict, additional attributes for the A tag +# +# Reference: http://www.w3.org/TR/html4 +# """ +# +# def __init__(self, text, url=None, attribs=None): +# """Link constructor""" +# self.text = text +# self.url = url +# if attribs: +# self.attribs = attribs +# else: +# self.attribs = {} +# +# def __str__(self): +# """return the HTML code for the link as a string""" +# attribs_str = "" +# if self.url: self.attribs['href'] = self.url +# for attr in self.attribs: +# attribs_str += ' %s="%s"' % (attr, self.attribs[attr]) +# return '%s' % (attribs_str, text) + + +# === FUNCTIONS ================================================================ # much simpler definition of a link as a function: -def Link(text, url): - return '%s' % (url, text) + def link(text, url): return '%s' % (url, text) + def table(*args, **kwargs): - 'return HTML code for a table as a string. See Table class for parameters.' + """return HTML code for a table as a string. See Table class for parameters.""" return str(Table(*args, **kwargs)) + def list(*args, **kwargs): - 'return HTML code for a list as a string. See List class for parameters.' + """return HTML code for a list as a string. See List class for parameters.""" return str(List(*args, **kwargs)) -#=== MAIN ===================================================================== +# === MAIN ===================================================================== # Show sample usage when this file is launched as a script. @@ -437,59 +497,57 @@ def list(*args, **kwargs): t.rows.append(TableRow(['D', 'E', 'F'])) t.rows.append(('i', 'j', 'k')) f.write(str(t) + '

    \n') - print (str(t)) - print ('-'*79) - - t2 = Table([ - ('1', '2'), - ['3', '4'] - ], width='100%', header_row=('col1', 'col2'), - col_width=('', '75%')) + print(str(t)) + print('-' * 79) + + t2 = Table([('1', '2'), ['3', '4']], width='100%', header_row=('col1', 'col2'), col_width=('', '75%')) f.write(str(t2) + '

    \n') - print (t2) - print ('-'*79) + print(t2) + print('-' * 79) t2.rows.append(['5', '6']) t2.rows[1][1] = TableCell('new', bgcolor='red') t2.rows.append(TableRow(['7', '8'], attribs={'align': 'center'})) f.write(str(t2) + '

    \n') - print (t2) - print ('-'*79) + print(t2) + print('-' * 79) # sample table with column attributes and styles: table_data = [ - ['Smith', 'John', 30, 4.5], - ['Carpenter', 'Jack', 47, 7], - ['Johnson', 'Paul', 62, 10.55], - ] - htmlcode = HTML.table(table_data, - header_row = ['Last name', 'First name', 'Age', 'Score'], + ['Smith', 'John', 30, 4.5], + ['Carpenter', 'Jack', 47, 7], + ['Johnson', 'Paul', 62, 10.55], + ] + htmlcode = table( + table_data, + header_row=['Last name', 'First name', 'Age', 'Score'], col_width=['', '20%', '10%', '10%'], col_align=['left', 'center', 'right', 'char'], - col_styles=['font-size: large', '', 'font-size: small', 'background-color:yellow']) + col_styles=['font-size: large', '', 'font-size: small', 'background-color:yellow'], + ) f.write(htmlcode + '

    \n') - print (htmlcode) - print ('-'*79) + print(htmlcode) + print('-' * 79) def gen_table_squares(n): """ Generator to create table rows for integers from 1 to n """ -## # First, header row: -## yield TableRow(('x', 'square(x)'), header=True, bgcolor='blue') -## # Then all rows: - for x in range(1, n+1): - yield (x, x*x) + # First, header row: + # yield TableRow(('x', 'square(x)'), header=True, bgcolor='blue') + # Then all rows: + for x in range(1, n + 1): + yield (x, x * x) t = Table(rows=gen_table_squares(10), header_row=('x', 'square(x)')) f.write(str(t) + '

    \n') - print ('-'*79) - l = List(['aaa', 'bbb', 'ccc']) + print('-' * 79) + l = List(['aaa', 'bbb', 'ccc']) # noqa: E741 f.write(str(l) + '

    \n') l.ordered = True f.write(str(l) + '

    \n') - l.start=10 + l.start = 10 f.write(str(l) + '

    \n') f.close() diff --git a/bin/resources/HTML/setup.py b/bin/resources/HTML/setup.py index 4853c70..ea6f061 100644 --- a/bin/resources/HTML/setup.py +++ b/bin/resources/HTML/setup.py @@ -5,6 +5,7 @@ # pyright: reportGeneralTypeIssues=false import distutils.core + import HTML DESCRIPTION = """A Python module to easily generate HTML code (tables, lists, ...). @@ -12,15 +13,15 @@ """ kw = { - 'name': "HTML.py", + 'name': 'HTML.py', 'version': HTML.__version__, 'description': DESCRIPTION, - 'author': "Philippe Lagadec", - 'author_email': "decalage (a) laposte.net", - 'url': "http://www.decalage.info/python/html", - 'license': "CeCILL (open-source GPL compatible)", - 'py_modules': ['HTML'] - } + 'author': 'Philippe Lagadec', + 'author_email': 'decalage (a) laposte.net', + 'url': 'http://www.decalage.info/python/html', + 'license': 'CeCILL (open-source GPL compatible)', + 'py_modules': ['HTML'], +} # If we're running Python 2.3+, add extra information @@ -28,16 +29,16 @@ if 'classifiers' in distutils.core.setup_keywords: kw['classifiers'] = [ 'Development Status :: 4 - Beta', - #'License :: Freely Distributable', + # 'License :: Freely Distributable', 'Natural Language :: English', 'Intended Audience :: Developers', 'Topic :: Internet :: WWW/HTTP', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Topic :: Software Development :: Libraries :: Python Modules' - ] + 'Topic :: Software Development :: Libraries :: Python Modules', + ] if 'download_url' in distutils.core.setup_keywords: - kw['download_url'] = "http://www.decalage.info/python/html" + kw['download_url'] = 'http://www.decalage.info/python/html' distutils.core.setup(**kw) diff --git a/bin/symd.py b/bin/symd.py index bc88c91..24ce441 100755 --- a/bin/symd.py +++ b/bin/symd.py @@ -8,30 +8,34 @@ # and is available at http://www.eclipse.org/legal/epl-v10.html ############################################################################## from __future__ import print_function # Must be at the beginning of the file -import matplotlib as mpl -mpl.use('Agg') # To prevent using a X-Windows server -import matplotlib.pyplot as plt -import math -import networkx as nx + import argparse import glob -import sys import os import re +import sys + +import matplotlib as mpl +import matplotlib.pyplot as plt +import networkx as nx + +mpl.use('Agg') # To prevent using a X-Windows server -__author__ = "Jan Medved, Eric Vyncke" -__copyright__ = "Copyright(c) 2015, Cisco Systems, Inc., Copyright The IETF Trust 2019, All Rights Reserved" -__license__ = "Eclipse Public License v1.0" -__email__ = "jmedved@cisco.com, evyncke@cisco.com" +__author__ = 'Jan Medved, Eric Vyncke' +__copyright__ = 'Copyright(c) 2015, Cisco Systems, Inc., Copyright The IETF Trust 2019, All Rights Reserved' +__license__ = 'Eclipse Public License v1.0' +__email__ = 'jmedved@cisco.com, evyncke@cisco.com' G = nx.DiGraph() # Regular expressions for parsing yang files; we are only interested in # the 'module', 'import' and 'revision' statements -MODULE_STATEMENT = re.compile(r'''^[ \t]*(sub)?module +(["'])?([-A-Za-z0-9]*(@[0-9-]*)?)(["'])?.*$''') -IMPORT_STATEMENT = re.compile(r'''^[ \t]*import[\s]*([-A-Za-z0-9]*)?[\s]*\{([\s]*prefix[\s]*[\S]*;[\s]*})?.*$''') -INCLUDE_STATEMENT = re.compile(r'''^[ \t]*include[\s]*([-A-Za-z0-9]*)?[\s]*\{.*$''') -REVISION_STATEMENT = re.compile(r'''^[ \t]*revision[\s]*(['"])?([-0-9]*)?(['"])?[\s]*\{.*$''') +MODULE_STATEMENT = re.compile(r'''^[ \t]*(sub)?module +(["'])?([-A-Za-z0-9]*(@[0-9-]*)?)(["'])?.*$''') # noqa: Q001 
+IMPORT_STATEMENT = re.compile( + r'''^[ \t]*import[\s]*([-A-Za-z0-9]*)?[\s]*\{([\s]*prefix[\s]*[\S]*;[\s]*})?.*$''', # noqa: Q001 +) +INCLUDE_STATEMENT = re.compile(r'''^[ \t]*include[\s]*([-A-Za-z0-9]*)?[\s]*\{.*$''') # noqa: Q001 +REVISION_STATEMENT = re.compile(r'''^[ \t]*revision[\s]*(['"])?([-0-9]*)?(['"])?[\s]*\{.*$''') # noqa: Q001 # Node Attribute Types # All those attributes do not fare well in network 2.* @@ -52,7 +56,7 @@ def warning(s): :param s: The warning string to print :return: None """ - print("WARNING: %s" % s, file=sys.stderr) + print('WARNING: %s' % s, file=sys.stderr) def error(s): @@ -61,7 +65,7 @@ def error(s): :param s: The error string to print :return: None """ - print("ERROR: %s" % s, file=sys.stderr) + print('ERROR: %s' % s, file=sys.stderr) def get_local_yang_files(local_repos, recurse=False): @@ -173,7 +177,7 @@ def prune_graph_nodes(graph, tag): for node_name in graph.nodes(): try: if graph.nodes[node_name]['tag'] == tag: -# if graph.nodes[node_name]['attr_dict'][TAG_ATTR] == tag: + # if graph.nodes[node_name]['attr_dict'][TAG_ATTR] == tag: node_list.append(node_name) except KeyError: pass @@ -221,7 +225,7 @@ def print_impacting_modules(single_node=None): """ print('\n===Impacting Modules===') for node_name in G.nodes(): - if single_node and (node_name!=single_node): + if single_node and (node_name != single_node): continue descendants = nx.descendants(G, node_name) print(augment_format_string(node_name, '\n%s:') % node_name) @@ -239,7 +243,7 @@ def augment_format_string(node_name, fmts): :return: Augmented format string """ module_tag = G.nodes[node_name]['tag'] -# module_tag = G.nodes[node_name]['attr_dict'][TAG_ATTR] + # module_tag = G.nodes[node_name]['attr_dict'][TAG_ATTR] if module_tag == RFC_TAG: return fmts + ' *' if module_tag == UNKNOWN_TAG: @@ -257,7 +261,7 @@ def print_impacted_modules(single_node=None): """ print('\n===Impacted Modules===') for node_name in G.nodes(): - if single_node and (node_name!=single_node): + if single_node and (node_name != single_node): continue ancestors = nx.ancestors(G, node_name) if len(ancestors) > 0: @@ -322,7 +326,7 @@ def print_dependency_tree(): print('\n=== Module Dependency Trees ===') for node_name in G.nodes(): if G.nodes[node_name]['tag'] != UNKNOWN_TAG: -# if G.nodes[node_name]['attr_dict'][TAG_ATTR] != UNKNOWN_TAG: + # if G.nodes[node_name]['attr_dict'][TAG_ATTR] != UNKNOWN_TAG: dg = nx.dfs_successors(G, node_name) plist = [] print(augment_format_string(node_name, '\n%s:') % node_name) @@ -365,20 +369,18 @@ def init(rfc_repos, draft_repos, recurse=False): :return: None """ rfc_yang_files = get_local_yang_files(rfc_repos, recurse) - print("\n*** Scanning %d RFC yang module files for 'import' and 'revision' statements..." - % len(rfc_yang_files)) + print("\n*** Scanning %d RFC yang module files for 'import' and 'revision' statements..." % len(rfc_yang_files)) get_yang_modules(rfc_yang_files, RFC_TAG) num_rfc_modules = len(G.nodes()) print('\n*** Found %d RFC yang modules.' % num_rfc_modules) draft_yang_files = get_local_yang_files(draft_repos, recurse) - print("\n*** Scanning %d draft yang module files for 'import' and 'revision' statements..." % - len(draft_yang_files)) + print("\n*** Scanning %d draft yang module files for 'import' and 'revision' statements..." % len(draft_yang_files)) get_yang_modules(draft_yang_files, DRAFT_TAG) num_draft_modules = len(G.nodes()) - num_rfc_modules print('\n*** Found %d draft yang modules.' 
% num_draft_modules) - print("\n*** Analyzing imports...") + print('\n*** Analyzing imports...') get_unknown_modules() num_unknown_modules = len(G.nodes()) - (num_rfc_modules + num_draft_modules) print('\n*** Found %d imported/included yang modules that were scanned.' % num_unknown_modules) @@ -389,7 +391,7 @@ def init(rfc_repos, draft_repos, recurse=False): def plot_module_dependency_graph(graph): -#def plot_module_dependency_graph(graph, node): + # def plot_module_dependency_graph(graph, node): """ Plot a graph of specified yang modules. this function is used to plot both the full dependency graph of all yang modules in the DB, or a @@ -397,21 +399,48 @@ def plot_module_dependency_graph(graph): :param graph: Graph to be plotted :return: None """ -# fixed_positions = {node: [0.5, 0.5] } -# print(fixed_positions) + # fixed_positions = {node: [0.5, 0.5] } + # print(fixed_positions) pos = nx.spring_layout(graph, iterations=50, center=[0.5, 0.5], weight=2, k=0.6) -#EVY pos = nx.spring_layout(graph, iterations=2000, threshold=1e-5, fixed=fixed_positions, k=k, center=[0.5, 0.5]) -# pos = nx.spring_layout(graph, iterations=2000, threshold=1e-6) + # EVY pos = nx.spring_layout(graph, iterations=2000, threshold=1e-5, fixed=fixed_positions, k=k, center=[0.5, 0.5]) + # pos = nx.spring_layout(graph, iterations=2000, threshold=1e-6) print(pos) # Draw RFC nodes (yang modules) in red - nx.draw_networkx_nodes(graph, pos=pos, nodelist=prune_graph_nodes(graph, RFC_TAG), node_size=200, - node_shape='s', node_color='red', alpha=0.5, linewidths=0.25 ,label='RFC') + nx.draw_networkx_nodes( + graph, + pos=pos, + nodelist=prune_graph_nodes(graph, RFC_TAG), + node_size=200, + node_shape='s', + node_color='red', + alpha=0.5, + linewidths=0.25, + label='RFC', + ) # Draw draft nodes (yang modules) in green - nx.draw_networkx_nodes(graph, pos=pos, nodelist=prune_graph_nodes(graph, DRAFT_TAG), node_size=150, - node_shape='o', node_color='green', alpha=0.5, linewidths=0.25, label='Draft') + nx.draw_networkx_nodes( + graph, + pos=pos, + nodelist=prune_graph_nodes(graph, DRAFT_TAG), + node_size=150, + node_shape='o', + node_color='green', + alpha=0.5, + linewidths=0.25, + label='Draft', + ) # Draw unknown nodes (yang modules) in green - nx.draw_networkx_nodes(graph, pos=pos, nodelist=prune_graph_nodes(graph, UNKNOWN_TAG), node_size=200, - node_shape='^', node_color='orange', alpha=1.0, linewidths=0.25, label='Unknown') + nx.draw_networkx_nodes( + graph, + pos=pos, + nodelist=prune_graph_nodes(graph, UNKNOWN_TAG), + node_size=200, + node_shape='^', + node_color='orange', + alpha=1.0, + linewidths=0.25, + label='Unknown', + ) # Draw edges in light gray (fairly transparent) nx.draw_networkx_edges(graph, pos=pos, alpha=0.25, arrows=False) # Draw labels on nodes (modules) @@ -430,46 +459,60 @@ def plot_module_dependency_graph(graph): plt.interactive(False) parser = argparse.ArgumentParser(description='Show the dependency graph for a set of yang models.') - parser.add_argument('--draft-repos', - help='List of local directories where models defined in IETF drafts are located.', - type=str, - nargs='+', - default=['./']) - parser.add_argument('--rfc-repos', - help='List of local directories where models defined in IETF RFC are located.', - type=str, - nargs='+', - default=['./']) - parser.add_argument('-r', '--recurse', - help='Recurse into directories specified to find yang models', - action='store_true', - default=False) + parser.add_argument( + '--draft-repos', + help='List of local directories where models defined in IETF 
drafts are located.', + type=str, + nargs='+', + default=['./'], + ) + parser.add_argument( + '--rfc-repos', + help='List of local directories where models defined in IETF RFC are located.', + type=str, + nargs='+', + default=['./'], + ) + parser.add_argument( + '-r', + '--recurse', + help='Recurse into directories specified to find yang models', + action='store_true', + default=False, + ) g = parser.add_mutually_exclusive_group() - g.add_argument('--graph', - help='Plot the overall dependency graph.', - action='store_true', - default=False) - g.add_argument('--sub-graphs', - help='Plot the dependency graphs for the specified modules.', - type=str, - nargs='+', - default=[]) - g.add_argument('--impact-analysis', - help='For each scanned yang module, print the impacting and impacted modules.', - action='store_true', - default=False) - g.add_argument('--single-impact-analysis', - help='For a single yang module, print the impacting and impacted modules', - type=str) - g.add_argument('--dependency-tree', - help='For each scanned yang module, print its dependency tree to stdout ' - '(i.e. show all the modules that it depends on).', - action='store_true', - default=False) - g.add_argument('--single-dependency-tree', - help='For a single yang module, print its dependency tree to stdout ' - '(i.e. show all the modules that it depends on).', - type=str) + g.add_argument('--graph', help='Plot the overall dependency graph.', action='store_true', default=False) + g.add_argument( + '--sub-graphs', + help='Plot the dependency graphs for the specified modules.', + type=str, + nargs='+', + default=[], + ) + g.add_argument( + '--impact-analysis', + help='For each scanned yang module, print the impacting and impacted modules.', + action='store_true', + default=False, + ) + g.add_argument( + '--single-impact-analysis', + help='For a single yang module, print the impacting and impacted modules', + type=str, + ) + g.add_argument( + '--dependency-tree', + help='For each scanned yang module, print its dependency tree to stdout ' + '(i.e. show all the modules that it depends on).', + action='store_true', + default=False, + ) + g.add_argument( + '--single-dependency-tree', + help='For a single yang module, print its dependency tree to stdout ' + '(i.e. show all the modules that it depends on).', + type=str, + ) args = parser.parse_args() init(args.rfc_repos, args.draft_repos, recurse=args.recurse) @@ -498,23 +541,23 @@ def plot_module_dependency_graph(graph): plot_num += 1 print('Plotting the overall dependency graph...') plot_module_dependency_graph(ng) - plt.savefig("modules.png") + plt.savefig('modules.png') print(' Done.') for node in args.sub_graphs: # Set matplotlib into non-interactive mode plt.interactive(False) plt.figure(plot_num, figsize=(20, 20)) -# plt.figure(plot_num, figsize=(40, 40)) + # plt.figure(plot_num, figsize=(40, 40)) plot_num += 1 - print("Plotting graph for module '%s'..." % node) + print(f'Plotting graph for module \'{node}\'...') try: # EVY: do we need this extract argument ??? 
-# plot_module_dependency_graph(get_subgraph_for_node(node), node) + # plot_module_dependency_graph(get_subgraph_for_node(node), node) plot_module_dependency_graph(get_subgraph_for_node(node)) - plt.savefig("%s.png" % node) + plt.savefig('%s.png' % node) print(' Done.') except nx.NetworkXError as e: - print(" %s" % e) + print(' %s' % e) print('\n') diff --git a/bin/utility/pyang_plugin/basic_info.py b/bin/utility/pyang_plugin/basic_info.py index 448c73f..554d052 100644 --- a/bin/utility/pyang_plugin/basic_info.py +++ b/bin/utility/pyang_plugin/basic_info.py @@ -27,7 +27,6 @@ def pyang_plugin_init(): class BasicInfoPlugin(plugin.PyangPlugin): - def add_output_format(self, fmts): self.multiple_modules = True fmts['basic-info'] = self diff --git a/bin/utility/pyang_plugin/json_tree.py b/bin/utility/pyang_plugin/json_tree.py index b36ad3d..f98957c 100644 --- a/bin/utility/pyang_plugin/json_tree.py +++ b/bin/utility/pyang_plugin/json_tree.py @@ -29,7 +29,6 @@ def pyang_plugin_init(): class JSONTreePlugin(plugin.PyangPlugin): - def add_output_format(self, fmts): self.multiple_modules = True fmts['json-tree'] = self @@ -59,8 +58,7 @@ def emit_tree(modules, fd, ctx): mod_out['name'] = module.arg mod_out['type'] = module.keyword - chs = [ch for ch in module.i_children - if ch.keyword in statements.data_definition_keywords] + chs = [ch for ch in module.i_children if ch.keyword in statements.data_definition_keywords] mod_out['children'] = get_children(chs, module, mod_out['prefix'], ctx) mods = [module] @@ -73,15 +71,13 @@ def emit_tree(modules, fd, ctx): for augment in m.search('augment'): try: hasattr(augment.i_target_node, 'i_module') - except: + except Exception: continue aug = {} aug['augment_children'] = [] aug['augment_path'] = augment.arg - if (hasattr(augment.i_target_node, 'i_module') and - augment.i_target_node.i_module not in modules + mods): - aug['augment_children'].extend(get_children( - augment.i_children, module, ' ', ctx)) + if hasattr(augment.i_target_node, 'i_module') and augment.i_target_node.i_module not in modules + mods: + aug['augment_children'].extend(get_children(augment.i_children, module, ' ', ctx)) maugs.append(aug) if len(maugs) > 0: @@ -201,7 +197,7 @@ def get_flags(s): return flags elif s.keyword == 'notification': return flags - elif s.i_config == True: + elif s.i_config is True: flags['config'] = True else: flags['config'] = False @@ -217,36 +213,30 @@ def get_typename(s): def json_escape(s): - return s.replace("\\", r"\\").replace("\n", r"\n").replace("\t", r"\t").replace("\"", r"\"") + return s.replace('\\', r'\\').replace('\n', r'\n').replace('\t', r'\t').replace('\"', r'\"') def typestring(node): - def get_nontypedefstring(node): s = {} - found = False t = node.search_one('type') if t is not None: s['type'] = t.arg if t.arg == 'enumeration': - found = True s['enumeration'] = [] for enums in t.substmts: s['enumeration'].append(enums.arg) elif t.arg == 'leafref': - found = True p = t.search_one('path') if p is not None: s['path'] = p.arg elif t.arg == 'identityref': - found = True b = t.search_one('base') if b is not None: s['base'] = b.arg elif t.arg == 'union': - found = True uniontypes = t.search('type') s['union'] = [uniontypes[0].arg] for uniontype in uniontypes[1:]: @@ -254,16 +244,13 @@ def get_nontypedefstring(node): typerange = t.search_one('range') if typerange is not None: - found = True s['type_range'] = typerange.arg length = t.search_one('length') if length is not None: - found = True s['length'] = length.arg pattern = t.search_one('pattern') if 
pattern is not None: - found = True s['pattern'] = json_escape(pattern.arg) return s @@ -272,10 +259,8 @@ def get_nontypedefstring(node): if len(s) != 0: t = node.search_one('type') # chase typedef - type_namespace = None - i_type_name = None name = t.arg - if name.find(":") == -1: + if name.find(':') == -1: prefix = None else: [prefix, name] = name.split(':', 1) @@ -286,12 +271,11 @@ def get_nontypedefstring(node): else: # this is a prefixed name, check the imported modules err = [] - pmodule = util.prefix_to_module( - t.i_module, prefix, t.pos, err) + pmodule = util.prefix_to_module(t.i_module, prefix, t.pos, err) if pmodule is None: return typedef = statements.search_typedef(pmodule, name) - if typedef != None: + if typedef is not None: s['typedef'] = get_nontypedefstring(typedef) return s @@ -323,19 +307,22 @@ def action_params(action): s['out'].append(o.arg) return s + def mk_path_str(s, with_prefixes=False): """Returns the XPath path of the node""" + def name(s): if with_prefixes: if len(s.keyword) == 2: - return s.keyword[0] + ":" + s.arg + "?" + s.keyword[1] - return s.i_module.i_prefix + ":" + s.arg + "?" + s.keyword + return s.keyword[0] + ':' + s.arg + '?' + s.keyword[1] + return s.i_module.i_prefix + ':' + s.arg + '?' + s.keyword else: return s.arg + if s.parent.keyword in ['module', 'submodule']: - return "/" + name(s) + return '/' + name(s) elif s.keyword in ['choice', 'case']: return mk_path_str(s.parent, with_prefixes) else: p = mk_path_str(s.parent, with_prefixes) - return p + "/" + name(s) + return p + '/' + name(s) diff --git a/bin/utility/static_variables.py b/bin/utility/static_variables.py index 430b8d9..135aae1 100644 --- a/bin/utility/static_variables.py +++ b/bin/utility/static_variables.py @@ -45,7 +45,7 @@ 'ietf-yang-patch@2017-02-22.yang': 'NETCONF', 'ietf-yang-smiv2@2012-06-22.yang': 'NETMOD', 'ietf-yang-types@2010-09-24.yang': 'NETMOD', - 'ietf-yang-types@2013-07-15.yang': 'NETMOD' + 'ietf-yang-types@2013-07-15.yang': 'NETMOD', } NAMESPACE_MAP = ( @@ -59,10 +59,22 @@ ('https://www.broadband-forum.org', 'bbf'), ('http://ackl.io/', 'acklio'), ('http://ni.com/ns', 'ni'), - ('http://metroethernetforum.org/ns', 'mef') + ('http://metroethernetforum.org/ns', 'mef'), ) ORGANIZATIONS = [ - 'ietf', 'ieee', 'etsi', 'bbf', 'openconfig', 'tail-f', 'opendaylight', - 'ciena', 'cisco', 'fujitsu', 'huawei', 'juniper', 'nokia', 'acklio' + 'ietf', + 'ieee', + 'etsi', + 'bbf', + 'openconfig', + 'tail-f', + 'opendaylight', + 'ciena', + 'cisco', + 'fujitsu', + 'huawei', + 'juniper', + 'nokia', + 'acklio', ] diff --git a/bin/utility/utility.py b/bin/utility/utility.py index b78e12d..ee6c157 100644 --- a/bin/utility/utility.py +++ b/bin/utility/utility.py @@ -23,15 +23,14 @@ import time import typing as t from datetime import date -from enum import auto, Enum +from enum import Enum, auto import dateutil.parser import jinja2 -from pyang.statements import Statement -from utility.static_variables import IETF_RFC_MAP, NAMESPACE_MAP, ORGANIZATIONS - from parsers import yang_parser +from pyang.statements import Statement from redis_connections.redis_connection import RedisConnection +from utility.static_variables import IETF_RFC_MAP, NAMESPACE_MAP, ORGANIZATIONS from versions import ValidatorsVersions module_db = None @@ -79,8 +78,8 @@ def module_or_submodule(yang_file_path: str) -> t.Optional[str]: def dict_to_list(in_dict: dict, is_rfc: bool = False) -> list[list]: - """ Create a list out of compilation results from 'in_dict' dictionary variable. 
- First element of each list is name of the module, second one is compilation-status + """Create a list out of compilation results from 'in_dict' dictionary variable. + First element of each list is name of the module, second one is compilation-status which is followed by compilation-results. Argument: @@ -95,7 +94,7 @@ def dict_to_list(in_dict: dict, is_rfc: bool = False) -> list[list]: def list_br_html_addition(modules_list: list): - """ Replace the newlines ( \n ) by the
    HTML tag throughout the list. + """Replace the newlines ( \n ) by the
    HTML tag throughout the list. Argument: :param modules_list (list) List of lists of compilation results @@ -138,12 +137,12 @@ def namespace_to_organization(namespace: str) -> str: def check_yangcatalog_data( - config: configparser.ConfigParser, - yang_file_pseudo_path: str, - new_module_data: dict, - compilation_results: dict, - all_modules: t.Dict[str, dict], - ietf_type: t.Optional[IETF] = None + config: configparser.ConfigParser, + yang_file_pseudo_path: str, + new_module_data: dict, + compilation_results: dict, + all_modules: t.Dict[str, dict], + ietf_type: t.Optional[IETF] = None, ): result_html_dir = config.get('Web-Section', 'result-html-dir') domain_prefix = config.get('Web-Section', 'domain-prefix') @@ -174,11 +173,7 @@ def check_yangcatalog_data( else: print(f'WARN: {name_revision} not in Redis yet') organization = _resolve_organization(parsed_yang, save_file_dir) - module_data: t.Dict[str, t.Any] = { - 'name': name, - 'revision': revision, - 'organization': organization - } + module_data: t.Dict[str, t.Any] = {'name': name, 'revision': revision, 'organization': organization} incomplete = True update = True for field in ('document-name', 'reference', 'author-email'): @@ -187,9 +182,8 @@ def check_yangcatalog_data( module_data[field] = new_module_data[field] compilation_status = new_module_data.get('compilation-status') - if ( - compilation_status and - module_data.get('compilation-status') != (comp_status := compilation_status.lower().replace(' ', '-')) + if compilation_status and module_data.get('compilation-status') != ( + comp_status := compilation_status.lower().replace(' ', '-') ): # Module parsed with --ietf flag (= RFC) has higher priority if is_rfc and ietf_type is None: @@ -200,7 +194,12 @@ def check_yangcatalog_data( if compilation_status is not None: file_url = _generate_compilation_result_file( - module_data, compilation_results, result_html_dir, is_rfc, versions, ietf_type, + module_data, + compilation_results, + result_html_dir, + is_rfc, + versions, + ietf_type, ) if module_data.get('compilation-status') == 'unknown': comp_result = '' @@ -265,6 +264,7 @@ def _resolve_organization(parsed_yang: Statement, save_file_dir: str) -> str: namespace = namespace[0].arg if (namespace := parsed_yang.search('namespace')) else None return namespace_to_organization(namespace) if namespace else 'independent' + def _resolve_maturity_level(ietf_type: t.Optional[IETF], document_name: t.Optional[str]) -> t.Optional[str]: if not document_name or not ietf_type: return 'not-applicable' @@ -284,18 +284,16 @@ def _resolve_working_group(name_revision: str, ietf_type: IETF, document_name: s def _render(tpl_path: str, context: dict) -> str: """Render jinja html template - Arguments: - :param tpl_path: (str) path to a file - :param context: (dict) dictionary containing data to render jinja - template file - :return: string containing rendered html file + Arguments: + :param tpl_path: (str) path to a file + :param context: (dict) dictionary containing data to render jinja + template file + :return: string containing rendered html file """ for key in context['result']: context['result'][key] = context['result'][key].replace('\n', '
<br>
    ') path, filename = os.path.split(tpl_path) - return jinja2.Environment( - loader=jinja2.FileSystemLoader(path or './') - ).get_template(filename).render(context) + return jinja2.Environment(loader=jinja2.FileSystemLoader(path or './')).get_template(filename).render(context) def _path_in_dir(yang_file_path: str) -> str: @@ -311,7 +309,7 @@ def _path_in_dir(yang_file_path: str) -> str: def _generate_ths(versions: dict, ietf_type: t.Optional[IETF]) -> t.List[str]: - ths = list() + ths = [] option = '--lint' if ietf_type is not None: option = '--ietf' @@ -319,26 +317,27 @@ def _generate_ths(versions: dict, ietf_type: t.Optional[IETF]) -> t.List[str]: ths.append(f'Compilation Results (pyang {option}). {pyang_version}') ths.append(f'Compilation Results (pyang). Note: also generates errors for imported files. {pyang_version}') ths.append( - f'Compilation Results (confdc). Note: also generates errors for imported files. {versions.get("confd_version")}' + f'Compilation Results (confdc). Note: ' + f'also generates errors for imported files. {versions.get("confd_version")}', ) ths.append( 'Compilation Results (yangdump-pro). Note: ' - f'also generates errors for imported files. {versions.get("yangdump_version")}' + f'also generates errors for imported files. {versions.get("yangdump_version")}', ) ths.append( 'Compilation Results (yanglint -i). Note: ' - f'also generates errors for imported files. {versions.get("yanglint_version")}' + f'also generates errors for imported files. {versions.get("yanglint_version")}', ) return ths def _generate_compilation_result_file( - module_data: dict, - compilation_results: dict, - result_html_dir: str, - is_rfc: bool, - versions: dict, - ietf_type: t.Optional[IETF] + module_data: dict, + compilation_results: dict, + result_html_dir: str, + is_rfc: bool, + versions: dict, + ietf_type: t.Optional[IETF], ) -> str: name = module_data['name'] rev = module_data['revision'] diff --git a/bin/versions.py b/bin/versions.py index c43d1f9..5d910cb 100644 --- a/bin/versions.py +++ b/bin/versions.py @@ -12,19 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__author__ = "Slavomir Mazur" -__copyright__ = "Copyright The IETF Trust 2021, All Rights Reserved" -__license__ = "Apache License, Version 2.0" -__email__ = "slavomir.mazur@pantheon.tech" -__version__ = "1.1.0" +__author__ = 'Slavomir Mazur' +__copyright__ = 'Copyright The IETF Trust 2021, All Rights Reserved' +__license__ = 'Apache License, Version 2.0' +__email__ = 'slavomir.mazur@pantheon.tech' +__version__ = '1.1.0' from subprocess import CalledProcessError, check_output +from create_config import create_config from pyang import __version__ as pyang_version from xym import __version__ as xym_version -from create_config import create_config - class ValidatorsVersions: def __init__(self): @@ -49,9 +48,14 @@ def __init__(self): except CalledProcessError: yanglint_version = 'undefined' - self.versions = {'validator_version': __version__, 'pyang_version': pyang_version, 'xym_version': xym_version, - 'confd_version': confd_version, 'yanglint_version': yanglint_version, - 'yangdump_version': yangdump_version} + self.versions = { + 'validator_version': __version__, + 'pyang_version': pyang_version, + 'xym_version': xym_version, + 'confd_version': confd_version, + 'yanglint_version': yanglint_version, + 'yangdump_version': yangdump_version, + } def get_versions(self): return self.versions diff --git a/bin/yang_figures.py b/bin/yang_figures.py index 4ebe44d..f57d0d5 100755 --- a/bin/yang_figures.py +++ b/bin/yang_figures.py @@ -12,43 +12,47 @@ # License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. -import matplotlib as mpl -mpl.use('Agg') # To prevent using a X-Windows server -from pylab import * -from matplotlib.dates import MonthLocator, DateFormatter import json +import matplotlib as mpl from create_config import create_config +from matplotlib.dates import DateFormatter, MonthLocator +from pylab import * +mpl.use('Agg') # To prevent using an X-Windows server -def historical_yangmodule_compiled_readJSON(jsonfile): + +def historical_yangmodule_compiled_read_json(jsonfile): yangmodule_history_json_file = jsonfile with open(yangmodule_history_json_file, 'r') as f: yangmodule_history = json.load(f) - print(" Found " + str(len(yangmodule_history)) + " entrie(s) from " + jsonfile) + print(f'Found {str(len(yangmodule_history))} entrie(s) from {jsonfile}') return yangmodule_history # fonts -fontr = {'family' : 'serif', - 'color' : 'red', - 'weight' : 'normal', - 'size' : 16, - } - -fontb = {'family' : 'serif', - 'color' : 'blue', - 'weight' : 'normal', - 'size' : 16, - } - -fontg = {'family' : 'serif', - 'color' : 'green', - 'weight' : 'normal', - 'size' : 16, - } - +fontr = { + 'family': 'serif', + 'color': 'red', + 'weight': 'normal', + 'size': 16, +} + +fontb = { + 'family': 'serif', + 'color': 'blue', + 'weight': 'normal', + 'size': 16, +} + +fontg = { + 'family': 'serif', + 'color': 'green', + 'weight': 'normal', + 'size': 16, +} + # every monday daysFmt = DateFormatter("%d %b '%y") @@ -62,9 +66,11 @@ def historical_yangmodule_compiled_readJSON(jsonfile): # generate stats for Cisco -yangmoduleCisco_history = historical_yangmodule_compiled_readJSON(web_directory + "/stats/IETFCiscoAuthorsYANGPageCompilationStats.json") +yangmoduleCisco_history = historical_yangmodule_compiled_read_json( + web_directory + '/stats/IETFCiscoAuthorsYANGPageCompilationStats.json', +) if len(yangmoduleCisco_history) == 0: - print ('Found no data in IETFCiscoAuthorsYANGPageCompilation.json') + print('Found no data in 
IETFCiscoAuthorsYANGPageCompilation.json') raise SystemExit yangmoduledates = [] yangmodulesuccess = [] @@ -72,17 +78,31 @@ def historical_yangmodule_compiled_readJSON(jsonfile): yangmoduletotal = [] for key in sorted(yangmoduleCisco_history): # the next line: doesn't take an entry with (0,0,0) for (success,failed,warning) - if yangmoduleCisco_history[key]['success'] != 0 and yangmoduleCisco_history[key]['warning'] != 0 and yangmoduleCisco_history[key]['total'] != 0: - yangmoduledates.append(float(key)) # Matplot requires a float for dates + if ( + yangmoduleCisco_history[key]['success'] != 0 + and yangmoduleCisco_history[key]['warning'] != 0 + and yangmoduleCisco_history[key]['total'] != 0 + ): + yangmoduledates.append(float(key)) # Matplot requires a float for dates yangmodulesuccess.append(int(yangmoduleCisco_history[key]['success'])) yangmodulewarning.append(int(yangmoduleCisco_history[key]['warning'])) yangmoduletotal.append(int(yangmoduleCisco_history[key]['total'])) fig, ax = plt.subplots() -ax.plot(yangmoduledates, yangmodulesuccess, 'g-', yangmoduledates, yangmoduletotal, 'b-', yangmoduledates, yangmodulewarning, 'r-') +ax.plot( + yangmoduledates, + yangmodulesuccess, + 'g-', + yangmoduledates, + yangmoduletotal, + 'b-', + yangmoduledates, + yangmodulewarning, + 'r-', +) ax.set_ylim(bottom=0, auto=False) # Leave top unset to be dynamic for this one plt.text(735727, 80, 'TOTAL', fontdict=fontb) plt.text(735727, 25, 'PASSED', fontdict=fontg) -plt.text(735732, 5, 'WARNING', fontdict=fontr) +plt.text(735732, 5, 'WARNING', fontdict=fontr) ax.xaxis.set_major_formatter(daysFmt) ax.xaxis.set_minor_locator(months) plt.ylabel('# YANG Modules') @@ -94,9 +114,11 @@ def historical_yangmodule_compiled_readJSON(jsonfile): savefig(web_directory + '/figures/IETFCiscoAuthorsYANGPageCompilation.png', bbox_inches='tight') # generate stats for the IETF -yangmodule_history = historical_yangmodule_compiled_readJSON(web_directory + "/stats/IETFYANGPageCompilationStats.json") +yangmodule_history = historical_yangmodule_compiled_read_json( + web_directory + '/stats/IETFYANGPageCompilationStats.json', +) if len(yangmodule_history) == 0: - print ('Found no data in IETFYANGPageCompilationStats.json') + print('Found no data in IETFYANGPageCompilationStats.json') raise SystemExit yangmoduledates = [] yangmodulesuccess = [] @@ -108,11 +130,21 @@ def historical_yangmodule_compiled_readJSON(jsonfile): yangmodulewarning.append(int(yangmodule_history[key]['warning'])) yangmoduletotal.append(int(yangmodule_history[key]['total'])) fig, ax = plt.subplots() -ax.plot(yangmoduledates, yangmodulesuccess, 'g-', yangmoduledates, yangmoduletotal, 'b-', yangmoduledates, yangmodulewarning, 'r-') +ax.plot( + yangmoduledates, + yangmodulesuccess, + 'g-', + yangmoduledates, + yangmoduletotal, + 'b-', + yangmoduledates, + yangmodulewarning, + 'r-', +) ax.set_ylim(bottom=0, auto=False) # Leave top unset to be dynamic for this one plt.text(735697, 95, 'TOTAL', fontdict=fontb) plt.text(735697, 40, 'PASSED', fontdict=fontg) -plt.text(735712, 5, 'WARNING', fontdict=fontr) +plt.text(735712, 5, 'WARNING', fontdict=fontr) ax.xaxis.set_major_formatter(daysFmt) ax.xaxis.set_minor_locator(months) ax.set_ylabel('# YANG Modules') @@ -124,9 +156,9 @@ def historical_yangmodule_compiled_readJSON(jsonfile): fig.savefig(web_directory + '/figures/IETFYANGPageCompilation.png', bbox_inches='tight') # generate stats for the IETF RFCs -yangRFC_history = historical_yangmodule_compiled_readJSON(web_directory + "/stats/IETFYANGOutOfRFCStats.json") 
+yangRFC_history = historical_yangmodule_compiled_read_json(web_directory + '/stats/IETFYANGOutOfRFCStats.json') if len(yangRFC_history) == 0: - print ('Found no data in "IETFYANGOutOfRFC.json') + print('Found no data in "IETFYANGOutOfRFC.json') raise SystemExit yangmoduledates = [] yangmoduletotal = [] @@ -135,7 +167,7 @@ def historical_yangmodule_compiled_readJSON(jsonfile): yangmoduletotal.append(int(yangRFC_history[key]['total'])) figure, axes = plt.subplots() axes.plot(yangmoduledates, yangmoduletotal) -top_max = yangRFC_history[sorted(list(yangRFC_history.keys()))[-1]] +top_max = yangRFC_history[sorted(yangRFC_history.keys())[-1]] top_max = int(top_max['total']) + 10 axes.set_ylim(bottom=0, top=top_max, auto=False) axes.xaxis.set_major_formatter(daysFmt) diff --git a/bin/yang_get_stats.py b/bin/yang_get_stats.py index 50e47ea..ee8e1c5 100755 --- a/bin/yang_get_stats.py +++ b/bin/yang_get_stats.py @@ -25,9 +25,8 @@ from configparser import ConfigParser import matplotlib as mpl -from matplotlib.dates import date2num - from create_config import create_config +from matplotlib.dates import date2num mpl.use('Agg') @@ -35,9 +34,14 @@ class GetStats: CATEGORIES_LIST = ['FAILED', 'PASSED WITH WARNINGS', 'PASSED', 'Email All Authors'] BACKUPS_PREFIXES = [ - 'HydrogenODLPageCompilation_', 'HeliumODLPageCompilation_', 'LithiumODLPageCompilation_', - 'IETFCiscoAuthorsYANGPageCompilation_', 'IETFDraftYANGPageCompilation_', 'IANAStandardYANGPageCompilation_', - 'IEEEStandardYANGPageCompilation_', 'IEEEStandardDraftYANGPageCompilation_', + 'HydrogenODLPageCompilation_', + 'HeliumODLPageCompilation_', + 'LithiumODLPageCompilation_', + 'IETFCiscoAuthorsYANGPageCompilation_', + 'IETFDraftYANGPageCompilation_', + 'IANAStandardYANGPageCompilation_', + 'IEEEStandardYANGPageCompilation_', + 'IEEEStandardDraftYANGPageCompilation_', 'IEEEExperimentalYANGPageCompilation_', ] COMPANIES = ( @@ -70,7 +74,7 @@ class GetStats: ('DT', 'telekom.de'), ('Softbank', 'softbank.co.jp'), ('Packet Design', 'packetdesign.com'), - ('Qosmos', 'qosmos.com') + ('Qosmos', 'qosmos.com'), ) YANG_PAGE_MAIN_PREFIX = 'YANGPageMain_' IETF_YANG_PAGE_MAIN_PREFIX = 'IETFYANGPageMain_' @@ -144,8 +148,8 @@ def gather_yang_page_main_compilation_stats(self): 'generated-at': generated_at, 'passed': passed, 'warnings': passed_with_warnings, - 'failed': failed - } + 'failed': failed, + }, } if int(args.days) == -1: with open(json_history_file, 'w') as filename: @@ -187,13 +191,15 @@ def gather_ietf_yang_page_main_compilation_stats(self): 'warnings': passed_with_warnings, 'passed': passed, 'badly formated': badly_formated, - 'examples': examples + 'examples': examples, } if int(args.days) == -1: with open(json_history_file, 'w') as filename: json.dump(yang_page_compilation_stats, filename) self._write_dictionary_file_in_json( - yang_page_compilation_stats, self.stats_path, 'IETFYANGPageMainStats.json' + yang_page_compilation_stats, + self.stats_path, + 'IETFYANGPageMainStats.json', ) def gather_backups_compilation_stats(self): @@ -226,13 +232,13 @@ def gather_backups_compilation_stats(self): yang_page_compilation_stats[date2num(extracted_date)] = { 'total': total_result, 'warning': passed_with_warning_result, - 'success': passed_result + 'success': passed_result, } if int(args.days) == -1: filename = ( - 'IETFYANGPageCompilationStats.json' if - prefix == 'IETFDraftYANGPageCompilation_' else - f'{prefix[:-1]}Stats.json' + 'IETFYANGPageCompilationStats.json' + if prefix == 'IETFDraftYANGPageCompilation_' + else f'{prefix[:-1]}Stats.json' ) with 
open(json_history_file, 'w') as f: json.dump(yang_page_compilation_stats, f) @@ -260,7 +266,9 @@ def gather_ietf_yang_out_of_rfc_compilation_stats(self): with open(json_history_file, 'w') as f: json.dump(yang_page_compilation_stats, f) self._write_dictionary_file_in_json( - yang_page_compilation_stats, self.stats_path, 'IETFYANGOutOfRFCStats.json' + yang_page_compilation_stats, + self.stats_path, + 'IETFYANGOutOfRFCStats.json', ) def _load_compilation_stats_from_history_file(self, json_history_file: str) -> dict: @@ -281,19 +289,21 @@ def _extract_date_from_filename(self, filename: str) -> datetime.date: def print_files_information(self): # determine the number of company authored drafts files = [ - filename for filename in os.listdir(self.draft_path_strict) if - os.path.isfile(os.path.join(self.draft_path_strict, filename)) + filename + for filename in os.listdir(self.draft_path_strict) + if os.path.isfile(os.path.join(self.draft_path_strict, filename)) ] files_no_strict = [ - filename for filename in os.listdir(self.draft_path_nostrict) if - os.path.isfile(os.path.join(self.draft_path_nostrict, filename)) + filename + for filename in os.listdir(self.draft_path_nostrict) + if os.path.isfile(os.path.join(self.draft_path_nostrict, filename)) ] total_number_drafts = len(files) total_number_drafts_no_strict = len(files_no_strict) print('\nPrint, per company, the number of IETF drafts containing YANG model(s)') print( f'Total numbers of drafts with YANG Model(s): {total_number_drafts} - ' - f'non strict rules: {total_number_drafts_no_strict}\n' + f'non strict rules: {total_number_drafts_no_strict}\n', ) def print_attribution(name: str, domain: str): @@ -302,7 +312,7 @@ def print_attribution(name: str, domain: str): return strict = len(self._list_of_ietf_draft_containing_keyword(files, domain, self.draft_path_strict)) non_strict = len( - self._list_of_ietf_draft_containing_keyword(files_no_strict, domain, self.draft_path_nostrict) + self._list_of_ietf_draft_containing_keyword(files_no_strict, domain, self.draft_path_nostrict), ) print(f'{name}: {strict} - non strict rules: {non_strict}') @@ -319,12 +329,13 @@ def print_attribution(name: str, domain: str): temp_result = os.popen(bash_command).read() if self.debug_level > 0: print( - f'DEBUG: copy the IETF draft containing a YANG model in {self.draft_path_diff}: error {temp_result}' + f'DEBUG: copy the IETF draft containing a YANG model in {self.draft_path_diff}: ' + f'error {temp_result}', ) if self.debug_level > 0: print( 'DEBUG: print the diff between files and files_no_strict lists, ' - f'so the files with xym extraction issues: {files_diff}' + f'so the files with xym extraction issues: {files_diff}', ) def _list_of_files_in_dir(self, srcdir: str, extension: str) -> list[str]: @@ -423,7 +434,7 @@ def _list_of_ietf_draft_containing_keyword(self, drafts: list[str], keyword: str if self.debug_level > 0: print( 'DEBUG: in list_of_ietf_draft_containing_keyword: ' - f'list_of_ietf_draft_with_keyword contains {list_of_ietf_draft_with_keyword}' + f'list_of_ietf_draft_with_keyword contains {list_of_ietf_draft_with_keyword}', ) return list_of_ietf_draft_with_keyword @@ -448,13 +459,8 @@ def _write_dictionary_file_in_json(self, in_dict: dict, path: str, file_name: st '--days', help='Numbers of days to get back in history. 
Default is -1 = unlimited', type=int, - default=-1 - ) - parser.add_argument( - '--debug', - help='Debug level - default is 0', - type=int, - default=0 + default=-1, ) + parser.add_argument('--debug', help='Debug level - default is 0', type=int, default=0) args = parser.parse_args() GetStats(args, config).start_process() diff --git a/bin/yang_version_1_1.py b/bin/yang_version_1_1.py index 2fb69ca..f60ddae 100755 --- a/bin/yang_version_1_1.py +++ b/bin/yang_version_1_1.py @@ -72,20 +72,19 @@ def find_v11_models(src_dir: str, dst_dir: str, debug: int = 0) -> list: dst = os.path.join(ietf_directory, 'YANG-v11') parser = argparse.ArgumentParser(description='YANG 1.1 Processing Tool. Copy all YANG 1.1 modules to destpath') - parser.add_argument('--srcpath', - help='Directory where find YANG models. ' - 'Default is "{}"'.format(src), - type=str, - default=src) - parser.add_argument('--dstpath', - help='Directory where to store version 1.1 YANG models. ' - 'Default is "{}"'.format(dst), - type=str, - default=dst) - parser.add_argument('--debug', - help='Debug level - default is 0', - type=int, - default=0) + parser.add_argument( + '--srcpath', + help='Directory where find YANG models. ' 'Default is "{}"'.format(src), + type=str, + default=src, + ) + parser.add_argument( + '--dstpath', + help='Directory where to store version 1.1 YANG models. ' 'Default is "{}"'.format(dst), + type=str, + default=dst, + ) + parser.add_argument('--debug', help='Debug level - default is 0', type=int, default=0) args = parser.parse_args() diff --git a/lint_requirements.txt b/lint_requirements.txt new file mode 100644 index 0000000..d510ae0 --- /dev/null +++ b/lint_requirements.txt @@ -0,0 +1,11 @@ +flake8==4.0.1 +flake8-black==0.3.3 +flake8-isort==5.0.0 +flake8-absolute-import==1.0.0.1 +flake8-commas==2.1.0 +flake8-comprehensions==3.10.0 +flake8-docstrings==1.6.0 +flake8-multiline-containers==0.0.19 +flake8-print==5.0.0 +flake8-quotes==3.3.1 +pep8-naming==0.13.2 \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..8bee26c --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,9 @@ +[tool.black] +line-length = 120 +skip-string-normalization = true + +[tool.isort] +skip = [".git", "__pycache__", "docs/source/conf.py", "old", "build", "dist", "venv"] +skip_glob = ["./lib/*", "./bin/*"] +line_length = 120 +use_parentheses = true \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 8fd7df3..721ed65 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,3 +9,15 @@ filelock==3.4.0 redis==4.1.2 pytest==7.0.1 scipy==1.8.1 +pre-commit==2.20.0 +flake8==4.0.1 +flake8-black==0.3.3 +flake8-isort==5.0.0 +flake8-absolute-import==1.0.0.1 +flake8-commas==2.1.0 +flake8-comprehensions==3.10.0 +flake8-docstrings==1.6.0 +flake8-multiline-containers==0.0.19 +flake8-print==5.0.0 +flake8-quotes==3.3.1 +pep8-naming==0.13.2 \ No newline at end of file diff --git a/tests/test_extract_elem.py b/tests/test_extract_elem.py index a9bba7b..15c660f 100644 --- a/tests/test_extract_elem.py +++ b/tests/test_extract_elem.py @@ -19,13 +19,13 @@ __email__ = 'richard.zilincik@pantheon.tech' import filecmp -import unittest import os +import unittest import extract_elem as ee -class TestExtractElem(unittest.TestCase): +class TestExtractElem(unittest.TestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resource_path = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/extract_elem') @@ -39,58 +39,51 @@ def test_extract_elem_grouping(self): 
'grouping-shared-module-leafs.txt', 'grouping-yang-lib-common-leafs.txt', 'grouping-yang-lib-implementation-leafs.txt', - 'grouping-yang-lib-schema-leaf.txt' + 'grouping-yang-lib-schema-leaf.txt', ] ee.extract_elem( os.path.join(self.resource_path, 'yang-catalog@2018-04-03.yang'), os.path.join(self.resource_path, 'extracted'), - 'grouping' - ) + 'grouping', + ) match, mismatch, errors = filecmp.cmpfiles( os.path.join(self.resource_path, 'extracted'), os.path.join(self.resource_path, 'expected'), - groupings + groupings, ) self.assertFalse(mismatch or errors) def test_extract_elem_identity(self): - identities = [ - 'identity-netconf.txt', - 'identity-protocol.txt', - 'identity-restconf.txt' - ] + identities = ['identity-netconf.txt', 'identity-protocol.txt', 'identity-restconf.txt'] ee.extract_elem( os.path.join(self.resource_path, 'yang-catalog@2018-04-03.yang'), os.path.join(self.resource_path, 'extracted'), - 'identity' - ) + 'identity', + ) match, mismatch, errors = filecmp.cmpfiles( os.path.join(self.resource_path, 'extracted'), os.path.join(self.resource_path, 'expected'), - identities + identities, ) self.assertFalse(mismatch or errors) def test_extract_elem_typedef(self): - typedefs = [ - 'typedef-email-address.txt', - 'typedef-path.txt', - 'typedef-semver.txt' - ] + typedefs = ['typedef-email-address.txt', 'typedef-path.txt', 'typedef-semver.txt'] ee.extract_elem( os.path.join(self.resource_path, 'yang-catalog@2018-04-03.yang'), os.path.join(self.resource_path, 'extracted'), - 'typedef' - ) + 'typedef', + ) match, mismatch, errors = filecmp.cmpfiles( os.path.join(self.resource_path, 'extracted'), os.path.join(self.resource_path, 'expected'), - typedefs + typedefs, ) self.assertFalse(mismatch or errors) + if __name__ == '__main__': unittest.main() diff --git a/tests/test_extract_emails.py b/tests/test_extract_emails.py index 9629d0f..a8c2fd0 100644 --- a/tests/test_extract_emails.py +++ b/tests/test_extract_emails.py @@ -25,7 +25,6 @@ class TestExtractElem(unittest.TestCase): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resource_path = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/extract_emails') @@ -38,5 +37,6 @@ def test_list_of_ietf_drafts(self): result = ee.list_of_ietf_drafts(os.path.join(self.resource_path, 'drafts')) self.assertSetEqual(set(result), {'draft-foo.txt', 'draft-bar.txt'}) + if __name__ == '__main__': unittest.main() diff --git a/tests/test_file_hasher.py b/tests/test_file_hasher.py index 26a0848..9e6a43a 100644 --- a/tests/test_file_hasher.py +++ b/tests/test_file_hasher.py @@ -19,16 +19,16 @@ __email__ = 'richard.zilincik@pantheon.tech' import json +import os import shutil import subprocess import unittest -import os from file_hasher import FileHasher from versions import ValidatorsVersions -class TestFileHasher(unittest.TestCase): +class TestFileHasher(unittest.TestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resource_path = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/file_hasher') @@ -37,8 +37,8 @@ def __init__(self, *args, **kwargs): 'sdo_files_modification_hashes.json.lock', 'versions.json', 'correct.json', - 'incorrect.json' - ]: + 'incorrect.json', + ]: try: os.remove(self.resource(file)) except FileNotFoundError: @@ -50,15 +50,14 @@ def create_hash_files(self): json.dump(ValidatorsVersions().get_versions(), f) hash_dict = { self.resource('file.txt'): self.compute_hash('file.txt'), - self.resource('other_file.txt'): self.compute_hash('other_file.txt') + 
self.resource('other_file.txt'): self.compute_hash('other_file.txt'), } with open(self.resource('correct.json'), 'w') as f: json.dump(hash_dict, f) - hash_dict[self.resource('file.txt')] = 64*'0' + hash_dict[self.resource('file.txt')] = 64 * '0' with open(self.resource('incorrect.json'), 'w') as f: json.dump(hash_dict, f) - - + def compute_hash(self, file): command = 'cat {} {} | sha256sum'.format(self.resource(file), self.resource('versions.json')) return subprocess.run(command, shell=True, capture_output=True).stdout.decode().split()[0] @@ -97,7 +96,7 @@ def test_invalidate_hashes(self): result = json.load(f) self.assertDictEqual(result, expected) - + def test_should_parse(self): shutil.copy(self.resource('incorrect.json'), self.resource('sdo_files_modification_hashes.json')) diff --git a/tests/test_gather_ietf_dependent_modules.py b/tests/test_gather_ietf_dependent_modules.py index 61339e3..153b6d5 100644 --- a/tests/test_gather_ietf_dependent_modules.py +++ b/tests/test_gather_ietf_dependent_modules.py @@ -29,7 +29,6 @@ class TestGatherIetfDependentModules(unittest.TestCase): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) config = create_config() @@ -43,7 +42,7 @@ def setUp(self) -> None: @mock.patch('requests.post') def test_copy_modules(self, mock_post: mock.MagicMock) -> None: - """ Test whether the yang files have been copied to the destination directory. """ + """Test whether the yang files have been copied to the destination directory.""" with open(self.payloads_file, 'r', encoding='utf-8') as reader: content = json.load(reader) mock_post.return_value.json.return_value = content['search-filter-ietf'] @@ -58,7 +57,7 @@ def test_copy_modules(self, mock_post: mock.MagicMock) -> None: @mock.patch('requests.post') def test_copy_modules_no_src_dir(self, mock_post: mock.MagicMock) -> None: - """ Destination directory should be empty if the source directory does not exist. """ + """Destination directory should be empty if the source directory does not exist.""" with open(self.payloads_file, 'r', encoding='utf-8') as reader: content = json.load(reader) mock_post.return_value.json.return_value = content['search-filter-ietf'] @@ -71,7 +70,7 @@ def test_copy_modules_no_src_dir(self, mock_post: mock.MagicMock) -> None: @mock.patch('requests.post') def test_copy_modules_400_response(self, mock_post: mock.MagicMock) -> None: - """ Destination directory should be empty if server responded with 400/404 error message. 
""" + """Destination directory should be empty if server responded with 400/404 error message.""" with open(self.payloads_file, 'r', encoding='utf-8') as reader: content = json.load(reader) mock_post.return_value.json.return_value = content['search-filter-ietf-400-response'] diff --git a/tests/test_private_page.py b/tests/test_private_page.py index b5666c0..e1e7cb3 100644 --- a/tests/test_private_page.py +++ b/tests/test_private_page.py @@ -18,17 +18,17 @@ __license__ = 'Apache License, Version 2.0' __email__ = 'richard.zilincik@pantheon.tech' -import unittest import os +import unittest import private_page as pp -class TestPrivatePage(unittest.TestCase): +class TestPrivatePage(unittest.TestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resources = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/private_page') - + def resource(self, file: str): return os.path.join(self.resources, file) @@ -36,7 +36,7 @@ def test_get_vendor_context(self): result = pp.get_vendor_context( self.resource('vendor'), lambda os_name, os_specific_dir: pp.alnum('{}{}'.format(os_name, os_specific_dir)), - lambda os_name, os_specific_dir: '{}{}'.format(os_name, os_specific_dir) + lambda os_name, os_specific_dir: '{}{}'.format(os_name, os_specific_dir), ) expected = [{'allCharacters': i, 'alphaNumeric': pp.alnum(i)} for i in ['bar1.0', 'foo1.0', 'foo1.1', 'foo1.2']] @@ -47,12 +47,12 @@ def test_get_vendor_context_separate(self): self.resource('vendor'), lambda os_name, os_specific_dir: pp.alnum('{}{}'.format(os_name, os_specific_dir)), lambda os_name, os_specific_dir: '{}{}'.format(os_name, os_specific_dir), - separate=True + separate=True, ) expected = { 'BAR': [{'allCharacters': i, 'alphaNumeric': pp.alnum(i)} for i in ['bar1.0']], - 'FOO': [{'allCharacters': i, 'alphaNumeric': pp.alnum(i)} for i in ['foo1.0', 'foo1.1', 'foo1.2']] + 'FOO': [{'allCharacters': i, 'alphaNumeric': pp.alnum(i)} for i in ['foo1.0', 'foo1.1', 'foo1.2']], } assert isinstance(result, dict) self.assertDictEqual(result, expected) @@ -60,13 +60,14 @@ def test_get_vendor_context_separate(self): def test_get_etsi_context(self): result = pp.get_etsi_context(self.resource('etsi')) - expected = [{'allCharacters': i.strip('NFV-SOL006-v'), 'alphaNumeric': pp.alnum(i.strip('NFV-SOL006-v'))} - for i in ['NFV-SOL006-v2.6.1', 'NFV-SOL006-v2.7.1', 'NFV-SOL006-v2.8.1']] + expected = [ + {'allCharacters': i.strip('NFV-SOL006-v'), 'alphaNumeric': pp.alnum(i.strip('NFV-SOL006-v'))} + for i in ['NFV-SOL006-v2.6.1', 'NFV-SOL006-v2.7.1', 'NFV-SOL006-v2.8.1'] + ] self.assertEqual(result, expected) - def test_get_openroadm_context(self): result = pp.get_openroadm_context(['bar1.0', 'foo1.0', 'foo1.1', 'foo1.2']) expected = [{'alphaNumeric': i, 'allCharacters': i} for i in ['bar1.0', 'foo1.0', 'foo1.1', 'foo1.2']] - self.assertEqual(result, expected) \ No newline at end of file + self.assertEqual(result, expected) diff --git a/tests/test_remove_directory_content.py b/tests/test_remove_directory_content.py index bec249d..a813e2b 100644 --- a/tests/test_remove_directory_content.py +++ b/tests/test_remove_directory_content.py @@ -27,7 +27,6 @@ class TestRemoveDirectoryContent(unittest.TestCase): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resource_path = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/remove_directory_content') @@ -50,7 +49,7 @@ def tearDown(self) -> None: shutil.rmtree(self.resource_path, ignore_errors=True) def test_remove_directory_content(self): - """ Try to 
delete the content of a directory - it should be empty after script run. """ + """Try to delete the content of a directory - it should be empty after script run.""" self.assertNotEqual(os.listdir(self.resource_path), []) rdc.remove_directory_content(self.resource_path, 1) @@ -59,7 +58,7 @@ def test_remove_directory_content(self): self.assertEqual(os.listdir(self.resource_path), []) def test_rename_file_backup_from_console(self) -> None: - """ Run the script from the console by passing the arguments. """ + """Run the script from the console by passing the arguments.""" bash_command = 'python {} --dir {} --debug 1'.format(self.script_path, self.resource_path) subprocess.run(bash_command, shell=True, capture_output=True, check=False).stdout.decode() @@ -68,7 +67,6 @@ def test_rename_file_backup_from_console(self) -> None: class TestRemoveDirectoryContentEmpty(unittest.TestCase): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resource_path = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/remove_directory_content') @@ -84,21 +82,21 @@ def tearDown(self) -> None: shutil.rmtree(self.non_existing_path, ignore_errors=True) def test_remove_directory_content_empty(self): - """ Try to delete the content of an empty directory. """ + """Try to delete the content of an empty directory.""" rdc.remove_directory_content(self.resource_path) self.assertTrue(os.path.isdir(self.resource_path)) self.assertEqual(os.listdir(self.resource_path), []) def test_remove_directory_content_non_existing_dir(self): - """ Try to delete the content of a directory that does not exist. """ + """Try to delete the content of a directory that does not exist.""" rdc.remove_directory_content(self.non_existing_path) self.assertTrue(os.path.isdir(self.non_existing_path)) self.assertEqual(os.listdir(self.non_existing_path), []) def test_remove_directory_content_default(self): - """ Try to delete the content of a directory - using default value. """ + """Try to delete the content of a directory - using default value.""" result = rdc.remove_directory_content('') self.assertEqual(result, None) diff --git a/tests/test_rename_file_backup.py b/tests/test_rename_file_backup.py index 03583f4..02d9441 100644 --- a/tests/test_rename_file_backup.py +++ b/tests/test_rename_file_backup.py @@ -28,7 +28,6 @@ class TestRenameFileBackup(unittest.TestCase): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resource_path = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/rename_file_backup') @@ -42,13 +41,15 @@ def setUp(self) -> None: shutil.rmtree(self.backup_directory, ignore_errors=True) def test_rename_file_backup(self) -> None: - """ Create a backup of the file with timestamp as suffix - and test if the file exists and has the correct name. """ + """ + Create a backup of the file with timestamp as suffix + and test if the file exists and has the correct name. 
+ """ rfb.rename_file_backup(self.private_directory, self.backup_directory, 1) file_to_backup = os.path.join(self.private_directory, self.filename) modified_time = os.path.getmtime(file_to_backup) - timestamp = (datetime.fromtimestamp(modified_time).strftime("%Y_%m_%d")) + timestamp = datetime.fromtimestamp(modified_time).strftime('%Y_%m_%d') backup_files = os.listdir(self.backup_directory) self.assertNotEqual(backup_files, []) @@ -57,27 +58,30 @@ def test_rename_file_backup(self) -> None: self.assertEqual(backup_file, self.backup_filename.format(timestamp)) def test_rename_file_backup_source_not_exists(self) -> None: - """ Method should not fail even if the source directory does not exist.""" + """Method should not fail even if the source directory does not exist.""" src_dir = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/non-existing-dir') result = rfb.rename_file_backup(src_dir, self.backup_directory, 1) self.assertEqual(result, None) def test_rename_file_backup_destination_missing(self) -> None: - """ Method should not fail even if the destination directory does not exist.""" + """Method should not fail even if the destination directory does not exist.""" result = rfb.rename_file_backup(self.private_directory, '', 1) self.assertEqual(result, None) def test_rename_file_backup_from_console(self) -> None: - """ Run the script from the console by passing the arguments. """ + """Run the script from the console by passing the arguments.""" bash_command = 'python {} --srcdir {} --backupdir {} --debug 1'.format( - self.script_path, self.private_directory, self.backup_directory) + self.script_path, + self.private_directory, + self.backup_directory, + ) subprocess.run(bash_command, shell=True, capture_output=True, check=False).stdout.decode() file_to_backup = os.path.join(self.private_directory, self.filename) modified_time = os.path.getmtime(file_to_backup) - timestamp = (datetime.fromtimestamp(modified_time).strftime("%Y_%m_%d")) + timestamp = datetime.fromtimestamp(modified_time).strftime('%Y_%m_%d') backup_files = os.listdir(self.backup_directory) self.assertNotEqual(backup_files, []) diff --git a/tests/test_utility.py b/tests/test_utility.py index 3422465..9bb52a3 100644 --- a/tests/test_utility.py +++ b/tests/test_utility.py @@ -18,15 +18,14 @@ __license__ = 'Apache License, Version 2.0' __email__ = 'richard.zilincik@pantheon.tech' -import unittest import os +import unittest import utility.utility as u from create_config import create_config class TestUtility(unittest.TestCase): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resource_path = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/utility') @@ -95,9 +94,9 @@ def test_number_that_passed_compilation(self): 'foo': ['test', 'stuff', 'PASSED', 'more stuff'], 'bar': ['test', 'stuff', 'FAILED', 'more stuff'], 'foobar': ['test', 'stuff', 'PASSED WITH WARNINGS', 'more stuff'], - 'boofar': ['test', 'stuff', 'PASSED', 'more stuff'] + 'boofar': ['test', 'stuff', 'PASSED', 'more stuff'], }, 2, - 'PASSED' + 'PASSED', ) self.assertEqual(result, 2) diff --git a/tests/test_yang_version_1_1.py b/tests/test_yang_version_1_1.py index 9f14fa0..a957a39 100644 --- a/tests/test_yang_version_1_1.py +++ b/tests/test_yang_version_1_1.py @@ -27,7 +27,6 @@ class TestYangVersion11(unittest.TestCase): - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.resource_path = os.path.join(os.environ['VIRTUAL_ENV'], 'tests/resources/yang_version_1_1') @@ -39,7 +38,7 @@ def setUp(self) -> None: 
shutil.rmtree(self.dst, ignore_errors=True) def test_yang_version_1_1(self): - """ Find and copy the yang files that contain 'yang-version 1.1' string. """ + """Find and copy the yang files that contain 'yang-version 1.1' string.""" result = yv11.find_v11_models(self.src, self.dst, 1) self.assertNotEqual(result, []) self.assertIn('test.yang', result) @@ -50,12 +49,12 @@ def test_yang_version_1_1(self): self.assertIn('test.yang', v1_files) def test_yang_version_1_1_src_not_exists(self): - """ Test the case when the src directory does not exist. """ + """Test the case when the src directory does not exist.""" result = yv11.find_v11_models('', self.dst) self.assertEqual(result, []) def test_yang_version_1_1_from_console(self): - """ Run the script from the console by passing the arguments. """ + """Run the script from the console by passing the arguments.""" bash_command = 'python {} --srcpath {} --dstpath {} --debug 1'.format(self.script_path, self.src, self.dst) subprocess.run(bash_command, shell=True, capture_output=True, check=False).stdout.decode()