From 0c7741caf14fce0aaa360166f45041ad7adc4460 Mon Sep 17 00:00:00 2001 From: MaximePerrigault Date: Tue, 9 Jan 2024 15:32:38 +0100 Subject: [PATCH 01/14] Add a worker ID to each instance of behave --- README.md | 6 ++++++ behavex/runner.py | 23 +++++++++++++++++++---- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 4238d70..66e86d6 100644 --- a/README.md +++ b/README.md @@ -153,6 +153,12 @@ Examples: When the parallel-scheme is set by **feature**, all tests within each feature will be run sequentially. +BehaveX populates the Behave contexts with the `worker_id` user-specific data. This variable contains the id of the current behave process. + +E.g If BehaveX is started with `--parallel-processes 2`, the first instance of behave will receive `worker_id=0`, and the second instance will receive `worker_id=1`. + +This variable can be accessed within the python tests using `context.config.userdata['worker_id']` + ## Test execution reports ### HTML report This is a friendly test execution report that contains information related to test scenarios, execution status, execution evidence and metrics. A filters bar is also provided to filter scenarios by name, tag or status. 
diff --git a/behavex/runner.py b/behavex/runner.py index 8fab7ff..e2f61d5 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -157,8 +157,12 @@ def setup_running_failures(args_parsed): return EXIT_OK, None -def init_multiprocessing(): +def init_multiprocessing(idQueue): signal.signal(signal.SIGINT, signal.SIG_IGN) + # Retrieve one of the unique IDs + id = idQueue.get() + # Use the unique ID to name the process + multiprocessing.current_process().name = f'behave_worker-{id}' def launch_behavex(): @@ -187,7 +191,12 @@ def launch_behavex(): lock = manager.Lock() # shared variable to track scenarios that should be run but seems to be removed from execution (using scenarios.remove) shared_removed_scenarios = manager.dict() - process_pool = multiprocessing.Pool(parallel_processes, initializer=init_multiprocessing(), initargs=(lock,)) + # Create a queue containing unique IDs from 0 to the number of parallel processes - 1 + # These IDs will be attributed to the process when they will be initialized + idQueue = manager.Queue() + for i in range(parallel_processes): + idQueue.put(i) + process_pool = multiprocessing.Pool(parallel_processes, initializer=init_multiprocessing, initargs=(idQueue,)) try: if parallel_processes == 1 or get_param('dry_run'): # Executing without parallel processes @@ -746,9 +755,13 @@ def _set_behave_arguments(features_path, multiprocess, feature=None, scenario=No scenario_outline_compatible = scenario_outline_compatible.replace(escaped_example_name, "[\\S ]*") arguments.append('--name') arguments.append("{}".format(scenario_outline_compatible)) - name = multiprocessing.current_process().name.split('-')[-1] + worker_id = multiprocessing.current_process().name.split('-')[-1] + arguments.append('--outfile') - arguments.append(os.path.join(gettempdir(), 'stdout{}.txt'.format(name))) + arguments.append(os.path.join(gettempdir(), 'stdout{}.txt'.format(worker_id))) + + arguments.append('-D') + arguments.append(f'worker_id={worker_id}') else: if 
type(features_path) is list: for feature_path in features_path: @@ -763,6 +776,8 @@ def _set_behave_arguments(features_path, multiprocess, feature=None, scenario=No arguments.append(output_folder) arguments.append('--outfile') arguments.append(os.path.join(output_folder, 'behave', 'behave.log')) + arguments.append('-D') + arguments.append(f'worker_id=0') arguments.append('--no-skipped') arguments.append('--no-junit') run_wip_tests = False From 789955882a664825b45cad7318bb2ea4478e18d1 Mon Sep 17 00:00:00 2001 From: anibalinn Date: Fri, 6 Sep 2024 18:57:21 -0300 Subject: [PATCH 02/14] Improvements done in implementation to identify each parallel process --- CHANGES.rst | 11 +++++++++++ README.md | 6 ++++-- behavex/runner.py | 22 ++++++++--------------- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0da52f3..9877c43 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,17 @@ Version History =============================================================================== +Version: 4.0.3 +------------------------------------------------------------------------------- +ENHANCEMENTS: + +* Added the 'worker_id' context.config.userdata parameter to allow users to identify which worker is executing every feature or scenario when running tests in parallel. `PR #121 `_ + +CONTRIBUTIONS: + +* Contributions from `JackHerRrer `__, by providing the implementation to include the 'worker_id' context.config.userdata parameter (Thanks JackHerRrer!!) + + Version: 4.0.2 ------------------------------------------------------------------------------- ENHANCEMENTS: diff --git a/README.md b/README.md index 6949d2b..0461721 100644 --- a/README.md +++ b/README.md @@ -171,9 +171,11 @@ Examples: When the parallel-scheme is set by **feature**, all tests within each feature will be run sequentially. -BehaveX populates the Behave contexts with the `worker_id` user-specific data. This variable contains the id of the current behave process. 
+### Identifying each parallel process -E.g If BehaveX is started with `--parallel-processes 2`, the first instance of behave will receive `worker_id=0`, and the second instance will receive `worker_id=1`. +BehaveX populates the Behave contexts with the `worker_id` user-specific data. This variable contains the id of the current behave process. + +E.g If BehaveX is started with `--parallel-processes 2`, the first instance of behave will receive `worker_id=0`, and the second instance will receive `worker_id=1`. This variable can be accessed within the python tests using `context.config.userdata['worker_id']` diff --git a/behavex/runner.py b/behavex/runner.py index d1a8155..631f0c9 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -70,8 +70,6 @@ include_path_match = None include_name_match = None scenario_lines = {} -# Define a global variable to store the lock -process_lock = None def main(): @@ -169,15 +167,13 @@ def setup_running_failures(args_parsed): return EXIT_OK, None -def init_multiprocessing(lock, idQueue): +def init_multiprocessing(idQueue): """Initialize multiprocessing by ignoring SIGINT signals.""" signal.signal(signal.SIGINT, signal.SIG_IGN) - global process_lock - process_lock = lock # Retrieve one of the unique IDs - id = idQueue.get() + worker_id = idQueue.get() # Use the unique ID to name the process - multiprocessing.current_process().name = f'behave_worker-{id}' + multiprocessing.current_process().name = f'behave_worker-{worker_id}' def launch_behavex(): @@ -217,13 +213,13 @@ def launch_behavex(): shared_removed_scenarios = manager.dict() lock = manager.Lock() # Create a queue containing unique IDs from 0 to the number of parallel processes - 1 - # These IDs will be attributed to the process when they will be initialized + # These IDs will be attributed to the process when they will be initialized idQueue = manager.Queue() for i in range(parallel_processes): idQueue.put(i) process_pool = ProcessPoolExecutor(max_workers=parallel_processes, 
- initializer=init_multiprocessing(), - initargs=(lock, idQueue)) + initializer=init_multiprocessing, + initargs=(idQueue,)) global_vars.execution_start_time = time.time() try: config = ConfigRun() @@ -312,9 +308,7 @@ def launch_behavex(): else: execution_failed = True if execution_codes > 0 else False execution_interrupted_or_crashed = True if execution_codes == 2 else False - exit_code = ( - EXIT_ERROR if (execution_failed and failing_non_muted_tests) or execution_interrupted_or_crashed else EXIT_OK - ) + exit_code = (EXIT_ERROR if (execution_failed and failing_non_muted_tests) or execution_interrupted_or_crashed else EXIT_OK) except KeyboardInterrupt: print('Caught KeyboardInterrupt, terminating workers') process_pool.shutdown(wait=False, cancel_futures=True) @@ -1022,7 +1016,7 @@ def _set_behave_arguments(features_path, multiprocess, execution_id=None, featur arguments.append('--name') arguments.append("{}".format(scenario_outline_compatible)) worker_id = multiprocessing.current_process().name.split('-')[-1] - + arguments.append('--outfile') arguments.append(os.path.join(gettempdir(), 'stdout{}.txt'.format(worker_id))) From 3c9cac47060e49fa964b260917ece2768f8f1a76 Mon Sep 17 00:00:00 2001 From: anibalinn Date: Fri, 13 Sep 2024 19:36:00 -0300 Subject: [PATCH 03/14] Adding the --parallel-delay argument, to enable setting a staggered execution when running tests in parallel. 
--- CHANGES.rst | 3 ++- behavex/arguments.py | 5 +++++ behavex/runner.py | 8 ++++++-- setup.py | 2 +- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 9877c43..5a203ec 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,11 +1,12 @@ Version History =============================================================================== -Version: 4.0.3 +Version: 4.0.4 ------------------------------------------------------------------------------- ENHANCEMENTS: * Added the 'worker_id' context.config.userdata parameter to allow users to identify which worker is executing every feature or scenario when running tests in parallel. `PR #121 `_ +* Adding the --parallel-delay argument, to enable setting a staggered execution when running tests in parallel. `Issue #142 `_ CONTRIBUTIONS: diff --git a/behavex/arguments.py b/behavex/arguments.py index 333dfd7..5d4feef 100644 --- a/behavex/arguments.py +++ b/behavex/arguments.py @@ -285,6 +285,11 @@ def parse_arguments(args): help="Specifies whether parallel execution should be performed at the scenario or feature level.", required=False, ) + parser.add_argument( + '--parallel-delay', + type=int, + default=0, + help='Delay in milliseconds before starting each parallel process') parser.add_argument( '-ip', '--include-paths', diff --git a/behavex/runner.py b/behavex/runner.py index 631f0c9..50584e2 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -167,13 +167,16 @@ def setup_running_failures(args_parsed): return EXIT_OK, None -def init_multiprocessing(idQueue): +def init_multiprocessing(idQueue, parallel_delay): """Initialize multiprocessing by ignoring SIGINT signals.""" signal.signal(signal.SIGINT, signal.SIG_IGN) # Retrieve one of the unique IDs worker_id = idQueue.get() # Use the unique ID to name the process multiprocessing.current_process().name = f'behave_worker-{worker_id}' + # Add delay + if parallel_delay > 0: + time.sleep(parallel_delay / 1000.0) def launch_behavex(): @@ -217,9 
+220,10 @@ def launch_behavex(): idQueue = manager.Queue() for i in range(parallel_processes): idQueue.put(i) + parallel_delay = get_param('parallel_delay') process_pool = ProcessPoolExecutor(max_workers=parallel_processes, initializer=init_multiprocessing, - initargs=(idQueue,)) + initargs=(idQueue, parallel_delay)) global_vars.execution_start_time = time.time() try: config = ConfigRun() diff --git a/setup.py b/setup.py index 249bc91..82ff1c1 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name='behavex', - version='4.0.2', + version='4.0.4rc1', license="MIT", platforms=['any'], python_requires='>=3.5', From bea4e281783beaf5c99f0563d171002db89edb39 Mon Sep 17 00:00:00 2001 From: anibalinn Date: Mon, 16 Sep 2024 18:49:46 -0300 Subject: [PATCH 04/14] Fixing issue when calculating scenarios to run by feature when re-executing failing scenarios --- behavex/runner.py | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/behavex/runner.py b/behavex/runner.py index 50584e2..63da44c 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -167,7 +167,7 @@ def setup_running_failures(args_parsed): return EXIT_OK, None -def init_multiprocessing(idQueue, parallel_delay): +def init_multiprocessing(idQueue): """Initialize multiprocessing by ignoring SIGINT signals.""" signal.signal(signal.SIGINT, signal.SIG_IGN) # Retrieve one of the unique IDs @@ -223,7 +223,7 @@ def launch_behavex(): parallel_delay = get_param('parallel_delay') process_pool = ProcessPoolExecutor(max_workers=parallel_processes, initializer=init_multiprocessing, - initargs=(idQueue, parallel_delay)) + initargs=(idQueue,)) global_vars.execution_start_time = time.time() try: config = ConfigRun() @@ -460,6 +460,7 @@ def launch_by_feature(features, execution_code, map_json = execute_tests(features_path=None, feature_filename=serial_feature["feature_filename"], feature_json_skeleton=serial_feature["feature_json_skeleton"], + 
scenarios_to_run_in_feature=None, scenario_name=None, multiprocess=True, config=ConfigRun(), @@ -477,6 +478,7 @@ def launch_by_feature(features, features_path=None, feature_filename=feature_filename, feature_json_skeleton=feature_json_skeleton, + scenarios_to_run_in_feature=None, scenario_name=None, multiprocess=True, config=ConfigRun(), @@ -512,8 +514,8 @@ def launch_by_scenario(features, parallel_scenarios = {} serial_scenarios = {} duplicated_scenarios = {} - total_scenarios = 0 - features_with_empty_scenario_descriptions = [] + total_scenarios_to_run = {} + features_with_no_scen_desc = [] for features_path, scenarios in features.items(): for scenario in scenarios: if include_path_match(scenario.filename, scenario.line) \ @@ -521,43 +523,45 @@ def launch_by_scenario(features, scenario_tags = get_scenario_tags(scenario) if match_for_execution(scenario_tags): if scenario.name == "": - features_with_empty_scenario_descriptions.append(scenario.filename) + features_with_no_scen_desc.append(scenario.filename) feature_json_skeleton = _get_feature_json_skeleton(scenario) - scenario_information = {"feature_filename": scenario.feature.filename, + feature_filename = scenario.feature.filename + scenario_information = {"feature_filename": feature_filename, "feature_json_skeleton": feature_json_skeleton, "scenario_name": scenario.name} + total_scenarios_to_run[feature_filename] = total_scenarios_to_run.setdefault(feature_filename, 0) + 1 if 'SERIAL' in scenario_tags: for key, list_scenarios in serial_scenarios.items(): if scenario_information in list_scenarios: duplicated_scenarios.setdefault(key, []).append(scenario.name) serial_scenarios.setdefault(features_path, []).append(scenario_information) - total_scenarios += 1 else: for key, list_scenarios in parallel_scenarios.items(): if scenario_information in list_scenarios: duplicated_scenarios.setdefault(key, []).append(scenario.name) parallel_scenarios.setdefault(features_path, []).append(scenario_information) - 
total_scenarios += 1 if show_progress_bar: global_vars.progress_bar_instance = _get_progress_bar_instance(parallel_scheme="scenario", - total_elements=total_scenarios) + total_elements=sum(total_scenarios_to_run.values())) if global_vars.progress_bar_instance: global_vars.progress_bar_instance.start() if duplicated_scenarios: print_parallel('scenario.duplicated_scenarios', json.dumps(duplicated_scenarios, indent=4)) exit(1) - if features_with_empty_scenario_descriptions: + if features_with_no_scen_desc: print_parallel('feature.empty_scenario_descriptions', - '\n* '.join(features_with_empty_scenario_descriptions)) + '\n* '.join(features_with_no_scen_desc)) exit(1) if serial_scenarios: print_parallel('scenario.serial_execution') for features_path, scenarios_in_feature in serial_scenarios.items(): for scen_info in scenarios_in_feature: + scenarios_to_run_in_feature = total_scenarios_to_run[scen_info["feature_filename"]] execution_code, json_report = execute_tests(features_path=features_path, feature_filename=scen_info["feature_filename"], feature_json_skeleton=scen_info["feature_json_skeleton"], + scenarios_to_run_in_feature=scenarios_to_run_in_feature, scenario_name=scen_info["scenario_name"], multiprocess=True, config=ConfigRun(), @@ -571,6 +575,7 @@ def launch_by_scenario(features, print_parallel('scenario.running_parallels') for features_path in parallel_scenarios.keys(): for scenario_information in parallel_scenarios[features_path]: + scenarios_to_run_in_feature = total_scenarios_to_run[scenario_information["feature_filename"]] feature_filename = scenario_information["feature_filename"] feature_json_skeleton = scenario_information["feature_json_skeleton"] scenario_name = scenario_information["scenario_name"] @@ -578,6 +583,7 @@ def launch_by_scenario(features, features_path=features_path, feature_filename=feature_filename, feature_json_skeleton=feature_json_skeleton, + scenarios_to_run_in_feature=scenarios_to_run_in_feature, scenario_name=scenario_name, 
multiprocess=True, config=ConfigRun(), @@ -596,6 +602,7 @@ def execute_tests( features_path, feature_filename, feature_json_skeleton, + scenarios_to_run_in_feature, scenario_name, multiprocess, config, @@ -608,6 +615,7 @@ def execute_tests( features_path (str): Path to the features. feature_filename (str): Name of the feature file. feature_json_skeleton (str): JSON skeleton of the feature. + scenarios_to_run_in_feature (int): Number of scenarios to run in the feature. scenario_name (str): Name of the scenario. multiprocess (bool): Whether to use multiprocessing. config (ConfigRun): Configuration object. @@ -670,6 +678,7 @@ def execute_tests( processing_xml_feature(json_output=json_output, scenario=scenario_name, feature_filename=feature_filename, + scenarios_to_run_in_feature=scenarios_to_run_in_feature, lock=lock, shared_removed_scenarios=shared_removed_scenarios) except Exception as ex: @@ -839,7 +848,9 @@ def remove_temporary_files(parallel_processes, json_reports): print(remove_ex) -def processing_xml_feature(json_output, scenario, feature_filename, lock=None, shared_removed_scenarios=None): +def processing_xml_feature(json_output, scenario, feature_filename, + scenarios_to_run_in_feature=None, lock=None, + shared_removed_scenarios=None): """ Process the XML feature and update the JSON output. @@ -847,6 +858,7 @@ def processing_xml_feature(json_output, scenario, feature_filename, lock=None, s json_output (dict): JSON output of the test execution. scenario (Scenario): Scenario object. feature_filename (str): Name of the feature file. + scenarios_to_run_in_feature (int): Number of scenarios to run in the feature. lock (Lock): Multiprocessing lock. shared_removed_scenarios (dict): Shared dictionary of removed scenarios. 
""" @@ -896,7 +908,10 @@ def processing_xml_feature(json_output, scenario, feature_filename, lock=None, s removed_scenarios = 0 if shared_removed_scenarios and feature_filename in shared_removed_scenarios: removed_scenarios = shared_removed_scenarios[feature_filename] - total_scenarios = len_scenarios(feature_filename) - removed_scenarios + if scenarios_to_run_in_feature is None: + total_scenarios = len_scenarios(feature_filename) - removed_scenarios + else: + total_scenarios = scenarios_to_run_in_feature - removed_scenarios if len(processed_feature_data['scenarios']) == total_scenarios: try: report_xml.export_feature_to_xml(processed_feature_data, False) From bd652c9548341540f6b02faed5f3ada3af67288a Mon Sep 17 00:00:00 2001 From: anibalinn Date: Tue, 17 Sep 2024 11:09:58 -0300 Subject: [PATCH 05/14] Additional improvements when managing parallel processes to wait for all processes to complete --- behavex/runner.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/behavex/runner.py b/behavex/runner.py index 63da44c..69d340b 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -167,7 +167,7 @@ def setup_running_failures(args_parsed): return EXIT_OK, None -def init_multiprocessing(idQueue): +def init_multiprocessing(idQueue, parallel_delay): """Initialize multiprocessing by ignoring SIGINT signals.""" signal.signal(signal.SIGINT, signal.SIG_IGN) # Retrieve one of the unique IDs @@ -223,7 +223,7 @@ def launch_behavex(): parallel_delay = get_param('parallel_delay') process_pool = ProcessPoolExecutor(max_workers=parallel_processes, initializer=init_multiprocessing, - initargs=(idQueue,)) + initargs=(idQueue, parallel_delay)) global_vars.execution_start_time = time.time() try: config = ConfigRun() @@ -471,6 +471,7 @@ def launch_by_feature(features, if global_vars.progress_bar_instance: global_vars.progress_bar_instance.update() print_parallel('feature.running_parallels') + parallel_processes = [] for parallel_feature in 
parallel_features: feature_filename = parallel_feature["feature_filename"] feature_json_skeleton = parallel_feature["feature_json_skeleton"] @@ -484,11 +485,14 @@ def launch_by_feature(features, config=ConfigRun(), lock=lock, shared_removed_scenarios=None) + parallel_processes.append(future) future.add_done_callback(create_execution_complete_callback_function( execution_codes, json_reports, global_vars.progress_bar_instance, )) + for parallel_process in parallel_processes: + parallel_process.result() return execution_codes, json_reports @@ -573,6 +577,7 @@ def launch_by_scenario(features, global_vars.progress_bar_instance.update() if parallel_scenarios: print_parallel('scenario.running_parallels') + parallel_processes = [] for features_path in parallel_scenarios.keys(): for scenario_information in parallel_scenarios[features_path]: scenarios_to_run_in_feature = total_scenarios_to_run[scenario_information["feature_filename"]] @@ -590,11 +595,14 @@ def launch_by_scenario(features, lock=lock, shared_removed_scenarios=shared_removed_scenarios ) + parallel_processes.append(future) future.add_done_callback(create_execution_complete_callback_function( execution_codes, json_reports, global_vars.progress_bar_instance )) + for parallel_process in parallel_processes: + parallel_process.result() return execution_codes, json_reports From ff7892d190d486deb583f36912c1f1e62492d42f Mon Sep 17 00:00:00 2001 From: anibalinn Date: Tue, 17 Sep 2024 11:21:15 -0300 Subject: [PATCH 06/14] updating pyproject.toml to set dependencies as a list --- pyproject.toml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0f14272..d6c4e34 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "behavex" -version = "4.0.2" +version = "4.0.4rc1" description = "Agile test wrapper on top of Behave (BDD)." 
readme = "README.md" license = { text = "MIT" } authors = [ { name = "Hernan Rey", email = "behavex_users@googlegroups.com" } ] requires-python = ">=3.5" +dependencies = [ + "behave==1.2.6", + "behavex-images", + "jinja2", + "configobj", + "htmlmin", + "csscompressor" +] classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", @@ -33,14 +41,6 @@ classifiers = [ [project.urls] homepage = "https://github.com/hrcorval/behavex" -[project.dependencies] -behave = "==1.2.6" -behavex-images = "*" -jinja2 = "*" -configobj = "*" -htmlmin = "*" -csscompressor = "*" - [project.optional-dependencies] dev = [ "pytest", From 38478fafef396775032c49772979c2f6511313f6 Mon Sep 17 00:00:00 2001 From: anibalinn Date: Tue, 17 Sep 2024 11:25:43 -0300 Subject: [PATCH 07/14] Updating project.scripts in pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d6c4e34..ebf0eaf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,5 +52,5 @@ dev = [ -[tool.setuptools.entry-points.console_scripts] +[project.scripts] behavex = "behavex.runner:main" From 9301578841609a5c65357206094bc8233e0b512e Mon Sep 17 00:00:00 2001 From: anibalinn Date: Tue, 17 Sep 2024 11:57:49 -0300 Subject: [PATCH 08/14] Catching errors in init_multiprocessing --- behavex/runner.py | 21 ++++++++++++--------- pyproject.toml | 4 ++-- setup.py | 4 ++-- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/behavex/runner.py b/behavex/runner.py index 69d340b..b78461b 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -168,15 +168,18 @@ def setup_running_failures(args_parsed): def init_multiprocessing(idQueue, parallel_delay): - """Initialize multiprocessing by ignoring SIGINT signals.""" - signal.signal(signal.SIGINT, signal.SIG_IGN) - # Retrieve one of the unique IDs - worker_id = idQueue.get() - # Use the unique ID
to name the process - multiprocessing.current_process().name = f'behave_worker-{worker_id}' - # Add delay - if parallel_delay > 0: - time.sleep(parallel_delay / 1000.0) + try: + signal.signal(signal.SIGINT, signal.SIG_IGN) + # Retrieve one of the unique IDs + worker_id = idQueue.get() + # Use the unique ID to name the process + multiprocessing.current_process().name = f'behave_worker-{worker_id}' + # Add an initial delay to avoid all processes starting at the same time + if parallel_delay > 0: + time.sleep(parallel_delay / 1000.0) + except Exception as e: + logging.error(f"Exception in init_multiprocessing: {e}") + raise def launch_behavex(): diff --git a/pyproject.toml b/pyproject.toml index ebf0eaf..2053294 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,8 +4,8 @@ build-backend = "setuptools.build_meta" [project] name = "behavex" -version = "4.0.4rc1" -description = "Agile test wrapper on top of Behave (BDD)." +version = "4.0.4rc2" +description = "Agile testing framework on top of Behave (BDD)." 
readme = "README.md" license = { text = "MIT" } authors = [ diff --git a/setup.py b/setup.py index 82ff1c1..627d40c 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name='behavex', - version='4.0.4rc1', + version='4.0.4rc2', license="MIT", platforms=['any'], python_requires='>=3.5', @@ -15,7 +15,7 @@ url='https://github.com/hrcorval/behavex', packages=find_packages(exclude=['tests']), include_package_data=True, - description='Agile test wrapper on top of Behave (BDD).', + description='Agile testing framework on top of Behave (BDD).', long_description_content_type='text/markdown', long_description=long_description, entry_points={ From 9ca6a31948493ac8e4d74ec06a07ef4b232d22e0 Mon Sep 17 00:00:00 2001 From: anibalinn Date: Tue, 17 Sep 2024 12:15:23 -0300 Subject: [PATCH 09/14] Adding try catch implementation to properly identify the source of issues in execute_tests function --- behavex/runner.py | 132 +++++++++++++++++++++++++--------------------- 1 file changed, 73 insertions(+), 59 deletions(-) diff --git a/behavex/runner.py b/behavex/runner.py index b78461b..2205c53 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -495,7 +495,12 @@ def launch_by_feature(features, global_vars.progress_bar_instance, )) for parallel_process in parallel_processes: - parallel_process.result() + try: + parallel_process.result() + except Exception as e: + logging.error(f"Exception in parallel_process: {e}") + process_pool.shutdown(wait=False, cancel_futures=True) + raise return execution_codes, json_reports @@ -605,7 +610,12 @@ def launch_by_scenario(features, global_vars.progress_bar_instance )) for parallel_process in parallel_processes: - parallel_process.result() + try: + parallel_process.result() + except Exception as e: + logging.error(f"Exception in parallel_process: {e}") + process_pool.shutdown(wait=False, cancel_futures=True) + raise return execution_codes, json_reports @@ -636,67 +646,71 @@ def execute_tests( Returns: tuple: Execution code and JSON 
report. """ - behave_args = None - if multiprocess: - ExecutionSingleton._instances[ConfigRun] = config - extend_behave_hooks() try: - # Execution ID is only important for multiprocessing so that - # we can influence where output files end up - execution_id = json.loads(feature_json_skeleton or '{}').get('id') - behave_args = _set_behave_arguments(features_path=features_path, - multiprocess=multiprocess, - execution_id=execution_id, - feature=feature_filename, - scenario=scenario_name, - config=config) - except Exception as exception: - traceback.print_exc() - print(exception) - execution_code, generate_report = _launch_behave(behave_args) - # print("pipenv run behave {} --> Execution Code: {} --> Generate Report: {}".format(" ".join(behave_args), execution_code, generate_report)) - if generate_report: - # print execution code - if execution_code == 2: - if feature_json_skeleton: - json_output = {'environment': [], - 'features': [json.loads(feature_json_skeleton)], - 'steps_definition': []} - for skeleton_feature in json_output["features"]: - if scenario_name: - for skeleton_scenario in skeleton_feature["scenarios"]: - if scenario_name_matching(scenario_name, skeleton_scenario['name']): + behave_args = None + if multiprocess: + ExecutionSingleton._instances[ConfigRun] = config + extend_behave_hooks() + try: + # Execution ID is only important for multiprocessing so that + # we can influence where output files end up + execution_id = json.loads(feature_json_skeleton or '{}').get('id') + behave_args = _set_behave_arguments(features_path=features_path, + multiprocess=multiprocess, + execution_id=execution_id, + feature=feature_filename, + scenario=scenario_name, + config=config) + except Exception as exception: + traceback.print_exc() + print(exception) + execution_code, generate_report = _launch_behave(behave_args) + # print("pipenv run behave {} --> Execution Code: {} --> Generate Report: {}".format(" ".join(behave_args), execution_code, generate_report)) + if 
generate_report: + # print execution code + if execution_code == 2: + if feature_json_skeleton: + json_output = {'environment': [], + 'features': [json.loads(feature_json_skeleton)], + 'steps_definition': []} + for skeleton_feature in json_output["features"]: + if scenario_name: + for skeleton_scenario in skeleton_feature["scenarios"]: + if scenario_name_matching(scenario_name, skeleton_scenario['name']): + skeleton_scenario['status'] = 'failed' + skeleton_scenario['error_msg'] = get_text('scenario.execution_crashed') + else: + skeleton_feature['status'] = 'failed' + skeleton_feature['error_msg'] = 'Execution crashed. No outputs could be generated.' + for skeleton_scenario in skeleton_feature["scenarios"]: skeleton_scenario['status'] = 'failed' - skeleton_scenario['error_msg'] = get_text('scenario.execution_crashed') - else: - skeleton_feature['status'] = 'failed' - skeleton_feature['error_msg'] = 'Execution crashed. No outputs could be generated.' - for skeleton_scenario in skeleton_feature["scenarios"]: - skeleton_scenario['status'] = 'failed' - skeleton_scenario['error_msg'] = get_text('feature.execution_crashed') + skeleton_scenario['error_msg'] = get_text('feature.execution_crashed') + else: + json_output = {'environment': [], 'features': [], 'steps_definition': []} else: - json_output = {'environment': [], 'features': [], 'steps_definition': []} + json_output = dump_json_results() + if scenario_name: + json_output['features'] = filter_feature_executed(json_output, + text(feature_filename), + scenario_name) + if len(json_output['features']) == 0 or len(json_output['features'][0]['scenarios']) == 0: + # Adding scenario data if the test was removed from the execution (setting it as "Untested") + json_output['features'] = [json.loads(feature_json_skeleton)] + try: + processing_xml_feature(json_output=json_output, + scenario=scenario_name, + feature_filename=feature_filename, + scenarios_to_run_in_feature=scenarios_to_run_in_feature, + lock=lock, + 
shared_removed_scenarios=shared_removed_scenarios) + except Exception as ex: + logging.exception("There was a problem processing the xml file: {}".format(ex)) else: - json_output = dump_json_results() - if scenario_name: - json_output['features'] = filter_feature_executed(json_output, - text(feature_filename), - scenario_name) - if len(json_output['features']) == 0 or len(json_output['features'][0]['scenarios']) == 0: - # Adding scenario data if the test was removed from the execution (setting it as "Untested") - json_output['features'] = [json.loads(feature_json_skeleton)] - try: - processing_xml_feature(json_output=json_output, - scenario=scenario_name, - feature_filename=feature_filename, - scenarios_to_run_in_feature=scenarios_to_run_in_feature, - lock=lock, - shared_removed_scenarios=shared_removed_scenarios) - except Exception as ex: - logging.exception("There was a problem processing the xml file: {}".format(ex)) - else: - json_output = {'environment': [], 'features': [], 'steps_definition': []} - return execution_code, join_feature_reports(json_output) + json_output = {'environment': [], 'features': [], 'steps_definition': []} + return execution_code, join_feature_reports(json_output) + except Exception as e: + logging.error(f"Exception in execute_tests: {e}") + raise def filter_feature_executed(json_output, filename, scenario_name): From 9b8258d608303e8cdd6bd7fcf021907fec25e96c Mon Sep 17 00:00:00 2001 From: anibalinn Date: Tue, 17 Sep 2024 12:16:14 -0300 Subject: [PATCH 10/14] Updating version --- pyproject.toml | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2053294..9a7565e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "behavex" -version = "4.0.4rc2" +version = "4.0.4rc3" description = "Agile testing framework on top of Behave (BDD)." 
readme = "README.md" license = { text = "MIT" } diff --git a/setup.py b/setup.py index 627d40c..eed8c10 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name='behavex', - version='4.0.4rc2', + version='4.0.4rc3', license="MIT", platforms=['any'], python_requires='>=3.5', From c256c005ba6a75a95824d27b408d2980916e2616 Mon Sep 17 00:00:00 2001 From: anibalinn Date: Tue, 17 Sep 2024 15:28:39 -0300 Subject: [PATCH 11/14] Updating parallel_delay management --- behavex/runner.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/behavex/runner.py b/behavex/runner.py index 2205c53..2247b48 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -175,8 +175,8 @@ def init_multiprocessing(idQueue, parallel_delay): # Use the unique ID to name the process multiprocessing.current_process().name = f'behave_worker-{worker_id}' # Add an initial delay to avoid all processes starting at the same time - if parallel_delay > 0: - time.sleep(parallel_delay / 1000.0) + if isinstance(parallel_delay, int) and parallel_delay > 0: + time.sleep(parallel_delay * worker_id / 1000.0) except Exception as e: logging.error(f"Exception in init_multiprocessing: {e}") raise @@ -241,6 +241,7 @@ def launch_behavex(): execution_codes, json_reports = execute_tests(features_path=all_paths, feature_filename=None, feature_json_skeleton=None, + scenarios_to_run_in_feature=None, scenario_name=None, multiprocess=False, config=config, @@ -494,13 +495,8 @@ def launch_by_feature(features, json_reports, global_vars.progress_bar_instance, )) - for parallel_process in parallel_processes: - try: - parallel_process.result() - except Exception as e: - logging.error(f"Exception in parallel_process: {e}") - process_pool.shutdown(wait=False, cancel_futures=True) - raise + for parallel_process in as_completed(parallel_processes): + parallel_process.result() return execution_codes, json_reports @@ -610,12 +606,7 @@ def launch_by_scenario(features, 
global_vars.progress_bar_instance )) for parallel_process in parallel_processes: - try: - parallel_process.result() - except Exception as e: - logging.error(f"Exception in parallel_process: {e}") - process_pool.shutdown(wait=False, cancel_futures=True) - raise + parallel_process.result() return execution_codes, json_reports From a87d2399ff551f83f288a0e93773e6e986782426 Mon Sep 17 00:00:00 2001 From: anibalinn Date: Wed, 18 Sep 2024 11:59:18 -0300 Subject: [PATCH 12/14] fix done in XML report generation, to standardize outputs when running in parallel and with a unique process --- behavex/outputs/jinja/xml.jinja2 | 10 +++++----- behavex/outputs/jinja/xml_json.jinja2 | 14 +++++++------- behavex/outputs/jinja_mgr.py | 10 +++++++--- behavex/outputs/report_utils.py | 25 ++++++++++++------------- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/behavex/outputs/jinja/xml.jinja2 b/behavex/outputs/jinja/xml.jinja2 index 53d6db4..ff43763 100644 --- a/behavex/outputs/jinja/xml.jinja2 +++ b/behavex/outputs/jinja/xml.jinja2 @@ -25,7 +25,6 @@ {%- macro print_scenario(scenario) -%} {%- set scenario_tags = scenario|get_scenario_tags-%} {%- set is_muted = ('MUTE' in scenario_tags and scenario.status == 'failed')-%} -{%- set scenario_crashed = True if (scenario.status == 'failed' and not scenario.error_background and not scenario.error_step) else False -%} {%- set errors_step = 0-%} {%- for step in scenario.steps -%} {%- set errors_step = errors_step + (0 if step.error_message else 1) -%} @@ -33,13 +32,14 @@ {%- set steps_with_exception = scenario.steps|get_list_exception_steps(scenario._background_steps) -%} + {%- set scenario_crashed = True if (scenario.status == 'failed' and steps_with_exception|length == 0) else False -%} {%- if scenario_crashed and not is_muted -%} - - - + + + {%- elif steps_with_exception and not is_muted -%} {%- for step in steps_with_exception -%} - + diff --git a/behavex/outputs/jinja/xml_json.jinja2 b/behavex/outputs/jinja/xml_json.jinja2 
index 46e5bfe..0a12201 100644 --- a/behavex/outputs/jinja/xml_json.jinja2 +++ b/behavex/outputs/jinja/xml_json.jinja2 @@ -20,7 +20,7 @@ {{'\n'}} {%- endfor -%} {%- endif -%} - {{ 16*' ' ~ step|print_step_json ~ '\n'}} + {{ 16*' ' ~ step|print_step_json|safe ~ '\n'}} {%- endmacro -%} {%- macro print_scenario(scenario) -%} {%- set scenario_tags = scenario|get_scenario_tags-%} @@ -30,13 +30,13 @@ classname={{scenario.feature|normalize|CIXC|quoteattr|safe }} > {% set step_with_exception = scenario.error_step %} {%- if scenario_crashed and not is_muted -%} - - - + + + {%- elif step_with_exception and not is_muted -%} - - + {%- endif -%} diff --git a/behavex/outputs/jinja_mgr.py b/behavex/outputs/jinja_mgr.py index c7ec0bf..0bd0e09 100644 --- a/behavex/outputs/jinja_mgr.py +++ b/behavex/outputs/jinja_mgr.py @@ -163,7 +163,11 @@ def _print_step_json(step): def get_lines_exception(step): - if step.exception: + if type(step) is dict: + return u'\n'.join( + [16 * u' ' + line for line in step['error_lines']] + ).strip() + elif step.exception: return u'\n'.join( [16 * u' ' + line for line in traceback.format_tb(step.exc_traceback)] ).strip() @@ -192,8 +196,8 @@ def _get_path_log(scenario): return path_logs -def _quoteattr(string): - return "''" if not string else quoteattr(string) +def _quoteattr(string_to_quote): + return "''" if not string_to_quote else quoteattr(string_to_quote) def _print_tag_xml(tags): diff --git a/behavex/outputs/report_utils.py b/behavex/outputs/report_utils.py index 83761b9..2b01d1d 100644 --- a/behavex/outputs/report_utils.py +++ b/behavex/outputs/report_utils.py @@ -335,23 +335,22 @@ def create_log_path(name, execution_retry=False): return path -def get_error_message(message_error): - if not message_error: +def get_error_message(error_message): + if not error_message: return u'' - if isinstance(message_error, Exception): - if hasattr(message_error, 'message'): + if isinstance(error_message, Exception): + if hasattr(error_message, 'message'): # 
noinspection PyBroadException try: - message_error = traceback.format_tb(message_error.message) + error_message = traceback.format_tb(error_message.message) except BaseException: - message_error = repr(message_error.message) - if hasattr(message_error, 'exc_traceback'): - message_error = traceback.format_tb(message_error.exc_traceback) - elif not isinstance(message_error, str): - message_error = repr(message_error) - return u'\n'.join( - [16 * u' ' + line for line in text(message_error).split('\n')] - ).strip() + error_message = repr(error_message.message) + if hasattr(error_message, 'exc_traceback'): + error_message = traceback.format_tb(error_message.exc_traceback) + elif not isinstance(error_message, str): + error_message = repr(error_message) + formatted_error = u'\n'.join([16 * u' ' + line for line in text(error_message).split('\n')]).strip() + return formatted_error def text(value, encoding=None, errors=None): From 2b0e89c1d00d546f382292681129691c15d5b1dc Mon Sep 17 00:00:00 2001 From: anibalinn Date: Wed, 18 Sep 2024 12:04:30 -0300 Subject: [PATCH 13/14] Updated product version and CHANGES.rst file --- CHANGES.rst | 4 ++++ pyproject.toml | 2 +- setup.py | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 5a203ec..fcdeacc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -8,6 +8,10 @@ ENHANCEMENTS: * Added the 'worker_id' context.config.userdata parameter to allow users to identify which worker is executing every feature or scenario when running tests in parallel. `PR #121 `_ * Adding the --parallel-delay argument, to enable setting a staggered execution when running tests in parallel. `Issue #142 `_ +FIXES: + +* Standardized XML report generation for parallel and single-process runs. `Issue #144 `_ + CONTRIBUTIONS: * Contributions from `JackHerRrer `__, by providing the implementation to include the 'worker_id' context.config.userdata parameter (Thanks JackHerRrer!!) 
diff --git a/pyproject.toml b/pyproject.toml index 9a7565e..ebb83f4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "behavex" -version = "4.0.4rc3" +version = "4.0.4rc4" description = "Agile testing framework on top of Behave (BDD)." readme = "README.md" license = { text = "MIT" } diff --git a/setup.py b/setup.py index eed8c10..3b05c7a 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name='behavex', - version='4.0.4rc3', + version='4.0.4rc4', license="MIT", platforms=['any'], python_requires='>=3.5', From 0d7a86c91e271ad750fc5df29a1660e9853af66f Mon Sep 17 00:00:00 2001 From: anibalinn Date: Mon, 23 Sep 2024 16:58:50 -0300 Subject: [PATCH 14/14] updating library version to 4.0.4 --- .gitignore | 2 ++ pyproject.toml | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index a397a82..949a7ea 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,8 @@ eggs/ .eggs/ .idea .pypirc +.cursorrules +.vscode lib/ lib64/ parts/ diff --git a/pyproject.toml b/pyproject.toml index ebb83f4..fffb773 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "behavex" -version = "4.0.4rc4" +version = "4.0.4" description = "Agile testing framework on top of Behave (BDD)." readme = "README.md" license = { text = "MIT" } diff --git a/setup.py b/setup.py index 3b05c7a..397cc04 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name='behavex', - version='4.0.4rc4', + version='4.0.4', license="MIT", platforms=['any'], python_requires='>=3.5',