Skip to content

Commit

Permalink
Merge pull request #121 from JackHerRrer/add_worker_id
Browse files Browse the repository at this point in the history
Add an ID to each instance of behave
  • Loading branch information
hrcorval authored Sep 6, 2024
2 parents 19063d7 + 997123b commit 8d7b405
Show file tree
Hide file tree
Showing 2 changed files with 29 additions and 4 deletions.
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,12 @@ Examples:
When the parallel-scheme is set by **feature**, all tests within each feature will be run sequentially.

BehaveX populates the Behave contexts with the `worker_id` user-specific data. This variable contains the id of the current behave process.

E.g., if BehaveX is started with `--parallel-processes 2`, the first instance of behave will receive `worker_id=0`, and the second instance will receive `worker_id=1`.

This variable can be accessed within the Python tests using `context.config.userdata['worker_id']`.

## Test execution reports
### HTML report
This is a friendly test execution report that contains information related to test scenarios, execution status, execution evidence and metrics. A filters bar is also provided to filter scenarios by name, tag or status.
Expand Down
27 changes: 23 additions & 4 deletions behavex/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,8 @@
include_path_match = None
include_name_match = None
scenario_lines = {}
# Define a global variable to store the lock
process_lock = None


def main():
Expand Down Expand Up @@ -167,9 +169,15 @@ def setup_running_failures(args_parsed):
return EXIT_OK, None


def init_multiprocessing():
def init_multiprocessing(lock, idQueue):
    """Initializer run once in each worker process of the ProcessPoolExecutor.

    It ignores SIGINT (so Ctrl-C is handled only by the parent process),
    stores the shared lock in the module-level ``process_lock`` global, and
    names the current process ``behave_worker-<id>`` using a unique id taken
    from the shared queue. That name suffix is later parsed elsewhere to
    derive the ``worker_id`` passed to behave.

    :param lock: manager lock shared across all worker processes.
    :param idQueue: manager queue pre-filled with one unique int id per worker.
    """
    # Let only the parent process react to keyboard interrupts.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    global process_lock
    process_lock = lock
    # Take one of the pre-allocated unique ids (avoid shadowing builtin `id`).
    worker_id = idQueue.get()
    # Encode the id in the process name so it can be recovered later.
    multiprocessing.current_process().name = f'behave_worker-{worker_id}'


def launch_behavex():
Expand Down Expand Up @@ -208,9 +216,14 @@ def launch_behavex():
# shared variable to track scenarios that should be run but seems to be removed from execution (using scenarios.remove)
shared_removed_scenarios = manager.dict()
lock = manager.Lock()
# Create a queue containing unique IDs from 0 to the number of parallel processes - 1
# These IDs will be assigned to the worker processes when they are initialized
idQueue = manager.Queue()
for i in range(parallel_processes):
idQueue.put(i)
process_pool = ProcessPoolExecutor(max_workers=parallel_processes,
initializer=init_multiprocessing(),
initargs=(lock,))
initargs=(lock, idQueue))
global_vars.execution_start_time = time.time()
try:
config = ConfigRun()
Expand Down Expand Up @@ -1008,9 +1021,13 @@ def _set_behave_arguments(features_path, multiprocess, execution_id=None, featur
scenario_outline_compatible = scenario_outline_compatible.replace(escaped_example_name, "[\\S ]*")
arguments.append('--name')
arguments.append("{}".format(scenario_outline_compatible))
name = multiprocessing.current_process().name.split('-')[-1]
worker_id = multiprocessing.current_process().name.split('-')[-1]

arguments.append('--outfile')
arguments.append(os.path.join(gettempdir(), 'stdout{}.txt'.format(name)))
arguments.append(os.path.join(gettempdir(), 'stdout{}.txt'.format(worker_id)))

arguments.append('-D')
arguments.append(f'worker_id={worker_id}')
else:
if type(features_path) is list:
for feature_path in features_path:
Expand All @@ -1025,6 +1042,8 @@ def _set_behave_arguments(features_path, multiprocess, execution_id=None, featur
arguments.append(output_folder)
arguments.append('--outfile')
arguments.append(os.path.join(output_folder, 'behave', 'behave.log'))
arguments.append('-D')
arguments.append(f'worker_id=0')
arguments.append('--no-skipped')
arguments.append('--no-junit')
run_wip_tests = False
Expand Down

0 comments on commit 8d7b405

Please sign in to comment.