Commit

PR #8041 from Matan: test.info
maloel committed Dec 21, 2020
2 parents 82ca18e + c6b4ddf commit b940b84
Showing 1 changed file with 199 additions and 50 deletions.
249 changes: 199 additions & 50 deletions unit-tests/py/test.py
@@ -1,10 +1,15 @@
# This module is for formatting and writing unit-tests in python. The general format is as follows
# 1. Use start to start a test and give it, as an argument, the name of the test
# 2. Use whatever check functions are relevant to test the run
# 3. Use finish to signal the end of the test
# 4. Repeat stages 1-3 as the number of tests you want to run in the file
# 5. Use print_results_and_exit to print the number of tests and assertions that passed/failed in the correct format
# before exiting with 0 if all tests passed or with 1 if there was a failed test
"""
This module is for formatting and writing unit-tests in Python. The general format is as follows:
1. Use start to begin a test, passing it the name of the test as an argument
2. Use whatever check functions are relevant to test the run
3. Use finish to signal the end of the test
4. Repeat steps 1-3 once for each test you want to run in the file
5. Use print_results_and_exit to print the number of tests and assertions that passed/failed in the correct format,
before exiting with 0 if all tests passed or with 1 if any test failed
In addition, you may want to use the 'info' functions in this module to add more detailed
messages in case of a failed check. A minimal usage sketch follows this docstring.
"""

import os, sys, subprocess, traceback, platform
import pyrealsense2 as rs
@@ -15,10 +20,15 @@
n_failed_tests = 0
test_failed = False
test_in_progress = False
test_info = {} # Dictionary for holding additional information to print in case of a failed check.

# If this is the first time running this script we set the wanted environment, however it is impossible to change the current running
# environment so we rerun the script in a child process that inherits the environment we set
def set_env_vars(env_vars):
"""
If this is the first time running this script we set the wanted environment. However, a process cannot change its
own environment while running, so we rerun the script in a child process that inherits the environment we set.
:param env_vars: A dictionary where the keys are the names of the environment variables and the values are the
wanted values, in string form (environment variables must be strings)
"""
if len(sys.argv) < 2:
for env_var, val in env_vars.items():
os.environ[env_var] = val
@@ -33,60 +43,65 @@ def set_env_vars(env_vars):
p = subprocess.run( cmd, stderr=subprocess.PIPE, universal_newlines=True )
exit(p.returncode)
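
# A sketch of typical use (the variable name and value here are hypothetical):
#
#     test.set_env_vars( { 'RS2_DEBUG_ENV_VAR': '1' } )
#
# Note that any code before this call runs twice: once in the parent process and
# once in the re-run child.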

# Returns the first device that was found, if no device is found the test is skipped. That way we can still run
# the unit-tests when no device is connected and not fail the tests that check a connected device
def find_first_device_or_exit():
"""
:return: The first device that was found. If no device is found the test is skipped, so that we can still run
the unit-tests when no device is connected without failing the tests that require a connected device
"""
c = rs.context()
if not c.devices.size(): # if no device is connected we skip the test
print("No device found, skipping test")
exit(0)
return c.devices[0]
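
# Typical use at the top of a device test (a sketch; requires a connected camera,
# otherwise the test exits cleanly):
#
#     dev = test.find_first_device_or_exit()
#     test.start( "device reports a name" )
#     test.check( dev.supports( rs.camera_info.name ) )
#     test.finish()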

# Returns a list of devices of specific product line that was found, if no device is found the test is skipped.
# That way we can still run the unit-tests when no device is connected
# and not fail the tests that check a connected device
def find_devices_by_product_line_or_exit(product_line):
"""
:param product_line: The product line of the wanted devices
:return: A list of all connected devices of the given product line. If no such device is found the test is
skipped, so that we can still run the unit-tests when no device is connected without failing the tests
that require a connected device
"""
c = rs.context()
devices_list = c.query_devices(product_line)
if devices_list.size() == 0:
print("No device of the" , product_line ,"product line was found; skipping test")
exit(0)
return devices_list
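
# For example (assuming your pyrealsense2 build exposes rs.product_line.D400):
#
#     devices = test.find_devices_by_product_line_or_exit( rs.product_line.D400 )
#     dev = devices[0]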

# Function for printing the current call stack. Used when an assertion fails
def print_stack():
"""
Function for printing the current call stack. Used when an assertion fails
"""
for line in traceback.format_stack():
print(line)

# Function to check frame drops while streaming
def check_frame_drops(frame, previous_frame_number, allowed_drops = 0):
frame_number = frame.get_frame_number()
if previous_frame_number > 0:
dropped_frames = frame_number - (previous_frame_number + 1) # should be 0 in windows, less than 5 in linux
if dropped_frames > allowed_drops:
print(dropped_frames, "frame(s) starting from frame", previous_frame_number + 1, "were dropped")
fail()
if dropped_frames < 0:
print("Frames repeated or out of order. Got frame", frame_number, "after frame",
previous_frame_number)
fail()

# Functions for asserting test cases:
# The check family of functions tests an expression and continues the test whether the assertion succeeded or failed.
# The require family are equivalent but execution is aborted if the assertion fails.
"""
The following functions are for asserting test cases:
The check family of functions tests an expression and continues the test whether the assertion succeeded or failed.
The require family are equivalent, but execution is aborted if the assertion fails. In this module, the require
behavior is achieved by passing abort_if_failed=True to the check functions
"""

# Function for when a check fails
def check_failed():
"""
Function for when a check fails
"""
global n_failed_assertions, test_failed
n_failed_assertions += 1
test_failed = True
print_info()

def abort():
print("Abort was specified in a failed check. Aborting test")
exit(1)

# Receive an expression which is an assertion. If false the assertion failed.
def check(exp, abort_if_failed = False):
"""
Basic function for asserting expressions.
:param exp: An expression to be asserted; if False, the assertion fails
:param abort_if_failed: If True and the assertion fails, the test is aborted
:return: True if assertion passed, False otherwise
"""
global n_assertions
n_assertions += 1
if not exp:
@@ -96,40 +111,64 @@ def check(exp, abort_if_failed = False):
if abort_if_failed:
abort()
return False
reset_info()
return True
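
# For example ('frames' and 'fps' are hypothetical variables from the test):
#
#     test.check( len(frames) > 0 )
#     test.check( fps > 0, abort_if_failed = True )   # no point continuing if this fails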

# Receives the resulted value and the expected value and asserts they are equal
def check_equal(result, expected, abort_if_failed = False):
check(type(expected) != list)
"""
Used for asserting a variable has the expected value
:param result: The actual value of a variable
:param expected: The expected value of the variable
:param abort_if_failed: If True and the assertion fails, the test is aborted
:return: True if assertion passed, False otherwise
"""
if type(expected) == list:
print("check_equal should not be used for lists. Use check_equal_lists instead")
if abort_if_failed:
abort()
return False
global n_assertions
n_assertions += 1
if result != expected:
print("Result was:", result ,"\nBut we expected: ", expected)
print("Result was:" + result + "\nBut we expected: " + expected)
check_failed()
print_stack()
if abort_if_failed:
abort()
return False
reset_info()
return True
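
# For example, using a device obtained from find_first_device_or_exit (a sketch;
# the expected value is illustrative):
#
#     test.check_equal( dev.get_info( rs.camera_info.product_line ), "D400" )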

# This function should never be reached
def unreachable( abort_if_failed = False ):
"""
Used to assert that a certain section of code (e.g. the body of an if block) is not reached
:param abort_if_failed: If True and this function is reached, the test is aborted
"""
check(False, abort_if_failed)

# This function should be put in except blocks that should not be reached.
# It's different from unreachable because it expects to be in an except block and prints the stack of the error
# and not the call-stack for this function
def unexpected_exception( abort_if_failed = False ):
"""
Used to assert that an except block is not reached. It differs from unreachable in that it expects to be called
from within an except block, so it prints the traceback of the caught error rather than this function's call-stack
:param abort_if_failed: If True and this function is reached, the test is aborted
"""
global n_assertions
n_assertions += 1
check_failed()
traceback.print_exc( file = sys.stdout )
if abort_if_failed:
abort()
reset_info()
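
# Typical use, guarding code that should not throw (a sketch):
#
#     try:
#         c = rs.context()
#     except Exception:
#         test.unexpected_exception()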

# Receives 2 lists and asserts they are identical. python "equality" (using ==) requires same length & elements
# but not necessarily same ordering. Here we require exactly the same, including ordering.
def check_equal_lists(result, expected, abort_if_failed = False):
"""
Used to assert that 2 lists are identical: same length, same elements, in the same order. Python list
"equality" (using ==) already requires all of this; the added value here is reporting how the lists differ
when the assertion fails.
:param result: The actual list
:param expected: The expected list
:param abort_if_failed: If True and the assertion fails, the test is aborted
:return: True if assertion passed, False otherwise
"""
global n_assertions
n_assertions += 1
failed = False
@@ -152,34 +191,141 @@ def check_equal_lists(result, expected, abort_if_failed = False):
if abort_if_failed:
abort()
return False
reset_info()
return True
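
# For example (self-contained):
#
#     test.check_equal_lists( [x * 2 for x in (1, 2, 3)], [2, 4, 6] )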

# Receives an exception and asserts its type and message are as expected. This function is called with a caught exception, inside an except block.
def check_exception(exception, expected_type, expected_msg = None, abort_if_failed = False):
check_equal(type(exception), expected_type, abort_if_failed)
if expected_msg:
check_equal(str(exception), expected_msg, abort_if_failed)
"""
Used to assert that a caught exception is of the expected type (and optionally carries the expected message).
Call this from within the except block
:param exception: The exception that was raised
:param expected_type: The expected type of the exception
:param expected_msg: The expected message in the exception (optional)
:param abort_if_failed: If True and the assertion fails, the test is aborted
:return: True if assertion passed, False otherwise
"""
failed = False
if type(exception) != expected_type:
print("Raised exception was of type", type(exception), "and not of type", expected_type, "as expected")
failed = True
if expected_msg and str(exception) != expected_msg:
print("Exception had message:", str(exception), "\nBut we expected:", expected_msg)
failed = True
if failed:
check_failed()
print_stack()
if abort_if_failed:
abort()
return False
reset_info()
return True
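
# For example, forcing a ValueError on purpose (pure Python, self-contained):
#
#     try:
#         int( "not a number" )
#         test.unreachable()
#     except Exception as e:
#         test.check_exception( e, ValueError )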

def check_frame_drops(frame, previous_frame_number, allowed_drops = 0):
"""
Used for checking frame drops while streaming
:param frame: Current frame being checked
:param previous_frame_number: Number of the previous frame
:param allowed_drops: Maximum number of frame drops we accept
:return: False if too many frames were dropped or frames were out of order, True otherwise
"""
frame_number = frame.get_frame_number()
failed = False
if previous_frame_number > 0:
dropped_frames = frame_number - (previous_frame_number + 1) # should be 0 in windows, less than 5 in linux
if dropped_frames > allowed_drops:
print(dropped_frames, "frame(s) starting from frame", previous_frame_number + 1, "were dropped")
failed = True
if dropped_frames < 0:
print("Frames repeated or out of order. Got frame", frame_number, "after frame",
previous_frame_number)
failed = True
if failed:
check_failed()
return False
reset_info()
return True
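
# A sketch of use inside a streaming loop (assumes a depth-capable device is connected):
#
#     pipeline = rs.pipeline()
#     pipeline.start()
#     last_frame_number = 0
#     for i in range(30):
#         frames = pipeline.wait_for_frames()
#         depth = frames.get_depth_frame()
#         test.check_frame_drops( depth, last_frame_number )
#         last_frame_number = depth.get_frame_number()
#     pipeline.stop()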

"""
The following functions are for adding additional information to the printed messages in case of a failed check.
"""

class Information:
"""
Class representing the information stored in test_info dictionary
"""
def __init__(self, value, persistent = False):
self.value = value
self.persistent = persistent

def info(name, value, persistent = False):
"""
This function is used to store additional information to print in case of a failed check. The information is
stored in the test_info dictionary: keys are names (strings) and values are Information objects. Unless marked
persistent, the information is erased after the next check.
If information with the given name is already stored it will be replaced
:param name: The name of the variable
:param value: The value this variable stores
:param persistent: If True, the stored information will be kept after the following check and will only be
erased at the end of the test (or when reset_info is called with persistent=True)
"""
global test_info
test_info[name] = Information(value, persistent)
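
# For example, attaching the loop iteration to any failure inside it
# ('values' is a hypothetical list being verified):
#
#     for i, value in enumerate( values ):
#         test.info( "iteration", i )
#         test.check( value >= 0 )   # if this fails, 'iteration' is printed as well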

def reset_info(persistent = False):
"""
Erases the stored information
:param persistent: If True, even the persistent information is erased
"""
global test_info
if persistent:
test_info.clear()
else:
for name in list(test_info.keys()): # iterate over a copy of the keys; we can't pop from the dict while iterating it
if not test_info[name].persistent: # erase only the non-persistent entries
test_info.pop(name)

def print_info():
global test_info
if not test_info: # No information is stored
return
print("Printing information")
for name, information in test_info.items():
print("Name:", name, " value:", information.value)
reset_info()

"""
The following functions are for formatting tests in a file
"""

# Function for manually failing a test
def fail():
"""
Function for manually failing a test, in case you need a check that does not fit any of the check functions
"""
global test_in_progress, n_failed_tests, test_failed
if not test_in_progress:
raise RuntimeError("Tried to fail a test with no test running")
if not test_failed:
n_failed_tests += 1
test_failed = True

# Functions for formatting test cases
def start(*test_name):
"""
Used at the beginning of each test to reset the global variables
:param test_name: Any number of arguments that, combined, give the name of this test
"""
global n_tests, test_failed, test_in_progress
if test_in_progress:
raise RuntimeError("Tried to start test before previous test finished. Aborting test")
n_tests += 1
test_failed = False
test_in_progress = True
reset_info(persistent=True)
print(*test_name)

def finish():
"""
Used at the end of each test to check if it passed and print the result
"""
global test_failed, n_failed_tests, test_in_progress
if not test_in_progress:
raise RuntimeError("Tried to finish a test without starting one")
@@ -191,8 +337,11 @@ def finish():
print()
test_in_progress = False

# The format has to agree with the expected format in check_log() in run-unit-tests and with the C++ format using Catch
def print_results_and_exit():
"""
Used to print the results of the tests in the file. The format has to agree with the expected format in check_log()
in run-unit-tests and with the C++ format using Catch
"""
global n_assertions, n_tests, n_failed_assertions, n_failed_tests
if n_failed_tests:
passed = n_assertions - n_failed_assertions
