_get_main_color: no yellow ("boring") for non-last item
- refactor _get_main_color/build_summary_stats_line
- factor out property _is_last_item; test_summary_stats: tr._is_last_item
- _write_progress_information_filling_space: remove color arg
- use setter for stats, handling main color
- _get_main_color: skip cache for last item
- Handle random order in test for py35.
blueyed committed Jan 29, 2020
1 parent 059bf16 commit e770330
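
In outline, the refactor routes every stats update through a single setter and caches the computed main color on the reporter; the cache is bypassed for the last item so the final summary color reflects the complete run. A minimal sketch of that shape (names mirror the diff below; the class, its constructor arguments, and the compressed color rule are simplified stand-ins, not pytest's API):

```python
from typing import Dict, List, Optional, Tuple


class ReporterSketch:
    """Simplified stand-in for TerminalReporter's new stats handling."""

    def __init__(self, testscollected: int) -> None:
        self.stats = {}  # type: Dict[str, List]
        self._main_color = None  # type: Optional[str]
        self._known_types = None  # type: Optional[List[str]]
        self._reported = 0  # stands in for len(self._progress_nodeids_reported)
        self._testscollected = testscollected

    @property
    def _is_last_item(self) -> bool:
        return self._reported == self._testscollected

    def _add_stats(self, category: str, items: List) -> None:
        # Every write to self.stats funnels through here, so the cached
        # color only needs recomputing when a new category first appears.
        set_main_color = category not in self.stats
        self.stats.setdefault(category, []).extend(items[:])
        if set_main_color:
            self._set_main_color()

    def _set_main_color(self) -> None:
        # Heavily compressed color rule -- see _set_main_color in the diff
        # below for the real one (warnings, xpassed, unknown types, ...).
        if "failed" in self.stats or "error" in self.stats:
            self._main_color = "red"
        elif "passed" in self.stats or not self._is_last_item:
            self._main_color = "green"
        else:
            self._main_color = "yellow"
        self._known_types = list(self.stats)

    def _get_main_color(self) -> Tuple[str, List[str]]:
        # Cached, except for the last item, whose color must reflect the
        # finished run.
        if self._main_color is None or self._known_types is None or self._is_last_item:
            self._set_main_color()
        return self._main_color, self._known_types
```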
Showing 3 changed files with 123 additions and 69 deletions.
1 change: 1 addition & 0 deletions changelog/6409.bugfix.rst
@@ -0,0 +1 @@
Fall back to green (instead of yellow) for non-last items without previous passes with colored terminal progress indicator.
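
Concretely, the rule this entry refers to can be sketched as a small standalone function (a hypothetical helper for illustration only; the real logic is `TerminalReporter._set_main_color` in the diff below):

```python
def main_color(stats: dict, is_last_item: bool) -> str:
    # Sketch of the color decision; the commit only adds the
    # "or not is_last_item" part of the green branch.
    if "failed" in stats or "error" in stats:
        return "red"
    if "warnings" in stats or "xpassed" in stats:
        return "yellow"
    if "passed" in stats or not is_last_item:
        return "green"  # the fix: non-last items without a pass yet are green
    return "yellow"     # a finished run with no passes still ends up yellow


assert main_color({"skipped": [object()]}, is_last_item=False) == "green"  # was yellow
assert main_color({"skipped": [object()]}, is_last_item=True) == "yellow"
```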
135 changes: 74 additions & 61 deletions src/_pytest/terminal.py
@@ -344,6 +344,8 @@ def __init__(self, config: Config, file=None) -> None:
self._showfspath = None

self.stats = {} # type: Dict[str, List[Any]]
self._main_color = None # type: Optional[str]
self._known_types = None # type: Optional[List]
self.startdir = config.invocation_dir
if file is None:
file = sys.stdout
@@ -462,6 +464,12 @@ def section(self, title, sep="=", **kw):
def line(self, msg, **kw):
self._tw.line(msg, **kw)

def _add_stats(self, category: str, items: List) -> None:
set_main_color = category not in self.stats
self.stats.setdefault(category, []).extend(items[:])
if set_main_color:
self._set_main_color()

def pytest_internalerror(self, excrepr):
for line in str(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
@@ -471,15 +479,14 @@ def pytest_warning_captured(self, warning_message, item):
# from _pytest.nodes import get_fslocation_from_item
from _pytest.warnings import warning_record_to_str

warnings = self.stats.setdefault("warnings", [])
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)

nodeid = item.nodeid if item is not None else ""
warning_report = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid
)
warnings.append(warning_report)
self._add_stats("warnings", [warning_report])

def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
@@ -490,7 +497,7 @@ def pytest_plugin_registered(self, plugin):
self.write_line(msg)

def pytest_deselected(self, items):
self.stats.setdefault("deselected", []).extend(items)
self._add_stats("deselected", items)

def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
@@ -511,7 +518,7 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
word, markup = word
else:
markup = None
self.stats.setdefault(category, []).append(rep)
self._add_stats(category, [rep])
if not letter and not word:
# probably passed setup/teardown
return
@@ -553,6 +560,10 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
self._tw.write(" " + line)
self.currentfspath = -2

@property
def _is_last_item(self):
return len(self._progress_nodeids_reported) == self._session.testscollected

def pytest_runtest_logfinish(self, nodeid):
assert self._session
if self.verbosity <= 0 and self._show_progress_info:
@@ -562,15 +573,12 @@ def pytest_runtest_logfinish(self, nodeid):
else:
progress_length = len(" [100%]")

main_color, _ = _get_main_color(self.stats)

self._progress_nodeids_reported.add(nodeid)
is_last_item = (
len(self._progress_nodeids_reported) == self._session.testscollected
)
if is_last_item:
self._write_progress_information_filling_space(color=main_color)

if self._is_last_item:
self._write_progress_information_filling_space()
else:
main_color, _ = self._get_main_color()
w = self._width_of_current_line
screen_width = self._tw.fullwidth
past_edge = w + progress_length + 1 >= screen_width
@@ -595,9 +603,8 @@ def _get_progress_information_message(self) -> str:
)
return " [100%]"

def _write_progress_information_filling_space(self, color=None):
if not color:
color, _ = _get_main_color(self.stats)
def _write_progress_information_filling_space(self):
color, _ = self._get_main_color()
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
@@ -622,9 +629,9 @@ def pytest_collection(self):

def pytest_collectreport(self, report: CollectReport) -> None:
if report.failed:
self.stats.setdefault("error", []).append(report)
self._add_stats("error", [report])
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
self._add_stats("skipped", [report])
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
@@ -1023,7 +1030,7 @@ def _outrep_summary(self, rep):

def summary_stats(self, session: Session) -> None:
session_duration = time.time() - self._sessionstarttime
(parts, main_color) = build_summary_stats_line(self.stats)
(parts, main_color) = self.build_summary_stats_line()
line_parts = []

display_sep = self.verbosity >= 0
@@ -1133,6 +1140,56 @@ def show_skipped(lines: List[str]) -> None:
for line in lines:
self.write_line(line)

def _get_main_color(self) -> Tuple[str, List[str]]:
if self._main_color is None or self._known_types is None or self._is_last_item:
self._set_main_color()
assert self._main_color
assert self._known_types
return self._main_color, self._known_types

def _set_main_color(self) -> Tuple[str, List[str]]:
stats = self.stats
known_types = (
"failed passed skipped deselected xfailed xpassed warnings error".split()
)
unknown_type_seen = False
for found_type in stats.keys():
if found_type not in known_types:
if found_type: # setup/teardown reports have an empty key, ignore them
known_types.append(found_type)
unknown_type_seen = True

# main color
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats or not self._is_last_item:
main_color = "green"
else:
main_color = "yellow"
self._main_color, self._known_types = main_color, known_types
return main_color, known_types

def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
main_color, known_types = self._get_main_color()

parts = []
for key in known_types:
reports = self.stats.get(key, None)
if reports:
count = sum(
1 for rep in reports if getattr(rep, "count_towards_summary", True)
)
color = _color_for_type.get(key, _color_for_type_default)
markup = {color: True, "bold": color == main_color}
parts.append(("%d %s" % _make_plural(count, key), markup))

if not parts:
parts = [("no tests ran", {_color_for_type_default: True})]

return parts, main_color


def _get_pos(config, rep):
nodeid = config.cwd_relative_nodeid(rep.nodeid)
@@ -1270,50 +1327,6 @@ def _make_plural(count, noun):
return count, noun + "s" if count != 1 else noun


def _get_main_color(stats) -> Tuple[str, List[str]]:
known_types = (
"failed passed skipped deselected xfailed xpassed warnings error".split()
)
unknown_type_seen = False
for found_type in stats.keys():
if found_type not in known_types:
if found_type: # setup/teardown reports have an empty key, ignore them
known_types.append(found_type)
unknown_type_seen = True

# main color
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats:
main_color = "green"
else:
main_color = "yellow"

return main_color, known_types


def build_summary_stats_line(stats):
main_color, known_types = _get_main_color(stats)

parts = []
for key in known_types:
reports = stats.get(key, None)
if reports:
count = sum(
1 for rep in reports if getattr(rep, "count_towards_summary", True)
)
color = _color_for_type.get(key, _color_for_type_default)
markup = {color: True, "bold": color == main_color}
parts.append(("%d %s" % _make_plural(count, key), markup))

if not parts:
parts = [("no tests ran", {_color_for_type_default: True})]

return parts, main_color


def _plugin_nameversions(plugininfo) -> List[str]:
values = [] # type: List[str]
for plugin, dist in plugininfo:
56 changes: 48 additions & 8 deletions testing/test_terminal.py
@@ -11,14 +11,14 @@
import pluggy
import py

import _pytest.config
import pytest
from _pytest.main import ExitCode
from _pytest.pytester import Testdir
from _pytest.reports import BaseReport
from _pytest.terminal import _folded_skips
from _pytest.terminal import _get_line_with_reprcrash_message
from _pytest.terminal import _plugin_nameversions
from _pytest.terminal import build_summary_stats_line
from _pytest.terminal import getreportopt
from _pytest.terminal import TerminalReporter

@@ -1422,6 +1422,12 @@ def test_failure():
assert stdout.count("=== warnings summary ") == 1


@pytest.fixture(scope="session")
def tr():
config = _pytest.config._prepareconfig()
return TerminalReporter(config)


@pytest.mark.parametrize(
"exp_color, exp_line, stats_arg",
[
@@ -1552,26 +1558,42 @@ def test_failure():
),
],
)
def test_summary_stats(exp_line, exp_color, stats_arg):
def test_summary_stats(tr, exp_line, exp_color, stats_arg):
tr.stats = stats_arg

# Fake "_is_last_item" to be True.
class fake_session:
testscollected = 0

tr._session = fake_session
assert tr._is_last_item

# Reset cache.
tr._main_color = None

print("Based on stats: %s" % stats_arg)
print('Expect summary: "{}"; with color "{}"'.format(exp_line, exp_color))
(line, color) = build_summary_stats_line(stats_arg)
(line, color) = tr.build_summary_stats_line()
print('Actually got: "{}"; with color "{}"'.format(line, color))
assert line == exp_line
assert color == exp_color


def test_skip_counting_towards_summary():
def test_skip_counting_towards_summary(tr):
class DummyReport(BaseReport):
count_towards_summary = True

r1 = DummyReport()
r2 = DummyReport()
res = build_summary_stats_line({"failed": (r1, r2)})
tr.stats = {"failed": (r1, r2)}
tr._main_color = None
res = tr.build_summary_stats_line()
assert res == ([("2 failed", {"bold": True, "red": True})], "red")

r1.count_towards_summary = False
res = build_summary_stats_line({"failed": (r1, r2)})
tr.stats = {"failed": (r1, r2)}
tr._main_color = None
res = tr.build_summary_stats_line()
assert res == ([("1 failed", {"bold": True, "red": True})], "red")


@@ -1673,6 +1695,11 @@ def test_normal(self, many_tests_files, testdir):
def test_colored_progress(self, testdir, monkeypatch):
monkeypatch.setenv("PY_COLORS", "1")
testdir.makepyfile(
test_axfail="""
import pytest
@pytest.mark.xfail
def test_axfail(): assert 0
""",
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(10))
@@ -1697,13 +1724,26 @@ def test_foobar(i): raise ValueError()
[
line.format(**RE_COLORS)
for line in [
r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 50%\]{reset}",
r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 75%\]{reset}",
r"test_axfail.py {yellow}x{reset}{green} \s+ \[ 4%\]{reset}",
r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 52%\]{reset}",
r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 76%\]{reset}",
r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}",
]
]
)

# Only xfail should have yellow progress indicator.
result = testdir.runpytest("test_axfail.py")
result.stdout.re_match_lines(
[
line.format(**RE_COLORS)
for line in [
r"test_axfail.py {yellow}x{reset}{yellow} \s+ \[100%\]{reset}",
r"^{yellow}=+ ({yellow}{bold}|{bold}{yellow})1 xfailed{reset}{yellow} in ",
]
]
)

def test_count(self, many_tests_files, testdir):
testdir.makeini(
"""
