Skip to content

Commit

Permalink
terminal: refactor, no yellow ("boring") for non-last item (#6409)
Browse files Browse the repository at this point in the history
  • Loading branch information
blueyed authored Feb 15, 2020
2 parents 9785ee4 + e872532 commit 3692847
Show file tree
Hide file tree
Showing 3 changed files with 138 additions and 69 deletions.
1 change: 1 addition & 0 deletions changelog/6409.bugfix.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fall back to green (instead of yellow) for non-last items without previous passes with colored terminal progress indicator.
143 changes: 82 additions & 61 deletions src/_pytest/terminal.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,17 @@

REPORT_COLLECTING_RESOLUTION = 0.5

# Stat categories, in display order, that the summary line knows how to
# color; any other non-empty category found in ``stats`` is appended after
# these and marks the run as containing "unknown" types (see _set_main_color).
KNOWN_TYPES = (
    "failed",
    "passed",
    "skipped",
    "deselected",
    "xfailed",
    "xpassed",
    "warnings",
    "error",
)

_REPORTCHARS_DEFAULT = "fE"


Expand Down Expand Up @@ -254,6 +265,8 @@ def __init__(self, config: Config, file=None) -> None:
self._showfspath = None

self.stats = {} # type: Dict[str, List[Any]]
self._main_color = None # type: Optional[str]
self._known_types = None # type: Optional[List]
self.startdir = config.invocation_dir
if file is None:
file = sys.stdout
Expand Down Expand Up @@ -372,6 +385,12 @@ def section(self, title, sep="=", **kw):
def line(self, msg, **kw):
self._tw.line(msg, **kw)

def _add_stats(self, category: str, items: List) -> None:
set_main_color = category not in self.stats
self.stats.setdefault(category, []).extend(items[:])
if set_main_color:
self._set_main_color()

def pytest_internalerror(self, excrepr):
for line in str(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
Expand All @@ -381,15 +400,14 @@ def pytest_warning_captured(self, warning_message, item):
# from _pytest.nodes import get_fslocation_from_item
from _pytest.warnings import warning_record_to_str

warnings = self.stats.setdefault("warnings", [])
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)

nodeid = item.nodeid if item is not None else ""
warning_report = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid
)
warnings.append(warning_report)
self._add_stats("warnings", [warning_report])

def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
Expand All @@ -400,7 +418,7 @@ def pytest_plugin_registered(self, plugin):
self.write_line(msg)

def pytest_deselected(self, items):
    """Hook: record deselected items in the stats.

    NOTE(review): the scrape contained both the pre-commit line
    (``self.stats.setdefault(...)``) and the post-commit line; this is the
    post-commit body, which routes through _add_stats so the cached main
    color is refreshed.
    """
    self._add_stats("deselected", items)

def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
Expand All @@ -421,7 +439,7 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
word, markup = word
else:
markup = None
self.stats.setdefault(category, []).append(rep)
self._add_stats(category, [rep])
if not letter and not word:
# probably passed setup/teardown
return
Expand Down Expand Up @@ -463,6 +481,10 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
self._tw.write(" " + line)
self.currentfspath = -2

@property
def _is_last_item(self):
return len(self._progress_nodeids_reported) == self._session.testscollected

def pytest_runtest_logfinish(self, nodeid):
assert self._session
if self.verbosity <= 0 and self._show_progress_info:
Expand All @@ -472,15 +494,12 @@ def pytest_runtest_logfinish(self, nodeid):
else:
progress_length = len(" [100%]")

main_color, _ = _get_main_color(self.stats)

self._progress_nodeids_reported.add(nodeid)
is_last_item = (
len(self._progress_nodeids_reported) == self._session.testscollected
)
if is_last_item:
self._write_progress_information_filling_space(color=main_color)

if self._is_last_item:
self._write_progress_information_filling_space()
else:
main_color, _ = self._get_main_color()
w = self._width_of_current_line
past_edge = w + progress_length + 1 >= self._screen_width
if past_edge:
Expand All @@ -504,9 +523,8 @@ def _get_progress_information_message(self) -> str:
)
return " [100%]"

def _write_progress_information_filling_space(self, color=None):
if not color:
color, _ = _get_main_color(self.stats)
def _write_progress_information_filling_space(self):
color, _ = self._get_main_color()
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
Expand All @@ -531,9 +549,9 @@ def pytest_collection(self) -> None:

def pytest_collectreport(self, report: CollectReport) -> None:
if report.failed:
self.stats.setdefault("error", []).append(report)
self._add_stats("error", [report])
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
self._add_stats("skipped", [report])
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
Expand Down Expand Up @@ -916,7 +934,7 @@ def summary_stats(self):
return

session_duration = time.time() - self._sessionstarttime
(parts, main_color) = build_summary_stats_line(self.stats)
(parts, main_color) = self.build_summary_stats_line()
line_parts = []

display_sep = self.verbosity >= 0
Expand Down Expand Up @@ -1017,6 +1035,53 @@ def show_skipped(lines: List[str]) -> None:
for line in lines:
self.write_line(line)

def _get_main_color(self) -> Tuple[str, List[str]]:
if self._main_color is None or self._known_types is None or self._is_last_item:
self._set_main_color()
assert self._main_color
assert self._known_types
return self._main_color, self._known_types

def _determine_main_color(self, unknown_type_seen: bool) -> str:
stats = self.stats
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats or not self._is_last_item:
main_color = "green"
else:
main_color = "yellow"
return main_color

def _set_main_color(self) -> None:
    """Recompute and cache ``_known_types`` and ``_main_color``."""
    extra_types = []  # type: List[str]
    for category in self.stats.keys():
        # setup/teardown reports are filed under an empty key; skip them
        if not category:
            continue
        if category not in KNOWN_TYPES and category not in extra_types:
            extra_types.append(category)
    self._known_types = list(KNOWN_TYPES) + extra_types
    self._main_color = self._determine_main_color(bool(extra_types))

def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
    """Build the summary-line fragments and the main color.

    Returns ``(parts, main_color)`` where each part is a ``(text, markup)``
    pair such as ``("2 failed", {"red": True, "bold": True})``; only the
    category matching the main color is bolded.
    """
    main_color, known_types = self._get_main_color()

    parts = []
    for key in known_types:
        reports = self.stats.get(key)
        if not reports:
            continue
        # reports may opt out of the tally via count_towards_summary
        count = sum(
            1 for rep in reports if getattr(rep, "count_towards_summary", True)
        )
        color = _color_for_type.get(key, _color_for_type_default)
        markup = {color: True, "bold": color == main_color}
        parts.append(("%d %s" % _make_plural(count, key), markup))

    if not parts:
        parts = [("no tests ran", {_color_for_type_default: True})]

    return parts, main_color


def _get_pos(config, rep):
nodeid = config.cwd_relative_nodeid(rep.nodeid)
Expand Down Expand Up @@ -1105,50 +1170,6 @@ def _make_plural(count, noun):
return count, noun + "s" if count != 1 else noun


def _get_main_color(stats) -> Tuple[str, List[str]]:
known_types = (
"failed passed skipped deselected xfailed xpassed warnings error".split()
)
unknown_type_seen = False
for found_type in stats.keys():
if found_type not in known_types:
if found_type: # setup/teardown reports have an empty key, ignore them
known_types.append(found_type)
unknown_type_seen = True

# main color
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats:
main_color = "green"
else:
main_color = "yellow"

return main_color, known_types


def build_summary_stats_line(stats):
    """Build ``(parts, main_color)`` for the final summary line.

    Each part is a ``(text, markup)`` pair like
    ``("2 failed", {"red": True, "bold": True})``; the category whose color
    matches the main color is bolded.
    """
    main_color, known_types = _get_main_color(stats)

    parts = []
    for key in known_types:
        reports = stats.get(key)
        if not reports:
            continue
        # honor per-report opt-out via count_towards_summary
        count = sum(
            1 for rep in reports if getattr(rep, "count_towards_summary", True)
        )
        color = _color_for_type.get(key, _color_for_type_default)
        markup = {color: True, "bold": color == main_color}
        parts.append(("%d %s" % _make_plural(count, key), markup))

    if not parts:
        parts = [("no tests ran", {_color_for_type_default: True})]

    return parts, main_color


def _plugin_nameversions(plugininfo) -> List[str]:
values = [] # type: List[str]
for plugin, dist in plugininfo:
Expand Down
63 changes: 55 additions & 8 deletions testing/test_terminal.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,18 +6,21 @@
import sys
import textwrap
from io import StringIO
from typing import Dict
from typing import List
from typing import Tuple

import pluggy
import py

import _pytest.config
import pytest
from _pytest.config import ExitCode
from _pytest.pytester import Testdir
from _pytest.reports import BaseReport
from _pytest.terminal import _folded_skips
from _pytest.terminal import _get_line_with_reprcrash_message
from _pytest.terminal import _plugin_nameversions
from _pytest.terminal import build_summary_stats_line
from _pytest.terminal import getreportopt
from _pytest.terminal import TerminalReporter

Expand Down Expand Up @@ -1409,6 +1412,12 @@ def test_failure():
assert stdout.count("=== warnings summary ") == 1


@pytest.fixture(scope="session")
def tr() -> TerminalReporter:
    """Session-scoped TerminalReporter built from a freshly prepared config.

    Shared across tests; callers reset mutable state (``stats``,
    ``_main_color``) themselves before use.
    """
    config = _pytest.config._prepareconfig()
    return TerminalReporter(config)


@pytest.mark.parametrize(
"exp_color, exp_line, stats_arg",
[
Expand Down Expand Up @@ -1539,26 +1548,47 @@ def test_failure():
),
],
)
def test_summary_stats(
    tr: TerminalReporter,
    exp_line: List[Tuple[str, Dict[str, bool]]],
    exp_color: str,
    stats_arg: Dict[str, List],
) -> None:
    """Check build_summary_stats_line output for each parametrized stats dict.

    NOTE(review): the scrape kept both the pre- and post-commit signature;
    this is the post-commit version (reporter method instead of the free
    function).
    """
    tr.stats = stats_arg

    # Fake "_is_last_item" to be True.
    class fake_session:
        testscollected = 0

    tr._session = fake_session  # type: ignore[assignment] # noqa: F821
    assert tr._is_last_item

    # Reset cache so the color is recomputed from stats_arg.
    tr._main_color = None

    print("Based on stats: %s" % stats_arg)
    print('Expect summary: "{}"; with color "{}"'.format(exp_line, exp_color))
    (line, color) = tr.build_summary_stats_line()
    print('Actually got: "{}"; with color "{}"'.format(line, color))
    assert line == exp_line
    assert color == exp_color


def test_skip_counting_towards_summary(tr):
    """Reports with count_towards_summary=False are excluded from the tally.

    NOTE(review): the scrape interleaved the pre-commit free-function calls
    with the post-commit method calls; this is the post-commit version.
    """
    class DummyReport(BaseReport):
        count_towards_summary = True

    r1 = DummyReport()
    r2 = DummyReport()
    tr.stats = {"failed": (r1, r2)}
    tr._main_color = None  # reset cached color
    res = tr.build_summary_stats_line()
    assert res == ([("2 failed", {"bold": True, "red": True})], "red")

    r1.count_towards_summary = False
    tr.stats = {"failed": (r1, r2)}
    tr._main_color = None  # reset cached color
    res = tr.build_summary_stats_line()
    assert res == ([("1 failed", {"bold": True, "red": True})], "red")


Expand Down Expand Up @@ -1660,6 +1690,11 @@ def test_normal(self, many_tests_files, testdir):
def test_colored_progress(self, testdir, monkeypatch, color_mapping):
monkeypatch.setenv("PY_COLORS", "1")
testdir.makepyfile(
test_axfail="""
import pytest
@pytest.mark.xfail
def test_axfail(): assert 0
""",
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(10))
Expand All @@ -1683,13 +1718,25 @@ def test_foobar(i): raise ValueError()
result.stdout.re_match_lines(
color_mapping.format_for_rematch(
[
r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 50%\]{reset}",
r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 75%\]{reset}",
r"test_axfail.py {yellow}x{reset}{green} \s+ \[ 4%\]{reset}",
r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 52%\]{reset}",
r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 76%\]{reset}",
r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}",
]
)
)

# Only xfail should have yellow progress indicator.
result = testdir.runpytest("test_axfail.py")
result.stdout.re_match_lines(
color_mapping.format_for_rematch(
[
r"test_axfail.py {yellow}x{reset}{yellow} \s+ \[100%\]{reset}",
r"^{yellow}=+ ({yellow}{bold}|{bold}{yellow})1 xfailed{reset}{yellow} in ",
]
)
)

def test_count(self, many_tests_files, testdir):
testdir.makeini(
"""
Expand Down

0 comments on commit 3692847

Please sign in to comment.