Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

terminal: refactor, no yellow ("boring") for non-last item #6409

Merged
merged 6 commits into from
Feb 15, 2020
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelog/6409.bugfix.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fall back to green (instead of yellow) for non-last items without previous passes, when using the colored terminal progress indicator.
144 changes: 83 additions & 61 deletions src/_pytest/terminal.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,17 @@

REPORT_COLLECTING_RESOLUTION = 0.5

# Stats categories the terminal reporter knows how to summarize and color.
# Any other non-empty category found in self.stats is treated as "unknown"
# and pushes the main color to yellow (see _determine_main_color).
KNOWN_TYPES = (
    "failed",
    "passed",
    "skipped",
    "deselected",
    "xfailed",
    "xpassed",
    "warnings",
    "error",
)


class MoreQuietAction(argparse.Action):
"""
Expand Down Expand Up @@ -247,6 +258,8 @@ def __init__(self, config: Config, file=None) -> None:
self._showfspath = None

self.stats = {} # type: Dict[str, List[Any]]
self._main_color = None # type: Optional[str]
self._known_types = None # type: Optional[List]
blueyed marked this conversation as resolved.
Show resolved Hide resolved
self.startdir = config.invocation_dir
if file is None:
file = sys.stdout
Expand Down Expand Up @@ -365,6 +378,12 @@ def section(self, title, sep="=", **kw):
def line(self, msg, **kw):
self._tw.line(msg, **kw)

def _add_stats(self, category: str, items: List) -> None:
set_main_color = category not in self.stats
blueyed marked this conversation as resolved.
Show resolved Hide resolved
self.stats.setdefault(category, []).extend(items[:])
if set_main_color:
self._set_main_color()

def pytest_internalerror(self, excrepr):
for line in str(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
def pytest_warning_captured(self, warning_message, item):
    """Convert a captured warning into a WarningReport and record it.

    Recording goes through ``_add_stats`` so the main color is kept in
    sync when the "warnings" category first appears.
    """
    # from _pytest.nodes import get_fslocation_from_item
    from _pytest.warnings import warning_record_to_str

    fslocation = warning_message.filename, warning_message.lineno
    message = warning_record_to_str(warning_message)

    nodeid = item.nodeid if item is not None else ""
    warning_report = WarningReport(
        fslocation=fslocation, message=message, nodeid=nodeid
    )
    self._add_stats("warnings", [warning_report])

def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
Expand All @@ -393,7 +411,7 @@ def pytest_plugin_registered(self, plugin):
self.write_line(msg)

def pytest_deselected(self, items):
    """Record deselected items; _add_stats keeps the main color in sync."""
    self._add_stats("deselected", items)

def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
Expand All @@ -414,7 +432,7 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
word, markup = word
else:
markup = None
self.stats.setdefault(category, []).append(rep)
self._add_stats(category, [rep])
if not letter and not word:
# probably passed setup/teardown
return
Expand Down Expand Up @@ -456,6 +474,10 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
self._tw.write(" " + line)
self.currentfspath = -2

@property
def _is_last_item(self):
    """True once every collected test has had its progress reported."""
    return len(self._progress_nodeids_reported) == self._session.testscollected

def pytest_runtest_logfinish(self, nodeid):
assert self._session
if self.verbosity <= 0 and self._show_progress_info:
Expand All @@ -465,15 +487,12 @@ def pytest_runtest_logfinish(self, nodeid):
else:
progress_length = len(" [100%]")

main_color, _ = _get_main_color(self.stats)

self._progress_nodeids_reported.add(nodeid)
is_last_item = (
len(self._progress_nodeids_reported) == self._session.testscollected
)
if is_last_item:
self._write_progress_information_filling_space(color=main_color)

if self._is_last_item:
self._write_progress_information_filling_space()
else:
main_color, _ = self._get_main_color()
w = self._width_of_current_line
past_edge = w + progress_length + 1 >= self._screen_width
if past_edge:
Expand All @@ -497,9 +516,8 @@ def _get_progress_information_message(self) -> str:
)
return " [100%]"

def _write_progress_information_filling_space(self, color=None):
if not color:
color, _ = _get_main_color(self.stats)
def _write_progress_information_filling_space(self):
color, _ = self._get_main_color()
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
Expand All @@ -524,9 +542,9 @@ def pytest_collection(self):

def pytest_collectreport(self, report: CollectReport) -> None:
if report.failed:
self.stats.setdefault("error", []).append(report)
self._add_stats("error", [report])
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
self._add_stats("skipped", [report])
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
Expand Down Expand Up @@ -909,7 +927,7 @@ def summary_stats(self):
return

session_duration = time.time() - self._sessionstarttime
(parts, main_color) = build_summary_stats_line(self.stats)
(parts, main_color) = self.build_summary_stats_line()
line_parts = []

display_sep = self.verbosity >= 0
Expand Down Expand Up @@ -1012,6 +1030,54 @@ def show_skipped(lines: List[str]) -> None:
for line in lines:
self.write_line(line)

def _get_main_color(self) -> Tuple[str, List[str]]:
if self._main_color is None or self._known_types is None or self._is_last_item:
self._set_main_color()
assert self._main_color
assert self._known_types
return self._main_color, self._known_types

def _determine_main_color(self, unknown_type_seen: bool) -> str:
stats = self.stats
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats or not self._is_last_item:
main_color = "green"
else:
main_color = "yellow"
return main_color

def _set_main_color(self) -> Tuple[str, List[str]]:
    """Recompute and cache the known types and the main summary color."""
    extra_types = []  # type: List[str]
    for category in self.stats.keys():
        # setup/teardown reports have an empty key; ignore them.
        if not category:
            continue
        if category not in KNOWN_TYPES and category not in extra_types:
            extra_types.append(category)
    self._known_types = list(KNOWN_TYPES) + extra_types
    self._main_color = self._determine_main_color(bool(extra_types))
    return self._main_color, self._known_types

def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
    """Build the ``(parts, main_color)`` pair for the final summary line.

    ``parts`` is a list of ``("N category", markup)`` tuples; the part
    whose color matches the main color is rendered bold.
    """
    main_color, known_types = self._get_main_color()

    parts = []
    for key in known_types:
        reports = self.stats.get(key, None)
        if reports:
            # Reports may opt out of the count via count_towards_summary.
            count = sum(
                1 for rep in reports if getattr(rep, "count_towards_summary", True)
            )
            color = _color_for_type.get(key, _color_for_type_default)
            markup = {color: True, "bold": color == main_color}
            parts.append(("%d %s" % _make_plural(count, key), markup))

    if not parts:
        parts = [("no tests ran", {_color_for_type_default: True})]

    return parts, main_color


def _get_pos(config, rep):
nodeid = config.cwd_relative_nodeid(rep.nodeid)
Expand Down Expand Up @@ -1100,50 +1166,6 @@ def _make_plural(count, noun):
return count, noun + "s" if count != 1 else noun


def _get_main_color(stats) -> Tuple[str, List[str]]:
known_types = (
"failed passed skipped deselected xfailed xpassed warnings error".split()
)
unknown_type_seen = False
for found_type in stats.keys():
if found_type not in known_types:
if found_type: # setup/teardown reports have an empty key, ignore them
known_types.append(found_type)
unknown_type_seen = True

# main color
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats:
main_color = "green"
else:
main_color = "yellow"

return main_color, known_types


def build_summary_stats_line(stats):
    """Build the colored summary-line parts and the overall main color."""
    main_color, known_types = _get_main_color(stats)

    parts = []
    for key in known_types:
        reports = stats.get(key)
        if not reports:
            continue
        count = sum(
            1 for rep in reports if getattr(rep, "count_towards_summary", True)
        )
        color = _color_for_type.get(key, _color_for_type_default)
        markup = {color: True, "bold": color == main_color}
        parts.append(("%d %s" % _make_plural(count, key), markup))

    if not parts:
        parts = [("no tests ran", {_color_for_type_default: True})]

    return parts, main_color


def _plugin_nameversions(plugininfo) -> List[str]:
values = [] # type: List[str]
for plugin, dist in plugininfo:
Expand Down
2 changes: 1 addition & 1 deletion testing/test_pdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -506,7 +506,7 @@ def function_1():
rest = child.read().decode("utf8")

assert "! _pytest.outcomes.Exit: Quitting debugger !" in rest
assert "= \x1b[33mno tests ran\x1b[0m\x1b[33m in" in rest
assert "= \x1b[33mno tests ran\x1b[0m\x1b[32m in" in rest
assert "BdbQuit" not in rest
assert "UNEXPECTED EXCEPTION" not in rest

Expand Down
70 changes: 59 additions & 11 deletions testing/test_terminal.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,17 +7,20 @@
import sys
import textwrap
from io import StringIO
from typing import Dict
from typing import List
from typing import Tuple

import pluggy
import py

import _pytest.config
import pytest
from _pytest.main import ExitCode
from _pytest.reports import BaseReport
from _pytest.terminal import _folded_skips
from _pytest.terminal import _get_line_with_reprcrash_message
from _pytest.terminal import _plugin_nameversions
from _pytest.terminal import build_summary_stats_line
from _pytest.terminal import getreportopt
from _pytest.terminal import TerminalReporter

Expand Down Expand Up @@ -1344,6 +1347,12 @@ def test_failure():
assert stdout.count("=== warnings summary ") == 1


@pytest.fixture(scope="session")
def tr() -> TerminalReporter:
    """Session-scoped TerminalReporter built from a default pytest config."""
    config = _pytest.config._prepareconfig()
    return TerminalReporter(config)


@pytest.mark.parametrize(
"exp_color, exp_line, stats_arg",
[
Expand Down Expand Up @@ -1431,10 +1440,10 @@ def test_failure():
),
("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": (1,)}),
(
"green",
"yellow",
[
("1 passed", {"bold": True, "green": True}),
("1 xpassed", {"bold": False, "yellow": True}),
("1 passed", {"bold": False, "green": True}),
("1 xpassed", {"bold": True, "yellow": True}),
],
{"xpassed": (1,), "passed": (1,)},
),
Expand Down Expand Up @@ -1474,26 +1483,47 @@ def test_failure():
),
],
)
def test_summary_stats(
    tr: TerminalReporter,
    exp_line: List[Tuple[str, Dict[str, bool]]],
    exp_color: str,
    stats_arg: Dict[str, List],
) -> None:
    """Check build_summary_stats_line output for each parametrized stats dict."""
    tr.stats = stats_arg

    # Fake "_is_last_item" to be True.
    class fake_session:
        testscollected = 0

    tr._session = fake_session  # type: ignore[assignment] # noqa: F821
    assert tr._is_last_item

    # Reset cache.
    tr._main_color = None

    print("Based on stats: %s" % stats_arg)
    print('Expect summary: "{}"; with color "{}"'.format(exp_line, exp_color))
    (line, color) = tr.build_summary_stats_line()
    print('Actually got: "{}"; with color "{}"'.format(line, color))
    assert line == exp_line
    assert color == exp_color


def test_skip_counting_towards_summary(tr):
    """Reports with count_towards_summary=False are excluded from the count."""

    class DummyReport(BaseReport):
        count_towards_summary = True

    r1 = DummyReport()
    r2 = DummyReport()
    tr.stats = {"failed": (r1, r2)}
    tr._main_color = None  # reset cache between calls
    res = tr.build_summary_stats_line()
    assert res == ([("2 failed", {"bold": True, "red": True})], "red")

    r1.count_towards_summary = False
    tr.stats = {"failed": (r1, r2)}
    tr._main_color = None  # reset cache between calls
    res = tr.build_summary_stats_line()
    assert res == ([("1 failed", {"bold": True, "red": True})], "red")


Expand Down Expand Up @@ -1595,6 +1625,11 @@ def test_normal(self, many_tests_files, testdir):
def test_colored_progress(self, testdir, monkeypatch):
monkeypatch.setenv("PY_COLORS", "1")
testdir.makepyfile(
test_axfail="""
import pytest
@pytest.mark.xfail
def test_axfail(): assert 0
""",
test_bar="""
import pytest
@pytest.mark.parametrize('i', range(10))
Expand All @@ -1619,13 +1654,26 @@ def test_foobar(i): raise ValueError()
[
line.format(**RE_COLORS)
for line in [
r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 50%\]{reset}",
r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 75%\]{reset}",
r"test_axfail.py {yellow}x{reset}{green} \s+ \[ 4%\]{reset}",
r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 52%\]{reset}",
r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 76%\]{reset}",
r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}",
]
]
)

# Only xfail should have yellow progress indicator.
result = testdir.runpytest("test_axfail.py")
result.stdout.re_match_lines(
[
line.format(**RE_COLORS)
for line in [
r"test_axfail.py {yellow}x{reset}{yellow} \s+ \[100%\]{reset}",
r"^{yellow}=+ ({yellow}{bold}|{bold}{yellow})1 xfailed{reset}{yellow} in ",
]
]
)

def test_count(self, many_tests_files, testdir):
testdir.makeini(
"""
Expand Down