Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix doc test parsing #20

Merged
merged 2 commits into from
Sep 26, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 24 additions & 19 deletions src/parsing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,13 @@ pub fn parse_cargo_test_with_empty_ones<'s>(
) -> impl Iterator<Item = (TestRunner<'s>, TestInfo<'s>)> {
let parsed_stderr = parse_stderr(stderr);
let parsed_stdout = parse_stdout(stdout);
let err_len = parsed_stderr.len();
let out_len = parsed_stdout.len();
assert_eq!(
parsed_stderr.len(),
parsed_stdout.len(),
"the amount of test runners from stderr should equal to that from stdout"
err_len, out_len,
"{err_len} (the amount of test runners from stderr) should \
equal to {out_len} (that from stdout)\n\
stderr = {stderr:?}\nstdout = {stdout:?}"
);
parsed_stderr.into_iter().zip(parsed_stdout)
}
Expand Down Expand Up @@ -146,7 +149,7 @@ pub struct Src<'s> {
/// Name from the path of test runner binary. The path usually starts with `target/`.
///
/// But this field contains neither the `target/...` prefix nor the hash postfix,
/// so it's possible to see same path from different crates.
/// so it's possible to see same name from different crates.
pub bin_name: Text<'s>,
}

Expand Down Expand Up @@ -201,8 +204,9 @@ fn status(ok: bool) -> ColoredString {
}

impl Stats {
/// Text at the end of root node.
pub fn inlay_string(&self) -> String {
/// Summary text at the end of root node.
/// If the metric is zero, it won't be shown.
pub fn inlay_summary_string(&self) -> String {
let Stats {
total,
passed,
Expand All @@ -213,21 +217,20 @@ impl Stats {
..
} = *self;
let time = finished_in.as_secs_f32();

let mut part = Vec::with_capacity(4);
let mut metrics = Vec::with_capacity(4);
if passed != 0 {
part.push(format!("✅ {passed}"));
metrics.push(format!("✅ {passed}"));
};
if failed != 0 {
part.push(format!("❌ {failed}").red().to_string());
metrics.push(format!("❌ {failed}").red().to_string());
};
if ignored != 0 {
part.push(format!("🔕 {ignored}"));
metrics.push(format!("🔕 {ignored}"));
};
if filtered_out != 0 {
part.push(format!("✂️ {filtered_out}"));
metrics.push(format!("✂️ {filtered_out}"));
};
format!("{total} tests in {time:.2}s: {}", part.join("; "))
format!("{total} tests in {time:.2}s: {}", metrics.join("; "))
}

/// Root of test tree node depending on the test type.
Expand All @@ -236,17 +239,18 @@ impl Stats {
"({}) {:} ... ({})",
status(self.ok),
pkg_name.blue().bold(),
self.inlay_string().bold()
self.inlay_summary_string().bold()
)
}

/// Root of test tree node depending on the test type.
pub fn subroot_string(&self, pkg_name: Text) -> String {
/// Subroot of test tree node depending on the test type.
/// Compared with `Stats::root_string`, texts except status are non-bold.
pub fn subroot_string(&self, runner_name: Text) -> String {
format!(
"({}) {} ... ({})",
status(self.ok),
pkg_name,
self.inlay_string()
runner_name,
self.inlay_summary_string()
)
}
}
Expand Down Expand Up @@ -430,7 +434,8 @@ pub fn parse_stdout(stdout: &str) -> Vec<TestInfo> {
assert_eq!(
parsed_amount_from_head, stats_total,
"the parsed amount of running tests {parsed_amount_from_head:?} \
should equal to the number in stats.total {stats_total:?}"
should equal to the number in stats.total {stats_total:?}\n\
split = {split:#?}\nparsed_stdout = {parsed_stdout:#?}"
);

split
Expand Down
15 changes: 13 additions & 2 deletions src/regex.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,19 @@ lazy_static!(pub re, Re, {
// test submod::ignore_without_reason ... ignored
// test submod::panic::should_panic - should panic ... ok
// test submod::panic::should_panic_without_reanson - should panic ... ok
//
// Doc Test: ^test (?P<file>\S+) - (?P<item>\S+) \(line \d+\)( - compile( fail)?)? ... (?P<status>\S+(, .*)?)$
// test src/doc.rs - doc (line 3) ... ok
tree: Regex::new(r"(?m)^test (?P<split>\S+( - should panic)?( - doc \(line \d+\))?) \.\.\. (?P<status>\S+(, .*)?)$").expect(RE_ERROR),
// test tests/integration/src/lib.rs - attribute::edition2018 (line 100) ... ok
// test tests/integration/src/lib.rs - attribute::ignore (line 76) ... ignored
// test tests/integration/src/lib.rs - attribute::no_run (line 86) - compile ... ok
// test tests/integration/src/lib.rs - attribute::should_compile_fail (line 90) - compile fail ... ok
// test tests/integration/src/lib.rs - attribute::should_compile_fail_but_didnt (line 96) - compile fail ... FAILED
// test tests/integration/src/lib.rs - attribute::should_panic (line 80) ... ok
// test tests/integration/src/lib.rs - empty_doc_mod (line 41) ... ok
// test tests/integration/src/lib.rs - empty_doc_mod::Item (line 48) ... ok
// test tests/integration/src/lib.rs - empty_doc_mod::private_mod (line 44) ... ok
tree: Regex::new(r"(?m)^test (?P<split>\S+( - should panic)?(?<doctest> - \S+ \(line \d+\)( - compile( fail)?)?)?) \.\.\. (?P<status>\S+(, .*)?)$").expect(RE_ERROR),
// test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
stats: Regex::new(r"(?mx)
^test\ result:\ (?P<ok>\S+)\.
Expand All @@ -55,6 +66,6 @@ lazy_static!(pub re, Re, {
\ (?P<measured>\d+)\ measured;
\ (?P<filtered>\d+)\ filtered\ out;
\ finished\ in\ (?P<time>\S+)s$").expect(RE_ERROR),
separator: "*************************************************************".yellow().bold()
separator: "────────────────────────────────────────────────────────────────────────".yellow().bold()
}
});
64 changes: 62 additions & 2 deletions tests/integration/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#![allow(clippy::should_panic_without_expect)] // 1.73.0
#![allow(clippy::should_panic_without_expect, dead_code)] // 1.73.0
#[test]
fn works() {}

Expand Down Expand Up @@ -40,4 +40,64 @@ mod submod {

/// ```
/// ```
pub mod doc {}
pub mod empty_doc_mod {
///```
///```
mod private_mod {}

///```
///```
pub struct Item;
}

pub struct Struct;

/// ```
/// let _ = integration::Struct;
/// ```
pub mod normal_doc_mod {
///```
/// let _ = integration::Struct;
///```
mod private_mod {
///```
/// let _ = integration::Struct;
///```
struct Item {}
}

///```
/// let _ = integration::Struct;
///```
pub struct Item {}
}

mod attribute {
/// ```ignore
/// ```
fn ignore() {}

/// ```should_panic
/// assert!(false);
/// ```
fn should_panic() {}

/// `no_run` attribute will compile your code but not run it
///```no_run
///```
fn no_run() {}

/// ```compile_fail
/// let x = 5;
/// x += 2; // shouldn't compile!
/// ```
fn should_compile_fail() {}

/// ```compile_fail
/// ```
fn should_compile_fail_but_didnt() {}

/// ```edition2018
/// ```
fn edition2018() {}
}
80 changes: 62 additions & 18 deletions tests/parsing.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
use cargo_pretty_test::parsing::{parse_cargo_test_with_empty_ones, TestType};
use cargo_pretty_test::{
fetch::parse_cargo_test_output,
parsing::{parse_cargo_test_with_empty_ones, TestType},
};
use insta::assert_display_snapshot;
use pretty_assertions::assert_eq;
use std::time::Duration;

const STDERR: &str = "\
Finished test [unoptimized + debuginfo] target(s) in 0.00s
Expand All @@ -11,6 +14,8 @@ const STDERR: &str = "\
Doc-tests cargo-pretty-test\
";

// Note: the doc tests are from `tests/integration`, but for simplicity, pretend they are
// for cargo-pretty-test.
const STDOUT: &str = "
running 0 tests

Expand All @@ -35,9 +40,22 @@ test snapshot_testing_for_pretty_output ... ok
test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.02s


running 0 tests

test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
running 13 tests
test src/lib.rs - attribute::edition2018 (line 100) ... ok
test src/lib.rs - attribute::ignore (line 76) ... ignored
test src/lib.rs - attribute::no_run (line 86) - compile ... ok
test src/lib.rs - attribute::should_compile_fail (line 90) - compile fail ... ok
test src/lib.rs - attribute::should_compile_fail_but_didnt (line 96) - compile fail ... FAILED
test src/lib.rs - attribute::should_panic (line 80) ... ok
test src/lib.rs - empty_doc_mod (line 41) ... ok
test src/lib.rs - empty_doc_mod::Item (line 48) ... ok
test src/lib.rs - empty_doc_mod::private_mod (line 44) ... ok
test src/lib.rs - normal_doc_mod (line 55) ... ok
test src/lib.rs - normal_doc_mod::Item (line 69) ... ok
test src/lib.rs - normal_doc_mod::private_mod (line 59) ... ok
test src/lib.rs - normal_doc_mod::private_mod::Item (line 63) ... ok

test result: ok. 11 passed; 1 failed; 1 ignored; 0 measured; 0 filtered out; finished in 0.00s
";

#[test]
Expand All @@ -60,19 +78,6 @@ fn parse_stderr_stdout() {
]
);

let total_time = 0.03;
assert!(
(parsed_tests_info
.iter()
.map(|(_, v)| v.stats.finished_in)
.sum::<Duration>()
.as_secs_f32()
- total_time)
.abs()
< f32::EPSILON,
"total time in running all tests should be {total_time}"
);

println!(
"{:#?}",
parsed_tests_info
Expand All @@ -88,3 +93,42 @@ fn parse_stderr_stdout() {
.collect::<Vec<_>>(),
);
}

#[test]
fn display_test_tree() {
let (tree, stats) = parse_cargo_test_output(STDERR, STDOUT);
assert_display_snapshot!(tree, @r###"
Generated by cargo-pretty-test
├── (OK) cargo_pretty_test ... (3 tests in 0.03s: ✅ 3)
│ ├── (OK) tests/golden_master_test.rs ... (1 tests in 0.01s: ✅ 1)
│ │ └─ ✅ golden_master_test
│ └── (OK) tests/mocking_project.rs ... (2 tests in 0.02s: ✅ 2)
│ ├─ ✅ snapshot_testing_for_parsed_output
│ └─ ✅ snapshot_testing_for_pretty_output
└── (OK) Doc Tests ... (13 tests in 0.00s: ✅ 11; ❌ 1; 🔕 1)
└── (OK) cargo-pretty-test ... (13 tests in 0.00s: ✅ 11; ❌ 1; 🔕 1)
├── src/lib.rs - attribute
│ ├─ ✅ edition2018 (line 100)
│ ├─ 🔕 ignore (line 76)
│ ├─ ✅ no_run (line 86) - compile
│ ├─ ✅ should_compile_fail (line 90) - compile fail
│ ├─ ❌ should_compile_fail_but_didnt (line 96) - compile fail
│ └─ ✅ should_panic (line 80)
├── src/lib.rs - empty_doc_mod
│ ├─ ✅ Item (line 48)
│ └─ ✅ private_mod (line 44)
├─ ✅ src/lib.rs - empty_doc_mod (line 41)
├── src/lib.rs - normal_doc_mod
│ ├─ ✅ Item (line 69)
│ ├── private_mod
│ │ └─ ✅ Item (line 63)
│ └─ ✅ private_mod (line 59)
└─ ✅ src/lib.rs - normal_doc_mod (line 55)
"###);

let total_time = 0.03;
assert!(
(stats.finished_in.as_secs_f32() - total_time).abs() < f32::EPSILON,
"total time in running all tests should be {total_time}"
);
}