diff --git a/README.md b/README.md
index 7c5aa1b2..aad5def0 100644
--- a/README.md
+++ b/README.md
@@ -14,6 +14,7 @@ This project is a work in progress. Some of the features that are currently avai
 * Highly readable, colourful diffs intended to be as readable as possible
 * A human readable assertion API
 * Tested on Mac OS, Linux, and Windows
+* stderr/stdout captured during test and fixture execution
 
 Planned features:
 
@@ -23,7 +24,6 @@ Planned features:
 * Code coverage with `--coverage` flag
 * Handling flaky tests with test-specific retries, timeouts
 * Integration with unittest.mock (specifics to be ironed out)
-* Capturing of stderr/stdout
 * Plugin system
 * Highlighting diffs on a per-character basis, similar to [diff-so-fancy](https://github.com/so-fancy/diff-so-fancy) (right now it's just per line)
 
diff --git a/setup.py b/setup.py
index 4394466a..68ea4751 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
 with open("README.md", "r") as fh:
     long_description = fh.read()
 
-version = "0.7.0a0"
+version = "0.8.0a0"
 
 setup(
     name="ward",
diff --git a/ward/suite.py b/ward/suite.py
index 4dde47cb..1a0304af 100644
--- a/ward/suite.py
+++ b/ward/suite.py
@@ -1,3 +1,5 @@
+import io
+from contextlib import suppress, redirect_stdout, redirect_stderr
 from dataclasses import dataclass
 from typing import Generator, List
 
@@ -22,37 +24,47 @@ def num_fixtures(self):
     def generate_test_runs(self) -> Generator[TestResult, None, None]:
         for test in self.tests:
             if test.marker == WardMarker.SKIP:
-                yield TestResult(test, TestOutcome.SKIP, None, "")
+                yield TestResult(test, TestOutcome.SKIP)
                 continue
 
+            sout, serr = io.StringIO(), io.StringIO()
             try:
-                resolved_fixtures = test.resolve_args(self.fixture_registry)
+                with redirect_stdout(sout), redirect_stderr(serr):
+                    resolved_fixtures = test.resolve_args(self.fixture_registry)
             except FixtureExecutionError as e:
-                yield TestResult(test, TestOutcome.FAIL, e)
+                yield TestResult(
+                    test, TestOutcome.FAIL, e, captured_stdout=sout.getvalue(), captured_stderr=serr.getvalue()
+                )
+                sout.close()
+                serr.close()
                 continue
 
             try:
                 resolved_vals = {k: fix.resolved_val for (k, fix) in resolved_fixtures.items()}
 
-                # Run the test
-                test(**resolved_vals)
+                # Run the test, while capturing output.
+                with redirect_stdout(sout), redirect_stderr(serr):
+                    test(**resolved_vals)
 
                 # The test has completed without exception and therefore passed
                 if test.marker == WardMarker.XFAIL:
-                    yield TestResult(test, TestOutcome.XPASS, None)
+                    yield TestResult(
+                        test, TestOutcome.XPASS, captured_stdout=sout.getvalue(), captured_stderr=serr.getvalue()
+                    )
                 else:
-                    yield TestResult(test, TestOutcome.PASS, None)
+                    yield TestResult(test, TestOutcome.PASS)
             except Exception as e:
                 if test.marker == WardMarker.XFAIL:
                     yield TestResult(test, TestOutcome.XFAIL, e)
                 else:
-                    yield TestResult(test, TestOutcome.FAIL, e)
+                    yield TestResult(
+                        test, TestOutcome.FAIL, e, captured_stdout=sout.getvalue(), captured_stderr=serr.getvalue()
+                    )
             finally:
                 for fixture in resolved_fixtures.values():
                     if fixture.is_generator_fixture:
-                        try:
+                        with suppress(RuntimeError, StopIteration):
                             fixture.cleanup()
-                        except (RuntimeError, StopIteration):
-                            # In Python 3.7, a RuntimeError is raised if we fall off the end of a generator
-                            # (instead of a StopIteration)
-                            pass
+
+                sout.close()
+                serr.close()
diff --git a/ward/terminal.py b/ward/terminal.py
index cf9ad7a7..174c4bbb 100644
--- a/ward/terminal.py
+++ b/ward/terminal.py
@@ -51,6 +51,8 @@ def output_all_test_results(
         for failure in failed_test_results:
             self.output_why_test_failed_header(failure)
             self.output_why_test_failed(failure)
+            self.output_captured_stderr(failure)
+            self.output_captured_stdout(failure)
 
         return all_results
 
@@ -77,6 +79,12 @@ def output_why_test_failed(self, test_result: TestResult):
     def output_test_run_post_failure_summary(self, test_results: List[TestResult]):
         raise NotImplementedError()
 
+    def output_captured_stderr(self, test_result: TestResult):
+        raise NotImplementedError()
+
+    def output_captured_stdout(self, test_result: TestResult):
+        raise NotImplementedError()
+
 
 def lightblack(s: str) -> str:
     return f"{Fore.LIGHTBLACK_EX}{s}{Style.RESET_ALL}"
@@ -124,7 +132,7 @@ def output_why_test_failed(self, test_result: TestResult):
         truncation_chars = self.terminal_size.width - 24
         err = test_result.error
         if isinstance(err, ExpectationFailed):
-            print(f"\n Given {truncate(repr(err.history[0].this), num_chars=truncation_chars)}")
+            print(f"\n Given {truncate(repr(err.history[0].this), num_chars=truncation_chars)}\n")
 
             for expect in err.history:
                 if expect.success:
@@ -141,7 +149,7 @@
             if err.history and err.history[-1].op == "equals":
                 expect = err.history[-1]
                 print(
-                    f"\n Showing diff of {colored('expected value', color='green')}"
+                    f"\n Showing diff of {colored('expected value', color='green')}"
                     f" vs {colored('actual value', color='red')}:\n"
                 )
 
@@ -182,6 +190,22 @@ def output_test_result_summary(self, test_results: List[TestResult], time_taken:
             f"{colored(str(outcome_counts[TestOutcome.PASS]) + ' passed', color='green')} ]"
         )
 
+    def output_captured_stderr(self, test_result: TestResult):
+        if test_result.captured_stderr:
+            stderr = colored("standard error", color="red")
+            captured_stderr_lines = test_result.captured_stderr.split("\n")
+            print(f" Captured {stderr} during test run:\n")
+            for line in captured_stderr_lines:
+                print(" " + line)
+
+    def output_captured_stdout(self, test_result: TestResult):
+        if test_result.captured_stdout:
+            stdout = colored("standard output", color="blue")
+            captured_stdout_lines = test_result.captured_stdout.split("\n")
+            print(f"\n Captured {stdout} during test run:\n")
+            for line in captured_stdout_lines:
+                print(" " + line)
+
     def generate_chart(self, num_passed, num_failed, num_skipped, num_xfail, num_unexp):
         num_tests = num_passed + num_failed + num_skipped + num_xfail + num_unexp
         pass_pct = num_passed / max(num_tests, 1)
diff --git a/ward/test_result.py b/ward/test_result.py
index d7d1a3c1..8dd3ca0f 100644
--- a/ward/test_result.py
+++ b/ward/test_result.py
@@ -19,3 +19,5 @@ class TestResult:
     outcome: TestOutcome
     error: Optional[Exception] = None
     message: str = ""
+    captured_stdout: str = ""
+    captured_stderr: str = ""