Merge pull request #42 from darrenburns/capturing-output
Initial implementation of output capturing
darrenburns authored Oct 18, 2019
2 parents b4f11a2 + eaf9a4e commit 1f5ebe8
Showing 5 changed files with 55 additions and 17 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -14,6 +14,7 @@ This project is a work in progress. Some of the features that are currently avai
 * Highly readable, colourful diffs intended to be as readable as possible
 * A human readable assertion API
 * Tested on Mac OS, Linux, and Windows
+* stderr/stdout captured during test and fixture execution
 
 Planned features:
 
@@ -23,7 +24,6 @@ Planned features:
 * Code coverage with `--coverage` flag
 * Handling flaky tests with test-specific retries, timeouts
 * Integration with unittest.mock (specifics to be ironed out)
-* Capturing of stderr/stdout
 * Plugin system
 * Highlighting diffs on a per-character basis, similar to [diff-so-fancy](https://github.com/so-fancy/diff-so-fancy) (right now it's just per line)
2 changes: 1 addition & 1 deletion setup.py
@@ -3,7 +3,7 @@
 with open("README.md", "r") as fh:
     long_description = fh.read()
 
-version = "0.7.0a0"
+version = "0.8.0a0"
 
 setup(
     name="ward",
38 changes: 25 additions & 13 deletions ward/suite.py
@@ -1,3 +1,5 @@
+import io
+from contextlib import suppress, redirect_stdout, redirect_stderr
 from dataclasses import dataclass
 from typing import Generator, List
 
@@ -22,37 +24,47 @@ def num_fixtures(self):
     def generate_test_runs(self) -> Generator[TestResult, None, None]:
         for test in self.tests:
             if test.marker == WardMarker.SKIP:
-                yield TestResult(test, TestOutcome.SKIP, None, "")
+                yield TestResult(test, TestOutcome.SKIP)
                 continue
 
+            sout, serr = io.StringIO(), io.StringIO()
             try:
-                resolved_fixtures = test.resolve_args(self.fixture_registry)
+                with redirect_stdout(sout), redirect_stderr(serr):
+                    resolved_fixtures = test.resolve_args(self.fixture_registry)
             except FixtureExecutionError as e:
-                yield TestResult(test, TestOutcome.FAIL, e)
+                yield TestResult(
+                    test, TestOutcome.FAIL, e, captured_stdout=sout.getvalue(), captured_stderr=serr.getvalue()
+                )
+                sout.close()
+                serr.close()
                 continue
             try:
                 resolved_vals = {k: fix.resolved_val for (k, fix) in resolved_fixtures.items()}
 
-                # Run the test
-                test(**resolved_vals)
+                # Run the test, while capturing output.
+                with redirect_stdout(sout), redirect_stderr(serr):
+                    test(**resolved_vals)
 
                 # The test has completed without exception and therefore passed
                 if test.marker == WardMarker.XFAIL:
-                    yield TestResult(test, TestOutcome.XPASS, None)
+                    yield TestResult(
+                        test, TestOutcome.XPASS, captured_stdout=sout.getvalue(), captured_stderr=serr.getvalue()
+                    )
                 else:
-                    yield TestResult(test, TestOutcome.PASS, None)
+                    yield TestResult(test, TestOutcome.PASS)
 
             except Exception as e:
                 if test.marker == WardMarker.XFAIL:
                     yield TestResult(test, TestOutcome.XFAIL, e)
                 else:
-                    yield TestResult(test, TestOutcome.FAIL, e)
+                    yield TestResult(
+                        test, TestOutcome.FAIL, e, captured_stdout=sout.getvalue(), captured_stderr=serr.getvalue()
+                    )
             finally:
                 for fixture in resolved_fixtures.values():
                     if fixture.is_generator_fixture:
-                        try:
+                        with suppress(RuntimeError, StopIteration):
                             fixture.cleanup()
-                        except (RuntimeError, StopIteration):
-                            # In Python 3.7, a RuntimeError is raised if we fall off the end of a generator
-                            # (instead of a StopIteration)
-                            pass
+
+            sout.close()
+            serr.close()
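
The capture machinery above is entirely standard library: contextlib.redirect_stdout and redirect_stderr temporarily point sys.stdout/sys.stderr at io.StringIO buffers while fixtures resolve and the test body runs, and contextlib.suppress replaces the old try/except around generator-fixture cleanup. A minimal sketch of the same pattern outside Ward (failing_test and its prints are hypothetical):

import io
import sys
from contextlib import redirect_stdout, redirect_stderr, suppress

def failing_test():
    # Hypothetical test body: writes to both streams, then fails.
    print("some debug output")
    print("something went wrong", file=sys.stderr)
    assert 1 == 2

sout, serr = io.StringIO(), io.StringIO()
error = None
try:
    # Both streams are swapped only for the duration of the with block.
    with redirect_stdout(sout), redirect_stderr(serr):
        failing_test()
except Exception as e:
    error = e

print("stdout:", sout.getvalue())  # "some debug output\n"
print("stderr:", serr.getvalue())  # "something went wrong\n"
print("error:", repr(error))       # AssertionError
sout.close()
serr.close()

# suppress() is the tidier spelling of the try/except/pass it replaces:
def gen_fixture():
    yield "resource"

g = gen_fixture()
next(g)
with suppress(RuntimeError, StopIteration):
    next(g)  # falling off the end of the generator raises StopIteration
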
28 changes: 26 additions & 2 deletions ward/terminal.py
@@ -51,6 +51,8 @@ def output_all_test_results(
         for failure in failed_test_results:
             self.output_why_test_failed_header(failure)
             self.output_why_test_failed(failure)
+            self.output_captured_stderr(failure)
+            self.output_captured_stdout(failure)
 
         return all_results
 
@@ -77,6 +79,12 @@ def output_why_test_failed(self, test_result: TestResult):
     def output_test_run_post_failure_summary(self, test_results: List[TestResult]):
         raise NotImplementedError()
 
+    def output_captured_stderr(self, test_result: TestResult):
+        raise NotImplementedError()
+
+    def output_captured_stdout(self, test_result: TestResult):
+        raise NotImplementedError()
+
 
 def lightblack(s: str) -> str:
     return f"{Fore.LIGHTBLACK_EX}{s}{Style.RESET_ALL}"
@@ -124,7 +132,7 @@ def output_why_test_failed(self, test_result: TestResult):
         truncation_chars = self.terminal_size.width - 24
         err = test_result.error
         if isinstance(err, ExpectationFailed):
-            print(f"\n Given {truncate(repr(err.history[0].this), num_chars=truncation_chars)}")
+            print(f"\n Given {truncate(repr(err.history[0].this), num_chars=truncation_chars)}\n")
 
             for expect in err.history:
                 if expect.success:
@@ -141,7 +149,7 @@ def output_why_test_failed(self, test_result: TestResult):
         if err.history and err.history[-1].op == "equals":
             expect = err.history[-1]
             print(
-                f"\n Showing diff of {colored('expected value', color='green')}"
+                f"\n Showing diff of {colored('expected value', color='green')}"
                 f" vs {colored('actual value', color='red')}:\n"
             )
@@ -182,6 +190,22 @@ def output_test_result_summary(self, test_results: List[TestResult], time_taken:
             f"{colored(str(outcome_counts[TestOutcome.PASS]) + ' passed', color='green')} ]"
         )
 
+    def output_captured_stderr(self, test_result: TestResult):
+        if test_result.captured_stderr:
+            stderr = colored("standard error", color="red")
+            captured_stderr_lines = test_result.captured_stderr.split("\n")
+            print(f" Captured {stderr} during test run:\n")
+            for line in captured_stderr_lines:
+                print(" " + line)
+
+    def output_captured_stdout(self, test_result: TestResult):
+        if test_result.captured_stdout:
+            stdout = colored("standard output", color="blue")
+            captured_stdout_lines = test_result.captured_stdout.split("\n")
+            print(f"\n Captured {stdout} during test run:\n")
+            for line in captured_stdout_lines:
+                print(" " + line)
+
     def generate_chart(self, num_passed, num_failed, num_skipped, num_xfail, num_unexp):
         num_tests = num_passed + num_failed + num_skipped + num_xfail + num_unexp
         pass_pct = num_passed / max(num_tests, 1)
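
For a rough standalone picture of what the two new methods print, here is the same labelled, indented dump written against colorama directly (the exact spacing, and the behaviour of this file's colored helper, are assumptions):

from colorama import Fore, Style, init

init()  # enables ANSI colours on Windows terminals

def print_captured(heading: str, colour: str, captured: str) -> None:
    # Mirrors output_captured_stderr/output_captured_stdout above:
    # skip empty captures, print a coloured heading, indent each line.
    if not captured:
        return
    print(f"\n  Captured {colour}{heading}{Style.RESET_ALL} during test run:\n")
    for line in captured.split("\n"):
        print("    " + line)

print_captured("standard error", Fore.RED, "warning: deprecated API")
print_captured("standard output", Fore.BLUE, "loading config\ndone")
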
2 changes: 2 additions & 0 deletions ward/test_result.py
@@ -19,3 +19,5 @@ class TestResult:
     outcome: TestOutcome
     error: Optional[Exception] = None
     message: str = ""
+    captured_stdout: str = ""
+    captured_stderr: str = ""
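
Assembled with this hunk's context lines, the dataclass now reads roughly as below; the test field's type and the exact TestOutcome members are assumptions inferred from the call sites in suite.py above:

from dataclasses import dataclass
from enum import Enum, auto
from typing import Optional

class TestOutcome(Enum):
    # Members inferred from usage in suite.py; the real enum may differ.
    PASS = auto()
    FAIL = auto()
    SKIP = auto()
    XFAIL = auto()
    XPASS = auto()

@dataclass
class TestResult:
    test: object  # Ward's Test object in the real module
    outcome: TestOutcome
    error: Optional[Exception] = None
    message: str = ""
    captured_stdout: str = ""
    captured_stderr: str = ""

# The call sites in suite.py line up with these defaults, e.g.:
# TestResult(test, TestOutcome.SKIP)
# TestResult(test, TestOutcome.FAIL, e, captured_stdout=..., captured_stderr=...)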
