[replay] Improve compatibility with unstable builds
tysmith committed May 24, 2024
1 parent 2b9d6da commit bf29dd5
Showing 2 changed files with 69 additions and 57 deletions.
56 changes: 26 additions & 30 deletions grizzly/replay/replay.py
@@ -56,6 +56,7 @@ class ReplayResult:
         durations: Number of seconds spent running each testcase.
         expected: Signature match.
         count: Number of times detected.
+        early: Result was detected before a testcase was requested.
     """
 
     report: Report
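
Note: only the docstring and the `report` field of this container are visible in the hunk above. For orientation, the full dataclass plausibly looks like the sketch below, where every field type and default other than `report: Report` is an assumption rather than something shown in this diff.

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class ReplayResult:
        report: "Report"        # grizzly Report object (import omitted in this sketch)
        durations: List[float]  # assumed type: seconds spent running each testcase
        expected: bool          # assumed type: signature match
        count: int = 1          # assumed default: number of times detected
        early: bool = False     # assumed default: the new flag added by this commit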
@@ -443,30 +444,27 @@ def harness_fn(_: str) -> bytes:  # pragma: no cover
                         log_path, self.target.binary, is_hang=run_result.timeout
                     )
                     # set active signature
-                    if (
-                        not runner.startup_failure
-                        and not self._any_crash
-                        and not run_result.timeout
-                        and not sig_set
-                    ):
+                    if not self._any_crash and not run_result.timeout and not sig_set:
                         assert not expect_hang
                         assert self._signature is None
                         LOG.debug(
                             "no signature given, using short sig %r",
                             report.short_signature,
                         )
+                        if runner.startup_failure:
+                            LOG.warning(
+                                "Using signature from startup failure! "
+                                "Provide a signature to avoid this."
+                            )
                         self._signature = report.crash_signature
                         sig_set = True
                         if self._signature is not None:
                             assert not sig_hash, "sig_hash should only be set once"
                             sig_hash = Report.calc_hash(self._signature)
 
                     # bucket result
-                    if not runner.startup_failure and (
-                        self._any_crash
-                        or self.check_match(
-                            self._signature, report, expect_hang, sig_set
-                        )
+                    if self._any_crash or self.check_match(
+                        self._signature, report, expect_hang, sig_set
                     ):
                         if sig_hash is not None:
                             LOG.debug("using signature hash (%s) to bucket", sig_hash)
@@ -555,17 +553,10 @@ def harness_fn(_: str) -> bytes:  # pragma: no cover
 
             # process results
             if self._any_crash:
-                # add all results if min_results was reached
-                if sum(x.count for x in reports.values() if x.expected) >= min_results:
-                    results: List[ReplayResult] = list(reports.values())
-                else:
-                    # add only unexpected results since min_results was not reached
-                    results = []
-                    for result in reports.values():
-                        if result.expected:
-                            result.report.cleanup()
-                        else:
-                            results.append(result)
+                # all reports should be expected when self._any_crash=True
+                assert all(x.expected for x in reports.values())
+                success = sum(x.count for x in reports.values()) >= min_results
+                if not success:
                     LOG.debug(
                         "%d (any_crash) less than minimum %d",
                         self.status.results.total,
@@ -574,19 +565,24 @@ def harness_fn(_: str) -> bytes:  # pragma: no cover
             else:
                 # there should be at most one expected bucket
                 assert sum(x.expected for x in reports.values()) <= 1
-                # filter out unreliable expected results
-                results = []
-                for crash_hash, result in reports.items():
-                    if result.expected and result.count < min_results:
+                success = any(
+                    x.count >= min_results for x in reports.values() if x.expected
+                )
+            results: List[ReplayResult] = []
+            for crash_hash, result in reports.items():
+                # if min_results not met (success=False) cleanup expected reports
+                if not success and result.expected:
+                    if not self._any_crash:
                         LOG.debug(
                             "%r less than minimum (%d/%d)",
                             crash_hash,
                             result.count,
                             min_results,
                         )
-                        result.report.cleanup()
-                        continue
-                    results.append(result)
+                    result.report.cleanup()
+                    continue
+                results.append(result)
+
             # this should only be displayed when both conditions are met:
             # 1) runner does not close target (no delay was given before shutdown)
             # 2) result has not been successfully reproduced
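
Distilled, the rewritten post-processing computes `success` per mode up front and then makes a single cleanup/filter pass over all buckets, replacing the duplicated per-branch loops. A self-contained sketch of that shape (stand-in types; the real code also calls `result.report.cleanup()` on each discarded bucket):

    from dataclasses import dataclass
    from typing import Dict, List

    @dataclass
    class Bucket:
        """Stand-in for ReplayResult, illustration only."""
        count: int
        expected: bool

    def filter_buckets(
        reports: Dict[str, Bucket], min_results: int, any_crash: bool
    ) -> List[Bucket]:
        if any_crash:
            # every report is expected, so all counts add toward the minimum
            success = sum(r.count for r in reports.values()) >= min_results
        else:
            # only the (at most one) expected bucket can satisfy the minimum
            success = any(
                r.count >= min_results for r in reports.values() if r.expected
            )
        results: List[Bucket] = []
        for result in reports.values():
            # expected buckets are discarded when the minimum was not met
            if not success and result.expected:
                continue
            results.append(result)
        return results

    # the unexpected bucket survives even though the expected one fell short
    buckets = {"a": Bucket(1, True), "b": Bucket(1, False)}
    assert filter_buckets(buckets, min_results=2, any_crash=False) == [Bucket(1, False)]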
@@ -602,7 +598,7 @@ def harness_fn(_: str) -> bytes:  # pragma: no cover
             return results
 
         finally:
-            # we don't want to clean up but we are not checking results
+            # we don't want to cleanup but we are not checking results
             self.target.close(force_close=True)
             # remove unprocessed reports
             for result in reports.values():
70 changes: 43 additions & 27 deletions grizzly/replay/test_replay.py
@@ -164,21 +164,6 @@ def test_replay_05(mocker, server):
     # target.close() called once in runner and once by ReplayManager.run()
     assert target.close.call_count == 2
     target.reset_mock()
-    # test target crashed
-    target.check_result.return_value = Result.FOUND
-    target.save_logs = _fake_save_logs
-    with ReplayManager([], server, target, use_harness=False) as replay:
-        results = replay.run(tests, 10, repeat=1)
-    assert replay.status
-    assert replay.status.ignored == 1
-    assert replay.status.iteration == 1
-    assert replay.status.results.total == 0
-    assert replay._signature is None
-    # target.close() called once in runner and once by ReplayManager.run()
-    assert target.close.call_count == 2
-    assert len(results) == 1
-    assert results[0].count == 1
-    assert not results[0].expected
 
 
 def test_replay_06(mocker, server):
@@ -408,23 +393,54 @@ def test_replay_12(mocker, server):
     assert report_2.cleanup.call_count == 1
 
 
-def test_replay_13(mocker, server):
-    """test ReplayManager.run() - any crash - startup failure"""
-    server.serve_path.return_value = (Served.NONE, {})
+@mark.parametrize(
+    "to_serve, sig_value, expected, unexpected",
+    [
+        # No signature provided
+        (((Served.NONE, {}), (Served.ALL, {"a.html": "/fake/path"})), None, 2, 0),
+        (((Served.ALL, {"a.html": "/fake/path"}), (Served.NONE, {})), None, 2, 0),
+        (((Served.NONE, {}), (Served.NONE, {})), None, 2, 0),
+        # Signature provided (signatures match)
+        (((Served.NONE, {}), (Served.ALL, {"a.html": "/fake/path"})), "STDERR", 2, 0),
+        (((Served.ALL, {"a.html": "/fake/path"}), (Served.NONE, {})), "STDERR", 2, 0),
+        (((Served.NONE, {}), (Served.NONE, {})), "STDERR", 2, 0),
+        # Signature provided (signatures don't match)
+        (((Served.NONE, {}), (Served.ALL, {"a.html": "/fake/path"})), "miss", 0, 1),
+        (((Served.ALL, {"a.html": "/fake/path"}), (Served.NONE, {})), "miss", 0, 1),
+        (((Served.NONE, {}), (Served.NONE, {})), "miss", 0, 1),
+    ],
+)
+def test_replay_13(mocker, server, tmp_path, to_serve, sig_value, expected, unexpected):
+    """test ReplayManager.run() - results triggered after launch before running test"""
+    server.serve_path.side_effect = to_serve
+
+    # prepare signature
+    if sig_value is not None:
+        sig_file = tmp_path / "sig.json"
+        sig_file.write_text(
+            "{\n"
+            '    "symptoms": [\n'
+            "        {\n"
+            '            "src": "stderr",\n'
+            '            "type": "output",\n'
+            f'            "value": "/{sig_value}/"\n'
+            "        }\n"
+            "    ]\n"
+            "}\n"
+        )
+        sig = CrashSignature.fromFile(str(sig_file))
+    else:
+        sig = None
+
     target = mocker.Mock(spec_set=Target, binary=Path("bin"), launch_timeout=30)
     target.check_result.return_value = Result.FOUND
     target.save_logs = _fake_save_logs
     target.monitor.is_healthy.return_value = False
     tests = [mocker.MagicMock(spec_set=TestCase, entry_point="a.html")]
-    with ReplayManager([], server, target, any_crash=True, use_harness=False) as replay:
-        results = replay.run(tests, 10, repeat=1, min_results=1)
-    assert results
-    assert not any(x.expected for x in results)
-    assert target.close.call_count == 2
-    assert replay.status
-    assert replay.status.iteration == 1
-    assert replay.status.results.total == 0
-    assert replay.status.ignored == 1
+    with ReplayManager(set(), server, target, any_crash=False, signature=sig) as replay:
+        results = replay.run(tests, 10, repeat=2, min_results=2)
+    assert sum(x.count for x in results if x.expected) == expected
+    assert sum(x.count for x in results if not x.expected) == unexpected
 
 
 def test_replay_14(mocker, server):
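
The "STDERR" / "miss" parametrization above hinges on FuzzManager matching the /.../ regex from the signature against the stderr log produced by the `_fake_save_logs` helper (which evidently writes "STDERR" content, given the expected counts). A standalone sketch of that matching using FuzzManager's FTB API directly (the configuration values and stderr content here are illustrative):

    from FTB.ProgramConfiguration import ProgramConfiguration
    from FTB.Signatures.CrashInfo import CrashInfo
    from FTB.Signatures.CrashSignature import CrashSignature

    cfg = ProgramConfiguration("product", "x86-64", "linux")
    # stderr lines are illustrative; grizzly builds CrashInfo from saved target logs
    crash = CrashInfo.fromRawCrashData([], ["STDERR log data"], cfg)
    match_sig = CrashSignature(
        '{"symptoms": [{"src": "stderr", "type": "output", "value": "/STDERR/"}]}'
    )
    miss_sig = CrashSignature(
        '{"symptoms": [{"src": "stderr", "type": "output", "value": "/miss/"}]}'
    )
    assert match_sig.matches(crash)     # would be bucketed as an expected result
    assert not miss_sig.matches(crash)  # would be bucketed as unexpected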
