Add proper filtering to WprProxySimulatorTestRunner tests.
ios-wpr-simulator is currently timing out due to a lack of filtering.
Each site tested by autofill automation is reported as its own suite,
e.g.
ios_costco.test.AutofillAutomationTestCase/testActions
ios_ebay.test.AutofillAutomationTestCase/testActions
ios_westelm.test.AutofillAutomationTestCase/testActions
And the test_runner will automatically rerun any suite that fails,
filtering for just that suite.
However, filters were not implemented for WprProxySimulatorTestRunner,
meaning that if ios_ebay failed, every site would be retested,
resulting in a polynomial amount of test rerunning and a timeout.
This change implements filtering properly and adds tests for it, which
should reduce the bot runtime to a reasonable level.
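Concretely, the filter matching added to _run boils down to the sketch
below (illustrative only: should_run is a hypothetical helper name, and
the actual change inlines this check in _run):

  def should_run(test_name, test_filter, invert):
    # Hypothetical helper; the real change inlines this logic in _run().
    # An empty filter matches every test.
    matched = (not test_filter or
               any(test_name in f for f in test_filter))
    # With invert, run only the tests that do NOT match the filter.
    return matched != invert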
Change-Id: Ib614a5902be3ffd9d41720173bdf51d140705f83
Reviewed-on: https://chromium-review.googlesource.com/c/1374453
Reviewed-by: Sergey Berezin <[email protected]>
Commit-Queue: ericale <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616065}
diff --git a/ios/build/bots/scripts/test_runner.py b/ios/build/bots/scripts/test_runner.py
index f415001d..2c65d214 100644
--- a/ios/build/bots/scripts/test_runner.py
+++ b/ios/build/bots/scripts/test_runner.py
@@ -1050,8 +1050,9 @@
Args:
cmd: List of strings forming the command to run.
NOTE: in the case of WprProxySimulatorTestRunner, cmd
- is just a descriptor for the test, and not indicative
- of the actual command we build and execute in _run.
+ is a dict forming the configuration for the test (including
+ filter rules), and not indicative of the actual command
+ we build and execute in _run.
Returns:
GTestResult instance.
@@ -1061,6 +1062,9 @@
completed_without_failure = True
total_returncode = 0
+ invert = cmd['invert']
+ test_filter = cmd['test_filter']
+
if shards > 1:
# TODO(crbug.com/881096): reimplement sharding in the future
raise ShardingDisabledError()
@@ -1086,7 +1090,19 @@
testName = os.path.splitext(baseName)[0]
replayPath = '{}/{}'.format(self.replay_path, testName)
- if os.path.isfile(replayPath):
+      # The filters are in the form 'testFileName.testSuiteName/testName',
+      # e.g. 'ios_costco.test.AutofillAutomationTestCase/testActions'.
+ test_matched_filter = False
+ for filter_name in test_filter:
+ if testName in filter_name:
+ test_matched_filter = True
+
+ if test_filter == []:
+ test_matched_filter = True
+
+      # If the test matches the filter and invert is disabled, OR if the
+      # test doesn't match the filter and invert is enabled, run the test.
+ if os.path.isfile(replayPath) and test_matched_filter != invert:
print 'Running test for recipe {}'.format(recipePath)
self.wprgo_start(replayPath)
@@ -1175,7 +1191,7 @@
print '%s test returned %s' % (recipePath, proc.returncode)
print
- else:
+ elif test_matched_filter != invert:
print 'No matching replay file for recipe {}'.format(
recipePath)
@@ -1189,9 +1205,9 @@
return result
def get_launch_command(self, test_filter=[], invert=False):
- '''Returns the name of the test, instead of the real launch command.
- We build our own command in _run, which is what this is usually passed to,
- so instead we just use this for a test descriptor.
+ '''Returns a config dict for the test, instead of the real launch command.
+ Normally this is passed into _run as the command it should use, but since
+ the WPR runner builds its own cmd, we use this to configure the function.
Args:
test_filter: List of test cases to filter.
@@ -1199,21 +1215,13 @@
match everything except the given test cases.
Returns:
- A list of strings forming the command to launch the test.
+ A dict forming the configuration for the test.
'''
- invert_str = "Inverted" if invert else ""
- if test_filter:
- return [
- '{} WprProxySimulatorTest'.format(invert_str),
- 'Test folder: {}'.format(self.replay_path)
- ]
- else:
- return [
- '{} WprProxySimulatorTest'.format(invert_str),
- 'Filter: {}'.format(' '.join(test_filter)),
- 'Test folder: {}'.format(self.replay_path)
- ]
+ test_config = {}
+ test_config['invert'] = invert
+ test_config['test_filter'] = test_filter
+ return test_config
def proxy_start(self):
'''Starts tsproxy and routes the machine's traffic through tsproxy.'''
diff --git a/ios/build/bots/scripts/test_runner_test.py b/ios/build/bots/scripts/test_runner_test.py
index 7cb988a..a6d71075 100755
--- a/ios/build/bots/scripts/test_runner_test.py
+++ b/ios/build/bots/scripts/test_runner_test.py
@@ -419,9 +419,8 @@
self.assertTrue(tr)
- def test_run(self):
- """Ensures the _run method can handle passed and failed tests."""
-
+  def run_wpr_test(self, test_filter=[], invert=False):
+    """Runs a WPR test with mocked dependencies and returns _run's result."""
class FakeStdout:
def __init__(self):
self.line_index = 0
@@ -477,8 +476,12 @@
self.mock(subprocess, 'Popen', popen)
tr.xctest_path = 'fake.xctest'
- cmd = tr.get_launch_command()
- result = tr._run(cmd=cmd, shards=1)
+ cmd = tr.get_launch_command(test_filter=test_filter, invert=invert)
+ return tr._run(cmd=cmd, shards=1)
+
+ def test_run_no_filter(self):
+ """Ensures the _run method can handle passed and failed tests."""
+ result = self.run_wpr_test()
self.assertIn('file1.a/1', result.passed_tests)
self.assertIn('file1.b/2', result.passed_tests)
self.assertIn('file1.c/3', result.failed_tests)
@@ -486,6 +489,25 @@
self.assertIn('file2.b/2', result.passed_tests)
self.assertIn('file2.c/3', result.failed_tests)
+ def test_run_with_filter(self):
+ """Ensures the _run method works with a filter."""
+ result = self.run_wpr_test(test_filter=["file1"], invert=False)
+ self.assertIn('file1.a/1', result.passed_tests)
+ self.assertIn('file1.b/2', result.passed_tests)
+ self.assertIn('file1.c/3', result.failed_tests)
+ self.assertNotIn('file2.a/1', result.passed_tests)
+ self.assertNotIn('file2.b/2', result.passed_tests)
+ self.assertNotIn('file2.c/3', result.failed_tests)
+
+ def test_run_with_inverted_filter(self):
+ """Ensures the _run method works with an inverted filter."""
+ result = self.run_wpr_test(test_filter=["file1"], invert=True)
+ self.assertNotIn('file1.a/1', result.passed_tests)
+ self.assertNotIn('file1.b/2', result.passed_tests)
+ self.assertNotIn('file1.c/3', result.failed_tests)
+ self.assertIn('file2.a/1', result.passed_tests)
+ self.assertIn('file2.b/2', result.passed_tests)
+ self.assertIn('file2.c/3', result.failed_tests)
class DeviceTestRunnerTest(TestCase):
def setUp(self):