Added parsing of test output for interrupted builds.

If a test run is interrupted (e.g. the connection to the test service was
lost, or a test was stuck with no output for 3 minutes and was killed),
xcodebuild_runner needs to parse the test output, collect the passed tests,
and re-run the test bundle with the already-passed tests filtered out
(e.g. https://chromium-swarm.appspot.com/task?id=45c892feae862110).

Removed _make_cmd_list_for_failed_tests because re-runs now filter by
passed tests in both cases, whether the previous attempt was interrupted
or not.
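
For illustration, a minimal sketch of the parsing step (the real
implementation is parse_passed_tests_for_interrupted_run in
xcode_log_parser.py below; the helper name and the sample log line here
are only illustrative):

    import re

    PASSED_TEST_REGEX = re.compile(r"Test case '\-\[(.+?)\s(.+?)\]' passed")

    def passed_tests_from_output(output_lines):
      # Each passed test is logged as e.g.
      # [09:04:42:INFO] Test case '-[TabTestCase testOpen]' passed on device.
      passed = []
      for line in output_lines:
        match = PASSED_TEST_REGEX.search(line)
        if match:
          # Convert to the 'TestClass/testMethod' filter format used by
          # OnlyTestIdentifiers/SkipTestIdentifiers.
          passed.append('%s/%s' % (match.group(1), match.group(2)))
      return passed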

At the end of the run, also check whether all tests from the test bundle
were executed and, if not, add a 'not executed tests' record.
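
A minimal sketch of that end-of-run check, matching the get_all_tests() /
'aborted tests' logic added in xcodebuild_runner.py below (the function
name and test names here are made up):

    def not_executed_tests(all_tests, passed, failed):
      # Tests that are in the bundle but neither passed nor failed were
      # never executed (e.g. the run was interrupted before reaching them).
      return sorted(set(all_tests) - set(passed) - set(failed))

    # not_executed_tests(['TabTestCase/testOpen', 'TabTestCase/testClose'],
    #                    ['TabTestCase/testOpen'], [])
    # -> ['TabTestCase/testClose']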

Filed a radar:
'After primaryInstrumentsServerWithError xcodebuild did not finish its execution'
https://feedbackassistant.apple.com/feedback/6476415

Bug: 979267
Change-Id: I596945e10bd8382c41487633456a3c80a3569419
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1680973
Reviewed-by: Rohit Rao <[email protected]>
Reviewed-by: John Budorick <[email protected]>
Commit-Queue: Maksym Onufriienko <[email protected]>
Cr-Commit-Position: refs/heads/master@{#675844}
diff --git a/ios/build/bots/scripts/test_runner.py b/ios/build/bots/scripts/test_runner.py
index 078f54e..635afc22 100644
--- a/ios/build/bots/scripts/test_runner.py
+++ b/ios/build/bots/scripts/test_runner.py
@@ -334,6 +334,23 @@
   }
 
 
+def get_test_names(app_path):
+  """Gets list of tests from test app.
+
+  Args:
+     app_path: A path to test target bundle.
+
+  Returns:
+     List of (testSuite, testMethod) tuples.
+  """
+  cmd = ['otool', '-ov', app_path]
+  test_pattern = re.compile(
+      'imp (?:0[xX][0-9a-fA-F]+ )?-\['
+      '(?P<testSuite>[A-Za-z_][A-Za-z0-9_]*Test(?:Case)?)\s'
+      '(?P<testMethod>test[A-Za-z0-9_]*)\]')
+  return test_pattern.findall(subprocess.check_output(cmd))
+
+
 def shard_xctest(object_path, shards, test_cases=None):
   """Gets EarlGrey test methods inside a test target and splits them into shards
 
@@ -345,12 +362,7 @@
   Returns:
     A list of test shards.
   """
-  cmd = ['otool', '-ov', object_path]
-  test_pattern = re.compile(
-    'imp -\[(?P<testSuite>[A-Za-z_][A-Za-z0-9_]*Test[Case]*) '
-    '(?P<testMethod>test[A-Za-z0-9_]*)\]')
-  test_names = test_pattern.findall(subprocess.check_output(cmd))
-
+  test_names = get_test_names(object_path)
   # If test_cases are passed in, only shard the intersection of them and the
   # listed tests.  Format of passed-in test_cases can be either 'testSuite' or
   # 'testSuite/testMethod'.  The listed tests are tuples of ('testSuite',
diff --git a/ios/build/bots/scripts/test_runner_test.py b/ios/build/bots/scripts/test_runner_test.py
index dce3dc8..f468dc9 100755
--- a/ios/build/bots/scripts/test_runner_test.py
+++ b/ios/build/bots/scripts/test_runner_test.py
@@ -8,6 +8,7 @@
 import collections
 import glob
 import logging
+import mock
 import os
 import subprocess
 import unittest
@@ -604,6 +605,37 @@
         ['a', 'b', 'c', 'd']
     )
 
+  @mock.patch('subprocess.check_output', autospec=True)
+  def test_get_test_names(self, mock_subprocess):
+    otool_output = (
+        'imp 0x102492020 -[BrowserViewControllerTestCase testJavaScript]'
+        'name 0x105ee8b84 testFixForCrbug801165'
+        'types 0x105f0c842 v16 @ 0:8'
+        'name 0x105ee8b9a testOpenURLFromNTP'
+        'types 0x105f0c842 v16 @ 0:8'
+        'imp 0x102493b30 -[BrowserViewControllerTestCase testOpenURLFromNTP]'
+        'name 0x105ee8bad testOpenURLFromTab'
+        'types 0x105f0c842 v16 @ 0:8'
+        'imp 0x102494180 -[BrowserViewControllerTestCase testOpenURLFromTab]'
+        'name 0x105ee8bc0 testOpenURLFromTabSwitcher'
+        'types 0x105f0c842 v16 @ 0:8'
+        'imp 0x102494f70 -[BrowserViewControllerTestCase testTabSwitch]'
+        'types 0x105f0c842 v16 @ 0:8'
+        'imp 0x102494f70 -[BrowserViewControllerTestCase helper]'
+        'imp 0x102494f70 -[BrowserViewControllerTestCCCCCCCCC testMethod]'
+    )
+    mock_subprocess.return_value = otool_output
+    tests = test_runner.get_test_names('')
+    self.assertEqual(
+        [
+            ('BrowserViewControllerTestCase', 'testJavaScript'),
+            ('BrowserViewControllerTestCase', 'testOpenURLFromNTP'),
+            ('BrowserViewControllerTestCase', 'testOpenURLFromTab'),
+            ('BrowserViewControllerTestCase', 'testTabSwitch')
+        ],
+        tests
+    )
+
 
 if __name__ == '__main__':
   logging.basicConfig(format='[%(asctime)s:%(levelname)s] %(message)s',
diff --git a/ios/build/bots/scripts/xcode_log_parser.py b/ios/build/bots/scripts/xcode_log_parser.py
index d87a8262..1eb77d8f 100644
--- a/ios/build/bots/scripts/xcode_log_parser.py
+++ b/ios/build/bots/scripts/xcode_log_parser.py
@@ -18,6 +18,28 @@
 LOGGER = logging.getLogger(__name__)
 
 
+def parse_passed_tests_for_interrupted_run(output):
+  """Parses xcode runner output to get passed tests only.
+
+  Args:
+    output: [str] Output of the test run, as a list of lines.
+
+  Returns:
+    The list of passed tests, to be used as a filter for the next attempt.
+  """
+  passed_tests = []
+  # A passed test line has the format:
+  # [09:04:42:INFO] Test case '-[Test_class test_method]' passed.
+  passed_test_regex = re.compile(r'Test case \'\-\[(.+?)\s(.+?)\]\' passed')
+
+  for test_line in output:
+    m_test = passed_test_regex.search(test_line)
+    if m_test:
+      passed_tests.append('%s/%s' % (m_test.group(1), m_test.group(2)))
+  LOGGER.info('%d passed tests for interrupted build.' % len(passed_tests))
+  return passed_tests
+
+
 def format_test_case(test_case):
   """Format test case from `-[TestClass TestMethod]` to `TestClass_TestMethod`.
 
@@ -162,11 +184,12 @@
     return passed_tests
 
   @staticmethod
-  def collect_test_results(xcresult):
+  def collect_test_results(xcresult, output):
     """Gets test result data from xcresult.
 
     Args:
       xcresult: (str) A path to xcresult.
+      output: [str] Output of the test run, as a list of lines.
 
     Returns:
       Test result as a map:
@@ -190,7 +213,8 @@
     plist_path = os.path.join(xcresult + '.xcresult', 'Info.plist')
     if not os.path.exists(plist_path):
       test_results['failed']['BUILD_INTERRUPTED'] = [
-          '%s with test results does not exist.' % plist_path]
+          '%s with test results does not exist.' % plist_path] + output
+      test_results['passed'] = parse_passed_tests_for_interrupted_run(output)
       return test_results
 
     root = json.loads(Xcode11LogParser._xcresulttool_get(xcresult))
@@ -280,11 +304,12 @@
     return status_summary
 
   @staticmethod
-  def collect_test_results(output_folder):
+  def collect_test_results(output_folder, output):
     """Gets test result data from Info.plist.
 
     Args:
       output_folder: (str) A path to output folder.
+      output: [str] Output of the test run, as a list of lines.
     Returns:
       Test result as a map:
         {
@@ -301,7 +326,8 @@
     plist_path = os.path.join(output_folder, 'Info.plist')
     if not os.path.exists(plist_path):
       test_results['failed']['BUILD_INTERRUPTED'] = [
-          '%s with test results does not exist.' % plist_path]
+          '%s with test results does not exist.' % plist_path] + output
+      test_results['passed'] = parse_passed_tests_for_interrupted_run(output)
       return test_results
 
     root = plistlib.readPlist(plist_path)
diff --git a/ios/build/bots/scripts/xcode_log_parser_test.py b/ios/build/bots/scripts/xcode_log_parser_test.py
index 33473ce..bc5bb4cb 100644
--- a/ios/build/bots/scripts/xcode_log_parser_test.py
+++ b/ios/build/bots/scripts/xcode_log_parser_test.py
@@ -203,7 +203,7 @@
     mock_exist_file.return_value = True
     self.assertEqual(expected_test_results,
                      xcode_log_parser.Xcode11LogParser().collect_test_results(
-                         _XTEST_RESULT))
+                         _XTEST_RESULT, []))
 
   @mock.patch('os.path.exists', autospec=True)
   @mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
@@ -216,7 +216,7 @@
     mock_exist_file.return_value = True
     self.assertEqual(expected_test_results,
                      xcode_log_parser.Xcode11LogParser().collect_test_results(
-                         _XTEST_RESULT))
+                         _XTEST_RESULT, []))
 
   @mock.patch('os.path.exists', autospec=True)
   def testCollectTestsDidNotRun(self, mock_exist_file):
@@ -227,7 +227,7 @@
             '%s with test results does not exist.' % _XTEST_RESULT]}}
     self.assertEqual(expected_test_results,
                      xcode_log_parser.Xcode11LogParser().collect_test_results(
-                         _XTEST_RESULT))
+                         _XTEST_RESULT, []))
 
   @mock.patch('os.path.exists', autospec=True)
   def testCollectTestsInterruptedRun(self, mock_exist_file):
@@ -239,7 +239,7 @@
                 _XTEST_RESULT + '.xcresult', 'Info.plist')]}}
     self.assertEqual(expected_test_results,
                      xcode_log_parser.Xcode11LogParser().collect_test_results(
-                         _XTEST_RESULT))
+                         _XTEST_RESULT, []))
 
   @mock.patch('os.path.exists', autospec=True)
   @mock.patch('xcode_log_parser.Xcode11LogParser._xcresulttool_get')
@@ -250,3 +250,22 @@
     mock_xcresulttool_get.return_value = ACTIONS_RECORD_FAILED_TEST
     xcode_log_parser.Xcode11LogParser().copy_screenshots(_XTEST_RESULT)
     self.assertEqual(1, mock_copy.call_count)
+
+  @mock.patch('os.path.exists', autospec=True)
+  def testCollectTestResults_interruptedTests(self, mock_path_exists):
+    mock_path_exists.side_effect = [True, False]
+    output = [
+        '[09:03:42:INFO] Test case \'-[TestCase1 method1]\' passed on device.',
+        '[09:06:40:INFO] Test case \'-[TestCase2 method1]\' passed on device.',
+        '[09:09:00:INFO] Test case \'-[TestCase2 method1]\' failed on device.',
+        '** BUILD INTERRUPTED **',
+    ]
+    not_found_message = [
+        'Info.plist.xcresult/Info.plist with test results does not exist.']
+    res = xcode_log_parser.Xcode11LogParser().collect_test_results(
+        'Info.plist', output)
+    self.assertIn('BUILD_INTERRUPTED', res['failed'])
+    self.assertEqual(not_found_message + output,
+                     res['failed']['BUILD_INTERRUPTED'])
+    self.assertEqual(['TestCase1/method1', 'TestCase2/method1'],
+                     res['passed'])
diff --git a/ios/build/bots/scripts/xcodebuild_runner.py b/ios/build/bots/scripts/xcodebuild_runner.py
index b39f19a4..f1f984a 100644
--- a/ios/build/bots/scripts/xcodebuild_runner.py
+++ b/ios/build/bots/scripts/xcodebuild_runner.py
@@ -57,21 +57,22 @@
     egtests_app: full path to egtests app.
     project_path: root project folder.
     module_name: egtests module name.
-    filtered_tests: List of tests to include or exclude, depending on `invert`.
-    invert: type of filter(True - inclusive, False - exclusive).
+    included_tests: List of tests to run.
+    excluded_tests: List of tests not to run.
   """
 
-  def __init__(self, egtests_app, filtered_tests=None, invert=False,
+  def __init__(self, egtests_app, included_tests=None, excluded_tests=None,
                test_args=None, env_vars=None, host_app_path=None):
     """Initialize Egtests.
 
     Args:
       egtests_app: (str) full path to egtests app.
-      filtered_tests: (list) Specific tests to run
-        (it can inclusive/exclusive based on invert parameter).
+      included_tests: (list) Specific tests to run
          E.g.
           [ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
-      invert: type of filter(True - inclusive, False - exclusive).
+      excluded_tests: (list) Specific tests not to run
+         E.g.
+          [ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
       test_args: List of strings to pass as arguments to the test when
         launching.
       env_vars: List of environment variables to pass to the test itself.
@@ -85,8 +86,8 @@
     self.egtests_path = egtests_app
     self.project_path = os.path.dirname(self.egtests_path)
     self.module_name = os.path.splitext(os.path.basename(egtests_app))[0]
-    self.filter = filtered_tests
-    self.invert = invert
+    self.included_tests = included_tests or []
+    self.excluded_tests = excluded_tests or []
     self.test_args = test_args
     self.env_vars = env_vars
     self.host_app_path = host_app_path
@@ -121,15 +122,15 @@
     """
     module = self.module_name + '_module'
     module_data = {
-            'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),
-            'TestHostPath': '%s' % self.egtests_path,
-            'TestingEnvironmentVariables': {
-                'DYLD_INSERT_LIBRARIES': (
-                    '__PLATFORMS__/iPhoneSimulator.platform/Developer/'
-                    'usr/lib/libXCTestBundleInject.dylib'),
-                'DYLD_LIBRARY_PATH': self.project_path,
-                'DYLD_FRAMEWORK_PATH': self.project_path + ':',
-                'XCInjectBundleInto': '__TESTHOST__/%s' % self.module_name
+        'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),
+        'TestHostPath': '%s' % self.egtests_path,
+        'TestingEnvironmentVariables': {
+            'DYLD_INSERT_LIBRARIES': (
+                '__PLATFORMS__/iPhoneSimulator.platform/Developer/'
+                'usr/lib/libXCTestBundleInject.dylib'),
+            'DYLD_LIBRARY_PATH': self.project_path,
+            'DYLD_FRAMEWORK_PATH': self.project_path + ':',
+            'XCInjectBundleInto': '__TESTHOST__/%s' % self.module_name
             }
         }
     # Add module data specific to EG2 or EG1 tests
@@ -140,9 +141,9 @@
       module_data['UITargetAppPath'] = '%s' % self.host_app_path
       # Special handling for Xcode10.2
       dependent_products = [
-        module_data['UITargetAppPath'],
-        module_data['TestBundlePath'],
-        module_data['TestHostPath']
+          module_data['UITargetAppPath'],
+          module_data['TestBundlePath'],
+          module_data['TestHostPath']
       ]
       module_data['DependentProductPaths'] = dependent_products
     # EG1 tests
@@ -152,13 +153,12 @@
     xctestrun_data = {
         module: module_data
     }
-    if self.filter:
-      if self.invert:
-        xctestrun_data[module].update(
-            {'SkipTestIdentifiers': self.filter})
-      else:
-        xctestrun_data[module].update(
-            {'OnlyTestIdentifiers': self.filter})
+    if self.excluded_tests:
+      xctestrun_data[module].update(
+          {'SkipTestIdentifiers': self.excluded_tests})
+    if self.included_tests:
+      xctestrun_data[module].update(
+          {'OnlyTestIdentifiers': self.included_tests})
     if self.env_vars:
       xctestrun_data[module].update(
           {'EnvironmentVariables': self.env_vars})
@@ -207,34 +207,6 @@
     else:
       self._log_parser = xcode_log_parser.XcodeLogParser()
 
-  def _make_cmd_list_for_failed_tests(self, failed_results, out_dir,
-                                      test_args=None, env_vars=None):
-    """Makes cmd list based on failure results.
-
-    Args:
-      failed_results: Map of failed tests, where key is name of egtests_app and
-        value is a list of failed_test_case/test_methods:
-          {
-              'failed_test_case/test_methods': ['StackTrace']
-          }
-      out_dir: (str) An output path.
-      test_args: List of strings to pass as arguments to the test when
-        launching.
-      env_vars: List of environment variables to pass to the test itself.
-
-    Returns:
-      List of Launch commands to re-run failed tests.
-      Every destination will run on separate clone of a stimulator.
-    """
-    eg_app = EgtestsApp(
-        egtests_app=self.egtests_app.egtests_path,
-        filtered_tests=[test.replace(' ', '/') for test in failed_results],
-        test_args=test_args,
-        env_vars=env_vars,
-        host_app_path=self.egtests_app.host_app_path)
-    # Regenerates xctest run and gets a command.
-    return self.command(eg_app, out_dir, self.destination, shards=1)
-
   def summary_log(self):
     """Calculates test summary - how many passed, failed and error tests.
 
@@ -266,8 +238,10 @@
 
     Returns:
       returncode - return code of command run.
+      output - command output as list of strings.
     """
     LOGGER.info('Launching %s with env %s' % (cmd, self.env))
+    output = []
     proc = subprocess.Popen(
         cmd,
         env=self.env,
@@ -291,49 +265,47 @@
         break
       line = line.rstrip()
       LOGGER.info(line)
+      output.append(line)
       sys.stdout.flush()
 
     proc.wait()
     LOGGER.info('Command %s finished with %d' % (cmd, proc.returncode))
-    return proc.returncode
+    return proc.returncode, output
 
   def launch(self):
     """Launches tests using xcodebuild."""
     cmd_list = []
     self.test_results['attempts'] = []
+    cancelled_statuses = {'TESTS_DID_NOT_START', 'BUILD_INTERRUPTED'}
+    shards = self.shards
 
     # total number of attempts is self.retries+1
     for attempt in range(self.retries + 1):
       outdir_attempt = os.path.join(self.out_dir, 'attempt_%d' % attempt)
-      # Create a command for the 1st run or if tests did not start,
-      # re-run the same command but with different output folder.
-      # (http://crbug.com/916620) If tests did not start, repeat the command.
-      if (not self.test_results['attempts'] or
-          {'TESTS_DID_NOT_START', 'BUILD_INTERRUPTED'}.intersection(
-              self.test_results['attempts'][-1]['failed'].keys())):
-        cmd_list = self.command(self.egtests_app,
-                                outdir_attempt,
-                                self.destination,
-                                self.shards)
-      # Re-init the command based on list of failed tests.
-      else:
-        cmd_list = self._make_cmd_list_for_failed_tests(
-            self.test_results['attempts'][-1]['failed'],
-            outdir_attempt,
-            test_args=self.egtests_app.test_args,
-            env_vars=self.egtests_app.env_vars)
-
+      cmd_list = self.command(self.egtests_app,
+                              outdir_attempt,
+                              self.destination,
+                              shards)
       # TODO(crbug.com/914878): add heartbeat logging to xcodebuild_runner.
       LOGGER.info('Start test attempt #%d for command [%s]' % (
           attempt, ' '.join(cmd_list)))
-      self.launch_attempt(cmd_list, outdir_attempt)
+      _, output = self.launch_attempt(cmd_list, outdir_attempt)
       self.test_results['attempts'].append(
-          self._log_parser.collect_test_results(outdir_attempt))
+          self._log_parser.collect_test_results(outdir_attempt, output))
       if self.retries == attempt or not self.test_results[
           'attempts'][-1]['failed']:
         break
       self._log_parser.copy_screenshots(outdir_attempt)
-
+      # Exclude tests that already passed from the next attempt.
+      self.egtests_app.excluded_tests += self.test_results['attempts'][-1][
+          'passed']
+      # If tests did not complete (the run was interrupted or did not
+      # start), re-run them with the same number of shards; otherwise
+      # re-run with shards=1 and exclude the passed tests.
+      cancelled_attempt = cancelled_statuses.intersection(
+          self.test_results['attempts'][-1]['failed'].keys())
+      if not cancelled_attempt:
+        shards = 1
     self.test_results['end_run'] = int(time.time())
     self.summary_log()
 
@@ -514,7 +486,7 @@
     launch_commands = []
     for params in self.sharding_data:
       launch_commands.append(LaunchCommand(
-          EgtestsApp(params['app'], filtered_tests=params['test_cases'],
+          EgtestsApp(params['app'], included_tests=params['test_cases'],
                      env_vars=self.env_vars, test_args=self.test_args,
                      host_app_path=params['host']),
           params['destination'],
@@ -557,6 +529,12 @@
     self.logs['flaked tests'] = list(
         all_failures - set(self.logs['failed tests']))
 
+    # Gets tests that did not start or were interrupted.
+    aborted_tests = list(set(self.get_all_tests()) - set(
+        self.logs['failed tests']) - set(self.logs['passed tests']))
+    aborted_tests.sort()
+    self.logs['aborted tests'] = aborted_tests
+
     # Test is failed if there are failures for the last run.
     return not self.logs['failed tests']
 
@@ -567,3 +545,20 @@
     """
     LOGGER.info('Erasing all simulators.')
     subprocess.call(['xcrun', 'simctl', 'erase', 'all'])
+
+  def get_all_tests(self):
+    """Gets all tests from test bundle."""
+    test_app_bundle = os.path.join(self.app_path, os.path.splitext(
+        os.path.basename(self.app_path))[0])
+    # Method names that start with test* and are in *TestCase classes
+    # but are not actually test methods.
+    # TODO(crbug.com/982435): Rename not test methods with test-suffix.
+    not_tests = ['ChromeTestCase/testServer', 'FindInPageTestCase/testURL']
+    all_tests = []
+    for test_class, test_method in test_runner.get_test_names(test_app_bundle):
+      test_name = '%s/%s' % (test_class, test_method)
+      if (test_name not in not_tests and
+          # Filter by self.test_cases if specified
+          (test_class in self.test_cases if self.test_cases else True)):
+        all_tests.append(test_name)
+    return all_tests
diff --git a/ios/build/bots/scripts/xcodebuild_runner_test.py b/ios/build/bots/scripts/xcodebuild_runner_test.py
index 2962183..18dfd6e 100644
--- a/ios/build/bots/scripts/xcodebuild_runner_test.py
+++ b/ios/build/bots/scripts/xcodebuild_runner_test.py
@@ -141,7 +141,7 @@
     filtered_tests = ['TestCase1/testMethod1', 'TestCase1/testMethod2',
                       'TestCase2/testMethod1', 'TestCase1/testMethod2']
     egtest_node = xcodebuild_runner.EgtestsApp(
-        _EGTESTS_APP_PATH, filtered_tests=filtered_tests).xctestrun_node()[
+        _EGTESTS_APP_PATH, included_tests=filtered_tests).xctestrun_node()[
             'any_egtests_module']
     self.assertEqual(filtered_tests, egtest_node['OnlyTestIdentifiers'])
     self.assertNotIn('SkipTestIdentifiers', egtest_node)
@@ -153,8 +153,8 @@
     skipped_tests = ['TestCase1/testMethod1', 'TestCase1/testMethod2',
                      'TestCase2/testMethod1', 'TestCase1/testMethod2']
     egtest_node = xcodebuild_runner.EgtestsApp(
-        _EGTESTS_APP_PATH, filtered_tests=skipped_tests,
-        invert=True).xctestrun_node()['any_egtests_module']
+        _EGTESTS_APP_PATH, excluded_tests=skipped_tests
+        ).xctestrun_node()['any_egtests_module']
     self.assertEqual(skipped_tests, egtest_node['SkipTestIdentifiers'])
     self.assertNotIn('OnlyTestIdentifiers', egtest_node)
 
@@ -195,42 +195,6 @@
       xcodebuild_runner.LaunchCommand([], 'destination', shards=1, retries=1,
                                       out_dir=_OUT_DIR).fill_xctest_run([])
 
-  @mock.patch('xcodebuild_runner.LaunchCommand.fill_xctest_run', autospec=True)
-  def testLaunchCommand_make_cmd_list_for_failed_tests(self,
-                                                       fill_xctest_run_mock):
-    fill_xctest_run_mock.side_effect = [
-        '/var/folders/tmpfile1'
-    ]
-    egtest_app = 'module_1_egtests.app'
-    egtest_app_path = '%s/%s' % (_ROOT_FOLDER_PATH, egtest_app)
-    host_app_path = '%s/%s' % (_ROOT_FOLDER_PATH, egtest_app)
-    failed_tests = {
-        egtest_app: [
-            'TestCase1_1/TestMethod1',
-            'TestCase1_1/TestMethod2',
-            'TestCase1_2/TestMethod1',
-        ]
-    }
-    expected_egtests = xcodebuild_runner.EgtestsApp(
-        egtest_app_path, filtered_tests=failed_tests[egtest_app])
-    mock_egtest = mock.MagicMock(spec=xcodebuild_runner.EgtestsApp)
-    type(mock_egtest).egtests_path = mock.PropertyMock(
-        return_value=egtest_app_path)
-    type(mock_egtest).host_app_path = mock.PropertyMock(
-        return_value=host_app_path)
-    cmd = xcodebuild_runner.LaunchCommand(
-        egtests_app=mock_egtest,
-        destination=_DESTINATION,
-        out_dir='out/dir/attempt_2/iPhone X 12.0',
-        shards=1,
-        retries=1
-    )
-    cmd._make_cmd_list_for_failed_tests(
-        failed_tests, os.path.join(_OUT_DIR, 'attempt_2'))
-    self.assertEqual(1, len(fill_xctest_run_mock.mock_calls))
-    self.assertItemsEqual(expected_egtests.__dict__,
-                          fill_xctest_run_mock.mock_calls[0][1][1].__dict__)
-
   @mock.patch('os.listdir', autospec=True)
   @mock.patch('test_runner.get_current_xcode_info', autospec=True)
   @mock.patch('xcode_log_parser.XcodeLogParser.collect_test_results')
@@ -240,7 +204,7 @@
     egtests = xcodebuild_runner.EgtestsApp(_EGTESTS_APP_PATH)
     xcode_version.return_value = {'version': '10.2.1'}
     mock_collect_results.side_effect = [
-        {'failed': {'TESTS_DID_NOT_START': ['not started']}},
+        {'failed': {'TESTS_DID_NOT_START': ['not started']}, 'passed': []},
         {'failed': {}, 'passed': ['passedTest1']}
     ]
     launch_command = xcodebuild_runner.LaunchCommand(egtests,