1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/test/launcher/test_launcher.h"
6 
7 #include <stdio.h>
8 
9 #include <algorithm>
10 #include <map>
11 #include <random>
12 #include <utility>
13 
14 #include "base/at_exit.h"
15 #include "base/bind.h"
16 #include "base/clang_profiling_buildflags.h"
17 #include "base/command_line.h"
18 #include "base/environment.h"
19 #include "base/files/file_enumerator.h"
20 #include "base/files/file_path.h"
21 #include "base/files/file_util.h"
22 #include "base/files/scoped_file.h"
23 #include "base/format_macros.h"
24 #include "base/hash/hash.h"
25 #include "base/lazy_instance.h"
26 #include "base/location.h"
27 #include "base/logging.h"
28 #include "base/macros.h"
29 #include "base/memory/ptr_util.h"
30 #include "base/numerics/safe_conversions.h"
31 #include "base/process/kill.h"
32 #include "base/process/launch.h"
33 #include "base/ranges/algorithm.h"
34 #include "base/run_loop.h"
35 #include "base/single_thread_task_runner.h"
36 #include "base/strings/pattern.h"
37 #include "base/strings/string_number_conversions.h"
38 #include "base/strings/string_piece.h"
39 #include "base/strings/string_split.h"
40 #include "base/strings/string_util.h"
41 #include "base/strings/stringize_macros.h"
42 #include "base/strings/stringprintf.h"
43 #include "base/strings/utf_string_conversions.h"
44 #include "base/system/sys_info.h"
45 #include "base/task/post_task.h"
46 #include "base/task/thread_pool/thread_pool_instance.h"
47 #include "base/test/gtest_util.h"
48 #include "base/test/gtest_xml_util.h"
49 #include "base/test/launcher/test_launcher_tracer.h"
50 #include "base/test/launcher/test_results_tracker.h"
51 #include "base/test/test_switches.h"
52 #include "base/test/test_timeouts.h"
53 #include "base/threading/thread_restrictions.h"
54 #include "base/threading/thread_task_runner_handle.h"
55 #include "base/time/time.h"
56 #include "build/build_config.h"
57 #include "testing/gtest/include/gtest/gtest.h"
58 
59 #if defined(OS_POSIX)
60 #include <signal.h>
61 #include <fcntl.h>
62 
63 #include "base/files/file_descriptor_watcher_posix.h"
64 #endif
65 
66 #if defined(OS_APPLE)
67 #include "base/mac/scoped_nsautorelease_pool.h"
68 #endif
69 
70 #if defined(OS_WIN)
71 #include "base/strings/string_util_win.h"
72 #include "base/win/windows_version.h"
73 #endif
74 
75 #if defined(OS_FUCHSIA)
76 #include <lib/fdio/namespace.h>
77 #include <lib/zx/job.h>
78 #include <lib/zx/time.h>
79 #include "base/atomic_sequence_num.h"
80 #include "base/base_paths_fuchsia.h"
81 #include "base/fuchsia/default_job.h"
82 #include "base/fuchsia/file_utils.h"
83 #include "base/fuchsia/fuchsia_logging.h"
84 #include "base/path_service.h"
85 #endif
86 
87 namespace base {
88 
89 // See https://groups.google.com/a/chromium.org/d/msg/chromium-dev/nkdTP7sstSc/uT3FaE_sgkAJ .
90 using ::operator<<;
91 
// The environment variable name for the total number of test shards.
// (Name defined by gtest's sharding protocol.)
const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
// The environment variable name for the test shard index.
// (Name defined by gtest's sharding protocol.)
const char kTestShardIndex[] = "GTEST_SHARD_INDEX";

// Prefix indicating test has to run prior to the other test.
const char kPreTestPrefix[] = "PRE_";

// Prefix indicating test is disabled, will not run unless specified.
const char kDisabledTestPrefix[] = "DISABLED_";
102 
103 namespace {
104 
// Global tag for test runs where the results are unreliable for any reason.
const char kUnreliableResultsTag[] = "UNRELIABLE_RESULTS";

// Maximum time of no output after which we print list of processes still
// running. This deliberately doesn't use TestTimeouts (which is otherwise
// a recommended solution), because they can be increased. This would defeat
// the purpose of this timeout, which is 1) to avoid buildbot "no output for
// X seconds" timeout killing the process 2) help communicate status of
// the test launcher to people looking at the output (no output for a long
// time is mysterious and gives no info about what is happening) 3) help
// debugging in case the process hangs anyway.
// Also used as the wait deadline for Fuchsia job teardown (WaitForJobExit).
constexpr TimeDelta kOutputTimeout = TimeDelta::FromSeconds(15);

// Limit of output snippet lines when printing to stdout.
// Avoids flooding the logs with amount of output that gums up
// the infrastructure.
const size_t kOutputSnippetLinesLimit = 5000;

// Limit of output snippet size (300 KiB). Exceeding this limit
// results in truncating the output and failing the test.
const size_t kOutputSnippetBytesLimit = 300 * 1024;

// Limit of seed values for gtest shuffling. Arbitrary, but based on
// gtest's similarly arbitrary choice.
const uint32_t kRandomSeedUpperBound = 100000;
130 
131 // Set of live launch test processes with corresponding lock (it is allowed
132 // for callers to launch processes on different threads).
GetLiveProcessesLock()133 Lock* GetLiveProcessesLock() {
134   static auto* lock = new Lock;
135   return lock;
136 }
137 
GetLiveProcesses()138 std::map<ProcessHandle, CommandLine>* GetLiveProcesses() {
139   static auto* map = new std::map<ProcessHandle, CommandLine>;
140   return map;
141 }
142 
143 // Performance trace generator.
GetTestLauncherTracer()144 TestLauncherTracer* GetTestLauncherTracer() {
145   static auto* tracer = new TestLauncherTracer;
146   return tracer;
147 }
148 
149 #if defined(OS_FUCHSIA)
// Waits until |job| contains no processes and no nested jobs, with an overall
// deadline of |kOutputTimeout| from now. Returns ZX_OK on full teardown, or
// the first wait error otherwise (e.g. ZX_ERR_TIMED_OUT if the deadline
// expires before both signals are observed).
zx_status_t WaitForJobExit(const zx::job& job) {
  zx::time deadline =
      zx::deadline_after(zx::duration(kOutputTimeout.ToZxDuration()));
  zx_signals_t to_wait_for = ZX_JOB_NO_JOBS | ZX_JOB_NO_PROCESSES;
  while (to_wait_for) {
    zx_signals_t observed = 0;
    zx_status_t status = job.wait_one(to_wait_for, deadline, &observed);
    if (status != ZX_OK)
      return status;
    // Each signal only needs to be seen once; keep waiting for the rest.
    to_wait_for &= ~observed;
  }
  return ZX_OK;
}
163 #endif  // defined(OS_FUCHSIA)
164 
165 #if defined(OS_POSIX)
// Self-pipe that makes it possible to do complex shutdown handling
// outside of the signal handler. [0] is the read end, [1] the write end.
int g_shutdown_pipe[2] = { -1, -1 };

// Async-signal-safe handler: just writes one byte into the self-pipe so the
// real shutdown work can run on a normal thread watching the read end.
void ShutdownPipeSignalHandler(int signal) {
  HANDLE_EINTR(write(g_shutdown_pipe[1], "q", 1));
}
173 
KillSpawnedTestProcesses()174 void KillSpawnedTestProcesses() {
175   // Keep the lock until exiting the process to prevent further processes
176   // from being spawned.
177   AutoLock lock(*GetLiveProcessesLock());
178 
179   fprintf(stdout, "Sending SIGTERM to %zu child processes... ",
180           GetLiveProcesses()->size());
181   fflush(stdout);
182 
183   for (const auto& pair : *GetLiveProcesses()) {
184     // Send the signal to entire process group.
185     kill((-1) * (pair.first), SIGTERM);
186   }
187 
188   fprintf(stdout,
189           "done.\nGiving processes a chance to terminate cleanly... ");
190   fflush(stdout);
191 
192   PlatformThread::Sleep(TimeDelta::FromMilliseconds(500));
193 
194   fprintf(stdout, "done.\n");
195   fflush(stdout);
196 
197   fprintf(stdout, "Sending SIGKILL to %zu child processes... ",
198           GetLiveProcesses()->size());
199   fflush(stdout);
200 
201   for (const auto& pair : *GetLiveProcesses()) {
202     // Send the signal to entire process group.
203     kill((-1) * (pair.first), SIGKILL);
204   }
205 
206   fprintf(stdout, "done.\n");
207   fflush(stdout);
208 }
209 #endif  // defined(OS_POSIX)
210 
211 // Parses the environment variable var as an Int32.  If it is unset, returns
212 // true.  If it is set, unsets it then converts it to Int32 before
213 // returning it in |result|.  Returns true on success.
TakeInt32FromEnvironment(const char * const var,int32_t * result)214 bool TakeInt32FromEnvironment(const char* const var, int32_t* result) {
215   std::unique_ptr<Environment> env(Environment::Create());
216   std::string str_val;
217 
218   if (!env->GetVar(var, &str_val))
219     return true;
220 
221   if (!env->UnSetVar(var)) {
222     LOG(ERROR) << "Invalid environment: we could not unset " << var << ".\n";
223     return false;
224   }
225 
226   if (!StringToInt(str_val, result)) {
227     LOG(ERROR) << "Invalid environment: " << var << " is not an integer.\n";
228     return false;
229   }
230 
231   return true;
232 }
233 
234 // Unsets the environment variable |name| and returns true on success.
235 // Also returns true if the variable just doesn't exist.
UnsetEnvironmentVariableIfExists(const std::string & name)236 bool UnsetEnvironmentVariableIfExists(const std::string& name) {
237   std::unique_ptr<Environment> env(Environment::Create());
238   std::string str_val;
239   if (!env->GetVar(name, &str_val))
240     return true;
241   return env->UnSetVar(name);
242 }
243 
244 // Returns true if bot mode has been requested, i.e. defaults optimized
245 // for continuous integration bots. This way developers don't have to remember
246 // special command-line flags.
BotModeEnabled(const CommandLine * command_line)247 bool BotModeEnabled(const CommandLine* command_line) {
248   std::unique_ptr<Environment> env(Environment::Create());
249   return command_line->HasSwitch(switches::kTestLauncherBotMode) ||
250          env->HasVar("CHROMIUM_TEST_LAUNCHER_BOT_MODE");
251 }
252 
253 // Returns command line command line after gtest-specific processing
254 // and applying |wrapper|.
// Returns a new CommandLine for launching a child gtest process, derived from
// |command_line|: switches handled by the launcher itself are stripped, the
// retries-left counter is injected (unless already present), and |wrapper|
// (if any) is prepended.
CommandLine PrepareCommandLineForGTest(const CommandLine& command_line,
                                       const std::string& wrapper,
                                       const size_t retries_left) {
  CommandLine new_command_line(command_line.GetProgram());
  CommandLine::SwitchMap switches = command_line.GetSwitches();

  // Handled by the launcher process.
  switches.erase(kGTestRepeatFlag);
  switches.erase(kIsolatedScriptTestRepeatFlag);

  // Don't try to write the final XML report in child processes.
  switches.erase(kGTestOutputFlag);

  // Tell the child how many retries remain, unless the caller already set it.
  if (switches.find(switches::kTestLauncherRetriesLeft) == switches.end()) {
    switches[switches::kTestLauncherRetriesLeft] =
#if defined(OS_WIN)
        base::NumberToWString(
#else
        base::NumberToString(
#endif
            retries_left);
  }

  // Copy the remaining switches onto the fresh command line.
  for (const auto& switch_pair : switches)
    new_command_line.AppendSwitchNative(switch_pair.first, switch_pair.second);

  // Prepend wrapper after last CommandLine quasi-copy operation. CommandLine
  // does not really support removing switches well, and trying to do that
  // on a CommandLine with a wrapper is known to break.
  // TODO(phajdan.jr): Give it a try to support CommandLine removing switches.
#if defined(OS_WIN)
  new_command_line.PrependWrapper(UTF8ToWide(wrapper));
#else
  new_command_line.PrependWrapper(wrapper);
#endif

  return new_command_line;
}
295 
296 // Launches a child process using |command_line|. If the child process is still
297 // running after |timeout|, it is terminated and |*was_timeout| is set to true.
298 // Returns exit code of the process.
// Launches a child process using |command_line|. If the child process is still
// running after |timeout|, it is terminated and |*was_timeout| is set to true.
// Returns exit code of the process, or -1 on launch failure / timeout /
// (Fuchsia) leaked sub-processes.
int LaunchChildTestProcessWithOptions(const CommandLine& command_line,
                                      const LaunchOptions& options,
                                      int flags,
                                      TimeDelta timeout,
                                      TestLauncherDelegate* delegate,
                                      bool* was_timeout) {
  TimeTicks start_time(TimeTicks::Now());

#if defined(OS_POSIX)
  // Make sure an option we rely on is present - see LaunchChildGTestProcess.
  DCHECK(options.new_process_group);
#endif

  // Copy |options| so platform-specific fields can be filled in below.
  LaunchOptions new_options(options);

#if defined(OS_WIN)
  DCHECK(!new_options.job_handle);

  win::ScopedHandle job_handle;
  if (flags & TestLauncher::USE_JOB_OBJECTS) {
    job_handle.Set(CreateJobObject(NULL, NULL));
    if (!job_handle.IsValid()) {
      LOG(ERROR) << "Could not create JobObject.";
      return -1;
    }

    // Kill everything in the job when the (last) job handle is closed, e.g.
    // when this launcher exits.
    DWORD job_flags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;

    // Allow break-away from job since sandbox and few other places rely on it
    // on Windows versions prior to Windows 8 (which supports nested jobs).
    if (win::GetVersion() < win::Version::WIN8 &&
        flags & TestLauncher::ALLOW_BREAKAWAY_FROM_JOB) {
      job_flags |= JOB_OBJECT_LIMIT_BREAKAWAY_OK;
    }

    if (!SetJobObjectLimitFlags(job_handle.Get(), job_flags)) {
      LOG(ERROR) << "Could not SetJobObjectLimitFlags.";
      return -1;
    }

    new_options.job_handle = job_handle.Get();
  }
#elif defined(OS_FUCHSIA)
  DCHECK(!new_options.job_handle);

  // Set the clone policy, deliberately omitting FDIO_SPAWN_CLONE_NAMESPACE so
  // that we can install a different /data.
  new_options.spawn_flags = FDIO_SPAWN_CLONE_STDIO | FDIO_SPAWN_CLONE_JOB;

  const base::FilePath kDataPath(base::kPersistedDataDirectoryPath);

  // Clone all namespace entries from the current process, except /data, which
  // is overridden below.
  fdio_flat_namespace_t* flat_namespace = nullptr;
  zx_status_t result = fdio_ns_export_root(&flat_namespace);
  ZX_CHECK(ZX_OK == result, result) << "fdio_ns_export_root";
  for (size_t i = 0; i < flat_namespace->count; ++i) {
    base::FilePath path(flat_namespace->path[i]);
    if (path == kDataPath) {
      // Drop /data; the child gets its own isolated /data further below.
      result = zx_handle_close(flat_namespace->handle[i]);
      ZX_CHECK(ZX_OK == result, result) << "zx_handle_close";
    } else {
      new_options.paths_to_transfer.push_back(
          {path, flat_namespace->handle[i]});
    }
  }
  free(flat_namespace);

  // Run the child inside its own job so it (and anything it spawns) can be
  // waited on and killed as a group.
  zx::job job_handle;
  result = zx::job::create(*GetDefaultJob(), 0, &job_handle);
  ZX_CHECK(ZX_OK == result, result) << "zx_job_create";
  new_options.job_handle = job_handle.get();

  // Give this test its own isolated /data directory by creating a new temporary
  // subdirectory under data (/data/test-$PID) and binding that to /data on the
  // child process.
  CHECK(base::PathExists(kDataPath));

  // Create the test subdirectory with a name that is unique to the child test
  // process (qualified by parent PID and an autoincrementing test process
  // index).
  static base::AtomicSequenceNumber child_launch_index;
  base::FilePath nested_data_path = kDataPath.AppendASCII(
      base::StringPrintf("test-%zu-%d", base::Process::Current().Pid(),
                         child_launch_index.GetNext()));
  CHECK(!base::DirectoryExists(nested_data_path));
  CHECK(base::CreateDirectory(nested_data_path));
  DCHECK(base::DirectoryExists(nested_data_path));

  // Bind the new test subdirectory to /data in the child process' namespace.
  new_options.paths_to_transfer.push_back(
      {kDataPath,
       base::OpenDirectoryHandle(nested_data_path).TakeChannel().release()});
#endif  // defined(OS_FUCHSIA)

#if defined(OS_LINUX) || defined(OS_CHROMEOS)
  // To prevent accidental privilege sharing to an untrusted child, processes
  // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
  // new child will be privileged and trusted.
  new_options.allow_new_privs = true;
#endif

  Process process;

  {
    // Note how we grab the lock before the process possibly gets created.
    // This ensures that when the lock is held, ALL the processes are registered
    // in the set.
    AutoLock lock(*GetLiveProcessesLock());

#if defined(OS_WIN)
    // Allow the handle used to capture stdio and stdout to be inherited by the
    // child. Note that this is done under GetLiveProcessesLock() to ensure that
    // only the desired child receives the handle.
    if (new_options.stdout_handle) {
      ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT,
                             HANDLE_FLAG_INHERIT);
    }
#endif

    process = LaunchProcess(command_line, new_options);

#if defined(OS_WIN)
    // Revoke inheritance so that the handle isn't leaked into other children.
    // Note that this is done under GetLiveProcessesLock() to ensure that only
    // the desired child receives the handle.
    if (new_options.stdout_handle)
      ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT, 0);
#endif

    if (!process.IsValid())
      return -1;

    // TODO(rvargas) crbug.com/417532: Don't store process handles.
    GetLiveProcesses()->insert(std::make_pair(process.Handle(), command_line));
  }

  int exit_code = 0;
  bool did_exit = false;

  {
    // Waiting on a process blocks; explicitly allow that here in test code.
    base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
    did_exit = process.WaitForExitWithTimeout(timeout, &exit_code);
  }

  if (!did_exit) {
    if (delegate)
      delegate->OnTestTimedOut(command_line);

    *was_timeout = true;
    exit_code = -1;  // Set a non-zero exit code to signal a failure.

    {
      base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
      // Ensure that the process terminates.
      process.Terminate(-1, true);
    }
  }

#if defined(OS_FUCHSIA)
  zx_status_t wait_status = WaitForJobExit(job_handle);
  if (wait_status != ZX_OK) {
    LOG(ERROR) << "Batch leaked jobs or processes.";
    exit_code = -1;
  }
#endif  // defined(OS_FUCHSIA)

  {
    // Note how we grab the log before issuing a possibly broad process kill.
    // Other code parts that grab the log kill processes, so avoid trying
    // to do that twice and trigger all kinds of log messages.
    AutoLock lock(*GetLiveProcessesLock());

#if defined(OS_FUCHSIA)
    zx_status_t status = job_handle.kill();
    ZX_CHECK(status == ZX_OK, status);

    // Cleanup the data directory.
    CHECK(DeletePathRecursively(nested_data_path));
#elif defined(OS_POSIX)
    // It is not possible to waitpid() on any leaked sub-processes of the test
    // batch process, since those are not direct children of this process.
    // kill()ing the process-group will return a result indicating whether the
    // group was found (i.e. processes were still running in it) or not (i.e.
    // sub-processes had exited already). Unfortunately many tests (e.g. browser
    // tests) have processes exit asynchronously, so checking the kill() result
    // will report false failures.
    // Unconditionally kill the process group, regardless of the batch exit-code
    // until a better solution is available.
    kill(-1 * process.Handle(), SIGKILL);
#endif  // defined(OS_POSIX)

    GetLiveProcesses()->erase(process.Handle());
  }

  GetTestLauncherTracer()->RecordProcessExecution(
      start_time, TimeTicks::Now() - start_time);

  return exit_code;
}
499 
500 struct ChildProcessResults {
501   // Total time for DoLaunchChildTest Process to execute.
502   TimeDelta elapsed_time;
503   // If stdio is redirected, pass output file content.
504   std::string output_file_contents;
505   // True if child process timed out.
506   bool was_timeout = false;
507   // Exit code of child process.
508   int exit_code;
509 };
510 
511 // Returns the path to a temporary directory within |task_temp_dir| for the
512 // child process of index |child_index|, or an empty FilePath if per-child temp
513 // dirs are not supported.
// Returns the path to a temporary directory within |task_temp_dir| for the
// child process of index |child_index|, or an empty FilePath if per-child temp
// dirs are not supported. The directory is created if supported.
FilePath CreateChildTempDirIfSupported(const FilePath& task_temp_dir,
                                       int child_index) {
  if (!TestLauncher::SupportsPerChildTempDirs())
    return FilePath();
  // One subdirectory per child, named after the child's index.
  FilePath child_temp_dir =
      task_temp_dir.AppendASCII(NumberToString(child_index));
  CHECK(CreateDirectoryAndGetError(child_temp_dir, nullptr));
  return child_temp_dir;
}
522 
523 // Adds the platform-specific variable setting |temp_dir| as a process's
524 // temporary directory to |environment|.
// Adds the platform-specific variable setting |temp_dir| as a process's
// temporary directory to |environment|. Each platform consults a different
// environment variable for this purpose.
void SetTemporaryDirectory(const FilePath& temp_dir,
                           EnvironmentMap* environment) {
#if defined(OS_WIN)
  environment->emplace(L"TMP", temp_dir.value());
#elif defined(OS_APPLE)
  environment->emplace("MAC_CHROMIUM_TMPDIR", temp_dir.value());
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
  environment->emplace("TMPDIR", temp_dir.value());
#endif
}
535 
536 // This launches the child test process, waits for it to complete,
537 // and returns child process results.
// This launches the child test process, waits for it to complete,
// and returns child process results (exit code, timeout flag, captured
// output when |redirect_stdio| is set, and elapsed wall time).
ChildProcessResults DoLaunchChildTestProcess(
    const CommandLine& command_line,
    const FilePath& process_temp_dir,
    TimeDelta timeout,
    const TestLauncher::LaunchOptions& test_launch_options,
    bool redirect_stdio,
    TestLauncherDelegate* delegate) {
  TimeTicks start_time = TimeTicks::Now();

  ChildProcessResults result;

  ScopedFILE output_file;
  FilePath output_filename;
  if (redirect_stdio) {
    // Capture the child's stdout/stderr into a temporary file.
    output_file = CreateAndOpenTemporaryStream(&output_filename);
    CHECK(output_file);
#if defined(OS_WIN)
    // Paint the file so that it will be deleted when all handles are closed.
    if (!FILEToFile(output_file.get()).DeleteOnClose(true)) {
      PLOG(WARNING) << "Failed to mark " << output_filename.AsUTF8Unsafe()
                    << " for deletion on close";
    }
#endif
  }

  LaunchOptions options;

  // Tell the child process to use its designated temporary directory.
  if (!process_temp_dir.empty())
    SetTemporaryDirectory(process_temp_dir, &options.environment);
#if defined(OS_WIN)

  options.inherit_mode = test_launch_options.inherit_mode;
  options.handles_to_inherit = test_launch_options.handles_to_inherit;
  if (redirect_stdio) {
    // Route the child's stdout and stderr to the capture file's OS handle.
    HANDLE handle =
        reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(output_file.get())));
    CHECK_NE(INVALID_HANDLE_VALUE, handle);
    options.stdin_handle = INVALID_HANDLE_VALUE;
    options.stdout_handle = handle;
    options.stderr_handle = handle;
    // See LaunchOptions.stdout_handle comments for why this compares against
    // FILE_TYPE_CHAR.
    if (options.inherit_mode == base::LaunchOptions::Inherit::kSpecific &&
        GetFileType(handle) != FILE_TYPE_CHAR) {
      options.handles_to_inherit.push_back(handle);
    }
  }

#else  // if !defined(OS_WIN)

  options.fds_to_remap = test_launch_options.fds_to_remap;
  if (redirect_stdio) {
    // Remap the child's stdout and stderr onto the capture file's fd.
    int output_file_fd = fileno(output_file.get());
    CHECK_LE(0, output_file_fd);
    options.fds_to_remap.push_back(
        std::make_pair(output_file_fd, STDOUT_FILENO));
    options.fds_to_remap.push_back(
        std::make_pair(output_file_fd, STDERR_FILENO));
  }

#if !defined(OS_FUCHSIA)
  // Required by LaunchChildTestProcessWithOptions (DCHECKed there) so the
  // whole process group can be signalled on cleanup.
  options.new_process_group = true;
#endif
#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_BSD)
  options.kill_on_parent_death = true;
#endif

#endif  // !defined(OS_WIN)

  result.exit_code = LaunchChildTestProcessWithOptions(
      command_line, options, test_launch_options.flags, timeout, delegate,
      &result.was_timeout);

  if (redirect_stdio) {
    fflush(output_file.get());

    // Reading the file can sometimes fail when the process was killed midflight
    // (e.g. on test suite timeout): https://crbug.com/826408. Attempt to read
    // the output file anyways, but do not crash on failure in this case.
    CHECK(ReadStreamToString(output_file.get(), &result.output_file_contents) ||
          result.exit_code != 0);

    output_file.reset();
#if !defined(OS_WIN)
    // On Windows, the reset() above is enough to delete the file since it was
    // painted for such after being opened. Lesser platforms require an explicit
    // delete now.
    if (!DeleteFile(output_filename))
      LOG(WARNING) << "Failed to delete " << output_filename.AsUTF8Unsafe();
#endif
  }
  result.elapsed_time = TimeTicks::Now() - start_time;
  return result;
}
633 
ExtractTestsFromFilter(const std::string & filter,bool double_colon_supported)634 std::vector<std::string> ExtractTestsFromFilter(const std::string& filter,
635                                                 bool double_colon_supported) {
636   std::vector<std::string> tests;
637   if (double_colon_supported) {
638     tests =
639         SplitString(filter, "::", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
640   }
641   if (tests.size() <= 1) {
642     tests =
643         SplitString(filter, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
644   }
645   return tests;
646 }
647 
648 // A test runner object to run tests across a number of sequence runners,
649 // and control running pre tests in sequence.
// A test runner object to run tests across a number of sequence runners,
// and control running pre tests in sequence.
class TestRunner {
 public:
  explicit TestRunner(TestLauncher* launcher,
                      size_t runner_count = 1u,
                      size_t batch_size = 1u)
      : launcher_(launcher),
        runner_count_(runner_count),
        batch_size_(batch_size) {}

  // Sets |test_names| to be run, with |batch_size| tests per process.
  // Posts LaunchNextTask |runner_count| number of times, each with a separate
  // task runner.
  void Run(const std::vector<std::string>& test_names);

 private:
  // Called to check if the next batch has to run on the same
  // sequence task runner and using the same temporary directory.
  // True when the batch is a single PRE_ test, which has to run before
  // its main test (see kPreTestPrefix).
  static bool ShouldReuseStateFromLastBatch(
      const std::vector<std::string>& test_names) {
    return test_names.size() == 1u &&
           test_names.front().find(kPreTestPrefix) != std::string::npos;
  }

  // Launches the next child process on |task_runner| and clears
  // |last_task_temp_dir| from the previous task.
  void LaunchNextTask(scoped_refptr<TaskRunner> task_runner,
                      const FilePath& last_task_temp_dir);

  // Forwards |last_task_temp_dir| and launches the next task on main thread.
  // The method is called on |task_runner|.
  void ClearAndLaunchNext(scoped_refptr<TaskRunner> main_thread_runner,
                          scoped_refptr<TaskRunner> task_runner,
                          const FilePath& last_task_temp_dir) {
    main_thread_runner->PostTask(
        FROM_HERE,
        BindOnce(&TestRunner::LaunchNextTask, weak_ptr_factory_.GetWeakPtr(),
                 task_runner, last_task_temp_dir));
  }

  // Verifies all scheduling state is only touched on the main thread.
  ThreadChecker thread_checker_;

  // Tests still to be dispatched; stored reversed by Run() so batches can be
  // taken cheaply off the back (see LaunchNextTask).
  std::vector<std::string> tests_to_run_;
  TestLauncher* const launcher_;
  std::vector<scoped_refptr<TaskRunner>> task_runners_;
  // Number of sequenced task runners to use.
  const size_t runner_count_;
  // Number of TaskRunners that have finished.
  size_t runners_done_ = 0;
  // Number of tests per process, 0 is special case for all tests.
  const size_t batch_size_;
  RunLoop run_loop_;

  base::WeakPtrFactory<TestRunner> weak_ptr_factory_{this};
};
704 
void TestRunner::Run(const std::vector<std::string>& test_names) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // No sequence runners, fail immediately.
  CHECK_GT(runner_count_, 0u);
  tests_to_run_ = test_names;
  // Reverse test order to avoid copying the whole vector when removing tests
  // (batches are then popped off the back in LaunchNextTask).
  ranges::reverse(tests_to_run_);
  runners_done_ = 0;
  task_runners_.clear();
  // Start |runner_count_| independent sequences; each one drives its own
  // chain of LaunchNextTask calls on the main thread.
  for (size_t i = 0; i < runner_count_; i++) {
    task_runners_.push_back(ThreadPool::CreateSequencedTaskRunner(
        {MayBlock(), TaskShutdownBehavior::BLOCK_SHUTDOWN}));
    ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE,
        BindOnce(&TestRunner::LaunchNextTask, weak_ptr_factory_.GetWeakPtr(),
                 task_runners_.back(), FilePath()));
  }
  // Blocks until every sequence reports completion in LaunchNextTask.
  run_loop_.Run();
}
724 
void TestRunner::LaunchNextTask(scoped_refptr<TaskRunner> task_runner,
                                const FilePath& last_task_temp_dir) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // Delete the temporary directory used by the previous task on this sequence.
  if (!last_task_temp_dir.empty() &&
      !DeletePathRecursively(last_task_temp_dir)) {
    // This needs to be non-fatal at least for Windows.
    LOG(WARNING) << "Failed to delete " << last_task_temp_dir.AsUTF8Unsafe();
  }

  // No more tests to run, finish sequence.
  if (tests_to_run_.empty()) {
    runners_done_++;
    // All sequence runners are done, quit the loop.
    if (runners_done_ == runner_count_)
      run_loop_.QuitWhenIdle();
    return;
  }

  // Create a temporary directory for this task. This directory will hold the
  // flags and results files for the child processes as well as their User Data
  // dir, where appropriate. For platforms that support per-child temp dirs,
  // this directory will also contain one subdirectory per child for that
  // child's process-wide temp dir.
  base::FilePath task_temp_dir;
  CHECK(CreateNewTempDirectory(FilePath::StringType(), &task_temp_dir));
  bool post_to_current_runner = true;
  size_t batch_size = (batch_size_ == 0) ? tests_to_run_.size() : batch_size_;

  // Keep posting batches to the same sequence while a batch requires the next
  // one to share state (see ShouldReuseStateFromLastBatch), or until tests
  // run out.
  int child_index = 0;
  while (post_to_current_runner && !tests_to_run_.empty()) {
    batch_size = std::min(batch_size, tests_to_run_.size());
    // |tests_to_run_| is stored reversed, so the next batch comes off the
    // back (in original order via reverse iterators).
    std::vector<std::string> batch(tests_to_run_.rbegin(),
                                   tests_to_run_.rbegin() + batch_size);
    tests_to_run_.erase(tests_to_run_.end() - batch_size, tests_to_run_.end());
    task_runner->PostTask(
        FROM_HERE,
        BindOnce(&TestLauncher::LaunchChildGTestProcess, Unretained(launcher_),
                 ThreadTaskRunnerHandle::Get(), batch, task_temp_dir,
                 CreateChildTempDirIfSupported(task_temp_dir, child_index++)));
    post_to_current_runner = ShouldReuseStateFromLastBatch(batch);
  }
  // After the posted batches finish, hop back to the main thread to continue
  // this sequence and clean up |task_temp_dir|.
  task_runner->PostTask(
      FROM_HERE,
      BindOnce(&TestRunner::ClearAndLaunchNext, Unretained(this),
               ThreadTaskRunnerHandle::Get(), task_runner, task_temp_dir));
}
772 
773 // Returns the number of files and directories in |dir|, or 0 if |dir| is empty.
CountItemsInDirectory(const FilePath & dir)774 int CountItemsInDirectory(const FilePath& dir) {
775   if (dir.empty())
776     return 0;
777   int items = 0;
778   FileEnumerator file_enumerator(
779       dir, /*recursive=*/false,
780       FileEnumerator::FILES | FileEnumerator::DIRECTORIES);
781   for (FilePath name = file_enumerator.Next(); !name.empty();
782        name = file_enumerator.Next()) {
783     ++items;
784   }
785   return items;
786 }
787 
788 }  // namespace
789 
// Command-line switch names understood by the launcher. The kGTest* group
// mirrors standard googletest flags; the kIsolatedScript* group mirrors the
// equivalent switches used by isolated-script test runners.
const char kGTestBreakOnFailure[] = "gtest_break_on_failure";
const char kGTestFilterFlag[] = "gtest_filter";
const char kGTestFlagfileFlag[] = "gtest_flagfile";
const char kGTestHelpFlag[]   = "gtest_help";
const char kGTestListTestsFlag[] = "gtest_list_tests";
const char kGTestRepeatFlag[] = "gtest_repeat";
const char kGTestRunDisabledTestsFlag[] = "gtest_also_run_disabled_tests";
const char kGTestOutputFlag[] = "gtest_output";
const char kGTestShuffleFlag[] = "gtest_shuffle";
const char kGTestRandomSeedFlag[] = "gtest_random_seed";
const char kIsolatedScriptRunDisabledTestsFlag[] =
    "isolated-script-test-also-run-disabled-tests";
const char kIsolatedScriptTestFilterFlag[] = "isolated-script-test-filter";
const char kIsolatedScriptTestRepeatFlag[] = "isolated-script-test-repeat";
804 
// Value class describing a single test. Wraps the data from a TestIdentifier
// and precomputes whether the test carries the disabled-test or PRE_ prefix,
// plus provides the name variants the launcher needs for filtering/matching.
class TestLauncher::TestInfo {
 public:
  TestInfo() = default;
  TestInfo(const TestInfo& other) = default;
  TestInfo(const TestIdentifier& test_id);
  ~TestInfo() = default;

  // Returns test name excluding DISABLED_ prefix.
  std::string GetDisabledStrippedName() const;

  // Returns full test name.
  std::string GetFullName() const;

  // Returns test name with PRE_ prefix added, excluding DISABLED_ prefix.
  std::string GetPreName() const;

  // Returns test name excluding DISABLED_ and PRE_ prefixes.
  std::string GetPrefixStrippedName() const;

  // Trivial accessors for the fields captured from the TestIdentifier.
  const std::string& test_case_name() const { return test_case_name_; }
  const std::string& test_name() const { return test_name_; }
  const std::string& file() const { return file_; }
  int line() const { return line_; }
  bool disabled() const { return disabled_; }
  bool pre_test() const { return pre_test_; }

 private:
  std::string test_case_name_;
  std::string test_name_;
  std::string file_;
  int line_;
  // Computed in the TestIdentifier constructor from the test's full name.
  bool disabled_;
  bool pre_test_;
};
839 
TestLauncher::TestInfo::TestInfo(const TestIdentifier& test_id)
    : test_case_name_(test_id.test_case_name),
      test_name_(test_id.test_name),
      file_(test_id.file),
      line_(test_id.line),
      disabled_(false),
      pre_test_(false) {
  // A test counts as disabled if the disabled-test prefix appears anywhere in
  // the full "Case.Test" name (either the case or the test may carry it);
  // the PRE_ check only looks at the test name itself.
  disabled_ = GetFullName().find(kDisabledTestPrefix) != std::string::npos;
  pre_test_ = test_name_.find(kPreTestPrefix) != std::string::npos;
}
850 
GetDisabledStrippedName() const851 std::string TestLauncher::TestInfo::GetDisabledStrippedName() const {
852   std::string test_name = GetFullName();
853   ReplaceSubstringsAfterOffset(&test_name, 0, kDisabledTestPrefix,
854                                std::string());
855   return test_name;
856 }
857 
// Returns the full "TestCase.TestName" string for this test.
std::string TestLauncher::TestInfo::GetFullName() const {
  return FormatFullTestName(test_case_name_, test_name_);
}
861 
GetPreName() const862 std::string TestLauncher::TestInfo::GetPreName() const {
863   std::string name = test_name_;
864   ReplaceSubstringsAfterOffset(&name, 0, kDisabledTestPrefix, std::string());
865   std::string case_name = test_case_name_;
866   ReplaceSubstringsAfterOffset(&case_name, 0, kDisabledTestPrefix,
867                                std::string());
868   return FormatFullTestName(case_name, kPreTestPrefix + name);
869 }
870 
GetPrefixStrippedName() const871 std::string TestLauncher::TestInfo::GetPrefixStrippedName() const {
872   std::string test_name = GetDisabledStrippedName();
873   ReplaceSubstringsAfterOffset(&test_name, 0, kPreTestPrefix, std::string());
874   return test_name;
875 }
876 
TestLauncherDelegate::~TestLauncherDelegate() = default;

// Default implementation: no test is excluded from the run.
bool TestLauncherDelegate::ShouldRunTest(const TestIdentifier& test) {
  return true;
}
882 
// LaunchOptions is a plain value type; all special members are defaulted.
TestLauncher::LaunchOptions::LaunchOptions() = default;
TestLauncher::LaunchOptions::LaunchOptions(const LaunchOptions& other) =
    default;
TestLauncher::LaunchOptions::~LaunchOptions() = default;
887 
// |launcher_delegate| must outlive this object. |parallel_jobs| is the number
// of child processes run concurrently; |retry_limit| is the default number of
// retries for failed tests (Init() may override it from command-line flags).
TestLauncher::TestLauncher(TestLauncherDelegate* launcher_delegate,
                           size_t parallel_jobs,
                           size_t retry_limit)
    : launcher_delegate_(launcher_delegate),
      total_shards_(1),
      shard_index_(0),
      cycles_(1),
      broken_threshold_(0),
      test_started_count_(0),
      test_finished_count_(0),
      test_success_count_(0),
      test_broken_count_(0),
      retries_left_(0),
      retry_limit_(retry_limit),
      force_run_broken_tests_(false),
      // Fires OnOutputTimeout() when no output is produced for kOutputTimeout.
      watchdog_timer_(FROM_HERE,
                      kOutputTimeout,
                      this,
                      &TestLauncher::OnOutputTimeout),
      parallel_jobs_(parallel_jobs),
      print_test_stdio_(AUTO) {}
909 
~TestLauncher()910 TestLauncher::~TestLauncher() {
911   if (base::ThreadPoolInstance::Get()) {
912     base::ThreadPoolInstance::Get()->Shutdown();
913   }
914 }
915 
// Runs the configured test iterations. Uses the process-wide command line
// when |command_line| is null. Returns true iff every iteration passed.
bool TestLauncher::Run(CommandLine* command_line) {
  if (!Init((command_line == nullptr) ? CommandLine::ForCurrentProcess()
                                      : command_line))
    return false;


#if defined(OS_POSIX)
  // Self-pipe trick: the signal handler writes to g_shutdown_pipe and the
  // watcher below reacts on the main loop, keeping the handler async-safe.
  CHECK_EQ(0, pipe(g_shutdown_pipe));

  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_handler = &ShutdownPipeSignalHandler;

  CHECK_EQ(0, sigaction(SIGINT, &action, nullptr));
  CHECK_EQ(0, sigaction(SIGQUIT, &action, nullptr));
  CHECK_EQ(0, sigaction(SIGTERM, &action, nullptr));

  auto controller = base::FileDescriptorWatcher::WatchReadable(
      g_shutdown_pipe[0],
      base::BindRepeating(&TestLauncher::OnShutdownPipeReadable,
                          Unretained(this)));
#endif  // defined(OS_POSIX)

  // Start the watchdog timer.
  watchdog_timer_.Reset();

  // Indicate a test did not succeed.
  bool test_failed = false;
  int iterations = cycles_;
  if (cycles_ > 1 && !stop_on_failure_) {
    // If we don't stop on failure, execute all the repeats in all iteration,
    // which allows us to parallelize the execution.
    iterations = 1;
    repeats_per_iteration_ = cycles_;
  }
  // Set to false if any iteration fails.
  bool run_result = true;

  while ((iterations > 0 || iterations == -1) &&
         !(stop_on_failure_ && test_failed)) {
    OnTestIterationStart();

    RunTests();
    bool retry_result = RunRetryTests();
    // Signal failure, but continue to run all requested test iterations.
    // With the summary of all iterations at the end this is a good default.
    run_result = run_result && retry_result;

    if (retry_result) {
      fprintf(stdout, "SUCCESS: all tests passed.\n");
      fflush(stdout);
    }

    // A mismatch between started and succeeded counts means at least one test
    // in this iteration did not pass.
    test_failed = test_success_count_ != test_finished_count_;
    OnTestIterationFinished();
    // Special value "-1" means "repeat indefinitely".
    iterations = (iterations == -1) ? iterations : iterations - 1;
  }

  if (cycles_ != 1)
    results_tracker_.PrintSummaryOfAllIterations();

  MaybeSaveSummaryAsJSON(std::vector<std::string>());

  return run_result;
}
983 
// Launches one child process running the |test_names| batch (runs on a worker
// pool thread) and posts the results back to |task_runner| (the launcher's
// origin thread) for processing.
void TestLauncher::LaunchChildGTestProcess(
    scoped_refptr<TaskRunner> task_runner,
    const std::vector<std::string>& test_names,
    const FilePath& task_temp_dir,
    const FilePath& child_temp_dir) {
  // |result_file| receives the path the child writes its machine-readable
  // results to, as chosen by the delegate.
  FilePath result_file;
  CommandLine cmd_line = launcher_delegate_->GetCommandLine(
      test_names, task_temp_dir, &result_file);

  // Record the exact command line used to launch the child.
  CommandLine new_command_line(PrepareCommandLineForGTest(
      cmd_line, launcher_delegate_->GetWrapper(), retries_left_));
  LaunchOptions options;
  options.flags = launcher_delegate_->GetLaunchOptions();

  // The timeout covers the whole batch: per-test timeout scaled by the number
  // of tests in it.
  ChildProcessResults process_results = DoLaunchChildTestProcess(
      new_command_line, child_temp_dir,
      launcher_delegate_->GetTimeout() * test_names.size(), options,
      redirect_stdio_, launcher_delegate_);

  // Invoke ProcessTestResults on the original thread, not
  // on a worker pool thread.
  task_runner->PostTask(
      FROM_HERE,
      BindOnce(&TestLauncher::ProcessTestResults, Unretained(this), test_names,
               result_file, process_results.output_file_contents,
               process_results.elapsed_time, process_results.exit_code,
               process_results.was_timeout,
               CountItemsInDirectory(child_temp_dir)));
}
1014 
1015 // Determines which result status will be assigned for missing test results.
MissingResultStatus(size_t tests_to_run_count,bool was_timeout,bool exit_code)1016 TestResult::Status MissingResultStatus(size_t tests_to_run_count,
1017                                        bool was_timeout,
1018                                        bool exit_code) {
1019   // There is more than one test, cannot assess status.
1020   if (tests_to_run_count > 1u)
1021     return TestResult::TEST_SKIPPED;
1022 
1023   // There is only one test and no results.
1024   // Try to determine status by timeout or exit code.
1025   if (was_timeout)
1026     return TestResult::TEST_TIMEOUT;
1027   if (exit_code != 0)
1028     return TestResult::TEST_FAILURE;
1029 
1030   // It's strange case when test executed successfully,
1031   // but we failed to read machine-readable report for it.
1032   return TestResult::TEST_UNKNOWN;
1033 }
1034 
1035 // Returns interpreted test results.
ProcessTestResults(const std::vector<std::string> & test_names,const FilePath & result_file,const std::string & output,TimeDelta elapsed_time,int exit_code,bool was_timeout,int leaked_items)1036 void TestLauncher::ProcessTestResults(
1037     const std::vector<std::string>& test_names,
1038     const FilePath& result_file,
1039     const std::string& output,
1040     TimeDelta elapsed_time,
1041     int exit_code,
1042     bool was_timeout,
1043     int leaked_items) {
1044   std::vector<TestResult> test_results;
1045   bool crashed = false;
1046   bool have_test_results =
1047       ProcessGTestOutput(result_file, &test_results, &crashed);
1048 
1049   if (!have_test_results) {
1050     // We do not have reliable details about test results (parsing test
1051     // stdout is known to be unreliable).
1052     LOG(ERROR) << "Failed to get out-of-band test success data, "
1053                   "dumping full stdio below:\n"
1054                << output << "\n";
1055     // This is odd, but sometimes ProcessGtestOutput returns
1056     // false, but TestResults is not empty.
1057     test_results.clear();
1058   }
1059 
1060   TestResult::Status missing_result_status =
1061       MissingResultStatus(test_names.size(), was_timeout, exit_code);
1062 
1063   // TODO(phajdan.jr): Check for duplicates and mismatches between
1064   // the results we got from XML file and tests we intended to run.
1065   std::map<std::string, TestResult> results_map;
1066   for (const auto& i : test_results)
1067     results_map[i.full_name] = i;
1068 
1069   // Results to be reported back to the test launcher.
1070   std::vector<TestResult> final_results;
1071 
1072   for (const auto& i : test_names) {
1073     if (Contains(results_map, i)) {
1074       TestResult test_result = results_map[i];
1075       // Fix up the test status: we forcibly kill the child process
1076       // after the timeout, so from XML results it looks just like
1077       // a crash.
1078       if ((was_timeout && test_result.status == TestResult::TEST_CRASH) ||
1079           // If we run multiple tests in a batch with a timeout applied
1080           // to the entire batch. It is possible that with other tests
1081           // running quickly some tests take longer than the per-test timeout.
1082           // For consistent handling of tests independent of order and other
1083           // factors, mark them as timing out.
1084           test_result.elapsed_time > launcher_delegate_->GetTimeout()) {
1085         test_result.status = TestResult::TEST_TIMEOUT;
1086       }
1087       final_results.push_back(test_result);
1088     } else {
1089       // TODO(phajdan.jr): Explicitly pass the info that the test didn't
1090       // run for a mysterious reason.
1091       LOG(ERROR) << "no test result for " << i;
1092       TestResult test_result;
1093       test_result.full_name = i;
1094       test_result.status = missing_result_status;
1095       final_results.push_back(test_result);
1096     }
1097   }
1098   // TODO(phajdan.jr): Handle the case where processing XML output
1099   // indicates a crash but none of the test results is marked as crashing.
1100 
1101   bool has_non_success_test = false;
1102   for (const auto& i : final_results) {
1103     if (i.status != TestResult::TEST_SUCCESS) {
1104       has_non_success_test = true;
1105       break;
1106     }
1107   }
1108 
1109   if (!has_non_success_test && exit_code != 0) {
1110     // This is a bit surprising case: all tests are marked as successful,
1111     // but the exit code was not zero. This can happen e.g. under memory
1112     // tools that report leaks this way. Mark all tests as a failure on exit,
1113     // and for more precise info they'd need to be retried serially.
1114     for (auto& i : final_results)
1115       i.status = TestResult::TEST_FAILURE_ON_EXIT;
1116   }
1117 
1118   for (auto& i : final_results) {
1119     // Fix the output snippet after possible changes to the test result.
1120     i.output_snippet = GetTestOutputSnippet(i, output);
1121   }
1122 
1123   if (leaked_items)
1124     results_tracker_.AddLeakedItems(leaked_items, test_names);
1125 
1126   launcher_delegate_->ProcessTestResults(final_results, elapsed_time);
1127 
1128   for (const auto& result : final_results)
1129     OnTestFinished(result);
1130 }
1131 
// Accounts for one finished test: truncates oversized output, optionally
// echoes the snippet, updates success/retry bookkeeping, prints a status
// line, and aborts the whole run early when too many tests look broken.
void TestLauncher::OnTestFinished(const TestResult& original_result) {
  ++test_finished_count_;

  // Work on a copy; the status and snippet may be adjusted below.
  TestResult result(original_result);

  if (result.output_snippet.length() > kOutputSnippetBytesLimit) {
    // Excessive output turns an otherwise-green test into a distinct status.
    if (result.status == TestResult::TEST_SUCCESS)
      result.status = TestResult::TEST_EXCESSIVE_OUTPUT;

    // Keep the top and bottom of the log and truncate the middle part.
    result.output_snippet =
        result.output_snippet.substr(0, kOutputSnippetBytesLimit / 2) + "\n" +
        StringPrintf("<truncated (%zu bytes)>\n",
                     result.output_snippet.length()) +
        result.output_snippet.substr(result.output_snippet.length() -
                                     kOutputSnippetBytesLimit / 2) +
        "\n";
  }

  // Decide whether to echo the snippet based on the configured mode.
  bool print_snippet = false;
  if (print_test_stdio_ == AUTO) {
    print_snippet = (result.status != TestResult::TEST_SUCCESS);
  } else if (print_test_stdio_ == ALWAYS) {
    print_snippet = true;
  } else if (print_test_stdio_ == NEVER) {
    print_snippet = false;
  }
  if (print_snippet) {
    // Cap the number of printed lines, keeping the most recent ones.
    std::vector<base::StringPiece> snippet_lines =
        SplitStringPiece(result.output_snippet, "\n", base::KEEP_WHITESPACE,
                         base::SPLIT_WANT_ALL);
    if (snippet_lines.size() > kOutputSnippetLinesLimit) {
      size_t truncated_size = snippet_lines.size() - kOutputSnippetLinesLimit;
      snippet_lines.erase(
          snippet_lines.begin(),
          snippet_lines.begin() + truncated_size);
      snippet_lines.insert(snippet_lines.begin(), "<truncated>");
    }
    fprintf(stdout, "%s", base::JoinString(snippet_lines, "\n").c_str());
    fflush(stdout);
  }

  if (result.status == TestResult::TEST_SUCCESS) {
    ++test_success_count_;
  } else {
    // Records prefix stripped name to run all dependent tests.
    std::string test_name(result.full_name);
    ReplaceSubstringsAfterOffset(&test_name, 0, kPreTestPrefix, std::string());
    ReplaceSubstringsAfterOffset(&test_name, 0, kDisabledTestPrefix,
                                 std::string());
    tests_to_retry_.insert(test_name);
  }

  // There are no results for this test,
  // most likely due to another test failing in the same batch.
  if (result.status != TestResult::TEST_SKIPPED)
    results_tracker_.AddTestResult(result);

  // TODO(phajdan.jr): Align counter (padding).
  std::string status_line(StringPrintf("[%zu/%zu] %s ", test_finished_count_,
                                       test_started_count_,
                                       result.full_name.c_str()));
  if (result.completed()) {
    status_line.append(StringPrintf("(%" PRId64 " ms)",
                                    result.elapsed_time.InMilliseconds()));
  } else if (result.status == TestResult::TEST_TIMEOUT) {
    status_line.append("(TIMED OUT)");
  } else if (result.status == TestResult::TEST_CRASH) {
    status_line.append("(CRASHED)");
  } else if (result.status == TestResult::TEST_SKIPPED) {
    status_line.append("(SKIPPED)");
  } else if (result.status == TestResult::TEST_UNKNOWN) {
    status_line.append("(UNKNOWN)");
  } else {
    // Fail very loudly so it's not ignored.
    CHECK(false) << "Unhandled test result status: " << result.status;
  }
  fprintf(stdout, "%s\n", status_line.c_str());
  fflush(stdout);

  // We just printed a status line, reset the watchdog timer.
  watchdog_timer_.Reset();

  // Do not waste time on timeouts.
  if (result.status == TestResult::TEST_TIMEOUT) {
    test_broken_count_++;
  }
  // Bail out early when too many tests appear broken, unless overridden by
  // --test-launcher-force-run-broken-tests.
  if (!force_run_broken_tests_ && test_broken_count_ >= broken_threshold_) {
    fprintf(stdout, "Too many badly broken tests (%zu), exiting now.\n",
            test_broken_count_);
    fflush(stdout);

#if defined(OS_POSIX)
    KillSpawnedTestProcesses();
#endif  // defined(OS_POSIX)

    MaybeSaveSummaryAsJSON({"BROKEN_TEST_EARLY_EXIT"});

    exit(1);
  }
}
1233 
1234 // Helper used to parse test filter files. Syntax is documented in
1235 // //testing/buildbot/filters/README.md .
LoadFilterFile(const FilePath & file_path,std::vector<std::string> * positive_filter,std::vector<std::string> * negative_filter)1236 bool LoadFilterFile(const FilePath& file_path,
1237                     std::vector<std::string>* positive_filter,
1238                     std::vector<std::string>* negative_filter) {
1239   std::string file_content;
1240   if (!ReadFileToString(file_path, &file_content)) {
1241     LOG(ERROR) << "Failed to read the filter file.";
1242     return false;
1243   }
1244 
1245   std::vector<std::string> filter_lines = SplitString(
1246       file_content, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
1247   int line_num = 0;
1248   for (const std::string& filter_line : filter_lines) {
1249     line_num++;
1250 
1251     size_t hash_pos = filter_line.find('#');
1252 
1253     // In case when # symbol is not in the beginning of the line and is not
1254     // proceeded with a space then it's likely that the comment was
1255     // unintentional.
1256     if (hash_pos != std::string::npos && hash_pos > 0 &&
1257         filter_line[hash_pos - 1] != ' ') {
1258       LOG(WARNING) << "Content of line " << line_num << " in " << file_path
1259                    << " after # is treated as a comment, " << filter_line;
1260     }
1261 
1262     // Strip comments and whitespace from each line.
1263     std::string trimmed_line =
1264         TrimWhitespaceASCII(filter_line.substr(0, hash_pos), TRIM_ALL)
1265             .as_string();
1266 
1267     if (trimmed_line.substr(0, 2) == "//") {
1268       LOG(ERROR) << "Line " << line_num << " in " << file_path
1269                  << " starts with //, use # for comments.";
1270       return false;
1271     }
1272 
1273     // Treat a line starting with '//' as a comment.
1274     if (trimmed_line.empty())
1275       continue;
1276 
1277     if (trimmed_line[0] == '-')
1278       negative_filter->push_back(trimmed_line.substr(1));
1279     else
1280       positive_filter->push_back(trimmed_line);
1281   }
1282 
1283   return true;
1284 }
1285 
Init(CommandLine * command_line)1286 bool TestLauncher::Init(CommandLine* command_line) {
1287   // Initialize sharding. Command line takes precedence over legacy environment
1288   // variables.
1289   if (command_line->HasSwitch(switches::kTestLauncherTotalShards) &&
1290       command_line->HasSwitch(switches::kTestLauncherShardIndex)) {
1291     if (!StringToInt(
1292             command_line->GetSwitchValueASCII(
1293                 switches::kTestLauncherTotalShards),
1294             &total_shards_)) {
1295       LOG(ERROR) << "Invalid value for " << switches::kTestLauncherTotalShards;
1296       return false;
1297     }
1298     if (!StringToInt(
1299             command_line->GetSwitchValueASCII(
1300                 switches::kTestLauncherShardIndex),
1301             &shard_index_)) {
1302       LOG(ERROR) << "Invalid value for " << switches::kTestLauncherShardIndex;
1303       return false;
1304     }
1305     fprintf(stdout,
1306             "Using sharding settings from command line. This is shard %d/%d\n",
1307             shard_index_, total_shards_);
1308     fflush(stdout);
1309   } else {
1310     if (!TakeInt32FromEnvironment(kTestTotalShards, &total_shards_))
1311       return false;
1312     if (!TakeInt32FromEnvironment(kTestShardIndex, &shard_index_))
1313       return false;
1314     fprintf(stdout,
1315             "Using sharding settings from environment. This is shard %d/%d\n",
1316             shard_index_, total_shards_);
1317     fflush(stdout);
1318   }
1319   if (shard_index_ < 0 ||
1320       total_shards_ < 0 ||
1321       shard_index_ >= total_shards_) {
1322     LOG(ERROR) << "Invalid sharding settings: we require 0 <= "
1323                << kTestShardIndex << " < " << kTestTotalShards
1324                << ", but you have " << kTestShardIndex << "=" << shard_index_
1325                << ", " << kTestTotalShards << "=" << total_shards_ << ".\n";
1326     return false;
1327   }
1328 
1329   // Make sure we don't pass any sharding-related environment to the child
1330   // processes. This test launcher implements the sharding completely.
1331   CHECK(UnsetEnvironmentVariableIfExists("GTEST_TOTAL_SHARDS"));
1332   CHECK(UnsetEnvironmentVariableIfExists("GTEST_SHARD_INDEX"));
1333 
1334   if (command_line->HasSwitch(kGTestRepeatFlag) &&
1335       !StringToInt(command_line->GetSwitchValueASCII(kGTestRepeatFlag),
1336                    &cycles_)) {
1337     LOG(ERROR) << "Invalid value for " << kGTestRepeatFlag;
1338     return false;
1339   }
1340   if (command_line->HasSwitch(kIsolatedScriptTestRepeatFlag) &&
1341       !StringToInt(
1342           command_line->GetSwitchValueASCII(kIsolatedScriptTestRepeatFlag),
1343           &cycles_)) {
1344     LOG(ERROR) << "Invalid value for " << kIsolatedScriptTestRepeatFlag;
1345     return false;
1346   }
1347 
1348   if (command_line->HasSwitch(switches::kTestLauncherRetryLimit)) {
1349     int retry_limit = -1;
1350     if (!StringToInt(command_line->GetSwitchValueASCII(
1351                          switches::kTestLauncherRetryLimit), &retry_limit) ||
1352         retry_limit < 0) {
1353       LOG(ERROR) << "Invalid value for " << switches::kTestLauncherRetryLimit;
1354       return false;
1355     }
1356 
1357     retry_limit_ = retry_limit;
1358   } else if (command_line->HasSwitch(
1359                  switches::kIsolatedScriptTestLauncherRetryLimit)) {
1360     int retry_limit = -1;
1361     if (!StringToInt(command_line->GetSwitchValueASCII(
1362                          switches::kIsolatedScriptTestLauncherRetryLimit),
1363                      &retry_limit) ||
1364         retry_limit < 0) {
1365       LOG(ERROR) << "Invalid value for "
1366                  << switches::kIsolatedScriptTestLauncherRetryLimit;
1367       return false;
1368     }
1369 
1370     retry_limit_ = retry_limit;
1371   } else if (command_line->HasSwitch(kGTestRepeatFlag) ||
1372              command_line->HasSwitch(kGTestBreakOnFailure)) {
1373     // If we are repeating tests or waiting for the first test to fail, disable
1374     // retries.
1375     retry_limit_ = 0U;
1376   } else if (!BotModeEnabled(command_line) &&
1377              (command_line->HasSwitch(kGTestFilterFlag) ||
1378               command_line->HasSwitch(kIsolatedScriptTestFilterFlag))) {
1379     // No retry flag specified, not in bot mode and filtered by flag
1380     // Set reties to zero
1381     retry_limit_ = 0U;
1382   }
1383 
1384   retries_left_ = retry_limit_;
1385   force_run_broken_tests_ =
1386       command_line->HasSwitch(switches::kTestLauncherForceRunBrokenTests);
1387 
1388   fprintf(stdout, "Using %zu parallel jobs.\n", parallel_jobs_);
1389   fflush(stdout);
1390 
1391   CreateAndStartThreadPool(static_cast<int>(parallel_jobs_));
1392 
1393   std::vector<std::string> positive_file_filter;
1394   std::vector<std::string> positive_gtest_filter;
1395 
1396   if (command_line->HasSwitch(switches::kTestLauncherFilterFile)) {
1397     auto filter =
1398         command_line->GetSwitchValueNative(switches::kTestLauncherFilterFile);
1399     for (auto filter_file :
1400          SplitStringPiece(filter, FILE_PATH_LITERAL(";"), base::TRIM_WHITESPACE,
1401                           base::SPLIT_WANT_ALL)) {
1402       base::FilePath filter_file_path =
1403           base::MakeAbsoluteFilePath(FilePath(filter_file));
1404       if (!LoadFilterFile(filter_file_path, &positive_file_filter,
1405                           &negative_test_filter_))
1406         return false;
1407     }
1408   }
1409 
1410   // Split --gtest_filter at '-', if there is one, to separate into
1411   // positive filter and negative filter portions.
1412   bool double_colon_supported = !command_line->HasSwitch(kGTestFilterFlag);
1413   std::string filter = command_line->GetSwitchValueASCII(
1414       double_colon_supported ? kIsolatedScriptTestFilterFlag
1415                              : kGTestFilterFlag);
1416   size_t dash_pos = filter.find('-');
1417   if (dash_pos == std::string::npos) {
1418     positive_gtest_filter =
1419         ExtractTestsFromFilter(filter, double_colon_supported);
1420   } else {
1421     // Everything up to the dash.
1422     positive_gtest_filter = ExtractTestsFromFilter(filter.substr(0, dash_pos),
1423                                                    double_colon_supported);
1424 
1425     // Everything after the dash.
1426     for (std::string pattern : ExtractTestsFromFilter(
1427              filter.substr(dash_pos + 1), double_colon_supported)) {
1428       negative_test_filter_.push_back(pattern);
1429     }
1430   }
1431 
1432   skip_diabled_tests_ =
1433       !command_line->HasSwitch(kGTestRunDisabledTestsFlag) &&
1434       !command_line->HasSwitch(kIsolatedScriptRunDisabledTestsFlag);
1435 
1436   if (!InitTests())
1437     return false;
1438 
1439   if (!ShuffleTests(command_line))
1440     return false;
1441 
1442   if (!ProcessAndValidateTests())
1443     return false;
1444 
1445   if (command_line->HasSwitch(switches::kTestLauncherPrintTestStdio)) {
1446     std::string print_test_stdio = command_line->GetSwitchValueASCII(
1447         switches::kTestLauncherPrintTestStdio);
1448     if (print_test_stdio == "auto") {
1449       print_test_stdio_ = AUTO;
1450     } else if (print_test_stdio == "always") {
1451       print_test_stdio_ = ALWAYS;
1452     } else if (print_test_stdio == "never") {
1453       print_test_stdio_ = NEVER;
1454     } else {
1455       LOG(WARNING) << "Invalid value of "
1456                    << switches::kTestLauncherPrintTestStdio << ": "
1457                    << print_test_stdio;
1458       return false;
1459     }
1460   }
1461 
1462   stop_on_failure_ = command_line->HasSwitch(kGTestBreakOnFailure);
1463 
1464   if (command_line->HasSwitch(switches::kTestLauncherSummaryOutput)) {
1465     summary_path_ = FilePath(
1466         command_line->GetSwitchValuePath(switches::kTestLauncherSummaryOutput));
1467   }
1468   if (command_line->HasSwitch(switches::kTestLauncherTrace)) {
1469     trace_path_ = FilePath(
1470         command_line->GetSwitchValuePath(switches::kTestLauncherTrace));
1471   }
1472 
1473   // When running in parallel mode we need to redirect stdio to avoid mixed-up
1474   // output. We also always redirect on the bots to get the test output into
1475   // JSON summary.
1476   redirect_stdio_ = (parallel_jobs_ > 1) || BotModeEnabled(command_line);
1477 
1478   CombinePositiveTestFilters(std::move(positive_gtest_filter),
1479                              std::move(positive_file_filter));
1480 
1481   if (!results_tracker_.Init(*command_line)) {
1482     LOG(ERROR) << "Failed to initialize test results tracker.";
1483     return 1;
1484   }
1485 
1486 #if defined(NDEBUG)
1487   results_tracker_.AddGlobalTag("MODE_RELEASE");
1488 #else
1489   results_tracker_.AddGlobalTag("MODE_DEBUG");
1490 #endif
1491 
1492   // Operating systems (sorted alphabetically).
1493   // Note that they can deliberately overlap, e.g. OS_LINUX is a subset
1494   // of OS_POSIX.
1495 #if defined(OS_ANDROID)
1496   results_tracker_.AddGlobalTag("OS_ANDROID");
1497 #endif
1498 
1499 #if defined(OS_APPLE)
1500   results_tracker_.AddGlobalTag("OS_APPLE");
1501 #endif
1502 
1503 #if defined(OS_BSD)
1504   results_tracker_.AddGlobalTag("OS_BSD");
1505 #endif
1506 
1507 #if defined(OS_DRAGONFLY)
1508   results_tracker_.AddGlobalTag("OS_DRAGONFLY");
1509 #endif
1510 
1511 #if defined(OS_FREEBSD)
1512   results_tracker_.AddGlobalTag("OS_FREEBSD");
1513 #endif
1514 
1515 #if defined(OS_FUCHSIA)
1516   results_tracker_.AddGlobalTag("OS_FUCHSIA");
1517 #endif
1518 
1519 #if defined(OS_IOS)
1520   results_tracker_.AddGlobalTag("OS_IOS");
1521 #endif
1522 
1523 #if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_BSD)
1524   results_tracker_.AddGlobalTag("OS_LINUX");
1525 #endif
1526 
1527 #if defined(OS_CHROMEOS)
1528   results_tracker_.AddGlobalTag("OS_CHROMEOS");
1529 #endif
1530 
1531 #if defined(OS_MAC)
1532   results_tracker_.AddGlobalTag("OS_MAC");
1533 #endif
1534 
1535 #if defined(OS_NACL)
1536   results_tracker_.AddGlobalTag("OS_NACL");
1537 #endif
1538 
1539 #if defined(OS_OPENBSD)
1540   results_tracker_.AddGlobalTag("OS_OPENBSD");
1541 #endif
1542 
1543 #if defined(OS_POSIX)
1544   results_tracker_.AddGlobalTag("OS_POSIX");
1545 #endif
1546 
1547 #if defined(OS_SOLARIS)
1548   results_tracker_.AddGlobalTag("OS_SOLARIS");
1549 #endif
1550 
1551 #if defined(OS_WIN)
1552   results_tracker_.AddGlobalTag("OS_WIN");
1553 #endif
1554 
1555   // CPU-related tags.
1556 #if defined(ARCH_CPU_32_BITS)
1557   results_tracker_.AddGlobalTag("CPU_32_BITS");
1558 #endif
1559 
1560 #if defined(ARCH_CPU_64_BITS)
1561   results_tracker_.AddGlobalTag("CPU_64_BITS");
1562 #endif
1563 
1564   return true;
1565 }
1566 
InitTests()1567 bool TestLauncher::InitTests() {
1568   std::vector<TestIdentifier> tests;
1569   if (!launcher_delegate_->GetTests(&tests)) {
1570     LOG(ERROR) << "Failed to get list of tests.";
1571     return false;
1572   }
1573   for (const TestIdentifier& test_id : tests) {
1574     TestInfo test_info(test_id);
1575     if (test_id.test_case_name == "GoogleTestVerification") {
1576       LOG(INFO) << "The following parameterized test case is not instantiated: "
1577                 << test_id.test_name;
1578       continue;
1579     }
1580     // TODO(isamsonov): crbug.com/1004417 remove when windows builders
1581     // stop flaking on MANAUAL_ tests.
1582     if (launcher_delegate_->ShouldRunTest(test_id))
1583       tests_.push_back(test_info);
1584   }
1585   return true;
1586 }
1587 
ShuffleTests(CommandLine * command_line)1588 bool TestLauncher::ShuffleTests(CommandLine* command_line) {
1589   if (command_line->HasSwitch(kGTestShuffleFlag)) {
1590     uint32_t shuffle_seed;
1591     if (command_line->HasSwitch(kGTestRandomSeedFlag)) {
1592       const std::string custom_seed_str =
1593           command_line->GetSwitchValueASCII(kGTestRandomSeedFlag);
1594       uint32_t custom_seed = 0;
1595       if (!StringToUint(custom_seed_str, &custom_seed)) {
1596         LOG(ERROR) << "Unable to parse seed \"" << custom_seed_str << "\".";
1597         return false;
1598       }
1599       if (custom_seed >= kRandomSeedUpperBound) {
1600         LOG(ERROR) << "Seed " << custom_seed << " outside of expected range "
1601                    << "[0, " << kRandomSeedUpperBound << ")";
1602         return false;
1603       }
1604       shuffle_seed = custom_seed;
1605     } else {
1606       std::uniform_int_distribution<uint32_t> dist(0, kRandomSeedUpperBound);
1607       std::random_device random_dev;
1608       shuffle_seed = dist(random_dev);
1609     }
1610 
1611     std::mt19937 randomizer;
1612     randomizer.seed(shuffle_seed);
1613     ranges::shuffle(tests_, randomizer);
1614 
1615     fprintf(stdout, "Randomizing with seed %u\n", shuffle_seed);
1616     fflush(stdout);
1617   } else if (command_line->HasSwitch(kGTestRandomSeedFlag)) {
1618     LOG(ERROR) << kGTestRandomSeedFlag << " requires " << kGTestShuffleFlag;
1619     return false;
1620   }
1621   return true;
1622 }
1623 
// Normalizes |tests_|: registers each test with the results tracker,
// validates DISABLED_/PRE_ bookkeeping, and reorders PRE_ tests so each
// chain runs immediately before its final test. Returns false (after
// logging every problem, not just the first) if a test name is duplicated
// by a DISABLED_ variant or a PRE_ test is orphaned.
bool TestLauncher::ProcessAndValidateTests() {
  bool result = true;
  std::unordered_set<std::string> disabled_tests;
  std::unordered_map<std::string, TestInfo> pre_tests;

  // Find disabled and pre tests. Both maps are keyed by the
  // DISABLED_-stripped name, so "DISABLED_Foo" collides with a plain
  // "Foo" in the duplicate check below.
  for (const TestInfo& test_info : tests_) {
    std::string test_name = test_info.GetFullName();
    results_tracker_.AddTest(test_name);
    if (test_info.disabled()) {
      disabled_tests.insert(test_info.GetDisabledStrippedName());
      results_tracker_.AddDisabledTest(test_name);
    }
    if (test_info.pre_test())
      pre_tests[test_info.GetDisabledStrippedName()] = test_info;
  }

  std::vector<TestInfo> tests_to_run;
  for (const TestInfo& test_info : tests_) {
    std::string test_name = test_info.GetFullName();
    // If any test has a matching disabled test, fail and log for audit.
    if (base::Contains(disabled_tests, test_name)) {
      LOG(ERROR) << test_name << " duplicated by a DISABLED_ test";
      result = false;
    }

    // Passes on PRE tests, those will append when final test is found.
    if (test_info.pre_test())
      continue;

    // Walk the chain final -> PRE_ -> PRE_PRE_ -> ... by repeatedly looking
    // up the PRE_ name of the last element. Consumed entries are erased so
    // any leftovers can be reported as orphans at the end.
    std::vector<TestInfo> test_sequence;
    test_sequence.push_back(test_info);
    // Move Pre Tests prior to final test in order.
    while (base::Contains(pre_tests, test_sequence.back().GetPreName())) {
      test_sequence.push_back(pre_tests[test_sequence.back().GetPreName()]);
      pre_tests.erase(test_sequence.back().GetDisabledStrippedName());
    }
    // Skip disabled tests unless explicitly requested. (Member name is
    // spelled "skip_diabled_tests_" (sic) in the class declaration.)
    // The sequence was built final-test-first, so append it reversed to
    // make the earliest PRE_ test run first.
    if (!test_info.disabled() || !skip_diabled_tests_)
      tests_to_run.insert(tests_to_run.end(), test_sequence.rbegin(),
                          test_sequence.rend());
  }
  tests_ = std::move(tests_to_run);

  // If any tests remain in |pre_tests| map, fail and log for audit.
  for (const auto& i : pre_tests) {
    LOG(ERROR) << i.first << " is an orphaned pre test";
    result = false;
  }
  return result;
}
1675 
CreateAndStartThreadPool(int num_parallel_jobs)1676 void TestLauncher::CreateAndStartThreadPool(int num_parallel_jobs) {
1677   base::ThreadPoolInstance::Create("TestLauncher");
1678   base::ThreadPoolInstance::Get()->Start({num_parallel_jobs});
1679 }
1680 
CombinePositiveTestFilters(std::vector<std::string> filter_a,std::vector<std::string> filter_b)1681 void TestLauncher::CombinePositiveTestFilters(
1682     std::vector<std::string> filter_a,
1683     std::vector<std::string> filter_b) {
1684   has_at_least_one_positive_filter_ = !filter_a.empty() || !filter_b.empty();
1685   if (!has_at_least_one_positive_filter_) {
1686     return;
1687   }
1688   // If two positive filters are present, only run tests that match a pattern
1689   // in both filters.
1690   if (!filter_a.empty() && !filter_b.empty()) {
1691     for (const auto& i : tests_) {
1692       std::string test_name = i.GetFullName();
1693       bool found_a = false;
1694       bool found_b = false;
1695       for (const auto& k : filter_a) {
1696         found_a = found_a || MatchPattern(test_name, k);
1697       }
1698       for (const auto& k : filter_b) {
1699         found_b = found_b || MatchPattern(test_name, k);
1700       }
1701       if (found_a && found_b) {
1702         positive_test_filter_.push_back(test_name);
1703       }
1704     }
1705   } else if (!filter_a.empty()) {
1706     positive_test_filter_ = std::move(filter_a);
1707   } else {
1708     positive_test_filter_ = std::move(filter_b);
1709   }
1710 }
1711 
CollectTests()1712 std::vector<std::string> TestLauncher::CollectTests() {
1713   std::vector<std::string> test_names;
1714   for (const TestInfo& test_info : tests_) {
1715     std::string test_name = test_info.GetFullName();
1716 
1717     std::string prefix_stripped_name = test_info.GetPrefixStrippedName();
1718 
1719     // Skip the test that doesn't match the filter (if given).
1720     if (has_at_least_one_positive_filter_) {
1721       bool found = false;
1722       for (auto filter : positive_test_filter_) {
1723         if (MatchPattern(test_name, filter) ||
1724             MatchPattern(prefix_stripped_name, filter)) {
1725           found = true;
1726           break;
1727         }
1728       }
1729 
1730       if (!found)
1731         continue;
1732     }
1733 
1734     if (!negative_test_filter_.empty()) {
1735       bool excluded = false;
1736       for (auto filter : negative_test_filter_) {
1737         if (MatchPattern(test_name, filter) ||
1738             MatchPattern(prefix_stripped_name, filter)) {
1739           excluded = true;
1740           break;
1741         }
1742       }
1743 
1744       if (excluded)
1745         continue;
1746     }
1747 
1748     // Tests with the name XYZ will cause tests with the name PRE_XYZ to run. We
1749     // should bucket all of these tests together.
1750     if (Hash(prefix_stripped_name) % total_shards_ !=
1751         static_cast<uint32_t>(shard_index_)) {
1752       continue;
1753     }
1754 
1755     // Report test locations after applying all filters, so that we report test
1756     // locations only for those tests that were run as part of this shard.
1757     results_tracker_.AddTestLocation(test_name, test_info.file(),
1758                                      test_info.line());
1759     if (!test_info.pre_test()) {
1760       // Only a subset of tests that are run require placeholders -- namely,
1761       // those that will output results.
1762       results_tracker_.AddTestPlaceholder(test_name);
1763     }
1764 
1765     test_names.push_back(test_name);
1766   }
1767 
1768   return test_names;
1769 }
1770 
RunTests()1771 void TestLauncher::RunTests() {
1772   std::vector<std::string> original_test_names = CollectTests();
1773 
1774   std::vector<std::string> test_names;
1775   for (int i = 0; i < repeats_per_iteration_; ++i) {
1776     test_names.insert(test_names.end(), original_test_names.begin(),
1777                       original_test_names.end());
1778   }
1779 
1780   broken_threshold_ = std::max(static_cast<size_t>(20), tests_.size() / 10);
1781 
1782   test_started_count_ = test_names.size();
1783 
1784   // If there are no matching tests, warn and notify of any matches against
1785   // *<filter>*.
1786   if (test_started_count_ == 0) {
1787     PrintFuzzyMatchingTestNames();
1788     fprintf(stdout, "WARNING: No matching tests to run.\n");
1789     fflush(stdout);
1790   }
1791 
1792   // Save an early test summary in case the launcher crashes or gets killed.
1793   results_tracker_.GeneratePlaceholderIteration();
1794   MaybeSaveSummaryAsJSON({"EARLY_SUMMARY"});
1795 
1796   // If we are repeating the test, set batch size to 1 to ensure that batch size
1797   // does not interfere with repeats (unittests are using filter for batches and
1798   // can't run the same test twice in the same batch).
1799   size_t batch_size =
1800       repeats_per_iteration_ > 1 ? 1U : launcher_delegate_->GetBatchSize();
1801 
1802   TestRunner test_runner(this, parallel_jobs_, batch_size);
1803   test_runner.Run(test_names);
1804 }
1805 
PrintFuzzyMatchingTestNames()1806 void TestLauncher::PrintFuzzyMatchingTestNames() {
1807   for (auto filter : positive_test_filter_) {
1808     if (filter.empty())
1809       continue;
1810     std::string almost_filter;
1811     if (filter.front() != '*')
1812       almost_filter += '*';
1813     almost_filter += filter;
1814     if (filter.back() != '*')
1815       almost_filter += '*';
1816 
1817     for (const TestInfo& test_info : tests_) {
1818       std::string test_name = test_info.GetFullName();
1819       std::string prefix_stripped_name = test_info.GetPrefixStrippedName();
1820       if (MatchPattern(test_name, almost_filter) ||
1821           MatchPattern(prefix_stripped_name, almost_filter)) {
1822         fprintf(stdout, "Filter \"%s\" would have matched: %s\n",
1823                 almost_filter.c_str(), test_name.c_str());
1824         fflush(stdout);
1825       }
1826     }
1827   }
1828 }
1829 
// Re-runs failed tests until the retry set drains or the retry budget is
// exhausted. Returns true iff every test eventually passed (nothing left
// in |tests_to_retry_|).
bool TestLauncher::RunRetryTests() {
  while (!tests_to_retry_.empty() && retries_left_ > 0) {
    // Retry all tests that depend on a failing test. Matching uses the
    // prefix-stripped name so a failure retries its whole PRE_ chain.
    std::vector<std::string> test_names;
    for (const TestInfo& test_info : tests_) {
      if (base::Contains(tests_to_retry_, test_info.GetPrefixStrippedName()))
        test_names.push_back(test_info.GetFullName());
    }
    // Cleared before running; presumably repopulated with fresh failures
    // while the runner executes — TODO confirm against result callbacks.
    tests_to_retry_.clear();

    size_t retry_started_count = test_names.size();
    test_started_count_ += retry_started_count;

    // Only invoke RunLoop if there are any tasks to run.
    if (retry_started_count == 0)
      return false;

    // NOTE(review): the first pass prints "retry #0" because retries_left_
    // is decremented only after this message — confirm the zero-based
    // numbering is intended.
    fprintf(stdout, "Retrying %zu test%s (retry #%zu)\n", retry_started_count,
            retry_started_count > 1 ? "s" : "", retry_limit_ - retries_left_);
    fflush(stdout);

    --retries_left_;
    TestRunner test_runner(this);
    test_runner.Run(test_names);
  }
  return tests_to_retry_.empty();
}
1857 
OnTestIterationStart()1858 void TestLauncher::OnTestIterationStart() {
1859   test_started_count_ = 0;
1860   test_finished_count_ = 0;
1861   test_success_count_ = 0;
1862   test_broken_count_ = 0;
1863   tests_to_retry_.clear();
1864   results_tracker_.OnTestIterationStarting();
1865 }
1866 
1867 #if defined(OS_POSIX)
1868 // I/O watcher for the reading end of the self-pipe above.
1869 // Terminates any launched child processes and exits the process.
OnShutdownPipeReadable()1870 void TestLauncher::OnShutdownPipeReadable() {
1871   fprintf(stdout, "\nCaught signal. Killing spawned test processes...\n");
1872   fflush(stdout);
1873 
1874   KillSpawnedTestProcesses();
1875 
1876   MaybeSaveSummaryAsJSON({"CAUGHT_TERMINATION_SIGNAL"});
1877 
1878   // The signal would normally kill the process, so exit now.
1879   _exit(1);
1880 }
1881 #endif  // defined(OS_POSIX)
1882 
MaybeSaveSummaryAsJSON(const std::vector<std::string> & additional_tags)1883 void TestLauncher::MaybeSaveSummaryAsJSON(
1884     const std::vector<std::string>& additional_tags) {
1885   if (!summary_path_.empty()) {
1886     if (!results_tracker_.SaveSummaryAsJSON(summary_path_, additional_tags)) {
1887       LOG(ERROR) << "Failed to save test launcher output summary.";
1888     }
1889   }
1890   if (!trace_path_.empty()) {
1891     if (!GetTestLauncherTracer()->Dump(trace_path_)) {
1892       LOG(ERROR) << "Failed to save test launcher trace.";
1893     }
1894   }
1895 }
1896 
OnTestIterationFinished()1897 void TestLauncher::OnTestIterationFinished() {
1898   TestResultsTracker::TestStatusMap tests_by_status(
1899       results_tracker_.GetTestStatusMapForCurrentIteration());
1900   if (!tests_by_status[TestResult::TEST_UNKNOWN].empty())
1901     results_tracker_.AddGlobalTag(kUnreliableResultsTag);
1902 
1903   results_tracker_.PrintSummaryOfCurrentIteration();
1904 }
1905 
// Watchdog callback: lists child processes that are still running so
// long-hanging tests are visible in the launcher output, then re-arms
// the timer for the next report.
void TestLauncher::OnOutputTimeout() {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Hold the lock for the whole dump so the live-process map cannot be
  // mutated while it is being iterated.
  AutoLock lock(*GetLiveProcessesLock());

  fprintf(stdout, "Still waiting for the following processes to finish:\n");

  for (const auto& pair : *GetLiveProcesses()) {
#if defined(OS_WIN)
    // Windows command-line strings are wide; use the wide printf variant.
    fwprintf(stdout, L"\t%s\n", pair.second.GetCommandLineString().c_str());
#else
    fprintf(stdout, "\t%s\n", pair.second.GetCommandLineString().c_str());
#endif
  }

  fflush(stdout);

  // Arm the timer again - otherwise it would fire only once.
  watchdog_timer_.Reset();
}
1926 
NumParallelJobs(unsigned int cores_per_job)1927 size_t NumParallelJobs(unsigned int cores_per_job) {
1928   const CommandLine* command_line = CommandLine::ForCurrentProcess();
1929   if (command_line->HasSwitch(switches::kTestLauncherJobs)) {
1930     // If the number of test launcher jobs was specified, return that number.
1931     size_t jobs = 0U;
1932 
1933     if (!StringToSizeT(
1934             command_line->GetSwitchValueASCII(switches::kTestLauncherJobs),
1935             &jobs) ||
1936         !jobs) {
1937       LOG(ERROR) << "Invalid value for " << switches::kTestLauncherJobs;
1938       return 0U;
1939     }
1940     return jobs;
1941   }
1942   if (!BotModeEnabled(command_line) &&
1943       (command_line->HasSwitch(kGTestFilterFlag) ||
1944        command_line->HasSwitch(kIsolatedScriptTestFilterFlag))) {
1945     // Do not run jobs in parallel by default if we are running a subset of
1946     // the tests and if bot mode is off.
1947     return 1U;
1948   }
1949 
1950 #if defined(OS_WIN)
1951   // Use processors in all groups (Windows splits more than 64 logical
1952   // processors into groups).
1953   size_t cores = base::checked_cast<size_t>(
1954       ::GetActiveProcessorCount(ALL_PROCESSOR_GROUPS));
1955 #else
1956   size_t cores = base::checked_cast<size_t>(SysInfo::NumberOfProcessors());
1957 #endif
1958   return std::max(size_t(1), cores / cores_per_job);
1959 }
1960 
GetTestOutputSnippet(const TestResult & result,const std::string & full_output)1961 std::string GetTestOutputSnippet(const TestResult& result,
1962                                  const std::string& full_output) {
1963   size_t run_pos = full_output.find(std::string("[ RUN      ] ") +
1964                                     result.full_name);
1965   if (run_pos == std::string::npos)
1966     return std::string();
1967 
1968   size_t end_pos = full_output.find(std::string("[  FAILED  ] ") +
1969                                     result.full_name,
1970                                     run_pos);
1971   // Only clip the snippet to the "OK" message if the test really
1972   // succeeded or was skipped. It still might have e.g. crashed
1973   // after printing it.
1974   if (end_pos == std::string::npos) {
1975     if (result.status == TestResult::TEST_SUCCESS) {
1976       end_pos = full_output.find(std::string("[       OK ] ") +
1977                                 result.full_name,
1978                                 run_pos);
1979 
1980       // Also handle SKIPPED next to SUCCESS because the GTest XML output
1981       // doesn't make a difference between SKIPPED and SUCCESS
1982       if (end_pos == std::string::npos)
1983         end_pos = full_output.find(
1984             std::string("[  SKIPPED ] ") + result.full_name, run_pos);
1985     } else {
1986       // If test is not successful, include all output until subsequent test.
1987       end_pos = full_output.find(std::string("[ RUN      ]"), run_pos + 1);
1988       if (end_pos != std::string::npos)
1989         end_pos--;
1990     }
1991   }
1992   if (end_pos != std::string::npos) {
1993     size_t newline_pos = full_output.find("\n", end_pos);
1994     if (newline_pos != std::string::npos)
1995       end_pos = newline_pos + 1;
1996   }
1997 
1998   std::string snippet(full_output.substr(run_pos));
1999   if (end_pos != std::string::npos)
2000     snippet = full_output.substr(run_pos, end_pos - run_pos);
2001 
2002   return snippet;
2003 }
2004 
2005 }  // namespace base
2006