1 // Copyright 2014 The Kyua Authors.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 //   notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above copyright
11 //   notice, this list of conditions and the following disclaimer in the
12 //   documentation and/or other materials provided with the distribution.
13 // * Neither the name of Google Inc. nor the names of its contributors
14 //   may be used to endorse or promote products derived from this software
15 //   without specific prior written permission.
16 //
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 
29 #include "engine/scheduler.hpp"
30 
31 extern "C" {
32 #include <unistd.h>
33 }
34 
35 #include <cstdio>
36 #include <cstdlib>
37 #include <fstream>
38 #include <stdexcept>
39 
40 #include "engine/config.hpp"
41 #include "engine/exceptions.hpp"
42 #include "engine/requirements.hpp"
43 #include "model/context.hpp"
44 #include "model/metadata.hpp"
45 #include "model/test_case.hpp"
46 #include "model/test_program.hpp"
47 #include "model/test_result.hpp"
48 #include "utils/config/tree.ipp"
49 #include "utils/datetime.hpp"
50 #include "utils/defs.hpp"
51 #include "utils/env.hpp"
52 #include "utils/format/macros.hpp"
53 #include "utils/fs/directory.hpp"
54 #include "utils/fs/exceptions.hpp"
55 #include "utils/fs/operations.hpp"
56 #include "utils/fs/path.hpp"
57 #include "utils/logging/macros.hpp"
58 #include "utils/noncopyable.hpp"
59 #include "utils/optional.ipp"
60 #include "utils/passwd.hpp"
61 #include "utils/process/executor.ipp"
62 #include "utils/process/status.hpp"
63 #include "utils/sanity.hpp"
64 #include "utils/shared_ptr.hpp"
65 #include "utils/stacktrace.hpp"
66 #include "utils/stream.hpp"
67 #include "utils/text/operations.ipp"
68 
69 namespace config = utils::config;
70 namespace datetime = utils::datetime;
71 namespace executor = utils::process::executor;
72 namespace fs = utils::fs;
73 namespace logging = utils::logging;
74 namespace passwd = utils::passwd;
75 namespace process = utils::process;
76 namespace scheduler = engine::scheduler;
77 namespace text = utils::text;
78 
79 using utils::none;
80 using utils::optional;
81 
82 
/// Timeout for the test case cleanup operation.
///
/// Deliberately a mutable global so that tests can shorten it.
/// TODO(jmmv): This is here only for testing purposes.  Maybe we should expose
/// this setting as part of the user_config.
datetime::delta scheduler::cleanup_timeout(60, 0);


/// Timeout for the test case listing operation.
///
/// Deliberately a mutable global so that tests can shorten it.
/// TODO(jmmv): This is here only for testing purposes.  Maybe we should expose
/// this setting as part of the user_config.
datetime::delta scheduler::list_timeout(300, 0);
95 
96 
97 namespace {
98 
99 
/// Magic exit status to indicate that the test case was probably skipped.
///
/// The test case was skipped if and only if the process exits with this code
/// *and* we later find the skipped_cookie file on disk.
static const int exit_skipped = 84;


/// Name of the text file containing the skip reason for the test case.
///
/// This will only be present within unique_work_directory if the test case
/// exited with the exit_skipped code.  However, there is no guarantee that the
/// file is there (say if the test really decided to exit with code exit_skipped
/// on its own).
static const char* skipped_cookie = "skipped.txt";
114 
115 
/// Mapping of interface names to interface definitions.
typedef std::map< std::string, std::shared_ptr< scheduler::interface > >
    interfaces_map;


/// Global registry of test interface implementations, keyed by name.
///
/// Use register_interface() to add an entry to this global table; look up
/// entries with find_interface().
static interfaces_map interfaces;
125 
126 
127 /// Scans the contents of a directory and appends the file listing to a file.
128 ///
129 /// \param dir_path The directory to scan.
130 /// \param output_file The file to which to append the listing.
131 ///
132 /// \throw engine::error If there are problems listing the files.
133 static void
append_files_listing(const fs::path & dir_path,const fs::path & output_file)134 append_files_listing(const fs::path& dir_path, const fs::path& output_file)
135 {
136     std::ofstream output(output_file.c_str(), std::ios::app);
137     if (!output)
138         throw engine::error(F("Failed to open output file %s for append")
139                             % output_file);
140     try {
141         std::set < std::string > names;
142 
143         const fs::directory dir(dir_path);
144         for (fs::directory::const_iterator iter = dir.begin();
145              iter != dir.end(); ++iter) {
146             if (iter->name != "." && iter->name != "..")
147                 names.insert(iter->name);
148         }
149 
150         if (!names.empty()) {
151             output << "Files left in work directory after failure: "
152                    << text::join(names, ", ") << '\n';
153         }
154     } catch (const fs::error& e) {
155         throw engine::error(F("Cannot append files listing to %s: %s")
156                             % output_file % e.what());
157     }
158 }
159 
160 
/// Maintenance data held while a test is being executed.
///
/// This data structure exists from the moment when a test is executed via
/// scheduler::spawn_test() or scheduler::impl::spawn_cleanup() to when it is
/// cleaned up with result_handle::cleanup().
///
/// This is a base data type intended to be extended for the test and cleanup
/// cases so that each contains only the relevant data.
struct exec_data : utils::noncopyable {
    /// Test program data for this test case.
    const model::test_program_ptr test_program;

    /// Name of the test case.
    const std::string test_case_name;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    exec_data(const model::test_program_ptr test_program_,
              const std::string& test_case_name_) :
        test_program(test_program_), test_case_name(test_case_name_)
    {
    }

    /// Destructor.
    ///
    /// Virtual because instances are owned and destroyed through pointers to
    /// this base type (see exec_data_ptr).
    virtual ~exec_data(void)
    {
    }
};
191 
192 
/// Maintenance data held while a test body is being executed.
struct test_exec_data : public exec_data {
    /// Test program-specific execution interface.
    const std::shared_ptr< scheduler::interface > interface;

    /// User configuration passed to the execution of the test.  We need this
    /// here to recover it later when chaining the execution of a cleanup
    /// routine (if any).
    const config::tree user_config;

    /// Whether this test case still needs to have its cleanup routine executed.
    ///
    /// This is set externally when the cleanup routine is actually invoked to
    /// denote that no further attempts shall be made at cleaning this up.
    bool needs_cleanup;

    /// The exit_handle for this test once it has completed.
    ///
    /// This is set externally when the test case has finished, as we need this
    /// information to invoke the followup cleanup routine in the right context,
    /// as indicated by needs_cleanup.
    optional< executor::exit_handle > exit_handle;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    /// \param interface_ Test program-specific execution interface.
    /// \param user_config_ User configuration passed to the test.
    test_exec_data(const model::test_program_ptr test_program_,
                   const std::string& test_case_name_,
                   const std::shared_ptr< scheduler::interface > interface_,
                   const config::tree& user_config_) :
        exec_data(test_program_, test_case_name_),
        interface(interface_), user_config(user_config_)
    {
        // Derive the initial cleanup requirement from the test's metadata:
        // only tests that declare a cleanup routine need one scheduled.
        const model::test_case& test_case = test_program->find(test_case_name);
        needs_cleanup = test_case.get_metadata().has_cleanup();
    }
};
233 
234 
/// Maintenance data held while a test cleanup routine is being executed.
///
/// Instances of this object are related to a previous test_exec_data, as
/// cleanup routines can only exist once the test has been run.
struct cleanup_exec_data : public exec_data {
    /// The exit handle of the test.  This is necessary so that we can return
    /// the correct exit_handle to the user of the scheduler.
    executor::exit_handle body_exit_handle;

    /// The final result of the test's body.  This is necessary to compute the
    /// right return value for a test with a cleanup routine: the body result is
    /// respected if it is a "bad" result; else the result of the cleanup
    /// routine is used if it has failed.
    model::test_result body_result;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    /// \param body_exit_handle_ Exit handle of the body corresponding to the
    ///     cleanup routine represented by this exec_data.
    /// \param body_result_ Result of the body corresponding to the cleanup
    ///     routine represented by this exec_data.
    cleanup_exec_data(const model::test_program_ptr test_program_,
                      const std::string& test_case_name_,
                      const executor::exit_handle& body_exit_handle_,
                      const model::test_result& body_result_) :
        exec_data(test_program_, test_case_name_),
        body_exit_handle(body_exit_handle_), body_result(body_result_)
    {
    }
};
267 
268 
/// Shared pointer to exec_data.
///
/// We require this because we want exec_data to not be copyable, and thus we
/// cannot just store it in the map without move constructors.
typedef std::shared_ptr< exec_data > exec_data_ptr;


/// Mapping of active PIDs to their maintenance data.
typedef std::map< int, exec_data_ptr > exec_data_map;
278 
279 
280 /// Enforces a test program to hold an absolute path.
281 ///
282 /// TODO(jmmv): This function (which is a pretty ugly hack) exists because we
283 /// want the interface hooks to receive a test_program as their argument.
284 /// However, those hooks run after the test program has been isolated, which
285 /// means that the current directory has changed since when the test_program
286 /// objects were created.  This causes the absolute_path() method of
287 /// test_program to return bogus values if the internal representation of their
288 /// path is relative.  We should fix somehow: maybe making the fs module grab
289 /// its "current_path" view at program startup time; or maybe by grabbing the
290 /// current path at test_program creation time; or maybe something else.
291 ///
292 /// \param program The test program to modify.
293 ///
294 /// \return A new test program whose internal paths are absolute.
295 static model::test_program
force_absolute_paths(const model::test_program program)296 force_absolute_paths(const model::test_program program)
297 {
298     const std::string& relative = program.relative_path().str();
299     const std::string absolute = program.absolute_path().str();
300 
301     const std::string root = absolute.substr(
302         0, absolute.length() - relative.length());
303 
304     return model::test_program(
305         program.interface_name(),
306         program.relative_path(), fs::path(root),
307         program.test_suite_name(),
308         program.get_metadata(), program.test_cases());
309 }
310 
311 
/// Functor to list the test cases of a test program.
///
/// Instances of this are run in a forked child via the executor, so the
/// operator() body must not return control to the caller's logic.
class list_test_cases {
    /// Interface of the test program to execute.
    std::shared_ptr< scheduler::interface > _interface;

    /// Test program to execute.  Stored by value (with absolute paths forced)
    /// because the child runs after isolation changes the current directory.
    const model::test_program _test_program;

    /// User-provided configuration variables.
    /// NOTE(review): held by reference; the caller's tree must outlive this
    /// functor — fine for the synchronous spawn pattern used here.
    const config::tree& _user_config;

public:
    /// Constructor.
    ///
    /// \param interface Interface of the test program to execute.
    /// \param test_program Test program to execute.
    /// \param user_config User-provided configuration variables.
    list_test_cases(
        const std::shared_ptr< scheduler::interface > interface,
        const model::test_program* test_program,
        const config::tree& user_config) :
        _interface(interface),
        _test_program(force_absolute_paths(*test_program)),
        _user_config(user_config)
    {
    }

    /// Body of the subprocess.
    ///
    /// Computes the test-suite-specific configuration variables and then
    /// replaces the process image via the interface's exec_list hook.
    void
    operator()(const fs::path& UTILS_UNUSED_PARAM(control_directory))
    {
        const config::properties_map vars = scheduler::generate_config(
            _user_config, _test_program.test_suite_name());
        _interface->exec_list(_test_program, vars);
    }
};
348 
349 
/// Functor to execute a test program in a child process.
///
/// Instances of this are run in a forked child via the executor; the
/// operator() body either execs the test or terminates the process.
class run_test_program {
    /// Interface of the test program to execute.
    std::shared_ptr< scheduler::interface > _interface;

    /// Test program to execute.  Stored by value (with absolute paths forced)
    /// because the child runs after isolation changes the current directory.
    const model::test_program _test_program;

    /// Name of the test case to execute.
    /// NOTE(review): held by reference; the caller's string must outlive this
    /// functor — fine for the synchronous spawn pattern used here.
    const std::string& _test_case_name;

    /// User-provided configuration variables.
    /// NOTE(review): held by reference; same lifetime caveat as above.
    const config::tree& _user_config;

    /// Verifies if the test case needs to be skipped or not.
    ///
    /// We could very well run this on the scheduler parent process before
    /// issuing the fork.  However, doing this here in the child process is
    /// better for two reasons: first, it allows us to continue using the simple
    /// spawn/wait abstraction of the scheduler; and, second, we parallelize the
    /// requirements checks among tests.
    ///
    /// \post If the test's preconditions are not met, the caller process is
    /// terminated with a special exit code and a "skipped cookie" is written to
    /// the disk with the reason for the failure.
    ///
    /// \param skipped_cookie_path File to create with the skip reason details
    ///     if this test is skipped.
    void
    do_requirements_check(const fs::path& skipped_cookie_path)
    {
        const model::test_case& test_case = _test_program.find(
            _test_case_name);

        const std::string skip_reason = engine::check_reqs(
            test_case.get_metadata(), _user_config,
            _test_program.test_suite_name(),
            fs::current_path());
        if (skip_reason.empty())
            return;

        // Persist the reason before exiting so the parent can distinguish a
        // genuine skip from a test that exited with exit_skipped on its own.
        std::ofstream output(skipped_cookie_path.c_str());
        if (!output) {
            std::perror((F("Failed to open %s for write") %
                         skipped_cookie_path).str().c_str());
            std::abort();
        }
        output << skip_reason;
        output.close();

        // Abruptly terminate the process.  We don't want to run any destructors
        // inherited from the parent process by mistake, which could, for
        // example, delete our own control files!
        ::_exit(exit_skipped);
    }

public:
    /// Constructor.
    ///
    /// \param interface Interface of the test program to execute.
    /// \param test_program Test program to execute.
    /// \param test_case_name Name of the test case to execute.
    /// \param user_config User-provided configuration variables.
    run_test_program(
        const std::shared_ptr< scheduler::interface > interface,
        const model::test_program_ptr test_program,
        const std::string& test_case_name,
        const config::tree& user_config) :
        _interface(interface),
        _test_program(force_absolute_paths(*test_program)),
        _test_case_name(test_case_name),
        _user_config(user_config)
    {
    }

    /// Body of the subprocess.
    ///
    /// \param control_directory Directory where control files (such as the
    ///     skipped cookie) are placed for the parent to inspect.
    void
    operator()(const fs::path& control_directory)
    {
        const model::test_case& test_case = _test_program.find(
            _test_case_name);
        // Fake test cases carry a precomputed result, so there is nothing to
        // execute: exit successfully right away.
        if (test_case.fake_result())
            ::_exit(EXIT_SUCCESS);

        // May not return: terminates the process if preconditions fail.
        do_requirements_check(control_directory / skipped_cookie);

        const config::properties_map vars = scheduler::generate_config(
            _user_config, _test_program.test_suite_name());
        _interface->exec_test(_test_program, _test_case_name, vars,
                              control_directory);
    }
};
442 
443 
/// Functor to execute a test case's cleanup routine in a child process.
///
/// Instances of this are run in a forked child via the executor; the
/// operator() body replaces the process image via exec_cleanup.
class run_test_cleanup {
    /// Interface of the test program to execute.
    std::shared_ptr< scheduler::interface > _interface;

    /// Test program to execute.  Stored by value (with absolute paths forced)
    /// because the child runs after isolation changes the current directory.
    const model::test_program _test_program;

    /// Name of the test case to execute.
    /// NOTE(review): held by reference; the caller's string must outlive this
    /// functor — fine for the synchronous spawn pattern used here.
    const std::string& _test_case_name;

    /// User-provided configuration variables.
    /// NOTE(review): held by reference; same lifetime caveat as above.
    const config::tree& _user_config;

public:
    /// Constructor.
    ///
    /// \param interface Interface of the test program to execute.
    /// \param test_program Test program to execute.
    /// \param test_case_name Name of the test case to execute.
    /// \param user_config User-provided configuration variables.
    run_test_cleanup(
        const std::shared_ptr< scheduler::interface > interface,
        const model::test_program_ptr test_program,
        const std::string& test_case_name,
        const config::tree& user_config) :
        _interface(interface),
        _test_program(force_absolute_paths(*test_program)),
        _test_case_name(test_case_name),
        _user_config(user_config)
    {
    }

    /// Body of the subprocess.
    ///
    /// \param control_directory Directory where control files are placed.
    void
    operator()(const fs::path& control_directory)
    {
        const config::properties_map vars = scheduler::generate_config(
            _user_config, _test_program.test_suite_name());
        _interface->exec_cleanup(_test_program, _test_case_name, vars,
                                 control_directory);
    }
};
487 
488 
489 /// Obtains the right scheduler interface for a given test program.
490 ///
491 /// \param name The name of the interface of the test program.
492 ///
493 /// \return An scheduler interface.
494 std::shared_ptr< scheduler::interface >
find_interface(const std::string & name)495 find_interface(const std::string& name)
496 {
497     const interfaces_map::const_iterator iter = interfaces.find(name);
498     PRE(interfaces.find(name) != interfaces.end());
499     return (*iter).second;
500 }
501 
502 
503 }  // anonymous namespace
504 
505 
/// Default hook for the execution of a standalone cleanup routine.
///
/// \param test_program Test program to execute (unused here).
/// \param test_case_name Name of the test case (unused here).
/// \param vars Configuration variables (unused here).
/// \param control_directory Control directory (unused here).
void
scheduler::interface::exec_cleanup(
    const model::test_program& UTILS_UNUSED_PARAM(test_program),
    const std::string& UTILS_UNUSED_PARAM(test_case_name),
    const utils::config::properties_map& UTILS_UNUSED_PARAM(vars),
    const utils::fs::path& UTILS_UNUSED_PARAM(control_directory)) const
{
    // Most test interfaces do not support standalone cleanup routines, so
    // they are not required to override this hook.  Reaching this default is
    // therefore a programming error: any interface that claims support for
    // standalone cleanup routines must provide its own override, and the
    // scheduler only invokes this hook for such interfaces.
    UNREACHABLE_MSG("exec_cleanup not implemented for an interface that "
                    "supports standalone cleanup routines");
}
518 
519 
/// Internal implementation of a lazy_test_program.
struct engine::scheduler::lazy_test_program::impl : utils::noncopyable {
    /// Whether the test cases list has been yet loaded or not.
    bool _loaded;

    /// User configuration to pass to the test program list operation.
    config::tree _user_config;

    /// Scheduler context to use to load test cases.
    /// NOTE(review): held by reference; the handle must outlive this object.
    scheduler::scheduler_handle& _scheduler_handle;

    /// Constructor.
    ///
    /// \param user_config_ User configuration for the list operation.
    /// \param scheduler_handle_ Scheduler context to use to load test cases.
    impl(const config::tree& user_config_,
         scheduler::scheduler_handle& scheduler_handle_) :
        _loaded(false), _user_config(user_config_),
        _scheduler_handle(scheduler_handle_)
    {
    }
};
539 
540 
/// Constructs a new test program.
///
/// The test cases list is intentionally left empty here: it is lazily
/// populated by test_cases() on first access.
///
/// \param interface_name_ Name of the test program interface.
/// \param binary_ The name of the test program binary relative to root_.
/// \param root_ The root of the test suite containing the test program.
/// \param test_suite_name_ The name of the test suite this program belongs to.
/// \param md_ Metadata of the test program.
/// \param user_config_ User configuration to pass to the scheduler.
/// \param scheduler_handle_ Scheduler context to use to load test cases.
scheduler::lazy_test_program::lazy_test_program(
    const std::string& interface_name_,
    const fs::path& binary_,
    const fs::path& root_,
    const std::string& test_suite_name_,
    const model::metadata& md_,
    const config::tree& user_config_,
    scheduler::scheduler_handle& scheduler_handle_) :
    test_program(interface_name_, binary_, root_, test_suite_name_, md_,
                 model::test_cases_map()),
    _pimpl(new impl(user_config_, scheduler_handle_))
{
}
563 
564 
/// Gets or loads the list of test cases from the test program.
///
/// The first invocation spawns the test program to obtain its list of test
/// cases and caches it; subsequent invocations return the cached list.
///
/// \return The list of test cases provided by the test program.
const model::test_cases_map&
scheduler::lazy_test_program::test_cases(void) const
{
    _pimpl->_scheduler_handle.check_interrupt();

    if (!_pimpl->_loaded) {
        const model::test_cases_map tcs = _pimpl->_scheduler_handle.list_tests(
            this, _pimpl->_user_config);

        // Due to the restrictions on when set_test_cases() may be called (as a
        // way to lazily initialize the test cases list before it is ever
        // returned), this cast is valid.
        const_cast< scheduler::lazy_test_program* >(this)->set_test_cases(tcs);

        _pimpl->_loaded = true;

        // Re-check after the (potentially long) listing operation so that an
        // interrupt received meanwhile is honored promptly.
        _pimpl->_scheduler_handle.check_interrupt();
    }

    INV(_pimpl->_loaded);
    return test_program::test_cases();
}
590 
591 
/// Internal implementation for the result_handle class.
struct engine::scheduler::result_handle::bimpl : utils::noncopyable {
    /// Generic executor exit handle for this result handle.
    executor::exit_handle generic;

    /// Mutable pointer to the corresponding scheduler state.
    ///
    /// This object references a member of the scheduler_handle that yielded
    /// this result_handle instance.  We need this direct access to clean up
    /// after ourselves when the result is destroyed.
    exec_data_map& all_exec_data;

    /// Constructor.
    ///
    /// \param generic_ Generic executor exit handle for this result handle.
    /// \param [in,out] all_exec_data_ Global object keeping track of all active
    ///     executions for an scheduler.  This is a pointer to a member of the
    ///     scheduler_handle object.
    bimpl(const executor::exit_handle generic_, exec_data_map& all_exec_data_) :
        generic(generic_), all_exec_data(all_exec_data_)
    {
    }

    /// Destructor.
    ///
    /// Unregisters this execution from the scheduler's bookkeeping so that
    /// stale entries do not accumulate in all_exec_data.
    ~bimpl(void)
    {
        LD(F("Removing %s from all_exec_data") % generic.original_pid());
        all_exec_data.erase(generic.original_pid());
    }
};
622 
623 
/// Constructor.
///
/// \param pbimpl Constructed internal implementation, shared with any derived
///     handle objects.
scheduler::result_handle::result_handle(std::shared_ptr< bimpl > pbimpl) :
    _pbimpl(pbimpl)
{
}
631 
632 
/// Destructor.
///
/// Does not perform cleanup; callers must invoke cleanup() explicitly so that
/// errors can be reported.
scheduler::result_handle::~result_handle(void)
{
}
637 
638 
/// Cleans up the test case results.
///
/// This function should be called explicitly as it provides the means to
/// control any exceptions raised during cleanup.  Do not rely on the destructor
/// to clean things up.
///
/// \throw engine::error If the cleanup fails, especially due to the inability
///     to remove the work directory.
void
scheduler::result_handle::cleanup(void)
{
    // Delegate to the generic executor handle, which owns the work directory.
    _pbimpl->generic.cleanup();
}
652 
653 
/// Returns the original PID corresponding to this result.
///
/// \return An exec_handle (the PID of the spawned subprocess).
int
scheduler::result_handle::original_pid(void) const
{
    return _pbimpl->generic.original_pid();
}
662 
663 
/// Returns the timestamp of when spawn_test was called.
///
/// \return A timestamp, as recorded by the generic executor.
const datetime::timestamp&
scheduler::result_handle::start_time(void) const
{
    return _pbimpl->generic.start_time();
}
672 
673 
/// Returns the timestamp of when wait_any_test returned this object.
///
/// \return A timestamp, as recorded by the generic executor.
const datetime::timestamp&
scheduler::result_handle::end_time(void) const
{
    return _pbimpl->generic.end_time();
}
682 
683 
/// Returns the path to the test-specific work directory.
///
/// This is guaranteed to be clear of files created by the scheduler.
///
/// \return The path to a directory that exists until cleanup() is called.
fs::path
scheduler::result_handle::work_directory(void) const
{
    return _pbimpl->generic.work_directory();
}
694 
695 
/// Returns the path to the file capturing the test's stdout.
///
/// \return The path to a file that exists until cleanup() is called.
const fs::path&
scheduler::result_handle::stdout_file(void) const
{
    return _pbimpl->generic.stdout_file();
}
704 
705 
/// Returns the path to the file capturing the test's stderr.
///
/// \return The path to a file that exists until cleanup() is called.
const fs::path&
scheduler::result_handle::stderr_file(void) const
{
    return _pbimpl->generic.stderr_file();
}
714 
715 
/// Internal implementation for the test_result_handle class.
struct engine::scheduler::test_result_handle::impl : utils::noncopyable {
    /// Test program data for this test case.
    model::test_program_ptr test_program;

    /// Name of the test case.
    std::string test_case_name;

    /// The actual result of the test execution.
    const model::test_result test_result;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    /// \param test_result_ The actual result of the test execution.
    impl(const model::test_program_ptr test_program_,
         const std::string& test_case_name_,
         const model::test_result& test_result_) :
        test_program(test_program_),
        test_case_name(test_case_name_),
        test_result(test_result_)
    {
    }
};
741 
742 
/// Constructor.
///
/// \param pbimpl Constructed internal implementation for the base object.
/// \param pimpl Constructed internal implementation for this subclass.
scheduler::test_result_handle::test_result_handle(
    std::shared_ptr< bimpl > pbimpl, std::shared_ptr< impl > pimpl) :
    result_handle(pbimpl), _pimpl(pimpl)
{
}
752 
753 
/// Destructor.
///
/// Cleanup is handled by the base result_handle; nothing extra to do here.
scheduler::test_result_handle::~test_result_handle(void)
{
}
758 
759 
/// Returns the test program that yielded this result.
///
/// \return A test program.
const model::test_program_ptr
scheduler::test_result_handle::test_program(void) const
{
    return _pimpl->test_program;
}
768 
769 
/// Returns the name of the test case that yielded this result.
///
/// \return A test case name.
const std::string&
scheduler::test_result_handle::test_case_name(void) const
{
    return _pimpl->test_case_name;
}
778 
779 
/// Returns the actual result of the test execution.
///
/// \return A test result.
const model::test_result&
scheduler::test_result_handle::test_result(void) const
{
    return _pimpl->test_result;
}
788 
789 
790 /// Internal implementation for the scheduler_handle.
791 struct engine::scheduler::scheduler_handle::impl : utils::noncopyable {
792     /// Generic executor instance encapsulated by this one.
793     executor::executor_handle generic;
794 
795     /// Mapping of exec handles to the data required at run time.
796     exec_data_map all_exec_data;
797 
798     /// Collection of test_exec_data objects.
799     typedef std::vector< const test_exec_data* > test_exec_data_vector;
800 
801     /// Constructor.
implengine::scheduler::scheduler_handle::impl802     impl(void) : generic(executor::setup())
803     {
804     }
805 
806     /// Destructor.
807     ///
808     /// This runs any pending cleanup routines, which should only happen if the
809     /// scheduler is abruptly terminated (aka if a signal is received).
~implengine::scheduler::scheduler_handle::impl810     ~impl(void)
811     {
812         const test_exec_data_vector tests_data = tests_needing_cleanup();
813 
814         for (test_exec_data_vector::const_iterator iter = tests_data.begin();
815              iter != tests_data.end(); ++iter) {
816             const test_exec_data* test_data = *iter;
817 
818             try {
819                 sync_cleanup(test_data);
820             } catch (const std::runtime_error& e) {
821                 LW(F("Failed to run cleanup routine for %s:%s on abrupt "
822                      "termination")
823                    % test_data->test_program->relative_path()
824                    % test_data->test_case_name);
825             }
826         }
827     }
828 
829     /// Finds any pending exec_datas that correspond to tests needing cleanup.
830     ///
831     /// \return The collection of test_exec_data objects that have their
832     /// needs_cleanup property set to true.
833     test_exec_data_vector
tests_needing_cleanupengine::scheduler::scheduler_handle::impl834     tests_needing_cleanup(void)
835     {
836         test_exec_data_vector tests_data;
837 
838         for (exec_data_map::const_iterator iter = all_exec_data.begin();
839              iter != all_exec_data.end(); ++iter) {
840             const exec_data_ptr data = (*iter).second;
841 
842             try {
843                 test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
844                     *data.get());
845                 if (test_data->needs_cleanup) {
846                     tests_data.push_back(test_data);
847                     test_data->needs_cleanup = false;
848                 }
849             } catch (const std::bad_cast& e) {
850                 // Do nothing for cleanup_exec_data objects.
851             }
852         }
853 
854         return tests_data;
855     }
856 
857     /// Cleans up a single test case synchronously.
858     ///
859     /// \param test_data The data of the previously executed test case to be
860     ///     cleaned up.
861     void
sync_cleanupengine::scheduler::scheduler_handle::impl862     sync_cleanup(const test_exec_data* test_data)
863     {
864         // The message in this result should never be seen by the user, but use
865         // something reasonable just in case it leaks and we need to pinpoint
866         // the call site.
867         model::test_result result(model::test_result_broken,
868                                   "Test case died abruptly");
869 
870         const executor::exec_handle cleanup_handle = spawn_cleanup(
871             test_data->test_program, test_data->test_case_name,
872             test_data->user_config, test_data->exit_handle.get(),
873             result);
874         generic.wait(cleanup_handle);
875     }
876 
877     /// Forks and executes a test case cleanup routine asynchronously.
878     ///
879     /// \param test_program The container test program.
880     /// \param test_case_name The name of the test case to run.
881     /// \param user_config User-provided configuration variables.
882     /// \param body_handle The exit handle of the test case's corresponding
883     ///     body.  The cleanup will be executed in the same context.
884     /// \param body_result The result of the test case's corresponding body.
885     ///
886     /// \return A handle for the background operation.  Used to match the result
887     /// of the execution returned by wait_any() with this invocation.
888     executor::exec_handle
spawn_cleanupengine::scheduler::scheduler_handle::impl889     spawn_cleanup(const model::test_program_ptr test_program,
890                   const std::string& test_case_name,
891                   const config::tree& user_config,
892                   const executor::exit_handle& body_handle,
893                   const model::test_result& body_result)
894     {
895         generic.check_interrupt();
896 
897         const std::shared_ptr< scheduler::interface > interface =
898             find_interface(test_program->interface_name());
899 
900         LI(F("Spawning %s:%s (cleanup)") % test_program->absolute_path() %
901            test_case_name);
902 
903         const executor::exec_handle handle = generic.spawn_followup(
904             run_test_cleanup(interface, test_program, test_case_name,
905                              user_config),
906             body_handle, cleanup_timeout);
907 
908         const exec_data_ptr data(new cleanup_exec_data(
909             test_program, test_case_name, body_handle, body_result));
910         LD(F("Inserting %s into all_exec_data (cleanup)") % handle.pid());
911         INV_MSG(all_exec_data.find(handle.pid()) == all_exec_data.end(),
912                 F("PID %s already in all_exec_data; not properly cleaned "
913                   "up or reused too fast") % handle.pid());;
914         all_exec_data.insert(exec_data_map::value_type(handle.pid(), data));
915 
916         return handle;
917     }
918 };
919 
920 
/// Constructor.
///
/// Instantiates the internal implementation, which in turn brings up the
/// generic executor via executor::setup().
scheduler::scheduler_handle::scheduler_handle(void) : _pimpl(new impl())
{
}
925 
926 
/// Destructor.
///
/// Callers should invoke cleanup() explicitly before destruction so that any
/// errors raised during cleanup can be handled; do not rely on this
/// destructor for that purpose.
scheduler::scheduler_handle::~scheduler_handle(void)
{
}
931 
932 
933 /// Queries the path to the root of the work directory for all tests.
934 ///
935 /// \return A path.
936 const fs::path&
root_work_directory(void) const937 scheduler::scheduler_handle::root_work_directory(void) const
938 {
939     return _pimpl->generic.root_work_directory();
940 }
941 
942 
943 /// Cleans up the scheduler state.
944 ///
945 /// This function should be called explicitly as it provides the means to
946 /// control any exceptions raised during cleanup.  Do not rely on the destructor
947 /// to clean things up.
948 ///
949 /// \throw engine::error If there are problems cleaning up the scheduler.
950 void
cleanup(void)951 scheduler::scheduler_handle::cleanup(void)
952 {
953     _pimpl->generic.cleanup();
954 }
955 
956 
957 /// Checks if the given interface name is valid.
958 ///
959 /// \param interface The name of the interface to validate.
960 ///
961 /// \throw engine::error If the given interface is not supported.
962 void
ensure_valid_interface(const std::string & name)963 scheduler::ensure_valid_interface(const std::string& name)
964 {
965     if (interfaces.find(name) == interfaces.end())
966         throw engine::error(F("Unsupported test interface '%s'") % name);
967 }
968 
969 
970 /// Registers a new interface.
971 ///
972 /// \param name The name of the interface.  Must not have yet been registered.
973 /// \param spec Interface specification.
974 void
register_interface(const std::string & name,const std::shared_ptr<interface> spec)975 scheduler::register_interface(const std::string& name,
976                               const std::shared_ptr< interface > spec)
977 {
978     PRE(interfaces.find(name) == interfaces.end());
979     interfaces.insert(interfaces_map::value_type(name, spec));
980 }
981 
982 
983 /// Returns the names of all registered interfaces.
984 ///
985 /// \return A collection of interface names.
986 std::set< std::string >
registered_interface_names(void)987 scheduler::registered_interface_names(void)
988 {
989     std::set< std::string > names;
990     for (interfaces_map::const_iterator iter = interfaces.begin();
991          iter != interfaces.end(); ++iter) {
992         names.insert((*iter).first);
993     }
994     return names;
995 }
996 
997 
/// Initializes the scheduler.
///
/// \pre This function can only be called if there is no other scheduler_handle
/// object alive.
///
/// \return A handle to the operations of the scheduler.  Constructing the
/// handle sets up its internal implementation and thus the generic executor.
scheduler::scheduler_handle
scheduler::setup(void)
{
    return scheduler_handle();
}
1009 
1010 
1011 /// Retrieves the list of test cases from a test program.
1012 ///
1013 /// This operation is currently synchronous.
1014 ///
1015 /// This operation should never throw.  Any errors during the processing of the
1016 /// test case list are subsumed into a single test case in the return value that
1017 /// represents the failed retrieval.
1018 ///
1019 /// \param test_program The test program from which to obtain the list of test
1020 /// cases.
1021 /// \param user_config User-provided configuration variables.
1022 ///
1023 /// \return The list of test cases.
1024 model::test_cases_map
list_tests(const model::test_program * test_program,const config::tree & user_config)1025 scheduler::scheduler_handle::list_tests(
1026     const model::test_program* test_program,
1027     const config::tree& user_config)
1028 {
1029     _pimpl->generic.check_interrupt();
1030 
1031     const std::shared_ptr< scheduler::interface > interface = find_interface(
1032         test_program->interface_name());
1033 
1034     try {
1035         const executor::exec_handle exec_handle = _pimpl->generic.spawn(
1036             list_test_cases(interface, test_program, user_config),
1037             list_timeout, none);
1038         executor::exit_handle exit_handle = _pimpl->generic.wait(exec_handle);
1039 
1040         const model::test_cases_map test_cases = interface->parse_list(
1041             exit_handle.status(),
1042             exit_handle.stdout_file(),
1043             exit_handle.stderr_file());
1044 
1045         exit_handle.cleanup();
1046 
1047         if (test_cases.empty())
1048             throw std::runtime_error("Empty test cases list");
1049 
1050         return test_cases;
1051     } catch (const std::runtime_error& e) {
1052         // TODO(jmmv): This is a very ugly workaround for the fact that we
1053         // cannot report failures at the test-program level.
1054         LW(F("Failed to load test cases list: %s") % e.what());
1055         model::test_cases_map fake_test_cases;
1056         fake_test_cases.insert(model::test_cases_map::value_type(
1057             "__test_cases_list__",
1058             model::test_case(
1059                 "__test_cases_list__",
1060                 "Represents the correct processing of the test cases list",
1061                 model::test_result(model::test_result_broken, e.what()))));
1062         return fake_test_cases;
1063     }
1064 }
1065 
1066 
1067 /// Forks and executes a test case asynchronously.
1068 ///
1069 /// Note that the caller needn't know if the test has a cleanup routine or not.
1070 /// If there indeed is a cleanup routine, we trigger it at wait_any() time.
1071 ///
1072 /// \param test_program The container test program.
1073 /// \param test_case_name The name of the test case to run.
1074 /// \param user_config User-provided configuration variables.
1075 ///
1076 /// \return A handle for the background operation.  Used to match the result of
1077 /// the execution returned by wait_any() with this invocation.
1078 scheduler::exec_handle
spawn_test(const model::test_program_ptr test_program,const std::string & test_case_name,const config::tree & user_config)1079 scheduler::scheduler_handle::spawn_test(
1080     const model::test_program_ptr test_program,
1081     const std::string& test_case_name,
1082     const config::tree& user_config)
1083 {
1084     _pimpl->generic.check_interrupt();
1085 
1086     const std::shared_ptr< scheduler::interface > interface = find_interface(
1087         test_program->interface_name());
1088 
1089     LI(F("Spawning %s:%s") % test_program->absolute_path() % test_case_name);
1090 
1091     const model::test_case& test_case = test_program->find(test_case_name);
1092 
1093     optional< passwd::user > unprivileged_user;
1094     if (user_config.is_set("unprivileged_user") &&
1095         test_case.get_metadata().required_user() == "unprivileged") {
1096         unprivileged_user = user_config.lookup< engine::user_node >(
1097             "unprivileged_user");
1098     }
1099 
1100     const executor::exec_handle handle = _pimpl->generic.spawn(
1101         run_test_program(interface, test_program, test_case_name,
1102                          user_config),
1103         test_case.get_metadata().timeout(),
1104         unprivileged_user);
1105 
1106     const exec_data_ptr data(new test_exec_data(
1107         test_program, test_case_name, interface, user_config));
1108     LD(F("Inserting %s into all_exec_data") % handle.pid());
1109     INV_MSG(
1110         _pimpl->all_exec_data.find(handle.pid()) == _pimpl->all_exec_data.end(),
1111         F("PID %s already in all_exec_data; not cleaned up or reused too fast")
1112         % handle.pid());;
1113     _pimpl->all_exec_data.insert(exec_data_map::value_type(handle.pid(), data));
1114 
1115     return handle.pid();
1116 }
1117 
1118 
/// Waits for completion of any forked test case.
///
/// Note that if the terminated test case has a cleanup routine, this function
/// is the one in charge of spawning the cleanup routine asynchronously.
///
/// \return The result of the execution of a subprocess.  This is a dynamically
/// allocated object because the scheduler can spawn subprocesses of various
/// types and, at wait time, we don't know upfront what we are going to get.
scheduler::result_handle_ptr
scheduler::scheduler_handle::wait_any(void)
{
    _pimpl->generic.check_interrupt();

    executor::exit_handle handle = _pimpl->generic.wait_any();

    // NOTE(review): the iterator is dereferenced without checking against
    // end(); this relies on the invariant that every PID returned by the
    // executor was registered in all_exec_data at spawn time -- confirm.
    const exec_data_map::iterator iter = _pimpl->all_exec_data.find(
        handle.original_pid());
    exec_data_ptr data = (*iter).second;

    utils::dump_stacktrace_if_available(data->test_program->absolute_path(),
                                        _pimpl->generic, handle);

    optional< model::test_result > result;
    // The try/catch below implements a two-way type dispatch: the happy path
    // handles test_exec_data entries (test bodies), and the std::bad_cast
    // handler takes over when the entry is a cleanup_exec_data instead.
    try {
        test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
            *data.get());
        LD(F("Got %s from all_exec_data") % handle.original_pid());

        test_data->exit_handle = handle;

        const model::test_case& test_case = test_data->test_program->find(
            test_data->test_case_name);

        result = test_case.fake_result();

        if (!result && handle.status() && handle.status().get().exited() &&
            handle.status().get().exitstatus() == exit_skipped) {
            // If the test's process terminated with our magic "exit_skipped"
            // status, there are two cases to handle.  The first is the case
            // where the "skipped cookie" exists, in which case we never got to
            // actually invoke the test program; if that's the case, handle it
            // here.  The second case is where the test case actually decided to
            // exit with the "exit_skipped" status; in that case, just fall back
            // to the regular status handling.
            const fs::path skipped_cookie_path = handle.control_directory() /
                skipped_cookie;
            std::ifstream input(skipped_cookie_path.c_str());
            if (input) {
                result = model::test_result(model::test_result_skipped,
                                            utils::read_stream(input));
                input.close();

                // If we determined that the test needs to be skipped, we do not
                // want to run the cleanup routine because doing so could result
                // in errors.  However, we still want to run the cleanup routine
                // if the test's body reports a skip (because actions could have
                // already been taken).
                test_data->needs_cleanup = false;
            }
        }
        if (!result) {
            // No fake result and no pre-run skip: ask the interface to derive
            // the result from the process status and its output files.
            result = test_data->interface->compute_result(
                handle.status(),
                handle.control_directory(),
                handle.stdout_file(),
                handle.stderr_file());
        }
        INV(result);

        if (!result.get().good()) {
            // Attach the work directory contents to stderr to aid debugging
            // of the failed test.
            append_files_listing(handle.work_directory(),
                                 handle.stderr_file());
        }

        if (test_data->needs_cleanup) {
            INV(test_case.get_metadata().has_cleanup());
            // The test body has completed and we have processed it.  If there
            // is a cleanup routine, trigger it now and wait for any other test
            // completion.  The caller never knows about cleanup routines.
            _pimpl->spawn_cleanup(test_data->test_program,
                                  test_data->test_case_name,
                                  test_data->user_config, handle, result.get());
            test_data->needs_cleanup = false;

            // TODO(jmmv): Chaining this call is ugly.  We'd be better off by
            // looping over terminated processes until we got a result suitable
            // for user consumption.  For the time being this is good enough and
            // not a problem because the call chain won't get big: the majority
            // of test cases do not have cleanup routines.
            return wait_any();
        }
    } catch (const std::bad_cast& e) {
        const cleanup_exec_data* cleanup_data =
            &dynamic_cast< const cleanup_exec_data& >(*data.get());
        LD(F("Got %s from all_exec_data (cleanup)") % handle.original_pid());

        // Handle the completion of cleanup subprocesses internally: the caller
        // is not aware that these exist so, when we return, we must return the
        // data for the original test that triggered this routine.  For example,
        // because the caller wants to see the exact same exec_handle that was
        // returned by spawn_test.

        // A failed cleanup only degrades the result when the body itself
        // passed; a body failure always takes precedence.
        const model::test_result& body_result = cleanup_data->body_result;
        if (body_result.good()) {
            if (!handle.status()) {
                result = model::test_result(model::test_result_broken,
                                            "Test case cleanup timed out");
            } else {
                if (!handle.status().get().exited() ||
                    handle.status().get().exitstatus() != EXIT_SUCCESS) {
                    result = model::test_result(
                        model::test_result_broken,
                        "Test case cleanup did not terminate successfully");
                } else {
                    result = body_result;
                }
            }
        } else {
            result = body_result;
        }

        // Untrack the cleanup process.  This must be done explicitly because we
        // do not create a result_handle object for the cleanup, and that is the
        // one in charge of doing so in the regular (non-cleanup) case.
        LD(F("Removing %s from all_exec_data (cleanup) in favor of %s")
           % handle.original_pid()
           % cleanup_data->body_exit_handle.original_pid());
        _pimpl->all_exec_data.erase(handle.original_pid());

        handle = cleanup_data->body_exit_handle;
    }
    INV(result);

    std::shared_ptr< result_handle::bimpl > result_handle_bimpl(
        new result_handle::bimpl(handle, _pimpl->all_exec_data));
    std::shared_ptr< test_result_handle::impl > test_result_handle_impl(
        new test_result_handle::impl(
            data->test_program, data->test_case_name, result.get()));
    return result_handle_ptr(new test_result_handle(result_handle_bimpl,
                                                    test_result_handle_impl));
}
1260 
1261 
/// Forks and executes a test case synchronously for debugging.
///
/// \pre No other processes should be in execution by the scheduler.
///
/// \param test_program The container test program.
/// \param test_case_name The name of the test case to run.
/// \param user_config User-provided configuration variables.
/// \param stdout_target File to which to write the stdout of the test case.
/// \param stderr_target File to which to write the stderr of the test case.
///
/// \return The result of the execution of the test.
scheduler::result_handle_ptr
scheduler::scheduler_handle::debug_test(
    const model::test_program_ptr test_program,
    const std::string& test_case_name,
    const config::tree& user_config,
    const fs::path& stdout_target,
    const fs::path& stderr_target)
{
    const exec_handle exec_handle = spawn_test(
        test_program, test_case_name, user_config);
    result_handle_ptr result_handle = wait_any();

    // TODO(jmmv): We need to do this while the subprocess is alive.  This is
    // important for debugging purposes, as we should see the contents of stdout
    // or stderr as they come in.
    //
    // Unfortunately, we cannot do so.  We cannot just read and block from a
    // file, waiting for further output to appear... as this only works on pipes
    // or sockets.  We need a better interface for this whole thing.
    //
    // NOTE(review): std::auto_ptr is deprecated since C++11 and removed in
    // C++17; migrate to std::unique_ptr together with utils::open_ostream's
    // return type -- confirm that signature before changing it here.
    {
        std::auto_ptr< std::ostream > output = utils::open_ostream(
            stdout_target);
        *output << utils::read_file(result_handle->stdout_file());
    }
    {
        std::auto_ptr< std::ostream > output = utils::open_ostream(
            stderr_target);
        *output << utils::read_file(result_handle->stderr_file());
    }

    INV(result_handle->original_pid() == exec_handle);
    return result_handle;
}
1306 
1307 
1308 /// Checks if an interrupt has fired.
1309 ///
1310 /// Calls to this function should be sprinkled in strategic places through the
1311 /// code protected by an interrupts_handler object.
1312 ///
1313 /// This is just a wrapper over signals::check_interrupt() to avoid leaking this
1314 /// dependency to the caller.
1315 ///
1316 /// \throw signals::interrupted_error If there has been an interrupt.
1317 void
check_interrupt(void) const1318 scheduler::scheduler_handle::check_interrupt(void) const
1319 {
1320     _pimpl->generic.check_interrupt();
1321 }
1322 
1323 
1324 /// Queries the current execution context.
1325 ///
1326 /// \return The queried context.
1327 model::context
current_context(void)1328 scheduler::current_context(void)
1329 {
1330     return model::context(fs::current_path(), utils::getallenv());
1331 }
1332 
1333 
1334 /// Generates the set of configuration variables for a test program.
1335 ///
1336 /// \param user_config The configuration variables provided by the user.
1337 /// \param test_suite The name of the test suite.
1338 ///
1339 /// \return The mapping of configuration variables for the test program.
1340 config::properties_map
generate_config(const config::tree & user_config,const std::string & test_suite)1341 scheduler::generate_config(const config::tree& user_config,
1342                            const std::string& test_suite)
1343 {
1344     config::properties_map props;
1345 
1346     try {
1347         props = user_config.all_properties(F("test_suites.%s") % test_suite,
1348                                            true);
1349     } catch (const config::unknown_key_error& unused_error) {
1350         // Ignore: not all test suites have entries in the configuration.
1351     }
1352 
1353     // TODO(jmmv): This is a hack that exists for the ATF interface only, so it
1354     // should be moved there.
1355     if (user_config.is_set("unprivileged_user")) {
1356         const passwd::user& user =
1357             user_config.lookup< engine::user_node >("unprivileged_user");
1358         props["unprivileged-user"] = user.name;
1359     }
1360 
1361     return props;
1362 }
1363