1"""
2Base class for DarwinLog tests.
3"""
4
5# System imports
6from __future__ import print_function
7
8import json
9import platform
10import re
11import sys
12import threading
13
14
15# lldb imports
16import lldb
17from lldb import SBProcess, SBTarget
18
19from lldbsuite.test import decorators
20from lldbsuite.test import lldbtest
21from lldbsuite.test import lldbtest_config
22from lldbsuite.test import lldbutil
23
24
25def expand_darwinlog_command(command):
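    """Return the full darwin-log plugin command for a subcommand."""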
26    return "plugin structured-data darwin-log " + command
27
28
29def expand_darwinlog_settings_set_command(command):
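    """Return a settings-set command for a darwin-log plugin setting."""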
30    return "settings set plugin.structured-data.darwin-log." + command
31
32
33class DarwinLogTestBase(lldbtest.TestBase):
34    """Base class for DarwinLog test cases that are pexpect-based."""
    NO_DEBUG_INFO_TESTCASE = True

    CONTINUE_REGEX = re.compile(r"Process \d+ resuming")

    def setUp(self):
        # Call super's setUp().
        super(DarwinLogTestBase, self).setUp()

        # Until other systems support this, exit
        # early if we're not macOS version 10.12
        # or greater.
        version = platform.mac_ver()[0].split('.')
        if ((int(version[0]) == 10) and (int(version[1]) < 12) or
                (int(version[0]) < 10)):
            self.skipTest("DarwinLog tests currently require macOS 10.12+")
            return

        self.child = None
        self.child_prompt = '(lldb) '
        self.strict_sources = False
        self.enable_process_monitor_logging = False

    def run_lldb_to_breakpoint(self, exe, source_file, line,
                               enable_command=None, settings_commands=None):

        import pexpect
        # Use self.child_prompt, which is "(lldb) ".
        prompt = self.child_prompt

        # So that the child gets torn down after the test.
        self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec,
                                                 self.lldbOption, exe))
        child = self.child

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            child.logfile_read = sys.stdout

        if self.enable_process_monitor_logging:
            if platform.system() == 'Darwin':
                self.runCmd(
                    "settings set target.process.extra-startup-command "
                    "QSetLogging:bitmask=LOG_DARWIN_LOG;")
                self.expect_prompt()

        # Run the enable command if we have one.
        if enable_command is not None:
            self.runCmd(enable_command)
            self.expect_prompt()

        # Disable showing of source lines at our breakpoint.
        # This is necessary for the logging tests, because the very
        # text we want to match for output from the running inferior
        # will show up in the source as well.  We don't want the source
        # output to erroneously make a match with our expected output.
        self.runCmd("settings set stop-line-count-before 0")
        self.expect_prompt()
        self.runCmd("settings set stop-line-count-after 0")
        self.expect_prompt()

        # While we're debugging, turn on packet logging.
        self.runCmd("log enable -f /tmp/packets.log gdb-remote packets")
        self.expect_prompt()

        # Prevent mirroring of NSLog/os_log content to stderr.  We want log
        # messages to come exclusively through our log channel.
        self.runCmd(
            "settings set target.env-vars IDE_DISABLED_OS_ACTIVITY_DT_MODE=1")
        self.expect_prompt()

        # Run any darwin-log settings commands now, before we enable logging.
        if settings_commands is not None:
            for setting_command in settings_commands:
                self.runCmd(
                    expand_darwinlog_settings_set_command(setting_command))
                self.expect_prompt()
        # Set the breakpoint right before the os_log() macros.  We don't
        # set it on the os_log*() calls themselves because those expand
        # into a number of nested-scope calls, which would make the
        # debugger stop multiple times on the same line.  That makes it
        # difficult to match os_log() content, since the ordering between
        # stops and log lines is non-deterministic.  Instead, we stop
        # before the logging and have the process sleep afterwards, so
        # the log messages arrive while the target process is "running"
        # (sleeping).
        child.sendline('breakpoint set -f %s -l %d' % (source_file, line))
        child.expect_exact(prompt)

        # Now run to the breakpoint that we just set.
        child.sendline('run')
        child.expect_exact(prompt)

        # Ensure we stopped at a breakpoint.
        self.runCmd("thread list")
        self.expect(re.compile(r"stop reason = breakpoint"))

        # Now we're ready to check if DarwinLog is available.
        if not self.darwin_log_available():
            self.skipTest("DarwinLog not available")

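    # Note: the helpers below intentionally shadow TestBase.runCmd()/expect():
    # in this pexpect-based class, commands are sent to the spawned lldb
    # child and output is matched against the child's stream rather than
    # going through the in-process command interpreter.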
    def runCmd(self, cmd):
        self.child.sendline(cmd)

    def expect_prompt(self, exactly=True):
        self.expect(self.child_prompt, exactly=exactly)

    def expect(self, pattern, exactly=False, *args, **kwargs):
        if exactly:
            return self.child.expect_exact(pattern, *args, **kwargs)
        return self.child.expect(pattern, *args, **kwargs)

    def darwin_log_available(self):
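        """Return True if the darwin-log feature reports as available."""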
148        self.runCmd("plugin structured-data darwin-log status")
149        self.expect(re.compile(r"Availability: ([\S]+)"))
150        return self.child.match is not None and (
151            self.child.match.group(1) == "available")
152
153    def do_test(self, enable_options, expect_regexes=None,
154                settings_commands=None):
155        """Test that a single fall-through reject rule rejects all logging."""
        self.build(dictionary=self.d)
        self.setTearDownCleanup(dictionary=self.d)

        # Build the darwin-log enable command.
        enable_cmd = expand_darwinlog_command('enable')
        if enable_options is not None and len(enable_options) > 0:
            enable_cmd += ' ' + ' '.join(enable_options)

        exe = self.getBuildArtifact(self.exe_name)
        self.run_lldb_to_breakpoint(exe, self.source, self.line,
                                    enable_command=enable_cmd,
                                    settings_commands=settings_commands)
        self.expect_prompt()

        # Now go.
        self.runCmd("process continue")
        self.expect(self.CONTINUE_REGEX)

        if expect_regexes is None:
            # Expect matching a log line or program exit.
            # Test methods determine which ones are valid.
            expect_regexes = (
                [re.compile(r"source-log-([^-]+)-(\S+)"),
                 re.compile(r"exited with status")
                 ])
        self.expect(expect_regexes)


def remove_add_mode_entry(log_entries):
    """libtrace creates an "Add Mode:..." message when logging is enabled.
    Strip this out of results since our test subjects don't create it."""
    return [entry for entry in log_entries
            if "message" in entry and
            not entry["message"].startswith("Add Mode:")]


class DarwinLogEventBasedTestBase(lldbtest.TestBase):
    """Base class for event-based DarwinLog tests."""
    NO_DEBUG_INFO_TESTCASE = True

    class EventListenerThread(threading.Thread):
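        """Collects DarwinLog structured data events on a background thread.

        The thread polls the given SBListener until the expected number of
        log entries has arrived, the inferior has exited, or the maximum
        number of WaitForEvent() timeouts has been reached.
        """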

        def __init__(self, listener, process, trace_on, max_entry_count):
            super(
                DarwinLogEventBasedTestBase.EventListenerThread,
                self).__init__()
            self.process = process
            self.listener = listener
            self.trace_on = trace_on
            self.max_entry_count = max_entry_count
            self.exception = None
            self.structured_data_event_count = 0
            self.wait_seconds = 2
            self.max_timeout_count = 4
            self.log_entries = []

        def handle_structured_data_event(self, event):
            structured_data = SBProcess.GetStructuredDataFromEvent(event)
            if not structured_data.IsValid():
                if self.trace_on:
                    print("invalid structured data")
                return

            # Track that we received a valid structured data event.
            self.structured_data_event_count += 1

            # Grab the individual log entries from the JSON.
            stream = lldb.SBStream()
            structured_data.GetAsJSON(stream)
            contents = json.loads(stream.GetData())
            self.log_entries.extend(contents["events"])
            if self.trace_on:
                print("Structured data (raw):", stream.GetData())

            # Print the pretty-printed version.
            if self.trace_on:
                stream.Clear()
                structured_data.PrettyPrint(stream)
                print("Structured data (pretty print):",
                      stream.GetData())

        def done(self, timeout_count):
            """Returns True when we're done listening for events."""
            # See if we should consider the number of events retrieved.
            if self.max_entry_count is not None:
                if len(self.log_entries) >= self.max_entry_count:
                    # We've received the maximum number of expected events,
                    # so we can stop listening.
                    if self.trace_on:
                        print("Event listener thread exiting due to max "
                              "expected log entry count being reached.")
                    return True

            # If we've hit the maximum number of WaitForEvent() timeouts,
            # we're done.
            if timeout_count >= self.max_timeout_count:
                if self.trace_on:
                    print("Event listener thread exiting due to max number of "
                          "WaitForEvent() timeouts being reached.")
                return True

            # If our process is dead, we're done.
            if not self.process.is_alive:
                if self.trace_on:
                    print("Event listener thread exiting due to test inferior "
                          "exiting.")
                return True

            # We're not done.
            return False

        def run(self):
            event = lldb.SBEvent()
            try:
                timeout_count = 0

                # Wait up to 4 times for the event to arrive.
                while not self.done(timeout_count):
                    if self.trace_on:
                        print("Calling wait for event...")
                    if self.listener.WaitForEvent(self.wait_seconds, event):
                        while event.IsValid():
                            # Check if it's a process event.
                            if SBProcess.EventIsStructuredDataEvent(event):
                                self.handle_structured_data_event(event)
                            else:
                                if self.trace_on:
                                    print("ignoring unexpected event:",
                                          lldbutil.get_description(event))
                            # Grab the next event, if there is one.
                            event.Clear()
                            if not self.listener.GetNextEvent(event):
                                if self.trace_on:
                                    print("listener has no more events "
                                          "available at this time")
                    else:
                        if self.trace_on:
                            print("timeout occurred waiting for event...")
                        timeout_count += 1
                self.listener.Clear()
            except Exception as e:
                self.exception = e

    def setUp(self):
        # Call super's setUp().
        super(DarwinLogEventBasedTestBase, self).setUp()

        # Until other systems support this, exit
        # early if we're not macOS version 10.12
        # or greater.
        version = platform.mac_ver()[0].split('.')
        if ((int(version[0]) == 10) and (int(version[1]) < 12) or
                (int(version[0]) < 10)):
            self.skipTest("DarwinLog tests currently require macOS 10.12+")
            return

        # Source filename.
        self.source = 'main.c'

        # Output filename.
        self.exe_name = 'a.out'
        self.d = {'C_SOURCES': self.source, 'EXE': self.exe_name}

        # Locate breakpoint.
        self.line = lldbtest.line_number(self.source, '// break here')

        # Enable debugserver logging of the darwin log collection
        # mechanism.
        self.runCmd("settings set target.process.extra-startup-command "
                    "QSetLogging:bitmask=LOG_DARWIN_LOG;")

    def darwin_log_available(self):
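        """Return True if the darwin-log feature reports as available."""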
        match = self.match("plugin structured-data darwin-log status",
                           patterns=[r"Availability: ([\S]+)"])
        return match is not None and (match.group(1) == "available")

    def do_test(self, enable_options, settings_commands=None,
                run_enable_after_breakpoint=False, max_entry_count=None):
        """Runs the test inferior, returning collected events.

        This method runs the test inferior to the first breakpoint hit.
        It then adds a listener for structured data events, and collects
        all events from that point forward until end of execution of the
        test inferior.  It then returns those events.

        @return
            A list of structured data events received, in the order they
            were received.
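
        Example (hypothetical subclass usage; the argument values are
        illustrative only):

            events = self.do_test(enable_options=[], max_entry_count=2)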
344        """
345        self.build(dictionary=self.d)
346        self.setTearDownCleanup(dictionary=self.d)
347
348        exe = self.getBuildArtifact(self.exe_name)
349
350        # Create a target by the debugger.
351        target = self.dbg.CreateTarget(exe)
352        self.assertTrue(target, lldbtest.VALID_TARGET)
353
354        # Run the darwin-log settings commands.
355        if settings_commands is not None:
356            for setting_command in settings_commands:
357                self.runCmd(
358                    expand_darwinlog_settings_set_command(setting_command))
359
360        # Build the darwin-log enable command.
361        enable_cmd = expand_darwinlog_command("enable")
362        if enable_options is not None and len(enable_options) > 0:
363            enable_cmd += ' ' + ' '.join(enable_options)
364
365        # Run the darwin-log enable command now if we are not supposed
366        # to do it at the first breakpoint.  This tests the start-up
367        # code, which has the benefit of being able to set os_log-related
368        # environment variables.
369        if not run_enable_after_breakpoint:
370            self.runCmd(enable_cmd)
371
372        # Create the breakpoint.
373        breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
374        self.assertIsNotNone(breakpoint)
375        self.assertTrue(breakpoint.IsValid())
376        self.assertEqual(1, breakpoint.GetNumLocations(),
377                         "Should have found one breakpoint")
378
379        # Enable packet logging.
380        # self.runCmd("log enable -f /tmp/packets.log gdb-remote packets")
381        # self.runCmd("log enable lldb process")
382
383        # Launch the process - doesn't stop at entry.
384        process = target.LaunchSimple(None, None, self.getBuildDir())
385        self.assertIsNotNone(process, lldbtest.PROCESS_IS_VALID)
386
387        # Keep track of whether we're tracing output.
388        trace_on = self.TraceOn()
389
390        # Get the next thread that stops.
391        from lldbsuite.test.lldbutil import get_stopped_thread
392        thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
393
394        self.assertIsNotNone(thread, "There should be a thread stopped "
395                             "due to breakpoint")
396
397        # The process should be stopped at this point.
398        self.expect("process status", lldbtest.PROCESS_STOPPED,
399                    patterns=['Process .* stopped'])
400
401        # The stop reason of the thread should be breakpoint.
402        self.expect("thread list", lldbtest.STOPPED_DUE_TO_BREAKPOINT,
403                    substrs=['stopped', 'stop reason = breakpoint'])
404
405        # And our one and only breakpoint should have been hit.
        self.assertEqual(breakpoint.GetHitCount(), 1)

        # Check if DarwinLog is available.  This check cannot be done
        # until after the process has started, as the feature availability
        # comes through the stub.  The stub isn't running until
        # the target process is running.  So this is really the earliest
        # we can check.
        if not self.darwin_log_available():
            self.skipTest("DarwinLog not available")

        # Now set up the structured data listener.
        #
        # Grab the broadcaster for the process.  We'll be attaching our
        # listener to it.
        broadcaster = process.GetBroadcaster()
        self.assertIsNotNone(broadcaster)

        listener = lldb.SBListener("SBStructuredData listener")
        self.assertIsNotNone(listener)

        rc = broadcaster.AddListener(
            listener, lldb.SBProcess.eBroadcastBitStructuredData)
        self.assertTrue(
            rc, "Should be able to add listener to the process broadcaster")

        # Start the listening thread to retrieve the events.
        # Bump up max entry count for the potentially included Add Mode:
        # entry.
        if max_entry_count is not None:
            max_entry_count += 1
        event_thread = self.EventListenerThread(listener, process, trace_on,
                                                max_entry_count)
        event_thread.start()

        # Continue the test inferior.  We should get any events after this.
        process.Continue()

        # Wait until the event thread terminates.
        # print("main thread now waiting for event thread to receive events.")
        event_thread.join()

        # If the process is still alive, we kill it here.
        if process.is_alive:
            process.Kill()

        # Fail on any exception that occurred in the event listener thread.
        if event_thread.exception is not None:
            # Re-raise it here so it shows up as a test error.
            raise event_thread.exception

        # Return the collected logging events.
        return remove_add_mode_entry(event_thread.log_entries)