1 /* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
2 
3    This program is free software; you can redistribute it and/or modify
4    it under the terms of the GNU General Public License, version 2.0,
5    as published by the Free Software Foundation.
6 
7    This program is also distributed with certain software (including
8    but not limited to OpenSSL) that is licensed under separate terms,
9    as designated in a particular file or component or in included license
10    documentation.  The authors of MySQL hereby grant you an additional
11    permission to link the program and your derivative works with the
12    separately licensed software that they have included with MySQL.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License, version 2.0, for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software
21    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA  */
22 
23 /**
24    @file
25    Implementation of the Optimizer trace API (WL#5257)
26 */
27 
28 #include "sql/opt_trace.h"
29 
30 #include <float.h>
31 #include <stdio.h>
32 #include <algorithm>  // std::min
33 #include <new>
34 
35 #include "lex_string.h"
36 #include "m_string.h"  // _dig_vec_lower
37 #include "my_dbug.h"
38 #include "my_pointer_arithmetic.h"
39 #include "my_sys.h"
40 #include "mysql/psi/psi_base.h"
41 #include "mysqld_error.h"
42 #include "prealloced_array.h"
43 #include "sql/current_thd.h"
44 #include "sql/enum_query_type.h"
45 #include "sql/handler.h"
46 #include "sql/item.h"  // Item
47 #include "sql/table.h"
48 #include "sql_string.h"  // String
49 
namespace {
/**
  A wrapper of class String, for storing query or trace.
  Imposes a memory cap: once the underlying String has grown to
  allowed_mem_size, further input is dropped and counted in missing_bytes.
  Any memory allocation error in this class is reported by my_error(), see
  OOM_HANDLING in opt_trace.h.
*/
class Buffer {
 private:
  size_t allowed_mem_size;  ///< allowed memory size for this String
  size_t missing_bytes;     ///< how many bytes could not be added
  String string_buf;        ///< underlying storage

 public:
  Buffer() : allowed_mem_size(0), missing_bytes(0) {}

  /// @returns bytes currently allocated by the underlying String
  size_t alloced_length() const { return string_buf.alloced_length(); }
  /// @returns bytes currently used in the underlying String
  size_t length() const { return string_buf.length(); }
  void prealloc();  ///< pro-actively extend buffer if soon short of space
  /// @returns 0-terminated pointer to the buffer's contents
  char *c_ptr_safe() { return string_buf.c_ptr_safe(); }
  /// @returns possibly non-0-terminated pointer, NULL if never written to
  const char *ptr() const { return string_buf.ptr(); }

  const CHARSET_INFO *charset() const { return string_buf.charset(); }
  void set_charset(const CHARSET_INFO *charset) {
    string_buf.set_charset(charset);
  }

  /**
    Like @c String::append()
    @param  str     String, in this instance's charset
    @param  length  length of string
  */
  void append(const char *str, size_t length);
  void append(const char *str) { return append(str, strlen(str)); }
  /**
    Like @c append() but escapes certain characters for string values to
    be JSON-compliant.
    @param  str     String in UTF8
    @param  length  length of string
  */
  void append_escaped(const char *str, size_t length);
  void append(char chr);

  size_t get_allowed_mem_size() const { return allowed_mem_size; }
  size_t get_missing_bytes() const { return missing_bytes; }

  void set_allowed_mem_size(size_t a) { allowed_mem_size = a; }
};

}  // namespace
99 
/**
  @class Opt_trace_stmt

  The trace of one statement. For example, executing a stored procedure
  containing 3 sub-statements will produce 4 traces (one for the CALL
  statement, one for each sub-statement), so 4 Opt_trace_stmt linked together
  into Opt_trace_context's lists.
*/
class Opt_trace_stmt {
 public:
  /**
     Constructor, starts a trace for information_schema and dbug.
     @param  ctx_arg          context
  */
  Opt_trace_stmt(Opt_trace_context *ctx_arg);

  /**
     Ends a trace; destruction may not be possible immediately as we may have
     to keep the trace in case the user later reads it from I_S.
  */
  void end();

  /// @returns whether @c end() has been called on this instance.
  bool has_ended() const { return ended; }

  /// Sets the quantity of allowed memory for this trace.
  void set_allowed_mem_size(size_t size);

  /// @sa Opt_trace_context::set_query()
  void set_query(const char *query, size_t length, const CHARSET_INFO *charset);

  /* Below, functions for filling the statement's trace */

  /**
     When creating an Opt_trace_struct: adds a key and the opening bracket to
     the trace buffer, updates current_struct.
     @param  key              key or NULL
     @param  ots              structure being created
     @param  wants_disable_I_S whether structure wants to disable I_S output
     @param  opening_bracket  opening bracket to use
     @retval false ok
     @retval true  error, Opt_trace_struct must set itself to dummy; trace
     may have been written to, will likely be invalid JSON.
  */
  bool open_struct(const char *key, Opt_trace_struct *ots,
                   bool wants_disable_I_S, char opening_bracket);
  /**
     When closing an Opt_trace_struct:
     - adds the closing bracket and optionally the key to the trace buffer
     - re-enables I_S output if the dying structure had disabled it
     - updates current_struct.
     @param  saved_key        key or NULL
     @param  has_disabled_I_S whether structure had disabled I_S output
     @param  closing_bracket  closing bracket to use
  */
  void close_struct(const char *saved_key, bool has_disabled_I_S,
                    char closing_bracket);

  /// Put optional comma, newline and indentation
  void separator();
  /// Put newline and indentation
  void next_line();

  /**
     Adds a key/value pair to the trace buffer.
     @param  key  key or NULL
     @param  val  representation of value as string
     @param  val_length  length of value
     @param  quotes  should value be delimited with '"' (false when the value
     is the representation of a number, boolean or null)
     @param  escape  does value need escaping (has special characters)

     @note Structures prepare a string representation of their value-to-add
     and call this function.
  */
  void add(const char *key, const char *val, size_t val_length, bool quotes,
           bool escape);

  /* Below, functions to request information from this instance */

  /// Fills user-level information @sa Opt_trace_iterator
  void fill_info(Opt_trace_info *info) const;

  /// @returns 'size' last bytes of the trace buffer
  const char *trace_buffer_tail(size_t size);

  /// @returns total memory used by this trace
  size_t alloced_length() const {
    return trace_buffer.alloced_length() + query_buffer.alloced_length();
  }

  /// Debug check: 's' must be the innermost open structure.
  void assert_current_struct(
      const Opt_trace_struct *s MY_ATTRIBUTE((unused))) const {
    DBUG_ASSERT(current_struct == s);
  }

  /// @see Opt_trace_context::missing_privilege()
  void missing_privilege();

  bool support_I_S() const { return I_S_disabled == 0; }

  /// Temporarily disables I_S output for this statement.
  void disable_I_S() { ++I_S_disabled; }

  /**
     Restores I_S support to what it was before the previous call
     to disable_I_S().
  */
  void restore_I_S() { --I_S_disabled; }

  /**
     Generate a dummy unique key, and return pointer to it. The pointed data
     has the lifetime of Opt_trace_stmt, and is overwritten by the next call
     to this function.
  */
  const char *make_unknown_key();

 private:
  bool ended;  ///< Whether @c end() has been called on this instance

  /**
    0 <=> this trace should be in information_schema.
    In the life of an Opt_trace_stmt, support for I_S may be temporarily
    disabled.
    Once disabled, it must stay disabled until re-enabled at the same stack
    frame. This:
    Opt_trace_object1 // disables I_S
       Opt_trace_object2 // re-enables I_S
    is impossible (the top object wins).
    So it is sufficient, to keep track of the current state, to have a counter
    incremented each time we get a request to disable I_S.
  */
  int I_S_disabled;

  bool missing_priv;  ///< whether user lacks privilege to see this trace

  Opt_trace_context *ctx;            ///< context
  Opt_trace_struct *current_struct;  ///< current open structure

  /// Same logic as Opt_trace_context::stack_of_current_stmts.
  Prealloced_array<Opt_trace_struct *, 16> stack_of_current_structs;

  Buffer trace_buffer;  ///< Where the trace is accumulated
  Buffer query_buffer;  ///< Where the original query is put

  /**
    Counter which serves to have unique autogenerated keys, needed if we
    autogenerate more than one key in a single object.
    @see Opt_trace_struct::check_key() and @see Opt_trace_stmt::add() .
  */
  uint unknown_key_count;
  /// Space for last autogenerated key
  char unknown_key[24];
};
254 
255 // implementation of class Opt_trace_struct
256 
namespace {
/**
  JSON delimiters: objects (which require keys) use braces, arrays use
  square brackets.
*/
inline char opening_bracket(bool requires_key) {
  return requires_key ? '{' : '[';
}
inline char closing_bracket(bool requires_key) {
  return requires_key ? '}' : ']';
}
}  // namespace
267 
/**
  Common constructor code for trace structures: registers this structure with
  the current statement's trace and writes the key (if any) and opening
  bracket, unless tracing of 'feature' is disabled.
*/
void Opt_trace_struct::do_construct(Opt_trace_context *ctx,
                                    bool requires_key_arg, const char *key,
                                    Opt_trace_context::feature_value feature) {
  saved_key = key;
  requires_key = requires_key_arg;

  DBUG_PRINT("opt", ("%s: starting struct", key));
  stmt = ctx->get_current_stmt_in_gen();
#ifndef DBUG_OFF
  previous_key[0] = 0;
#endif
  // If this optimizer feature is not traced, I_S output is disabled for the
  // structure and all its children.
  has_disabled_I_S = !ctx->feature_enabled(feature);
  empty = true;
  // 'started' stays false on error, making this structure a no-op dummy.
  if (likely(!stmt->open_struct(key, this, has_disabled_I_S,
                                opening_bracket(requires_key))))
    started = true;
}
285 
/**
  Common destructor code: writes the closing bracket (and optional end
  marker) and restores any I_S state changed at construction.
*/
void Opt_trace_struct::do_destruct() {
  DBUG_PRINT("opt", ("%s: ending struct", saved_key));
  DBUG_ASSERT(started);
  stmt->close_struct(saved_key, has_disabled_I_S,
                     closing_bracket(requires_key));
  started = false;
}
293 
/**
  Adds a key/string-value pair; the value is always quoted, and escaped for
  JSON if 'escape' is true.
*/
Opt_trace_struct &Opt_trace_struct::do_add(const char *key, const char *val,
                                           size_t val_length, bool escape) {
  DBUG_ASSERT(started);
  DBUG_PRINT("opt", ("%s: \"%.*s\"", key, (int)val_length, val));
  stmt->add(key, val, val_length, true, escape);
  return *this;
}
301 
namespace {
/// human-readable names for boolean values; index 0 is false, 1 is true
LEX_CSTRING bool_as_text[] = {{STRING_WITH_LEN("false")},
                              {STRING_WITH_LEN("true")}};
}  // namespace
307 
do_add(const char * key,bool val)308 Opt_trace_struct &Opt_trace_struct::do_add(const char *key, bool val) {
309   DBUG_ASSERT(started);
310   DBUG_PRINT("opt", ("%s: %d", key, (int)val));
311   const LEX_CSTRING *text = &bool_as_text[val];
312   stmt->add(key, text->str, text->length, false, false);
313   return *this;
314 }
315 
do_add(const char * key,longlong val)316 Opt_trace_struct &Opt_trace_struct::do_add(const char *key, longlong val) {
317   DBUG_ASSERT(started);
318   char buf[22];  // 22 is enough for digits of a 64-bit int
319   llstr(val, buf);
320   DBUG_PRINT("opt", ("%s: %s", key, buf));
321   stmt->add(key, buf, strlen(buf), false, false);
322   return *this;
323 }
324 
do_add(const char * key,ulonglong val)325 Opt_trace_struct &Opt_trace_struct::do_add(const char *key, ulonglong val) {
326   DBUG_ASSERT(started);
327   char buf[22];
328   ullstr(val, buf);
329   DBUG_PRINT("opt", ("%s: %s", key, buf));
330   stmt->add(key, buf, strlen(buf), false, false);
331   return *this;
332 }
333 
/// Adds a key/double pair, rendered as an unquoted number.
Opt_trace_struct &Opt_trace_struct::do_add(const char *key, double val) {
  DBUG_ASSERT(started);
  char buf[32];  // 32 is enough for digits of a double
  /*
    To fit in FLT_DIG digits, my_gcvt rounds DBL_MAX (1.7976931...e308), or
    anything >=1.5e308, to 2e308. But JSON parsers refuse to read 2e308. So,
    lower the number.
  */
  my_gcvt(std::min(1e308, val), MY_GCVT_ARG_DOUBLE, FLT_DIG, buf, nullptr);
  DBUG_PRINT("opt", ("%s: %s", key, buf));
  stmt->add(key, buf, strlen(buf), false, false);
  return *this;
}
347 
/// Adds a key with the JSON literal value "null" (unquoted).
Opt_trace_struct &Opt_trace_struct::do_add_null(const char *key) {
  DBUG_ASSERT(started);
  DBUG_PRINT("opt", ("%s: null", key));
  stmt->add(key, STRING_WITH_LEN("null"), false, false);
  return *this;
}
354 
do_add(const char * key,Item * item)355 Opt_trace_struct &Opt_trace_struct::do_add(const char *key, Item *item) {
356   char buff[256];
357   String str(buff, sizeof(buff), system_charset_info);
358   str.length(0);
359   if (item != nullptr) {
360     // QT_TO_SYSTEM_CHARSET because trace must be in UTF8
361     item->print(current_thd, &str,
362                 enum_query_type(QT_TO_SYSTEM_CHARSET | QT_SHOW_SELECT_NUMBER |
363                                 QT_NO_DEFAULT_DB));
364     /* needs escaping */
365     return do_add(key, str.ptr(), str.length(), true);
366   } else
367     return do_add_null(key);
368 }
369 
/// Adds a key/cost pair; only the estimate's total cost is traced.
Opt_trace_struct &Opt_trace_struct::do_add(const char *key,
                                           const Cost_estimate &value) {
  return do_add(key, value.total_cost());
}
374 
/**
  Adds a key/value pair with the value rendered as "0x" followed by an even
  number of lowercase hex digits (whole bytes, no leading-zero stripping
  within a byte). The text is built backwards from the end of 'buf'.
*/
Opt_trace_struct &Opt_trace_struct::do_add_hex(const char *key, uint64 val) {
  DBUG_ASSERT(started);
  // 2 chars for "0x" prefix + 16 hex digits for a 64-bit value.
  char buf[2 + 16], *p_end = buf + sizeof(buf) - 1, *p = p_end;
  for (;;) {
    // Emit one byte: low nibble first since we write right-to-left.
    *p-- = _dig_vec_lower[val & 15];
    *p-- = _dig_vec_lower[(val & 240) >> 4];
    val >>= 8;
    if (val == 0) break;
  }
  *p-- = 'x';
  *p = '0';
  const size_t len = p_end + 1 - p;
  DBUG_PRINT("opt", ("%s: %.*s", key, static_cast<int>(len), p));
  stmt->add(check_key(key), p, len, false, false);
  return *this;
}
391 
/**
  Adds a "table" key with the table reference printed as SQL text (UTF8,
  alias only for derived tables). A NULL reference adds nothing.
*/
Opt_trace_struct &Opt_trace_struct::do_add_utf8_table(const TABLE_LIST *tl) {
  if (tl != nullptr) {
    StringBuffer<32> str;
    tl->print(current_thd, &str,
              enum_query_type(QT_TO_SYSTEM_CHARSET | QT_SHOW_SELECT_NUMBER |
                              QT_NO_DEFAULT_DB | QT_DERIVED_TABLE_ONLY_ALIAS));
    return do_add("table", str.ptr(), str.length(), true);
  }
  return *this;
}
402 
/**
  Validates (and fixes, if needed) the key for the next addition: objects
  require a key, arrays forbid one. A missing-but-required key is replaced
  with an autogenerated unique key; a forbidden key is dropped.
  @returns the key to use (possibly NULL or autogenerated)
*/
const char *Opt_trace_struct::check_key(const char *key) {
  DBUG_ASSERT(started);
  //  User should always add to the innermost open object, not outside.
  stmt->assert_current_struct(this);
  bool has_key = key != nullptr;
  if (unlikely(has_key != requires_key)) {
    // fix the key to produce correct JSON syntax:
    key = has_key ? nullptr : stmt->make_unknown_key();
    has_key = !has_key;
  }
  if (has_key) {
#ifndef DBUG_OFF
    /*
      Check that we're not having two identical consecutive keys in one
      object; though the real restriction should not have 'consecutive'.
    */
    DBUG_ASSERT(strncmp(previous_key, key, sizeof(previous_key) - 1) != 0);
    strncpy(previous_key, key, sizeof(previous_key) - 1);
    previous_key[sizeof(previous_key) - 1] = 0;
#endif
  }
  return key;
}
426 
427 // Implementation of Opt_trace_stmt class
428 
/// Starts a statement trace; I_S output is initially enabled.
Opt_trace_stmt::Opt_trace_stmt(Opt_trace_context *ctx_arg)
    : ended(false),
      I_S_disabled(0),
      missing_priv(false),
      ctx(ctx_arg),
      current_struct(nullptr),
      stack_of_current_structs(PSI_INSTRUMENT_ME),
      unknown_key_count(0) {
  // Trace is always in UTF8. This is the only charset which JSON accepts.
  trace_buffer.set_charset(system_charset_info);
  DBUG_ASSERT(system_charset_info == &my_charset_utf8_general_ci);
}
441 
/**
  Marks the trace as complete; all structures must have been closed by now.
  The instance may outlive this call, as the trace can still be read from
  I_S afterwards.
*/
void Opt_trace_stmt::end() {
  DBUG_ASSERT(stack_of_current_structs.size() == 0);
  DBUG_ASSERT(I_S_disabled >= 0);
  ended = true;
  /*
    Because allocation is done in big chunks, buffer->Ptr[str_length]
    may be uninitialized while buffer->Ptr[allocated length] is 0, so we
    must use c_ptr_safe() as we want a 0-terminated string (which is easier
    to manipulate in a debugger, or to compare in unit tests with
    EXPECT_STREQ).
    c_ptr_safe() may realloc an empty String from 0 bytes to 8 bytes,
    when it adds the closing \0.
  */
  trace_buffer.c_ptr_safe();
  // Send the full nice trace to DBUG.
  DBUG_EXECUTE("opt", {
    const char *trace = trace_buffer.c_ptr_safe();
    DBUG_LOCK_FILE;
    fputs("Complete optimizer trace:", DBUG_FILE);
    fputs(trace, DBUG_FILE);
    fputs("\n", DBUG_FILE);
    DBUG_UNLOCK_FILE;
  });
  // Undo the context-wide I_S disabling done by missing_privilege().
  if (unlikely(missing_priv)) ctx->restore_I_S();
}
467 
/// Caps how much memory this statement's trace buffer may use.
void Opt_trace_stmt::set_allowed_mem_size(size_t size) {
  trace_buffer.set_allowed_mem_size(size);
}
471 
set_query(const char * query,size_t length,const CHARSET_INFO * charset)472 void Opt_trace_stmt::set_query(const char *query, size_t length,
473                                const CHARSET_INFO *charset) {
474   // Should be called only once per statement.
475   DBUG_ASSERT(query_buffer.ptr() == nullptr);
476   query_buffer.set_charset(charset);
477   if (!support_I_S()) {
478     /*
479       Query won't be read, don't waste resources storing it. Still we have set
480       the charset, which is necessary.
481     */
482     return;
483   }
484   // We are taking a bit of space from 'trace_buffer'.
485   size_t available =
486       (trace_buffer.alloced_length() >= trace_buffer.get_allowed_mem_size())
487           ? 0
488           : (trace_buffer.get_allowed_mem_size() -
489              trace_buffer.alloced_length());
490   query_buffer.set_allowed_mem_size(available);
491   // No need to escape query, this is not for JSON.
492   query_buffer.append(query, length);
493   // Space which query took is taken out of the trace:
494   const size_t new_allowed_mem_size =
495       (query_buffer.alloced_length() >= trace_buffer.get_allowed_mem_size())
496           ? 0
497           : (trace_buffer.get_allowed_mem_size() -
498              query_buffer.alloced_length());
499   trace_buffer.set_allowed_mem_size(new_allowed_mem_size);
500 }
501 
/**
  Registers a newly constructed structure: writes its key and opening
  bracket (or an ellipsis if its feature is untraced), pushes the previous
  current structure on the stack, and makes 'ots' current.
  @returns true on out-of-memory while growing the stack
*/
bool Opt_trace_stmt::open_struct(const char *key, Opt_trace_struct *ots,
                                 bool wants_disable_I_S, char opening_bracket) {
  if (support_I_S()) {
    if (wants_disable_I_S) {
      /*
        User requested no tracing for this structure's feature. We are
        entering a disabled portion; put an ellipsis "..." to alert the user.
        Disabling applies to all the structure's children.
        It is possible that inside this struct, a new statement is created
        (range optimizer can evaluate stored functions...): its tracing is
        disabled too.
        When the structure is destroyed, the initial setting is restored.
      */
      if (current_struct != nullptr) {
        if (key != nullptr)
          current_struct->add_alnum(key, "...");
        else
          current_struct->add_alnum("...");
      }
    } else {
      trace_buffer.prealloc();
      add(key, &opening_bracket, 1, false, false);
    }
  }
  if (wants_disable_I_S) ctx->disable_I_S_for_this_and_children();
  {
    DBUG_EXECUTE_IF("opt_trace_oom_in_open_struct",
                    DBUG_SET("+d,simulate_out_of_memory"););
    const bool rc = stack_of_current_structs.push_back(current_struct);
    /*
      If the append() above didn't trigger reallocation, we need to turn the
      symbol off by ourselves, or it could make an unrelated allocation
      fail.
    */
    DBUG_EXECUTE_IF("opt_trace_oom_in_open_struct",
                    DBUG_SET("-d,simulate_out_of_memory"););
    if (unlikely(rc)) return true;
  }
  current_struct = ots;
  return false;
}
543 
/**
  Unregisters the innermost structure: pops its parent back as current,
  writes the closing bracket (with an optional "/* key *" end marker), and
  restores I_S output if this structure had disabled it.
*/
void Opt_trace_stmt::close_struct(const char *saved_key, bool has_disabled_I_S,
                                  char closing_bracket) {
  /*
    This was constructed with current_stmt_in_gen=NULL which was pushed in
    'open_struct()'. So this NULL is in the array, back() is safe.
  */
  current_struct = stack_of_current_structs.back();
  stack_of_current_structs.pop_back();
  if (support_I_S()) {
    next_line();
    trace_buffer.append(closing_bracket);
    if (ctx->get_end_marker() && saved_key != nullptr) {
      trace_buffer.append(STRING_WITH_LEN(" /* "));
      trace_buffer.append(saved_key);
      trace_buffer.append(STRING_WITH_LEN(" */"));
    }
  }
  if (has_disabled_I_S) ctx->restore_I_S();
}
563 
/// Writes the element separator: a comma if the enclosing structure already
/// has content, then newline and indentation.
void Opt_trace_stmt::separator() {
  DBUG_ASSERT(support_I_S());
  // Put a comma first, if we have already written an object at this level.
  if (current_struct != nullptr) {
    if (!current_struct->set_not_empty()) trace_buffer.append(',');
    next_line();
  }
}
572 
namespace {
/// A pool of spaces (3*64 characters) used to write indentation in chunks.
const char my_spaces[] =
    "                                                                "
    "                                                                "
    "                                                                ";
}
579 
next_line()580 void Opt_trace_stmt::next_line() {
581   if (ctx->get_one_line()) return;
582   trace_buffer.append('\n');
583 
584   size_t to_be_printed = 2 * stack_of_current_structs.size();
585   const size_t spaces_len = sizeof(my_spaces) - 1;
586   while (to_be_printed > spaces_len) {
587     trace_buffer.append(my_spaces, spaces_len);
588     to_be_printed -= spaces_len;
589   }
590   trace_buffer.append(my_spaces, to_be_printed);
591 }
592 
/// Autogenerates a key like "unknown_key_1"; valid until the next call.
const char *Opt_trace_stmt::make_unknown_key() {
  snprintf(unknown_key, sizeof(unknown_key), "unknown_key_%u",
           ++unknown_key_count);
  return unknown_key;
}
598 
/**
  Writes one key/value pair to the trace buffer: separator, optional quoted
  key with ": ", then the value (quoted and/or escaped as requested).
  No-op when I_S output is disabled.
*/
void Opt_trace_stmt::add(const char *key, const char *val, size_t val_length,
                         bool quotes, bool escape) {
  if (!support_I_S()) return;
  separator();
  // Let the innermost structure validate/fix the key for JSON correctness.
  if (current_struct != nullptr) key = current_struct->check_key(key);
  if (key != nullptr) {
    trace_buffer.append('"');
    trace_buffer.append(key);
    trace_buffer.append(STRING_WITH_LEN("\": "));
  }
  if (quotes) trace_buffer.append('"');
  /*
    Objects' keys use "normal" characters (A-Za-z0-9_), no escaping
    needed. Same for numeric/bool values. Only string values may need
    escaping.
  */
  if (escape)
    trace_buffer.append_escaped(val, val_length);
  else
    trace_buffer.append(val, val_length);
  if (quotes) trace_buffer.append('"');
}
621 
/**
  Exports this trace for user-level consumption (I_S). If the user lacks the
  privilege, empty trace/query and binary charset are reported instead.
*/
void Opt_trace_stmt::fill_info(Opt_trace_info *info) const {
  if (unlikely(info->missing_priv = missing_priv)) {
    info->trace_ptr = info->query_ptr = "";
    info->trace_length = info->query_length = 0;
    info->query_charset = &my_charset_bin;
    info->missing_bytes = 0;
  } else {
    info->trace_ptr = trace_buffer.ptr();
    info->trace_length = trace_buffer.length();
    info->query_ptr = query_buffer.ptr();
    info->query_length = query_buffer.length();
    info->query_charset = query_buffer.charset();
    // Total truncation across both buffers, for the MISSING_BYTES... column.
    info->missing_bytes =
        trace_buffer.get_missing_bytes() + query_buffer.get_missing_bytes();
  }
}
638 
trace_buffer_tail(size_t size)639 const char *Opt_trace_stmt::trace_buffer_tail(size_t size) {
640   size_t buffer_len = trace_buffer.length();
641   const char *ptr = trace_buffer.c_ptr_safe();
642   if (buffer_len > size) ptr += buffer_len - size;
643   return ptr;
644 }
645 
/**
  Records that the connected user lacks the privilege to see this trace;
  the trace will appear empty in I_S and sub-statements won't be traced.
  Idempotent: only the first call has an effect.
*/
void Opt_trace_stmt::missing_privilege() {
  if (!missing_priv) {
    DBUG_PRINT("opt", ("trace denied"));
    // This mark will make the trace appear empty in OPTIMIZER_TRACE table.
    missing_priv = true;
    // And all substatements will not be traced.
    ctx->disable_I_S_for_this_and_children();
  }
}
655 
656 // Implementation of class Buffer
657 
658 namespace {
659 
/**
  Appends 'str' with JSON escaping: short backslash escapes for the common
  control characters and for " and \, \u00XX for other control characters.
  Characters are staged in a small local buffer which is flushed whenever it
  could not fit the next (worst-case 6-byte) escape sequence.
  If the memory cap is already reached, the whole input is counted as
  missing instead.
*/
void Buffer::append_escaped(const char *str, size_t length) {
  if (alloced_length() >= allowed_mem_size) {
    missing_bytes += length;
    return;
  }
  const char *pstr, *pstr_end;
  char buf[128];  // Temporary output buffer.
  char *pbuf = buf;
  for (pstr = str, pstr_end = (str + length); pstr < pstr_end; pstr++) {
    char esc;
    const char c = *pstr;
    /*
      JSON syntax says that control characters must be escaped. Experience
      confirms that this means ASCII 0->31 and " and \ . A few of
      them are accepted with a short escaping syntax (using \ : like \n)
      but for most of them, only \uXXXX works, where XXXX is a
      hexadecimal value for the code point.
      Rules also mention escaping / , but Python's and Perl's json modules
      do not require it, and somewhere on Internet someone said JSON
      allows escaping of / but does not require it.

      Because UTF8 has the same characters in range 0-127 as ASCII does, and
      other UTF8 characters don't contain 0-127 bytes, if we see a byte
      equal to 0 it is really the UTF8 u0000 character (a.k.a. ASCII NUL)
      and not a part of a longer character; if we see a newline, same,
      etc. That wouldn't necessarily be true with another character set.
    */
    switch (c) {
        // Don't use \u when possible for common chars, \ is easier to read:
      case '\\':
        esc = '\\';
        break;
      case '"':
        esc = '\"';
        break;
      case '\n':
        esc = 'n';
        break;
      case '\r':
        esc = 'r';
        break;
      case '\t':
        esc = 't';
        break;
      default:
        esc = 0;
        break;
    }
    if (esc != 0)  // Escaping with backslash.
    {
      *pbuf++ = '\\';
      *pbuf++ = esc;
    } else {
      uint ascii_code = (uint)c;
      if (ascii_code < 32)  // Escaping with \u
      {
        *pbuf++ = '\\';
        *pbuf++ = 'u';
        *pbuf++ = '0';
        *pbuf++ = '0';
        if (ascii_code < 16) {
          *pbuf++ = '0';
        } else {
          *pbuf++ = '1';
          ascii_code -= 16;
        }
        *pbuf++ = _dig_vec_lower[ascii_code];
      } else
        *pbuf++ = c;  // Normal character, no escaping needed.
    }
    /*
      To fit a next character, we need at most 6 bytes (happens when using
      \uXXXX syntax) before the buffer's end:
    */
    if (pbuf > buf + (sizeof(buf) - 6)) {
      // Possibly no room in 'buf' for next char, so flush buf.
      string_buf.append(buf, pbuf - buf);
      pbuf = buf;  // back to buf's start
    }
  }
  // Flush any chars left in 'buf'.
  string_buf.append(buf, pbuf - buf);
}
743 
/**
  Appends 'length' bytes, unless the memory cap is already reached, in which
  case the bytes are counted as missing instead.
*/
void Buffer::append(const char *str, size_t length) {
  if (alloced_length() >= allowed_mem_size) {
    missing_bytes += length;
    return;
  }
  DBUG_EXECUTE_IF("opt_trace_oom_in_buffers",
                  DBUG_SET("+d,simulate_out_of_memory"););
  string_buf.append(str, length);
  DBUG_EXECUTE_IF("opt_trace_oom_in_buffers",
                  DBUG_SET("-d,simulate_out_of_memory"););
}
755 
/// Appends one character, or counts it as missing if the cap is reached.
void Buffer::append(char chr) {
  if (alloced_length() >= allowed_mem_size) {
    missing_bytes++;
    return;
  }
  // No need for escaping chr, given how this function is used.
  string_buf.append(chr);
}
764 
/**
  Pro-actively grows the buffer when free space is running low, to amortize
  reallocation cost: jump from 0 to 1KB, then grow by a factor of 1.5,
  always staying under the memory cap (minus a small safety margin).
*/
void Buffer::prealloc() {
  const size_t alloced = alloced_length();
  const size_t first_increment = 1024;
  if ((alloced - length()) < (first_increment / 3)) {
    /*
      Support for I_S will produce long strings, and there is little free
      space left in the allocated buffer, so it looks like
      realloc is soon unavoidable; so let's get many bytes at a time.
      Note that if this re-allocation fails, or any String::append(), we
      will get a weird trace; either truncated if the server stops, or maybe
      with a hole if there is later memory again for the trace's
      continuation. The statement will fail anyway due to my_error(), in the
      server.
      We jump from 0 to first_increment and then multiply by 1.5. Unlike
      addition of a constant length, multiplying is expected to give amortized
      constant reallocation time; 1.5 is a commonly seen factor in the
      litterature.
    */
    size_t new_size = (alloced == 0) ? first_increment : (alloced * 15 / 10);
    size_t max_size = allowed_mem_size;
    /*
      Determine a safety margin:
      (A) String::realloc() adds at most ALIGN_SIZE(1) bytes to requested
      length, so we need to decrement max_size by this amount, to be sure that
      we don't allocate more than max_size
      (B) We need to stay at least one byte under that max_size, or the next
      append() would trigger up-front truncation, which is potentially wrong
      for a "pre-emptive allocation" as we do here.
    */
    const size_t safety_margin = ALIGN_SIZE(1) /* (A) */ + 1 /* (B) */;
    if (max_size >= safety_margin) {
      max_size -= safety_margin;
      if (new_size > max_size)  // Don't pre-allocate more than the limit.
        new_size = max_size;
      if (new_size >= alloced)  // Never shrink string.
        string_buf.mem_realloc(new_size);
    }
  }
}
804 
805 }  // namespace
806 
807 // Implementation of Opt_trace_context class
808 
/// Keywords accepted by the trace-control variable; NullS-terminated.
/// The names match the flags handled in Opt_trace_context::start()
/// (enabled/one_line); "default" presumably resets — confirm in caller.
const char *Opt_trace_context::flag_names[] = {"enabled", "one_line", "default",
                                               NullS};

/// Names of the optimizer features which tracing can be restricted to;
/// NullS-terminated, parallel to the feature_value enumerators below.
const char *Opt_trace_context::feature_names[] = {
    "greedy_search",      "range_optimizer", "dynamic_range",
    "repeated_subselect", "default",         NullS};

/// Features traced by default: all of the optional ones above
/// (MISC is forced on separately in start()).
const Opt_trace_context::feature_value Opt_trace_context::default_features =
    Opt_trace_context::feature_value(Opt_trace_context::GREEDY_SEARCH |
                                     Opt_trace_context::RANGE_OPTIMIZER |
                                     Opt_trace_context::DYNAMIC_RANGE |
                                     Opt_trace_context::REPEATED_SUBSELECT);
821 
~Opt_trace_context()822 Opt_trace_context::~Opt_trace_context() {
823   if (unlikely(pimpl != nullptr)) {
824     /* There may well be some few ended traces left: */
825     purge_stmts(true);
826     /* All should have moved to 'del' list: */
827     DBUG_ASSERT(pimpl->all_stmts_for_I_S.size() == 0);
828     /* All of 'del' list should have been deleted: */
829     DBUG_ASSERT(pimpl->all_stmts_to_del.size() == 0);
830     delete pimpl;
831   }
832 }
833 
834 template <class T>
new_nothrow_w_my_error()835 T *new_nothrow_w_my_error() {
836   T *const t = new (std::nothrow) T();
837   if (unlikely(t == nullptr))
838     my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), static_cast<int>(sizeof(T)));
839   return t;
840 }
841 template <class T, class Arg>
842 T *new_nothrow_w_my_error(Arg a) {
843   T *const t = new (std::nothrow) T(a);
844   if (unlikely(t == nullptr))
845     my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), static_cast<int>(sizeof(T)));
846   return t;
847 }
848 
/**
  Starts a new trace for the statement about to execute, or decides that
  no trace object is needed at all. Returns true on fatal error (OOM,
  reported with my_error()), false otherwise. See the comments inline for
  the optimization which skips trace creation entirely in the common
  "tracing disabled, no parent trace" case.
*/
bool Opt_trace_context::start(bool support_I_S_arg,
                              bool support_dbug_or_missing_priv,
                              bool end_marker_arg, bool one_line_arg,
                              long offset_arg, long limit_arg,
                              ulong max_mem_size_arg, ulonglong features_arg) {
  DBUG_TRACE;

  // A higher layer has disabled I_S output; override the caller's request.
  if (I_S_disabled != 0) {
    DBUG_PRINT("opt", ("opt_trace is already disabled"));
    support_I_S_arg = false;
  }

  /*
    Decide on optimizations possible to realize the requested support.
    If I_S or debug output is requested, we need to create an Opt_trace_stmt.
    Same if we should support calls to Opt_trace_context::missing_privilege(),
    because that function requires an Opt_trace_stmt.
  */
  if (!support_I_S_arg && !support_dbug_or_missing_priv) {
    // The statement will not do tracing.
    if (likely(pimpl == nullptr) || pimpl->current_stmt_in_gen == nullptr) {
      /*
        This should be the most commonly taken branch in a release binary,
        when the connection rarely has optimizer tracing runtime-enabled.
        It's thus important that it's optimized: we can short-cut the creation
        and starting of Opt_trace_stmt, unlike in the next "else" branch.
      */
      return false;
    }
    /*
      If we come here, there is a parent statement which has a trace.
      Imagine that we don't create a trace for the child statement
      here. Then trace structures of the child will be accidentally attached
      to the parent's trace (as it is still 'current_stmt_in_gen', which
      constructors of Opt_trace_struct will use); thus the child's trace
      will be visible (as a chunk of the parent's trace). That would be
      incorrect. To avoid this, we create a trace for the child but with I_S
      output disabled; this changes 'current_stmt_in_gen', thus this child's
      trace structures will be attached to the child's trace and thus not be
      visible.
    */
  }

  DBUG_EXECUTE_IF("no_new_opt_trace_stmt", DBUG_ASSERT(0););

  // Lazily create the implementation object on first real use.
  if (pimpl == nullptr &&
      ((pimpl = new_nothrow_w_my_error<Opt_trace_context_impl>()) == nullptr))
    return true;

  /*
    If tracing is disabled by some caller, then don't change settings (offset
    etc). Doing otherwise would surely bring a problem.
  */
  if (I_S_disabled == 0) {
    /*
      Here we allow a stored routine's sub-statement to enable/disable
      tracing, or change settings. Thus in a stored routine's body, there can
      be some 'SET OPTIMIZER_TRACE="enabled=[on|off]"' to trace only certain
      sub-statements.
    */
    pimpl->end_marker = end_marker_arg;
    pimpl->one_line = one_line_arg;
    pimpl->offset = offset_arg;
    pimpl->limit = limit_arg;
    pimpl->max_mem_size = max_mem_size_arg;
    // MISC always on
    pimpl->features = Opt_trace_context::feature_value(features_arg |
                                                       Opt_trace_context::MISC);
  }
  // Negative offset ("last N traces") is handled in purge_stmts() instead.
  if (support_I_S_arg && pimpl->offset >= 0) {
    /* If outside the offset/limit window, no need to support I_S */
    if (pimpl->since_offset_0 < pimpl->offset) {
      DBUG_PRINT("opt", ("disabled: since_offset_0(%ld) < offset(%ld)",
                         pimpl->since_offset_0, pimpl->offset));
      support_I_S_arg = false;
    } else if (pimpl->since_offset_0 >= (pimpl->offset + pimpl->limit)) {
      DBUG_PRINT("opt", ("disabled: since_offset_0(%ld) >="
                         " offset(%ld) + limit(%ld)",
                         pimpl->since_offset_0, pimpl->offset, pimpl->limit));
      support_I_S_arg = false;
    }
    pimpl->since_offset_0++;
  }
  {
    /*
      We don't allocate it in THD's MEM_ROOT as it must survive until a next
      statement (SELECT) reads the trace.
    */
    Opt_trace_stmt *stmt = new_nothrow_w_my_error<Opt_trace_stmt>(this);

    DBUG_PRINT("opt", ("new stmt %p support_I_S %d", stmt, support_I_S_arg));

    // Push the (possibly NULL) parent so end() can restore it later.
    if (unlikely(stmt == nullptr || pimpl->stack_of_current_stmts.push_back(
                                        pimpl->current_stmt_in_gen)))
      goto err;  // push_back() above called my_error()

    /*
      If sending only to DBUG, don't show to the user.
      Same if tracing was temporarily disabled at higher layers with
      Opt_trace_disable_I_S.
      So we just link it to the 'del' list for purging when ended.
    */
    Opt_trace_stmt_array *list;
    if (support_I_S_arg)
      list = &pimpl->all_stmts_for_I_S;
    else {
      stmt->disable_I_S();  // no need to fill a not-shown JSON trace
      list = &pimpl->all_stmts_to_del;
    }

    if (unlikely(list->push_back(stmt))) goto err;

    pimpl->current_stmt_in_gen = stmt;

    // As we just added one trace, maybe the previous ones are unneeded now
    purge_stmts(false);
    // This purge may have freed space, compute max allowed size:
    stmt->set_allowed_mem_size(allowed_mem_size_for_current_stmt());
    return false;
  err:
    delete stmt;
    DBUG_ASSERT(0);
    return true;
  }
}
974 
/**
  Ends the current statement's trace: closes the trace, pops the parent
  statement's trace back into 'current_stmt_in_gen', shrinks the parent's
  memory allowance by what the child consumed, and purges traces which
  are no longer needed.
*/
void Opt_trace_context::end() {
  DBUG_ASSERT(I_S_disabled >= 0);
  // If pimpl was never created, start() never made a trace: nothing to end.
  if (likely(pimpl == nullptr)) return;
  if (pimpl->current_stmt_in_gen != nullptr) {
    pimpl->current_stmt_in_gen->end();
    /*
      pimpl was constructed with current_stmt_in_gen=NULL which was pushed in
      'start()'. So this NULL is in the array, back() is safe.
    */
    Opt_trace_stmt *const parent = pimpl->stack_of_current_stmts.back();
    pimpl->stack_of_current_stmts.pop_back();
    pimpl->current_stmt_in_gen = parent;
    if (parent != nullptr) {
      /*
        Parent regains control, now it needs to be told that its child has
        used space, and thus parent's allowance has shrunk.
      */
      parent->set_allowed_mem_size(allowed_mem_size_for_current_stmt());
    }
    /*
      Purge again. Indeed when we are here, compared to the previous start()
      we have one more ended trace, so can potentially free more. Consider
      offset=-1 and:
         top_stmt, started
           sub_stmt, starts: can't free top_stmt as it is not ended yet
           sub_stmt, ends: won't free sub_stmt (as user will want to see it),
           can't free top_stmt as not ended yet
         top_stmt, continued
         top_stmt, ends: free top_stmt as it's not last and is ended, keep
         only sub_stmt.
      Still the purge is done in ::start() too, as an optimization, for this
      case:
         sub_stmt, started
         sub_stmt, ended
         sub_stmt, starts: can free above sub_stmt, will save memory compared
         to free-ing it only when the new sub_stmt ends.
    */
    purge_stmts(false);
  } else
    // No current trace: the stack must be empty (starts/ends are balanced).
    DBUG_ASSERT(pimpl->stack_of_current_stmts.size() == 0);
}
1016 
support_I_S() const1017 bool Opt_trace_context::support_I_S() const {
1018   return (pimpl != nullptr) && (pimpl->current_stmt_in_gen != nullptr) &&
1019          pimpl->current_stmt_in_gen->support_I_S();
1020 }
1021 
/**
  Moves displayable traces which fall out of the offset/limit window to the
  'del' list, then frees every listed trace which has ended.

  @param purge_all  if true, unconditionally make all traces candidates for
                    deletion (used from reset() and the destructor)
*/
void Opt_trace_context::purge_stmts(bool purge_all) {
  DBUG_TRACE;
  // With a non-negative offset the window check happens up-front instead.
  if (!purge_all && pimpl->offset >= 0) {
    /* This case is managed in @c Opt_trace_context::start() */
    return;
  }
  long idx;
  static_assert(
      static_cast<long>(static_cast<size_t>(LONG_MAX)) == LONG_MAX,
      "Every positive long must be able to round-trip through size_t.");
  /*
    Start from the newest traces (array's end), scroll back in time. This
    direction is necessary, as we may delete elements from the array (assume
    purge_all=true and array has 2 elements and we traverse starting from
    index 0: cell 0 is deleted, making cell 1 become cell 0; index is
    incremented to 1, which is past the array's end, so break out of the loop:
    cell 0 (old cell 1) was not deleted, wrong).
  */
  for (idx = (pimpl->all_stmts_for_I_S.size() - 1); idx >= 0; idx--) {
    // offset can be negative, so cast size() to signed!
    if (!purge_all && ((static_cast<long>(pimpl->all_stmts_for_I_S.size()) +
                        pimpl->offset) <= idx)) {
      /* OFFSET mandates that this trace should be kept; move to previous */
    } else {
      /*
        Remember to free it (as in @c free()) when possible. For now, make it
        invisible in OPTIMIZER_TRACE table.
      */
      DBUG_EXECUTE_IF("opt_trace_oom_in_purge",
                      DBUG_SET("+d,simulate_out_of_memory"););
      if (likely(!pimpl->all_stmts_to_del.push_back(
              pimpl->all_stmts_for_I_S.at(idx))))
        pimpl->all_stmts_for_I_S.erase(idx);
      else {
        /*
          OOM. Cannot purge. Which at worse should only break the
          offset/limit feature (the trace will accidentally still show up in
          the OPTIMIZER_TRACE table). append() above has called my_error().
        */
      }
      DBUG_EXECUTE_IF("opt_trace_oom_in_purge",
                      DBUG_SET("-d,simulate_out_of_memory"););
    }
  }
  /* Examine list of "to be freed" traces and free what can be */
  for (idx = (pimpl->all_stmts_to_del.size() - 1); idx >= 0; idx--) {
    Opt_trace_stmt *stmt = pimpl->all_stmts_to_del.at(idx);
#ifndef DBUG_OFF
    bool skip_del = false;
    DBUG_EXECUTE_IF("opt_trace_oom_in_purge", skip_del = true;);
#else
    const bool skip_del = false;
#endif
    if (!stmt->has_ended() || skip_del) {
      /*
        This trace is not finished, freeing it now would lead to use of
        freed memory if a structure is later added to it. This would be
        possible: assume OFFSET=-1 and we have
        CALL statement starts executing
          create its trace (call it "trace #1")
          add structure to trace #1
          add structure to trace #1
          First sub-statement executing
            create its trace (call it "trace #2")
            from then on, trace #1 is not needed, free() it
            add structure to trace #2
            add structure to trace #2
          First sub-statement ends
          add structure to trace #1 - oops, adding to a free()d trace!
        So if a trace is not finished, we will wait until it is and
        re-consider it then (which is why this function is called in @c
        Opt_trace_stmt::end() too).

        In unit testing, to simulate OOM, we let the list grow so
        that it consumes its pre-allocated cells and finally requires a
        (failing) allocation.
      */
    } else {
      pimpl->all_stmts_to_del.erase(idx);
      delete stmt;
    }
  }
}
1105 
allowed_mem_size_for_current_stmt() const1106 size_t Opt_trace_context::allowed_mem_size_for_current_stmt() const {
1107   size_t mem_size = 0;
1108   int idx;
1109   for (idx = (pimpl->all_stmts_for_I_S.size() - 1); idx >= 0; idx--) {
1110     const Opt_trace_stmt *stmt = pimpl->all_stmts_for_I_S.at(idx);
1111     mem_size += stmt->alloced_length();
1112   }
1113   // Even to-be-deleted traces use memory, so consider them in sum
1114   for (idx = (pimpl->all_stmts_to_del.size() - 1); idx >= 0; idx--) {
1115     const Opt_trace_stmt *stmt = pimpl->all_stmts_to_del.at(idx);
1116     mem_size += stmt->alloced_length();
1117   }
1118   /* The current statement is in exactly one of the two lists above */
1119   mem_size -= pimpl->current_stmt_in_gen->alloced_length();
1120   size_t rc =
1121       (mem_size <= pimpl->max_mem_size) ? (pimpl->max_mem_size - mem_size) : 0;
1122   DBUG_PRINT("opt", ("rc %llu max_mem_size %llu", (ulonglong)rc,
1123                      (ulonglong)pimpl->max_mem_size));
1124   return rc;
1125 }
1126 
/// Forwards the statement's query text to the current trace.
/// Precondition: a current trace exists (current_stmt_in_gen non-null).
void Opt_trace_context::set_query(const char *query, size_t length,
                                  const CHARSET_INFO *charset) {
  pimpl->current_stmt_in_gen->set_query(query, length, charset);
}
1131 
reset()1132 void Opt_trace_context::reset() {
1133   if (pimpl == nullptr) return;
1134   purge_stmts(true);
1135   pimpl->since_offset_0 = 0;
1136 }
1137 
1138 void Opt_trace_context::Opt_trace_context_impl::
disable_I_S_for_this_and_children()1139     disable_I_S_for_this_and_children() {
1140   if (current_stmt_in_gen != nullptr) current_stmt_in_gen->disable_I_S();
1141 }
1142 
restore_I_S()1143 void Opt_trace_context::Opt_trace_context_impl::restore_I_S() {
1144   if (current_stmt_in_gen != nullptr) current_stmt_in_gen->restore_I_S();
1145 }
1146 
/// Marks the current trace as hidden due to a missing privilege.
void Opt_trace_context::missing_privilege() {
  /*
    By storing the 'missing_priv' mark in Opt_trace_stmt instead of in
    Opt_trace_context we get automatic re-enabling of I_S when the stmt ends,
    Opt_trace_stmt::missing_priv being the "memory" of where I_S has been
    disabled.
    Storing in Opt_trace_context would require an external memory (probably a
    RAII object), which would not be possible in
    TABLE_LIST::prepare_security(), where I_S must be disabled even after the
    end of that function - so RAII would not work.

    Which is why this function needs an existing current_stmt_in_gen.
  */
  pimpl->current_stmt_in_gen->missing_privilege();
}
1162 
get_next_stmt_for_I_S(long * got_so_far) const1163 const Opt_trace_stmt *Opt_trace_context::get_next_stmt_for_I_S(
1164     long *got_so_far) const {
1165   const Opt_trace_stmt *p;
1166   if ((pimpl == nullptr) || (*got_so_far >= pimpl->limit) ||
1167       (*got_so_far >= static_cast<long>(pimpl->all_stmts_for_I_S.size())))
1168     p = nullptr;
1169   else {
1170     p = pimpl->all_stmts_for_I_S.at(*got_so_far);
1171     DBUG_ASSERT(p != nullptr);
1172     (*got_so_far)++;
1173   }
1174   return p;
1175 }
1176 
1177 // Implementation of class Opt_trace_iterator
1178 
/// Positions the iterator on the first displayable trace of @p ctx_arg
/// (cursor becomes nullptr immediately if there is none).
Opt_trace_iterator::Opt_trace_iterator(Opt_trace_context *ctx_arg)
    : ctx(ctx_arg), row_count(0) {
  next();
}
1183 
/// Advances to the next displayable trace; cursor becomes nullptr at end.
void Opt_trace_iterator::next() {
  cursor = ctx->get_next_stmt_for_I_S(&row_count);
}
1187 
/// Fills @p info from the trace under the cursor.
/// NOTE(review): dereferences cursor unconditionally — callers presumably
/// check at_end() first; confirm before calling past the end.
void Opt_trace_iterator::get_value(Opt_trace_info *info) const {
  cursor->fill_info(info);
}
1191