/* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation.  The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA  */

/**
   @file
   Implementation of the Optimizer trace API (WL#5257)
*/

#include "opt_trace.h"
#include "mysqld.h"    // system_charset_info
#include "item.h"      // Item
#include "sql_string.h" // String
#include "m_string.h"  // _dig_vec_lower

#ifdef OPTIMIZER_TRACE

// gcc.gnu.org/bugzilla/show_bug.cgi?id=29365
namespace random_name_to_avoid_gcc_bug_29365 {
/**
  A wrapper of class String, for storing query or trace.
  Any memory allocation error in this class is reported by my_error(), see
  OOM_HANDLING in opt_trace.h.
*/
class Buffer
{
private:
  size_t allowed_mem_size;   ///< allowed memory size for this String
  size_t missing_bytes;      ///< how many bytes could not be added
  String string_buf;
public:
  Buffer() : allowed_mem_size(0), missing_bytes(0) {}

  uint32 alloced_length() const { return string_buf.alloced_length(); }
  uint32 length() const { return string_buf.length(); }
  void prealloc();    ///< pro-actively extend buffer if soon short of space
  char *c_ptr_safe() { return string_buf.c_ptr_safe(); }
  const char *ptr() const { return string_buf.ptr(); }

  const CHARSET_INFO *charset() const { return string_buf.charset(); }
  void set_charset(const CHARSET_INFO *charset)
  { string_buf.set_charset(charset); }

  /**
    Like @c String::append()
    @param  str     String, in this instance's charset
    @param  length  length of string
  */
  void append(const char *str, size_t length);
  void append(const char *str) { return append(str, strlen(str)); }
  /**
    Like @c append() but escapes certain characters for string values to
    be JSON-compliant.
    @param  str     String in UTF8
    @param  length  length of string
  */
  void append_escaped(const char *str, size_t length);
  void append(char chr);

  size_t get_allowed_mem_size() const { return allowed_mem_size; }
  size_t get_missing_bytes() const { return missing_bytes; }

  void set_allowed_mem_size(size_t a) { allowed_mem_size= a; }
};


} // namespace


using random_name_to_avoid_gcc_bug_29365::Buffer;


/**
  @class Opt_trace_stmt

  The trace of one statement. For example, executing a stored procedure
  containing 3 sub-statements will produce 4 traces (one for the CALL
  statement, one for each sub-statement), so 4 Opt_trace_stmt linked together
  into Opt_trace_context's lists.
*/
class Opt_trace_stmt
{
public:
  /**
     Constructor, starts a trace for information_schema and dbug.
     @param  ctx_arg          context
  */
  Opt_trace_stmt(Opt_trace_context *ctx_arg);

  /**
     Ends a trace; destruction may not be possible immediately as we may have
     to keep the trace in case the user later reads it from I_S.
  */
  void end();

  /// @returns whether @c end() has been called on this instance.
  bool has_ended() const { return ended; }

  /// Sets the quantity of allowed memory for this trace.
  void set_allowed_mem_size(size_t size);

  /// @sa Opt_trace_context::set_query()
  void set_query(const char* query, size_t length,
                 const CHARSET_INFO *charset);

  /* Below, functions for filling the statement's trace */

  /**
     When creating an Opt_trace_struct: adds a key and the opening bracket to
     the trace buffer, updates current_struct.
     @param  key              key or NULL
     @param  ots              structure being created
     @param  wants_disable_I_S whether structure wants to disable I_S output
     @param  opening_bracket  opening bracket to use
     @retval false ok
     @retval true  error, Opt_trace_struct must set itself to dummy; trace
     may have been written to, will likely be invalid JSON.
  */
  bool open_struct(const char *key, Opt_trace_struct *ots,
                   bool wants_disable_I_S, char opening_bracket);
  /**
     When closing an Opt_trace_struct:
     - adds the closing bracket and optionally the key to the trace buffer
     - re-enables I_S output if the dying structure had disabled it
     - updates current_struct.
     @param  saved_key        key or NULL
     @param  has_disabled_I_S whether structure had disabled I_S output
     @param  closing_bracket  closing bracket to use
  */
  void close_struct(const char *saved_key, bool has_disabled_I_S,
                    char closing_bracket);

  /// Put optional comma, newline and indentation
  void separator();
  /// Put newline and indentation
  void next_line();

  /**
     Adds a key/value pair to the trace buffer.
     @param  key  key or NULL
     @param  val  representation of value as string
     @param  val_length  length of value
     @param  quotes  should value be delimited with '"' (false when the value
     is the representation of a number, boolean or null)
     @param  escape  does value need escaping (has special characters)

     @note Structures prepare a string representation of their value-to-add
     and call this function.
  */
  void add(const char *key, const char *val, size_t val_length,
           bool quotes, bool escape);

  /* Below, functions to request information from this instance */

  /// Fills user-level information @sa Opt_trace_iterator
  void fill_info(Opt_trace_info *info) const;

  /// @returns 'size' last bytes of the trace buffer
  const char *trace_buffer_tail(size_t size);

  /// @returns total memory used by this trace
  size_t alloced_length() const
  { return trace_buffer.alloced_length() + query_buffer.alloced_length(); }

  void assert_current_struct(const Opt_trace_struct *s) const
  { DBUG_ASSERT(current_struct == s); }

  /// @see Opt_trace_context::missing_privilege()
  void missing_privilege();

  bool support_I_S() const { return I_S_disabled == 0; }

  /// Temporarily disables I_S output for this statement.
  void disable_I_S() { ++I_S_disabled; }

  /**
     Restores I_S support to what it was before the previous call
     to disable_I_S().
  */
  void restore_I_S() { --I_S_disabled; }

  /**
     Generate a dummy unique key, and return pointer to it. The pointed data
     has the lifetime of Opt_trace_stmt, and is overwritten by the next call
     to this function.
  */
  const char *make_unknown_key();

private:

  bool ended;           ///< Whether @c end() has been called on this instance

  /**
    0 <=> this trace should be in information_schema.
    In the life of an Opt_trace_stmt, support for I_S may be temporarily
    disabled.
    Once disabled, it must stay disabled until re-enabled at the same stack
    frame. This:
    Opt_trace_object1 // disables I_S
       Opt_trace_object2 // re-enables I_S
    is impossible (the top object wins).
    So it is sufficient, to keep track of the current state, to have a counter
    incremented each time we get a request to disable I_S.
  */
  int I_S_disabled;

  bool missing_priv; ///< whether user lacks privilege to see this trace

  Opt_trace_context *ctx;                       ///< context
  Opt_trace_struct *current_struct;             ///< current open structure

  /// Same logic as Opt_trace_context::stack_of_current_stmts.
  Dynamic_array<Opt_trace_struct *> stack_of_current_structs;

  Buffer trace_buffer;                    ///< Where the trace is accumulated
  Buffer query_buffer;                    ///< Where the original query is put

  /**
    Counter which serves to have unique autogenerated keys, needed if we
    autogenerate more than one key in a single object.
    @see Opt_trace_struct::check_key() and @see Opt_trace_stmt::add() .
  */
  uint unknown_key_count;
  /// Space for last autogenerated key
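  /// (24 bytes fit "unknown_key_" (12 characters), at most 10 digits of an
  /// unsigned int, and the terminating '\0'.)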
  char unknown_key[24];
};


// implementation of class Opt_trace_struct

namespace {
/// opening and closing symbols for arrays ([]) and objects ({})
const char brackets[]= { '[', '{', ']', '}' };
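// Indexed by requires_key: false (0) selects the array symbols '[' / ']',
// true (1) selects the object symbols '{' / '}' (closing ones are at +2).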
inline char opening_bracket(bool requires_key)
{
  return brackets[requires_key];
}
inline char closing_bracket(bool requires_key)
{
  return brackets[requires_key + 2];
}
} // namespace


void Opt_trace_struct::do_construct(Opt_trace_context *ctx,
                                    bool requires_key_arg,
                                    const char *key,
                                    Opt_trace_context::feature_value feature)
{
  saved_key= key;
  requires_key= requires_key_arg;

  DBUG_PRINT("opt", ("%s: starting struct", key));
  stmt= ctx->get_current_stmt_in_gen();
#ifndef DBUG_OFF
  previous_key[0]= 0;
#endif
  has_disabled_I_S= !ctx->feature_enabled(feature);
  empty= true;
  if (likely(!stmt->open_struct(key, this, has_disabled_I_S,
                                opening_bracket(requires_key))))
    started= true;
}


void Opt_trace_struct::do_destruct()
{
  DBUG_PRINT("opt", ("%s: ending struct", saved_key));
  DBUG_ASSERT(started);
  stmt->close_struct(saved_key, has_disabled_I_S,
                     closing_bracket(requires_key));
  started= false;
}


/**
   @note add() has an up-front if(), hopefully inlined, so that in the common
   case - tracing run-time disabled - we have no function call. If tracing is
   enabled, we call do_add().
   In a 20-table plan search (as in BUG#50595), the execution time was
   decreased from 2.6 to 2.0 seconds thanks to this inlined-if trick.
*/
Opt_trace_struct& Opt_trace_struct::do_add(const char *key, const char *val,
                                           size_t val_length,
                                           bool escape)
{
  DBUG_ASSERT(started);
  DBUG_PRINT("opt", ("%s: \"%.*s\"", key, (int)val_length, val));
  stmt->add(key, val, val_length, true, escape);
  return *this;
}

namespace {
/// human-readable names for boolean values
LEX_CSTRING bool_as_text[]= { { STRING_WITH_LEN("false") },
                              { STRING_WITH_LEN("true") } };
}

Opt_trace_struct& Opt_trace_struct::do_add(const char *key, bool val)
{
  DBUG_ASSERT(started);
  DBUG_PRINT("opt", ("%s: %d", key, (int)val));
  const LEX_CSTRING *text= &bool_as_text[val];
  stmt->add(key, text->str, text->length, false, false);
  return *this;
}


Opt_trace_struct& Opt_trace_struct::do_add(const char *key, longlong val)
{
  DBUG_ASSERT(started);
  char buf[22];                     // 22 is enough for digits of a 64-bit int
  llstr(val, buf);
  DBUG_PRINT("opt", ("%s: %s", key, buf));
  stmt->add(key, buf, strlen(buf), false, false);
  return *this;
}


Opt_trace_struct& Opt_trace_struct::do_add(const char *key, ulonglong val)
{
  DBUG_ASSERT(started);
  char buf[22];
  ullstr(val, buf);
  DBUG_PRINT("opt", ("%s: %s", key, buf));
  stmt->add(key, buf, strlen(buf), false, false);
  return *this;
}


Opt_trace_struct& Opt_trace_struct::do_add(const char *key, double val)
{
  DBUG_ASSERT(started);
  char buf[32];                         // 32 is enough for digits of a double
  my_snprintf(buf, sizeof(buf), "%g", val);
  DBUG_PRINT("opt", ("%s: %s", key, buf));
  stmt->add(key, buf, strlen(buf), false, false);
  return *this;
}


Opt_trace_struct& Opt_trace_struct::do_add_null(const char *key)
{
  DBUG_ASSERT(started);
  DBUG_PRINT("opt", ("%s: null", key));
  stmt->add(key, STRING_WITH_LEN("null"), false, false);
  return *this;
}


Opt_trace_struct& Opt_trace_struct::do_add(const char *key, Item *item)
{
  char buff[256];
  String str(buff,(uint32) sizeof(buff), system_charset_info);
  str.length(0);
  if (item != NULL)
  {
    // QT_TO_SYSTEM_CHARSET because trace must be in UTF8
    item->print(&str, enum_query_type(QT_TO_SYSTEM_CHARSET |
                                      QT_SHOW_SELECT_NUMBER |
                                      QT_NO_DEFAULT_DB));
    /* needs escaping */
    return do_add(key, str.ptr(), str.length(), true);
  }
  else
    return do_add_null(key);
}


Opt_trace_struct& Opt_trace_struct::do_add_hex(const char *key, uint64 val)
{
  DBUG_ASSERT(started);
  char buf[2 + 16], *p_end= buf + sizeof(buf) - 1, *p= p_end;
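  // Fill 'buf' backwards from its last byte: two hex digits per input byte,
  // least significant byte first, then prepend "0x"; val==0 yields "0x00".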
  for ( ; ; )
  {
    *p--= _dig_vec_lower[val & 15];
    *p--= _dig_vec_lower[(val & 240) >> 4];
    val>>= 8;
    if (val == 0)
      break;
  }
  *p--= 'x';
  *p= '0';
  const int len= p_end + 1 - p;
  DBUG_PRINT("opt", ("%s: %.*s", key, len, p));
  stmt->add(check_key(key), p, len, false, false);
  return *this;
}


Opt_trace_struct& Opt_trace_struct::do_add_utf8_table(const TABLE *tab)
{
  TABLE_LIST * const tl= tab->pos_in_table_list;
  if (tl != NULL)
  {
    StringBuffer<32> str;
    tl->print(tab->in_use, &str, enum_query_type(QT_TO_SYSTEM_CHARSET |
                                                 QT_SHOW_SELECT_NUMBER |
                                                 QT_NO_DEFAULT_DB |
                                                 QT_DERIVED_TABLE_ONLY_ALIAS));
    return do_add("table", str.ptr(), str.length(), true);
  }
  return *this;
}


const char *Opt_trace_struct::check_key(const char *key)
{
  DBUG_ASSERT(started);
  //  User should always add to the innermost open object, not outside.
  stmt->assert_current_struct(this);
  bool has_key= key != NULL;
  if (unlikely(has_key != requires_key))
  {
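    /*
      Inside an object a key is mandatory, inside an array it is forbidden;
      e.g. an add(NULL, ...) on an object gets an autogenerated
      "unknown_key_N" key, and an add("k", ...) on an array drops its key.
    */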
    // fix the key to produce correct JSON syntax:
    key= has_key ? NULL : stmt->make_unknown_key();
    has_key= !has_key;
  }
  if (has_key)
  {
#ifndef DBUG_OFF
    /*
      Check that we are not adding two identical consecutive keys to one
      object. The real restriction is on any duplicate key in the object,
      not only consecutive ones, but we only detect the consecutive case.
    */
    DBUG_ASSERT(strncmp(previous_key, key, sizeof(previous_key) - 1) != 0);
    strncpy(previous_key, key, sizeof(previous_key) - 1);
    previous_key[sizeof(previous_key) - 1]= 0;
#endif
  }
  return key;
}


// Implementation of Opt_trace_stmt class

Opt_trace_stmt::Opt_trace_stmt(Opt_trace_context *ctx_arg) :
  ended(false), I_S_disabled(0), missing_priv(false), ctx(ctx_arg),
  current_struct(NULL), unknown_key_count(0)
{
  // Trace is always in UTF8. This is the only charset which JSON accepts.
  trace_buffer.set_charset(system_charset_info);
  DBUG_ASSERT(system_charset_info == &my_charset_utf8_general_ci);
}


void Opt_trace_stmt::end()
{
  DBUG_ASSERT(stack_of_current_structs.elements() == 0);
  DBUG_ASSERT(I_S_disabled >= 0);
  ended= true;
  /*
    Because allocation is done in big chunks, buffer->Ptr[str_length]
    may be uninitialized while buffer->Ptr[allocated length] is 0, so we
    must use c_ptr_safe() as we want a 0-terminated string (which is easier
    to manipulate in a debugger, or to compare in unit tests with
    EXPECT_STREQ).
    c_ptr_safe() may realloc an empty String from 0 bytes to 8 bytes,
    when it adds the closing \0.
  */
  trace_buffer.c_ptr_safe();
  // Send the full nice trace to DBUG.
  DBUG_EXECUTE("opt",
               {
                 const char *trace= trace_buffer.c_ptr_safe();
                 DBUG_LOCK_FILE;
                 fputs("Complete optimizer trace:", DBUG_FILE);
                 fputs(trace, DBUG_FILE);
                 fputs("\n", DBUG_FILE);
                 DBUG_UNLOCK_FILE;
               }
               );
  if (unlikely(missing_priv))
    ctx->restore_I_S();
}


void Opt_trace_stmt::set_allowed_mem_size(size_t size)
{
  trace_buffer.set_allowed_mem_size(size);
}


void Opt_trace_stmt::set_query(const char *query, size_t length,
                               const CHARSET_INFO *charset)
{
  // Should be called only once per statement.
  DBUG_ASSERT(query_buffer.ptr() == NULL);
  query_buffer.set_charset(charset);
  if (!support_I_S())
  {
    /*
      Query won't be read, don't waste resources storing it. Still we have set
      the charset, which is necessary.
    */
    return;
  }
  // We are taking a bit of space from 'trace_buffer'.
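  /*
    Example: with allowed_mem_size=100KB and 10KB already allocated for the
    trace, the query may use up to ~90KB; whatever the query actually
    allocates is then deducted from the trace's own allowance below.
  */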
  size_t available=
    (trace_buffer.alloced_length() >= trace_buffer.get_allowed_mem_size()) ?
    0 : (trace_buffer.get_allowed_mem_size() - trace_buffer.alloced_length());
  query_buffer.set_allowed_mem_size(available);
  // No need to escape query, this is not for JSON.
  query_buffer.append(query, length);
  // Space which query took is taken out of the trace:
  const size_t new_allowed_mem_size=
    (query_buffer.alloced_length() >= trace_buffer.get_allowed_mem_size()) ?
    0 : (trace_buffer.get_allowed_mem_size() - query_buffer.alloced_length());
  trace_buffer.set_allowed_mem_size(new_allowed_mem_size);
}


bool Opt_trace_stmt::open_struct(const char *key, Opt_trace_struct *ots,
                                 bool wants_disable_I_S,
                                 char opening_bracket)
{
  if (support_I_S())
  {
    if (wants_disable_I_S)
    {

      /*
        User requested no tracing for this structure's feature. We are
        entering a disabled portion; put an ellipsis "..." to alert the user.
        Disabling applies to all the structure's children.
        It is possible that inside this struct, a new statement is created
        (range optimizer can evaluate stored functions...): its tracing is
        disabled too.
        When the structure is destroyed, the initial setting is restored.
      */
      if (current_struct != NULL)
      {
        if (key != NULL)
          current_struct->add_alnum(key, "...");
        else
          current_struct->add_alnum("...");
      }
    }
    else
    {
      trace_buffer.prealloc();
      add(key, &opening_bracket, 1, false, false);
    }
  }
  if (wants_disable_I_S)
    ctx->disable_I_S_for_this_and_children();
  {
    DBUG_EXECUTE_IF("opt_trace_oom_in_open_struct",
                    DBUG_SET("+d,simulate_out_of_memory"););
    const bool rc= stack_of_current_structs.append(current_struct);
    /*
      If the append() above didn't trigger reallocation, we need to turn the
      symbol off by ourselves, or it could make an unrelated allocation
      fail.
    */
    DBUG_EXECUTE_IF("opt_trace_oom_in_open_struct",
                    DBUG_SET("-d,simulate_out_of_memory"););
    if (unlikely(rc))
      return true;
  }
  current_struct= ots;
  return false;
}


void Opt_trace_stmt::close_struct(const char *saved_key,
                                  bool has_disabled_I_S,
                                  char closing_bracket)
{
  /*
    This statement was constructed with current_struct=NULL, which was pushed
    in 'open_struct()'. So this NULL is in the array, back() is safe.
  */
  current_struct= *(stack_of_current_structs.back());
  stack_of_current_structs.pop();
  if (support_I_S())
  {
    next_line();
    trace_buffer.append(closing_bracket);
    if (ctx->get_end_marker() && saved_key != NULL)
    {
      trace_buffer.append(STRING_WITH_LEN(" /* "));
      trace_buffer.append(saved_key);
      trace_buffer.append(STRING_WITH_LEN(" */"));
    }
  }
  if (has_disabled_I_S)
    ctx->restore_I_S();
}


void Opt_trace_stmt::separator()
{
  DBUG_ASSERT(support_I_S());
  // Put a comma first, if we have already written an object at this level.
  if (current_struct != NULL)
  {
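    /*
      set_not_empty() (declared in opt_trace.h) reports whether the struct
      was still empty while marking it as non-empty; so the comma is added
      only if something was already written inside it.
    */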
    if (!current_struct->set_not_empty())
      trace_buffer.append(',');
    next_line();
  }
}


namespace {
const char my_spaces[] =
  "                                                                "
  "                                                                "
  "                                                                "
  ;
}


void Opt_trace_stmt::next_line()
{
  if (ctx->get_one_line())
    return;
  trace_buffer.append('\n');

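  // Indent by two spaces per currently open structure; my_spaces is written
  // in chunks if the nesting depth exceeds its length.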
  uint to_be_printed= 2 * stack_of_current_structs.elements();
  const size_t spaces_len= sizeof(my_spaces) - 1;
  while (to_be_printed > spaces_len)
  {
    trace_buffer.append(my_spaces, spaces_len);
    to_be_printed-= spaces_len;
  }
  trace_buffer.append(my_spaces, to_be_printed);
}


const char *Opt_trace_stmt::make_unknown_key()
{
  my_snprintf(unknown_key, sizeof(unknown_key),
              "unknown_key_%u", ++unknown_key_count);
  return unknown_key;
}


void Opt_trace_stmt::add(const char *key, const char *val, size_t val_length,
                         bool quotes, bool escape)
{
  if (!support_I_S())
    return;
  separator();
  if (current_struct != NULL)
    key= current_struct->check_key(key);
  if (key != NULL)
  {
    trace_buffer.append('"');
    trace_buffer.append(key);
    trace_buffer.append(STRING_WITH_LEN("\": "));
  }
  if (quotes)
    trace_buffer.append('"');
  /*
    Objects' keys use "normal" characters (A-Za-z0-9_), no escaping
    needed. Same for numeric/bool values. Only string values may need
    escaping.
  */
  if (escape)
    trace_buffer.append_escaped(val, val_length);
  else
    trace_buffer.append(val, val_length);
  if (quotes)
    trace_buffer.append('"');
}


void Opt_trace_stmt::fill_info(Opt_trace_info *info) const
{
  if (unlikely(info->missing_priv= missing_priv))
  {
    info->trace_ptr= info->query_ptr= "";
    info->trace_length= info->query_length= 0;
    info->query_charset= &my_charset_bin;
    info->missing_bytes= 0;
  }
  else
  {
    info->trace_ptr=     trace_buffer.ptr();
    info->trace_length=  trace_buffer.length();
    info->query_ptr=     query_buffer.ptr();
    info->query_length=  query_buffer.length();
    info->query_charset= query_buffer.charset();
    info->missing_bytes= trace_buffer.get_missing_bytes() +
      query_buffer.get_missing_bytes();
  }
}


const char *Opt_trace_stmt::trace_buffer_tail(size_t size)
{
  size_t buffer_len= trace_buffer.length();
  const char *ptr= trace_buffer.c_ptr_safe();
  if (buffer_len > size)
    ptr+= buffer_len - size;
  return ptr;
}


void Opt_trace_stmt::missing_privilege()
{
  if (!missing_priv)
  {
    DBUG_PRINT("opt", ("trace denied"));
    // This mark will make the trace appear empty in OPTIMIZER_TRACE table.
    missing_priv= true;
    // And all substatements will not be traced.
    ctx->disable_I_S_for_this_and_children();
  }
}


// Implementation of class Buffer

namespace random_name_to_avoid_gcc_bug_29365 {

void Buffer::append_escaped(const char *str, size_t length)
{
  if (alloced_length() >= allowed_mem_size)
  {
    missing_bytes+= length;
    return;
  }
  const char *pstr, *pstr_end;
  char buf[128];                     // Temporary output buffer.
  char *pbuf= buf;
  for (pstr= str, pstr_end= (str + length) ; pstr < pstr_end ; pstr++)
  {
    char esc;
    const char c= *pstr;
    /*
      JSON syntax says that control characters must be escaped. Experience
      confirms that this means ASCII 0->31 and " and \ . A few of
      them are accepted with a short escaping syntax (using \ : like \n)
      but for most of them, only \uXXXX works, where XXXX is a
      hexadecimal value for the code point.
      Rules also mention escaping / , but Python's and Perl's json modules
      do not require it, and somewhere on the Internet someone said JSON
      allows escaping of / but does not require it.

      Because UTF8 has the same characters in range 0-127 as ASCII does, and
      other UTF8 characters don't contain 0-127 bytes, if we see a byte
      equal to 0 it is really the UTF8 u0000 character (a.k.a. ASCII NUL)
      and not a part of a longer character; if we see a newline, same,
      etc. That wouldn't necessarily be true with another character set.
    */
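    // Examples: '"' -> \" , '\n' -> \n , ASCII 0x1A -> \u001a .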
    switch (c)
    {
      // Don't use \u when possible for common chars, \ is easier to read:
    case '\\': esc= '\\'; break;
    case '"' : esc= '\"'; break;
    case '\n': esc= 'n' ; break;
    case '\r': esc= 'r' ; break;
    case '\t': esc= 't' ; break;
    default  : esc= 0   ; break;
    }
    if (esc != 0)                           // Escaping with backslash.
    {
      *pbuf++= '\\';
      *pbuf++= esc;
    }
    else
    {
      uint ascii_code= (uint)c;
      if (ascii_code < 32)                  // Escaping with \u
      {
        *pbuf++= '\\';
        *pbuf++= 'u';
        *pbuf++= '0';
        *pbuf++= '0';
        if (ascii_code < 16)
        {
          *pbuf++= '0';
        }
        else
        {
          *pbuf++= '1';
          ascii_code-= 16;
        }
        *pbuf++= _dig_vec_lower[ascii_code];
      }
      else
        *pbuf++= c; // Normal character, no escaping needed.
    }
    /*
      To fit a next character, we need at most 6 bytes (happens when using
      \uXXXX syntax) before the buffer's end:
    */
    if (pbuf > buf + (sizeof(buf) - 6))
    {
      // Possibly no room in 'buf' for next char, so flush buf.
      string_buf.append(buf, static_cast<uint32>(pbuf - buf));
      pbuf= buf; // back to buf's start
    }
  }
  // Flush any chars left in 'buf'.
  string_buf.append(buf, static_cast<uint32>(pbuf - buf));
}


void Buffer::append(const char *str, size_t length)
{
  if (alloced_length() >= allowed_mem_size)
  {
    missing_bytes+= length;
    return;
  }
  DBUG_EXECUTE_IF("opt_trace_oom_in_buffers",
                  DBUG_SET("+d,simulate_out_of_memory"););
  string_buf.append(str, static_cast<uint32>(length));
  DBUG_EXECUTE_IF("opt_trace_oom_in_buffers",
                  DBUG_SET("-d,simulate_out_of_memory"););
}


void Buffer::append(char chr)
{
  if (alloced_length() >= allowed_mem_size)
  {
    missing_bytes++;
    return;
  }
  // No need for escaping chr, given how this function is used.
  string_buf.append(chr);
}


void Buffer::prealloc()
{
  const size_t alloced=   alloced_length();
  const size_t first_increment= 1024;
  if ((alloced - length()) < (first_increment / 3))
  {
    /*
      Support for I_S will produce long strings, and there is little free
      space left in the allocated buffer, so it looks like realloc is soon
      unavoidable; let's get many bytes at a time.
      Note that if this re-allocation, or any String::append(), fails, we
      will get a weird trace; either truncated if the server stops, or maybe
      with a hole if there is later memory again for the trace's
      continuation. The statement will fail anyway due to my_error(), in the
      server.
      We jump from 0 to first_increment and then multiply by 1.5. Unlike
      addition of a constant length, multiplying is expected to give amortized
      constant reallocation time; 1.5 is a commonly seen factor in the
      literature.
    */
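    // Growth example (ignoring String's own rounding): 0 -> 1024 -> 1536
    // -> 2304 -> 3456 -> ..., capped at allowed_mem_size minus the margin.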
    size_t new_size= (alloced == 0) ? first_increment : (alloced * 15 / 10);
    size_t max_size= allowed_mem_size;
    /*
      Determine a safety margin:
      (A) String::realloc() adds at most ALIGN_SIZE(1) bytes to requested
      length, so we need to decrement max_size by this amount, to be sure that
      we don't allocate more than max_size
      (B) We need to stay at least one byte under that max_size, or the next
      append() would trigger up-front truncation, which is potentially wrong
      for a "pre-emptive allocation" as we do here.
    */
    const size_t safety_margin= ALIGN_SIZE(1) /* (A) */ + 1 /* (B) */;
    if (max_size >= safety_margin)
    {
      max_size-= safety_margin;
      if (new_size > max_size) // Don't pre-allocate more than the limit.
        new_size= max_size;
      if (new_size >= alloced) // Never shrink string.
        string_buf.realloc(static_cast<uint32>(new_size));
    }
  }
}

} // namespace


// Implementation of Opt_trace_context class

const char *Opt_trace_context::flag_names[]=
{
  "enabled", "one_line", "default", NullS
};

const char *Opt_trace_context::feature_names[]=
{
  "greedy_search", "range_optimizer", "dynamic_range",
  "repeated_subselect", "default", NullS
};

const Opt_trace_context::feature_value
Opt_trace_context::default_features=
  Opt_trace_context::feature_value(Opt_trace_context::GREEDY_SEARCH |
                                   Opt_trace_context::RANGE_OPTIMIZER |
                                   Opt_trace_context::DYNAMIC_RANGE   |
                                   Opt_trace_context::REPEATED_SUBSELECT);


Opt_trace_context::~Opt_trace_context()
{
  if (unlikely(pimpl != NULL))
  {
    /* There may well be a few ended traces left: */
    purge_stmts(true);
    /* All should have moved to 'del' list: */
    DBUG_ASSERT(pimpl->all_stmts_for_I_S.elements() == 0);
    /* All of 'del' list should have been deleted: */
    DBUG_ASSERT(pimpl->all_stmts_to_del.elements() == 0);
    delete pimpl;
  }
}


template<class T> T * new_nothrow_w_my_error()
{
  T * const t= new (std::nothrow) T();
  if (unlikely(t == NULL))
    my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
             static_cast<int>(sizeof(T)));
  return t;
}
template<class T, class Arg> T * new_nothrow_w_my_error(Arg a)
{
  T * const t= new (std::nothrow) T(a);
  if (unlikely(t == NULL))
    my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
             static_cast<int>(sizeof(T)));
  return t;
}


bool Opt_trace_context::start(bool support_I_S_arg,
                              bool support_dbug_or_missing_priv,
                              bool end_marker_arg, bool one_line_arg,
                              long offset_arg, long limit_arg,
                              ulong max_mem_size_arg, ulonglong features_arg)
{
  DBUG_ENTER("Opt_trace_context::start");

  if (I_S_disabled != 0)
  {
    DBUG_PRINT("opt", ("opt_trace is already disabled"));
    support_I_S_arg= false;
  }

  /*
    Decide on optimizations possible to realize the requested support.
    If I_S or debug output is requested, we need to create an Opt_trace_stmt.
    Same if we should support calls to Opt_trace_context::missing_privilege(),
    because that function requires an Opt_trace_stmt.
  */
  if (!support_I_S_arg && !support_dbug_or_missing_priv)
  {
    // The statement will not do tracing.
    if (likely(pimpl == NULL) || pimpl->current_stmt_in_gen == NULL)
    {
      /*
        This should be the most commonly taken branch in a release binary,
        when the connection rarely has optimizer tracing runtime-enabled.
        It's thus important that it's optimized: we can short-cut the creation
        and starting of Opt_trace_stmt, unlike in the next "else" branch.
      */
      DBUG_RETURN(false);
    }
    /*
      If we come here, there is a parent statement which has a trace.
      Imagine that we don't create a trace for the child statement
      here. Then trace structures of the child will be accidentally attached
      to the parent's trace (as it is still 'current_stmt_in_gen', which
      constructors of Opt_trace_struct will use); thus the child's trace
      will be visible (as a chunk of the parent's trace). That would be
      incorrect. To avoid this, we create a trace for the child but with I_S
      output disabled; this changes 'current_stmt_in_gen', thus this child's
      trace structures will be attached to the child's trace and thus not be
      visible.
    */
  }

  DBUG_EXECUTE_IF("no_new_opt_trace_stmt", DBUG_ASSERT(0););

  if (pimpl == NULL &&
      ((pimpl= new_nothrow_w_my_error<Opt_trace_context_impl>()) == NULL))
    DBUG_RETURN(true);

  /*
    If tracing is disabled by some caller, then don't change settings (offset
    etc). Doing otherwise would surely bring a problem.
  */
  if (I_S_disabled == 0)
  {
    /*
      Here we allow a stored routine's sub-statement to enable/disable
      tracing, or change settings. Thus in a stored routine's body, there can
      be some 'SET OPTIMIZER_TRACE="enabled=[on|off]"' to trace only certain
      sub-statements.
    */
    pimpl->end_marker= end_marker_arg;
    pimpl->one_line= one_line_arg;
    pimpl->offset= offset_arg;
    pimpl->limit= limit_arg;
    pimpl->max_mem_size= max_mem_size_arg;
    // MISC always on
    pimpl->features= Opt_trace_context::feature_value(features_arg |
                                                     Opt_trace_context::MISC);
  }
  if (support_I_S_arg && pimpl->offset >= 0)
  {
    /* If outside the offset/limit window, no need to support I_S */
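    /*
      Example: offset=1, limit=2 keeps only the statements whose
      since_offset_0 is 1 or 2, i.e. the 2nd and 3rd traced statements.
    */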
    if (pimpl->since_offset_0 < pimpl->offset)
    {
      DBUG_PRINT("opt", ("disabled: since_offset_0(%ld) < offset(%ld)",
                         pimpl->since_offset_0, pimpl->offset));
      support_I_S_arg= false;
    }
    else if (pimpl->since_offset_0 >= (pimpl->offset + pimpl->limit))
    {
      DBUG_PRINT("opt", ("disabled: since_offset_0(%ld) >="
                         " offset(%ld) + limit(%ld)",
                         pimpl->since_offset_0, pimpl->offset, pimpl->limit));
      support_I_S_arg= false;
    }
    pimpl->since_offset_0++;
  }
  {
    /*
      We don't allocate it in THD's MEM_ROOT as it must survive until a next
      statement (SELECT) reads the trace.
    */
    Opt_trace_stmt *stmt= new_nothrow_w_my_error<Opt_trace_stmt>(this);

    DBUG_PRINT("opt",("new stmt %p support_I_S %d", stmt, support_I_S_arg));

    if (unlikely(stmt == NULL ||
                 pimpl->stack_of_current_stmts
                 .append(pimpl->current_stmt_in_gen)))
      goto err;                            // append() above called my_error()

    /*
      If sending only to DBUG, don't show to the user.
      Same if tracing was temporarily disabled at higher layers with
      Opt_trace_disable_I_S.
      So we just link it to the 'del' list for purging when ended.
    */
    Dynamic_array<Opt_trace_stmt *> *list;
    if (support_I_S_arg)
      list= &pimpl->all_stmts_for_I_S;
    else
    {
      stmt->disable_I_S();           // no need to fill a not-shown JSON trace
      list= &pimpl->all_stmts_to_del;
    }

    if (unlikely(list->append(stmt)))
        goto err;

    pimpl->current_stmt_in_gen= stmt;

    // As we just added one trace, maybe the previous ones are unneeded now
    purge_stmts(false);
    // This purge may have freed space, compute max allowed size:
    stmt->set_allowed_mem_size(allowed_mem_size_for_current_stmt());
    DBUG_RETURN(false);
err:
    delete stmt;
    DBUG_ASSERT(0);
    DBUG_RETURN(true);
  }
}


void Opt_trace_context::end()
{
  DBUG_ASSERT(I_S_disabled >= 0);
  if (likely(pimpl == NULL))
    return;
  if (pimpl->current_stmt_in_gen != NULL)
  {
    pimpl->current_stmt_in_gen->end();
    /*
      pimpl was constructed with current_stmt_in_gen=NULL which was pushed in
      'start()'. So this NULL is in the array, back() is safe.
    */
    Opt_trace_stmt * const parent= *(pimpl->stack_of_current_stmts.back());
    pimpl->stack_of_current_stmts.pop();
    pimpl->current_stmt_in_gen= parent;
    if (parent != NULL)
    {
      /*
        Parent regains control, now it needs to be told that its child has
        used space, and thus parent's allowance has shrunk.
      */
      parent->set_allowed_mem_size(allowed_mem_size_for_current_stmt());
    }
    /*
      Purge again. Indeed when we are here, compared to the previous start()
      we have one more ended trace, so can potentially free more. Consider
      offset=-1 and:
         top_stmt, started
           sub_stmt, starts: can't free top_stmt as it is not ended yet
           sub_stmt, ends: won't free sub_stmt (as user will want to see it),
           can't free top_stmt as not ended yet
         top_stmt, continued
         top_stmt, ends: free top_stmt as it's not last and is ended, keep
         only sub_stmt.
      Still the purge is done in ::start() too, as an optimization, for this
      case:
         sub_stmt, started
         sub_stmt, ended
         sub_stmt, starts: can free above sub_stmt, will save memory compared
         to free-ing it only when the new sub_stmt ends.
    */
    purge_stmts(false);
  }
  else
    DBUG_ASSERT(pimpl->stack_of_current_stmts.elements() == 0);
}


bool Opt_trace_context::support_I_S() const
{
  return (pimpl != NULL) && (pimpl->current_stmt_in_gen != NULL) &&
    pimpl->current_stmt_in_gen->support_I_S();
}


void Opt_trace_context::purge_stmts(bool purge_all)
{
  DBUG_ENTER("Opt_trace_context::purge_stmts");
  if (!purge_all && pimpl->offset >= 0)
  {
    /* This case is managed in @c Opt_trace_context::start() */
    DBUG_VOID_RETURN;
  }
  long idx;
  /*
    Start from the newest traces (array's end), scroll back in time. This
    direction is necessary, as we may delete elements from the array (assume
    purge_all=true and array has 2 elements and we traverse starting from
    index 0: cell 0 is deleted, making cell 1 become cell 0; index is
    incremented to 1, which is past the array's end, so break out of the loop:
    cell 0 (old cell 1) was not deleted, wrong).
  */
  for (idx= (pimpl->all_stmts_for_I_S.elements() - 1) ; idx >= 0 ; idx--)
  {
    if (!purge_all &&
        ((pimpl->all_stmts_for_I_S.elements() + pimpl->offset) <= idx))
    {
      /* OFFSET mandates that this trace should be kept; move to previous */
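      /*
        Example: offset=-3 with 5 traces keeps indexes 2, 3 and 4, i.e. the
        three newest traces.
      */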
    }
    else
    {
      /*
        Remember to free it (as in @c free()) when possible. For now, make it
        invisible in OPTIMIZER_TRACE table.
      */
      DBUG_EXECUTE_IF("opt_trace_oom_in_purge",
                      DBUG_SET("+d,simulate_out_of_memory"););
      if (likely(!pimpl->all_stmts_to_del
                 .append(pimpl->all_stmts_for_I_S.at(idx))))
        pimpl->all_stmts_for_I_S.del(idx);
      else
      {
        /*
          OOM. Cannot purge. Which at worst should only break the
          offset/limit feature (the trace will accidentally still show up in
          the OPTIMIZER_TRACE table). append() above has called my_error().
        */
      }
      DBUG_EXECUTE_IF("opt_trace_oom_in_purge",
                      DBUG_SET("-d,simulate_out_of_memory"););
    }
  }
  /* Examine list of "to be freed" traces and free what can be */
  for (idx= (pimpl->all_stmts_to_del.elements() - 1) ; idx >= 0 ; idx--)
  {
    Opt_trace_stmt *stmt= pimpl->all_stmts_to_del.at(idx);
#ifndef DBUG_OFF
    bool skip_del= false;
    DBUG_EXECUTE_IF("opt_trace_oom_in_purge", skip_del= true;);
#else
    const bool skip_del= false;
#endif
    if (!stmt->has_ended() || skip_del)
    {
      /*
        This trace is not finished, freeing it now would lead to use of
        freed memory if a structure is later added to it. This would be
        possible: assume OFFSET=-1 and we have
        CALL statement starts executing
          create its trace (call it "trace #1")
          add structure to trace #1
          add structure to trace #1
          First sub-statement executing
            create its trace (call it "trace #2")
            from then on, trace #1 is not needed, free() it
            add structure to trace #2
            add structure to trace #2
          First sub-statement ends
          add structure to trace #1 - oops, adding to a free()d trace!
        So if a trace is not finished, we will wait until it is and
        re-consider it then (which is why this function is called in @c
        Opt_trace_context::end() too).

        In unit testing, to simulate OOM, we let the list grow so
        that it consumes its pre-allocated cells and finally requires a
        (failing) allocation.
      */
    }
    else
    {
      pimpl->all_stmts_to_del.del(idx);
      delete stmt;
    }
  }
  DBUG_VOID_RETURN;
}


size_t Opt_trace_context::allowed_mem_size_for_current_stmt() const
{
  size_t mem_size= 0;
  int idx;
  for (idx= (pimpl->all_stmts_for_I_S.elements() - 1) ; idx >= 0 ; idx--)
  {
    const Opt_trace_stmt *stmt= pimpl->all_stmts_for_I_S.at(idx);
    mem_size+= stmt->alloced_length();
  }
  // Even to-be-deleted traces use memory, so consider them in sum
  for (idx= (pimpl->all_stmts_to_del.elements() - 1) ; idx >= 0 ; idx--)
  {
    const Opt_trace_stmt *stmt= pimpl->all_stmts_to_del.at(idx);
    mem_size+= stmt->alloced_length();
  }
  /* The current statement is in exactly one of the two lists above */
  mem_size-= pimpl->current_stmt_in_gen->alloced_length();
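  // E.g. max_mem_size=16384 with 10000 bytes already used by other traces
  // leaves 6384 bytes for the current statement; if other traces already
  // exceed the limit, the allowance is 0.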
  size_t rc= (mem_size <= pimpl->max_mem_size) ?
    (pimpl->max_mem_size - mem_size) : 0;
  DBUG_PRINT("opt", ("rc %llu max_mem_size %llu",
                     (ulonglong)rc, (ulonglong)pimpl->max_mem_size));
  return rc;
}


void Opt_trace_context::set_query(const char *query, size_t length,
                                  const CHARSET_INFO *charset)
{
  pimpl->current_stmt_in_gen->set_query(query, length, charset);
}


void Opt_trace_context::reset()
{
  if (pimpl == NULL)
    return;
  purge_stmts(true);
  pimpl->since_offset_0= 0;
}


void Opt_trace_context::
Opt_trace_context_impl::disable_I_S_for_this_and_children()
{
  if (current_stmt_in_gen != NULL)
    current_stmt_in_gen->disable_I_S();
}


void Opt_trace_context::Opt_trace_context_impl::restore_I_S()
{
  if (current_stmt_in_gen != NULL)
    current_stmt_in_gen->restore_I_S();
}


void Opt_trace_context::missing_privilege()
{
  /*
    By storing the 'missing_priv' mark in Opt_trace_stmt instead of in
    Opt_trace_context we get automatic re-enabling of I_S when the stmt ends,
    Opt_trace_stmt::missing_priv being the "memory" of where I_S has been
    disabled.
    Storing in Opt_trace_context would require an external memory (probably a
    RAII object), which would not be possible in
    TABLE_LIST::prepare_security(), where I_S must be disabled even after the
    end of that function - so RAII would not work.

    Which is why this function needs an existing current_stmt_in_gen.
  */
  pimpl->current_stmt_in_gen->missing_privilege();
}


const Opt_trace_stmt
*Opt_trace_context::get_next_stmt_for_I_S(long *got_so_far) const
{
  const Opt_trace_stmt *p;
  if ((pimpl == NULL) ||
      (*got_so_far >= pimpl->limit) ||
      (*got_so_far >= pimpl->all_stmts_for_I_S.elements()))
    p= NULL;
  else
  {
    p= pimpl->all_stmts_for_I_S.at(*got_so_far);
    DBUG_ASSERT(p != NULL);
    (*got_so_far)++;
  }
  return p;
}


// Implementation of class Opt_trace_iterator

Opt_trace_iterator::Opt_trace_iterator(Opt_trace_context *ctx_arg) :
  ctx(ctx_arg), row_count(0)
{
  next();
}

void Opt_trace_iterator::next()
{
  cursor= ctx->get_next_stmt_for_I_S(&row_count);
}


void Opt_trace_iterator::get_value(Opt_trace_info *info) const
{
  cursor->fill_info(info);
}

#endif // OPTIMIZER_TRACE