1 /* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software Foundation,
21 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
22
23 /**
24 @file storage/perfschema/pfs_instr.cc
25 Performance schema instruments (implementation).
26 */
27
28 #include <my_global.h>
29 #include <string.h>
30
31 #include "my_sys.h"
32 #include "pfs.h"
33 #include "pfs_stat.h"
34 #include "pfs_instr.h"
35 #include "pfs_host.h"
36 #include "pfs_user.h"
37 #include "pfs_account.h"
38 #include "pfs_global.h"
39 #include "pfs_instr_class.h"
40
41 /**
42 @addtogroup Performance_schema_buffers
43 @{
44 */
45
/** Size of the mutex instances array. @sa mutex_array */
ulong mutex_max;
/** True when @c mutex_array is full. */
bool mutex_full;
/** Number of mutex instances lost. @sa mutex_array */
ulong mutex_lost;
/** Size of the rwlock instances array. @sa rwlock_array */
ulong rwlock_max;
/** True when @c rwlock_array is full. */
bool rwlock_full;
/** Number of rwlock instances lost. @sa rwlock_array */
ulong rwlock_lost;
/** Size of the condition instances array. @sa cond_array */
ulong cond_max;
/** True when @c cond_array is full. */
bool cond_full;
/** Number of condition instances lost. @sa cond_array */
ulong cond_lost;
/** Size of the thread instances array. @sa thread_array */
ulong thread_max;
/** True when @c thread_array is full. */
bool thread_full;
/** Number of thread instances lost. @sa thread_array */
ulong thread_lost;
/** Size of the file instances array. @sa file_array */
ulong file_max;
/** True when @c file_array is full. */
bool file_full;
/** Number of file instances lost. @sa file_array */
ulong file_lost;
/**
  Size of the file handle array. @sa file_handle_array.
  Signed value, for easier comparisons with a file descriptor number.
*/
long file_handle_max;
/** True when @c file_handle_array is full. */
bool file_handle_full;
/** Number of file handles lost. @sa file_handle_array */
ulong file_handle_lost;
/** Size of the table instances array. @sa table_array */
ulong table_max;
/** True when @c table_array is full. */
bool table_full;
/** Number of table instances lost. @sa table_array */
ulong table_lost;
/** Size of the socket instances array. @sa socket_array */
ulong socket_max;
/** True when @c socket_array is full. */
bool socket_full;
/** Number of socket instances lost. @sa socket_array */
ulong socket_lost;
/** Number of EVENTS_WAITS_HISTORY records per thread. */
ulong events_waits_history_per_thread;
/** Number of EVENTS_STAGES_HISTORY records per thread. */
ulong events_stages_history_per_thread;
/** Number of EVENTS_STATEMENTS_HISTORY records per thread. */
ulong events_statements_history_per_thread;
/** Max size of the statement stack (nested statements), per thread. */
uint statement_stack_max;
/** Size in bytes of the digest token storage, per statement. */
size_t pfs_max_digest_length= 0;
/** Number of locker lost. @sa LOCKER_STACK_SIZE. */
ulong locker_lost= 0;
/** Number of statement lost. @sa STATEMENT_STACK_SIZE. */
ulong statement_lost= 0;
/** Size of connection attribute storage per thread. */
ulong session_connect_attrs_size_per_thread;
/** Number of connection attributes lost. */
ulong session_connect_attrs_lost= 0;
113
/**
  Mutex instrumentation instances array.
  @sa mutex_max
  @sa mutex_lost
*/
PFS_mutex *mutex_array= NULL;

/**
  RWLock instrumentation instances array.
  @sa rwlock_max
  @sa rwlock_lost
*/
PFS_rwlock *rwlock_array= NULL;

/**
  Condition instrumentation instances array.
  @sa cond_max
  @sa cond_lost
*/
PFS_cond *cond_array= NULL;

/**
  Thread instrumentation instances array.
  @sa thread_max
  @sa thread_lost
*/
PFS_thread *thread_array= NULL;

/**
  File instrumentation instances array.
  @sa file_max
  @sa file_lost
  @sa filename_hash
*/
PFS_file *file_array= NULL;

/**
  File instrumentation handle array.
  @sa file_handle_max
  @sa file_handle_lost
*/
PFS_file **file_handle_array= NULL;

/**
  Table instrumentation instances array.
  @sa table_max
  @sa table_lost
*/
PFS_table *table_array= NULL;

/**
  Socket instrumentation instances array.
  @sa socket_max
  @sa socket_lost
*/
PFS_socket *socket_array= NULL;

/** Per stage class statistics, one entry per stage class. @sa stage_class_max */
PFS_stage_stat *global_instr_class_stages_array= NULL;
/** Per statement class statistics, one entry per statement class. @sa statement_class_max */
PFS_statement_stat *global_instr_class_statements_array= NULL;

/** Monotonic counter, used to assign @c PFS_thread::m_thread_internal_id. */
static volatile uint64 thread_internal_id_counter= 0;

/** Size of @c thread_instr_class_waits_array (thread_max * wait_class_max elements). */
static uint thread_instr_class_waits_sizing;
/** Size of @c thread_instr_class_stages_array (thread_max * stage class sizing elements). */
static uint thread_instr_class_stages_sizing;
/** Size of @c thread_instr_class_statements_array (thread_max * statement class sizing elements). */
static uint thread_instr_class_statements_sizing;
/** Per-thread wait statistics; each thread owns a slice of wait_class_max entries. */
static PFS_single_stat *thread_instr_class_waits_array= NULL;
/** Per-thread stage statistics; each thread owns a slice of stage_class_max entries. */
static PFS_stage_stat *thread_instr_class_stages_array= NULL;
/** Per-thread statement statistics; each thread owns a slice of statement_class_max entries. */
static PFS_statement_stat *thread_instr_class_statements_array= NULL;

/** Waits history records, sliced per thread. @sa events_waits_history_per_thread */
static PFS_events_waits *thread_waits_history_array= NULL;
/** Stages history records, sliced per thread. @sa events_stages_history_per_thread */
static PFS_events_stages *thread_stages_history_array= NULL;
/** Statements history records, sliced per thread. @sa events_statements_history_per_thread */
static PFS_events_statements *thread_statements_history_array= NULL;
/** Statement stack records, sliced per thread. @sa statement_stack_max */
static PFS_events_statements *thread_statements_stack_array= NULL;
/** Digest token storage backing the statement stack records. */
static unsigned char *current_stmts_digest_token_array= NULL;
/** Digest token storage backing the statements history records. */
static unsigned char *history_stmts_digest_token_array= NULL;
/** Connection attribute storage, sliced per thread. @sa session_connect_attrs_size_per_thread */
static char *thread_session_connect_attrs_array= NULL;

/** Hash table for instrumented files. */
LF_HASH filename_hash;
/** True if filename_hash is initialized. */
static bool filename_hash_inited= false;
195
/**
  Initialize all the instruments instance buffers.
  Allocates every instance array, every per-thread buffer
  (history, statement stack, per-class statistics, connection attributes,
  digest token storage), then wires the per-thread slices into each
  @c PFS_thread record.
  On failure, already allocated buffers are left as-is, to be released
  by cleanup_instruments().
  @param param sizing parameters
  @return 0 on success, non zero on out-of-memory
*/
int init_instruments(const PFS_global_param *param)
{
  PFS_events_statements *pfs_stmt;
  unsigned char *pfs_tokens;

  /* Per-thread buffer sizes, in number of records (not bytes). */
  uint thread_waits_history_sizing;
  uint thread_stages_history_sizing;
  uint thread_statements_history_sizing;
  uint thread_statements_stack_sizing;
  uint thread_session_connect_attrs_sizing;
  uint index;

  /* Make sure init_event_name_sizing is called */
  DBUG_ASSERT(wait_class_max != 0);

  /* Copy the sizing parameters and reset the "full"/"lost" bookkeeping. */
  mutex_max= param->m_mutex_sizing;
  mutex_full= false;
  mutex_lost= 0;
  rwlock_max= param->m_rwlock_sizing;
  rwlock_full= false;
  rwlock_lost= 0;
  cond_max= param->m_cond_sizing;
  cond_full= false;
  cond_lost= 0;
  file_max= param->m_file_sizing;
  file_full= false;
  file_lost= 0;
  file_handle_max= param->m_file_handle_sizing;
  file_handle_full= false;
  file_handle_lost= 0;

  pfs_max_digest_length= param->m_max_digest_length;

  table_max= param->m_table_sizing;
  table_full= false;
  table_lost= 0;
  thread_max= param->m_thread_sizing;
  thread_full= false;
  thread_lost= 0;
  socket_max= param->m_socket_sizing;
  socket_full= false;
  socket_lost= 0;

  /* Derive the combined (all threads) sizes of the per-thread buffers. */
  events_waits_history_per_thread= param->m_events_waits_history_sizing;
  thread_waits_history_sizing= param->m_thread_sizing
    * events_waits_history_per_thread;

  thread_instr_class_waits_sizing= param->m_thread_sizing
    * wait_class_max;

  events_stages_history_per_thread= param->m_events_stages_history_sizing;
  thread_stages_history_sizing= param->m_thread_sizing
    * events_stages_history_per_thread;

  events_statements_history_per_thread= param->m_events_statements_history_sizing;
  thread_statements_history_sizing= param->m_thread_sizing
    * events_statements_history_per_thread;

  /* Only one (non nested) statement is tracked per thread. */
  statement_stack_max= 1;
  thread_statements_stack_sizing= param->m_thread_sizing * statement_stack_max;

  thread_instr_class_stages_sizing= param->m_thread_sizing
    * param->m_stage_class_sizing;

  thread_instr_class_statements_sizing= param->m_thread_sizing
    * param->m_statement_class_sizing;

  session_connect_attrs_size_per_thread= param->m_session_connect_attrs_sizing;
  thread_session_connect_attrs_sizing= param->m_thread_sizing
    * session_connect_attrs_size_per_thread;
  session_connect_attrs_lost= 0;

  /* Digest token storage: one slice per statement record (stack + history). */
  size_t current_digest_tokens_sizing= param->m_thread_sizing * pfs_max_digest_length * statement_stack_max;
  size_t history_digest_tokens_sizing= param->m_thread_sizing * pfs_max_digest_length * events_statements_history_per_thread;

  mutex_array= NULL;
  rwlock_array= NULL;
  cond_array= NULL;
  file_array= NULL;
  file_handle_array= NULL;
  table_array= NULL;
  socket_array= NULL;
  thread_array= NULL;
  thread_waits_history_array= NULL;
  thread_stages_history_array= NULL;
  thread_statements_history_array= NULL;
  thread_statements_stack_array= NULL;
  current_stmts_digest_token_array= NULL;
  history_stmts_digest_token_array= NULL;
  thread_instr_class_waits_array= NULL;
  thread_instr_class_stages_array= NULL;
  thread_instr_class_statements_array= NULL;
  thread_internal_id_counter= 0;

  /* A sizing of 0 means the corresponding buffer is disabled: keep NULL. */
  if (mutex_max > 0)
  {
    mutex_array= PFS_MALLOC_ARRAY(mutex_max, sizeof(PFS_mutex), PFS_mutex, MYF(MY_ZEROFILL));
    if (unlikely(mutex_array == NULL))
      return 1;
  }

  if (rwlock_max > 0)
  {
    rwlock_array= PFS_MALLOC_ARRAY(rwlock_max, sizeof(PFS_rwlock), PFS_rwlock, MYF(MY_ZEROFILL));
    if (unlikely(rwlock_array == NULL))
      return 1;
  }

  if (cond_max > 0)
  {
    cond_array= PFS_MALLOC_ARRAY(cond_max, sizeof(PFS_cond), PFS_cond, MYF(MY_ZEROFILL));
    if (unlikely(cond_array == NULL))
      return 1;
  }

  if (file_max > 0)
  {
    file_array= PFS_MALLOC_ARRAY(file_max, sizeof(PFS_file), PFS_file, MYF(MY_ZEROFILL));
    if (unlikely(file_array == NULL))
      return 1;
  }

  if (file_handle_max > 0)
  {
    file_handle_array= PFS_MALLOC_ARRAY(file_handle_max, sizeof(PFS_file*), PFS_file*, MYF(MY_ZEROFILL));
    if (unlikely(file_handle_array == NULL))
      return 1;
  }

  if (table_max > 0)
  {
    table_array= PFS_MALLOC_ARRAY(table_max, sizeof(PFS_table), PFS_table, MYF(MY_ZEROFILL));
    if (unlikely(table_array == NULL))
      return 1;
  }

  if (socket_max > 0)
  {
    socket_array= PFS_MALLOC_ARRAY(socket_max, sizeof(PFS_socket), PFS_socket, MYF(MY_ZEROFILL));
    if (unlikely(socket_array == NULL))
      return 1;
  }

  if (thread_max > 0)
  {
    thread_array= PFS_MALLOC_ARRAY(thread_max, sizeof(PFS_thread), PFS_thread, MYF(MY_ZEROFILL));
    if (unlikely(thread_array == NULL))
      return 1;
  }

  if (thread_waits_history_sizing > 0)
  {
    thread_waits_history_array=
      PFS_MALLOC_ARRAY(thread_waits_history_sizing, sizeof(PFS_events_waits), PFS_events_waits,
                       MYF(MY_ZEROFILL));
    if (unlikely(thread_waits_history_array == NULL))
      return 1;
  }

  if (thread_instr_class_waits_sizing > 0)
  {
    thread_instr_class_waits_array=
      PFS_MALLOC_ARRAY(thread_instr_class_waits_sizing,
                       sizeof(PFS_single_stat), PFS_single_stat, MYF(MY_ZEROFILL));
    if (unlikely(thread_instr_class_waits_array == NULL))
      return 1;

    /* Statistics records need an explicit reset, zero-fill is not enough. */
    for (index= 0; index < thread_instr_class_waits_sizing; index++)
      thread_instr_class_waits_array[index].reset();
  }

  if (thread_stages_history_sizing > 0)
  {
    thread_stages_history_array=
      PFS_MALLOC_ARRAY(thread_stages_history_sizing, sizeof(PFS_events_stages), PFS_events_stages,
                       MYF(MY_ZEROFILL));
    if (unlikely(thread_stages_history_array == NULL))
      return 1;
  }

  if (thread_instr_class_stages_sizing > 0)
  {
    thread_instr_class_stages_array=
      PFS_MALLOC_ARRAY(thread_instr_class_stages_sizing,
                       sizeof(PFS_stage_stat), PFS_stage_stat, MYF(MY_ZEROFILL));
    if (unlikely(thread_instr_class_stages_array == NULL))
      return 1;

    for (index= 0; index < thread_instr_class_stages_sizing; index++)
      thread_instr_class_stages_array[index].reset();
  }

  if (thread_statements_history_sizing > 0)
  {
    thread_statements_history_array=
      PFS_MALLOC_ARRAY(thread_statements_history_sizing, sizeof(PFS_events_statements),
                       PFS_events_statements, MYF(MY_ZEROFILL));
    if (unlikely(thread_statements_history_array == NULL))
      return 1;
  }

  if (thread_statements_stack_sizing > 0)
  {
    thread_statements_stack_array=
      PFS_MALLOC_ARRAY(thread_statements_stack_sizing, sizeof(PFS_events_statements),
                       PFS_events_statements, MYF(MY_ZEROFILL));
    if (unlikely(thread_statements_stack_array == NULL))
      return 1;
  }

  if (thread_instr_class_statements_sizing > 0)
  {
    thread_instr_class_statements_array=
      PFS_MALLOC_ARRAY(thread_instr_class_statements_sizing,
                       sizeof(PFS_statement_stat), PFS_statement_stat, MYF(MY_ZEROFILL));
    if (unlikely(thread_instr_class_statements_array == NULL))
      return 1;

    for (index= 0; index < thread_instr_class_statements_sizing; index++)
      thread_instr_class_statements_array[index].reset();
  }

  if (thread_session_connect_attrs_sizing > 0)
  {
    thread_session_connect_attrs_array=
      (char *)pfs_malloc(thread_session_connect_attrs_sizing, MYF(MY_ZEROFILL));
    if (unlikely(thread_session_connect_attrs_array == NULL))
      return 1;
  }

  if (current_digest_tokens_sizing > 0)
  {
    current_stmts_digest_token_array=
      (unsigned char *)pfs_malloc(current_digest_tokens_sizing, MYF(MY_ZEROFILL));
    if (unlikely(current_stmts_digest_token_array == NULL))
      return 1;
  }

  if (history_digest_tokens_sizing > 0)
  {
    history_stmts_digest_token_array=
      (unsigned char *)pfs_malloc(history_digest_tokens_sizing, MYF(MY_ZEROFILL));
    if (unlikely(history_stmts_digest_token_array == NULL))
      return 1;
  }

  /*
    Wire each thread record to its slice of the shared per-thread buffers.
    NOTE(review): when a per-thread sizing is 0 the backing array is NULL and
    the slice pointer is computed as &NULL[0] -- presumably never dereferenced
    in that configuration, but confirm against the consumers.
  */
  for (index= 0; index < thread_max; index++)
  {
    thread_array[index].m_waits_history=
      &thread_waits_history_array[index * events_waits_history_per_thread];
    thread_array[index].m_instr_class_waits_stats=
      &thread_instr_class_waits_array[index * wait_class_max];
    thread_array[index].m_stages_history=
      &thread_stages_history_array[index * events_stages_history_per_thread];
    thread_array[index].m_instr_class_stages_stats=
      &thread_instr_class_stages_array[index * stage_class_max];
    thread_array[index].m_statements_history=
      &thread_statements_history_array[index * events_statements_history_per_thread];
    thread_array[index].m_statement_stack=
      &thread_statements_stack_array[index * statement_stack_max];
    thread_array[index].m_instr_class_statements_stats=
      &thread_instr_class_statements_array[index * statement_class_max];
    thread_array[index].m_session_connect_attrs=
      &thread_session_connect_attrs_array[index * session_connect_attrs_size_per_thread];
  }

  /* Attach a digest token slice to every statement stack record. */
  for (index= 0; index < thread_statements_stack_sizing; index++)
  {
    pfs_stmt= & thread_statements_stack_array[index];

    pfs_tokens= & current_stmts_digest_token_array[index * pfs_max_digest_length];
    pfs_stmt->m_digest_storage.reset(pfs_tokens, pfs_max_digest_length);
  }

  /* Attach a digest token slice to every statement history record. */
  for (index= 0; index < thread_statements_history_sizing; index++)
  {
    pfs_stmt= & thread_statements_history_array[index];

    pfs_tokens= & history_stmts_digest_token_array[index * pfs_max_digest_length];
    pfs_stmt->m_digest_storage.reset(pfs_tokens, pfs_max_digest_length);
  }

  /* Global (all threads) per-class statistics. */
  if (stage_class_max > 0)
  {
    global_instr_class_stages_array=
      PFS_MALLOC_ARRAY(stage_class_max,
                       sizeof(PFS_stage_stat), PFS_stage_stat, MYF(MY_ZEROFILL));
    if (unlikely(global_instr_class_stages_array == NULL))
      return 1;

    for (index= 0; index < stage_class_max; index++)
      global_instr_class_stages_array[index].reset();
  }

  if (statement_class_max > 0)
  {
    global_instr_class_statements_array=
      PFS_MALLOC_ARRAY(statement_class_max,
                       sizeof(PFS_statement_stat), PFS_statement_stat, MYF(MY_ZEROFILL));
    if (unlikely(global_instr_class_statements_array == NULL))
      return 1;

    for (index= 0; index < statement_class_max; index++)
      global_instr_class_statements_array[index].reset();
  }

  return 0;
}
509
510 /** Cleanup all the instruments buffers. */
cleanup_instruments(void)511 void cleanup_instruments(void)
512 {
513 pfs_free(mutex_array);
514 mutex_array= NULL;
515 mutex_max= 0;
516 pfs_free(rwlock_array);
517 rwlock_array= NULL;
518 rwlock_max= 0;
519 pfs_free(cond_array);
520 cond_array= NULL;
521 cond_max= 0;
522 pfs_free(file_array);
523 file_array= NULL;
524 file_max= 0;
525 pfs_free(file_handle_array);
526 file_handle_array= NULL;
527 file_handle_max= 0;
528 pfs_free(table_array);
529 table_array= NULL;
530 table_max= 0;
531 pfs_free(socket_array);
532 socket_array= NULL;
533 socket_max= 0;
534 pfs_free(thread_array);
535 thread_array= NULL;
536 thread_max= 0;
537 pfs_free(thread_waits_history_array);
538 thread_waits_history_array= NULL;
539 pfs_free(thread_stages_history_array);
540 thread_stages_history_array= NULL;
541 pfs_free(thread_statements_history_array);
542 thread_statements_history_array= NULL;
543 pfs_free(thread_statements_stack_array);
544 thread_statements_stack_array= NULL;
545 pfs_free(thread_instr_class_waits_array);
546 thread_instr_class_waits_array= NULL;
547 pfs_free(global_instr_class_stages_array);
548 global_instr_class_stages_array= NULL;
549 pfs_free(global_instr_class_statements_array);
550 global_instr_class_statements_array= NULL;
551 pfs_free(thread_session_connect_attrs_array);
552 thread_session_connect_attrs_array=NULL;
553 pfs_free(current_stmts_digest_token_array);
554 current_stmts_digest_token_array= NULL;
555 pfs_free(history_stmts_digest_token_array);
556 history_stmts_digest_token_array= NULL;
557 }
558
559 C_MODE_START
560 /** Get hash table key for instrumented files. */
filename_hash_get_key(const uchar * entry,size_t * length,my_bool)561 static uchar *filename_hash_get_key(const uchar *entry, size_t *length,
562 my_bool)
563 {
564 const PFS_file * const *typed_entry;
565 const PFS_file *file;
566 const void *result;
567 typed_entry= reinterpret_cast<const PFS_file* const *> (entry);
568 DBUG_ASSERT(typed_entry != NULL);
569 file= *typed_entry;
570 DBUG_ASSERT(file != NULL);
571 *length= file->m_filename_length;
572 result= file->m_filename;
573 return const_cast<uchar*> (reinterpret_cast<const uchar*> (result));
574 }
575 C_MODE_END
576
577 /**
578 Initialize the file name hash.
579 @return 0 on success
580 */
init_file_hash(void)581 int init_file_hash(void)
582 {
583 if ((! filename_hash_inited) && (file_max > 0))
584 {
585 lf_hash_init(&filename_hash, sizeof(PFS_file*), LF_HASH_UNIQUE,
586 0, 0, filename_hash_get_key, &my_charset_bin);
587 filename_hash.size= file_max;
588 filename_hash_inited= true;
589 }
590 return 0;
591 }
592
593 /** Cleanup the file name hash. */
cleanup_file_hash(void)594 void cleanup_file_hash(void)
595 {
596 if (filename_hash_inited)
597 {
598 lf_hash_destroy(&filename_hash);
599 filename_hash_inited= false;
600 }
601 }
602
init(uint random,uint max_size)603 void PFS_scan::init(uint random, uint max_size)
604 {
605 m_pass= 0;
606
607 if (max_size == 0)
608 {
609 /* Degenerated case, no buffer */
610 m_pass_max= 0;
611 return;
612 }
613
614 DBUG_ASSERT(random < max_size);
615
616 if (PFS_MAX_ALLOC_RETRY < max_size)
617 {
618 /*
619 The buffer is big compared to PFS_MAX_ALLOC_RETRY,
620 scan it only partially.
621 */
622 if (random + PFS_MAX_ALLOC_RETRY < max_size)
623 {
624 /*
625 Pass 1: [random, random + PFS_MAX_ALLOC_RETRY - 1]
626 Pass 2: not used.
627 */
628 m_pass_max= 1;
629 m_first[0]= random;
630 m_last[0]= random + PFS_MAX_ALLOC_RETRY;
631 m_first[1]= 0;
632 m_last[1]= 0;
633 }
634 else
635 {
636 /*
637 Pass 1: [random, max_size - 1]
638 Pass 2: [0, ...]
639 The combined length of pass 1 and 2 is PFS_MAX_ALLOC_RETRY.
640 */
641 m_pass_max= 2;
642 m_first[0]= random;
643 m_last[0]= max_size;
644 m_first[1]= 0;
645 m_last[1]= PFS_MAX_ALLOC_RETRY - (max_size - random);
646 }
647 }
648 else
649 {
650 /*
651 The buffer is small compared to PFS_MAX_ALLOC_RETRY,
652 scan it in full in two passes.
653 Pass 1: [random, max_size - 1]
654 Pass 2: [0, random - 1]
655 */
656 m_pass_max= 2;
657 m_first[0]= random;
658 m_last[0]= max_size;
659 m_first[1]= 0;
660 m_last[1]= random;
661 }
662
663 DBUG_ASSERT(m_first[0] < max_size);
664 DBUG_ASSERT(m_first[1] < max_size);
665 DBUG_ASSERT(m_last[1] <= max_size);
666 DBUG_ASSERT(m_last[1] <= max_size);
667 /* The combined length of all passes should not exceed PFS_MAX_ALLOC_RETRY. */
668 DBUG_ASSERT((m_last[0] - m_first[0]) +
669 (m_last[1] - m_first[1]) <= PFS_MAX_ALLOC_RETRY);
670 }
671
/**
  Create instrumentation for a mutex instance.
  @param klass the mutex class
  @param identity the mutex address
  @return a mutex instance, or NULL when the buffer is full or exhausted
*/
PFS_mutex* create_mutex(PFS_mutex_class *klass, const void *identity)
{
  static uint PFS_ALIGNED mutex_monotonic_index= 0;
  uint index;
  uint attempts= 0;
  PFS_mutex *pfs;

  if (mutex_full)
  {
    /*
      This is a safety plug.
      When mutex_array is severely undersized,
      do not spin to death for each call.
    */
    mutex_lost++;
    return NULL;
  }

  /* At most mutex_max slots are probed before giving up. */
  while (++attempts <= mutex_max)
  {
    /*
      Problem:
      Multiple threads running concurrently may need to create a new
      instrumented mutex, and find an empty slot in mutex_array[].
      With N1 threads running on a N2 core hardware:
      - up to N2 hardware threads can run concurrently,
      causing contention if looking at the same array[i] slot.
      - up to N1 threads can run almost concurrently (with thread scheduling),
      scanning maybe overlapping regions in the [0-mutex_max] array.

      Solution:
      Instead of letting different threads compete on the same array[i] entry,
      this code forces all threads to cooperate with the monotonic_index.
      Only one thread will be allowed to test a given array[i] slot.
      All threads do scan from the same region, starting at monotonic_index.
      Serializing on monotonic_index ensures that when a slot is found occupied
      in a given loop by a given thread, other threads will not attempt this
      slot.
    */
    index= PFS_atomic::add_u32(& mutex_monotonic_index, 1) % mutex_max;
    pfs= mutex_array + index;

    if (pfs->m_lock.is_free())
    {
      /* free_to_dirty() claims the slot; only one thread can win it. */
      if (pfs->m_lock.free_to_dirty())
      {
        pfs->m_identity= identity;
        pfs->m_class= klass;
        pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
        pfs->m_timed= klass->m_timed;
        pfs->m_mutex_stat.reset();
        pfs->m_owner= NULL;
        pfs->m_last_locked= 0;
        /* Publish the record only after it is fully initialized. */
        pfs->m_lock.dirty_to_allocated();
        if (klass->is_singleton())
          klass->m_singleton= pfs;
        return pfs;
      }
    }
  }

  mutex_lost++;
  /*
    Race condition.
    The mutex_array might not be full if a concurrent thread
    called destroy_mutex() during the scan, leaving one
    empty slot we did not find.
    However, 99.999 percent full tables or 100 percent full tables
    are treated the same here, we declare the array overloaded.
  */
  mutex_full= true;
  return NULL;
}
751
752 /**
753 Destroy instrumentation for a mutex instance.
754 @param pfs the mutex to destroy
755 */
destroy_mutex(PFS_mutex * pfs)756 void destroy_mutex(PFS_mutex *pfs)
757 {
758 DBUG_ASSERT(pfs != NULL);
759 PFS_mutex_class *klass= pfs->m_class;
760 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
761 klass->m_mutex_stat.aggregate(& pfs->m_mutex_stat);
762 pfs->m_mutex_stat.reset();
763 if (klass->is_singleton())
764 klass->m_singleton= NULL;
765 pfs->m_lock.allocated_to_free();
766 mutex_full= false;
767 }
768
/**
  Create instrumentation for a rwlock instance.
  @param klass the rwlock class
  @param identity the rwlock address
  @return a rwlock instance, or NULL when the buffer is full or exhausted
*/
PFS_rwlock* create_rwlock(PFS_rwlock_class *klass, const void *identity)
{
  static uint PFS_ALIGNED rwlock_monotonic_index= 0;
  uint index;
  uint attempts= 0;
  PFS_rwlock *pfs;

  if (rwlock_full)
  {
    /* Safety plug: do not scan when the array is known to be full. */
    rwlock_lost++;
    return NULL;
  }

  while (++attempts <= rwlock_max)
  {
    /* See create_mutex() */
    index= PFS_atomic::add_u32(& rwlock_monotonic_index, 1) % rwlock_max;
    pfs= rwlock_array + index;

    if (pfs->m_lock.is_free())
    {
      /* free_to_dirty() claims the slot; only one thread can win it. */
      if (pfs->m_lock.free_to_dirty())
      {
        pfs->m_identity= identity;
        pfs->m_class= klass;
        pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
        pfs->m_timed= klass->m_timed;
        pfs->m_rwlock_stat.reset();
        pfs->m_lock.dirty_to_allocated();
        pfs->m_writer= NULL;
        pfs->m_readers= 0;
        pfs->m_last_written= 0;
        pfs->m_last_read= 0;
        if (klass->is_singleton())
          klass->m_singleton= pfs;
        return pfs;
      }
    }
  }

  rwlock_lost++;
  /* See the race condition note in create_mutex(). */
  rwlock_full= true;
  return NULL;
}
819
820 /**
821 Destroy instrumentation for a rwlock instance.
822 @param pfs the rwlock to destroy
823 */
destroy_rwlock(PFS_rwlock * pfs)824 void destroy_rwlock(PFS_rwlock *pfs)
825 {
826 DBUG_ASSERT(pfs != NULL);
827 PFS_rwlock_class *klass= pfs->m_class;
828 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
829 klass->m_rwlock_stat.aggregate(& pfs->m_rwlock_stat);
830 pfs->m_rwlock_stat.reset();
831 if (klass->is_singleton())
832 klass->m_singleton= NULL;
833 pfs->m_lock.allocated_to_free();
834 rwlock_full= false;
835 }
836
/**
  Create instrumentation for a condition instance.
  @param klass the condition class
  @param identity the condition address
  @return a condition instance, or NULL when the buffer is full or exhausted
*/
PFS_cond* create_cond(PFS_cond_class *klass, const void *identity)
{
  static uint PFS_ALIGNED cond_monotonic_index= 0;
  uint index;
  uint attempts= 0;
  PFS_cond *pfs;

  if (cond_full)
  {
    /* Safety plug: do not scan when the array is known to be full. */
    cond_lost++;
    return NULL;
  }

  while (++attempts <= cond_max)
  {
    /* See create_mutex() */
    index= PFS_atomic::add_u32(& cond_monotonic_index, 1) % cond_max;
    pfs= cond_array + index;

    if (pfs->m_lock.is_free())
    {
      /* free_to_dirty() claims the slot; only one thread can win it. */
      if (pfs->m_lock.free_to_dirty())
      {
        pfs->m_identity= identity;
        pfs->m_class= klass;
        pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
        pfs->m_timed= klass->m_timed;
        pfs->m_cond_stat.m_signal_count= 0;
        pfs->m_cond_stat.m_broadcast_count= 0;
        pfs->m_wait_stat.reset();
        /* Publish the record only after it is fully initialized. */
        pfs->m_lock.dirty_to_allocated();
        if (klass->is_singleton())
          klass->m_singleton= pfs;
        return pfs;
      }
    }
  }

  cond_lost++;
  /* See the race condition note in create_mutex(). */
  cond_full= true;
  return NULL;
}
885
886 /**
887 Destroy instrumentation for a condition instance.
888 @param pfs the condition to destroy
889 */
destroy_cond(PFS_cond * pfs)890 void destroy_cond(PFS_cond *pfs)
891 {
892 DBUG_ASSERT(pfs != NULL);
893 PFS_cond_class *klass= pfs->m_class;
894 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
895 klass->m_cond_stat.aggregate(& pfs->m_cond_stat);
896 pfs->m_wait_stat.reset();
897 if (klass->is_singleton())
898 klass->m_singleton= NULL;
899 pfs->m_lock.allocated_to_free();
900 cond_full= false;
901 }
902
get_current_thread()903 PFS_thread* PFS_thread::get_current_thread()
904 {
905 PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
906 return pfs;
907 }
908
reset_session_connect_attrs()909 void PFS_thread::reset_session_connect_attrs()
910 {
911 m_session_connect_attrs_length= 0;
912 m_session_connect_attrs_cs_number= 0;
913
914 if ((m_session_connect_attrs != NULL) &&
915 (session_connect_attrs_size_per_thread > 0) )
916 {
917 /* Do not keep user data */
918 memset(m_session_connect_attrs, 0, session_connect_attrs_size_per_thread);
919 }
920 }
921
/**
  Create instrumentation for a thread instance.
  @param klass the thread class
  @param identity the thread address,
    or a value characteristic of this thread
  @param processlist_id the PROCESSLIST id,
    or 0 if unknown
  @return a thread instance, or NULL when the buffer is full or exhausted
*/
PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
                          ulonglong processlist_id)
{
  static uint PFS_ALIGNED thread_monotonic_index= 0;
  uint index;
  uint attempts= 0;
  PFS_thread *pfs;

  if (thread_full)
  {
    /* Safety plug: do not scan when the array is known to be full. */
    thread_lost++;
    return NULL;
  }

  while (++attempts <= thread_max)
  {
    /* See create_mutex() */
    index= PFS_atomic::add_u32(& thread_monotonic_index, 1) % thread_max;
    pfs= thread_array + index;

    if (pfs->m_lock.is_free())
    {
      /* free_to_dirty() claims the slot; only one thread can win it. */
      if (pfs->m_lock.free_to_dirty())
      {
        /* Unique internal id, distinct from the PROCESSLIST id. */
        pfs->m_thread_internal_id=
          PFS_atomic::add_u64(&thread_internal_id_counter, 1);
        pfs->m_parent_thread_internal_id= 0;
        pfs->m_processlist_id= processlist_id;
        pfs->m_event_id= 1;
        pfs->m_stmt_lock.set_allocated();
        pfs->m_session_lock.set_allocated();
        pfs->m_enabled= true;
        pfs->m_class= klass;
        pfs->m_events_waits_current= & pfs->m_events_waits_stack[WAIT_STACK_BOTTOM];
        pfs->m_waits_history_full= false;
        pfs->m_waits_history_index= 0;
        pfs->m_stages_history_full= false;
        pfs->m_stages_history_index= 0;
        pfs->m_statements_history_full= false;
        pfs->m_statements_history_index= 0;

        pfs->reset_stats();
        pfs->reset_session_connect_attrs();

        /* Lock-free hash pins are lazily allocated per thread. */
        pfs->m_filename_hash_pins= NULL;
        pfs->m_table_share_hash_pins= NULL;
        pfs->m_setup_actor_hash_pins= NULL;
        pfs->m_setup_object_hash_pins= NULL;
        pfs->m_user_hash_pins= NULL;
        pfs->m_account_hash_pins= NULL;
        pfs->m_host_hash_pins= NULL;
        pfs->m_digest_hash_pins= NULL;

        pfs->m_username_length= 0;
        pfs->m_hostname_length= 0;
        pfs->m_dbname_length= 0;
        pfs->m_command= 0;
        pfs->m_start_time= 0;
        pfs->m_stage= 0;
        pfs->m_processlist_info[0]= '\0';
        pfs->m_processlist_info_length= 0;

        pfs->m_host= NULL;
        pfs->m_user= NULL;
        pfs->m_account= NULL;
        set_thread_account(pfs);

        /* Pre-fill the per-thread wait records with this thread's id. */
        PFS_events_waits *child_wait;
        for (index= 0; index < WAIT_STACK_SIZE; index++)
        {
          child_wait= & pfs->m_events_waits_stack[index];
          child_wait->m_thread_internal_id= pfs->m_thread_internal_id;
          child_wait->m_event_id= 0;
          child_wait->m_end_event_id= 0;
          child_wait->m_event_type= EVENT_TYPE_STATEMENT;
          child_wait->m_wait_class= NO_WAIT_CLASS;
        }

        /* Pre-fill the current stage record. */
        PFS_events_stages *child_stage= & pfs->m_stage_current;
        child_stage->m_thread_internal_id= pfs->m_thread_internal_id;
        child_stage->m_event_id= 0;
        child_stage->m_end_event_id= 0;
        child_stage->m_event_type= EVENT_TYPE_STATEMENT;
        child_stage->m_class= NULL;
        child_stage->m_timer_start= 0;
        child_stage->m_timer_end= 0;
        child_stage->m_source_file= NULL;
        child_stage->m_source_line= 0;

        /* Pre-fill every record of the statement stack. */
        PFS_events_statements *child_statement;
        for (index= 0; index < statement_stack_max; index++)
        {
          child_statement= & pfs->m_statement_stack[index];
          child_statement->m_thread_internal_id= pfs->m_thread_internal_id;
          child_statement->m_event_id= 0;
          child_statement->m_end_event_id= 0;
          child_statement->m_event_type= EVENT_TYPE_STATEMENT;
          child_statement->m_class= NULL;
          child_statement->m_timer_start= 0;
          child_statement->m_timer_end= 0;
          child_statement->m_lock_time= 0;
          child_statement->m_source_file= NULL;
          child_statement->m_source_line= 0;
          child_statement->m_current_schema_name_length= 0;
          child_statement->m_sqltext_length= 0;

          child_statement->m_message_text[0]= '\0';
          child_statement->m_sql_errno= 0;
          child_statement->m_sqlstate[0]= '\0';
          child_statement->m_error_count= 0;
          child_statement->m_warning_count= 0;
          child_statement->m_rows_affected= 0;

          child_statement->m_rows_sent= 0;
          child_statement->m_rows_examined= 0;
          child_statement->m_created_tmp_disk_tables= 0;
          child_statement->m_created_tmp_tables= 0;
          child_statement->m_select_full_join= 0;
          child_statement->m_select_full_range_join= 0;
          child_statement->m_select_range= 0;
          child_statement->m_select_range_check= 0;
          child_statement->m_select_scan= 0;
          child_statement->m_sort_merge_passes= 0;
          child_statement->m_sort_range= 0;
          child_statement->m_sort_rows= 0;
          child_statement->m_sort_scan= 0;
          child_statement->m_no_index_used= 0;
          child_statement->m_no_good_index_used= 0;
        }
        pfs->m_events_statements_count= 0;

        /* Publish the record only after it is fully initialized. */
        pfs->m_lock.dirty_to_allocated();
        return pfs;
      }
    }
  }

  thread_lost++;
  /* See the race condition note in create_mutex(). */
  thread_full= true;
  return NULL;
}
1072
/**
  Sanitize a PFS_mutex pointer.
  Validate that the pointer lies within the bounds of @c mutex_array
  (see SANITIZE_ARRAY_BODY).
  @param unsafe the pointer to sanitize
  @return a valid pointer, or NULL
*/
PFS_mutex *sanitize_mutex(PFS_mutex *unsafe)
{
  SANITIZE_ARRAY_BODY(PFS_mutex, mutex_array, mutex_max, unsafe);
}
1077
/**
  Sanitize a PFS_rwlock pointer.
  Validate that the pointer lies within the bounds of @c rwlock_array
  (see SANITIZE_ARRAY_BODY).
  @param unsafe the pointer to sanitize
  @return a valid pointer, or NULL
*/
PFS_rwlock *sanitize_rwlock(PFS_rwlock *unsafe)
{
  SANITIZE_ARRAY_BODY(PFS_rwlock, rwlock_array, rwlock_max, unsafe);
}
1082
/**
  Sanitize a PFS_cond pointer.
  Validate that the pointer lies within the bounds of @c cond_array
  (see SANITIZE_ARRAY_BODY).
  @param unsafe the pointer to sanitize
  @return a valid pointer, or NULL
*/
PFS_cond *sanitize_cond(PFS_cond *unsafe)
{
  SANITIZE_ARRAY_BODY(PFS_cond, cond_array, cond_max, unsafe);
}
1087
/**
  Sanitize a PFS_thread pointer.
  Validate that the PFS_thread is part of thread_array.
  Sanitizing data is required when the data can be
  damaged with expected race conditions, for example
  involving EVENTS_WAITS_HISTORY_LONG.
  @param unsafe the pointer to sanitize
  @return a valid pointer, or NULL
*/
PFS_thread *sanitize_thread(PFS_thread *unsafe)
{
  SANITIZE_ARRAY_BODY(PFS_thread, thread_array, thread_max, unsafe);
}
1101
/**
  Sanitize a PFS_file pointer.
  Validate that the pointer lies within the bounds of @c file_array
  (see SANITIZE_ARRAY_BODY).
  @param unsafe the pointer to sanitize
  @return a valid pointer, or NULL
*/
PFS_file *sanitize_file(PFS_file *unsafe)
{
  SANITIZE_ARRAY_BODY(PFS_file, file_array, file_max, unsafe);
}
1106
/**
  Sanitize a PFS_socket pointer.
  Validate that the pointer lies within the bounds of @c socket_array
  (see SANITIZE_ARRAY_BODY).
  @param unsafe the pointer to sanitize
  @return a valid pointer, or NULL
*/
PFS_socket *sanitize_socket(PFS_socket *unsafe)
{
  SANITIZE_ARRAY_BODY(PFS_socket, socket_array, socket_max, unsafe);
}
1111
/**
  Destroy instrumentation for a thread instance.
  Releases the account (or user/host) references, returns every
  LF_HASH pin the thread may have acquired lazily, and frees the
  thread slot for reuse.
  @param pfs the thread to destroy
*/
void destroy_thread(PFS_thread *pfs)
{
  DBUG_ASSERT(pfs != NULL);
  pfs->reset_session_connect_attrs();
  /*
    A thread holds either one reference on its account,
    or separate references on its user and host.
  */
  if (pfs->m_account != NULL)
  {
    pfs->m_account->release();
    pfs->m_account= NULL;
    /* An account reference implies no separate user/host references. */
    DBUG_ASSERT(pfs->m_user == NULL);
    DBUG_ASSERT(pfs->m_host == NULL);
  }
  else
  {
    if (pfs->m_user != NULL)
    {
      pfs->m_user->release();
      pfs->m_user= NULL;
    }
    if (pfs->m_host != NULL)
    {
      pfs->m_host->release();
      pfs->m_host= NULL;
    }
  }
  /*
    Return all the LF_HASH pins held by this thread.
    Pins are allocated lazily (see get_filename_hash_pins()),
    so any of them may be NULL.
  */
  if (pfs->m_filename_hash_pins)
  {
    lf_hash_put_pins(pfs->m_filename_hash_pins);
    pfs->m_filename_hash_pins= NULL;
  }
  if (pfs->m_table_share_hash_pins)
  {
    lf_hash_put_pins(pfs->m_table_share_hash_pins);
    pfs->m_table_share_hash_pins= NULL;
  }
  if (pfs->m_setup_actor_hash_pins)
  {
    lf_hash_put_pins(pfs->m_setup_actor_hash_pins);
    pfs->m_setup_actor_hash_pins= NULL;
  }
  if (pfs->m_setup_object_hash_pins)
  {
    lf_hash_put_pins(pfs->m_setup_object_hash_pins);
    pfs->m_setup_object_hash_pins= NULL;
  }
  if (pfs->m_user_hash_pins)
  {
    lf_hash_put_pins(pfs->m_user_hash_pins);
    pfs->m_user_hash_pins= NULL;
  }
  if (pfs->m_account_hash_pins)
  {
    lf_hash_put_pins(pfs->m_account_hash_pins);
    pfs->m_account_hash_pins= NULL;
  }
  if (pfs->m_host_hash_pins)
  {
    lf_hash_put_pins(pfs->m_host_hash_pins);
    pfs->m_host_hash_pins= NULL;
  }
  if (pfs->m_digest_hash_pins)
  {
    lf_hash_put_pins(pfs->m_digest_hash_pins);
    pfs->m_digest_hash_pins= NULL;
  }
  /* Free the slot, and allow new thread creation again. */
  pfs->m_lock.allocated_to_free();
  thread_full= false;
}
1183
1184 /**
1185 Get the hash pins for @filename_hash.
1186 @param thread The running thread.
1187 @returns The LF_HASH pins for the thread.
1188 */
get_filename_hash_pins(PFS_thread * thread)1189 LF_PINS* get_filename_hash_pins(PFS_thread *thread)
1190 {
1191 if (unlikely(thread->m_filename_hash_pins == NULL))
1192 {
1193 if (! filename_hash_inited)
1194 return NULL;
1195 thread->m_filename_hash_pins= lf_hash_get_pins(&filename_hash);
1196 }
1197 return thread->m_filename_hash_pins;
1198 }
1199
/**
  Find or create instrumentation for a file instance by file name.
  The name is truncated to FN_REFLEN if needed, then normalized
  (its directory is resolved with my_realpath) so that different
  aliases of the same file map to a single instrument.
  @param thread the executing instrumented thread
  @param klass the file class
  @param filename the file name
  @param len the length in bytes of filename
  @param create create a file instance if none found
  @return a file instance, or NULL
*/
PFS_file*
find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
                    const char *filename, uint len, bool create)
{
  PFS_file *pfs;

  /* A file class is only required when creating an instance. */
  DBUG_ASSERT(klass != NULL || ! create);

  LF_PINS *pins= get_filename_hash_pins(thread);
  if (unlikely(pins == NULL))
  {
    /* Without pins the lock free hash can not be accessed safely. */
    file_lost++;
    return NULL;
  }

  char safe_buffer[FN_REFLEN];
  const char *safe_filename;

  if (len >= FN_REFLEN)
  {
    /*
      The instrumented code uses file names that exceeds FN_REFLEN.
      This could be legal for instrumentation on non mysys APIs,
      so we support it.
      Truncate the file name so that:
      - it fits into pfs->m_filename
      - it is safe to use mysys apis to normalize the file name.
    */
    memcpy(safe_buffer, filename, FN_REFLEN - 1);
    safe_buffer[FN_REFLEN - 1]= 0;
    safe_filename= safe_buffer;
  }
  else
    safe_filename= filename;

  /*
    Normalize the file name to avoid duplicates when using aliases:
    - absolute or relative paths
    - symbolic links
    Names are resolved as follows:
    - /real/path/to/real_file ==> same
    - /path/with/link/to/real_file ==> /real/path/to/real_file
    - real_file ==> /real/path/to/real_file
    - ./real_file ==> /real/path/to/real_file
    - /real/path/to/sym_link ==> same
    - /path/with/link/to/sym_link ==> /real/path/to/sym_link
    - sym_link ==> /real/path/to/sym_link
    - ./sym_link ==> /real/path/to/sym_link
    When the last component of a file is a symbolic link,
    the last component is *not* resolved, so that all file io
    operations on a link (create, read, write, delete) are counted
    against the link itself, not the target file.
    Resolving the name would lead to create counted against the link,
    and read/write/delete counted against the target, leading to
    incoherent results and instrumentation leaks.
    Also note that, when creating files, this name resolution
    works properly for files that do not exist (yet) on the file system.
  */
  char buffer[FN_REFLEN];
  char dirbuffer[FN_REFLEN];
  size_t dirlen;
  const char *normalized_filename;
  int normalized_length;

  dirlen= dirname_length(safe_filename);
  if (dirlen == 0)
  {
    /* No directory part: resolve against the current directory. */
    dirbuffer[0]= FN_CURLIB;
    dirbuffer[1]= FN_LIBCHAR;
    dirbuffer[2]= '\0';
  }
  else
  {
    memcpy(dirbuffer, safe_filename, dirlen);
    dirbuffer[dirlen]= '\0';
  }

  if (my_realpath(buffer, dirbuffer, MYF(0)) != 0)
  {
    file_lost++;
    return NULL;
  }

  /* Append the unresolved file name to the resolved path */
  char *ptr= buffer + strlen(buffer);
  char *buf_end= &buffer[sizeof(buffer)-1];
  if ((buf_end > ptr) && (*(ptr-1) != FN_LIBCHAR))
    *ptr++= FN_LIBCHAR;
  if (buf_end > ptr)
    strncpy(ptr, safe_filename + dirlen, buf_end - ptr);
  *buf_end= '\0';

  normalized_filename= buffer;
  normalized_length= strlen(normalized_filename);

  PFS_file **entry;
  uint retry_count= 0;
  const uint retry_max= 3;
  static uint PFS_ALIGNED file_monotonic_index= 0;
  uint index;
  uint attempts= 0;

  /* Re-entered after a concurrent duplicate insert, see below. */
search:

  entry= reinterpret_cast<PFS_file**>
    (lf_hash_search(&filename_hash, pins,
                    normalized_filename, normalized_length));
  if (entry && (entry != MY_ERRPTR))
  {
    /* Found an existing instance: count one more open. */
    pfs= *entry;
    pfs->m_file_stat.m_open_count++;
    lf_hash_search_unpin(pins);
    return pfs;
  }

  lf_hash_search_unpin(pins);

  if (! create)
  {
    /* No lost counter, just looking for the file existence. */
    return NULL;
  }

  if (file_full)
  {
    file_lost++;
    return NULL;
  }

  while (++attempts <= file_max)
  {
    /* See create_mutex() */
    index= PFS_atomic::add_u32(& file_monotonic_index, 1) % file_max;
    pfs= file_array + index;

    if (pfs->m_lock.is_free())
    {
      if (pfs->m_lock.free_to_dirty())
      {
        /* Slot claimed: populate it before publishing in the hash. */
        pfs->m_class= klass;
        pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
        pfs->m_timed= klass->m_timed;
        strncpy(pfs->m_filename, normalized_filename, normalized_length);
        pfs->m_filename[normalized_length]= '\0';
        pfs->m_filename_length= normalized_length;
        pfs->m_file_stat.m_open_count= 1;
        pfs->m_file_stat.m_io_stat.reset();
        pfs->m_identity= (const void *)pfs;

        int res;
        res= lf_hash_insert(&filename_hash, thread->m_filename_hash_pins,
                            &pfs);
        if (likely(res == 0))
        {
          pfs->m_lock.dirty_to_allocated();
          if (klass->is_singleton())
            klass->m_singleton= pfs;
          return pfs;
        }

        /* Insert failed: roll the slot back to free. */
        pfs->m_lock.dirty_to_free();

        if (res > 0)
        {
          /* Duplicate insert by another thread */
          if (++retry_count > retry_max)
          {
            /* Avoid infinite loops */
            file_lost++;
            return NULL;
          }
          goto search;
        }

        /* OOM in lf_hash_insert */
        file_lost++;
        return NULL;
      }
    }
  }

  file_lost++;
  file_full= true;
  return NULL;
}
1394
/**
  Release instrumentation for a file instance.
  Only the open count is decremented; the instance itself is kept
  so its aggregated statistics survive (see destroy_file()).
  NOTE(review): the decrement is a plain --, not atomic — presumably
  tolerated as a statistics-only race; confirm against callers.
  @param pfs the file to release
*/
void release_file(PFS_file *pfs)
{
  DBUG_ASSERT(pfs != NULL);
  pfs->m_file_stat.m_open_count--;
}
1404
1405 /**
1406 Destroy instrumentation for a file instance.
1407 @param thread the executing thread instrumentation
1408 @param pfs the file to destroy
1409 */
destroy_file(PFS_thread * thread,PFS_file * pfs)1410 void destroy_file(PFS_thread *thread, PFS_file *pfs)
1411 {
1412 DBUG_ASSERT(thread != NULL);
1413 DBUG_ASSERT(pfs != NULL);
1414 PFS_file_class *klass= pfs->m_class;
1415
1416 /* Aggregate to FILE_SUMMARY_BY_EVENT_NAME */
1417 klass->m_file_stat.aggregate(& pfs->m_file_stat);
1418 pfs->m_file_stat.reset();
1419
1420 if (klass->is_singleton())
1421 klass->m_singleton= NULL;
1422
1423 LF_PINS *pins= get_filename_hash_pins(thread);
1424 DBUG_ASSERT(pins != NULL);
1425
1426 lf_hash_delete(&filename_hash, pins,
1427 pfs->m_filename, pfs->m_filename_length);
1428 if (klass->is_singleton())
1429 klass->m_singleton= NULL;
1430 pfs->m_lock.allocated_to_free();
1431 file_full= false;
1432 }
1433
1434 /**
1435 Create instrumentation for a table instance.
1436 @param share the table share
1437 @param opening_thread the opening thread
1438 @param identity the table address
1439 @return a table instance, or NULL
1440 */
create_table(PFS_table_share * share,PFS_thread * opening_thread,const void * identity)1441 PFS_table* create_table(PFS_table_share *share, PFS_thread *opening_thread,
1442 const void *identity)
1443 {
1444 static uint PFS_ALIGNED table_monotonic_index= 0;
1445 uint index;
1446 uint attempts= 0;
1447 PFS_table *pfs;
1448
1449 if (table_full)
1450 {
1451 table_lost++;
1452 return NULL;
1453 }
1454
1455 while (++attempts <= table_max)
1456 {
1457 /* See create_mutex() */
1458 index= PFS_atomic::add_u32(& table_monotonic_index, 1) % table_max;
1459 pfs= table_array + index;
1460
1461 if (pfs->m_lock.is_free())
1462 {
1463 if (pfs->m_lock.free_to_dirty())
1464 {
1465 pfs->m_identity= identity;
1466 pfs->m_share= share;
1467 pfs->m_io_enabled= share->m_enabled &&
1468 flag_global_instrumentation && global_table_io_class.m_enabled;
1469 pfs->m_io_timed= share->m_timed && global_table_io_class.m_timed;
1470 pfs->m_lock_enabled= share->m_enabled &&
1471 flag_global_instrumentation && global_table_lock_class.m_enabled;
1472 pfs->m_lock_timed= share->m_timed && global_table_lock_class.m_timed;
1473 pfs->m_has_io_stats= false;
1474 pfs->m_has_lock_stats= false;
1475 share->inc_refcount();
1476 pfs->m_table_stat.fast_reset();
1477 pfs->m_thread_owner= opening_thread;
1478 pfs->m_lock.dirty_to_allocated();
1479 return pfs;
1480 }
1481 }
1482 }
1483
1484 table_lost++;
1485 table_full= true;
1486 return NULL;
1487 }
1488
sanitized_aggregate(void)1489 void PFS_table::sanitized_aggregate(void)
1490 {
1491 /*
1492 This thread could be a TRUNCATE on an aggregated summary table,
1493 and not own the table handle.
1494 */
1495 PFS_table_share *safe_share= sanitize_table_share(m_share);
1496 if (safe_share != NULL)
1497 {
1498 if (m_has_io_stats && m_has_lock_stats)
1499 {
1500 safe_aggregate(& m_table_stat, safe_share);
1501 m_has_io_stats= false;
1502 m_has_lock_stats= false;
1503 }
1504 else if (m_has_io_stats)
1505 {
1506 safe_aggregate_io(& m_table_stat, safe_share);
1507 m_has_io_stats= false;
1508 }
1509 else if (m_has_lock_stats)
1510 {
1511 safe_aggregate_lock(& m_table_stat, safe_share);
1512 m_has_lock_stats= false;
1513 }
1514 }
1515 }
1516
sanitized_aggregate_io(void)1517 void PFS_table::sanitized_aggregate_io(void)
1518 {
1519 PFS_table_share *safe_share= sanitize_table_share(m_share);
1520 if (safe_share != NULL && m_has_io_stats)
1521 {
1522 safe_aggregate_io(& m_table_stat, safe_share);
1523 m_has_io_stats= false;
1524 }
1525 }
1526
sanitized_aggregate_lock(void)1527 void PFS_table::sanitized_aggregate_lock(void)
1528 {
1529 PFS_table_share *safe_share= sanitize_table_share(m_share);
1530 if (safe_share != NULL && m_has_lock_stats)
1531 {
1532 safe_aggregate_lock(& m_table_stat, safe_share);
1533 m_has_lock_stats= false;
1534 }
1535 }
1536
safe_aggregate(PFS_table_stat * table_stat,PFS_table_share * table_share)1537 void PFS_table::safe_aggregate(PFS_table_stat *table_stat,
1538 PFS_table_share *table_share)
1539 {
1540 DBUG_ASSERT(table_stat != NULL);
1541 DBUG_ASSERT(table_share != NULL);
1542
1543 uint key_count= sanitize_index_count(table_share->m_key_count);
1544
1545 /* Aggregate to TABLE_IO_SUMMARY, TABLE_LOCK_SUMMARY */
1546 table_share->m_table_stat.aggregate(table_stat, key_count);
1547 table_stat->fast_reset();
1548 }
1549
safe_aggregate_io(PFS_table_stat * table_stat,PFS_table_share * table_share)1550 void PFS_table::safe_aggregate_io(PFS_table_stat *table_stat,
1551 PFS_table_share *table_share)
1552 {
1553 DBUG_ASSERT(table_stat != NULL);
1554 DBUG_ASSERT(table_share != NULL);
1555
1556 uint key_count= sanitize_index_count(table_share->m_key_count);
1557
1558 /* Aggregate to TABLE_IO_SUMMARY */
1559 table_share->m_table_stat.aggregate_io(table_stat, key_count);
1560 table_stat->fast_reset_io();
1561 }
1562
/**
  Aggregate lock statistics to a table share, then reset them.
  @param table_stat the per-handle statistics to drain
  @param table_share the destination share
*/
void PFS_table::safe_aggregate_lock(PFS_table_stat *table_stat,
                                    PFS_table_share *table_share)
{
  DBUG_ASSERT(table_stat != NULL);
  DBUG_ASSERT(table_share != NULL);

  /* Aggregate to TABLE_LOCK_SUMMARY */
  table_share->m_table_stat.aggregate_lock(table_stat);
  table_stat->fast_reset_lock();
}
1573
/**
  Destroy instrumentation for a table instance.
  Drops the reference on the table share and frees the slot;
  statistics are not aggregated here.
  @param pfs the table to destroy
*/
void destroy_table(PFS_table *pfs)
{
  DBUG_ASSERT(pfs != NULL);
  pfs->m_share->dec_refcount();
  pfs->m_lock.allocated_to_free();
  /* A slot was freed, so creation may succeed again. */
  table_full= false;
}
1585
1586 /**
1587 Create instrumentation for a socket instance.
1588 @param klass the socket class
1589 @param identity the socket descriptor
1590 @return a socket instance, or NULL
1591 */
create_socket(PFS_socket_class * klass,const my_socket * fd,const struct sockaddr * addr,socklen_t addr_len)1592 PFS_socket* create_socket(PFS_socket_class *klass, const my_socket *fd,
1593 const struct sockaddr *addr, socklen_t addr_len)
1594 {
1595 static uint PFS_ALIGNED socket_monotonic_index= 0;
1596 uint index;
1597 uint attempts= 0;
1598 PFS_socket *pfs;
1599
1600 if (socket_full)
1601 {
1602 socket_lost++;
1603 return NULL;
1604 }
1605
1606 uint fd_used= 0;
1607 uint addr_len_used= addr_len;
1608
1609 if (fd != NULL)
1610 fd_used= *fd;
1611
1612 if (addr_len_used > sizeof(sockaddr_storage))
1613 addr_len_used= sizeof(sockaddr_storage);
1614
1615 while (++attempts <= socket_max)
1616 {
1617 index= PFS_atomic::add_u32(& socket_monotonic_index, 1) % socket_max;
1618 pfs= socket_array + index;
1619
1620 if (pfs->m_lock.is_free())
1621 {
1622 if (pfs->m_lock.free_to_dirty())
1623 {
1624 pfs->m_fd= fd_used;
1625 /* There is no socket object, so we use the instrumentation. */
1626 pfs->m_identity= pfs;
1627 pfs->m_class= klass;
1628 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
1629 pfs->m_timed= klass->m_timed;
1630 pfs->m_idle= false;
1631 pfs->m_socket_stat.reset();
1632 pfs->m_thread_owner= NULL;
1633
1634 pfs->m_addr_len= addr_len_used;
1635 if ((addr != NULL) && (addr_len_used > 0))
1636 {
1637 pfs->m_addr_len= addr_len_used;
1638 memcpy(&pfs->m_sock_addr, addr, addr_len_used);
1639 }
1640 else
1641 {
1642 pfs->m_addr_len= 0;
1643 }
1644
1645 pfs->m_lock.dirty_to_allocated();
1646
1647 if (klass->is_singleton())
1648 klass->m_singleton= pfs;
1649 return pfs;
1650 }
1651 }
1652 }
1653
1654 socket_lost++;
1655 socket_full= true;
1656 return NULL;
1657 }
1658
/**
  Destroy instrumentation for a socket instance.
  Aggregates the remaining statistics to the socket class and to the
  owning thread (when any), then scrubs and frees the slot.
  @param pfs the socket to destroy
*/
void destroy_socket(PFS_socket *pfs)
{
  DBUG_ASSERT(pfs != NULL);
  PFS_socket_class *klass= pfs->m_class;

  /* Aggregate to SOCKET_SUMMARY_BY_EVENT_NAME */
  klass->m_socket_stat.m_io_stat.aggregate(&pfs->m_socket_stat.m_io_stat);

  if (klass->is_singleton())
    klass->m_singleton= NULL;

  /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME */
  PFS_thread *thread= pfs->m_thread_owner;
  if (thread != NULL)
  {
    PFS_single_stat *event_name_array;
    event_name_array= thread->m_instr_class_waits_stats;
    uint index= pfs->m_class->m_event_name_index;

    /* Combine stats for all operations */
    PFS_single_stat stat;
    pfs->m_socket_stat.m_io_stat.sum_waits(&stat);
    event_name_array[index].aggregate(&stat);
  }

  /* Scrub the slot before making it reusable. */
  pfs->m_socket_stat.reset();
  pfs->m_thread_owner= NULL;
  pfs->m_fd= 0;
  pfs->m_addr_len= 0;
  pfs->m_lock.allocated_to_free();
  socket_full= false;
}
1695
reset_mutex_waits_by_instance(void)1696 static void reset_mutex_waits_by_instance(void)
1697 {
1698 PFS_mutex *pfs= mutex_array;
1699 PFS_mutex *pfs_last= mutex_array + mutex_max;
1700
1701 for ( ; pfs < pfs_last; pfs++)
1702 pfs->m_mutex_stat.reset();
1703 }
1704
reset_rwlock_waits_by_instance(void)1705 static void reset_rwlock_waits_by_instance(void)
1706 {
1707 PFS_rwlock *pfs= rwlock_array;
1708 PFS_rwlock *pfs_last= rwlock_array + rwlock_max;
1709
1710 for ( ; pfs < pfs_last; pfs++)
1711 pfs->m_rwlock_stat.reset();
1712 }
1713
reset_cond_waits_by_instance(void)1714 static void reset_cond_waits_by_instance(void)
1715 {
1716 PFS_cond *pfs= cond_array;
1717 PFS_cond *pfs_last= cond_array + cond_max;
1718
1719 for ( ; pfs < pfs_last; pfs++)
1720 pfs->m_cond_stat.reset();
1721 }
1722
reset_file_waits_by_instance(void)1723 static void reset_file_waits_by_instance(void)
1724 {
1725 PFS_file *pfs= file_array;
1726 PFS_file *pfs_last= file_array + file_max;
1727
1728 for ( ; pfs < pfs_last; pfs++)
1729 pfs->m_file_stat.reset();
1730 }
1731
reset_socket_waits_by_instance(void)1732 static void reset_socket_waits_by_instance(void)
1733 {
1734 PFS_socket *pfs= socket_array;
1735 PFS_socket *pfs_last= socket_array + socket_max;
1736
1737 for ( ; pfs < pfs_last; pfs++)
1738 pfs->m_socket_stat.reset();
1739 }
1740
/** Reset the wait statistics per object instance. */
void reset_events_waits_by_instance(void)
{
  /* Reset every instrumented instance type in turn. */
  reset_mutex_waits_by_instance();
  reset_rwlock_waits_by_instance();
  reset_cond_waits_by_instance();
  reset_file_waits_by_instance();
  reset_socket_waits_by_instance();
}
1750
1751 /** Reset the io statistics per file instance. */
reset_file_instance_io(void)1752 void reset_file_instance_io(void)
1753 {
1754 PFS_file *pfs= file_array;
1755 PFS_file *pfs_last= file_array + file_max;
1756
1757 for ( ; pfs < pfs_last; pfs++)
1758 pfs->m_file_stat.m_io_stat.reset();
1759 }
1760
1761 /** Reset the io statistics per socket instance. */
reset_socket_instance_io(void)1762 void reset_socket_instance_io(void)
1763 {
1764 PFS_socket *pfs= socket_array;
1765 PFS_socket *pfs_last= socket_array + socket_max;
1766
1767 for ( ; pfs < pfs_last; pfs++)
1768 pfs->m_socket_stat.m_io_stat.reset();
1769 }
1770
aggregate_all_event_names(PFS_single_stat * from_array,PFS_single_stat * to_array)1771 void aggregate_all_event_names(PFS_single_stat *from_array,
1772 PFS_single_stat *to_array)
1773 {
1774 PFS_single_stat *from;
1775 PFS_single_stat *from_last;
1776 PFS_single_stat *to;
1777
1778 from= from_array;
1779 from_last= from_array + wait_class_max;
1780 to= to_array;
1781
1782 for ( ; from < from_last ; from++, to++)
1783 {
1784 if (from->m_count > 0)
1785 {
1786 to->aggregate(from);
1787 from->reset();
1788 }
1789 }
1790 }
1791
aggregate_all_event_names(PFS_single_stat * from_array,PFS_single_stat * to_array_1,PFS_single_stat * to_array_2)1792 void aggregate_all_event_names(PFS_single_stat *from_array,
1793 PFS_single_stat *to_array_1,
1794 PFS_single_stat *to_array_2)
1795 {
1796 PFS_single_stat *from;
1797 PFS_single_stat *from_last;
1798 PFS_single_stat *to_1;
1799 PFS_single_stat *to_2;
1800
1801 from= from_array;
1802 from_last= from_array + wait_class_max;
1803 to_1= to_array_1;
1804 to_2= to_array_2;
1805
1806 for ( ; from < from_last ; from++, to_1++, to_2++)
1807 {
1808 if (from->m_count > 0)
1809 {
1810 to_1->aggregate(from);
1811 to_2->aggregate(from);
1812 from->reset();
1813 }
1814 }
1815 }
1816
aggregate_all_stages(PFS_stage_stat * from_array,PFS_stage_stat * to_array)1817 void aggregate_all_stages(PFS_stage_stat *from_array,
1818 PFS_stage_stat *to_array)
1819 {
1820 PFS_stage_stat *from;
1821 PFS_stage_stat *from_last;
1822 PFS_stage_stat *to;
1823
1824 from= from_array;
1825 from_last= from_array + stage_class_max;
1826 to= to_array;
1827
1828 for ( ; from < from_last ; from++, to++)
1829 {
1830 if (from->m_timer1_stat.m_count > 0)
1831 {
1832 to->aggregate(from);
1833 from->reset();
1834 }
1835 }
1836 }
1837
aggregate_all_stages(PFS_stage_stat * from_array,PFS_stage_stat * to_array_1,PFS_stage_stat * to_array_2)1838 void aggregate_all_stages(PFS_stage_stat *from_array,
1839 PFS_stage_stat *to_array_1,
1840 PFS_stage_stat *to_array_2)
1841 {
1842 PFS_stage_stat *from;
1843 PFS_stage_stat *from_last;
1844 PFS_stage_stat *to_1;
1845 PFS_stage_stat *to_2;
1846
1847 from= from_array;
1848 from_last= from_array + stage_class_max;
1849 to_1= to_array_1;
1850 to_2= to_array_2;
1851
1852 for ( ; from < from_last ; from++, to_1++, to_2++)
1853 {
1854 if (from->m_timer1_stat.m_count > 0)
1855 {
1856 to_1->aggregate(from);
1857 to_2->aggregate(from);
1858 from->reset();
1859 }
1860 }
1861 }
1862
aggregate_all_statements(PFS_statement_stat * from_array,PFS_statement_stat * to_array)1863 void aggregate_all_statements(PFS_statement_stat *from_array,
1864 PFS_statement_stat *to_array)
1865 {
1866 PFS_statement_stat *from;
1867 PFS_statement_stat *from_last;
1868 PFS_statement_stat *to;
1869
1870 from= from_array;
1871 from_last= from_array + statement_class_max;
1872 to= to_array;
1873
1874 for ( ; from < from_last ; from++, to++)
1875 {
1876 if (from->m_timer1_stat.m_count > 0)
1877 {
1878 to->aggregate(from);
1879 from->reset();
1880 }
1881 }
1882 }
1883
aggregate_all_statements(PFS_statement_stat * from_array,PFS_statement_stat * to_array_1,PFS_statement_stat * to_array_2)1884 void aggregate_all_statements(PFS_statement_stat *from_array,
1885 PFS_statement_stat *to_array_1,
1886 PFS_statement_stat *to_array_2)
1887 {
1888 PFS_statement_stat *from;
1889 PFS_statement_stat *from_last;
1890 PFS_statement_stat *to_1;
1891 PFS_statement_stat *to_2;
1892
1893 from= from_array;
1894 from_last= from_array + statement_class_max;
1895 to_1= to_array_1;
1896 to_2= to_array_2;
1897
1898 for ( ; from < from_last ; from++, to_1++, to_2++)
1899 {
1900 if (from->m_timer1_stat.m_count > 0)
1901 {
1902 to_1->aggregate(from);
1903 to_2->aggregate(from);
1904 from->reset();
1905 }
1906 }
1907 }
1908
aggregate_thread_stats(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1909 void aggregate_thread_stats(PFS_thread *thread,
1910 PFS_account *safe_account,
1911 PFS_user *safe_user,
1912 PFS_host *safe_host)
1913 {
1914 if (likely(safe_account != NULL))
1915 {
1916 safe_account->m_disconnected_count++;
1917 return;
1918 }
1919
1920 if (safe_user != NULL)
1921 safe_user->m_disconnected_count++;
1922
1923 if (safe_host != NULL)
1924 safe_host->m_disconnected_count++;
1925
1926 /* There is no global table for connections statistics. */
1927 return;
1928 }
1929
/**
  Aggregate a thread's waits, stages, statements and connection
  statistics to its parents (account, or user and host).
  @param thread the instrumented thread
  @param safe_account the thread account, or NULL
  @param safe_user the thread user, or NULL
  @param safe_host the thread host, or NULL
*/
void aggregate_thread(PFS_thread *thread,
                      PFS_account *safe_account,
                      PFS_user *safe_user,
                      PFS_host *safe_host)
{
  aggregate_thread_waits(thread, safe_account, safe_user, safe_host);
  aggregate_thread_stages(thread, safe_account, safe_user, safe_host);
  aggregate_thread_statements(thread, safe_account, safe_user, safe_host);
  aggregate_thread_stats(thread, safe_account, safe_user, safe_host);
}
1940
aggregate_thread_waits(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1941 void aggregate_thread_waits(PFS_thread *thread,
1942 PFS_account *safe_account,
1943 PFS_user *safe_user,
1944 PFS_host *safe_host)
1945 {
1946 if (likely(safe_account != NULL))
1947 {
1948 /*
1949 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1950 to EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
1951 */
1952 aggregate_all_event_names(thread->m_instr_class_waits_stats,
1953 safe_account->m_instr_class_waits_stats);
1954
1955 return;
1956 }
1957
1958 if ((safe_user != NULL) && (safe_host != NULL))
1959 {
1960 /*
1961 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
1962 - EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME
1963 - EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME
1964 in parallel.
1965 */
1966 aggregate_all_event_names(thread->m_instr_class_waits_stats,
1967 safe_user->m_instr_class_waits_stats,
1968 safe_host->m_instr_class_waits_stats);
1969 return;
1970 }
1971
1972 if (safe_user != NULL)
1973 {
1974 /*
1975 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1976 to EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME, directly.
1977 */
1978 aggregate_all_event_names(thread->m_instr_class_waits_stats,
1979 safe_user->m_instr_class_waits_stats);
1980 return;
1981 }
1982
1983 if (safe_host != NULL)
1984 {
1985 /*
1986 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1987 to EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
1988 */
1989 aggregate_all_event_names(thread->m_instr_class_waits_stats,
1990 safe_host->m_instr_class_waits_stats);
1991 return;
1992 }
1993
1994 /* Orphan thread, clean the waits stats. */
1995 thread->reset_waits_stats();
1996 }
1997
aggregate_thread_stages(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1998 void aggregate_thread_stages(PFS_thread *thread,
1999 PFS_account *safe_account,
2000 PFS_user *safe_user,
2001 PFS_host *safe_host)
2002 {
2003 if (likely(safe_account != NULL))
2004 {
2005 /*
2006 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
2007 to EVENTS_STAGES_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
2008 */
2009 aggregate_all_stages(thread->m_instr_class_stages_stats,
2010 safe_account->m_instr_class_stages_stats);
2011
2012 return;
2013 }
2014
2015 if ((safe_user != NULL) && (safe_host != NULL))
2016 {
2017 /*
2018 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2019 - EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME
2020 - EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME
2021 in parallel.
2022 */
2023 aggregate_all_stages(thread->m_instr_class_stages_stats,
2024 safe_user->m_instr_class_stages_stats,
2025 safe_host->m_instr_class_stages_stats);
2026 return;
2027 }
2028
2029 if (safe_user != NULL)
2030 {
2031 /*
2032 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2033 - EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME
2034 - EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME
2035 in parallel.
2036 */
2037 aggregate_all_stages(thread->m_instr_class_stages_stats,
2038 safe_user->m_instr_class_stages_stats,
2039 global_instr_class_stages_array);
2040 return;
2041 }
2042
2043 if (safe_host != NULL)
2044 {
2045 /*
2046 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
2047 to EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
2048 */
2049 aggregate_all_stages(thread->m_instr_class_stages_stats,
2050 safe_host->m_instr_class_stages_stats);
2051 return;
2052 }
2053
2054 /*
2055 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
2056 to EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME.
2057 */
2058 aggregate_all_stages(thread->m_instr_class_stages_stats,
2059 global_instr_class_stages_array);
2060 }
2061
aggregate_thread_statements(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)2062 void aggregate_thread_statements(PFS_thread *thread,
2063 PFS_account *safe_account,
2064 PFS_user *safe_user,
2065 PFS_host *safe_host)
2066 {
2067 if (likely(safe_account != NULL))
2068 {
2069 /*
2070 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2071 to EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
2072 */
2073 aggregate_all_statements(thread->m_instr_class_statements_stats,
2074 safe_account->m_instr_class_statements_stats);
2075
2076 return;
2077 }
2078
2079 if ((safe_user != NULL) && (safe_host != NULL))
2080 {
2081 /*
2082 Aggregate EVENTS_STATEMENT_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2083 - EVENTS_STATEMENT_SUMMARY_BY_USER_BY_EVENT_NAME
2084 - EVENTS_STATEMENT_SUMMARY_BY_HOST_BY_EVENT_NAME
2085 in parallel.
2086 */
2087 aggregate_all_statements(thread->m_instr_class_statements_stats,
2088 safe_user->m_instr_class_statements_stats,
2089 safe_host->m_instr_class_statements_stats);
2090 return;
2091 }
2092
2093 if (safe_user != NULL)
2094 {
2095 /*
2096 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2097 - EVENTS_STATEMENTS_SUMMARY_BY_USER_BY_EVENT_NAME
2098 - EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME
2099 in parallel.
2100 */
2101 aggregate_all_statements(thread->m_instr_class_statements_stats,
2102 safe_user->m_instr_class_statements_stats,
2103 global_instr_class_statements_array);
2104 return;
2105 }
2106
2107 if (safe_host != NULL)
2108 {
2109 /*
2110 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2111 to EVENTS_STATEMENTS_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
2112 */
2113 aggregate_all_statements(thread->m_instr_class_statements_stats,
2114 safe_host->m_instr_class_statements_stats);
2115 return;
2116 }
2117
2118 /*
2119 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2120 to EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME.
2121 */
2122 aggregate_all_statements(thread->m_instr_class_statements_stats,
2123 global_instr_class_statements_array);
2124 }
2125
clear_thread_account(PFS_thread * thread)2126 void clear_thread_account(PFS_thread *thread)
2127 {
2128 if (thread->m_account != NULL)
2129 {
2130 thread->m_account->release();
2131 thread->m_account= NULL;
2132 }
2133
2134 if (thread->m_user != NULL)
2135 {
2136 thread->m_user->release();
2137 thread->m_user= NULL;
2138 }
2139
2140 if (thread->m_host != NULL)
2141 {
2142 thread->m_host->release();
2143 thread->m_host= NULL;
2144 }
2145 }
2146
set_thread_account(PFS_thread * thread)2147 void set_thread_account(PFS_thread *thread)
2148 {
2149 DBUG_ASSERT(thread->m_account == NULL);
2150 DBUG_ASSERT(thread->m_user == NULL);
2151 DBUG_ASSERT(thread->m_host == NULL);
2152
2153 thread->m_account= find_or_create_account(thread,
2154 thread->m_username,
2155 thread->m_username_length,
2156 thread->m_hostname,
2157 thread->m_hostname_length);
2158
2159 if ((thread->m_account == NULL) && (thread->m_username_length > 0))
2160 thread->m_user= find_or_create_user(thread,
2161 thread->m_username,
2162 thread->m_username_length);
2163
2164 if ((thread->m_account == NULL) && (thread->m_hostname_length > 0))
2165 thread->m_host= find_or_create_host(thread,
2166 thread->m_hostname,
2167 thread->m_hostname_length);
2168 }
2169
update_mutex_derived_flags()2170 void update_mutex_derived_flags()
2171 {
2172 PFS_mutex *pfs= mutex_array;
2173 PFS_mutex *pfs_last= mutex_array + mutex_max;
2174 PFS_mutex_class *klass;
2175
2176 for ( ; pfs < pfs_last; pfs++)
2177 {
2178 klass= sanitize_mutex_class(pfs->m_class);
2179 if (likely(klass != NULL))
2180 {
2181 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2182 pfs->m_timed= klass->m_timed;
2183 }
2184 else
2185 {
2186 pfs->m_enabled= false;
2187 pfs->m_timed= false;
2188 }
2189 }
2190 }
2191
update_rwlock_derived_flags()2192 void update_rwlock_derived_flags()
2193 {
2194 PFS_rwlock *pfs= rwlock_array;
2195 PFS_rwlock *pfs_last= rwlock_array + rwlock_max;
2196 PFS_rwlock_class *klass;
2197
2198 for ( ; pfs < pfs_last; pfs++)
2199 {
2200 klass= sanitize_rwlock_class(pfs->m_class);
2201 if (likely(klass != NULL))
2202 {
2203 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2204 pfs->m_timed= klass->m_timed;
2205 }
2206 else
2207 {
2208 pfs->m_enabled= false;
2209 pfs->m_timed= false;
2210 }
2211 }
2212 }
2213
update_cond_derived_flags()2214 void update_cond_derived_flags()
2215 {
2216 PFS_cond *pfs= cond_array;
2217 PFS_cond *pfs_last= cond_array + cond_max;
2218 PFS_cond_class *klass;
2219
2220 for ( ; pfs < pfs_last; pfs++)
2221 {
2222 klass= sanitize_cond_class(pfs->m_class);
2223 if (likely(klass != NULL))
2224 {
2225 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2226 pfs->m_timed= klass->m_timed;
2227 }
2228 else
2229 {
2230 pfs->m_enabled= false;
2231 pfs->m_timed= false;
2232 }
2233 }
2234 }
2235
update_file_derived_flags()2236 void update_file_derived_flags()
2237 {
2238 PFS_file *pfs= file_array;
2239 PFS_file *pfs_last= file_array + file_max;
2240 PFS_file_class *klass;
2241
2242 for ( ; pfs < pfs_last; pfs++)
2243 {
2244 klass= sanitize_file_class(pfs->m_class);
2245 if (likely(klass != NULL))
2246 {
2247 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2248 pfs->m_timed= klass->m_timed;
2249 }
2250 else
2251 {
2252 pfs->m_enabled= false;
2253 pfs->m_timed= false;
2254 }
2255 }
2256 }
2257
update_table_derived_flags()2258 void update_table_derived_flags()
2259 {
2260 PFS_table *pfs= table_array;
2261 PFS_table *pfs_last= table_array + table_max;
2262 PFS_table_share *share;
2263
2264 for ( ; pfs < pfs_last; pfs++)
2265 {
2266 share= sanitize_table_share(pfs->m_share);
2267 if (likely(share != NULL))
2268 {
2269 pfs->m_io_enabled= share->m_enabled &&
2270 flag_global_instrumentation && global_table_io_class.m_enabled;
2271 pfs->m_io_timed= share->m_timed && global_table_io_class.m_timed;
2272 pfs->m_lock_enabled= share->m_enabled &&
2273 flag_global_instrumentation && global_table_lock_class.m_enabled;
2274 pfs->m_lock_timed= share->m_timed && global_table_lock_class.m_timed;
2275 }
2276 else
2277 {
2278 pfs->m_io_enabled= false;
2279 pfs->m_io_timed= false;
2280 pfs->m_lock_enabled= false;
2281 pfs->m_lock_timed= false;
2282 }
2283 }
2284 }
2285
update_socket_derived_flags()2286 void update_socket_derived_flags()
2287 {
2288 PFS_socket *pfs= socket_array;
2289 PFS_socket *pfs_last= socket_array + socket_max;
2290 PFS_socket_class *klass;
2291
2292 for ( ; pfs < pfs_last; pfs++)
2293 {
2294 klass= sanitize_socket_class(pfs->m_class);
2295 if (likely(klass != NULL))
2296 {
2297 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2298 pfs->m_timed= klass->m_timed;
2299 }
2300 else
2301 {
2302 pfs->m_enabled= false;
2303 pfs->m_timed= false;
2304 }
2305 }
2306 }
2307
/**
  Recompute the derived flags of all instrument instances that have
  per-instance state: mutexes, rwlocks, conditions, files, tables and
  sockets. Each helper combines its class (or table share) flags with
  the global instrumentation flag.
*/
void update_instruments_derived_flags()
{
  update_mutex_derived_flags();
  update_rwlock_derived_flags();
  update_cond_derived_flags();
  update_file_derived_flags();
  update_table_derived_flags();
  update_socket_derived_flags();
  /* nothing for stages and statements (no instances) */
}
2318
2319 /** @} */
2320