1 /* Copyright (c) 2008, 2021, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software Foundation,
21 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
22
23 /**
24 @file storage/perfschema/pfs_instr.cc
25 Performance schema instruments (implementation).
26 */
27
28 #include <my_global.h>
29 #include <string.h>
30
31 #include "my_sys.h"
32 #include "pfs.h"
33 #include "pfs_stat.h"
34 #include "pfs_instr.h"
35 #include "pfs_host.h"
36 #include "pfs_user.h"
37 #include "pfs_account.h"
38 #include "pfs_global.h"
39 #include "pfs_instr_class.h"
40 #include "pfs_buffer_container.h"
41 #include "pfs_builtin_memory.h"
42
/** Number of nested statements lost. @sa statement_stack_max. */
ulong nested_statement_lost= 0;

/**
  @addtogroup Performance_schema_buffers
  @{
*/

/**
  Size of the file handle array. @sa file_handle_array.
  Signed value, for easier comparisons with a file descriptor number.
*/
long file_handle_max= 0;
/** True when @c file_handle_array is full. */
bool file_handle_full;
/** Number of file handle lost. @sa file_handle_array */
ulong file_handle_lost= 0;
/** Number of EVENTS_WAITS_HISTORY records per thread. */
ulong events_waits_history_per_thread= 0;
/** Number of EVENTS_STAGES_HISTORY records per thread. */
ulong events_stages_history_per_thread= 0;
/** Number of EVENTS_STATEMENTS_HISTORY records per thread. */
ulong events_statements_history_per_thread= 0;
/** Maximum number of nested statements per thread. */
uint statement_stack_max= 0;
/** Maximum length of a statement digest, in bytes. */
size_t pfs_max_digest_length= 0;
/** Maximum length of captured SQL text, in bytes. */
size_t pfs_max_sqltext= 0;
/** Number of locker lost. @sa LOCKER_STACK_SIZE. */
ulong locker_lost= 0;
/** Number of statements lost. @sa STATEMENT_STACK_SIZE. */
ulong statement_lost= 0;
/** Size of connection attribute storage per thread */
ulong session_connect_attrs_size_per_thread;
/** Number of connection attributes lost */
ulong session_connect_attrs_lost= 0;

/** Number of EVENTS_TRANSACTIONS_HISTORY records per thread. */
ulong events_transactions_history_per_thread= 0;

/**
  File instrumentation handle array.
  @sa file_handle_max
  @sa file_handle_lost
*/
PFS_file **file_handle_array= NULL;

/** Global statistics, one slot per stage class. @sa stage_class_max. */
PFS_stage_stat *global_instr_class_stages_array= NULL;
/** Global statistics, one slot per statement class. @sa statement_class_max. */
PFS_statement_stat *global_instr_class_statements_array= NULL;
/** Global statistics, one slot per memory class. @sa memory_class_max. */
PFS_memory_stat *global_instr_class_memory_array= NULL;

/** Counter used to assign PFS_thread::m_thread_internal_id values. */
static PFS_ALIGNED PFS_cacheline_uint64 thread_internal_id_counter;

/** Hash table for instrumented files. */
LF_HASH filename_hash;
/** True if filename_hash is initialized. */
static bool filename_hash_inited= false;
97
98 /**
99 Initialize all the instruments instance buffers.
100 @param param sizing parameters
101 @return 0 on success
102 */
int init_instruments(const PFS_global_param *param)
{
  uint index;

  /* Make sure init_event_name_sizing is called */
  assert(wait_class_max != 0);

  /* Copy the sizing parameters into the module globals. */
  file_handle_max= param->m_file_handle_sizing;
  file_handle_full= false;
  file_handle_lost= 0;

  pfs_max_digest_length= param->m_max_digest_length;
  pfs_max_sqltext= param->m_max_sql_text_length;

  events_waits_history_per_thread= param->m_events_waits_history_sizing;

  events_stages_history_per_thread= param->m_events_stages_history_sizing;

  events_statements_history_per_thread= param->m_events_statements_history_sizing;

  statement_stack_max= param->m_statement_stack_sizing;

  events_transactions_history_per_thread= param->m_events_transactions_history_sizing;

  session_connect_attrs_size_per_thread= param->m_session_connect_attrs_sizing;
  session_connect_attrs_lost= 0;

  file_handle_array= NULL;

  thread_internal_id_counter.m_u64= 0;

  /*
    Initialize each instance container.
    Any failure aborts initialization; cleanup_instruments()
    tolerates partially initialized state.
  */
  if (global_mutex_container.init(param->m_mutex_sizing))
    return 1;

  if (global_rwlock_container.init(param->m_rwlock_sizing))
    return 1;

  if (global_cond_container.init(param->m_cond_sizing))
    return 1;

  if (global_file_container.init(param->m_file_sizing))
    return 1;

  /* A sizing of 0 means the file handle instrumentation is disabled. */
  if (file_handle_max > 0)
  {
    file_handle_array= PFS_MALLOC_ARRAY(& builtin_memory_file_handle,
                                        file_handle_max,
                                        sizeof(PFS_file*), PFS_file*,
                                        MYF(MY_ZEROFILL));
    if (unlikely(file_handle_array == NULL))
      return 1;
  }

  if (global_table_container.init(param->m_table_sizing))
    return 1;

  if (global_socket_container.init(param->m_socket_sizing))
    return 1;

  if (global_mdl_container.init(param->m_metadata_lock_sizing))
    return 1;

  if (global_thread_container.init(param->m_thread_sizing))
    return 1;

  /* Allocate and reset the global per-class statistic arrays. */
  if (stage_class_max > 0)
  {
    global_instr_class_stages_array=
      PFS_MALLOC_ARRAY(& builtin_memory_global_stages,
                       stage_class_max,
                       sizeof(PFS_stage_stat), PFS_stage_stat,
                       MYF(MY_ZEROFILL));
    if (unlikely(global_instr_class_stages_array == NULL))
      return 1;

    for (index= 0; index < stage_class_max; index++)
      global_instr_class_stages_array[index].reset();
  }

  if (statement_class_max > 0)
  {
    global_instr_class_statements_array=
      PFS_MALLOC_ARRAY(& builtin_memory_global_statements,
                       statement_class_max,
                       sizeof(PFS_statement_stat), PFS_statement_stat,
                       MYF(MY_ZEROFILL));
    if (unlikely(global_instr_class_statements_array == NULL))
      return 1;

    for (index= 0; index < statement_class_max; index++)
      global_instr_class_statements_array[index].reset();
  }

  if (memory_class_max > 0)
  {
    global_instr_class_memory_array=
      PFS_MALLOC_ARRAY(& builtin_memory_global_memory,
                       memory_class_max,
                       sizeof(PFS_memory_stat), PFS_memory_stat,
                       MYF(MY_ZEROFILL));
    if (unlikely(global_instr_class_memory_array == NULL))
      return 1;

    for (index= 0; index < memory_class_max; index++)
      global_instr_class_memory_array[index].reset();
  }

  return 0;
}
212
213 /** Cleanup all the instruments buffers. */
/** Cleanup all the instruments buffers. */
void cleanup_instruments(void)
{
  /* Release the instance containers allocated by init_instruments(). */
  global_mutex_container.cleanup();
  global_rwlock_container.cleanup();
  global_cond_container.cleanup();
  global_file_container.cleanup();

  /* PFS_FREE_ARRAY tolerates a NULL array pointer. */
  PFS_FREE_ARRAY(& builtin_memory_file_handle,
                 file_handle_max, sizeof(PFS_file*),
                 file_handle_array);
  file_handle_array= NULL;
  file_handle_max= 0;

  global_table_container.cleanup();
  global_socket_container.cleanup();
  global_mdl_container.cleanup();
  global_thread_container.cleanup();

  /* Release the global per-class statistic arrays. */
  PFS_FREE_ARRAY(& builtin_memory_global_stages,
                 stage_class_max,
                 sizeof(PFS_stage_stat),
                 global_instr_class_stages_array);
  global_instr_class_stages_array= NULL;

  PFS_FREE_ARRAY(& builtin_memory_global_statements,
                 statement_class_max,
                 sizeof(PFS_statement_stat),
                 global_instr_class_statements_array);
  global_instr_class_statements_array= NULL;

  PFS_FREE_ARRAY(& builtin_memory_global_memory,
                 memory_class_max,
                 sizeof(PFS_memory_stat),
                 global_instr_class_memory_array);
  global_instr_class_memory_array= NULL;
}
250
251 C_MODE_START
252 /** Get hash table key for instrumented files. */
filename_hash_get_key(const uchar * entry,size_t * length,my_bool)253 static uchar *filename_hash_get_key(const uchar *entry, size_t *length,
254 my_bool)
255 {
256 const PFS_file * const *typed_entry;
257 const PFS_file *file;
258 const void *result;
259 typed_entry= reinterpret_cast<const PFS_file* const *> (entry);
260 assert(typed_entry != NULL);
261 file= *typed_entry;
262 assert(file != NULL);
263 *length= file->m_filename_length;
264 result= file->m_filename;
265 return const_cast<uchar*> (reinterpret_cast<const uchar*> (result));
266 }
267 C_MODE_END
268
269 /**
270 Initialize the file name hash.
271 @return 0 on success
272 */
init_file_hash(const PFS_global_param * param)273 int init_file_hash(const PFS_global_param *param)
274 {
275 if ((! filename_hash_inited) && (param->m_file_sizing != 0))
276 {
277 lf_hash_init(&filename_hash, sizeof(PFS_file*), LF_HASH_UNIQUE,
278 0, 0, filename_hash_get_key, &my_charset_bin);
279 filename_hash_inited= true;
280 }
281 return 0;
282 }
283
284 /** Cleanup the file name hash. */
cleanup_file_hash(void)285 void cleanup_file_hash(void)
286 {
287 if (filename_hash_inited)
288 {
289 lf_hash_destroy(&filename_hash);
290 filename_hash_inited= false;
291 }
292 }
293
294 /**
295 Create instrumentation for a mutex instance.
296 @param klass the mutex class
297 @param identity the mutex address
298 @return a mutex instance, or NULL
299 */
create_mutex(PFS_mutex_class * klass,const void * identity)300 PFS_mutex* create_mutex(PFS_mutex_class *klass, const void *identity)
301 {
302 PFS_mutex *pfs;
303 pfs_dirty_state dirty_state;
304
305 pfs= global_mutex_container.allocate(& dirty_state, klass->m_volatility);
306 if (pfs != NULL)
307 {
308 pfs->m_identity= identity;
309 pfs->m_class= klass;
310 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
311 pfs->m_timed= klass->m_timed;
312 pfs->m_mutex_stat.reset();
313 pfs->m_owner= NULL;
314 pfs->m_last_locked= 0;
315 pfs->m_lock.dirty_to_allocated(& dirty_state);
316 if (klass->is_singleton())
317 klass->m_singleton= pfs;
318 }
319
320 return pfs;
321 }
322
323 /**
324 Destroy instrumentation for a mutex instance.
325 @param pfs the mutex to destroy
326 */
destroy_mutex(PFS_mutex * pfs)327 void destroy_mutex(PFS_mutex *pfs)
328 {
329 assert(pfs != NULL);
330 PFS_mutex_class *klass= pfs->m_class;
331 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
332 klass->m_mutex_stat.aggregate(& pfs->m_mutex_stat);
333 pfs->m_mutex_stat.reset();
334 if (klass->is_singleton())
335 klass->m_singleton= NULL;
336
337 global_mutex_container.deallocate(pfs);
338 }
339
340 /**
341 Create instrumentation for a rwlock instance.
342 @param klass the rwlock class
343 @param identity the rwlock address
344 @return a rwlock instance, or NULL
345 */
create_rwlock(PFS_rwlock_class * klass,const void * identity)346 PFS_rwlock* create_rwlock(PFS_rwlock_class *klass, const void *identity)
347 {
348 PFS_rwlock *pfs;
349 pfs_dirty_state dirty_state;
350
351 pfs= global_rwlock_container.allocate(& dirty_state);
352 if (pfs != NULL)
353 {
354 pfs->m_identity= identity;
355 pfs->m_class= klass;
356 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
357 pfs->m_timed= klass->m_timed;
358 pfs->m_rwlock_stat.reset();
359 pfs->m_writer= NULL;
360 pfs->m_readers= 0;
361 pfs->m_last_written= 0;
362 pfs->m_last_read= 0;
363 pfs->m_lock.dirty_to_allocated(& dirty_state);
364 if (klass->is_singleton())
365 klass->m_singleton= pfs;
366 }
367
368 return pfs;
369 }
370
371 /**
372 Destroy instrumentation for a rwlock instance.
373 @param pfs the rwlock to destroy
374 */
destroy_rwlock(PFS_rwlock * pfs)375 void destroy_rwlock(PFS_rwlock *pfs)
376 {
377 assert(pfs != NULL);
378 PFS_rwlock_class *klass= pfs->m_class;
379 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
380 klass->m_rwlock_stat.aggregate(& pfs->m_rwlock_stat);
381 pfs->m_rwlock_stat.reset();
382 if (klass->is_singleton())
383 klass->m_singleton= NULL;
384
385 global_rwlock_container.deallocate(pfs);
386 }
387
388 /**
389 Create instrumentation for a condition instance.
390 @param klass the condition class
391 @param identity the condition address
392 @return a condition instance, or NULL
393 */
create_cond(PFS_cond_class * klass,const void * identity)394 PFS_cond* create_cond(PFS_cond_class *klass, const void *identity)
395 {
396 PFS_cond *pfs;
397 pfs_dirty_state dirty_state;
398
399 pfs= global_cond_container.allocate(& dirty_state);
400 if (pfs != NULL)
401 {
402 pfs->m_identity= identity;
403 pfs->m_class= klass;
404 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
405 pfs->m_timed= klass->m_timed;
406 pfs->m_cond_stat.reset();
407 pfs->m_lock.dirty_to_allocated(& dirty_state);
408 if (klass->is_singleton())
409 klass->m_singleton= pfs;
410 }
411
412 return pfs;
413 }
414
415 /**
416 Destroy instrumentation for a condition instance.
417 @param pfs the condition to destroy
418 */
destroy_cond(PFS_cond * pfs)419 void destroy_cond(PFS_cond *pfs)
420 {
421 assert(pfs != NULL);
422 PFS_cond_class *klass= pfs->m_class;
423 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
424 klass->m_cond_stat.aggregate(& pfs->m_cond_stat);
425 pfs->m_cond_stat.reset();
426 if (klass->is_singleton())
427 klass->m_singleton= NULL;
428
429 global_cond_container.deallocate(pfs);
430 }
431
get_current_thread()432 PFS_thread* PFS_thread::get_current_thread()
433 {
434 return static_cast<PFS_thread*>(my_get_thread_local(THR_PFS));
435 }
436
reset_session_connect_attrs()437 void PFS_thread::reset_session_connect_attrs()
438 {
439 m_session_connect_attrs_length= 0;
440 m_session_connect_attrs_cs_number= 0;
441
442 if ((m_session_connect_attrs != NULL) &&
443 (session_connect_attrs_size_per_thread > 0) )
444 {
445 /* Do not keep user data */
446 memset(m_session_connect_attrs, 0, session_connect_attrs_size_per_thread);
447 }
448 }
449
set_history_derived_flags()450 void PFS_thread::set_history_derived_flags()
451 {
452 if (m_history)
453 {
454 m_flag_events_waits_history= flag_events_waits_history;
455 m_flag_events_waits_history_long= flag_events_waits_history_long;
456 m_flag_events_stages_history= flag_events_stages_history;
457 m_flag_events_stages_history_long= flag_events_stages_history_long;
458 m_flag_events_statements_history= flag_events_statements_history;
459 m_flag_events_statements_history_long= flag_events_statements_history_long;
460 m_flag_events_transactions_history= flag_events_transactions_history;
461 m_flag_events_transactions_history_long= flag_events_transactions_history_long;
462 }
463 else
464 {
465 m_flag_events_waits_history= false;
466 m_flag_events_waits_history_long= false;
467 m_flag_events_stages_history= false;
468 m_flag_events_stages_history_long= false;
469 m_flag_events_statements_history= false;
470 m_flag_events_statements_history_long= false;
471 m_flag_events_transactions_history= false;
472 m_flag_events_transactions_history_long= false;
473 }
474 }
475
/*
  Propagate a memory statistic delta up the aggregation hierarchy:
  account if present, otherwise user and/or host, otherwise global.
*/
void PFS_thread::carry_memory_stat_delta(PFS_memory_stat_delta *delta, uint index)
{
  if (m_account != NULL)
  {
    /*
      The account absorbs the delta; presumably it rolls up to
      user/host later — see pfs_account.
    */
    m_account->carry_memory_stat_delta(delta, index);
    return;
  }

  if (m_user != NULL)
  {
    m_user->carry_memory_stat_delta(delta, index);
    /* do not return, need to process m_host below */
  }

  if (m_host != NULL)
  {
    m_host->carry_memory_stat_delta(delta, index);
    return;
  }

  /*
    Reached when neither account nor host is attached
    (including the user-only case above).
  */
  carry_global_memory_stat_delta(delta, index);
}
498
carry_global_memory_stat_delta(PFS_memory_stat_delta * delta,uint index)499 void carry_global_memory_stat_delta(PFS_memory_stat_delta *delta, uint index)
500 {
501 PFS_memory_stat *stat;
502 PFS_memory_stat_delta delta_buffer;
503
504 stat= & global_instr_class_memory_array[index];
505 (void) stat->apply_delta(delta, &delta_buffer);
506 }
507
508 /**
509 Create instrumentation for a thread instance.
510 @param klass the thread class
511 @param identity the thread address,
512 or a value characteristic of this thread
513 @param processlist_id the PROCESSLIST id,
514 or 0 if unknown
515 @return a thread instance, or NULL
516 */
PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
                          ulonglong processlist_id)
{
  PFS_thread *pfs;
  pfs_dirty_state dirty_state;

  pfs= global_thread_container.allocate(& dirty_state);
  if (pfs != NULL)
  {
    /* Assign a unique, monotonically increasing internal thread id. */
    pfs->m_thread_internal_id=
      PFS_atomic::add_u64(&thread_internal_id_counter.m_u64, 1);
    pfs->m_parent_thread_internal_id= 0;
    pfs->m_processlist_id= static_cast<ulong>(processlist_id);
    pfs->m_thread_os_id= 0;
    /* Event ids start at 1; 0 means "no event". */
    pfs->m_event_id= 1;
    pfs->m_stmt_lock.set_allocated();
    pfs->m_session_lock.set_allocated();
    /* New threads start instrumented, with history collection on. */
    pfs->set_enabled(true);
    pfs->m_disable_instrumentation= false;
    pfs->set_history(true);
    pfs->m_class= klass;
    pfs->m_events_waits_current= & pfs->m_events_waits_stack[WAIT_STACK_BOTTOM];
    /* Reset all per-thread history rings. */
    pfs->m_waits_history_full= false;
    pfs->m_waits_history_index= 0;
    pfs->m_stages_history_full= false;
    pfs->m_stages_history_index= 0;
    pfs->m_statements_history_full= false;
    pfs->m_statements_history_index= 0;
    pfs->m_transactions_history_full= false;
    pfs->m_transactions_history_index= 0;

    pfs->reset_stats();
    pfs->reset_session_connect_attrs();

    /* LF_HASH pins are acquired lazily, on first use. */
    pfs->m_filename_hash_pins= NULL;
    pfs->m_table_share_hash_pins= NULL;
    pfs->m_setup_actor_hash_pins= NULL;
    pfs->m_setup_object_hash_pins= NULL;
    pfs->m_user_hash_pins= NULL;
    pfs->m_account_hash_pins= NULL;
    pfs->m_host_hash_pins= NULL;
    pfs->m_digest_hash_pins= NULL;
    pfs->m_program_hash_pins= NULL;

    /* No session identity yet. */
    pfs->m_username_length= 0;
    pfs->m_hostname_length= 0;
    pfs->m_dbname_length= 0;
    pfs->m_command= 0;
    pfs->m_start_time= 0;
    pfs->m_stage= 0;
    pfs->m_stage_progress= NULL;
    pfs->m_processlist_info[0]= '\0';
    pfs->m_processlist_info_length= 0;
    pfs->m_connection_type= NO_VIO_TYPE;

    pfs->m_thd= NULL;
    pfs->m_host= NULL;
    pfs->m_user= NULL;
    pfs->m_account= NULL;
    set_thread_account(pfs);

    /*
      For child waits, by default,
      - NESTING_EVENT_ID is NULL
      - NESTING_EVENT_TYPE is NULL
    */
    PFS_events_waits *child_wait= & pfs->m_events_waits_stack[0];
    child_wait->m_event_id= 0;

    /*
      For child stages, by default,
      - NESTING_EVENT_ID is NULL
      - NESTING_EVENT_TYPE is NULL
    */
    PFS_events_stages *child_stage= & pfs->m_stage_current;
    child_stage->m_nesting_event_id= 0;

    pfs->m_events_statements_count= 0;
    pfs->m_transaction_current.m_event_id= 0;

    /* Publish the record: dirty -> allocated. */
    pfs->m_lock.dirty_to_allocated(& dirty_state);
  }

  return pfs;
}
602
/** Sanitize a PFS_mutex pointer. @sa sanitize_thread. */
PFS_mutex *sanitize_mutex(PFS_mutex *unsafe)
{
  return global_mutex_container.sanitize(unsafe);
}

/** Sanitize a PFS_rwlock pointer. @sa sanitize_thread. */
PFS_rwlock *sanitize_rwlock(PFS_rwlock *unsafe)
{
  return global_rwlock_container.sanitize(unsafe);
}

/** Sanitize a PFS_cond pointer. @sa sanitize_thread. */
PFS_cond *sanitize_cond(PFS_cond *unsafe)
{
  return global_cond_container.sanitize(unsafe);
}

/**
  Sanitize a PFS_thread pointer.
  Validate that the PFS_thread is part of thread_array.
  Sanitizing data is required when the data can be
  damaged with expected race conditions, for example
  involving EVENTS_WAITS_HISTORY_LONG.
  @param unsafe the pointer to sanitize
  @return a valid pointer, or NULL
*/
PFS_thread *sanitize_thread(PFS_thread *unsafe)
{
  return global_thread_container.sanitize(unsafe);
}

/** Sanitize a PFS_file pointer. @sa sanitize_thread. */
PFS_file *sanitize_file(PFS_file *unsafe)
{
  return global_file_container.sanitize(unsafe);
}

/** Sanitize a PFS_socket pointer. @sa sanitize_thread. */
PFS_socket *sanitize_socket(PFS_socket *unsafe)
{
  return global_socket_container.sanitize(unsafe);
}

/** Sanitize a PFS_metadata_lock pointer. @sa sanitize_thread. */
PFS_metadata_lock *sanitize_metadata_lock(PFS_metadata_lock *unsafe)
{
  return global_mdl_container.sanitize(unsafe);
}
646
647 /**
648 Destroy instrumentation for a thread instance.
649 @param pfs the thread to destroy
650 */
destroy_thread(PFS_thread * pfs)651 void destroy_thread(PFS_thread *pfs)
652 {
653 assert(pfs != NULL);
654 pfs->reset_session_connect_attrs();
655 if (pfs->m_account != NULL)
656 {
657 pfs->m_account->release();
658 pfs->m_account= NULL;
659 assert(pfs->m_user == NULL);
660 assert(pfs->m_host == NULL);
661 }
662 else
663 {
664 if (pfs->m_user != NULL)
665 {
666 pfs->m_user->release();
667 pfs->m_user= NULL;
668 }
669 if (pfs->m_host != NULL)
670 {
671 pfs->m_host->release();
672 pfs->m_host= NULL;
673 }
674 }
675 if (pfs->m_filename_hash_pins)
676 {
677 lf_hash_put_pins(pfs->m_filename_hash_pins);
678 pfs->m_filename_hash_pins= NULL;
679 }
680 if (pfs->m_table_share_hash_pins)
681 {
682 lf_hash_put_pins(pfs->m_table_share_hash_pins);
683 pfs->m_table_share_hash_pins= NULL;
684 }
685 if (pfs->m_setup_actor_hash_pins)
686 {
687 lf_hash_put_pins(pfs->m_setup_actor_hash_pins);
688 pfs->m_setup_actor_hash_pins= NULL;
689 }
690 if (pfs->m_setup_object_hash_pins)
691 {
692 lf_hash_put_pins(pfs->m_setup_object_hash_pins);
693 pfs->m_setup_object_hash_pins= NULL;
694 }
695 if (pfs->m_user_hash_pins)
696 {
697 lf_hash_put_pins(pfs->m_user_hash_pins);
698 pfs->m_user_hash_pins= NULL;
699 }
700 if (pfs->m_account_hash_pins)
701 {
702 lf_hash_put_pins(pfs->m_account_hash_pins);
703 pfs->m_account_hash_pins= NULL;
704 }
705 if (pfs->m_host_hash_pins)
706 {
707 lf_hash_put_pins(pfs->m_host_hash_pins);
708 pfs->m_host_hash_pins= NULL;
709 }
710 if (pfs->m_digest_hash_pins)
711 {
712 lf_hash_put_pins(pfs->m_digest_hash_pins);
713 pfs->m_digest_hash_pins= NULL;
714 }
715 if (pfs->m_program_hash_pins)
716 {
717 lf_hash_put_pins(pfs->m_program_hash_pins);
718 pfs->m_program_hash_pins= NULL;
719 }
720 global_thread_container.deallocate(pfs);
721 }
722
723 /**
724 Get the hash pins for @c filename_hash.
725 @param thread The running thread.
726 @returns The LF_HASH pins for the thread.
727 */
get_filename_hash_pins(PFS_thread * thread)728 LF_PINS* get_filename_hash_pins(PFS_thread *thread)
729 {
730 if (unlikely(thread->m_filename_hash_pins == NULL))
731 {
732 if (! filename_hash_inited)
733 return NULL;
734 thread->m_filename_hash_pins= lf_hash_get_pins(&filename_hash);
735 }
736 return thread->m_filename_hash_pins;
737 }
738
739 /**
740 Find or create instrumentation for a file instance by file name.
741 @param thread the executing instrumented thread
742 @param klass the file class
743 @param filename the file name
744 @param len the length in bytes of filename
745 @param create create a file instance if none found
746 @return a file instance, or NULL
747 */
PFS_file*
find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
                    const char *filename, uint len, bool create)
{
  PFS_file *pfs;

  /* A NULL klass is only tolerated when not creating. */
  assert(klass != NULL || ! create);

  LF_PINS *pins= get_filename_hash_pins(thread);
  if (unlikely(pins == NULL))
  {
    global_file_container.m_lost++;
    return NULL;
  }

  char safe_buffer[FN_REFLEN];
  const char *safe_filename;

  if (len >= FN_REFLEN)
  {
    /*
      The instrumented code uses file names that exceeds FN_REFLEN.
      This could be legal for instrumentation on non mysys APIs,
      so we support it.
      Truncate the file name so that:
      - it fits into pfs->m_filename
      - it is safe to use mysys apis to normalize the file name.
    */
    memcpy(safe_buffer, filename, FN_REFLEN - 1);
    safe_buffer[FN_REFLEN - 1]= 0;
    safe_filename= safe_buffer;
  }
  else
    safe_filename= filename;

  /*
    Normalize the file name to avoid duplicates when using aliases:
    - absolute or relative paths
    - symbolic links
    Names are resolved as follows:
    - /real/path/to/real_file ==> same
    - /path/with/link/to/real_file ==> /real/path/to/real_file
    - real_file ==> /real/path/to/real_file
    - ./real_file ==> /real/path/to/real_file
    - /real/path/to/sym_link ==> same
    - /path/with/link/to/sym_link ==> /real/path/to/sym_link
    - sym_link ==> /real/path/to/sym_link
    - ./sym_link ==> /real/path/to/sym_link
    When the last component of a file is a symbolic link,
    the last component is *not* resolved, so that all file io
    operations on a link (create, read, write, delete) are counted
    against the link itself, not the target file.
    Resolving the name would lead to create counted against the link,
    and read/write/delete counted against the target, leading to
    incoherent results and instrumentation leaks.
    Also note that, when creating files, this name resolution
    works properly for files that do not exist (yet) on the file system.
  */
  char buffer[FN_REFLEN];
  char dirbuffer[FN_REFLEN];
  size_t dirlen;
  const char *normalized_filename;
  uint normalized_length;

  dirlen= dirname_length(safe_filename);
  if (dirlen == 0)
  {
    /* No directory part: resolve relative to "./". */
    dirbuffer[0]= FN_CURLIB;
    dirbuffer[1]= FN_LIBCHAR;
    dirbuffer[2]= '\0';
  }
  else
  {
    memcpy(dirbuffer, safe_filename, dirlen);
    dirbuffer[dirlen]= '\0';
  }

  /* Resolve the directory part only, keeping the last component as is. */
  if (my_realpath(buffer, dirbuffer, MYF(0)) != 0)
  {
    global_file_container.m_lost++;
    return NULL;
  }

  /* Append the unresolved file name to the resolved path */
  char *ptr= buffer + strlen(buffer);
  char *buf_end= &buffer[sizeof(buffer)-1];
  if ((buf_end > ptr) && (*(ptr-1) != FN_LIBCHAR))
    *ptr++= FN_LIBCHAR;
  if (buf_end > ptr)
    strncpy(ptr, safe_filename + dirlen, buf_end - ptr);
  *buf_end= '\0';

  normalized_filename= buffer;
  normalized_length= (uint)strlen(normalized_filename);

  PFS_file **entry;
  uint retry_count= 0;
  const uint retry_max= 3;
  pfs_dirty_state dirty_state;

search:

  /* Look for an existing record under the normalized name. */
  entry= reinterpret_cast<PFS_file**>
    (lf_hash_search(&filename_hash, pins,
                    normalized_filename, normalized_length));
  if (entry && (entry != MY_ERRPTR))
  {
    pfs= *entry;
    pfs->m_file_stat.m_open_count++;
    lf_hash_search_unpin(pins);
    return pfs;
  }

  lf_hash_search_unpin(pins);

  if (! create)
  {
    /* No lost counter, just looking for the file existence. */
    return NULL;
  }

  pfs= global_file_container.allocate(& dirty_state);
  if (pfs != NULL)
  {
    pfs->m_class= klass;
    pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
    pfs->m_timed= klass->m_timed;
    memcpy(pfs->m_filename, normalized_filename, normalized_length);
    pfs->m_filename[normalized_length]= '\0';
    pfs->m_filename_length= normalized_length;
    pfs->m_file_stat.m_open_count= 1;
    pfs->m_file_stat.m_io_stat.reset();
    pfs->m_identity= (const void *)pfs;
    pfs->m_temporary= false;

    int res;
    pfs->m_lock.dirty_to_allocated(& dirty_state);
    res= lf_hash_insert(&filename_hash, pins,
                        &pfs);
    if (likely(res == 0))
    {
      if (klass->is_singleton())
        klass->m_singleton= pfs;
      return pfs;
    }

    /* Insert failed: release the candidate record in all cases. */
    global_file_container.deallocate(pfs);

    if (res > 0)
    {
      /* Duplicate insert by another thread */
      if (++retry_count > retry_max)
      {
        /* Avoid infinite loops */
        global_file_container.m_lost++;
        return NULL;
      }
      /* Retry: the concurrent insert should now be visible. */
      goto search;
    }

    /* OOM in lf_hash_insert */
    global_file_container.m_lost++;
    return NULL;
  }

  /* Container exhausted; the container accounts for the lost record. */
  return NULL;
}
915
916 /**
917 Find a file instrumentation instance by name, and rename it
918 @param thread the executing instrumented thread
919 @param old_filename the file to be renamed
920 @param old_len the length in bytes of the old filename
921 @param new_filename the new file name
922 @param new_len the length in bytes of the new filename
923 */
find_and_rename_file(PFS_thread * thread,const char * old_filename,uint old_len,const char * new_filename,uint new_len)924 void find_and_rename_file(PFS_thread *thread, const char *old_filename,
925 uint old_len, const char *new_filename, uint new_len)
926 {
927 PFS_file *pfs;
928
929 assert(thread != NULL);
930
931 LF_PINS *pins= get_filename_hash_pins(thread);
932 if (unlikely(pins == NULL))
933 {
934 global_file_container.m_lost++;
935 return;
936 }
937
938 /*
939 Normalize the old file name.
940 */
941 char safe_buffer[FN_REFLEN];
942 const char *safe_filename;
943
944 if (old_len >= FN_REFLEN)
945 {
946 memcpy(safe_buffer, old_filename, FN_REFLEN - 1);
947 safe_buffer[FN_REFLEN - 1]= 0;
948 safe_filename= safe_buffer;
949 }
950 else
951 safe_filename= old_filename;
952
953 char buffer[FN_REFLEN];
954 char dirbuffer[FN_REFLEN];
955 size_t dirlen;
956 const char *normalized_filename;
957 uint normalized_length;
958
959 dirlen= dirname_length(safe_filename);
960 if (dirlen == 0)
961 {
962 dirbuffer[0]= FN_CURLIB;
963 dirbuffer[1]= FN_LIBCHAR;
964 dirbuffer[2]= '\0';
965 }
966 else
967 {
968 memcpy(dirbuffer, safe_filename, dirlen);
969 dirbuffer[dirlen]= '\0';
970 }
971
972 if (my_realpath(buffer, dirbuffer, MYF(0)) != 0)
973 {
974 global_file_container.m_lost++;
975 return;
976 }
977
978 /* Append the unresolved file name to the resolved path */
979 char *ptr= buffer + strlen(buffer);
980 char *buf_end= &buffer[sizeof(buffer)-1];
981 if ((buf_end > ptr) && (*(ptr-1) != FN_LIBCHAR))
982 *ptr++= FN_LIBCHAR;
983 if (buf_end > ptr)
984 strncpy(ptr, safe_filename + dirlen, buf_end - ptr);
985 *buf_end= '\0';
986
987 normalized_filename= buffer;
988 normalized_length= (uint)strlen(normalized_filename);
989
990 PFS_file **entry;
991 entry= reinterpret_cast<PFS_file**>
992 (lf_hash_search(&filename_hash, pins,
993 normalized_filename, normalized_length));
994
995 if (entry && (entry != MY_ERRPTR))
996 pfs= *entry;
997 else
998 {
999 lf_hash_search_unpin(pins);
1000 return;
1001 }
1002
1003 lf_hash_delete(&filename_hash, pins,
1004 pfs->m_filename, pfs->m_filename_length);
1005
1006 /*
1007 Normalize the new file name.
1008 */
1009 if (new_len >= FN_REFLEN)
1010 {
1011 memcpy(safe_buffer, new_filename, FN_REFLEN - 1);
1012 safe_buffer[FN_REFLEN - 1]= 0;
1013 safe_filename= safe_buffer;
1014 }
1015 else
1016 safe_filename= new_filename;
1017
1018 dirlen= dirname_length(safe_filename);
1019 if (dirlen == 0)
1020 {
1021 dirbuffer[0]= FN_CURLIB;
1022 dirbuffer[1]= FN_LIBCHAR;
1023 dirbuffer[2]= '\0';
1024 }
1025 else
1026 {
1027 memcpy(dirbuffer, safe_filename, dirlen);
1028 dirbuffer[dirlen]= '\0';
1029 }
1030
1031 if (my_realpath(buffer, dirbuffer, MYF(0)) != 0)
1032 {
1033 global_file_container.m_lost++;
1034 return;
1035 }
1036
1037 /* Append the unresolved file name to the resolved path */
1038 ptr= buffer + strlen(buffer);
1039 buf_end= &buffer[sizeof(buffer)-1];
1040 if ((buf_end > ptr) && (*(ptr-1) != FN_LIBCHAR))
1041 *ptr++= FN_LIBCHAR;
1042 if (buf_end > ptr)
1043 strncpy(ptr, safe_filename + dirlen, buf_end - ptr);
1044 *buf_end= '\0';
1045
1046 normalized_filename= buffer;
1047 normalized_length= (uint)strlen(normalized_filename);
1048
1049 memcpy(pfs->m_filename, normalized_filename, normalized_length);
1050 pfs->m_filename[normalized_length]= '\0';
1051 pfs->m_filename_length= normalized_length;
1052
1053 int res;
1054 res= lf_hash_insert(&filename_hash, pins, &pfs);
1055
1056 if (likely(res == 0))
1057 return;
1058 else
1059 {
1060 global_file_container.deallocate(pfs);
1061 global_file_container.m_lost++;
1062 return;
1063 }
1064 }
1065
1066 /**
1067 Release instrumentation for a file instance.
1068 @param pfs the file to release
1069 */
void release_file(PFS_file *pfs)
{
  assert(pfs != NULL);
  /*
    Decrement the open handle count only; the record itself is
    removed from the hash and deallocated by destroy_file().
  */
  pfs->m_file_stat.m_open_count--;
}
1075
1076 /**
1077 Destroy instrumentation for a file instance.
1078 @param thread the executing thread instrumentation
1079 @param pfs the file to destroy
1080 */
destroy_file(PFS_thread * thread,PFS_file * pfs)1081 void destroy_file(PFS_thread *thread, PFS_file *pfs)
1082 {
1083 assert(thread != NULL);
1084 assert(pfs != NULL);
1085 PFS_file_class *klass= pfs->m_class;
1086
1087 /* Aggregate to FILE_SUMMARY_BY_EVENT_NAME */
1088 klass->m_file_stat.aggregate(& pfs->m_file_stat);
1089 pfs->m_file_stat.reset();
1090
1091 if (klass->is_singleton())
1092 klass->m_singleton= NULL;
1093
1094 LF_PINS *pins= get_filename_hash_pins(thread);
1095 assert(pins != NULL);
1096
1097 lf_hash_delete(&filename_hash, pins,
1098 pfs->m_filename, pfs->m_filename_length);
1099 if (klass->is_singleton())
1100 klass->m_singleton= NULL;
1101
1102 global_file_container.deallocate(pfs);
1103 }
1104
1105 /**
1106 Create instrumentation for a table instance.
1107 @param share the table share
1108 @param opening_thread the opening thread
1109 @param identity the table address
1110 @return a table instance, or NULL
1111 */
create_table(PFS_table_share * share,PFS_thread * opening_thread,const void * identity)1112 PFS_table* create_table(PFS_table_share *share, PFS_thread *opening_thread,
1113 const void *identity)
1114 {
1115 PFS_table *pfs;
1116 pfs_dirty_state dirty_state;
1117
1118 pfs= global_table_container.allocate(& dirty_state);
1119 if (pfs != NULL)
1120 {
1121 pfs->m_identity= identity;
1122 pfs->m_share= share;
1123 pfs->m_io_enabled= share->m_enabled &&
1124 flag_global_instrumentation && global_table_io_class.m_enabled;
1125 pfs->m_io_timed= share->m_timed && global_table_io_class.m_timed;
1126 pfs->m_lock_enabled= share->m_enabled &&
1127 flag_global_instrumentation && global_table_lock_class.m_enabled;
1128 pfs->m_lock_timed= share->m_timed && global_table_lock_class.m_timed;
1129 pfs->m_has_io_stats= false;
1130 pfs->m_has_lock_stats= false;
1131 pfs->m_internal_lock= PFS_TL_NONE;
1132 pfs->m_external_lock= PFS_TL_NONE;
1133 share->inc_refcount();
1134 pfs->m_table_stat.fast_reset();
1135 pfs->m_thread_owner= opening_thread;
1136 pfs->m_owner_event_id= opening_thread->m_event_id;
1137 pfs->m_lock.dirty_to_allocated(& dirty_state);
1138 }
1139
1140 return pfs;
1141 }
1142
sanitized_aggregate(void)1143 void PFS_table::sanitized_aggregate(void)
1144 {
1145 /*
1146 This thread could be a TRUNCATE on an aggregated summary table,
1147 and not own the table handle.
1148 */
1149 PFS_table_share *safe_share= sanitize_table_share(m_share);
1150 if (safe_share != NULL)
1151 {
1152 if (m_has_io_stats)
1153 {
1154 safe_aggregate_io(NULL, & m_table_stat, safe_share);
1155 m_has_io_stats= false;
1156 }
1157 if (m_has_lock_stats)
1158 {
1159 safe_aggregate_lock(& m_table_stat, safe_share);
1160 m_has_lock_stats= false;
1161 }
1162 }
1163 }
1164
sanitized_aggregate_io(void)1165 void PFS_table::sanitized_aggregate_io(void)
1166 {
1167 PFS_table_share *safe_share= sanitize_table_share(m_share);
1168 if (safe_share != NULL && m_has_io_stats)
1169 {
1170 safe_aggregate_io(NULL, & m_table_stat, safe_share);
1171 m_has_io_stats= false;
1172 }
1173 }
1174
sanitized_aggregate_lock(void)1175 void PFS_table::sanitized_aggregate_lock(void)
1176 {
1177 PFS_table_share *safe_share= sanitize_table_share(m_share);
1178 if (safe_share != NULL && m_has_lock_stats)
1179 {
1180 safe_aggregate_lock(& m_table_stat, safe_share);
1181 m_has_lock_stats= false;
1182 }
1183 }
1184
safe_aggregate_io(const TABLE_SHARE * optional_server_share,PFS_table_stat * table_stat,PFS_table_share * table_share)1185 void PFS_table::safe_aggregate_io(const TABLE_SHARE *optional_server_share,
1186 PFS_table_stat *table_stat,
1187 PFS_table_share *table_share)
1188 {
1189 assert(table_stat != NULL);
1190 assert(table_share != NULL);
1191
1192 uint key_count= sanitize_index_count(table_share->m_key_count);
1193
1194 PFS_table_share_index *to_stat;
1195 PFS_table_io_stat *from_stat;
1196 uint index;
1197
1198 assert(key_count <= MAX_INDEXES);
1199
1200 /* Aggregate stats for each index, if any */
1201 for (index= 0; index < key_count; index++)
1202 {
1203 from_stat= & table_stat->m_index_stat[index];
1204 if (from_stat->m_has_data)
1205 {
1206 if (optional_server_share != NULL)
1207 {
1208 /*
1209 An instrumented thread is closing a table,
1210 and capable of providing index names when
1211 creating index statistics on the fly.
1212 */
1213 to_stat= table_share->find_or_create_index_stat(optional_server_share, index);
1214 }
1215 else
1216 {
1217 /*
1218 A monitoring thread, performing TRUNCATE TABLE,
1219 is asking to flush existing stats from table handles,
1220 but it does not know about index names used in handles.
1221 If the index stat already exists, find it and aggregate to it.
1222 It the index stat does not exist yet, drop the stat and do nothing.
1223 */
1224 to_stat= table_share->find_index_stat(index);
1225 }
1226 if (to_stat != NULL)
1227 {
1228 /* Aggregate to TABLE_IO_SUMMARY */
1229 to_stat->m_stat.aggregate(from_stat);
1230 }
1231 }
1232 }
1233
1234 /* Aggregate stats for the table */
1235 from_stat= & table_stat->m_index_stat[MAX_INDEXES];
1236 if (from_stat->m_has_data)
1237 {
1238 to_stat= table_share->find_or_create_index_stat(NULL, MAX_INDEXES);
1239 if (to_stat != NULL)
1240 {
1241 /* Aggregate to TABLE_IO_SUMMARY */
1242 to_stat->m_stat.aggregate(from_stat);
1243 }
1244 }
1245
1246 table_stat->fast_reset_io();
1247 }
1248
safe_aggregate_lock(PFS_table_stat * table_stat,PFS_table_share * table_share)1249 void PFS_table::safe_aggregate_lock(PFS_table_stat *table_stat,
1250 PFS_table_share *table_share)
1251 {
1252 assert(table_stat != NULL);
1253 assert(table_share != NULL);
1254
1255 PFS_table_lock_stat *from_stat= & table_stat->m_lock_stat;
1256
1257 PFS_table_share_lock *to_stat;
1258
1259 to_stat= table_share->find_or_create_lock_stat();
1260 if (to_stat != NULL)
1261 {
1262 /* Aggregate to TABLE_LOCK_SUMMARY */
1263 to_stat->m_stat.aggregate(from_stat);
1264 }
1265
1266 table_stat->fast_reset_lock();
1267 }
1268
1269 /**
1270 Destroy instrumentation for a table instance.
1271 @param pfs the table to destroy
1272 */
destroy_table(PFS_table * pfs)1273 void destroy_table(PFS_table *pfs)
1274 {
1275 assert(pfs != NULL);
1276 pfs->m_share->dec_refcount();
1277 global_table_container.deallocate(pfs);
1278 }
1279
1280 /**
1281 Create instrumentation for a socket instance.
1282 @param klass the socket class
1283 @param fd the socket file descriptor
1284 @param addr the socket address
1285 @param addr_len the socket address length
1286 @return a socket instance, or NULL
1287 */
create_socket(PFS_socket_class * klass,const my_socket * fd,const struct sockaddr * addr,socklen_t addr_len)1288 PFS_socket* create_socket(PFS_socket_class *klass, const my_socket *fd,
1289 const struct sockaddr *addr, socklen_t addr_len)
1290 {
1291 PFS_socket *pfs;
1292 pfs_dirty_state dirty_state;
1293
1294 uint fd_used= 0;
1295 uint addr_len_used= addr_len;
1296
1297 if (fd != NULL)
1298 fd_used= *fd;
1299
1300 if (addr_len_used > sizeof(sockaddr_storage))
1301 addr_len_used= sizeof(sockaddr_storage);
1302
1303 pfs= global_socket_container.allocate(& dirty_state);
1304
1305 if (pfs != NULL)
1306 {
1307 pfs->m_fd= fd_used;
1308 /* There is no socket object, so we use the instrumentation. */
1309 pfs->m_identity= pfs;
1310 pfs->m_class= klass;
1311 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
1312 pfs->m_timed= klass->m_timed;
1313 pfs->m_idle= false;
1314 pfs->m_socket_stat.reset();
1315 pfs->m_thread_owner= NULL;
1316
1317 pfs->m_addr_len= addr_len_used;
1318 if ((addr != NULL) && (addr_len_used > 0))
1319 {
1320 pfs->m_addr_len= addr_len_used;
1321 memcpy(&pfs->m_sock_addr, addr, addr_len_used);
1322 }
1323 else
1324 {
1325 pfs->m_addr_len= 0;
1326 }
1327
1328 pfs->m_lock.dirty_to_allocated(& dirty_state);
1329
1330 if (klass->is_singleton())
1331 klass->m_singleton= pfs;
1332 }
1333
1334 return pfs;
1335 }
1336
1337 /**
1338 Destroy instrumentation for a socket instance.
1339 @param pfs the socket to destroy
1340 */
destroy_socket(PFS_socket * pfs)1341 void destroy_socket(PFS_socket *pfs)
1342 {
1343 assert(pfs != NULL);
1344 PFS_socket_class *klass= pfs->m_class;
1345
1346 /* Aggregate to SOCKET_SUMMARY_BY_EVENT_NAME */
1347 klass->m_socket_stat.m_io_stat.aggregate(&pfs->m_socket_stat.m_io_stat);
1348
1349 if (klass->is_singleton())
1350 klass->m_singleton= NULL;
1351
1352 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME */
1353 PFS_thread *thread= pfs->m_thread_owner;
1354 if (thread != NULL)
1355 {
1356 /* Combine stats for all operations */
1357 PFS_single_stat stat;
1358 pfs->m_socket_stat.m_io_stat.sum_waits(&stat);
1359 if (stat.m_count != 0)
1360 {
1361 PFS_single_stat *event_name_array;
1362 event_name_array= thread->write_instr_class_waits_stats();
1363 uint index= pfs->m_class->m_event_name_index;
1364
1365 event_name_array[index].aggregate(&stat);
1366 }
1367 }
1368
1369 pfs->m_socket_stat.reset();
1370 pfs->m_thread_owner= NULL;
1371 pfs->m_fd= 0;
1372 pfs->m_addr_len= 0;
1373
1374 global_socket_container.deallocate(pfs);
1375 }
1376
create_metadata_lock(void * identity,const MDL_key * mdl_key,opaque_mdl_type mdl_type,opaque_mdl_duration mdl_duration,opaque_mdl_status mdl_status,const char * src_file,uint src_line)1377 PFS_metadata_lock* create_metadata_lock(void *identity,
1378 const MDL_key *mdl_key,
1379 opaque_mdl_type mdl_type,
1380 opaque_mdl_duration mdl_duration,
1381 opaque_mdl_status mdl_status,
1382 const char *src_file,
1383 uint src_line)
1384 {
1385 PFS_metadata_lock *pfs;
1386 pfs_dirty_state dirty_state;
1387
1388 pfs= global_mdl_container.allocate(& dirty_state);
1389 if (pfs != NULL)
1390 {
1391 pfs->m_identity= identity;
1392 pfs->m_enabled= global_metadata_class.m_enabled && flag_global_instrumentation;
1393 pfs->m_timed= global_metadata_class.m_timed;
1394 pfs->m_mdl_key.mdl_key_init(mdl_key);
1395 pfs->m_mdl_type= mdl_type;
1396 pfs->m_mdl_duration= mdl_duration;
1397 pfs->m_mdl_status= mdl_status;
1398 pfs->m_src_file= src_file;
1399 pfs->m_src_line= src_line;
1400 pfs->m_owner_thread_id= 0;
1401 pfs->m_owner_event_id= 0;
1402 pfs->m_lock.dirty_to_allocated(& dirty_state);
1403 }
1404
1405 return pfs;
1406 }
1407
destroy_metadata_lock(PFS_metadata_lock * pfs)1408 void destroy_metadata_lock(PFS_metadata_lock *pfs)
1409 {
1410 assert(pfs != NULL);
1411 global_mdl_container.deallocate(pfs);
1412 }
1413
fct_reset_mutex_waits(PFS_mutex * pfs)1414 static void fct_reset_mutex_waits(PFS_mutex *pfs)
1415 {
1416 pfs->m_mutex_stat.reset();
1417 }
1418
reset_mutex_waits_by_instance(void)1419 static void reset_mutex_waits_by_instance(void)
1420 {
1421 global_mutex_container.apply_all(fct_reset_mutex_waits);
1422 }
1423
fct_reset_rwlock_waits(PFS_rwlock * pfs)1424 static void fct_reset_rwlock_waits(PFS_rwlock *pfs)
1425 {
1426 pfs->m_rwlock_stat.reset();
1427 }
1428
reset_rwlock_waits_by_instance(void)1429 static void reset_rwlock_waits_by_instance(void)
1430 {
1431 global_rwlock_container.apply_all(fct_reset_rwlock_waits);
1432 }
1433
fct_reset_cond_waits(PFS_cond * pfs)1434 static void fct_reset_cond_waits(PFS_cond *pfs)
1435 {
1436 pfs->m_cond_stat.reset();
1437 }
1438
reset_cond_waits_by_instance(void)1439 static void reset_cond_waits_by_instance(void)
1440 {
1441 global_cond_container.apply_all(fct_reset_cond_waits);
1442 }
1443
fct_reset_file_waits(PFS_file * pfs)1444 static void fct_reset_file_waits(PFS_file *pfs)
1445 {
1446 pfs->m_file_stat.reset();
1447 }
1448
reset_file_waits_by_instance(void)1449 static void reset_file_waits_by_instance(void)
1450 {
1451 global_file_container.apply_all(fct_reset_file_waits);
1452 }
1453
fct_reset_socket_waits(PFS_socket * pfs)1454 static void fct_reset_socket_waits(PFS_socket *pfs)
1455 {
1456 pfs->m_socket_stat.reset();
1457 }
1458
reset_socket_waits_by_instance(void)1459 static void reset_socket_waits_by_instance(void)
1460 {
1461 global_socket_container.apply_all(fct_reset_socket_waits);
1462 }
1463
1464 /** Reset the wait statistics per object instance. */
reset_events_waits_by_instance(void)1465 void reset_events_waits_by_instance(void)
1466 {
1467 reset_mutex_waits_by_instance();
1468 reset_rwlock_waits_by_instance();
1469 reset_cond_waits_by_instance();
1470 reset_file_waits_by_instance();
1471 reset_socket_waits_by_instance();
1472 }
1473
fct_reset_file_io(PFS_file * pfs)1474 static void fct_reset_file_io(PFS_file *pfs)
1475 {
1476 pfs->m_file_stat.m_io_stat.reset();
1477 }
1478
1479 /** Reset the io statistics per file instance. */
reset_file_instance_io(void)1480 void reset_file_instance_io(void)
1481 {
1482 global_file_container.apply_all(fct_reset_file_io);
1483 }
1484
fct_reset_socket_io(PFS_socket * pfs)1485 static void fct_reset_socket_io(PFS_socket *pfs)
1486 {
1487 pfs->m_socket_stat.m_io_stat.reset();
1488 }
1489
1490 /** Reset the io statistics per socket instance. */
reset_socket_instance_io(void)1491 void reset_socket_instance_io(void)
1492 {
1493 global_socket_container.apply_all(fct_reset_socket_io);
1494 }
1495
aggregate_all_event_names(PFS_single_stat * from_array,PFS_single_stat * to_array)1496 void aggregate_all_event_names(PFS_single_stat *from_array,
1497 PFS_single_stat *to_array)
1498 {
1499 PFS_single_stat *from;
1500 PFS_single_stat *from_last;
1501 PFS_single_stat *to;
1502
1503 from= from_array;
1504 from_last= from_array + wait_class_max;
1505 to= to_array;
1506
1507 for ( ; from < from_last ; from++, to++)
1508 {
1509 if (from->m_count > 0)
1510 {
1511 to->aggregate(from);
1512 from->reset();
1513 }
1514 }
1515 }
1516
aggregate_all_event_names(PFS_single_stat * from_array,PFS_single_stat * to_array_1,PFS_single_stat * to_array_2)1517 void aggregate_all_event_names(PFS_single_stat *from_array,
1518 PFS_single_stat *to_array_1,
1519 PFS_single_stat *to_array_2)
1520 {
1521 PFS_single_stat *from;
1522 PFS_single_stat *from_last;
1523 PFS_single_stat *to_1;
1524 PFS_single_stat *to_2;
1525
1526 from= from_array;
1527 from_last= from_array + wait_class_max;
1528 to_1= to_array_1;
1529 to_2= to_array_2;
1530
1531 for ( ; from < from_last ; from++, to_1++, to_2++)
1532 {
1533 if (from->m_count > 0)
1534 {
1535 to_1->aggregate(from);
1536 to_2->aggregate(from);
1537 from->reset();
1538 }
1539 }
1540 }
1541
aggregate_all_stages(PFS_stage_stat * from_array,PFS_stage_stat * to_array)1542 void aggregate_all_stages(PFS_stage_stat *from_array,
1543 PFS_stage_stat *to_array)
1544 {
1545 PFS_stage_stat *from;
1546 PFS_stage_stat *from_last;
1547 PFS_stage_stat *to;
1548
1549 from= from_array;
1550 from_last= from_array + stage_class_max;
1551 to= to_array;
1552
1553 for ( ; from < from_last ; from++, to++)
1554 {
1555 if (from->m_timer1_stat.m_count > 0)
1556 {
1557 to->aggregate(from);
1558 from->reset();
1559 }
1560 }
1561 }
1562
aggregate_all_stages(PFS_stage_stat * from_array,PFS_stage_stat * to_array_1,PFS_stage_stat * to_array_2)1563 void aggregate_all_stages(PFS_stage_stat *from_array,
1564 PFS_stage_stat *to_array_1,
1565 PFS_stage_stat *to_array_2)
1566 {
1567 PFS_stage_stat *from;
1568 PFS_stage_stat *from_last;
1569 PFS_stage_stat *to_1;
1570 PFS_stage_stat *to_2;
1571
1572 from= from_array;
1573 from_last= from_array + stage_class_max;
1574 to_1= to_array_1;
1575 to_2= to_array_2;
1576
1577 for ( ; from < from_last ; from++, to_1++, to_2++)
1578 {
1579 if (from->m_timer1_stat.m_count > 0)
1580 {
1581 to_1->aggregate(from);
1582 to_2->aggregate(from);
1583 from->reset();
1584 }
1585 }
1586 }
1587
aggregate_all_statements(PFS_statement_stat * from_array,PFS_statement_stat * to_array)1588 void aggregate_all_statements(PFS_statement_stat *from_array,
1589 PFS_statement_stat *to_array)
1590 {
1591 PFS_statement_stat *from;
1592 PFS_statement_stat *from_last;
1593 PFS_statement_stat *to;
1594
1595 from= from_array;
1596 from_last= from_array + statement_class_max;
1597 to= to_array;
1598
1599 for ( ; from < from_last ; from++, to++)
1600 {
1601 if (from->m_timer1_stat.m_count > 0)
1602 {
1603 to->aggregate(from);
1604 from->reset();
1605 }
1606 }
1607 }
1608
aggregate_all_statements(PFS_statement_stat * from_array,PFS_statement_stat * to_array_1,PFS_statement_stat * to_array_2)1609 void aggregate_all_statements(PFS_statement_stat *from_array,
1610 PFS_statement_stat *to_array_1,
1611 PFS_statement_stat *to_array_2)
1612 {
1613 PFS_statement_stat *from;
1614 PFS_statement_stat *from_last;
1615 PFS_statement_stat *to_1;
1616 PFS_statement_stat *to_2;
1617
1618 from= from_array;
1619 from_last= from_array + statement_class_max;
1620 to_1= to_array_1;
1621 to_2= to_array_2;
1622
1623 for ( ; from < from_last ; from++, to_1++, to_2++)
1624 {
1625 if (from->m_timer1_stat.m_count > 0)
1626 {
1627 to_1->aggregate(from);
1628 to_2->aggregate(from);
1629 from->reset();
1630 }
1631 }
1632 }
1633
aggregate_all_transactions(PFS_transaction_stat * from_array,PFS_transaction_stat * to_array)1634 void aggregate_all_transactions(PFS_transaction_stat *from_array,
1635 PFS_transaction_stat *to_array)
1636 {
1637 assert(from_array != NULL);
1638 assert(to_array != NULL);
1639
1640 if (from_array->count() > 0)
1641 {
1642 to_array->aggregate(from_array);
1643 from_array->reset();
1644 }
1645 }
1646
aggregate_all_transactions(PFS_transaction_stat * from_array,PFS_transaction_stat * to_array_1,PFS_transaction_stat * to_array_2)1647 void aggregate_all_transactions(PFS_transaction_stat *from_array,
1648 PFS_transaction_stat *to_array_1,
1649 PFS_transaction_stat *to_array_2)
1650 {
1651 assert(from_array != NULL);
1652 assert(to_array_1 != NULL);
1653 assert(to_array_2 != NULL);
1654
1655 if (from_array->count() > 0)
1656 {
1657 to_array_1->aggregate(from_array);
1658 to_array_2->aggregate(from_array);
1659 from_array->reset();
1660 }
1661 }
1662
aggregate_all_memory(bool alive,PFS_memory_stat * from_array,PFS_memory_stat * to_array)1663 void aggregate_all_memory(bool alive,
1664 PFS_memory_stat *from_array,
1665 PFS_memory_stat *to_array)
1666 {
1667 PFS_memory_stat *from;
1668 PFS_memory_stat *from_last;
1669 PFS_memory_stat *to;
1670
1671 from= from_array;
1672 from_last= from_array + memory_class_max;
1673 to= to_array;
1674
1675 if (alive)
1676 {
1677 for ( ; from < from_last ; from++, to++)
1678 {
1679 from->partial_aggregate_to(to);
1680 }
1681 }
1682 else
1683 {
1684 for ( ; from < from_last ; from++, to++)
1685 {
1686 from->full_aggregate_to(to);
1687 from->reset();
1688 }
1689 }
1690 }
1691
aggregate_all_memory(bool alive,PFS_memory_stat * from_array,PFS_memory_stat * to_array_1,PFS_memory_stat * to_array_2)1692 void aggregate_all_memory(bool alive,
1693 PFS_memory_stat *from_array,
1694 PFS_memory_stat *to_array_1,
1695 PFS_memory_stat *to_array_2)
1696 {
1697 PFS_memory_stat *from;
1698 PFS_memory_stat *from_last;
1699 PFS_memory_stat *to_1;
1700 PFS_memory_stat *to_2;
1701
1702 from= from_array;
1703 from_last= from_array + memory_class_max;
1704 to_1= to_array_1;
1705 to_2= to_array_2;
1706
1707 if (alive)
1708 {
1709 for ( ; from < from_last ; from++, to_1++, to_2++)
1710 {
1711 from->partial_aggregate_to(to_1, to_2);
1712 }
1713 }
1714 else
1715 {
1716 for ( ; from < from_last ; from++, to_1++, to_2++)
1717 {
1718 from->full_aggregate_to(to_1, to_2);
1719 from->reset();
1720 }
1721 }
1722 }
1723
aggregate_thread_status(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1724 void aggregate_thread_status(PFS_thread *thread,
1725 PFS_account *safe_account,
1726 PFS_user *safe_user,
1727 PFS_host *safe_host)
1728 {
1729 THD *thd= thread->m_thd;
1730
1731 if (thd == NULL)
1732 return;
1733
1734 if (likely(safe_account != NULL))
1735 {
1736 safe_account->aggregate_status_stats(&thd->status_var);
1737 return;
1738 }
1739
1740 if (safe_user != NULL)
1741 {
1742 safe_user->aggregate_status_stats(&thd->status_var);
1743 }
1744
1745 if (safe_host != NULL)
1746 {
1747 safe_host->aggregate_status_stats(&thd->status_var);
1748 }
1749 return;
1750 }
1751
aggregate_thread_stats(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1752 void aggregate_thread_stats(PFS_thread *thread,
1753 PFS_account *safe_account,
1754 PFS_user *safe_user,
1755 PFS_host *safe_host)
1756 {
1757 if (likely(safe_account != NULL))
1758 {
1759 safe_account->m_disconnected_count++;
1760 }
1761
1762 if (safe_user != NULL)
1763 {
1764 safe_user->m_disconnected_count++;
1765 }
1766
1767 if (safe_host != NULL)
1768 {
1769 safe_host->m_disconnected_count++;
1770 }
1771
1772 /* There is no global table for connections statistics. */
1773 return;
1774 }
1775
aggregate_thread(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1776 void aggregate_thread(PFS_thread *thread,
1777 PFS_account *safe_account,
1778 PFS_user *safe_user,
1779 PFS_host *safe_host)
1780 {
1781 /* No HAVE_PSI_???_INTERFACE flag, waits cover multiple instrumentations */
1782 aggregate_thread_waits(thread, safe_account, safe_user, safe_host);
1783
1784 #ifdef HAVE_PSI_STAGE_INTERFACE
1785 aggregate_thread_stages(thread, safe_account, safe_user, safe_host);
1786 #endif
1787
1788 #ifdef HAVE_PSI_STATEMENT_INTERFACE
1789 aggregate_thread_statements(thread, safe_account, safe_user, safe_host);
1790 #endif
1791
1792 #ifdef HAVE_PSI_TRANSACTION_INTERFACE
1793 aggregate_thread_transactions(thread, safe_account, safe_user, safe_host);
1794 #endif
1795
1796 #ifdef HAVE_PSI_MEMORY_INTERFACE
1797 aggregate_thread_memory(false, thread, safe_account, safe_user, safe_host);
1798 #endif
1799
1800 if (!show_compatibility_56)
1801 aggregate_thread_status(thread, safe_account, safe_user, safe_host);
1802
1803 aggregate_thread_stats(thread, safe_account, safe_user, safe_host);
1804 }
1805
aggregate_thread_waits(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1806 void aggregate_thread_waits(PFS_thread *thread,
1807 PFS_account *safe_account,
1808 PFS_user *safe_user,
1809 PFS_host *safe_host)
1810 {
1811 if (thread->read_instr_class_waits_stats() == NULL)
1812 return;
1813
1814 if (likely(safe_account != NULL))
1815 {
1816 /*
1817 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1818 to EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
1819 */
1820 aggregate_all_event_names(thread->write_instr_class_waits_stats(),
1821 safe_account->write_instr_class_waits_stats());
1822
1823 return;
1824 }
1825
1826 if ((safe_user != NULL) && (safe_host != NULL))
1827 {
1828 /*
1829 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
1830 - EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME
1831 - EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME
1832 in parallel.
1833 */
1834 aggregate_all_event_names(thread->write_instr_class_waits_stats(),
1835 safe_user->write_instr_class_waits_stats(),
1836 safe_host->write_instr_class_waits_stats());
1837 return;
1838 }
1839
1840 if (safe_user != NULL)
1841 {
1842 /*
1843 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1844 to EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME, directly.
1845 */
1846 aggregate_all_event_names(thread->write_instr_class_waits_stats(),
1847 safe_user->write_instr_class_waits_stats());
1848 return;
1849 }
1850
1851 if (safe_host != NULL)
1852 {
1853 /*
1854 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1855 to EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
1856 */
1857 aggregate_all_event_names(thread->write_instr_class_waits_stats(),
1858 safe_host->write_instr_class_waits_stats());
1859 return;
1860 }
1861
1862 /* Orphan thread, clean the waits stats. */
1863 thread->reset_waits_stats();
1864 }
1865
aggregate_thread_stages(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1866 void aggregate_thread_stages(PFS_thread *thread,
1867 PFS_account *safe_account,
1868 PFS_user *safe_user,
1869 PFS_host *safe_host)
1870 {
1871 if (thread->read_instr_class_stages_stats() == NULL)
1872 return;
1873
1874 if (likely(safe_account != NULL))
1875 {
1876 /*
1877 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
1878 to EVENTS_STAGES_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
1879 */
1880 aggregate_all_stages(thread->write_instr_class_stages_stats(),
1881 safe_account->write_instr_class_stages_stats());
1882
1883 return;
1884 }
1885
1886 if ((safe_user != NULL) && (safe_host != NULL))
1887 {
1888 /*
1889 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
1890 - EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME
1891 - EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME
1892 in parallel.
1893 */
1894 aggregate_all_stages(thread->write_instr_class_stages_stats(),
1895 safe_user->write_instr_class_stages_stats(),
1896 safe_host->write_instr_class_stages_stats());
1897 return;
1898 }
1899
1900 if (safe_user != NULL)
1901 {
1902 /*
1903 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
1904 - EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME
1905 - EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME
1906 in parallel.
1907 */
1908 aggregate_all_stages(thread->write_instr_class_stages_stats(),
1909 safe_user->write_instr_class_stages_stats(),
1910 global_instr_class_stages_array);
1911 return;
1912 }
1913
1914 if (safe_host != NULL)
1915 {
1916 /*
1917 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
1918 to EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
1919 */
1920 aggregate_all_stages(thread->write_instr_class_stages_stats(),
1921 safe_host->write_instr_class_stages_stats());
1922 return;
1923 }
1924
1925 /*
1926 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
1927 to EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME.
1928 */
1929 aggregate_all_stages(thread->write_instr_class_stages_stats(),
1930 global_instr_class_stages_array);
1931 }
1932
aggregate_thread_statements(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)1933 void aggregate_thread_statements(PFS_thread *thread,
1934 PFS_account *safe_account,
1935 PFS_user *safe_user,
1936 PFS_host *safe_host)
1937 {
1938 if (thread->read_instr_class_statements_stats() == NULL)
1939 return;
1940
1941 if (likely(safe_account != NULL))
1942 {
1943 /*
1944 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1945 to EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
1946 */
1947 aggregate_all_statements(thread->write_instr_class_statements_stats(),
1948 safe_account->write_instr_class_statements_stats());
1949
1950 return;
1951 }
1952
1953 if ((safe_user != NULL) && (safe_host != NULL))
1954 {
1955 /*
1956 Aggregate EVENTS_STATEMENT_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
1957 - EVENTS_STATEMENT_SUMMARY_BY_USER_BY_EVENT_NAME
1958 - EVENTS_STATEMENT_SUMMARY_BY_HOST_BY_EVENT_NAME
1959 in parallel.
1960 */
1961 aggregate_all_statements(thread->write_instr_class_statements_stats(),
1962 safe_user->write_instr_class_statements_stats(),
1963 safe_host->write_instr_class_statements_stats());
1964 return;
1965 }
1966
1967 if (safe_user != NULL)
1968 {
1969 /*
1970 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
1971 - EVENTS_STATEMENTS_SUMMARY_BY_USER_BY_EVENT_NAME
1972 - EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME
1973 in parallel.
1974 */
1975 aggregate_all_statements(thread->write_instr_class_statements_stats(),
1976 safe_user->write_instr_class_statements_stats(),
1977 global_instr_class_statements_array);
1978 return;
1979 }
1980
1981 if (safe_host != NULL)
1982 {
1983 /*
1984 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1985 to EVENTS_STATEMENTS_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
1986 */
1987 aggregate_all_statements(thread->write_instr_class_statements_stats(),
1988 safe_host->write_instr_class_statements_stats());
1989 return;
1990 }
1991
1992 /*
1993 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1994 to EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME.
1995 */
1996 aggregate_all_statements(thread->write_instr_class_statements_stats(),
1997 global_instr_class_statements_array);
1998 }
1999
aggregate_thread_transactions(PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)2000 void aggregate_thread_transactions(PFS_thread *thread,
2001 PFS_account *safe_account,
2002 PFS_user *safe_user,
2003 PFS_host *safe_host)
2004 {
2005 if (thread->read_instr_class_transactions_stats() == NULL)
2006 return;
2007
2008 if (likely(safe_account != NULL))
2009 {
2010 /*
2011 Aggregate EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2012 to EVENTS_TRANSACTIONS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
2013 */
2014 aggregate_all_transactions(thread->write_instr_class_transactions_stats(),
2015 safe_account->write_instr_class_transactions_stats());
2016
2017 return;
2018 }
2019
2020 if ((safe_user != NULL) && (safe_host != NULL))
2021 {
2022 /*
2023 Aggregate EVENTS_TRANSACTION_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2024 - EVENTS_TRANSACTION_SUMMARY_BY_USER_BY_EVENT_NAME
2025 - EVENTS_TRANSACTION_SUMMARY_BY_HOST_BY_EVENT_NAME
2026 in parallel.
2027 */
2028 aggregate_all_transactions(thread->write_instr_class_transactions_stats(),
2029 safe_user->write_instr_class_transactions_stats(),
2030 safe_host->write_instr_class_transactions_stats());
2031 return;
2032 }
2033
2034 if (safe_user != NULL)
2035 {
2036 /*
2037 Aggregate EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2038 - EVENTS_TRANSACTIONS_SUMMARY_BY_USER_BY_EVENT_NAME
2039 - EVENTS_TRANSACTIONS_SUMMARY_GLOBAL_BY_EVENT_NAME
2040 in parallel.
2041 */
2042 aggregate_all_transactions(thread->write_instr_class_transactions_stats(),
2043 safe_user->write_instr_class_transactions_stats(),
2044 &global_transaction_stat);
2045 return;
2046 }
2047
2048 if (safe_host != NULL)
2049 {
2050 /*
2051 Aggregate EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2052 to EVENTS_TRANSACTIONS_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
2053 */
2054 aggregate_all_transactions(thread->write_instr_class_transactions_stats(),
2055 safe_host->write_instr_class_transactions_stats());
2056 return;
2057 }
2058
2059 /*
2060 Aggregate EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2061 to EVENTS_TRANSACTIONS_SUMMARY_GLOBAL_BY_EVENT_NAME.
2062 */
2063 aggregate_all_transactions(thread->write_instr_class_transactions_stats(),
2064 &global_transaction_stat);
2065 }
2066
aggregate_thread_memory(bool alive,PFS_thread * thread,PFS_account * safe_account,PFS_user * safe_user,PFS_host * safe_host)2067 void aggregate_thread_memory(bool alive, PFS_thread *thread,
2068 PFS_account *safe_account,
2069 PFS_user *safe_user,
2070 PFS_host *safe_host)
2071 {
2072 if (thread->read_instr_class_memory_stats() == NULL)
2073 return;
2074
2075 if (likely(safe_account != NULL))
2076 {
2077 /*
2078 Aggregate MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME
2079 to MEMORY_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
2080 */
2081 aggregate_all_memory(alive,
2082 thread->write_instr_class_memory_stats(),
2083 safe_account->write_instr_class_memory_stats());
2084
2085 return;
2086 }
2087
2088 if ((safe_user != NULL) && (safe_host != NULL))
2089 {
2090 /*
2091 Aggregate MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2092 - MEMORY_SUMMARY_BY_USER_BY_EVENT_NAME
2093 - MEMORY_SUMMARY_BY_HOST_BY_EVENT_NAME
2094 in parallel.
2095 */
2096 aggregate_all_memory(alive,
2097 thread->write_instr_class_memory_stats(),
2098 safe_user->write_instr_class_memory_stats(),
2099 safe_host->write_instr_class_memory_stats());
2100 return;
2101 }
2102
2103 if (safe_user != NULL)
2104 {
2105 /*
2106 Aggregate MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2107 - MEMORY_SUMMARY_BY_USER_BY_EVENT_NAME
2108 - MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME
2109 in parallel.
2110 */
2111 aggregate_all_memory(alive,
2112 thread->write_instr_class_memory_stats(),
2113 safe_user->write_instr_class_memory_stats(),
2114 global_instr_class_memory_array);
2115 return;
2116 }
2117
2118 if (safe_host != NULL)
2119 {
2120 /*
2121 Aggregate MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME
2122 to MEMORY_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
2123 */
2124 aggregate_all_memory(alive,
2125 thread->write_instr_class_memory_stats(),
2126 safe_host->write_instr_class_memory_stats());
2127 return;
2128 }
2129
2130 /*
2131 Aggregate MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME
2132 to MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME.
2133 */
2134 aggregate_all_memory(alive,
2135 thread->write_instr_class_memory_stats(),
2136 global_instr_class_memory_array);
2137 }
2138
clear_thread_account(PFS_thread * thread)2139 void clear_thread_account(PFS_thread *thread)
2140 {
2141 if (thread->m_account != NULL)
2142 {
2143 thread->m_account->release();
2144 thread->m_account= NULL;
2145 }
2146
2147 if (thread->m_user != NULL)
2148 {
2149 thread->m_user->release();
2150 thread->m_user= NULL;
2151 }
2152
2153 if (thread->m_host != NULL)
2154 {
2155 thread->m_host->release();
2156 thread->m_host= NULL;
2157 }
2158 }
2159
set_thread_account(PFS_thread * thread)2160 void set_thread_account(PFS_thread *thread)
2161 {
2162 assert(thread->m_account == NULL);
2163 assert(thread->m_user == NULL);
2164 assert(thread->m_host == NULL);
2165
2166 thread->m_account= find_or_create_account(thread,
2167 thread->m_username,
2168 thread->m_username_length,
2169 thread->m_hostname,
2170 thread->m_hostname_length);
2171
2172 if ((thread->m_account == NULL) && (thread->m_username_length > 0))
2173 thread->m_user= find_or_create_user(thread,
2174 thread->m_username,
2175 thread->m_username_length);
2176
2177 if ((thread->m_account == NULL) && (thread->m_hostname_length > 0))
2178 thread->m_host= find_or_create_host(thread,
2179 thread->m_hostname,
2180 thread->m_hostname_length);
2181 }
2182
fct_update_mutex_derived_flags(PFS_mutex * pfs)2183 static void fct_update_mutex_derived_flags(PFS_mutex *pfs)
2184 {
2185 PFS_mutex_class *klass= sanitize_mutex_class(pfs->m_class);
2186 if (likely(klass != NULL))
2187 {
2188 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2189 pfs->m_timed= klass->m_timed;
2190 }
2191 else
2192 {
2193 pfs->m_enabled= false;
2194 pfs->m_timed= false;
2195 }
2196 }
2197
/** Propagate class-level flags to every mutex instance. */
void update_mutex_derived_flags()
{
  global_mutex_container.apply_all(fct_update_mutex_derived_flags);
}
2202
fct_update_rwlock_derived_flags(PFS_rwlock * pfs)2203 static void fct_update_rwlock_derived_flags(PFS_rwlock *pfs)
2204 {
2205 PFS_rwlock_class *klass= sanitize_rwlock_class(pfs->m_class);
2206 if (likely(klass != NULL))
2207 {
2208 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2209 pfs->m_timed= klass->m_timed;
2210 }
2211 else
2212 {
2213 pfs->m_enabled= false;
2214 pfs->m_timed= false;
2215 }
2216 }
2217
/** Propagate class-level flags to every rwlock instance. */
void update_rwlock_derived_flags()
{
  global_rwlock_container.apply_all(fct_update_rwlock_derived_flags);
}
2222
fct_update_cond_derived_flags(PFS_cond * pfs)2223 static void fct_update_cond_derived_flags(PFS_cond *pfs)
2224 {
2225 PFS_cond_class *klass= sanitize_cond_class(pfs->m_class);
2226 if (likely(klass != NULL))
2227 {
2228 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2229 pfs->m_timed= klass->m_timed;
2230 }
2231 else
2232 {
2233 pfs->m_enabled= false;
2234 pfs->m_timed= false;
2235 }
2236 }
2237
/** Propagate class-level flags to every condition instance. */
void update_cond_derived_flags()
{
  global_cond_container.apply_all(fct_update_cond_derived_flags);
}
2242
fct_update_file_derived_flags(PFS_file * pfs)2243 static void fct_update_file_derived_flags(PFS_file *pfs)
2244 {
2245 PFS_file_class *klass= sanitize_file_class(pfs->m_class);
2246 if (likely(klass != NULL))
2247 {
2248 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2249 pfs->m_timed= klass->m_timed;
2250 }
2251 else
2252 {
2253 pfs->m_enabled= false;
2254 pfs->m_timed= false;
2255 }
2256 }
2257
/** Propagate class-level flags to every file instance. */
void update_file_derived_flags()
{
  global_file_container.apply_all(fct_update_file_derived_flags);
}
2262
fct_update_table_derived_flags(PFS_table * pfs)2263 void fct_update_table_derived_flags(PFS_table *pfs)
2264 {
2265 PFS_table_share *share= sanitize_table_share(pfs->m_share);
2266 if (likely(share != NULL))
2267 {
2268 pfs->m_io_enabled= share->m_enabled &&
2269 flag_global_instrumentation && global_table_io_class.m_enabled;
2270 pfs->m_io_timed= share->m_timed && global_table_io_class.m_timed;
2271 pfs->m_lock_enabled= share->m_enabled &&
2272 flag_global_instrumentation && global_table_lock_class.m_enabled;
2273 pfs->m_lock_timed= share->m_timed && global_table_lock_class.m_timed;
2274 }
2275 else
2276 {
2277 pfs->m_io_enabled= false;
2278 pfs->m_io_timed= false;
2279 pfs->m_lock_enabled= false;
2280 pfs->m_lock_timed= false;
2281 }
2282 }
2283
/** Propagate share-level and class-level flags to every table instance. */
void update_table_derived_flags()
{
  global_table_container.apply_all(fct_update_table_derived_flags);
}
2288
fct_update_socket_derived_flags(PFS_socket * pfs)2289 static void fct_update_socket_derived_flags(PFS_socket *pfs)
2290 {
2291 PFS_socket_class *klass= sanitize_socket_class(pfs->m_class);
2292 if (likely(klass != NULL))
2293 {
2294 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2295 pfs->m_timed= klass->m_timed;
2296 }
2297 else
2298 {
2299 pfs->m_enabled= false;
2300 pfs->m_timed= false;
2301 }
2302 }
2303
/** Propagate class-level flags to every socket instance. */
void update_socket_derived_flags()
{
  global_socket_container.apply_all(fct_update_socket_derived_flags);
}
2308
/**
  Per-instance callback: refresh a metadata lock m_enabled / m_timed
  flags from the single global metadata instrument class.
*/
static void fct_update_metadata_derived_flags(PFS_metadata_lock *pfs)
{
  pfs->m_enabled= global_metadata_class.m_enabled && flag_global_instrumentation;
  pfs->m_timed= global_metadata_class.m_timed;
}
2314
/** Propagate the global metadata class flags to every metadata lock instance. */
void update_metadata_derived_flags()
{
  global_mdl_container.apply_all(fct_update_metadata_derived_flags);
}
2319
/** Per-thread callback: refresh the thread's history-related derived flags. */
static void fct_update_thread_derived_flags(PFS_thread *pfs)
{
  pfs->set_history_derived_flags();
}
2324
/**
  Refresh the history derived flags of all threads.
  NOTE(review): uses apply(), not apply_all() as the sibling updaters do —
  presumably intentional (skip some slots); confirm against the container API.
*/
void update_thread_derived_flags()
{
  global_thread_container.apply(fct_update_thread_derived_flags);
}
2329
/**
  Recompute the derived enabled/timed flags of every instrument instance,
  for all instrument types that have instances.
*/
void update_instruments_derived_flags()
{
  update_mutex_derived_flags();
  update_rwlock_derived_flags();
  update_cond_derived_flags();
  update_file_derived_flags();
  update_table_derived_flags();
  update_socket_derived_flags();
  update_metadata_derived_flags();
  /* nothing for stages, statements and transactions (no instances) */
}
2341
2342 /** @} */
2343