1 /* Copyright (c) 2008, 2021, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software Foundation,
21 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
22
23 /**
24 @file storage/perfschema/pfs.cc
25 The performance schema implementation of all instruments.
26 */
27 #include "my_global.h"
28 #include "thr_lock.h"
29
30 /* Make sure exported prototypes match the implementation. */
31 #include "pfs_file_provider.h"
32 #include "pfs_idle_provider.h"
33 #include "pfs_memory_provider.h"
34 #include "pfs_metadata_provider.h"
35 #include "pfs_socket_provider.h"
36 #include "pfs_stage_provider.h"
37 #include "pfs_statement_provider.h"
38 #include "pfs_table_provider.h"
39 #include "pfs_thread_provider.h"
40 #include "pfs_transaction_provider.h"
41
42 #include "mysql/psi/psi.h"
43 #include "mysql/psi/mysql_thread.h"
44 #include "my_thread.h"
45 #include "sql_const.h"
46 #include "pfs.h"
47 #include "pfs_instr_class.h"
48 #include "pfs_instr.h"
49 #include "pfs_host.h"
50 #include "pfs_user.h"
51 #include "pfs_account.h"
52 #include "pfs_global.h"
53 #include "pfs_column_values.h"
54 #include "pfs_timer.h"
55 #include "pfs_events_waits.h"
56 #include "pfs_events_stages.h"
57 #include "pfs_events_statements.h"
58 #include "pfs_events_transactions.h"
59 #include "pfs_setup_actor.h"
60 #include "pfs_setup_object.h"
61 #include "sql_error.h"
62 #include "sp_head.h"
63 #include "mdl.h" /* mdl_key_init */
64 #include "pfs_digest.h"
65 #include "pfs_program.h"
66 #include "pfs_prepared_stmt.h"
67
68 using std::min;
69
70 /*
71 This is a development tool to investigate memory statistics,
72 do not use in production.
73 */
74 #undef PFS_PARANOID
75
76 #ifdef PFS_PARANOID
report_memory_accounting_error(const char * api_name,PFS_thread * new_thread,size_t size,PFS_memory_class * klass,PFS_thread * old_thread)77 static void report_memory_accounting_error(
78 const char *api_name,
79 PFS_thread *new_thread,
80 size_t size,
81 PFS_memory_class *klass,
82 PFS_thread *old_thread)
83 {
84 pfs_print_error("%s "
85 "thread <%d> of class <%s> "
86 "not owner of <%d> bytes in class <%s> "
87 "allocated by thread <%d> of class <%s>\n",
88 api_name,
89 new_thread->m_thread_internal_id,
90 new_thread->m_class->m_name,
91 size, klass->m_name,
92 old_thread->m_thread_internal_id,
93 old_thread->m_class->m_name);
94
95 assert(strcmp(new_thread->m_class->m_name, "thread/sql/event_worker") != 0);
96 assert(strcmp(new_thread->m_class->m_name, "thread/sql/event_scheduler") != 0);
97 assert(strcmp(new_thread->m_class->m_name, "thread/sql/one_connection") != 0);
98 }
99 #endif /* PFS_PARANOID */
100
101 /**
102 @page PAGE_PERFORMANCE_SCHEMA The Performance Schema main page
103 MySQL PERFORMANCE_SCHEMA implementation.
104
105 @section INTRO Introduction
106 The PERFORMANCE_SCHEMA is a way to introspect the internal execution of
107 the server at runtime.
108 The performance schema focuses primarily on performance data,
109 as opposed to the INFORMATION_SCHEMA whose purpose is to inspect metadata.
110
111 From a user point of view, the performance schema consists of:
112 - a dedicated database schema, named PERFORMANCE_SCHEMA,
113 - SQL tables, used to query the server internal state or change
114 configuration settings.
115
116 From an implementation point of view, the performance schema is a dedicated
117 Storage Engine which exposes data collected by 'Instrumentation Points'
118 placed in the server code.
119
120 @section INTERFACES Multiple interfaces
121
122 The performance schema exposes many different interfaces,
123 for different components, and for different purposes.
124
125 @subsection INT_INSTRUMENTING Instrumenting interface
126
127 All the data representing the server internal state exposed
128 in the performance schema must be first collected:
129 this is the role of the instrumenting interface.
130 The instrumenting interface is a coding interface provided
131 by implementors (of the performance schema) to implementors
132 (of the server or server components).
133
134 This interface is available to:
135 - C implementations
136 - C++ implementations
137 - the core SQL layer (/sql)
138 - the mysys library (/mysys)
139 - MySQL plugins, including storage engines,
140 - third party plugins, including third party storage engines.
141
142 For details, see the @ref PAGE_INSTRUMENTATION_INTERFACE
143 "instrumentation interface page".
144
145 @subsection INT_COMPILING Compiling interface
146
147 The implementation of the performance schema can be enabled or disabled at
148 build time, when building MySQL from the source code.
149
150 When building with the performance schema code, some compilation flags
151 are available to change the default values used in the code, if required.
152
153 For more details, see:
154 @verbatim ./configure --help @endverbatim
155
156 To compile with the performance schema:
157 @verbatim ./configure --with-perfschema @endverbatim
158
159 The implementation of all the compiling options is located in
160 @verbatim ./storage/perfschema/plug.in @endverbatim
161
162 @subsection INT_STARTUP Server startup interface
163
164 The server startup interface consists of the "./mysqld ..."
165 command line used to start the server.
166 When the performance schema is compiled in the server binary,
167 extra command line options are available.
168
169 These extra start options allow the DBA to:
170 - enable or disable the performance schema
171 - specify some sizing parameters.
172
173 To see help for the performance schema startup options, see:
174 @verbatim ./sql/mysqld --verbose --help @endverbatim
175
176 The implementation of all the startup options is located in
177 @verbatim ./sql/mysqld.cc, my_long_options[] @endverbatim
178
179 @subsection INT_BOOTSTRAP Server bootstrap interface
180
181 The bootstrap interface is a private interface exposed by
182 the performance schema, and used by the SQL layer.
183 Its role is to advertise all the SQL tables natively
184 supported by the performance schema to the SQL server.
185 The code consists of creating MySQL tables for the
186 performance schema itself, and is used in './mysql --bootstrap'
187 mode when a server is installed.
188
189 The implementation of the database creation script is located in
190 @verbatim ./scripts/mysql_system_tables.sql @endverbatim
191
192 @subsection INT_CONFIG Runtime configuration interface
193
194 When the performance schema is used at runtime, various configuration
195 parameters can be used to specify what kind of data is collected,
196 what kind of aggregations are computed, what kind of timers are used,
197 what events are timed, etc.
198
199 For all these capabilities, not a single statement or special syntax
200 was introduced in the parser.
201 Instead of new SQL statements, the interface consists of DML
202 (SELECT, INSERT, UPDATE, DELETE) against special "SETUP" tables.
203
204 For example:
205 @verbatim mysql> update performance_schema.SETUP_INSTRUMENTS
206 set ENABLED='YES', TIMED='YES';
207 Query OK, 234 rows affected (0.00 sec)
208 Rows matched: 234 Changed: 234 Warnings: 0 @endverbatim
209
210 @subsection INT_STATUS Internal audit interface
211
212 The internal audit interface is provided to the DBA to inspect if the
213 performance schema code itself is functioning properly.
214 This interface is necessary because a failure caused while
215 instrumenting code in the server should not cause failures in the
216 MySQL server itself, so that the performance schema implementation
217 never raises errors during runtime execution.
218
219 This auditing interface consists of:
220 @verbatim SHOW ENGINE PERFORMANCE_SCHEMA STATUS; @endverbatim
221 It displays data related to the memory usage of the performance schema,
222 as well as statistics about lost events, if any.
223
224 The SHOW STATUS command is implemented in
225 @verbatim ./storage/perfschema/pfs_engine_table.cc @endverbatim
226
227 @subsection INT_QUERY Query interface
228
229 The query interface is used to query the internal state of a running server.
230 It is provided as SQL tables.
231
232 For example:
233 @verbatim mysql> select * from performance_schema.EVENTS_WAITS_CURRENT;
234 @endverbatim
235
236 @section DESIGN_PRINCIPLES Design principles
237
238 @subsection PRINCIPLE_BEHAVIOR No behavior changes
239
240 The primary goal of the performance schema is to measure (instrument) the
241 execution of the server. A good measure should not cause any change
242 in behavior.
243
244 To achieve this, the overall design of the performance schema complies
245 with the following very severe design constraints:
246
247 The parser is unchanged. There are no new keywords, no new statements.
248 This guarantees that existing applications will run the same way with or
249 without the performance schema.
250
251 All the instrumentation points return "void", there are no error codes.
252 Even if the performance schema internally fails, execution of the server
253 code will proceed.
254
255 None of the instrumentation points allocate memory.
256 All the memory used by the performance schema is pre-allocated at startup,
257 and is considered "static" during the server life time.
258
259 None of the instrumentation points use any pthread_mutex, pthread_rwlock,
260 or pthread_cond (or platform equivalents).
261 Executing the instrumentation point should not cause thread scheduling to
262 change in the server.
263
264 In other words, the implementation of the instrumentation points,
265 including all the code called by the instrumentation points, is:
266 - malloc free
267 - mutex free
268 - rwlock free
269
270 TODO: All the code located in storage/perfschema is malloc free,
271 but unfortunately the usage of LF_HASH introduces some memory allocation.
272 This should be revised if possible, to use a lock-free,
273 malloc-free hash code table.
274
275 @subsection PRINCIPLE_PERFORMANCE No performance hit
276
277 The instrumentation of the server should be as fast as possible.
278 In cases when there are choices between:
279 - doing some processing when recording the performance data
280 in the instrumentation,
281 - doing some processing when retrieving the performance data,
282
283 priority is given in the design to make the instrumentation faster,
284 pushing some complexity to data retrieval.
285
286 As a result, some parts of the design, related to:
287 - the setup code path,
288 - the query code path,
289
290 might appear to be sub-optimal.
291
292 The criterion used here is to optimize primarily the critical path (data
293 collection), possibly at the expense of non-critical code paths.
294
295 @subsection PRINCIPLE_NOT_INTRUSIVE Unintrusive instrumentation
296
297 For the performance schema in general to be successful, the barrier
298 of entry for a developer should be low, so it's easy to instrument code.
299
300 In particular, the instrumentation interface:
301 - is available for C and C++ code (so it's a C interface),
302 - does not require parameters that the calling code can't easily provide,
303 - supports partial instrumentation (for example, instrumenting mutexes does
304 not require that every mutex is instrumented)
305
306 @subsection PRINCIPLE_EXTENDABLE Extendable instrumentation
307
308 As the content of the performance schema improves,
309 with more tables exposed and more data collected,
310 the instrumentation interface will also be augmented
311 to support instrumenting new concepts.
312 Existing instrumentations should not be affected when additional
313 instrumentation is made available, and making a new instrumentation
314 available should not require existing instrumented code to support it.
315
316 @subsection PRINCIPLE_VERSIONED Versioned instrumentation
317
318 Given that the instrumentation offered by the performance schema will
319 be augmented with time, when more features are implemented,
320 the interface itself should be versioned, to keep compatibility
321 with previous instrumented code.
322
323 For example, after both plugin-A and plugin-B have been instrumented for
324 mutexes, read write locks and conditions, using the instrumentation
325 interface, we can anticipate that the instrumentation interface
326 is expanded to support file based operations.
327
328 Plugin-A, a file based storage engine, will most likely use the expanded
329 interface and instrument its file usage, using the version 2
330 interface, while Plugin-B, a network based storage engine, will not change
331 its code and not release a new binary.
332
333 When later the instrumentation interface is expanded to support network
334 based operations (which will define interface version 3), the Plugin-B code
335 can then be changed to make use of it.
336
337 Note, this is just an example to illustrate the design concept here.
338 Both mutexes and file instrumentation are already available
339 since version 1 of the instrumentation interface.
340
341 @subsection PRINCIPLE_DEPLOYMENT Easy deployment
342
343 Internally, we might want every plugin implementation to upgrade the
344 instrumented code to the latest available, but this will cause additional
345 work and this is not practical if the code change is monolithic.
346
347 Externally, for third party plugin implementors, asking implementors to
348 always stay aligned to the latest instrumentation and make new releases,
349 even when the change does not provide new functionality for them,
350 is a bad idea.
351
352 For example, requiring a network based engine to re-release because the
353 instrumentation interface changed for file based operations, will create
354 too many deployment issues.
355
356 So, the performance schema implementation must support concurrently,
357 in the same deployment, multiple versions of the instrumentation
358 interface, and ensure binary compatibility with each version.
359
360 In addition to this, the performance schema can be included or excluded
361 from the server binary, using build time configuration options.
362
363 Regardless, the following types of deployment are valid:
364 - a server supporting the performance schema + a storage engine
365 that is not instrumented
366 - a server not supporting the performance schema + a storage engine
367 that is instrumented
368 */
369
370 /**
371 @page PAGE_INSTRUMENTATION_INTERFACE Performance schema: instrumentation interface page.
372 MySQL performance schema instrumentation interface.
373
374 @section INTRO Introduction
375
  The instrumentation interface consists of two layers:
  - a raw ABI (Application Binary Interface) layer, that exposes the primitive
  instrumentation functions exported by the performance schema instrumentation
  - an API (Application Programming Interface) layer,
  that provides many helpers for a developer instrumenting some code,
  to make the instrumentation as easy as possible.
382
383 The ABI layer consists of:
384 @code
385 #include "mysql/psi/psi.h"
386 @endcode
387
  The API layer consists of:
  @code
  #include "mysql/psi/mysql_mutex.h"
  #include "mysql/psi/mysql_file.h"
  @endcode
393
394 The first helper is for mutexes, rwlocks and conditions,
395 the second for file io.
396
397 The API layer exposes C macros and typedefs which will expand:
398 - either to non-instrumented code, when compiled without the performance
399 schema instrumentation
400 - or to instrumented code, that will issue the raw calls to the ABI layer
401 so that the implementation can collect data.
402
403 Note that all the names introduced (for example, @c mysql_mutex_lock) do not
404 collide with any other namespace.
405 In particular, the macro @c mysql_mutex_lock is on purpose not named
406 @c pthread_mutex_lock.
407 This is to:
408 - avoid overloading @c pthread_mutex_lock with yet another macro,
409 which is dangerous as it can affect user code and pollute
410 the end-user namespace.
411 - allow the developer instrumenting code to selectively instrument
412 some code but not all.
413
414 @section PRINCIPLES Design principles
415
416 The ABI part is designed as a facade, that exposes basic primitives.
417 The expectation is that each primitive will be very stable over time,
418 but the list will constantly grow when more instruments are supported.
419 To support binary compatibility with plugins compiled with a different
420 version of the instrumentation, the ABI itself is versioned
421 (see @c PSI_v1, @c PSI_v2).
422
423 For a given instrumentation point in the API, the basic coding pattern
424 used is:
425 - (a) notify the performance schema of the operation
426 about to be performed.
427 - (b) execute the instrumented code.
428 - (c) notify the performance schema that the operation
429 is completed.
430
  An opaque "locker" pointer is returned by (a), that is given to (c).
  This pointer helps the implementation to keep context, for performance.
433
  The following code fragment is annotated to show how in detail this pattern
  is implemented, when the instrumentation is compiled in:
436
437 @verbatim
438 static inline int mysql_mutex_lock(
439 mysql_mutex_t *that, myf flags, const char *src_file, uint src_line)
440 {
441 int result;
442 struct PSI_mutex_locker_state state;
443 struct PSI_mutex_locker *locker= NULL;
444
445 ............... (a)
446 locker= PSI_MUTEX_CALL(start_mutex_wait)(&state, that->p_psi, PSI_MUTEX_LOCK,
447 locker, src_file, src_line);
448
449 ............... (b)
450 result= pthread_mutex_lock(&that->m_mutex);
451
452 ............... (c)
453 PSI_MUTEX_CALL(end_mutex_wait)(locker, result);
454
455 return result;
456 }
457 @endverbatim
458
459 When the performance schema instrumentation is not compiled in,
460 the code becomes simply a wrapper, expanded in line by the compiler:
461
462 @verbatim
463 static inline int mysql_mutex_lock(...)
464 {
465 int result;
466
467 ............... (b)
468 result= pthread_mutex_lock(&that->m_mutex);
469
470 return result;
471 }
472 @endverbatim
473
474 When the performance schema instrumentation is compiled in,
475 and when the code compiled is internal to the server implementation,
476 PSI_MUTEX_CALL expands directly to functions calls in the performance schema,
477 to make (a) and (c) calls as efficient as possible.
478
479 @verbatim
480 static inline int mysql_mutex_lock(...)
481 {
482 int result;
483 struct PSI_mutex_locker_state state;
484 struct PSI_mutex_locker *locker= NULL;
485
486 ............... (a)
487 locker= pfs_start_mutex_wait_v1(&state, that->p_psi, PSI_MUTEX_LOCK,
488 locker, src_file, src_line);
489
490 ............... (b)
491 result= pthread_mutex_lock(&that->m_mutex);
492
493 ............... (c)
494 pfs_end_mutex_wait_v1(locker, result);
495
496 return result;
497 }
498 @endverbatim
499
500 When the performance schema instrumentation is compiled in,
501 and when the code compiled is external to the server implementation
502 (typically, a dynamic plugin),
503 PSI_MUTEX_CALL expands to dynamic calls to the underlying implementation,
504 using the PSI_server entry point.
505 This makes (a) and (c) slower, as a function pointer is used instead of a static call,
506 but also independent of the implementation, for binary compatibility.
507
508 @verbatim
509 static inline int mysql_mutex_lock(...)
510 {
511 int result;
512 struct PSI_mutex_locker_state state;
513 struct PSI_mutex_locker *locker= NULL;
514
515 ............... (a)
516 locker= PSI_server->start_mutex_wait(&state, that->p_psi, PSI_MUTEX_LOCK,
517 locker, src_file, src_line);
518
519 ............... (b)
520 result= pthread_mutex_lock(&that->m_mutex);
521
522 ............... (c)
523 PSI_server->end_mutex_wait(locker, result);
524
525 return result;
526 }
527 @endverbatim
528
529 */
530
531 /**
532 @page PAGE_AGGREGATES Performance schema: the aggregates page.
533 Performance schema aggregates.
534
535 @section INTRO Introduction
536
537 Aggregates tables are tables that can be formally defined as
538 SELECT ... from EVENTS_WAITS_HISTORY_INFINITE ... group by 'group clause'.
539
540 Each group clause defines a different kind of aggregate, and corresponds to
541 a different table exposed by the performance schema.
542
543 Aggregates can be either:
544 - computed on the fly,
545 - computed on demand, based on other available data.
546
547 'EVENTS_WAITS_HISTORY_INFINITE' is a table that does not exist,
548 the best approximation is EVENTS_WAITS_HISTORY_LONG.
549 Aggregates computed on the fly in fact are based on EVENTS_WAITS_CURRENT,
550 while aggregates computed on demand are based on other
551 EVENTS_WAITS_SUMMARY_BY_xxx tables.
552
553 To better understand the implementation itself, a bit of math is
554 required first, to understand the model behind the code:
555 the code is deceptively simple, the real complexity resides
556 in the flyweight of pointers between various performance schema buffers.
557
558 @section DIMENSION Concept of dimension
559
560 An event measured by the instrumentation has many attributes.
561 An event is represented as a data point P(x1, x2, ..., xN),
562 where each x_i coordinate represents a given attribute value.
563
564 Examples of attributes are:
565 - the time waited
566 - the object waited on
567 - the instrument waited on
568 - the thread that waited
569 - the operation performed
570 - per object or per operation additional attributes, such as spins,
571 number of bytes, etc.
572
573 Computing an aggregate per thread is fundamentally different from
574 computing an aggregate by instrument, so the "_BY_THREAD" and
575 "_BY_EVENT_NAME" aggregates are different dimensions,
576 operating on different x_i and x_j coordinates.
577 These aggregates are "orthogonal".
578
579 @section PROJECTION Concept of projection
580
581 A given x_i attribute value can convey either just one basic information,
582 such as a number of bytes, or can convey implied information,
583 such as an object fully qualified name.
584
585 For example, from the value "test.t1", the name of the object schema
586 "test" can be separated from the object name "t1", so that now aggregates
587 by object schema can be implemented.
588
589 In math terms, that corresponds to defining a function:
590 F_i (x): x --> y
591 Applying this function to our point P gives another point P':
592
593 F_i (P):
594 P(x1, x2, ..., x{i-1}, x_i, x{i+1}, ..., x_N)
595 --> P' (x1, x2, ..., x{i-1}, f_i(x_i), x{i+1}, ..., x_N)
596
597 That function defines in fact an aggregate !
598 In SQL terms, this aggregate would look like the following table:
599
600 @verbatim
601 CREATE VIEW EVENTS_WAITS_SUMMARY_BY_Func_i AS
602 SELECT col_1, col_2, ..., col_{i-1},
603 Func_i(col_i),
604 COUNT(col_i),
605 MIN(col_i), AVG(col_i), MAX(col_i), -- if col_i is a numeric value
606 col_{i+1}, ..., col_N
607 FROM EVENTS_WAITS_HISTORY_INFINITE
608 group by col_1, col_2, ..., col_{i-1}, col{i+1}, ..., col_N.
609 @endverbatim
610
611 Note that not all columns have to be included,
612 in particular some columns that are dependent on the x_i column should
613 be removed, so that in practice, MySQL's aggregation method tends to
614 remove many attributes at each aggregation steps.
615
616 For example, when aggregating wait events by object instances,
617 - the wait_time and number_of_bytes can be summed,
618 and sum(wait_time) now becomes an object instance attribute.
619 - the source, timer_start, timer_end columns are not in the
620 _BY_INSTANCE table, because these attributes are only
621 meaningful for a wait.
622
623 @section COMPOSITION Concept of composition
624
625 Now, the "test.t1" --> "test" example was purely theory,
626 just to explain the concept, and does not lead very far.
627 Let's look at a more interesting example of data that can be derived
628 from the row event.
629
630 An event creates a transient object, PFS_wait_locker, per operation.
631 This object's life cycle is extremely short: it's created just
632 before the start_wait() instrumentation call, and is destroyed in
633 the end_wait() call.
634
635 The wait locker itself contains a pointer to the object instance
636 waited on.
637 That allows to implement a wait_locker --> object instance projection,
638 with m_target.
639 The object instance life cycle depends on _init and _destroy calls
640 from the code, such as mysql_mutex_init()
641 and mysql_mutex_destroy() for a mutex.
642
643 The object instance waited on contains a pointer to the object class,
644 which is represented by the instrument name.
645 That allows to implement an object instance --> object class projection.
646 The object class life cycle is permanent, as instruments are loaded in
647 the server and never removed.
648
  The object class is named in such a way
  (for example, "wait/sync/mutex/sql/LOCK_open",
  "wait/io/file/maria/data_file") that the component ("sql", "maria")
  that it belongs to can be inferred.
653 That allows to implement an object class --> server component projection.
654
655 Back to math again, we have, for example for mutexes:
656
657 F1 (l) : PFS_wait_locker l --> PFS_mutex m = l->m_target.m_mutex
658
659 F1_to_2 (m) : PFS_mutex m --> PFS_mutex_class i = m->m_class
660
661 F2_to_3 (i) : PFS_mutex_class i --> const char *component =
662 substring(i->m_name, ...)
663
664 Per components aggregates are not implemented, this is just an illustration.
665
666 F1 alone defines this aggregate:
667
668 EVENTS_WAITS_HISTORY_INFINITE --> EVENTS_WAITS_SUMMARY_BY_INSTANCE
669 (or MUTEX_INSTANCE)
670
671 F1_to_2 alone could define this aggregate:
672
673 EVENTS_WAITS_SUMMARY_BY_INSTANCE --> EVENTS_WAITS_SUMMARY_BY_EVENT_NAME
674
675 Alternatively, using function composition, with
676 F2 = F1_to_2 o F1, F2 defines:
677
678 EVENTS_WAITS_HISTORY_INFINITE --> EVENTS_WAITS_SUMMARY_BY_EVENT_NAME
679
680 Likewise, F_2_to_3 defines:
681
682 EVENTS_WAITS_SUMMARY_BY_EVENT_NAME --> EVENTS_WAITS_SUMMARY_BY_COMPONENT
683
684 and F3 = F_2_to_3 o F_1_to_2 o F1 defines:
685
686 EVENTS_WAITS_HISTORY_INFINITE --> EVENTS_WAITS_SUMMARY_BY_COMPONENT
687
688 What has all this to do with the code ?
689
690 Functions (or aggregates) such as F_3 are not implemented as is.
691 Instead, they are decomposed into F_2_to_3 o F_1_to_2 o F1,
692 and each intermediate aggregate is stored into an internal buffer.
693 This allows to support every F1, F2, F3 aggregates from shared
694 internal buffers, where computation already performed to compute F2
695 is reused when computing F3.
696
697 @section OBJECT_GRAPH Object graph
698
699 In terms of object instances, or records, pointers between
700 different buffers define an object instance graph.
701
702 For example, assuming the following scenario:
703 - A mutex class "M" is instrumented, the instrument name
704 is "wait/sync/mutex/sql/M"
705 - This mutex instrument has been instantiated twice,
706 mutex instances are noted M-1 and M-2
707 - Threads T-A and T-B are locking mutex instance M-1
708 - Threads T-C and T-D are locking mutex instance M-2
709
710 The performance schema will record the following data:
711 - EVENTS_WAITS_CURRENT has 4 rows, one for each mutex locker
712 - EVENTS_WAITS_SUMMARY_BY_INSTANCE shows 2 rows, for M-1 and M-2
713 - EVENTS_WAITS_SUMMARY_BY_EVENT_NAME shows 1 row, for M
714
715 The graph of structures will look like:
716
717 @verbatim
718 PFS_wait_locker (T-A, M-1) ----------
719 |
720 v
721 PFS_mutex (M-1)
722 - m_wait_stat ------------
723 ^ |
724 | |
725 PFS_wait_locker (T-B, M-1) ---------- |
726 v
727 PFS_mutex_class (M)
728 - m_wait_stat
729 PFS_wait_locker (T-C, M-2) ---------- ^
730 | |
731 v |
732 PFS_mutex (M-2) |
733 - m_wait_stat ------------
734 ^
735 |
736 PFS_wait_locker (T-D, M-2) ----------
737
738 || || ||
739 || || ||
740 vv vv vv
741
742 EVENTS_WAITS_CURRENT ..._SUMMARY_BY_INSTANCE ..._SUMMARY_BY_EVENT_NAME
743 @endverbatim
744
745 @section ON_THE_FLY On the fly aggregates
746
747 'On the fly' aggregates are computed during the code execution.
748 This is necessary because the data the aggregate is based on is volatile,
749 and can not be kept indefinitely.
750
751 With on the fly aggregates:
752 - the writer thread does all the computation
753 - the reader thread accesses the result directly
754
755 This model is to be avoided if possible, due to the overhead
756 caused when instrumenting code.
757
758 @section HIGHER_LEVEL Higher level aggregates
759
760 'Higher level' aggregates are implemented on demand only.
761 The code executing a SELECT from the aggregate table is
762 collecting data from multiple internal buffers to produce the result.
763
764 With higher level aggregates:
765 - the reader thread does all the computation
766 - the writer thread has no overhead.
767
768 @section MIXED Mixed level aggregates
769
770 The 'Mixed' model is a compromise between 'On the fly' and 'Higher level'
771 aggregates, for internal buffers that are not permanent.
772
773 While an object is present in a buffer, the higher level model is used.
774 When an object is about to be destroyed, statistics are saved into
775 a 'parent' buffer with a longer life cycle, to follow the on the fly model.
776
777 With mixed aggregates:
778 - the reader thread does a lot of complex computation,
779 - the writer thread has minimal overhead, on destroy events.
780
781 @section IMPL_WAIT Implementation for waits aggregates
782
783 For waits, the tables that contains aggregated wait data are:
784 - EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
785 - EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME
786 - EVENTS_WAITS_SUMMARY_BY_INSTANCE
787 - EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
788 - EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME
789 - EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME
790 - FILE_SUMMARY_BY_EVENT_NAME
791 - FILE_SUMMARY_BY_INSTANCE
792 - SOCKET_SUMMARY_BY_INSTANCE
793 - SOCKET_SUMMARY_BY_EVENT_NAME
794 - OBJECTS_SUMMARY_GLOBAL_BY_TYPE
795
  The instrumented code that generates waits events consists of:
797 - mutexes (mysql_mutex_t)
798 - rwlocks (mysql_rwlock_t)
799 - conditions (mysql_cond_t)
800 - file io (MYSQL_FILE)
801 - socket io (MYSQL_SOCKET)
802 - table io
803 - table lock
804 - idle
805
806 The flow of data between aggregates tables varies for each instrumentation.
807
808 @subsection IMPL_WAIT_MUTEX Mutex waits
809
810 @verbatim
811 mutex_locker(T, M)
812 |
813 | [1]
814 |
815 |-> pfs_mutex(M) =====>> [B], [C]
816 | |
817 | | [2]
818 | |
819 | |-> pfs_mutex_class(M.class) =====>> [C]
820 |
821 |-> pfs_thread(T).event_name(M) =====>> [A], [D], [E], [F]
822 |
823 | [3]
824 |
825 3a |-> pfs_account(U, H).event_name(M) =====>> [D], [E], [F]
826 . |
827 . | [4-RESET]
828 . |
829 3b .....+-> pfs_user(U).event_name(M) =====>> [E]
830 . |
831 3c .....+-> pfs_host(H).event_name(M) =====>> [F]
832 @endverbatim
833
834 How to read this diagram:
835 - events that occur during the instrumented code execution are noted with numbers,
836 as in [1]. Code executed by these events has an impact on overhead.
837 - events that occur during TRUNCATE TABLE operations are noted with numbers,
838 followed by "-RESET", as in [4-RESET].
839 Code executed by these events has no impact on overhead,
840 since they are executed by independent monitoring sessions.
841 - events that occur when a reader extracts data from a performance schema table
842 are noted with letters, as in [A]. The name of the table involved,
843 and the method that builds a row are documented. Code executed by these events
844 has no impact on the instrumentation overhead. Note that the table
845 implementation may pull data from different buffers.
846 - nominal code paths are in plain lines. A "nominal" code path corresponds to
847 cases where the performance schema buffers are sized so that no records are lost.
848 - degenerated code paths are in dotted lines. A "degenerated" code path corresponds
849 to edge cases where parent buffers are full, which forces the code to aggregate to
850 grand parents directly.
851
852 Implemented as:
853 - [1] @c start_mutex_wait_v1(), @c end_mutex_wait_v1()
854 - [2] @c destroy_mutex_v1()
855 - [3] @c aggregate_thread_waits()
856 - [4] @c PFS_account::aggregate_waits()
857 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
858 @c table_ews_by_thread_by_event_name::make_row()
859 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
860 @c table_events_waits_summary_by_instance::make_mutex_row()
861 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
862 @c table_ews_global_by_event_name::make_mutex_row()
863 - [D] EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
864 @c table_ews_by_account_by_event_name::make_row()
865 - [E] EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME,
866 @c table_ews_by_user_by_event_name::make_row()
867 - [F] EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME,
868 @c table_ews_by_host_by_event_name::make_row()
869
870 Table EVENTS_WAITS_SUMMARY_BY_INSTANCE is a 'on the fly' aggregate,
871 because the data is collected on the fly by (1) and stored into a buffer,
872 pfs_mutex. The table implementation [B] simply reads the results directly
873 from this buffer.
874
875 Table EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME is a 'mixed' aggregate,
876 because some data is collected on the fly (1),
877 some data is preserved with (2) at a later time in the life cycle,
878 and two different buffers pfs_mutex and pfs_mutex_class are used to store the
879 statistics collected. The table implementation [C] is more complex, since
880 it reads from two buffers pfs_mutex and pfs_mutex_class.
881
882 @subsection IMPL_WAIT_RWLOCK Rwlock waits
883
884 @verbatim
885 rwlock_locker(T, R)
886 |
887 | [1]
888 |
889 |-> pfs_rwlock(R) =====>> [B], [C]
890 | |
891 | | [2]
892 | |
893 | |-> pfs_rwlock_class(R.class) =====>> [C]
894 |
895 |-> pfs_thread(T).event_name(R) =====>> [A]
896 |
897 ...
898 @endverbatim
899
900 Implemented as:
901 - [1] @c start_rwlock_rdwait_v1(), @c end_rwlock_rdwait_v1(), ...
902 - [2] @c destroy_rwlock_v1()
903 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
904 @c table_ews_by_thread_by_event_name::make_row()
905 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
906 @c table_events_waits_summary_by_instance::make_rwlock_row()
907 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
908 @c table_ews_global_by_event_name::make_rwlock_row()
909
910 @subsection IMPL_WAIT_COND Cond waits
911
912 @verbatim
913 cond_locker(T, C)
914 |
915 | [1]
916 |
917 |-> pfs_cond(C) =====>> [B], [C]
918 | |
919 | | [2]
920 | |
921 | |-> pfs_cond_class(C.class) =====>> [C]
922 |
923 |-> pfs_thread(T).event_name(C) =====>> [A]
924 |
925 ...
926 @endverbatim
927
928 Implemented as:
929 - [1] @c start_cond_wait_v1(), @c end_cond_wait_v1()
930 - [2] @c destroy_cond_v1()
931 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
932 @c table_ews_by_thread_by_event_name::make_row()
933 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
934 @c table_events_waits_summary_by_instance::make_cond_row()
935 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
936 @c table_ews_global_by_event_name::make_cond_row()
937
938 @subsection IMPL_WAIT_FILE File waits
939
940 @verbatim
941 file_locker(T, F)
942 |
943 | [1]
944 |
945 |-> pfs_file(F) =====>> [B], [C], [D], [E]
946 | |
947 | | [2]
948 | |
949 | |-> pfs_file_class(F.class) =====>> [C], [D]
950 |
951 |-> pfs_thread(T).event_name(F) =====>> [A]
952 |
953 ...
954 @endverbatim
955
956 Implemented as:
957 - [1] @c get_thread_file_name_locker_v1(), @c start_file_wait_v1(),
958 @c end_file_wait_v1(), ...
959 - [2] @c close_file_v1()
960 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
961 @c table_ews_by_thread_by_event_name::make_row()
962 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
963 @c table_events_waits_summary_by_instance::make_file_row()
964 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
965 @c table_ews_global_by_event_name::make_file_row()
966 - [D] FILE_SUMMARY_BY_EVENT_NAME,
967 @c table_file_summary_by_event_name::make_row()
968 - [E] FILE_SUMMARY_BY_INSTANCE,
969 @c table_file_summary_by_instance::make_row()
970
971 @subsection IMPL_WAIT_SOCKET Socket waits
972
973 @verbatim
974 socket_locker(T, S)
975 |
976 | [1]
977 |
978 |-> pfs_socket(S) =====>> [A], [B], [C], [D], [E]
979 |
980 | [2]
981 |
982 |-> pfs_socket_class(S.class) =====>> [C], [D]
983 |
984 |-> pfs_thread(T).event_name(S) =====>> [A]
985 |
986 | [3]
987 |
988 3a |-> pfs_account(U, H).event_name(S) =====>> [F], [G], [H]
989 . |
990 . | [4-RESET]
991 . |
992 3b .....+-> pfs_user(U).event_name(S) =====>> [G]
993 . |
994 3c .....+-> pfs_host(H).event_name(S) =====>> [H]
995 @endverbatim
996
997 Implemented as:
998 - [1] @c start_socket_wait_v1(), @c end_socket_wait_v1().
999 - [2] @c close_socket_v1()
1000 - [3] @c aggregate_thread_waits()
1001 - [4] @c PFS_account::aggregate_waits()
1002 - [5] @c PFS_host::aggregate_waits()
1003 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1004 @c table_ews_by_thread_by_event_name::make_row()
1005 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
1006 @c table_events_waits_summary_by_instance::make_socket_row()
1007 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
1008 @c table_ews_global_by_event_name::make_socket_row()
1009 - [D] SOCKET_SUMMARY_BY_EVENT_NAME,
1010 @c table_socket_summary_by_event_name::make_row()
1011 - [E] SOCKET_SUMMARY_BY_INSTANCE,
1012 @c table_socket_summary_by_instance::make_row()
1013 - [F] EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
1014 @c table_ews_by_account_by_event_name::make_row()
1015 - [G] EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME,
1016 @c table_ews_by_user_by_event_name::make_row()
1017 - [H] EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME,
1018 @c table_ews_by_host_by_event_name::make_row()
1019
1020 @subsection IMPL_WAIT_TABLE Table waits
1021
1022 @verbatim
1023 table_locker(Thread Th, Table Tb, Event = io or lock)
1024 |
1025 | [1]
1026 |
1027 1a |-> pfs_table(Tb) =====>> [A], [B], [C]
1028 | |
1029 | | [2]
1030 | |
1031 | |-> pfs_table_share(Tb.share) =====>> [B], [C]
1032 | |
1033 | | [3]
1034 | |
1035 | |-> global_table_io_stat =====>> [C]
1036 | |
1037 | |-> global_table_lock_stat =====>> [C]
1038 |
1039 1b |-> pfs_thread(Th).event_name(E) =====>> [D], [E], [F], [G]
1040 | |
1041 | | [ 4-RESET]
1042 | |
1043 | |-> pfs_account(U, H).event_name(E) =====>> [E], [F], [G]
1044 | . |
1045 | . | [5-RESET]
1046 | . |
1047 | .....+-> pfs_user(U).event_name(E) =====>> [F]
1048 | . |
1049 | .....+-> pfs_host(H).event_name(E) =====>> [G]
1050 |
1051 1c |-> pfs_thread(Th).waits_current(W) =====>> [H]
1052 |
1053 1d |-> pfs_thread(Th).waits_history(W) =====>> [I]
1054 |
1055 1e |-> waits_history_long(W) =====>> [J]
1056 @endverbatim
1057
1058 Implemented as:
1059 - [1] @c start_table_io_wait_v1(), @c end_table_io_wait_v1()
1060 - [2] @c close_table_v1()
1061 - [3] @c drop_table_share_v1()
1062 - [4] @c TRUNCATE TABLE EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1063 - [5] @c TRUNCATE TABLE EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1064 - [A] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
1065 @c table_events_waits_summary_by_instance::make_table_row()
1066 - [B] OBJECTS_SUMMARY_GLOBAL_BY_TYPE,
1067 @c table_os_global_by_type::make_row()
1068 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
1069 @c table_ews_global_by_event_name::make_table_io_row(),
1070 @c table_ews_global_by_event_name::make_table_lock_row()
1071 - [D] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1072 @c table_ews_by_thread_by_event_name::make_row()
  - [E] EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
        @c table_ews_by_account_by_event_name::make_row()
1075 - [F] EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME,
1076 @c table_ews_by_user_by_event_name::make_row()
1077 - [G] EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME,
1078 @c table_ews_by_host_by_event_name::make_row()
1079 - [H] EVENTS_WAITS_CURRENT,
1080 @c table_events_waits_common::make_row()
1081 - [I] EVENTS_WAITS_HISTORY,
1082 @c table_events_waits_common::make_row()
1083 - [J] EVENTS_WAITS_HISTORY_LONG,
1084 @c table_events_waits_common::make_row()
1085
1086 @section IMPL_STAGE Implementation for stages aggregates
1087
1088 For stages, the tables that contains aggregated data are:
1089 - EVENTS_STAGES_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1090 - EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME
1091 - EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
1092 - EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME
1093 - EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME
1094
1095 @verbatim
1096 start_stage(T, S)
1097 |
1098 | [1]
1099 |
1100 1a |-> pfs_thread(T).event_name(S) =====>> [A], [B], [C], [D], [E]
1101 | |
1102 | | [2]
1103 | |
1104 | 2a |-> pfs_account(U, H).event_name(S) =====>> [B], [C], [D], [E]
1105 | . |
1106 | . | [3-RESET]
1107 | . |
1108 | 2b .....+-> pfs_user(U).event_name(S) =====>> [C]
1109 | . |
1110 | 2c .....+-> pfs_host(H).event_name(S) =====>> [D], [E]
1111 | . . |
1112 | . . | [4-RESET]
1113 | 2d . . |
1114 1b |----+----+----+-> pfs_stage_class(S) =====>> [E]
1115
1116 @endverbatim
1117
1118 Implemented as:
1119 - [1] @c start_stage_v1()
1120 - [2] @c delete_thread_v1(), @c aggregate_thread_stages()
1121 - [3] @c PFS_account::aggregate_stages()
1122 - [4] @c PFS_host::aggregate_stages()
1123 - [A] EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1124 @c table_esgs_by_thread_by_event_name::make_row()
1125 - [B] EVENTS_STAGES_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
1126 @c table_esgs_by_account_by_event_name::make_row()
1127 - [C] EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME,
1128 @c table_esgs_by_user_by_event_name::make_row()
1129 - [D] EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME,
1130 @c table_esgs_by_host_by_event_name::make_row()
1131 - [E] EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME,
1132 @c table_esgs_global_by_event_name::make_row()
1133
1134 @section IMPL_STATEMENT Implementation for statements consumers
1135
1136 For statements, the tables that contains individual event data are:
1137 - EVENTS_STATEMENTS_CURRENT
1138 - EVENTS_STATEMENTS_HISTORY
1139 - EVENTS_STATEMENTS_HISTORY_LONG
1140
1141 For statements, the tables that contains aggregated data are:
1142 - EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1143 - EVENTS_STATEMENTS_SUMMARY_BY_HOST_BY_EVENT_NAME
1144 - EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1145 - EVENTS_STATEMENTS_SUMMARY_BY_USER_BY_EVENT_NAME
1146 - EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME
1147 - EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
1148
1149 @verbatim
1150 statement_locker(T, S)
1151 |
1152 | [1]
1153 |
1154 1a |-> pfs_thread(T).event_name(S) =====>> [A], [B], [C], [D], [E]
1155 | |
1156 | | [2]
1157 | |
1158 | 2a |-> pfs_account(U, H).event_name(S) =====>> [B], [C], [D], [E]
1159 | . |
1160 | . | [3-RESET]
1161 | . |
1162 | 2b .....+-> pfs_user(U).event_name(S) =====>> [C]
1163 | . |
1164 | 2c .....+-> pfs_host(H).event_name(S) =====>> [D], [E]
1165 | . . |
1166 | . . | [4-RESET]
1167 | 2d . . |
1168 1b |----+----+----+-> pfs_statement_class(S) =====>> [E]
1169 |
1170 1c |-> pfs_thread(T).statement_current(S) =====>> [F]
1171 |
1172 1d |-> pfs_thread(T).statement_history(S) =====>> [G]
1173 |
1174 1e |-> statement_history_long(S) =====>> [H]
1175 |
1176 1f |-> statement_digest(S) =====>> [I]
1177
1178 @endverbatim
1179
1180 Implemented as:
1181 - [1] @c start_statement_v1(), end_statement_v1()
1182 (1a, 1b) is an aggregation by EVENT_NAME,
1183 (1c, 1d, 1e) is an aggregation by TIME,
1184 (1f) is an aggregation by DIGEST
1185 all of these are orthogonal,
1186 and implemented in end_statement_v1().
1187 - [2] @c delete_thread_v1(), @c aggregate_thread_statements()
1188 - [3] @c PFS_account::aggregate_statements()
1189 - [4] @c PFS_host::aggregate_statements()
1190 - [A] EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1191 @c table_esms_by_thread_by_event_name::make_row()
1192 - [B] EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
1193 @c table_esms_by_account_by_event_name::make_row()
1194 - [C] EVENTS_STATEMENTS_SUMMARY_BY_USER_BY_EVENT_NAME,
1195 @c table_esms_by_user_by_event_name::make_row()
1196 - [D] EVENTS_STATEMENTS_SUMMARY_BY_HOST_BY_EVENT_NAME,
1197 @c table_esms_by_host_by_event_name::make_row()
1198 - [E] EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME,
1199 @c table_esms_global_by_event_name::make_row()
1200 - [F] EVENTS_STATEMENTS_CURRENT,
1201 @c table_events_statements_current::rnd_next(),
1202 @c table_events_statements_common::make_row()
1203 - [G] EVENTS_STATEMENTS_HISTORY,
1204 @c table_events_statements_history::rnd_next(),
1205 @c table_events_statements_common::make_row()
1206 - [H] EVENTS_STATEMENTS_HISTORY_LONG,
1207 @c table_events_statements_history_long::rnd_next(),
1208 @c table_events_statements_common::make_row()
1209 - [I] EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
1210 @c table_esms_by_digest::make_row()
1211
1212 @section IMPL_TRANSACTION Implementation for transactions consumers
1213
1214 For transactions, the tables that contains individual event data are:
1215 - EVENTS_TRANSACTIONS_CURRENT
1216 - EVENTS_TRANSACTIONS_HISTORY
1217 - EVENTS_TRANSACTIONS_HISTORY_LONG
1218
1219 For transactions, the tables that contains aggregated data are:
1220 - EVENTS_TRANSACTIONS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1221 - EVENTS_TRANSACTIONS_SUMMARY_BY_HOST_BY_EVENT_NAME
1222 - EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1223 - EVENTS_TRANSACTIONS_SUMMARY_BY_USER_BY_EVENT_NAME
1224 - EVENTS_TRANSACTIONS_SUMMARY_GLOBAL_BY_EVENT_NAME
1225
1226 @verbatim
1227 transaction_locker(T, TX)
1228 |
1229 | [1]
1230 |
1231 1a |-> pfs_thread(T).event_name(TX) =====>> [A], [B], [C], [D], [E]
1232 | |
1233 | | [2]
1234 | |
1235 | 2a |-> pfs_account(U, H).event_name(TX) =====>> [B], [C], [D], [E]
1236 | . |
1237 | . | [3-RESET]
1238 | . |
1239 | 2b .....+-> pfs_user(U).event_name(TX) =====>> [C]
1240 | . |
1241 | 2c .....+-> pfs_host(H).event_name(TX) =====>> [D], [E]
1242 | . . |
1243 | . . | [4-RESET]
1244 | 2d . . |
1245 1b |----+----+----+-> pfs_transaction_class(TX) =====>> [E]
1246 |
1247 1c |-> pfs_thread(T).transaction_current(TX) =====>> [F]
1248 |
1249 1d |-> pfs_thread(T).transaction_history(TX) =====>> [G]
1250 |
1251 1e |-> transaction_history_long(TX) =====>> [H]
1252
1253 @endverbatim
1254
1255 Implemented as:
1256 - [1] @c start_transaction_v1(), end_transaction_v1()
1257 (1a, 1b) is an aggregation by EVENT_NAME,
1258 (1c, 1d, 1e) is an aggregation by TIME,
1259 all of these are orthogonal,
1260 and implemented in end_transaction_v1().
1261 - [2] @c delete_thread_v1(), @c aggregate_thread_transactions()
1262 - [3] @c PFS_account::aggregate_transactions()
1263 - [4] @c PFS_host::aggregate_transactions()
1264
1265 - [A] EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1266 @c table_ets_by_thread_by_event_name::make_row()
1267 - [B] EVENTS_TRANSACTIONS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
1268 @c table_ets_by_account_by_event_name::make_row()
1269 - [C] EVENTS_TRANSACTIONS_SUMMARY_BY_USER_BY_EVENT_NAME,
1270 @c table_ets_by_user_by_event_name::make_row()
1271 - [D] EVENTS_TRANSACTIONS_SUMMARY_BY_HOST_BY_EVENT_NAME,
1272 @c table_ets_by_host_by_event_name::make_row()
1273 - [E] EVENTS_TRANSACTIONS_SUMMARY_GLOBAL_BY_EVENT_NAME,
1274 @c table_ets_global_by_event_name::make_row()
1275 - [F] EVENTS_TRANSACTIONS_CURRENT,
1276 @c table_events_transactions_current::rnd_next(),
1277 @c table_events_transactions_common::make_row()
1278 - [G] EVENTS_TRANSACTIONS_HISTORY,
1279 @c table_events_transactions_history::rnd_next(),
1280 @c table_events_transactions_common::make_row()
1281 - [H] EVENTS_TRANSACTIONS_HISTORY_LONG,
1282 @c table_events_transactions_history_long::rnd_next(),
1283 @c table_events_transactions_common::make_row()
1284
1285 @section IMPL_MEMORY Implementation for memory instruments
1286
1287 For memory, there are no tables that contains individual event data.
1288
1289 For memory, the tables that contains aggregated data are:
1290 - MEMORY_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1291 - MEMORY_SUMMARY_BY_HOST_BY_EVENT_NAME
1292 - MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME
1293 - MEMORY_SUMMARY_BY_USER_BY_EVENT_NAME
1294 - MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME
1295
1296 @verbatim
1297 memory_event(T, S)
1298 |
1299 | [1]
1300 |
1301 1a |-> pfs_thread(T).event_name(S) =====>> [A], [B], [C], [D], [E]
1302 | |
1303 | | [2]
1304 | |
1305 1+ | 2a |-> pfs_account(U, H).event_name(S) =====>> [B], [C], [D], [E]
1306 | . |
1307 | . | [3-RESET]
1308 | . |
1309 1+ | 2b .....+-> pfs_user(U).event_name(S) =====>> [C]
1310 | . |
1311 1+ | 2c .....+-> pfs_host(H).event_name(S) =====>> [D], [E]
1312 | . . |
1313 | . . | [4-RESET]
1314 | 2d . . |
1315 1b |----+----+----+-> global.event_name(S) =====>> [E]
1316
1317 @endverbatim
1318
1319 Implemented as:
1320 - [1] @c pfs_memory_alloc_v1(),
1321 @c pfs_memory_realloc_v1(),
1322 @c pfs_memory_free_v1().
1323 - [1+] are overflows that can happen during [1a],
1324 implemented with @c carry_memory_stat_delta()
1325 - [2] @c delete_thread_v1(), @c aggregate_thread_memory()
1326 - [3] @c PFS_account::aggregate_memory()
1327 - [4] @c PFS_host::aggregate_memory()
  - [A] MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME,
        @c table_mems_by_thread_by_event_name::make_row()
  - [B] MEMORY_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
        @c table_mems_by_account_by_event_name::make_row()
  - [C] MEMORY_SUMMARY_BY_USER_BY_EVENT_NAME,
        @c table_mems_by_user_by_event_name::make_row()
  - [D] MEMORY_SUMMARY_BY_HOST_BY_EVENT_NAME,
        @c table_mems_by_host_by_event_name::make_row()
  - [E] MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME,
        @c table_mems_global_by_event_name::make_row()
1338
1339 */
1340
1341 /**
1342 @defgroup Performance_schema Performance Schema
1343 The performance schema component.
1344 For details, see the
1345 @ref PAGE_PERFORMANCE_SCHEMA "performance schema main page".
1346
1347 @defgroup Performance_schema_implementation Performance Schema Implementation
1348 @ingroup Performance_schema
1349
1350 @defgroup Performance_schema_tables Performance Schema Tables
1351 @ingroup Performance_schema_implementation
1352 */
1353
/** Key mapping each OS thread to its PFS_thread instrumentation. */
thread_local_key_t THR_PFS;
/* Per-table-implementation thread local keys; the suffix names the table. */
thread_local_key_t THR_PFS_VG;   // global_variables
thread_local_key_t THR_PFS_SV;   // session_variables
thread_local_key_t THR_PFS_VBT;  // variables_by_thread
thread_local_key_t THR_PFS_SG;   // global_status
thread_local_key_t THR_PFS_SS;   // session_status
thread_local_key_t THR_PFS_SBT;  // status_by_thread
thread_local_key_t THR_PFS_SBU;  // status_by_user
thread_local_key_t THR_PFS_SBH;  // status_by_host
thread_local_key_t THR_PFS_SBA;  // status_by_account

/** True once the THR_PFS* thread local keys have been created. */
bool THR_PFS_initialized= false;
1366
1367 static inline PFS_thread*
my_thread_get_THR_PFS()1368 my_thread_get_THR_PFS()
1369 {
1370 assert(THR_PFS_initialized);
1371 PFS_thread *thread= static_cast<PFS_thread*>(my_get_thread_local(THR_PFS));
1372 assert(thread == NULL || sanitize_thread(thread) != NULL);
1373 return thread;
1374 }
1375
/**
  Attach a PFS_thread instrumentation to the calling thread.
  Requires the THR_PFS key to be initialized.
  @param pfs the instrumentation to attach, or NULL to detach.
*/
static inline void
my_thread_set_THR_PFS(PFS_thread *pfs)
{
  assert(THR_PFS_initialized);
  my_set_thread_local(THR_PFS, pfs);
}
1382
1383 /**
1384 Conversion map from PSI_mutex_operation to enum_operation_type.
1385 Indexed by enum PSI_mutex_operation.
1386 */
1387 static enum_operation_type mutex_operation_map[]=
1388 {
1389 OPERATION_TYPE_LOCK,
1390 OPERATION_TYPE_TRYLOCK
1391 };
1392
1393 /**
1394 Conversion map from PSI_rwlock_operation to enum_operation_type.
1395 Indexed by enum PSI_rwlock_operation.
1396 */
1397 static enum_operation_type rwlock_operation_map[]=
1398 {
1399 OPERATION_TYPE_READLOCK,
1400 OPERATION_TYPE_WRITELOCK,
1401 OPERATION_TYPE_TRYREADLOCK,
1402 OPERATION_TYPE_TRYWRITELOCK,
1403
1404 OPERATION_TYPE_SHAREDLOCK,
1405 OPERATION_TYPE_SHAREDEXCLUSIVELOCK,
1406 OPERATION_TYPE_EXCLUSIVELOCK,
1407 OPERATION_TYPE_TRYSHAREDLOCK,
1408 OPERATION_TYPE_TRYSHAREDEXCLUSIVELOCK,
1409 OPERATION_TYPE_TRYEXCLUSIVELOCK,
1410 };
1411
1412 /**
1413 Conversion map from PSI_cond_operation to enum_operation_type.
1414 Indexed by enum PSI_cond_operation.
1415 */
1416 static enum_operation_type cond_operation_map[]=
1417 {
1418 OPERATION_TYPE_WAIT,
1419 OPERATION_TYPE_TIMEDWAIT
1420 };
1421
1422 /**
1423 Conversion map from PSI_file_operation to enum_operation_type.
1424 Indexed by enum PSI_file_operation.
1425 */
1426 static enum_operation_type file_operation_map[]=
1427 {
1428 OPERATION_TYPE_FILECREATE,
1429 OPERATION_TYPE_FILECREATETMP,
1430 OPERATION_TYPE_FILEOPEN,
1431 OPERATION_TYPE_FILESTREAMOPEN,
1432 OPERATION_TYPE_FILECLOSE,
1433 OPERATION_TYPE_FILESTREAMCLOSE,
1434 OPERATION_TYPE_FILEREAD,
1435 OPERATION_TYPE_FILEWRITE,
1436 OPERATION_TYPE_FILESEEK,
1437 OPERATION_TYPE_FILETELL,
1438 OPERATION_TYPE_FILEFLUSH,
1439 OPERATION_TYPE_FILESTAT,
1440 OPERATION_TYPE_FILEFSTAT,
1441 OPERATION_TYPE_FILECHSIZE,
1442 OPERATION_TYPE_FILEDELETE,
1443 OPERATION_TYPE_FILERENAME,
1444 OPERATION_TYPE_FILESYNC
1445 };
1446
1447 /**
1448 Conversion map from PSI_table_operation to enum_operation_type.
1449 Indexed by enum PSI_table_io_operation.
1450 */
1451 static enum_operation_type table_io_operation_map[]=
1452 {
1453 OPERATION_TYPE_TABLE_FETCH,
1454 OPERATION_TYPE_TABLE_WRITE_ROW,
1455 OPERATION_TYPE_TABLE_UPDATE_ROW,
1456 OPERATION_TYPE_TABLE_DELETE_ROW
1457 };
1458
1459 /**
1460 Conversion map from enum PFS_TL_LOCK_TYPE to enum_operation_type.
1461 Indexed by enum PFS_TL_LOCK_TYPE.
1462 */
1463 static enum_operation_type table_lock_operation_map[]=
1464 {
1465 OPERATION_TYPE_TL_READ_NORMAL, /* PFS_TL_READ */
1466 OPERATION_TYPE_TL_READ_WITH_SHARED_LOCKS, /* PFS_TL_READ_WITH_SHARED_LOCKS */
1467 OPERATION_TYPE_TL_READ_HIGH_PRIORITY, /* PFS_TL_READ_HIGH_PRIORITY */
1468 OPERATION_TYPE_TL_READ_NO_INSERTS, /* PFS_TL_READ_NO_INSERT */
1469 OPERATION_TYPE_TL_WRITE_ALLOW_WRITE, /* PFS_TL_WRITE_ALLOW_WRITE */
1470 OPERATION_TYPE_TL_WRITE_CONCURRENT_INSERT, /* PFS_TL_WRITE_CONCURRENT_INSERT */
1471 OPERATION_TYPE_TL_WRITE_LOW_PRIORITY, /* PFS_TL_WRITE_LOW_PRIORITY */
1472 OPERATION_TYPE_TL_WRITE_NORMAL, /* PFS_TL_WRITE */
1473 OPERATION_TYPE_TL_READ_EXTERNAL, /* PFS_TL_READ_EXTERNAL */
1474 OPERATION_TYPE_TL_WRITE_EXTERNAL /* PFS_TL_WRITE_EXTERNAL */
1475 };
1476
1477 /**
1478 Conversion map from PSI_socket_operation to enum_operation_type.
1479 Indexed by enum PSI_socket_operation.
1480 */
1481 static enum_operation_type socket_operation_map[]=
1482 {
1483 OPERATION_TYPE_SOCKETCREATE,
1484 OPERATION_TYPE_SOCKETCONNECT,
1485 OPERATION_TYPE_SOCKETBIND,
1486 OPERATION_TYPE_SOCKETCLOSE,
1487 OPERATION_TYPE_SOCKETSEND,
1488 OPERATION_TYPE_SOCKETRECV,
1489 OPERATION_TYPE_SOCKETSENDTO,
1490 OPERATION_TYPE_SOCKETRECVFROM,
1491 OPERATION_TYPE_SOCKETSENDMSG,
1492 OPERATION_TYPE_SOCKETRECVMSG,
1493 OPERATION_TYPE_SOCKETSEEK,
1494 OPERATION_TYPE_SOCKETOPT,
1495 OPERATION_TYPE_SOCKETSTAT,
1496 OPERATION_TYPE_SOCKETSHUTDOWN,
1497 OPERATION_TYPE_SOCKETSELECT
1498 };
1499
1500 /**
1501 Build the prefix name of a class of instruments in a category.
1502 For example, this function builds the string 'wait/sync/mutex/sql/' from
1503 a prefix 'wait/sync/mutex' and a category 'sql'.
1504 This prefix is used later to build each instrument name, such as
1505 'wait/sync/mutex/sql/LOCK_open'.
1506 @param prefix Prefix for this class of instruments
1507 @param category Category name
1508 @param [out] output Buffer of length PFS_MAX_INFO_NAME_LENGTH.
1509 @param [out] output_length Length of the resulting output string.
1510 @return 0 for success, non zero for errors
1511 */
build_prefix(const LEX_STRING * prefix,const char * category,char * output,size_t * output_length)1512 static int build_prefix(const LEX_STRING *prefix, const char *category,
1513 char *output, size_t *output_length)
1514 {
1515 size_t len= strlen(category);
1516 char *out_ptr= output;
1517 size_t prefix_length= prefix->length;
1518
1519 if (unlikely((prefix_length + len + 1) >=
1520 PFS_MAX_FULL_PREFIX_NAME_LENGTH))
1521 {
1522 pfs_print_error("build_prefix: prefix+category is too long <%s> <%s>\n",
1523 prefix->str, category);
1524 return 1;
1525 }
1526
1527 if (unlikely(strchr(category, '/') != NULL))
1528 {
1529 pfs_print_error("build_prefix: invalid category <%s>\n",
1530 category);
1531 return 1;
1532 }
1533
1534 /* output = prefix + category + '/' */
1535 memcpy(out_ptr, prefix->str, prefix_length);
1536 out_ptr+= prefix_length;
1537 if (len > 0)
1538 {
1539 memcpy(out_ptr, category, len);
1540 out_ptr+= len;
1541 *out_ptr= '/';
1542 out_ptr++;
1543 }
1544 *output_length= int(out_ptr - output);
1545
1546 return 0;
1547 }
1548
/**
  Shared body for the pfs_register_<instrument>_v1 functions.
  Expects 'category', 'info' and 'count' in the caller's scope.
  Builds the full instrument name from PREFIX and each entry's m_name,
  registers it with REGISTER_FUNC, and stores the resulting KEY_T key
  through info->m_key. The key is set to 0 when the prefix cannot be
  built, the performance schema is not initialized, or the full name
  exceeds PFS_MAX_INFO_NAME_LENGTH.
*/
#define REGISTER_BODY_V1(KEY_T, PREFIX, REGISTER_FUNC) \
  KEY_T key; \
  char formatted_name[PFS_MAX_INFO_NAME_LENGTH]; \
  size_t prefix_length; \
  size_t len; \
  size_t full_length; \
  \
  assert(category != NULL); \
  assert(info != NULL); \
  if (unlikely(build_prefix(&PREFIX, category, \
                            formatted_name, &prefix_length)) || \
      ! pfs_initialized) \
  { \
    for (; count>0; count--, info++) \
      *(info->m_key)= 0; \
    return ; \
  } \
  \
  for (; count>0; count--, info++) \
  { \
    assert(info->m_key != NULL); \
    assert(info->m_name != NULL); \
    len= strlen(info->m_name); \
    full_length= prefix_length + len; \
    if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH)) \
    { \
      memcpy(formatted_name + prefix_length, info->m_name, len); \
      key= REGISTER_FUNC(formatted_name, (uint)full_length, info->m_flags); \
    } \
    else \
    { \
      pfs_print_error("REGISTER_BODY_V1: name too long <%s> <%s>\n", \
                      category, info->m_name); \
      key= 0; \
    } \
    \
    *(info->m_key)= key; \
  } \
  return;
1588
1589 /* Use C linkage for the interface functions. */
1590
1591 C_MODE_START
1592
1593 /**
1594 Implementation of the mutex instrumentation interface.
1595 @sa PSI_v1::register_mutex.
1596 */
pfs_register_mutex_v1(const char * category,PSI_mutex_info_v1 * info,int count)1597 void pfs_register_mutex_v1(const char *category,
1598 PSI_mutex_info_v1 *info,
1599 int count)
1600 {
1601 REGISTER_BODY_V1(PSI_mutex_key,
1602 mutex_instrument_prefix,
1603 register_mutex_class)
1604 }
1605
1606 /**
1607 Implementation of the rwlock instrumentation interface.
1608 @sa PSI_v1::register_rwlock.
1609 */
pfs_register_rwlock_v1(const char * category,PSI_rwlock_info_v1 * info,int count)1610 void pfs_register_rwlock_v1(const char *category,
1611 PSI_rwlock_info_v1 *info,
1612 int count)
1613 {
1614 PSI_rwlock_key key;
1615 char rw_formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1616 char sx_formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1617 size_t rw_prefix_length;
1618 size_t sx_prefix_length;
1619 size_t len;
1620 size_t full_length;
1621
1622 assert(category != NULL);
1623 assert(info != NULL);
1624 if (build_prefix(&rwlock_instrument_prefix, category,
1625 rw_formatted_name, &rw_prefix_length) ||
1626 build_prefix(&sxlock_instrument_prefix, category,
1627 sx_formatted_name, &sx_prefix_length) ||
1628 ! pfs_initialized)
1629 {
1630 for (; count>0; count--, info++)
1631 *(info->m_key)= 0;
1632 return ;
1633 }
1634
1635 for (; count>0; count--, info++)
1636 {
1637 assert(info->m_key != NULL);
1638 assert(info->m_name != NULL);
1639 len= strlen(info->m_name);
1640
1641 if (info->m_flags & PSI_RWLOCK_FLAG_SX)
1642 {
1643 full_length= sx_prefix_length + len;
1644 if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1645 {
1646 memcpy(sx_formatted_name + sx_prefix_length, info->m_name, len);
1647 key= register_rwlock_class(sx_formatted_name, (uint)full_length, info->m_flags);
1648 }
1649 else
1650 {
1651 pfs_print_error("REGISTER_BODY_V1: (sx) name too long <%s> <%s>\n",
1652 category, info->m_name);
1653 key= 0;
1654 }
1655 }
1656 else
1657 {
1658 full_length= rw_prefix_length + len;
1659 if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1660 {
1661 memcpy(rw_formatted_name + rw_prefix_length, info->m_name, len);
1662 key= register_rwlock_class(rw_formatted_name, (uint)full_length, info->m_flags);
1663 }
1664 else
1665 {
1666 pfs_print_error("REGISTER_BODY_V1: (rw) name too long <%s> <%s>\n",
1667 category, info->m_name);
1668 key= 0;
1669 }
1670 }
1671
1672 *(info->m_key)= key;
1673 }
1674 return;
1675 }
1676
1677 /**
1678 Implementation of the cond instrumentation interface.
1679 @sa PSI_v1::register_cond.
1680 */
pfs_register_cond_v1(const char * category,PSI_cond_info_v1 * info,int count)1681 void pfs_register_cond_v1(const char *category,
1682 PSI_cond_info_v1 *info,
1683 int count)
1684 {
1685 REGISTER_BODY_V1(PSI_cond_key,
1686 cond_instrument_prefix,
1687 register_cond_class)
1688 }
1689
1690 /**
1691 Implementation of the thread instrumentation interface.
1692 @sa PSI_v1::register_thread.
1693 */
pfs_register_thread_v1(const char * category,PSI_thread_info_v1 * info,int count)1694 void pfs_register_thread_v1(const char *category,
1695 PSI_thread_info_v1 *info,
1696 int count)
1697 {
1698 REGISTER_BODY_V1(PSI_thread_key,
1699 thread_instrument_prefix,
1700 register_thread_class)
1701 }
1702
1703 /**
1704 Implementation of the file instrumentation interface.
1705 @sa PSI_v1::register_file.
1706 */
pfs_register_file_v1(const char * category,PSI_file_info_v1 * info,int count)1707 void pfs_register_file_v1(const char *category,
1708 PSI_file_info_v1 *info,
1709 int count)
1710 {
1711 REGISTER_BODY_V1(PSI_file_key,
1712 file_instrument_prefix,
1713 register_file_class)
1714 }
1715
pfs_register_stage_v1(const char * category,PSI_stage_info_v1 ** info_array,int count)1716 void pfs_register_stage_v1(const char *category,
1717 PSI_stage_info_v1 **info_array,
1718 int count)
1719 {
1720 char formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1721 size_t prefix_length;
1722 size_t len;
1723 size_t full_length;
1724 PSI_stage_info_v1 *info;
1725
1726 assert(category != NULL);
1727 assert(info_array != NULL);
1728 if (unlikely(build_prefix(&stage_instrument_prefix, category,
1729 formatted_name, &prefix_length)) ||
1730 ! pfs_initialized)
1731 {
1732 for (; count>0; count--, info_array++)
1733 (*info_array)->m_key= 0;
1734 return ;
1735 }
1736
1737 for (; count>0; count--, info_array++)
1738 {
1739 info= *info_array;
1740 assert(info != NULL);
1741 assert(info->m_name != NULL);
1742 len= strlen(info->m_name);
1743 full_length= prefix_length + len;
1744 if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1745 {
1746 memcpy(formatted_name + prefix_length, info->m_name, len);
1747 info->m_key= register_stage_class(formatted_name,
1748 (uint)prefix_length,
1749 (uint)full_length,
1750 info->m_flags);
1751 }
1752 else
1753 {
1754 pfs_print_error("register_stage_v1: name too long <%s> <%s>\n",
1755 category, info->m_name);
1756 info->m_key= 0;
1757 }
1758 }
1759 return;
1760 }
1761
pfs_register_statement_v1(const char * category,PSI_statement_info_v1 * info,int count)1762 void pfs_register_statement_v1(const char *category,
1763 PSI_statement_info_v1 *info,
1764 int count)
1765 {
1766 char formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1767 size_t prefix_length;
1768 size_t len;
1769 size_t full_length;
1770
1771 assert(category != NULL);
1772 assert(info != NULL);
1773 if (unlikely(build_prefix(&statement_instrument_prefix,
1774 category, formatted_name, &prefix_length)) ||
1775 ! pfs_initialized)
1776 {
1777 for (; count>0; count--, info++)
1778 info->m_key= 0;
1779 return ;
1780 }
1781
1782 for (; count>0; count--, info++)
1783 {
1784 assert(info->m_name != NULL);
1785 len= strlen(info->m_name);
1786 full_length= prefix_length + len;
1787 if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1788 {
1789 memcpy(formatted_name + prefix_length, info->m_name, len);
1790 info->m_key= register_statement_class(formatted_name, (uint)full_length, info->m_flags);
1791 }
1792 else
1793 {
1794 pfs_print_error("register_statement_v1: name too long <%s>\n",
1795 info->m_name);
1796 info->m_key= 0;
1797 }
1798 }
1799 return;
1800 }
1801
/**
  Implementation of the socket instrumentation interface.
  @sa PSI_v1::register_socket.
*/
void pfs_register_socket_v1(const char *category,
                            PSI_socket_info_v1 *info,
                            int count)
{
  /* Delegate to the shared registration macro for socket instruments. */
  REGISTER_BODY_V1(PSI_socket_key,
                   socket_instrument_prefix,
                   register_socket_class)
}
1810
/**
  Common body for the init_mutex / init_rwlock / init_cond entry points.
  @param T    instrument type tag (mutex, rwlock, cond)
  @param KEY  registered instrument class key
  @param ID   opaque instance identity supplied by the caller
  Looks up the instrument class for KEY; returns NULL when the class is
  unknown, otherwise creates the instrumented object and returns it cast
  to the matching PSI type.
*/
#define INIT_BODY_V1(T, KEY, ID) \
  PFS_##T##_class *klass; \
  PFS_##T *pfs; \
  klass= find_##T##_class(KEY); \
  if (unlikely(klass == NULL)) \
    return NULL; \
  pfs= create_##T(klass, ID); \
  return reinterpret_cast<PSI_##T *> (pfs)
1819
1820 /**
1821 Implementation of the mutex instrumentation interface.
1822 @sa PSI_v1::init_mutex.
1823 */
PSI_mutex*
pfs_init_mutex_v1(PSI_mutex_key key, const void *identity)
{
  /* Find the mutex class for key, create the instance; NULL if unknown key. */
  INIT_BODY_V1(mutex, key, identity);
}
1829
1830 /**
1831 Implementation of the mutex instrumentation interface.
1832 @sa PSI_v1::destroy_mutex.
1833 */
pfs_destroy_mutex_v1(PSI_mutex * mutex)1834 void pfs_destroy_mutex_v1(PSI_mutex* mutex)
1835 {
1836 PFS_mutex *pfs= reinterpret_cast<PFS_mutex*> (mutex);
1837
1838 assert(pfs != NULL);
1839
1840 destroy_mutex(pfs);
1841 }
1842
1843 /**
1844 Implementation of the rwlock instrumentation interface.
1845 @sa PSI_v1::init_rwlock.
1846 */
PSI_rwlock*
pfs_init_rwlock_v1(PSI_rwlock_key key, const void *identity)
{
  /* Find the rwlock class for key, create the instance; NULL if unknown key. */
  INIT_BODY_V1(rwlock, key, identity);
}
1852
1853 /**
1854 Implementation of the rwlock instrumentation interface.
1855 @sa PSI_v1::destroy_rwlock.
1856 */
pfs_destroy_rwlock_v1(PSI_rwlock * rwlock)1857 void pfs_destroy_rwlock_v1(PSI_rwlock* rwlock)
1858 {
1859 PFS_rwlock *pfs= reinterpret_cast<PFS_rwlock*> (rwlock);
1860
1861 assert(pfs != NULL);
1862
1863 destroy_rwlock(pfs);
1864 }
1865
1866 /**
1867 Implementation of the cond instrumentation interface.
1868 @sa PSI_v1::init_cond.
1869 */
PSI_cond*
pfs_init_cond_v1(PSI_cond_key key, const void *identity)
{
  /* Find the cond class for key, create the instance; NULL if unknown key. */
  INIT_BODY_V1(cond, key, identity);
}
1875
1876 /**
1877 Implementation of the cond instrumentation interface.
1878 @sa PSI_v1::destroy_cond.
1879 */
pfs_destroy_cond_v1(PSI_cond * cond)1880 void pfs_destroy_cond_v1(PSI_cond* cond)
1881 {
1882 PFS_cond *pfs= reinterpret_cast<PFS_cond*> (cond);
1883
1884 assert(pfs != NULL);
1885
1886 destroy_cond(pfs);
1887 }
1888
1889 /**
1890 Implementation of the table instrumentation interface.
1891 @sa PSI_v1::get_table_share.
1892 */
1893 PSI_table_share*
pfs_get_table_share_v1(my_bool temporary,TABLE_SHARE * share)1894 pfs_get_table_share_v1(my_bool temporary, TABLE_SHARE *share)
1895 {
1896 /* Ignore temporary tables and views. */
1897 if (temporary || share->is_view)
1898 return NULL;
1899 /* An instrumented thread is required, for LF_PINS. */
1900 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
1901 if (unlikely(pfs_thread == NULL))
1902 return NULL;
1903 PFS_table_share* pfs_share;
1904 pfs_share= find_or_create_table_share(pfs_thread, temporary, share);
1905 return reinterpret_cast<PSI_table_share*> (pfs_share);
1906 }
1907
1908 /**
1909 Implementation of the table instrumentation interface.
1910 @sa PSI_v1::release_table_share.
1911 */
pfs_release_table_share_v1(PSI_table_share * share)1912 void pfs_release_table_share_v1(PSI_table_share* share)
1913 {
1914 PFS_table_share* pfs= reinterpret_cast<PFS_table_share*> (share);
1915
1916 if (unlikely(pfs == NULL))
1917 return;
1918
1919 release_table_share(pfs);
1920 }
1921
1922 /**
1923 Implementation of the table instrumentation interface.
1924 @sa PSI_v1::drop_table_share.
1925 */
1926 void
pfs_drop_table_share_v1(my_bool temporary,const char * schema_name,int schema_name_length,const char * table_name,int table_name_length)1927 pfs_drop_table_share_v1(my_bool temporary,
1928 const char *schema_name, int schema_name_length,
1929 const char *table_name, int table_name_length)
1930 {
1931 /* Ignore temporary tables. */
1932 if (temporary)
1933 return;
1934 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
1935 if (unlikely(pfs_thread == NULL))
1936 return;
1937 /* TODO: temporary tables */
1938 drop_table_share(pfs_thread, temporary, schema_name, schema_name_length,
1939 table_name, table_name_length);
1940 }
1941
1942 /**
1943 Implementation of the table instrumentation interface.
1944 @sa PSI_v1::open_table.
1945 */
1946 PSI_table*
pfs_open_table_v1(PSI_table_share * share,const void * identity)1947 pfs_open_table_v1(PSI_table_share *share, const void *identity)
1948 {
1949 PFS_table_share *pfs_table_share= reinterpret_cast<PFS_table_share*> (share);
1950
1951 if (unlikely(pfs_table_share == NULL))
1952 return NULL;
1953
1954 /* This object is not to be instrumented. */
1955 if (! pfs_table_share->m_enabled)
1956 return NULL;
1957
1958 /* This object is instrumented, but all table instruments are disabled. */
1959 if (! global_table_io_class.m_enabled && ! global_table_lock_class.m_enabled)
1960 return NULL;
1961
1962 /*
1963 When the performance schema is off, do not instrument anything.
1964 Table handles have short life cycle, instrumentation will happen
1965 again if needed during the next open().
1966 */
1967 if (! flag_global_instrumentation)
1968 return NULL;
1969
1970 PFS_thread *thread= my_thread_get_THR_PFS();
1971 if (unlikely(thread == NULL))
1972 return NULL;
1973
1974 PFS_table *pfs_table= create_table(pfs_table_share, thread, identity);
1975 return reinterpret_cast<PSI_table *> (pfs_table);
1976 }
1977
1978 /**
1979 Implementation of the table instrumentation interface.
1980 @sa PSI_v1::unbind_table.
1981 */
pfs_unbind_table_v1(PSI_table * table)1982 void pfs_unbind_table_v1(PSI_table *table)
1983 {
1984 PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
1985 if (likely(pfs != NULL))
1986 {
1987 pfs->m_thread_owner= NULL;
1988 pfs->m_owner_event_id= 0;
1989 }
1990 }
1991
1992 /**
1993 Implementation of the table instrumentation interface.
1994 @sa PSI_v1::rebind_table.
1995 */
PSI_table *
pfs_rebind_table_v1(PSI_table_share *share, const void *identity, PSI_table *table)
{
  PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
  if (likely(pfs != NULL))
  {
    /* A handle being rebound must not currently belong to any thread. */
    assert(pfs->m_thread_owner == NULL);

    /* Destroy the handle if its share is no longer instrumented. */
    if (unlikely(! pfs->m_share->m_enabled))
    {
      destroy_table(pfs);
      return NULL;
    }

    /* Destroy the handle if all table instruments are now disabled. */
    if (unlikely(! global_table_io_class.m_enabled && ! global_table_lock_class.m_enabled))
    {
      destroy_table(pfs);
      return NULL;
    }

    /* Destroy the handle when the performance schema is globally off. */
    if (unlikely(! flag_global_instrumentation))
    {
      destroy_table(pfs);
      return NULL;
    }

    /* The table handle was already instrumented, reuse it for this thread. */
    PFS_thread *thread= my_thread_get_THR_PFS();
    pfs->m_thread_owner= thread;
    if (thread != NULL)
      pfs->m_owner_event_id= thread->m_event_id;
    else
      pfs->m_owner_event_id= 0;
    return table;
  }

  /* No existing handle: instrument a new one. See open_table_v1() */

  PFS_table_share *pfs_table_share= reinterpret_cast<PFS_table_share*> (share);

  if (unlikely(pfs_table_share == NULL))
    return NULL;

  if (! pfs_table_share->m_enabled)
    return NULL;

  if (! global_table_io_class.m_enabled && ! global_table_lock_class.m_enabled)
    return NULL;

  if (! flag_global_instrumentation)
    return NULL;

  PFS_thread *thread= my_thread_get_THR_PFS();
  if (unlikely(thread == NULL))
    return NULL;

  PFS_table *pfs_table= create_table(pfs_table_share, thread, identity);
  return reinterpret_cast<PSI_table *> (pfs_table);
}
2055
2056 /**
2057 Implementation of the table instrumentation interface.
2058 @sa PSI_v1::close_table.
2059 */
pfs_close_table_v1(TABLE_SHARE * server_share,PSI_table * table)2060 void pfs_close_table_v1(TABLE_SHARE *server_share, PSI_table *table)
2061 {
2062 PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
2063 if (unlikely(pfs == NULL))
2064 return;
2065 pfs->aggregate(server_share);
2066 destroy_table(pfs);
2067 }
2068
2069 PSI_socket*
pfs_init_socket_v1(PSI_socket_key key,const my_socket * fd,const struct sockaddr * addr,socklen_t addr_len)2070 pfs_init_socket_v1(PSI_socket_key key, const my_socket *fd,
2071 const struct sockaddr *addr, socklen_t addr_len)
2072 {
2073 PFS_socket_class *klass;
2074 PFS_socket *pfs;
2075 klass= find_socket_class(key);
2076 if (unlikely(klass == NULL))
2077 return NULL;
2078 pfs= create_socket(klass, fd, addr, addr_len);
2079 return reinterpret_cast<PSI_socket *> (pfs);
2080 }
2081
pfs_destroy_socket_v1(PSI_socket * socket)2082 void pfs_destroy_socket_v1(PSI_socket *socket)
2083 {
2084 PFS_socket *pfs= reinterpret_cast<PFS_socket*> (socket);
2085
2086 assert(pfs != NULL);
2087
2088 destroy_socket(pfs);
2089 }
2090
2091 /**
2092 Implementation of the file instrumentation interface.
2093 @sa PSI_v1::create_file.
2094 */
void pfs_create_file_v1(PSI_file_key key, const char *name, File file)
{
  /* Nothing is instrumented when the performance schema is globally off. */
  if (! flag_global_instrumentation)
    return;
  /* The file descriptor doubles as the index into file_handle_array. */
  int index= (int) file;
  if (unlikely(index < 0))
    return;
  PFS_file_class *klass= find_file_class(key);
  if (unlikely(klass == NULL))
    return;
  if (! klass->m_enabled)
    return;

  /* A thread is needed for LF_PINS */
  PFS_thread *pfs_thread= my_thread_get_THR_PFS();
  if (unlikely(pfs_thread == NULL))
    return;

  if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
    return;

  /*
    We want this check after pfs_thread->m_enabled,
    to avoid reporting false loss.
  */
  if (unlikely(index >= file_handle_max))
  {
    file_handle_lost++;
    return;
  }

  uint len= (uint)strlen(name);
  PFS_file *pfs_file= find_or_create_file(pfs_thread, klass, name, len, true);

  /* Map the descriptor to the instrumented file (may be NULL on failure). */
  file_handle_array[index]= pfs_file;
}
2131
2132 /**
2133 Arguments given from a parent to a child thread, packaged in one structure.
2134 This data is used when spawning a new instrumented thread.
2135 @sa pfs_spawn_thread.
2136 */
struct PFS_spawn_thread_arg
{
  /* Internal instrumentation id of the parent (spawning) thread. */
  ulonglong m_thread_internal_id;
  /* Parent user name; only the first m_username_length bytes are valid. */
  char m_username[USERNAME_LENGTH];
  uint m_username_length;
  /* Parent host name; only the first m_hostname_length bytes are valid. */
  char m_hostname[HOSTNAME_LENGTH];
  uint m_hostname_length;

  /* Instrument class key used to instrument the child thread. */
  PSI_thread_key m_child_key;
  /* Opaque identity associated with the child thread instrumentation. */
  const void *m_child_identity;
  /* User start routine and argument, invoked by pfs_spawn_thread(). */
  void *(*m_user_start_routine)(void*);
  void *m_user_arg;
};
2150
/**
  Thread start routine used by pfs_spawn_thread_v1().
  @param arg  a PFS_spawn_thread_arg allocated by the parent thread in
              pfs_spawn_thread_v1(); ownership is transferred here and the
              memory is released with my_free() before running user code.
  @return always NULL (the user routine's return value is discarded)
*/
extern "C" void* pfs_spawn_thread(void *arg)
{
  PFS_spawn_thread_arg *typed_arg= (PFS_spawn_thread_arg*) arg;
  void *user_arg;
  void *(*user_start_routine)(void*);

  PFS_thread *pfs;

  /* First, attach instrumentation to this newly created pthread. */
  PFS_thread_class *klass= find_thread_class(typed_arg->m_child_key);
  if (likely(klass != NULL))
  {
    pfs= create_thread(klass, typed_arg->m_child_identity, 0);
    if (likely(pfs != NULL))
    {
      pfs->m_thread_os_id= my_thread_os_id();
      clear_thread_account(pfs);

      /* Inherit the parent thread attributes copied into typed_arg. */
      pfs->m_parent_thread_internal_id= typed_arg->m_thread_internal_id;

      memcpy(pfs->m_username, typed_arg->m_username, sizeof(pfs->m_username));
      pfs->m_username_length= typed_arg->m_username_length;

      memcpy(pfs->m_hostname, typed_arg->m_hostname, sizeof(pfs->m_hostname));
      pfs->m_hostname_length= typed_arg->m_hostname_length;

      set_thread_account(pfs);
    }
  }
  else
  {
    /* Unknown thread class: run the thread uninstrumented. */
    pfs= NULL;
  }
  my_thread_set_THR_PFS(pfs);

  /*
    Secondly, free the memory allocated in spawn_thread_v1().
    It is preferable to do this before invoking the user
    routine, to avoid memory leaks at shutdown, in case
    the server exits without waiting for this thread.
  */
  user_start_routine= typed_arg->m_user_start_routine;
  user_arg= typed_arg->m_user_arg;
  my_free(typed_arg);

  /* Then, execute the user code for this thread. */
  (*user_start_routine)(user_arg);

  return NULL;
}
2201
2202 /**
2203 Implementation of the thread instrumentation interface.
2204 @sa PSI_v1::spawn_thread.
2205 */
int pfs_spawn_thread_v1(PSI_thread_key key,
                        my_thread_handle *thread, const my_thread_attr_t *attr,
                        void *(*start_routine)(void*), void *arg)
{
  PFS_spawn_thread_arg *psi_arg;
  PFS_thread *parent;

  /* psi_arg can not be global, and can not be a local variable. */
  psi_arg= (PFS_spawn_thread_arg*) my_malloc(PSI_NOT_INSTRUMENTED,
                                             sizeof(PFS_spawn_thread_arg),
                                             MYF(MY_WME));
  if (unlikely(psi_arg == NULL))
    return EAGAIN;

  psi_arg->m_child_key= key;
  psi_arg->m_child_identity= (arg ? arg : thread);
  psi_arg->m_user_start_routine= start_routine;
  psi_arg->m_user_arg= arg;

  parent= my_thread_get_THR_PFS();
  if (parent != NULL)
  {
    /*
      Make a copy of the parent attributes.
      This is required, because instrumentation for this thread (the parent)
      may be destroyed before the child thread instrumentation is created.
    */
    psi_arg->m_thread_internal_id= parent->m_thread_internal_id;

    memcpy(psi_arg->m_username, parent->m_username, sizeof(psi_arg->m_username));
    psi_arg->m_username_length= parent->m_username_length;

    memcpy(psi_arg->m_hostname, parent->m_hostname, sizeof(psi_arg->m_hostname));
    psi_arg->m_hostname_length= parent->m_hostname_length;
  }
  else
  {
    /* Spawned from an uninstrumented thread: no account to inherit. */
    psi_arg->m_thread_internal_id= 0;
    psi_arg->m_username_length= 0;
    psi_arg->m_hostname_length= 0;
  }

  /* pfs_spawn_thread() takes ownership of psi_arg on success. */
  int result= my_thread_create(thread, attr, pfs_spawn_thread, psi_arg);
  if (unlikely(result != 0))
    my_free(psi_arg);
  return result;
}
2253
2254 /**
2255 Implementation of the thread instrumentation interface.
2256 @sa PSI_v1::new_thread.
2257 */
2258 PSI_thread*
pfs_new_thread_v1(PSI_thread_key key,const void * identity,ulonglong processlist_id)2259 pfs_new_thread_v1(PSI_thread_key key, const void *identity, ulonglong processlist_id)
2260 {
2261 PFS_thread *pfs;
2262
2263 PFS_thread_class *klass= find_thread_class(key);
2264 if (likely(klass != NULL))
2265 pfs= create_thread(klass, identity, processlist_id);
2266 else
2267 pfs= NULL;
2268
2269 return reinterpret_cast<PSI_thread*> (pfs);
2270 }
2271
2272 /**
2273 Implementation of the thread instrumentation interface.
2274 @sa PSI_v1::set_thread_id.
2275 */
pfs_set_thread_id_v1(PSI_thread * thread,ulonglong processlist_id)2276 void pfs_set_thread_id_v1(PSI_thread *thread, ulonglong processlist_id)
2277 {
2278 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2279 if (unlikely(pfs == NULL))
2280 return;
2281 pfs->m_processlist_id= (ulong)processlist_id;
2282 }
2283
2284 /**
2285 Implementation of the thread instrumentation interface.
2286 @sa PSI_v1::set_thread_THD.
2287 */
pfs_set_thread_THD_v1(PSI_thread * thread,THD * thd)2288 void pfs_set_thread_THD_v1(PSI_thread *thread, THD *thd)
2289 {
2290 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2291 if (unlikely(pfs == NULL))
2292 return;
2293 pfs->m_thd= thd;
2294 }
2295
2296 /**
2297 Implementation of the thread instrumentation interface.
2298 @sa PSI_v1::set_thread_os_thread_id.
2299 */
pfs_set_thread_os_id_v1(PSI_thread * thread)2300 void pfs_set_thread_os_id_v1(PSI_thread *thread)
2301 {
2302 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2303 if (unlikely(pfs == NULL))
2304 return;
2305 pfs->m_thread_os_id= my_thread_os_id();
2306 }
2307
2308 /**
2309 Implementation of the thread instrumentation interface.
2310 @sa PSI_v1::get_thread_id.
2311 */
2312 PSI_thread*
pfs_get_thread_v1(void)2313 pfs_get_thread_v1(void)
2314 {
2315 PFS_thread *pfs= my_thread_get_THR_PFS();
2316 return reinterpret_cast<PSI_thread*> (pfs);
2317 }
2318
2319 /**
2320 Implementation of the thread instrumentation interface.
2321 @sa PSI_v1::set_thread_user.
2322 */
pfs_set_thread_user_v1(const char * user,int user_len)2323 void pfs_set_thread_user_v1(const char *user, int user_len)
2324 {
2325 pfs_dirty_state dirty_state;
2326 PFS_thread *pfs= my_thread_get_THR_PFS();
2327
2328 assert((user != NULL) || (user_len == 0));
2329 assert(user_len >= 0);
2330 assert((uint) user_len <= sizeof(pfs->m_username));
2331
2332 if (unlikely(pfs == NULL))
2333 return;
2334
2335 aggregate_thread(pfs, pfs->m_account, pfs->m_user, pfs->m_host);
2336
2337 pfs->m_session_lock.allocated_to_dirty(& dirty_state);
2338
2339 clear_thread_account(pfs);
2340
2341 if (user_len > 0)
2342 memcpy(pfs->m_username, user, user_len);
2343 pfs->m_username_length= user_len;
2344
2345 set_thread_account(pfs);
2346
2347 bool enabled;
2348 bool history;
2349 if (pfs->m_account != NULL)
2350 {
2351 enabled= pfs->m_account->m_enabled;
2352 history= pfs->m_account->m_history;
2353 }
2354 else
2355 {
2356 if ((pfs->m_username_length > 0) && (pfs->m_hostname_length > 0))
2357 {
2358 lookup_setup_actor(pfs,
2359 pfs->m_username, pfs->m_username_length,
2360 pfs->m_hostname, pfs->m_hostname_length,
2361 &enabled, &history);
2362 }
2363 else
2364 {
2365 /* There is no setting for background threads */
2366 enabled= true;
2367 history= true;
2368 }
2369 }
2370 pfs->set_enabled(enabled);
2371 pfs->set_history(history);
2372
2373 pfs->m_session_lock.dirty_to_allocated(& dirty_state);
2374 }
2375
2376 /**
2377 Implementation of the thread instrumentation interface.
2378 @sa PSI_v1::set_thread_account.
2379 */
pfs_set_thread_account_v1(const char * user,int user_len,const char * host,int host_len)2380 void pfs_set_thread_account_v1(const char *user, int user_len,
2381 const char *host, int host_len)
2382 {
2383 pfs_dirty_state dirty_state;
2384 PFS_thread *pfs= my_thread_get_THR_PFS();
2385
2386 assert((user != NULL) || (user_len == 0));
2387 assert(user_len >= 0);
2388 assert((uint) user_len <= sizeof(pfs->m_username));
2389 assert((host != NULL) || (host_len == 0));
2390 assert(host_len >= 0);
2391
2392 host_len= min<size_t>(host_len, sizeof(pfs->m_hostname));
2393 if (unlikely(pfs == NULL))
2394 return;
2395
2396 pfs->m_session_lock.allocated_to_dirty(& dirty_state);
2397
2398 clear_thread_account(pfs);
2399
2400 if (host_len > 0)
2401 memcpy(pfs->m_hostname, host, host_len);
2402 pfs->m_hostname_length= host_len;
2403
2404 if (user_len > 0)
2405 memcpy(pfs->m_username, user, user_len);
2406 pfs->m_username_length= user_len;
2407
2408 set_thread_account(pfs);
2409
2410 bool enabled;
2411 bool history;
2412 if (pfs->m_account != NULL)
2413 {
2414 enabled= pfs->m_account->m_enabled;
2415 history= pfs->m_account->m_history;
2416 }
2417 else
2418 {
2419 if ((pfs->m_username_length > 0) && (pfs->m_hostname_length > 0))
2420 {
2421 lookup_setup_actor(pfs,
2422 pfs->m_username, pfs->m_username_length,
2423 pfs->m_hostname, pfs->m_hostname_length,
2424 &enabled, &history);
2425 }
2426 else
2427 {
2428 /* There is no setting for background threads */
2429 enabled= true;
2430 history= true;
2431 }
2432 }
2433 pfs->set_enabled(enabled);
2434 pfs->set_history(history);
2435
2436 pfs->m_session_lock.dirty_to_allocated(& dirty_state);
2437 }
2438
2439 /**
2440 Implementation of the thread instrumentation interface.
2441 @sa PSI_v1::set_thread_db.
2442 */
pfs_set_thread_db_v1(const char * db,int db_len)2443 void pfs_set_thread_db_v1(const char* db, int db_len)
2444 {
2445 PFS_thread *pfs= my_thread_get_THR_PFS();
2446
2447 assert((db != NULL) || (db_len == 0));
2448 assert(db_len >= 0);
2449 assert((uint) db_len <= sizeof(pfs->m_dbname));
2450
2451 if (likely(pfs != NULL))
2452 {
2453 pfs_dirty_state dirty_state;
2454 pfs->m_stmt_lock.allocated_to_dirty(& dirty_state);
2455 if (db_len > 0)
2456 memcpy(pfs->m_dbname, db, db_len);
2457 pfs->m_dbname_length= db_len;
2458 pfs->m_stmt_lock.dirty_to_allocated(& dirty_state);
2459 }
2460 }
2461
2462 /**
2463 Implementation of the thread instrumentation interface.
2464 @sa PSI_v1::set_thread_command.
2465 */
pfs_set_thread_command_v1(int command)2466 void pfs_set_thread_command_v1(int command)
2467 {
2468 PFS_thread *pfs= my_thread_get_THR_PFS();
2469
2470 assert(command >= 0);
2471 assert(command <= (int) COM_END);
2472
2473 if (likely(pfs != NULL))
2474 {
2475 pfs->m_command= command;
2476 }
2477 }
2478
2479 /**
2480 Implementation of the thread instrumentation interface.
2481 @sa PSI_v1::set_thread_connection_type.
2482 */
pfs_set_connection_type_v1(opaque_vio_type conn_type)2483 void pfs_set_connection_type_v1(opaque_vio_type conn_type)
2484 {
2485 PFS_thread *pfs= my_thread_get_THR_PFS();
2486
2487 assert(conn_type >= FIRST_VIO_TYPE);
2488 assert(conn_type <= LAST_VIO_TYPE);
2489
2490 if (likely(pfs != NULL))
2491 {
2492 pfs->m_connection_type= static_cast<enum_vio_type> (conn_type);
2493 }
2494 }
2495
2496
2497 /**
2498 Implementation of the thread instrumentation interface.
2499 @sa PSI_v1::set_thread_start_time.
2500 */
pfs_set_thread_start_time_v1(time_t start_time)2501 void pfs_set_thread_start_time_v1(time_t start_time)
2502 {
2503 PFS_thread *pfs= my_thread_get_THR_PFS();
2504
2505 if (likely(pfs != NULL))
2506 {
2507 pfs->m_start_time= start_time;
2508 }
2509 }
2510
2511 /**
2512 Implementation of the thread instrumentation interface.
2513 @sa PSI_v1::set_thread_state.
2514 */
void pfs_set_thread_state_v1(const char* state)
{
  /* DEPRECATED. Intentionally a no-op; the state argument is ignored. */
}
2519
2520 /**
2521 Implementation of the thread instrumentation interface.
2522 @sa PSI_v1::set_thread_info.
2523 */
pfs_set_thread_info_v1(const char * info,uint info_len)2524 void pfs_set_thread_info_v1(const char* info, uint info_len)
2525 {
2526 pfs_dirty_state dirty_state;
2527 PFS_thread *pfs= my_thread_get_THR_PFS();
2528
2529 assert((info != NULL) || (info_len == 0));
2530
2531 if (likely(pfs != NULL))
2532 {
2533 if ((info != NULL) && (info_len > 0))
2534 {
2535 if (info_len > sizeof(pfs->m_processlist_info))
2536 info_len= sizeof(pfs->m_processlist_info);
2537
2538 pfs->m_stmt_lock.allocated_to_dirty(& dirty_state);
2539 memcpy(pfs->m_processlist_info, info, info_len);
2540 pfs->m_processlist_info_length= info_len;
2541 pfs->m_stmt_lock.dirty_to_allocated(& dirty_state);
2542 }
2543 else
2544 {
2545 pfs->m_stmt_lock.allocated_to_dirty(& dirty_state);
2546 pfs->m_processlist_info_length= 0;
2547 pfs->m_stmt_lock.dirty_to_allocated(& dirty_state);
2548 }
2549 }
2550 }
2551
2552 /**
2553 Implementation of the thread instrumentation interface.
2554 @sa PSI_v1::set_thread.
2555 */
pfs_set_thread_v1(PSI_thread * thread)2556 void pfs_set_thread_v1(PSI_thread* thread)
2557 {
2558 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2559 my_thread_set_THR_PFS(pfs);
2560 }
2561
2562 /**
2563 Implementation of the thread instrumentation interface.
2564 @sa PSI_v1::delete_current_thread.
2565 */
pfs_delete_current_thread_v1(void)2566 void pfs_delete_current_thread_v1(void)
2567 {
2568 PFS_thread *thread= my_thread_get_THR_PFS();
2569 if (thread != NULL)
2570 {
2571 aggregate_thread(thread, thread->m_account, thread->m_user, thread->m_host);
2572 my_thread_set_THR_PFS(NULL);
2573 destroy_thread(thread);
2574 }
2575 }
2576
2577 /**
2578 Implementation of the thread instrumentation interface.
2579 @sa PSI_v1::delete_thread.
2580 */
pfs_delete_thread_v1(PSI_thread * thread)2581 void pfs_delete_thread_v1(PSI_thread *thread)
2582 {
2583 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2584
2585 if (pfs != NULL)
2586 {
2587 aggregate_thread(pfs, pfs->m_account, pfs->m_user, pfs->m_host);
2588 destroy_thread(pfs);
2589 }
2590 }
2591
2592 /**
2593 Implementation of the mutex instrumentation interface.
2594 @sa PSI_v1::start_mutex_wait.
2595 */
PSI_mutex_locker*
pfs_start_mutex_wait_v1(PSI_mutex_locker_state *state,
                        PSI_mutex *mutex, PSI_mutex_operation op,
                        const char *src_file, uint src_line)
{
  PFS_mutex *pfs_mutex= reinterpret_cast<PFS_mutex*> (mutex);
  assert((int) op >= 0);
  assert((uint) op < array_elements(mutex_operation_map));
  assert(state != NULL);

  assert(pfs_mutex != NULL);
  assert(pfs_mutex->m_class != NULL);

  /* Instrument disabled: no locker, nothing is recorded. */
  if (! pfs_mutex->m_enabled)
    return NULL;

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    /* Record the wait start time only when this instrument is timed. */
    if (pfs_mutex->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Guard against wait events nesting deeper than the wait stack. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* Link this wait to the enclosing event below it on the wait stack. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= pfs_mutex->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_mutex->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= mutex_operation_map[(int) op];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_MUTEX;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_mutex->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
      state->m_thread= NULL;
    }
    else
    {
      /*
        Complete shortcut.
      */
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
      pfs_mutex->m_mutex_stat.m_wait_stat.aggregate_counted();
      return NULL;
    }
  }

  /* The locker state is the locker: end_mutex_wait reads these fields. */
  state->m_flags= flags;
  state->m_mutex= mutex;
  return reinterpret_cast<PSI_mutex_locker*> (state);
}
2688
2689 /**
2690 Implementation of the rwlock instrumentation interface.
2691 @sa PSI_v1::start_rwlock_rdwait
2692 @sa PSI_v1::start_rwlock_wrwait
2693 */
PSI_rwlock_locker*
pfs_start_rwlock_wait_v1(PSI_rwlock_locker_state *state,
                         PSI_rwlock *rwlock,
                         PSI_rwlock_operation op,
                         const char *src_file, uint src_line)
{
  PFS_rwlock *pfs_rwlock= reinterpret_cast<PFS_rwlock*> (rwlock);
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(rwlock_operation_map));
  assert(state != NULL);
  assert(pfs_rwlock != NULL);
  assert(pfs_rwlock->m_class != NULL);

  /* Operations supported for READ WRITE LOCK */

  assert( pfs_rwlock->m_class->is_shared_exclusive()
          || (op == PSI_RWLOCK_READLOCK)
          || (op == PSI_RWLOCK_WRITELOCK)
          || (op == PSI_RWLOCK_TRYREADLOCK)
          || (op == PSI_RWLOCK_TRYWRITELOCK)
          );

  /* Operations supported for SHARED EXCLUSIVE LOCK */

  assert( ! pfs_rwlock->m_class->is_shared_exclusive()
          || (op == PSI_RWLOCK_SHAREDLOCK)
          || (op == PSI_RWLOCK_SHAREDEXCLUSIVELOCK)
          || (op == PSI_RWLOCK_EXCLUSIVELOCK)
          || (op == PSI_RWLOCK_TRYSHAREDLOCK)
          || (op == PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK)
          || (op == PSI_RWLOCK_TRYEXCLUSIVELOCK)
          );

  /* Instrument disabled: no locker, nothing is recorded. */
  if (! pfs_rwlock->m_enabled)
    return NULL;

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    /* Record the wait start time only when this instrument is timed. */
    if (pfs_rwlock->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Guard against wait events nesting deeper than the wait stack. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* Link this wait to the enclosing event below it on the wait stack. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= pfs_rwlock->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_rwlock->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= rwlock_operation_map[static_cast<int> (op)];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_RWLOCK;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_rwlock->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
      state->m_thread= NULL;
    }
    else
    {
      /*
        Complete shortcut.
      */
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
      pfs_rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
      return NULL;
    }
  }

  /* The locker state is the locker: end_rwlock_*wait reads these fields. */
  state->m_flags= flags;
  state->m_rwlock= rwlock;
  state->m_operation= op;
  return reinterpret_cast<PSI_rwlock_locker*> (state);
}
2807
2808 PSI_rwlock_locker*
pfs_start_rwlock_rdwait_v1(PSI_rwlock_locker_state * state,PSI_rwlock * rwlock,PSI_rwlock_operation op,const char * src_file,uint src_line)2809 pfs_start_rwlock_rdwait_v1(PSI_rwlock_locker_state *state,
2810 PSI_rwlock *rwlock,
2811 PSI_rwlock_operation op,
2812 const char *src_file, uint src_line)
2813 {
2814 assert((op == PSI_RWLOCK_READLOCK) ||
2815 (op == PSI_RWLOCK_TRYREADLOCK) ||
2816 (op == PSI_RWLOCK_SHAREDLOCK) ||
2817 (op == PSI_RWLOCK_TRYSHAREDLOCK));
2818
2819 return pfs_start_rwlock_wait_v1(state, rwlock, op, src_file, src_line);
2820 }
2821
2822 PSI_rwlock_locker*
pfs_start_rwlock_wrwait_v1(PSI_rwlock_locker_state * state,PSI_rwlock * rwlock,PSI_rwlock_operation op,const char * src_file,uint src_line)2823 pfs_start_rwlock_wrwait_v1(PSI_rwlock_locker_state *state,
2824 PSI_rwlock *rwlock,
2825 PSI_rwlock_operation op,
2826 const char *src_file, uint src_line)
2827 {
2828 assert((op == PSI_RWLOCK_WRITELOCK) ||
2829 (op == PSI_RWLOCK_TRYWRITELOCK) ||
2830 (op == PSI_RWLOCK_SHAREDEXCLUSIVELOCK) ||
2831 (op == PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK) ||
2832 (op == PSI_RWLOCK_EXCLUSIVELOCK) ||
2833 (op == PSI_RWLOCK_TRYEXCLUSIVELOCK));
2834
2835 return pfs_start_rwlock_wait_v1(state, rwlock, op, src_file, src_line);
2836 }
2837
/**
  Implementation of the cond instrumentation interface.
  @sa PSI_v1::start_cond_wait.
  @param state    locker state, owned by the caller, populated on success
  @param cond     the instrumented condition
  @param mutex    mutex associated with the condition (currently unused, see note below)
  @param op       the condition operation (wait / timed wait)
  @param src_file caller source file, recorded in EVENTS_WAITS_CURRENT
  @param src_line caller source line, recorded in EVENTS_WAITS_CURRENT
  @return a locker, or NULL if this wait is not instrumented
*/
PSI_cond_locker*
pfs_start_cond_wait_v1(PSI_cond_locker_state *state,
                       PSI_cond *cond, PSI_mutex *mutex,
                       PSI_cond_operation op,
                       const char *src_file, uint src_line)
{
  /*
    Note about the unused PSI_mutex *mutex parameter:
    In the pthread library, a call to pthread_cond_wait()
    causes an unlock() + lock() on the mutex associated with the condition.
    This mutex operation is not instrumented, so the mutex will still
    appear as locked when a thread is waiting on a condition.
    This has no impact now, as unlock_mutex() is not recording events.
    When unlock_mutex() is implemented by later work logs,
    this parameter here will be used to adjust the mutex state,
    in start_cond_wait_v1() and end_cond_wait_v1().
  */
  PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(cond_operation_map));
  assert(state != NULL);
  assert(pfs_cond != NULL);
  assert(pfs_cond->m_class != NULL);

  /* Instrumentation disabled for this condition: no locker. */
  if (! pfs_cond->m_enabled)
    return NULL;

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_cond->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Wait event nesting stack is full: drop the event, count it lost. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The previous stack entry is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= pfs_cond->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_cond->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= cond_operation_map[static_cast<int> (op)];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_COND;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_cond->m_timed)
    {
      /*
        NOTE(review): unlike the equivalent rwlock path, state->m_thread is
        not reset to NULL here; harmless since STATE_FLAG_THREAD is not set,
        so readers of the state never look at m_thread — confirm upstream.
      */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /*
        Complete shortcut.
      */
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
      pfs_cond->m_cond_stat.m_wait_stat.aggregate_counted();
      return NULL;
    }
  }

  state->m_flags= flags;
  state->m_cond= cond;
  state->m_mutex= mutex;
  return reinterpret_cast<PSI_cond_locker*> (state);
}
2945
lock_flags_to_lock_type(uint flags)2946 static inline PFS_TL_LOCK_TYPE lock_flags_to_lock_type(uint flags)
2947 {
2948 enum thr_lock_type value= static_cast<enum thr_lock_type> (flags);
2949
2950 switch (value)
2951 {
2952 case TL_READ:
2953 return PFS_TL_READ;
2954 case TL_READ_WITH_SHARED_LOCKS:
2955 return PFS_TL_READ_WITH_SHARED_LOCKS;
2956 case TL_READ_HIGH_PRIORITY:
2957 return PFS_TL_READ_HIGH_PRIORITY;
2958 case TL_READ_NO_INSERT:
2959 return PFS_TL_READ_NO_INSERT;
2960 case TL_WRITE_ALLOW_WRITE:
2961 return PFS_TL_WRITE_ALLOW_WRITE;
2962 case TL_WRITE_CONCURRENT_INSERT:
2963 return PFS_TL_WRITE_CONCURRENT_INSERT;
2964 case TL_WRITE_LOW_PRIORITY:
2965 return PFS_TL_WRITE_LOW_PRIORITY;
2966 case TL_WRITE:
2967 return PFS_TL_WRITE;
2968
2969 case TL_WRITE_ONLY:
2970 case TL_IGNORE:
2971 case TL_UNLOCK:
2972 case TL_READ_DEFAULT:
2973 case TL_WRITE_DEFAULT:
2974 case TL_WRITE_CONCURRENT_DEFAULT:
2975 default:
2976 assert(false);
2977 }
2978
2979 /* Dead code */
2980 return PFS_TL_READ;
2981 }
2982
external_lock_flags_to_lock_type(uint flags)2983 static inline PFS_TL_LOCK_TYPE external_lock_flags_to_lock_type(uint flags)
2984 {
2985 assert(flags == F_RDLCK || flags == F_WRLCK);
2986 return (flags == F_RDLCK ? PFS_TL_READ_EXTERNAL : PFS_TL_WRITE_EXTERNAL);
2987 }
2988
/**
  Implementation of the table instrumentation interface.
  @sa PSI_v1::start_table_io_wait_v1
  @param state    locker state, owned by the caller, populated on success
  @param table    the instrumented table handle
  @param op       the table io operation (fetch / insert / update / delete)
  @param index    index used for the operation, or MAX_KEY
  @param src_file caller source file, recorded in EVENTS_WAITS_CURRENT
  @param src_line caller source line, recorded in EVENTS_WAITS_CURRENT
  @return a locker, or NULL if this wait is not instrumented
*/
PSI_table_locker*
pfs_start_table_io_wait_v1(PSI_table_locker_state *state,
                           PSI_table *table,
                           PSI_table_io_operation op,
                           uint index,
                           const char *src_file, uint src_line)
{
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(table_io_operation_map));
  assert(state != NULL);
  PFS_table *pfs_table= reinterpret_cast<PFS_table*> (table);
  assert(pfs_table != NULL);
  assert(pfs_table->m_share != NULL);

  /* Table io instrumentation disabled for this table: no locker. */
  if (! pfs_table->m_io_enabled)
    return NULL;

  PFS_thread *pfs_thread= my_thread_get_THR_PFS();

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    if (pfs_thread == NULL)
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_table->m_io_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Wait event nesting stack is full: drop the event, count it lost. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The previous stack entry is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      PFS_table_share *share= pfs_table->m_share;
      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= &global_table_io_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_table->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= table_io_operation_map[static_cast<int> (op)];
      wait->m_flags= 0;
      wait->m_object_type= share->get_object_type();
      /* Weak reference: the share may be destroyed before the event is read. */
      wait->m_weak_table_share= share;
      wait->m_weak_version= share->get_version();
      wait->m_index= index;
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_TABLE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_table->m_io_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /* TODO: consider a shortcut here */
      flags= 0;
    }
  }

  state->m_flags= flags;
  state->m_table= table;
  state->m_io_operation= op;
  state->m_index= index;
  return reinterpret_cast<PSI_table_locker*> (state);
}
3090
/**
  Implementation of the table instrumentation interface.
  @sa PSI_v1::start_table_lock_wait.
  @param state    locker state, owned by the caller, populated on success
  @param table    the instrumented table handle
  @param op       PSI_TABLE_LOCK (THR_LOCK) or PSI_TABLE_EXTERNAL_LOCK
  @param op_flags lock flags; thr_lock_type for PSI_TABLE_LOCK,
                  F_RDLCK / F_WRLCK / F_UNLCK for PSI_TABLE_EXTERNAL_LOCK
  @param src_file caller source file, recorded in EVENTS_WAITS_CURRENT
  @param src_line caller source line, recorded in EVENTS_WAITS_CURRENT
  @return a locker, or NULL if this wait is not instrumented
*/
PSI_table_locker*
pfs_start_table_lock_wait_v1(PSI_table_locker_state *state,
                             PSI_table *table,
                             PSI_table_lock_operation op,
                             ulong op_flags,
                             const char *src_file, uint src_line)
{
  assert(state != NULL);
  assert((op == PSI_TABLE_LOCK) || (op == PSI_TABLE_EXTERNAL_LOCK));

  PFS_table *pfs_table= reinterpret_cast<PFS_table*> (table);

  assert(pfs_table != NULL);
  assert(pfs_table->m_share != NULL);

  /* Table lock instrumentation disabled for this table: no locker. */
  if (! pfs_table->m_lock_enabled)
    return NULL;

  PFS_thread *pfs_thread= my_thread_get_THR_PFS();

  PFS_TL_LOCK_TYPE lock_type;

  /*
    Record the new lock state on the table handle itself,
    even when no locker is ultimately returned.
  */
  switch (op)
  {
  case PSI_TABLE_LOCK:
    lock_type= lock_flags_to_lock_type(op_flags);
    pfs_table->m_internal_lock= lock_type;
    break;
  case PSI_TABLE_EXTERNAL_LOCK:
    /*
      See the handler::external_lock() API design,
      there is no handler::external_unlock().
    */
    if (op_flags == F_UNLCK)
    {
      pfs_table->m_external_lock= PFS_TL_NONE;
      return NULL;
    }
    lock_type= external_lock_flags_to_lock_type(op_flags);
    pfs_table->m_external_lock= lock_type;
    break;
  default:
    lock_type= PFS_TL_READ;
    assert(false);
  }

  assert((uint) lock_type < array_elements(table_lock_operation_map));

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    if (pfs_thread == NULL)
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_table->m_lock_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Wait event nesting stack is full: drop the event, count it lost. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The previous stack entry is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      PFS_table_share *share= pfs_table->m_share;
      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= &global_table_lock_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_table->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= table_lock_operation_map[lock_type];
      wait->m_flags= 0;
      wait->m_object_type= share->get_object_type();
      /* Weak reference: the share may be destroyed before the event is read. */
      wait->m_weak_table_share= share;
      wait->m_weak_version= share->get_version();
      wait->m_index= 0;
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_TABLE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_table->m_lock_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /* TODO: consider a shortcut here */
      flags= 0;
    }
  }

  state->m_flags= flags;
  state->m_table= table;
  state->m_index= lock_type;
  return reinterpret_cast<PSI_table_locker*> (state);
}
3220
/**
  Implementation of the file instrumentation interface.
  @sa PSI_v1::get_thread_file_name_locker.
  Used when the file is known by name only (e.g. create / open / delete),
  before any file handle exists.
  @param state    locker state, owned by the caller, populated on success
  @param key      the file instrument key
  @param op       the file operation
  @param name     the file name
  @param identity opaque caller identity (unused here)
  @return a locker, or NULL if this operation is not instrumented
*/
PSI_file_locker*
pfs_get_thread_file_name_locker_v1(PSI_file_locker_state *state,
                                   PSI_file_key key,
                                   PSI_file_operation op,
                                   const char *name, const void *identity)
{
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(file_operation_map));
  assert(state != NULL);

  if (! flag_global_instrumentation)
    return NULL;
  PFS_file_class *klass= find_file_class(key);
  if (unlikely(klass == NULL))
    return NULL;
  if (! klass->m_enabled)
    return NULL;

  /* Needed for the LF_HASH */
  PFS_thread *pfs_thread= my_thread_get_THR_PFS();
  if (unlikely(pfs_thread == NULL))
    return NULL;

  if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
    return NULL;

  uint flags;

  state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
  flags= STATE_FLAG_THREAD;

  if (klass->m_timed)
    flags|= STATE_FLAG_TIMED;

  if (flag_events_waits_current)
  {
    /* Wait event nesting stack is full: drop the event, count it lost. */
    if (unlikely(pfs_thread->m_events_waits_current >=
                 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
    {
      locker_lost++;
      return NULL;
    }
    PFS_events_waits *wait= pfs_thread->m_events_waits_current;
    state->m_wait= wait;
    flags|= STATE_FLAG_EVENT;

    /* The previous stack entry is the parent (nesting) event. */
    PFS_events_waits *parent_event= wait - 1;
    wait->m_event_type= EVENT_TYPE_WAIT;
    wait->m_nesting_event_id= parent_event->m_event_id;
    wait->m_nesting_event_type= parent_event->m_event_type;

    wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
    wait->m_class= klass;
    /* Timer starts later, in start_file_open_wait() / start_file_wait(). */
    wait->m_timer_start= 0;
    wait->m_timer_end= 0;
    /* No PFS_file instance exists yet for this name. */
    wait->m_object_instance_addr= NULL;
    wait->m_weak_file= NULL;
    wait->m_weak_version= 0;
    wait->m_event_id= pfs_thread->m_event_id++;
    wait->m_end_event_id= 0;
    wait->m_operation= file_operation_map[static_cast<int> (op)];
    wait->m_wait_class= WAIT_CLASS_FILE;

    pfs_thread->m_events_waits_current++;
  }

  state->m_flags= flags;
  state->m_file= NULL;
  state->m_name= name;
  state->m_class= klass;
  state->m_operation= op;
  return reinterpret_cast<PSI_file_locker*> (state);
}
3298
/**
  Implementation of the file instrumentation interface.
  @sa PSI_v1::get_thread_file_stream_locker.
  Used when the file is known by its instrumented stream (PSI_file).
  @param state locker state, owned by the caller, populated on success
  @param file  the instrumented file stream
  @param op    the file operation
  @return a locker, or NULL if this operation is not instrumented
*/
PSI_file_locker*
pfs_get_thread_file_stream_locker_v1(PSI_file_locker_state *state,
                                     PSI_file *file, PSI_file_operation op)
{
  PFS_file *pfs_file= reinterpret_cast<PFS_file*> (file);
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(file_operation_map));
  assert(state != NULL);

  if (unlikely(pfs_file == NULL))
    return NULL;
  assert(pfs_file->m_class != NULL);
  PFS_file_class *klass= pfs_file->m_class;

  if (! pfs_file->m_enabled)
    return NULL;

  /* Needed for the LF_HASH */
  PFS_thread *pfs_thread= my_thread_get_THR_PFS();
  if (unlikely(pfs_thread == NULL))
    return NULL;

  uint flags;

  /* Always populated */
  state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);

  if (flag_thread_instrumentation)
  {
    if (! pfs_thread->m_enabled)
      return NULL;
    flags= STATE_FLAG_THREAD;

    if (pfs_file->m_timed)
      flags|= STATE_FLAG_TIMED;

    if (flag_events_waits_current)
    {
      /* Wait event nesting stack is full: drop the event, count it lost. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The previous stack entry is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= klass;
      /* Timer starts later, in start_file_wait(). */
      wait->m_timer_start= 0;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_file;
      /* Weak reference: the file may be destroyed before the event is read. */
      wait->m_weak_file= pfs_file;
      wait->m_weak_version= pfs_file->get_version();
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= file_operation_map[static_cast<int> (op)];
      wait->m_wait_class= WAIT_CLASS_FILE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_file->m_timed)
    {
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /* TODO: consider a shortcut. */
      flags= 0;
    }
  }

  state->m_flags= flags;
  state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
  state->m_operation= op;
  state->m_name= NULL;
  state->m_class= klass;
  return reinterpret_cast<PSI_file_locker*> (state);
}
3391
/**
  Implementation of the file instrumentation interface.
  @sa PSI_v1::get_thread_file_descriptor_locker.
  Used when the file is known by its operating system descriptor.
  @param state locker state, owned by the caller, populated on success
  @param file  the file descriptor
  @param op    the file operation
  @return a locker, or NULL if this operation is not instrumented
*/
PSI_file_locker*
pfs_get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state,
                                         File file, PSI_file_operation op)
{
  int index= static_cast<int> (file);
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(file_operation_map));
  assert(state != NULL);

  /* Descriptor outside the instrumented handle range: not instrumented. */
  if (unlikely((index < 0) || (index >= file_handle_max)))
    return NULL;

  PFS_file *pfs_file= file_handle_array[index];
  if (unlikely(pfs_file == NULL))
    return NULL;

  /*
    We are about to close a file by descriptor number,
    and the calling code still holds the descriptor.
    Cleanup the file descriptor <--> file instrument association.
    Remove the instrumentation *before* the close to avoid race
    conditions with another thread opening a file
    (that could be given the same descriptor).
  */
  if (op == PSI_FILE_CLOSE)
    file_handle_array[index]= NULL;

  if (! pfs_file->m_enabled)
    return NULL;

  assert(pfs_file->m_class != NULL);
  PFS_file_class *klass= pfs_file->m_class;

  /* Needed for the LF_HASH */
  PFS_thread *pfs_thread= my_thread_get_THR_PFS();
  if (unlikely(pfs_thread == NULL))
    return NULL;

  uint flags;

  /* Always populated */
  state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);

  if (flag_thread_instrumentation)
  {
    if (! pfs_thread->m_enabled)
      return NULL;
    flags= STATE_FLAG_THREAD;

    if (pfs_file->m_timed)
      flags|= STATE_FLAG_TIMED;

    if (flag_events_waits_current)
    {
      /* Wait event nesting stack is full: drop the event, count it lost. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The previous stack entry is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= klass;
      /* Timer starts later, in start_file_wait(). */
      wait->m_timer_start= 0;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_file;
      /* Weak reference: the file may be destroyed before the event is read. */
      wait->m_weak_file= pfs_file;
      wait->m_weak_version= pfs_file->get_version();
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= file_operation_map[static_cast<int> (op)];
      wait->m_wait_class= WAIT_CLASS_FILE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_file->m_timed)
    {
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /* TODO: consider a shortcut. */
      flags= 0;
    }
  }

  state->m_flags= flags;
  state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
  state->m_operation= op;
  state->m_name= NULL;
  state->m_class= klass;
  return reinterpret_cast<PSI_file_locker*> (state);
}
3500
/**
  Socket locker.
  Implementation of the socket instrumentation interface.
  @sa PSI_v1::start_socket_wait.
  @param state    locker state, owned by the caller, populated on success
  @param socket   the instrumented socket
  @param op       the socket operation
  @param count    number of bytes requested, when applicable
  @param src_file caller source file, recorded in EVENTS_WAITS_CURRENT
  @param src_line caller source line, recorded in EVENTS_WAITS_CURRENT
  @return a locker, or NULL if this wait is not instrumented
*/
PSI_socket_locker*
pfs_start_socket_wait_v1(PSI_socket_locker_state *state,
                         PSI_socket *socket,
                         PSI_socket_operation op,
                         size_t count,
                         const char *src_file, uint src_line)
{
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(socket_operation_map));
  assert(state != NULL);
  PFS_socket *pfs_socket= reinterpret_cast<PFS_socket*> (socket);

  assert(pfs_socket != NULL);
  assert(pfs_socket->m_class != NULL);

  /* Idle sockets are instrumented by the idle interface instead. */
  if (!pfs_socket->m_enabled || pfs_socket->m_idle)
    return NULL;

  uint flags= 0;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    /*
      Do not use pfs_socket->m_thread_owner here,
      as different threads may use concurrently the same socket,
      for example during a KILL.
    */
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();

    if (unlikely(pfs_thread == NULL))
      return NULL;

    if (!pfs_thread->m_enabled)
      return NULL;

    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_socket->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Wait event nesting stack is full: drop the event, count it lost. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* The previous stack entry is the parent (nesting) event. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;
      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= pfs_socket->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_socket->m_identity;
      /* Weak reference: the socket may be destroyed before the event is read. */
      wait->m_weak_socket= pfs_socket;
      wait->m_weak_version= pfs_socket->get_version();
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= socket_operation_map[static_cast<int>(op)];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_number_of_bytes= count;
      wait->m_wait_class= WAIT_CLASS_SOCKET;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_socket->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /*
        Even if timing is disabled, end_socket_wait() still needs a locker to
        capture the number of bytes sent or received by the socket operation.
        For operations that do not have a byte count, then just increment the
        event counter and return a NULL locker.
      */
      switch (op)
      {
        case PSI_SOCKET_CONNECT:
        case PSI_SOCKET_CREATE:
        case PSI_SOCKET_BIND:
        case PSI_SOCKET_SEEK:
        case PSI_SOCKET_OPT:
        case PSI_SOCKET_STAT:
        case PSI_SOCKET_SHUTDOWN:
        case PSI_SOCKET_CLOSE:
        case PSI_SOCKET_SELECT:
          pfs_socket->m_socket_stat.m_io_stat.m_misc.aggregate_counted();
          return NULL;
        default:
          break;
      }
    }
  }

  state->m_flags= flags;
  state->m_socket= socket;
  state->m_operation= op;
  return reinterpret_cast<PSI_socket_locker*> (state);
}
3623
/**
  Implementation of the mutex instrumentation interface.
  @sa PSI_v1::unlock_mutex.
  @param mutex the instrumented mutex being unlocked
*/
void pfs_unlock_mutex_v1(PSI_mutex *mutex)
{
  PFS_mutex *pfs_mutex= reinterpret_cast<PFS_mutex*> (mutex);

  assert(pfs_mutex != NULL);

  /*
    Note that this code is still protected by the instrumented mutex,
    and therefore is thread safe. See inline_mysql_mutex_unlock().
  */

  /* Always update the instrumented state */
  pfs_mutex->m_owner= NULL;
  pfs_mutex->m_last_locked= 0;

#ifdef LATER_WL2333
  /*
    See WL#2333: SHOW ENGINE ... LOCK STATUS.
    PFS_mutex::m_lock_stat is not exposed in user visible tables
    currently, so there is no point spending time computing it.

    NOTE(review): if this block is ever enabled, beware that m_last_locked
    is read below *after* being reset to 0 above, so locked_time would be
    computed from a zeroed timestamp — reorder before enabling.
  */
  if (! pfs_mutex->m_enabled)
    return;

  if (! pfs_mutex->m_timed)
    return;

  ulonglong locked_time;
  locked_time= get_timer_pico_value(wait_timer) - pfs_mutex->m_last_locked;
  pfs_mutex->m_mutex_stat.m_lock_stat.aggregate_value(locked_time);
#endif
}
3660
/**
  Implementation of the rwlock instrumentation interface.
  @sa PSI_v1::unlock_rwlock.
  @param rwlock the instrumented rwlock being unlocked
*/
void pfs_unlock_rwlock_v1(PSI_rwlock *rwlock)
{
  PFS_rwlock *pfs_rwlock= reinterpret_cast<PFS_rwlock*> (rwlock);
  assert(pfs_rwlock != NULL);
  assert(pfs_rwlock == sanitize_rwlock(pfs_rwlock));
  assert(pfs_rwlock->m_class != NULL);
  assert(pfs_rwlock->m_lock.is_populated());

  bool last_writer= false;
  bool last_reader= false;

  /*
    Note that this code is still protected by the instrumented rwlock,
    and therefore is:
    - thread safe for write locks
    - almost thread safe for read locks (pfs_rwlock->m_readers is unsafe).
    See inline_mysql_rwlock_unlock()
  */

  /* Always update the instrumented state */
  if (pfs_rwlock->m_writer != NULL)
  {
    /* Nominal case, a writer is unlocking. */
    last_writer= true;
    pfs_rwlock->m_writer= NULL;
    /* Reset the readers stats, they could be off */
    pfs_rwlock->m_readers= 0;
  }
  else if (likely(pfs_rwlock->m_readers > 0))
  {
    /* Nominal case, a reader is unlocking. */
    if (--(pfs_rwlock->m_readers) == 0)
      last_reader= true;
  }
  else
  {
    /*
      Edge case, we have no writer and no readers,
      on an unlock event.
      This is possible for:
      - partial instrumentation
      - instrumentation disabled at runtime,
        see when get_thread_rwlock_locker_v1() returns NULL
      No further action is taken here, the next
      write lock will put the statistics is a valid state.
    */
  }

#ifdef LATER_WL2333
  /* See WL#2333: SHOW ENGINE ... LOCK STATUS. */

  if (! pfs_rwlock->m_enabled)
    return;

  if (! pfs_rwlock->m_timed)
    return;

  ulonglong locked_time;
  if (last_writer)
  {
    locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_written;
    pfs_rwlock->m_rwlock_stat.m_write_lock_stat.aggregate_value(locked_time);
  }
  else if (last_reader)
  {
    locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_read;
    pfs_rwlock->m_rwlock_stat.m_read_lock_stat.aggregate_value(locked_time);
  }
#else
  /* Silence unused-variable warnings until WL#2333 lands. */
  (void) last_reader;
  (void) last_writer;
#endif
}
3738
/**
  Implementation of the cond instrumentation interface.
  @sa PSI_v1::signal_cond.
  @param cond the instrumented condition being signaled
  Currently a no-op: signal counting is compiled out until PFS_LATER
  is defined.
*/
void pfs_signal_cond_v1(PSI_cond* cond)
{
#ifdef PFS_LATER
  PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);

  assert(pfs_cond != NULL);

  /* Count the signal against the condition statistics. */
  pfs_cond->m_cond_stat.m_signal_count++;
#endif
}
3753
/**
  Implementation of the cond instrumentation interface.
  @sa PSI_v1::broadcast_cond.
  @param cond the instrumented condition being broadcast
  Currently a no-op: broadcast counting is compiled out until PFS_LATER
  is defined.
*/
void pfs_broadcast_cond_v1(PSI_cond* cond)
{
#ifdef PFS_LATER
  PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);

  assert(pfs_cond != NULL);

  /* Count the broadcast against the condition statistics. */
  pfs_cond->m_cond_stat.m_broadcast_count++;
#endif
}
3768
/**
  Implementation of the idle instrumentation interface.
  @sa PSI_v1::start_idle_wait.
  @param state    locker state, owned by the caller, populated on success
  @param src_file caller source file, recorded in EVENTS_WAITS_CURRENT
  @param src_line caller source line, recorded in EVENTS_WAITS_CURRENT
  @return a locker, or NULL if idle waits are not instrumented
*/
PSI_idle_locker*
pfs_start_idle_wait_v1(PSI_idle_locker_state* state, const char *src_file, uint src_line)
{
  assert(state != NULL);

  if (!flag_global_instrumentation)
    return NULL;

  if (!global_idle_class.m_enabled)
    return NULL;

  uint flags= 0;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (!pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    /* An idle wait can not start while a statement is executing. */
    assert(pfs_thread->m_events_statements_count == 0);

    if (global_idle_class.m_timed)
    {
      /* Idle waits use the dedicated idle timer, not the wait timer. */
      timer_start= get_timer_raw_value_and_function(idle_timer, &state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Wait event nesting stack is full: drop the event, count it lost. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      wait->m_event_type= EVENT_TYPE_WAIT;
      /*
        IDLE events are waits, but by definition we know that
        such waits happen outside of any STAGE and STATEMENT,
        so they have no parents.
      */
      wait->m_nesting_event_id= 0;
      /* no need to set wait->m_nesting_event_type */

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= &global_idle_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= OPERATION_TYPE_IDLE;
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_IDLE;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (global_idle_class.m_timed)
    {
      timer_start= get_timer_raw_value_and_function(idle_timer, &state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
  }

  state->m_flags= flags;
  return reinterpret_cast<PSI_idle_locker*> (state);
}
3854
/**
  Implementation of the idle instrumentation interface.
  @sa PSI_v1::end_idle_wait.
  @param locker the locker returned by start_idle_wait_v1()
  Completes the idle wait: computes the wait time if timed, aggregates
  to the per-thread and global summaries, and finalizes the wait event.
*/
void pfs_end_idle_wait_v1(PSI_idle_locker* locker)
{
  PSI_idle_locker_state *state= reinterpret_cast<PSI_idle_locker_state*> (locker);
  assert(state != NULL);
  ulonglong timer_end= 0;
  ulonglong wait_time= 0;

  uint flags= state->m_flags;

  if (flags & STATE_FLAG_TIMED)
  {
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;
  }

  if (flags & STATE_FLAG_THREAD)
  {
    PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
    PFS_single_stat *event_name_array;
    event_name_array= thread->write_instr_class_waits_stats();

    if (flags & STATE_FLAG_TIMED)
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
      event_name_array[GLOBAL_IDLE_EVENT_INDEX].aggregate_value(wait_time);
    }
    else
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
      event_name_array[GLOBAL_IDLE_EVENT_INDEX].aggregate_counted();
    }

    if (flags & STATE_FLAG_EVENT)
    {
      PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
      assert(wait != NULL);

      /* Finalize the wait event, flush it to history, pop the stack. */
      wait->m_timer_end= timer_end;
      wait->m_end_event_id= thread->m_event_id;
      if (thread->m_flag_events_waits_history)
        insert_events_waits_history(thread, wait);
      if (thread->m_flag_events_waits_history_long)
        insert_events_waits_history_long(wait);
      thread->m_events_waits_current--;

      assert(wait == thread->m_events_waits_current);
    }
  }

  if (flags & STATE_FLAG_TIMED)
  {
    /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (timed) */
    global_idle_stat.aggregate_value(wait_time);
  }
  else
  {
    /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (counted) */
    global_idle_stat.aggregate_counted();
  }
}
3919
3920 /**
3921 Implementation of the mutex instrumentation interface.
3922 @sa PSI_v1::end_mutex_wait.
3923 */
pfs_end_mutex_wait_v1(PSI_mutex_locker * locker,int rc)3924 void pfs_end_mutex_wait_v1(PSI_mutex_locker* locker, int rc)
3925 {
3926 PSI_mutex_locker_state *state= reinterpret_cast<PSI_mutex_locker_state*> (locker);
3927 assert(state != NULL);
3928
3929 ulonglong timer_end= 0;
3930 ulonglong wait_time= 0;
3931
3932 PFS_mutex *mutex= reinterpret_cast<PFS_mutex *> (state->m_mutex);
3933 assert(mutex != NULL);
3934 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
3935
3936 uint flags= state->m_flags;
3937
3938 if (flags & STATE_FLAG_TIMED)
3939 {
3940 timer_end= state->m_timer();
3941 wait_time= timer_end - state->m_timer_start;
3942 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
3943 mutex->m_mutex_stat.m_wait_stat.aggregate_value(wait_time);
3944 }
3945 else
3946 {
3947 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
3948 mutex->m_mutex_stat.m_wait_stat.aggregate_counted();
3949 }
3950
3951 if (likely(rc == 0))
3952 {
3953 mutex->m_owner= thread;
3954 mutex->m_last_locked= timer_end;
3955 }
3956
3957 if (flags & STATE_FLAG_THREAD)
3958 {
3959 PFS_single_stat *event_name_array;
3960 event_name_array= thread->write_instr_class_waits_stats();
3961 uint index= mutex->m_class->m_event_name_index;
3962
3963 assert(index <= wait_class_max);
3964 assert(sanitize_thread(thread) != NULL);
3965
3966 if (flags & STATE_FLAG_TIMED)
3967 {
3968 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
3969 event_name_array[index].aggregate_value(wait_time);
3970 }
3971 else
3972 {
3973 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
3974 event_name_array[index].aggregate_counted();
3975 }
3976
3977 if (flags & STATE_FLAG_EVENT)
3978 {
3979 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
3980 assert(wait != NULL);
3981
3982 wait->m_timer_end= timer_end;
3983 wait->m_end_event_id= thread->m_event_id;
3984 if (thread->m_flag_events_waits_history)
3985 insert_events_waits_history(thread, wait);
3986 if (thread->m_flag_events_waits_history_long)
3987 insert_events_waits_history_long(wait);
3988 thread->m_events_waits_current--;
3989
3990 assert(wait == thread->m_events_waits_current);
3991 }
3992 }
3993 }
3994
3995 /**
3996 Implementation of the rwlock instrumentation interface.
3997 @sa PSI_v1::end_rwlock_rdwait.
3998 */
pfs_end_rwlock_rdwait_v1(PSI_rwlock_locker * locker,int rc)3999 void pfs_end_rwlock_rdwait_v1(PSI_rwlock_locker* locker, int rc)
4000 {
4001 PSI_rwlock_locker_state *state= reinterpret_cast<PSI_rwlock_locker_state*> (locker);
4002 assert(state != NULL);
4003
4004 ulonglong timer_end= 0;
4005 ulonglong wait_time= 0;
4006
4007 PFS_rwlock *rwlock= reinterpret_cast<PFS_rwlock *> (state->m_rwlock);
4008 assert(rwlock != NULL);
4009
4010 if (state->m_flags & STATE_FLAG_TIMED)
4011 {
4012 timer_end= state->m_timer();
4013 wait_time= timer_end - state->m_timer_start;
4014 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4015 rwlock->m_rwlock_stat.m_wait_stat.aggregate_value(wait_time);
4016 }
4017 else
4018 {
4019 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4020 rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
4021 }
4022
4023 if (rc == 0)
4024 {
4025 /*
4026 Warning:
4027 Multiple threads can execute this section concurrently
4028 (since multiple readers can execute in parallel).
4029 The statistics generated are not safe, which is why they are
4030 just statistics, not facts.
4031 */
4032 if (rwlock->m_readers == 0)
4033 rwlock->m_last_read= timer_end;
4034 rwlock->m_writer= NULL;
4035 rwlock->m_readers++;
4036 }
4037
4038 if (state->m_flags & STATE_FLAG_THREAD)
4039 {
4040 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4041 assert(thread != NULL);
4042
4043 PFS_single_stat *event_name_array;
4044 event_name_array= thread->write_instr_class_waits_stats();
4045 uint index= rwlock->m_class->m_event_name_index;
4046
4047 if (state->m_flags & STATE_FLAG_TIMED)
4048 {
4049 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4050 event_name_array[index].aggregate_value(wait_time);
4051 }
4052 else
4053 {
4054 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4055 event_name_array[index].aggregate_counted();
4056 }
4057
4058 if (state->m_flags & STATE_FLAG_EVENT)
4059 {
4060 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4061 assert(wait != NULL);
4062
4063 wait->m_timer_end= timer_end;
4064 wait->m_end_event_id= thread->m_event_id;
4065 if (thread->m_flag_events_waits_history)
4066 insert_events_waits_history(thread, wait);
4067 if (thread->m_flag_events_waits_history_long)
4068 insert_events_waits_history_long(wait);
4069 thread->m_events_waits_current--;
4070
4071 assert(wait == thread->m_events_waits_current);
4072 }
4073 }
4074 }
4075
4076 /**
4077 Implementation of the rwlock instrumentation interface.
4078 @sa PSI_v1::end_rwlock_wrwait.
4079 */
pfs_end_rwlock_wrwait_v1(PSI_rwlock_locker * locker,int rc)4080 void pfs_end_rwlock_wrwait_v1(PSI_rwlock_locker* locker, int rc)
4081 {
4082 PSI_rwlock_locker_state *state= reinterpret_cast<PSI_rwlock_locker_state*> (locker);
4083 assert(state != NULL);
4084
4085 ulonglong timer_end= 0;
4086 ulonglong wait_time= 0;
4087
4088 PFS_rwlock *rwlock= reinterpret_cast<PFS_rwlock *> (state->m_rwlock);
4089 assert(rwlock != NULL);
4090 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4091
4092 if (state->m_flags & STATE_FLAG_TIMED)
4093 {
4094 timer_end= state->m_timer();
4095 wait_time= timer_end - state->m_timer_start;
4096 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4097 rwlock->m_rwlock_stat.m_wait_stat.aggregate_value(wait_time);
4098 }
4099 else
4100 {
4101 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4102 rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
4103 }
4104
4105 if (likely(rc == 0))
4106 {
4107 /* Thread safe : we are protected by the instrumented rwlock */
4108 rwlock->m_writer= thread;
4109 rwlock->m_last_written= timer_end;
4110
4111 if ((state->m_operation != PSI_RWLOCK_SHAREDEXCLUSIVELOCK) &&
4112 (state->m_operation != PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK))
4113 {
4114 /* Reset the readers stats, they could be off */
4115 rwlock->m_readers= 0;
4116 rwlock->m_last_read= 0;
4117 }
4118 }
4119
4120 if (state->m_flags & STATE_FLAG_THREAD)
4121 {
4122 PFS_single_stat *event_name_array;
4123 event_name_array= thread->write_instr_class_waits_stats();
4124 uint index= rwlock->m_class->m_event_name_index;
4125
4126 if (state->m_flags & STATE_FLAG_TIMED)
4127 {
4128 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4129 event_name_array[index].aggregate_value(wait_time);
4130 }
4131 else
4132 {
4133 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4134 event_name_array[index].aggregate_counted();
4135 }
4136
4137 if (state->m_flags & STATE_FLAG_EVENT)
4138 {
4139 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4140 assert(wait != NULL);
4141
4142 wait->m_timer_end= timer_end;
4143 wait->m_end_event_id= thread->m_event_id;
4144 if (thread->m_flag_events_waits_history)
4145 insert_events_waits_history(thread, wait);
4146 if (thread->m_flag_events_waits_history_long)
4147 insert_events_waits_history_long(wait);
4148 thread->m_events_waits_current--;
4149
4150 assert(wait == thread->m_events_waits_current);
4151 }
4152 }
4153 }
4154
4155 /**
4156 Implementation of the cond instrumentation interface.
4157 @sa PSI_v1::end_cond_wait.
4158 */
pfs_end_cond_wait_v1(PSI_cond_locker * locker,int rc)4159 void pfs_end_cond_wait_v1(PSI_cond_locker* locker, int rc)
4160 {
4161 PSI_cond_locker_state *state= reinterpret_cast<PSI_cond_locker_state*> (locker);
4162 assert(state != NULL);
4163
4164 ulonglong timer_end= 0;
4165 ulonglong wait_time= 0;
4166
4167 PFS_cond *cond= reinterpret_cast<PFS_cond *> (state->m_cond);
4168 /* PFS_mutex *mutex= reinterpret_cast<PFS_mutex *> (state->m_mutex); */
4169
4170 if (state->m_flags & STATE_FLAG_TIMED)
4171 {
4172 timer_end= state->m_timer();
4173 wait_time= timer_end - state->m_timer_start;
4174 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4175 cond->m_cond_stat.m_wait_stat.aggregate_value(wait_time);
4176 }
4177 else
4178 {
4179 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4180 cond->m_cond_stat.m_wait_stat.aggregate_counted();
4181 }
4182
4183 if (state->m_flags & STATE_FLAG_THREAD)
4184 {
4185 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4186 assert(thread != NULL);
4187
4188 PFS_single_stat *event_name_array;
4189 event_name_array= thread->write_instr_class_waits_stats();
4190 uint index= cond->m_class->m_event_name_index;
4191
4192 if (state->m_flags & STATE_FLAG_TIMED)
4193 {
4194 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4195 event_name_array[index].aggregate_value(wait_time);
4196 }
4197 else
4198 {
4199 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4200 event_name_array[index].aggregate_counted();
4201 }
4202
4203 if (state->m_flags & STATE_FLAG_EVENT)
4204 {
4205 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4206 assert(wait != NULL);
4207
4208 wait->m_timer_end= timer_end;
4209 wait->m_end_event_id= thread->m_event_id;
4210 if (thread->m_flag_events_waits_history)
4211 insert_events_waits_history(thread, wait);
4212 if (thread->m_flag_events_waits_history_long)
4213 insert_events_waits_history_long(wait);
4214 thread->m_events_waits_current--;
4215
4216 assert(wait == thread->m_events_waits_current);
4217 }
4218 }
4219 }
4220
4221 /**
4222 Implementation of the table instrumentation interface.
4223 @sa PSI_v1::end_table_io_wait.
4224 */
pfs_end_table_io_wait_v1(PSI_table_locker * locker,ulonglong numrows)4225 void pfs_end_table_io_wait_v1(PSI_table_locker* locker, ulonglong numrows)
4226 {
4227 PSI_table_locker_state *state= reinterpret_cast<PSI_table_locker_state*> (locker);
4228 assert(state != NULL);
4229
4230 ulonglong timer_end= 0;
4231 ulonglong wait_time= 0;
4232
4233 PFS_table *table= reinterpret_cast<PFS_table *> (state->m_table);
4234 assert(table != NULL);
4235
4236 PFS_single_stat *stat;
4237 PFS_table_io_stat *table_io_stat;
4238
4239 assert((state->m_index < table->m_share->m_key_count) ||
4240 (state->m_index == MAX_INDEXES));
4241
4242 table_io_stat= & table->m_table_stat.m_index_stat[state->m_index];
4243 table_io_stat->m_has_data= true;
4244
4245 switch (state->m_io_operation)
4246 {
4247 case PSI_TABLE_FETCH_ROW:
4248 stat= & table_io_stat->m_fetch;
4249 break;
4250 case PSI_TABLE_WRITE_ROW:
4251 stat= & table_io_stat->m_insert;
4252 break;
4253 case PSI_TABLE_UPDATE_ROW:
4254 stat= & table_io_stat->m_update;
4255 break;
4256 case PSI_TABLE_DELETE_ROW:
4257 stat= & table_io_stat->m_delete;
4258 break;
4259 default:
4260 assert(false);
4261 stat= NULL;
4262 break;
4263 }
4264
4265 uint flags= state->m_flags;
4266
4267 if (flags & STATE_FLAG_TIMED)
4268 {
4269 timer_end= state->m_timer();
4270 wait_time= timer_end - state->m_timer_start;
4271 stat->aggregate_many_value(wait_time, numrows);
4272 }
4273 else
4274 {
4275 stat->aggregate_counted(numrows);
4276 }
4277
4278 if (flags & STATE_FLAG_THREAD)
4279 {
4280 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4281 assert(thread != NULL);
4282
4283 PFS_single_stat *event_name_array;
4284 event_name_array= thread->write_instr_class_waits_stats();
4285
4286 /*
4287 Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
4288 (for wait/io/table/sql/handler)
4289 */
4290 if (flags & STATE_FLAG_TIMED)
4291 {
4292 event_name_array[GLOBAL_TABLE_IO_EVENT_INDEX].aggregate_many_value(wait_time, numrows);
4293 }
4294 else
4295 {
4296 event_name_array[GLOBAL_TABLE_IO_EVENT_INDEX].aggregate_counted(numrows);
4297 }
4298
4299 if (flags & STATE_FLAG_EVENT)
4300 {
4301 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4302 assert(wait != NULL);
4303
4304 wait->m_timer_end= timer_end;
4305 wait->m_end_event_id= thread->m_event_id;
4306 wait->m_number_of_bytes= static_cast<size_t>(numrows);
4307 if (thread->m_flag_events_waits_history)
4308 insert_events_waits_history(thread, wait);
4309 if (thread->m_flag_events_waits_history_long)
4310 insert_events_waits_history_long(wait);
4311 thread->m_events_waits_current--;
4312
4313 assert(wait == thread->m_events_waits_current);
4314 }
4315 }
4316
4317 table->m_has_io_stats= true;
4318 }
4319
4320 /**
4321 Implementation of the table instrumentation interface.
4322 @sa PSI_v1::end_table_lock_wait.
4323 */
pfs_end_table_lock_wait_v1(PSI_table_locker * locker)4324 void pfs_end_table_lock_wait_v1(PSI_table_locker* locker)
4325 {
4326 PSI_table_locker_state *state= reinterpret_cast<PSI_table_locker_state*> (locker);
4327 assert(state != NULL);
4328
4329 ulonglong timer_end= 0;
4330 ulonglong wait_time= 0;
4331
4332 PFS_table *table= reinterpret_cast<PFS_table *> (state->m_table);
4333 assert(table != NULL);
4334
4335 PFS_single_stat *stat= & table->m_table_stat.m_lock_stat.m_stat[state->m_index];
4336
4337 uint flags= state->m_flags;
4338
4339 if (flags & STATE_FLAG_TIMED)
4340 {
4341 timer_end= state->m_timer();
4342 wait_time= timer_end - state->m_timer_start;
4343 stat->aggregate_value(wait_time);
4344 }
4345 else
4346 {
4347 stat->aggregate_counted();
4348 }
4349
4350 if (flags & STATE_FLAG_THREAD)
4351 {
4352 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4353 assert(thread != NULL);
4354
4355 PFS_single_stat *event_name_array;
4356 event_name_array= thread->write_instr_class_waits_stats();
4357
4358 /*
4359 Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
4360 (for wait/lock/table/sql/handler)
4361 */
4362 if (flags & STATE_FLAG_TIMED)
4363 {
4364 event_name_array[GLOBAL_TABLE_LOCK_EVENT_INDEX].aggregate_value(wait_time);
4365 }
4366 else
4367 {
4368 event_name_array[GLOBAL_TABLE_LOCK_EVENT_INDEX].aggregate_counted();
4369 }
4370
4371 if (flags & STATE_FLAG_EVENT)
4372 {
4373 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4374 assert(wait != NULL);
4375
4376 wait->m_timer_end= timer_end;
4377 wait->m_end_event_id= thread->m_event_id;
4378 if (thread->m_flag_events_waits_history)
4379 insert_events_waits_history(thread, wait);
4380 if (thread->m_flag_events_waits_history_long)
4381 insert_events_waits_history_long(wait);
4382 thread->m_events_waits_current--;
4383
4384 assert(wait == thread->m_events_waits_current);
4385 }
4386 }
4387
4388 table->m_has_lock_stats= true;
4389 }
4390
4391 void pfs_start_file_wait_v1(PSI_file_locker *locker,
4392 size_t count,
4393 const char *src_file,
4394 uint src_line);
4395
4396 void pfs_end_file_wait_v1(PSI_file_locker *locker,
4397 size_t count);
4398
4399 /**
4400 Implementation of the file instrumentation interface.
4401 @sa PSI_v1::start_file_open_wait.
4402 */
pfs_start_file_open_wait_v1(PSI_file_locker * locker,const char * src_file,uint src_line)4403 void pfs_start_file_open_wait_v1(PSI_file_locker *locker,
4404 const char *src_file,
4405 uint src_line)
4406 {
4407 pfs_start_file_wait_v1(locker, 0, src_file, src_line);
4408
4409 return;
4410 }
4411
4412 /**
4413 Implementation of the file instrumentation interface.
4414 @sa PSI_v1::end_file_open_wait.
4415 */
4416 PSI_file*
pfs_end_file_open_wait_v1(PSI_file_locker * locker,void * result)4417 pfs_end_file_open_wait_v1(PSI_file_locker *locker,
4418 void *result)
4419 {
4420 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4421 assert(state != NULL);
4422
4423 switch (state->m_operation)
4424 {
4425 case PSI_FILE_STAT:
4426 case PSI_FILE_RENAME:
4427 break;
4428 case PSI_FILE_STREAM_OPEN:
4429 case PSI_FILE_CREATE:
4430 case PSI_FILE_OPEN:
4431 if (result != NULL)
4432 {
4433 PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
4434 PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4435 const char *name= state->m_name;
4436 uint len= (uint)strlen(name);
4437 PFS_file *pfs_file= find_or_create_file(thread, klass, name, len, true);
4438 state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
4439 }
4440 break;
4441 default:
4442 assert(false);
4443 break;
4444 }
4445
4446 pfs_end_file_wait_v1(locker, 0);
4447
4448 return state->m_file;
4449 }
4450
4451 /**
4452 Implementation of the file instrumentation interface.
4453 @sa PSI_v1::end_file_open_wait_and_bind_to_descriptor.
4454 */
pfs_end_file_open_wait_and_bind_to_descriptor_v1(PSI_file_locker * locker,File file)4455 void pfs_end_file_open_wait_and_bind_to_descriptor_v1
4456 (PSI_file_locker *locker, File file)
4457 {
4458 PFS_file *pfs_file= NULL;
4459 int index= (int) file;
4460 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4461 assert(state != NULL);
4462
4463 if (index >= 0)
4464 {
4465 PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
4466 PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4467 const char *name= state->m_name;
4468 uint len= (uint)strlen(name);
4469 pfs_file= find_or_create_file(thread, klass, name, len, true);
4470 state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
4471 }
4472
4473 pfs_end_file_wait_v1(locker, 0);
4474
4475 if (likely(index >= 0))
4476 {
4477 if (likely(index < file_handle_max))
4478 file_handle_array[index]= pfs_file;
4479 else
4480 {
4481 if (pfs_file != NULL)
4482 release_file(pfs_file);
4483 file_handle_lost++;
4484 }
4485 }
4486 }
4487
4488 /**
4489 Implementation of the file instrumentation interface.
4490 @sa PSI_v1::end_temp_file_open_wait_and_bind_to_descriptor.
4491 */
pfs_end_temp_file_open_wait_and_bind_to_descriptor_v1(PSI_file_locker * locker,File file,const char * filename)4492 void pfs_end_temp_file_open_wait_and_bind_to_descriptor_v1
4493 (PSI_file_locker *locker, File file, const char *filename)
4494 {
4495 assert(filename != NULL);
4496 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4497 assert(state != NULL);
4498
4499 /* Set filename that was generated during creation of temporary file. */
4500 state->m_name= filename;
4501 pfs_end_file_open_wait_and_bind_to_descriptor_v1(locker, file);
4502
4503 PFS_file *pfs_file= reinterpret_cast<PFS_file *> (state->m_file);
4504 if (pfs_file != NULL)
4505 {
4506 pfs_file->m_temporary= true;
4507 }
4508 }
4509
4510
4511 /**
4512 Implementation of the file instrumentation interface.
4513 @sa PSI_v1::start_file_wait.
4514 */
pfs_start_file_wait_v1(PSI_file_locker * locker,size_t count,const char * src_file,uint src_line)4515 void pfs_start_file_wait_v1(PSI_file_locker *locker,
4516 size_t count,
4517 const char *src_file,
4518 uint src_line)
4519 {
4520 ulonglong timer_start= 0;
4521 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4522 assert(state != NULL);
4523
4524 uint flags= state->m_flags;
4525
4526 if (flags & STATE_FLAG_TIMED)
4527 {
4528 timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
4529 state->m_timer_start= timer_start;
4530 }
4531
4532 if (flags & STATE_FLAG_EVENT)
4533 {
4534 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4535 assert(wait != NULL);
4536
4537 wait->m_timer_start= timer_start;
4538 wait->m_source_file= src_file;
4539 wait->m_source_line= src_line;
4540 wait->m_number_of_bytes= count;
4541 }
4542 }
4543
4544 /**
4545 Implementation of the file instrumentation interface.
4546 @sa PSI_v1::end_file_wait.
4547 */
pfs_end_file_wait_v1(PSI_file_locker * locker,size_t byte_count)4548 void pfs_end_file_wait_v1(PSI_file_locker *locker,
4549 size_t byte_count)
4550 {
4551 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4552 assert(state != NULL);
4553 PFS_file *file= reinterpret_cast<PFS_file *> (state->m_file);
4554 PFS_file_class *klass= reinterpret_cast<PFS_file_class *> (state->m_class);
4555 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4556
4557 ulonglong timer_end= 0;
4558 ulonglong wait_time= 0;
4559 PFS_byte_stat *byte_stat;
4560 uint flags= state->m_flags;
4561 size_t bytes= ((int)byte_count > -1 ? byte_count : 0);
4562
4563 PFS_file_stat *file_stat;
4564
4565 if (file != NULL)
4566 {
4567 file_stat= & file->m_file_stat;
4568 }
4569 else
4570 {
4571 file_stat= & klass->m_file_stat;
4572 }
4573
4574 switch (state->m_operation)
4575 {
4576 /* Group read operations */
4577 case PSI_FILE_READ:
4578 byte_stat= &file_stat->m_io_stat.m_read;
4579 break;
4580 /* Group write operations */
4581 case PSI_FILE_WRITE:
4582 byte_stat= &file_stat->m_io_stat.m_write;
4583 break;
4584 /* Group remaining operations as miscellaneous */
4585 case PSI_FILE_CREATE:
4586 case PSI_FILE_CREATE_TMP:
4587 case PSI_FILE_OPEN:
4588 case PSI_FILE_STREAM_OPEN:
4589 case PSI_FILE_STREAM_CLOSE:
4590 case PSI_FILE_SEEK:
4591 case PSI_FILE_TELL:
4592 case PSI_FILE_FLUSH:
4593 case PSI_FILE_FSTAT:
4594 case PSI_FILE_CHSIZE:
4595 case PSI_FILE_DELETE:
4596 case PSI_FILE_RENAME:
4597 case PSI_FILE_SYNC:
4598 case PSI_FILE_STAT:
4599 case PSI_FILE_CLOSE:
4600 byte_stat= &file_stat->m_io_stat.m_misc;
4601 break;
4602 default:
4603 assert(false);
4604 byte_stat= NULL;
4605 break;
4606 }
4607
4608 /* Aggregation for EVENTS_WAITS_SUMMARY_BY_INSTANCE */
4609 if (flags & STATE_FLAG_TIMED)
4610 {
4611 timer_end= state->m_timer();
4612 wait_time= timer_end - state->m_timer_start;
4613 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4614 byte_stat->aggregate(wait_time, bytes);
4615 }
4616 else
4617 {
4618 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4619 byte_stat->aggregate_counted(bytes);
4620 }
4621
4622 if (flags & STATE_FLAG_THREAD)
4623 {
4624 assert(thread != NULL);
4625
4626 PFS_single_stat *event_name_array;
4627 event_name_array= thread->write_instr_class_waits_stats();
4628 uint index= klass->m_event_name_index;
4629
4630 if (flags & STATE_FLAG_TIMED)
4631 {
4632 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4633 event_name_array[index].aggregate_value(wait_time);
4634 }
4635 else
4636 {
4637 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4638 event_name_array[index].aggregate_counted();
4639 }
4640
4641 if (state->m_flags & STATE_FLAG_EVENT)
4642 {
4643 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4644 assert(wait != NULL);
4645
4646 wait->m_timer_end= timer_end;
4647 wait->m_number_of_bytes= bytes;
4648 wait->m_end_event_id= thread->m_event_id;
4649 wait->m_object_instance_addr= file;
4650 wait->m_weak_file= file;
4651 wait->m_weak_version= (file ? file->get_version() : 0);
4652
4653 if (thread->m_flag_events_waits_history)
4654 insert_events_waits_history(thread, wait);
4655 if (thread->m_flag_events_waits_history_long)
4656 insert_events_waits_history_long(wait);
4657 thread->m_events_waits_current--;
4658
4659 assert(wait == thread->m_events_waits_current);
4660 }
4661 }
4662 }
4663
4664 /**
4665 Implementation of the file instrumentation interface.
4666 @sa PSI_v1::start_file_close_wait.
4667 */
pfs_start_file_close_wait_v1(PSI_file_locker * locker,const char * src_file,uint src_line)4668 void pfs_start_file_close_wait_v1(PSI_file_locker *locker,
4669 const char *src_file,
4670 uint src_line)
4671 {
4672 PFS_thread *thread;
4673 const char *name;
4674 uint len;
4675 PFS_file *pfs_file;
4676 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4677 assert(state != NULL);
4678
4679 switch (state->m_operation)
4680 {
4681 case PSI_FILE_DELETE:
4682 thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4683 name= state->m_name;
4684 len= (uint)strlen(name);
4685 pfs_file= find_or_create_file(thread, NULL, name, len, false);
4686 state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
4687 break;
4688 case PSI_FILE_STREAM_CLOSE:
4689 case PSI_FILE_CLOSE:
4690 break;
4691 default:
4692 assert(false);
4693 break;
4694 }
4695
4696 pfs_start_file_wait_v1(locker, 0, src_file, src_line);
4697
4698 return;
4699 }
4700
4701 /**
4702 Implementation of the file instrumentation interface.
4703 @sa PSI_v1::end_file_close_wait.
4704 */
pfs_end_file_close_wait_v1(PSI_file_locker * locker,int rc)4705 void pfs_end_file_close_wait_v1(PSI_file_locker *locker, int rc)
4706 {
4707 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4708 assert(state != NULL);
4709
4710 pfs_end_file_wait_v1(locker, 0);
4711
4712 if (rc == 0)
4713 {
4714 PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4715 PFS_file *file= reinterpret_cast<PFS_file*> (state->m_file);
4716
4717 /* Release or destroy the file if necessary */
4718 switch(state->m_operation)
4719 {
4720 case PSI_FILE_CLOSE:
4721 if (file != NULL)
4722 {
4723 if (file->m_temporary)
4724 {
4725 assert(file->m_file_stat.m_open_count <= 1);
4726 destroy_file(thread, file);
4727 }
4728 else
4729 release_file(file);
4730 }
4731 break;
4732 case PSI_FILE_STREAM_CLOSE:
4733 if (file != NULL)
4734 release_file(file);
4735 break;
4736 case PSI_FILE_DELETE:
4737 if (file != NULL)
4738 destroy_file(thread, file);
4739 break;
4740 default:
4741 assert(false);
4742 break;
4743 }
4744 }
4745 return;
4746 }
4747
4748 /**
4749 Implementation of the file instrumentation interface.
4750 @sa PSI_v1::end_file_rename_wait.
4751 */
pfs_end_file_rename_wait_v1(PSI_file_locker * locker,const char * old_name,const char * new_name,int rc)4752 void pfs_end_file_rename_wait_v1(PSI_file_locker *locker, const char *old_name,
4753 const char *new_name, int rc)
4754 {
4755 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4756 assert(state != NULL);
4757 assert(state->m_operation == PSI_FILE_RENAME);
4758
4759 if (rc == 0)
4760 {
4761 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4762
4763 uint old_len= (uint)strlen(old_name);
4764 uint new_len= (uint)strlen(new_name);
4765
4766 find_and_rename_file(thread, old_name, old_len, new_name, new_len);
4767 }
4768
4769 pfs_end_file_wait_v1(locker, 0);
4770 return;
4771 }
4772
4773 PSI_stage_progress*
pfs_start_stage_v1(PSI_stage_key key,const char * src_file,int src_line)4774 pfs_start_stage_v1(PSI_stage_key key, const char *src_file, int src_line)
4775 {
4776 ulonglong timer_value= 0;
4777
4778 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
4779 if (unlikely(pfs_thread == NULL))
4780 return NULL;
4781
4782 /* Always update column threads.processlist_state. */
4783 pfs_thread->m_stage= key;
4784 /* Default value when the stage is not instrumented for progress */
4785 pfs_thread->m_stage_progress= NULL;
4786
4787 if (! flag_global_instrumentation)
4788 return NULL;
4789
4790 if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
4791 return NULL;
4792
4793 PFS_events_stages *pfs= & pfs_thread->m_stage_current;
4794 PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
4795 PFS_events_statements *parent_statement= & pfs_thread->m_statement_stack[0];
4796
4797 PFS_instr_class *old_class= pfs->m_class;
4798 if (old_class != NULL)
4799 {
4800 PFS_stage_stat *event_name_array;
4801 event_name_array= pfs_thread->write_instr_class_stages_stats();
4802 uint index= old_class->m_event_name_index;
4803
4804 /* Finish old event */
4805 if (old_class->m_timed)
4806 {
4807 timer_value= get_timer_raw_value(stage_timer);;
4808 pfs->m_timer_end= timer_value;
4809
4810 /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4811 ulonglong stage_time= timer_value - pfs->m_timer_start;
4812 event_name_array[index].aggregate_value(stage_time);
4813 }
4814 else
4815 {
4816 /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4817 event_name_array[index].aggregate_counted();
4818 }
4819
4820 if (flag_events_stages_current)
4821 {
4822 pfs->m_end_event_id= pfs_thread->m_event_id;
4823 if (pfs_thread->m_flag_events_stages_history)
4824 insert_events_stages_history(pfs_thread, pfs);
4825 if (pfs_thread->m_flag_events_stages_history_long)
4826 insert_events_stages_history_long(pfs);
4827 }
4828
4829 /* This stage event is now complete. */
4830 pfs->m_class= NULL;
4831
4832 /* New waits will now be attached directly to the parent statement. */
4833 child_wait->m_event_id= parent_statement->m_event_id;
4834 child_wait->m_event_type= parent_statement->m_event_type;
4835 /* See below for new stages, that may overwrite this. */
4836 }
4837
4838 /* Start new event */
4839
4840 PFS_stage_class *new_klass= find_stage_class(key);
4841 if (unlikely(new_klass == NULL))
4842 return NULL;
4843
4844 if (! new_klass->m_enabled)
4845 return NULL;
4846
4847 pfs->m_class= new_klass;
4848 if (new_klass->m_timed)
4849 {
4850 /*
4851 Do not call the timer again if we have a
4852 TIMER_END for the previous stage already.
4853 */
4854 if (timer_value == 0)
4855 timer_value= get_timer_raw_value(stage_timer);
4856 pfs->m_timer_start= timer_value;
4857 }
4858 else
4859 pfs->m_timer_start= 0;
4860 pfs->m_timer_end= 0;
4861
4862 if (flag_events_stages_current)
4863 {
4864 pfs->m_thread_internal_id= pfs_thread->m_thread_internal_id;
4865 pfs->m_event_id= pfs_thread->m_event_id++;
4866 pfs->m_end_event_id= 0;
4867 pfs->m_source_file= src_file;
4868 pfs->m_source_line= src_line;
4869
4870 /* New wait events will have this new stage as parent. */
4871 child_wait->m_event_id= pfs->m_event_id;
4872 child_wait->m_event_type= EVENT_TYPE_STAGE;
4873 }
4874
4875 if (new_klass->is_progress())
4876 {
4877 pfs_thread->m_stage_progress= & pfs->m_progress;
4878 pfs->m_progress.m_work_completed= 0;
4879 pfs->m_progress.m_work_estimated= 0;
4880 }
4881
4882 return pfs_thread->m_stage_progress;
4883 }
4884
4885 PSI_stage_progress*
pfs_get_current_stage_progress_v1()4886 pfs_get_current_stage_progress_v1()
4887 {
4888 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
4889 if (unlikely(pfs_thread == NULL))
4890 return NULL;
4891
4892 return pfs_thread->m_stage_progress;
4893 }
4894
pfs_end_stage_v1()4895 void pfs_end_stage_v1()
4896 {
4897 ulonglong timer_value= 0;
4898
4899 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
4900 if (unlikely(pfs_thread == NULL))
4901 return;
4902
4903 pfs_thread->m_stage= 0;
4904 pfs_thread->m_stage_progress= NULL;
4905
4906 if (! flag_global_instrumentation)
4907 return;
4908
4909 if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
4910 return;
4911
4912 PFS_events_stages *pfs= & pfs_thread->m_stage_current;
4913
4914 PFS_instr_class *old_class= pfs->m_class;
4915 if (old_class != NULL)
4916 {
4917 PFS_stage_stat *event_name_array;
4918 event_name_array= pfs_thread->write_instr_class_stages_stats();
4919 uint index= old_class->m_event_name_index;
4920
4921 /* Finish old event */
4922 if (old_class->m_timed)
4923 {
4924 timer_value= get_timer_raw_value(stage_timer);;
4925 pfs->m_timer_end= timer_value;
4926
4927 /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4928 ulonglong stage_time= timer_value - pfs->m_timer_start;
4929 event_name_array[index].aggregate_value(stage_time);
4930 }
4931 else
4932 {
4933 /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4934 event_name_array[index].aggregate_counted();
4935 }
4936
4937 if (flag_events_stages_current)
4938 {
4939 pfs->m_end_event_id= pfs_thread->m_event_id;
4940 if (pfs_thread->m_flag_events_stages_history)
4941 insert_events_stages_history(pfs_thread, pfs);
4942 if (pfs_thread->m_flag_events_stages_history_long)
4943 insert_events_stages_history_long(pfs);
4944 }
4945
4946 /* New waits will now be attached directly to the parent statement. */
4947 PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
4948 PFS_events_statements *parent_statement= & pfs_thread->m_statement_stack[0];
4949 child_wait->m_event_id= parent_statement->m_event_id;
4950 child_wait->m_event_type= parent_statement->m_event_type;
4951
4952 /* This stage is completed */
4953 pfs->m_class= NULL;
4954 }
4955 }
4956
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::get_thread_statement_locker.
  Build a statement locker for the instrument @c key.
  @param state     locker state, owned by the caller (the sql layer)
  @param key       registered statement instrument key
  @param charset   connection character set (CHARSET_INFO*), used for SQLTEXT
  @param sp_share  enclosing stored program share, or NULL
  @return an opaque locker, or NULL when the statement instrumentation
          is disabled globally, per thread, or for this instrument,
          or when the statement nesting stack is full.
*/
PSI_statement_locker*
pfs_get_thread_statement_locker_v1(PSI_statement_locker_state *state,
                                   PSI_statement_key key,
                                   const void *charset, PSI_sp_share *sp_share)
{
  assert(state != NULL);
  assert(charset != NULL);
  if (! flag_global_instrumentation)
    return NULL;
  PFS_statement_class *klass= find_statement_class(key);
  if (unlikely(klass == NULL))
    return NULL;
  if (! klass->m_enabled)
    return NULL;

  uint flags;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (klass->m_timed)
      flags|= STATE_FLAG_TIMED;

    if (flag_events_statements_current)
    {
      ulonglong event_id= pfs_thread->m_event_id++;

      /* Statements nested deeper than the stack capacity are lost. */
      if (pfs_thread->m_events_statements_count >= statement_stack_max)
      {
        nested_statement_lost++;
        return NULL;
      }

      /*
        Mark the row dirty while it is being populated, so that
        optimistic readers of EVENTS_STATEMENTS_CURRENT can detect
        the concurrent write and retry.
      */
      pfs_dirty_state dirty_state;
      pfs_thread->m_stmt_lock.allocated_to_dirty(& dirty_state);
      PFS_events_statements *pfs= & pfs_thread->m_statement_stack[pfs_thread->m_events_statements_count];
      pfs->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      pfs->m_event_id= event_id;
      pfs->m_event_type= EVENT_TYPE_STATEMENT;
      pfs->m_end_event_id= 0;
      pfs->m_class= klass;
      pfs->m_timer_start= 0;
      pfs->m_timer_end= 0;
      pfs->m_lock_time= 0;
      pfs->m_current_schema_name_length= 0;
      pfs->m_sqltext_length= 0;
      pfs->m_sqltext_truncated= false;
      pfs->m_sqltext_cs_number= system_charset_info->number; /* default */

      /* Reset diagnostics-area derived columns. */
      pfs->m_message_text[0]= '\0';
      pfs->m_sql_errno= 0;
      pfs->m_sqlstate[0]= '\0';
      pfs->m_error_count= 0;
      pfs->m_warning_count= 0;
      pfs->m_rows_affected= 0;

      /* Reset optimizer / execution statistics counters. */
      pfs->m_rows_sent= 0;
      pfs->m_rows_examined= 0;
      pfs->m_created_tmp_disk_tables= 0;
      pfs->m_created_tmp_tables= 0;
      pfs->m_select_full_join= 0;
      pfs->m_select_full_range_join= 0;
      pfs->m_select_range= 0;
      pfs->m_select_range_check= 0;
      pfs->m_select_scan= 0;
      pfs->m_sort_merge_passes= 0;
      pfs->m_sort_range= 0;
      pfs->m_sort_rows= 0;
      pfs->m_sort_scan= 0;
      pfs->m_no_index_used= 0;
      pfs->m_no_good_index_used= 0;
      pfs->m_digest_storage.reset();

      /* New stages will have this statement as parent */
      PFS_events_stages *child_stage= & pfs_thread->m_stage_current;
      child_stage->m_nesting_event_id= event_id;
      child_stage->m_nesting_event_type= EVENT_TYPE_STATEMENT;

      /* New waits will have this statement as parent, if no stage is instrumented */
      PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
      child_wait->m_event_id= event_id;
      child_wait->m_event_type= EVENT_TYPE_STATEMENT;

      /*
        Compute this statement's NESTING_EVENT_* columns from the
        enclosing statement (if nested) and the active transaction,
        whichever event is the most recent.
      */
      PFS_events_statements *parent_statement= NULL;
      PFS_events_transactions *parent_transaction= &pfs_thread->m_transaction_current;
      ulonglong parent_event= 0;
      enum_event_type parent_type= EVENT_TYPE_STATEMENT;
      uint parent_level= 0;

      if (pfs_thread->m_events_statements_count > 0)
      {
        parent_statement= pfs - 1;
        parent_event= parent_statement->m_event_id;
        parent_type= parent_statement->m_event_type;
        parent_level= parent_statement->m_nesting_event_level + 1;
      }

      if (parent_transaction->m_state == TRANS_STATE_ACTIVE &&
          parent_transaction->m_event_id > parent_event)
      {
        parent_event= parent_transaction->m_event_id;
        parent_type= parent_transaction->m_event_type;
      }

      pfs->m_nesting_event_id= parent_event;
      pfs->m_nesting_event_type= parent_type;
      pfs->m_nesting_event_level= parent_level;

      /* Set parent Stored Procedure information for this statement. */
      if(sp_share)
      {
        PFS_program *parent_sp= reinterpret_cast<PFS_program*>(sp_share);
        pfs->m_sp_type= parent_sp->m_type;
        memcpy(pfs->m_schema_name, parent_sp->m_schema_name,
               parent_sp->m_schema_name_length);
        pfs->m_schema_name_length= parent_sp->m_schema_name_length;
        memcpy(pfs->m_object_name, parent_sp->m_object_name,
               parent_sp->m_object_name_length);
        pfs->m_object_name_length= parent_sp->m_object_name_length;
      }
      else
      {
        pfs->m_sp_type= NO_OBJECT_TYPE;
        pfs->m_schema_name_length= 0;
        pfs->m_object_name_length= 0;
      }

      state->m_statement= pfs;
      flags|= STATE_FLAG_EVENT;

      /* Publish the row: bump the stack count, then clear the dirty mark. */
      pfs_thread->m_events_statements_count++;
      pfs_thread->m_stmt_lock.dirty_to_allocated(& dirty_state);
    }
    else
    {
      state->m_statement= NULL;
    }
  }
  else
  {
    state->m_statement= NULL;

    if (klass->m_timed)
      flags= STATE_FLAG_TIMED;
    else
      flags= 0;
  }

  if (flag_statements_digest)
  {
    flags|= STATE_FLAG_DIGEST;
  }

  state->m_discarded= false;
  state->m_class= klass;
  state->m_flags= flags;

  /* Reset the per-statement counters accumulated in the locker state. */
  state->m_lock_time= 0;
  state->m_rows_sent= 0;
  state->m_rows_examined= 0;
  state->m_created_tmp_disk_tables= 0;
  state->m_created_tmp_tables= 0;
  state->m_select_full_join= 0;
  state->m_select_full_range_join= 0;
  state->m_select_range= 0;
  state->m_select_range_check= 0;
  state->m_select_scan= 0;
  state->m_sort_merge_passes= 0;
  state->m_sort_range= 0;
  state->m_sort_rows= 0;
  state->m_sort_scan= 0;
  state->m_no_index_used= 0;
  state->m_no_good_index_used= 0;

  state->m_digest= NULL;
  state->m_cs_number= ((CHARSET_INFO *)charset)->number;

  state->m_schema_name_length= 0;
  state->m_parent_sp_share= sp_share;
  state->m_parent_prepared_stmt= NULL;

  return reinterpret_cast<PSI_statement_locker*> (state);
}
5147
5148 PSI_statement_locker*
pfs_refine_statement_v1(PSI_statement_locker * locker,PSI_statement_key key)5149 pfs_refine_statement_v1(PSI_statement_locker *locker,
5150 PSI_statement_key key)
5151 {
5152 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
5153 if (state == NULL)
5154 return NULL;
5155 assert(state->m_class != NULL);
5156 PFS_statement_class *klass;
5157 /* Only refine statements for mutable instrumentation */
5158 klass= reinterpret_cast<PFS_statement_class*> (state->m_class);
5159 assert(klass->is_mutable());
5160 klass= find_statement_class(key);
5161
5162 uint flags= state->m_flags;
5163
5164 if (unlikely(klass == NULL) || !klass->m_enabled)
5165 {
5166 /* pop statement stack */
5167 if (flags & STATE_FLAG_THREAD)
5168 {
5169 PFS_thread *pfs_thread= reinterpret_cast<PFS_thread *> (state->m_thread);
5170 assert(pfs_thread != NULL);
5171 if (pfs_thread->m_events_statements_count > 0)
5172 pfs_thread->m_events_statements_count--;
5173 }
5174
5175 state->m_discarded= true;
5176 return NULL;
5177 }
5178
5179 if ((flags & STATE_FLAG_TIMED) && ! klass->m_timed)
5180 flags= flags & ~STATE_FLAG_TIMED;
5181
5182 if (flags & STATE_FLAG_EVENT)
5183 {
5184 PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
5185 assert(pfs != NULL);
5186
5187 /* mutate EVENTS_STATEMENTS_CURRENT.EVENT_NAME */
5188 pfs->m_class= klass;
5189 }
5190
5191 state->m_class= klass;
5192 state->m_flags= flags;
5193 return reinterpret_cast<PSI_statement_locker*> (state);
5194 }
5195
pfs_start_statement_v1(PSI_statement_locker * locker,const char * db,uint db_len,const char * src_file,uint src_line)5196 void pfs_start_statement_v1(PSI_statement_locker *locker,
5197 const char *db, uint db_len,
5198 const char *src_file, uint src_line)
5199 {
5200 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
5201 assert(state != NULL);
5202
5203 uint flags= state->m_flags;
5204 ulonglong timer_start= 0;
5205
5206 if (flags & STATE_FLAG_TIMED)
5207 {
5208 timer_start= get_timer_raw_value_and_function(statement_timer, & state->m_timer);
5209 state->m_timer_start= timer_start;
5210 }
5211
5212 compile_time_assert(PSI_SCHEMA_NAME_LEN == NAME_LEN);
5213 assert(db_len <= sizeof(state->m_schema_name));
5214
5215 if (db_len > 0)
5216 memcpy(state->m_schema_name, db, db_len);
5217 state->m_schema_name_length= db_len;
5218
5219 if (flags & STATE_FLAG_EVENT)
5220 {
5221 PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
5222 assert(pfs != NULL);
5223
5224 pfs->m_timer_start= timer_start;
5225 pfs->m_source_file= src_file;
5226 pfs->m_source_line= src_line;
5227
5228 assert(db_len <= sizeof(pfs->m_current_schema_name));
5229 if (db_len > 0)
5230 memcpy(pfs->m_current_schema_name, db, db_len);
5231 pfs->m_current_schema_name_length= db_len;
5232 }
5233 }
5234
pfs_set_statement_text_v1(PSI_statement_locker * locker,const char * text,uint text_len)5235 void pfs_set_statement_text_v1(PSI_statement_locker *locker,
5236 const char *text, uint text_len)
5237 {
5238 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
5239 assert(state != NULL);
5240
5241 if (state->m_discarded)
5242 return;
5243
5244 if (state->m_flags & STATE_FLAG_EVENT)
5245 {
5246 PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
5247 assert(pfs != NULL);
5248 if (text_len > pfs_max_sqltext)
5249 {
5250 text_len= (uint)pfs_max_sqltext;
5251 pfs->m_sqltext_truncated= true;
5252 }
5253 if (text_len)
5254 memcpy(pfs->m_sqltext, text, text_len);
5255 pfs->m_sqltext_length= text_len;
5256 pfs->m_sqltext_cs_number= state->m_cs_number;
5257 }
5258
5259 return;
5260 }
5261
/**
  Helper macro: body of the pfs_set_statement_*_v1 functions.
  Assigns VALUE to ATTR in the locker state, and when the statement
  event is instrumented (STATE_FLAG_EVENT), mirrors the value into
  the EVENTS_STATEMENTS_CURRENT row.
  Does nothing when LOCKER is NULL or the statement was discarded
  (see pfs_refine_statement_v1()).
*/
#define SET_STATEMENT_ATTR_BODY(LOCKER, ATTR, VALUE) \
  PSI_statement_locker_state *state; \
  state= reinterpret_cast<PSI_statement_locker_state*> (LOCKER); \
  if (unlikely(state == NULL)) \
    return; \
  if (state->m_discarded) \
    return; \
  state->ATTR= VALUE; \
  if (state->m_flags & STATE_FLAG_EVENT) \
  { \
    PFS_events_statements *pfs; \
    pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement); \
    assert(pfs != NULL); \
    pfs->ATTR= VALUE; \
  } \
  return;
5278
/**
  Helper macro: body of the pfs_inc_statement_*_v1 functions.
  Adds VALUE to ATTR in the locker state, and when the statement
  event is instrumented (STATE_FLAG_EVENT), accumulates the same
  value into the EVENTS_STATEMENTS_CURRENT row.
  Does nothing when LOCKER is NULL or the statement was discarded.
*/
#define INC_STATEMENT_ATTR_BODY(LOCKER, ATTR, VALUE) \
  PSI_statement_locker_state *state; \
  state= reinterpret_cast<PSI_statement_locker_state*> (LOCKER); \
  if (unlikely(state == NULL)) \
    return; \
  if (state->m_discarded) \
    return; \
  state->ATTR+= VALUE; \
  if (state->m_flags & STATE_FLAG_EVENT) \
  { \
    PFS_events_statements *pfs; \
    pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement); \
    assert(pfs != NULL); \
    pfs->ATTR+= VALUE; \
  } \
  return;
5295
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_lock_time.
*/
void pfs_set_statement_lock_time_v1(PSI_statement_locker *locker,
                                    ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_lock_time, count);
}
5301
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_rows_sent.
*/
void pfs_set_statement_rows_sent_v1(PSI_statement_locker *locker,
                                    ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_rows_sent, count);
}
5307
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_rows_examined.
*/
void pfs_set_statement_rows_examined_v1(PSI_statement_locker *locker,
                                        ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_rows_examined, count);
}
5313
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_created_tmp_disk_tables.
*/
void pfs_inc_statement_created_tmp_disk_tables_v1(PSI_statement_locker *locker,
                                                  ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_created_tmp_disk_tables, count);
}
5319
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_created_tmp_tables.
*/
void pfs_inc_statement_created_tmp_tables_v1(PSI_statement_locker *locker,
                                             ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_created_tmp_tables, count);
}
5325
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_full_join.
*/
void pfs_inc_statement_select_full_join_v1(PSI_statement_locker *locker,
                                           ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_full_join, count);
}
5331
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_full_range_join.
*/
void pfs_inc_statement_select_full_range_join_v1(PSI_statement_locker *locker,
                                                 ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_full_range_join, count);
}
5337
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_range.
*/
void pfs_inc_statement_select_range_v1(PSI_statement_locker *locker,
                                       ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_range, count);
}
5343
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_range_check.
*/
void pfs_inc_statement_select_range_check_v1(PSI_statement_locker *locker,
                                             ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_range_check, count);
}
5349
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_select_scan.
*/
void pfs_inc_statement_select_scan_v1(PSI_statement_locker *locker,
                                      ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_scan, count);
}
5355
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_sort_merge_passes.
*/
void pfs_inc_statement_sort_merge_passes_v1(PSI_statement_locker *locker,
                                            ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_merge_passes, count);
}
5361
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_sort_range.
*/
void pfs_inc_statement_sort_range_v1(PSI_statement_locker *locker,
                                     ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_range, count);
}
5367
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_sort_rows.
*/
void pfs_inc_statement_sort_rows_v1(PSI_statement_locker *locker,
                                    ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_rows, count);
}
5373
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::inc_statement_sort_scan.
*/
void pfs_inc_statement_sort_scan_v1(PSI_statement_locker *locker,
                                    ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_scan, count);
}
5379
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_no_index_used.
*/
void pfs_set_statement_no_index_used_v1(PSI_statement_locker *locker)
{
  SET_STATEMENT_ATTR_BODY(locker, m_no_index_used, 1);
}
5384
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::set_statement_no_good_index_used.
*/
void pfs_set_statement_no_good_index_used_v1(PSI_statement_locker *locker)
{
  SET_STATEMENT_ATTR_BODY(locker, m_no_good_index_used, 1);
}
5389
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::end_statement.
  Close the statement event: capture the diagnostics area outcome,
  then aggregate statistics to (in order)
  - EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME or
    EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME,
  - the statement digest summary, if computed,
  - the parent stored program statistics, if any,
  - the prepared statement statistics, if any.
  @param locker  the statement locker (may be discarded)
  @param stmt_da the server Diagnostics_area for this statement
*/
void pfs_end_statement_v1(PSI_statement_locker *locker, void *stmt_da)
{
  PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
  Diagnostics_area *da= reinterpret_cast<Diagnostics_area*> (stmt_da);
  assert(state != NULL);
  assert(da != NULL);

  if (state->m_discarded)
    return;

  PFS_statement_class *klass= reinterpret_cast<PFS_statement_class *> (state->m_class);
  assert(klass != NULL);

  ulonglong timer_end= 0;
  ulonglong wait_time= 0;
  uint flags= state->m_flags;

  if (flags & STATE_FLAG_TIMED)
  {
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;
  }

  PFS_statement_stat *event_name_array;
  uint index= klass->m_event_name_index;
  PFS_statement_stat *stat;

  /*
    Capture statement stats by digest.
  */
  const sql_digest_storage *digest_storage= NULL;
  PFS_statement_stat *digest_stat= NULL;
  PFS_program *pfs_program= NULL;
  PFS_prepared_stmt *pfs_prepared_stmt= NULL;

  if (flags & STATE_FLAG_THREAD)
  {
    PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
    assert(thread != NULL);
    event_name_array= thread->write_instr_class_statements_stats();
    /* Aggregate to EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME */
    stat= & event_name_array[index];

    if (flags & STATE_FLAG_DIGEST)
    {
      digest_storage= state->m_digest;

      if (digest_storage != NULL)
      {
        /* Populate PFS_statements_digest_stat with computed digest information.*/
        digest_stat= find_or_create_digest(thread, digest_storage,
                                           state->m_schema_name,
                                           state->m_schema_name_length);
      }
    }

    if (flags & STATE_FLAG_EVENT)
    {
      PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
      assert(pfs != NULL);

      /* Mark the row dirty while finalizing it, for optimistic readers. */
      pfs_dirty_state dirty_state;
      thread->m_stmt_lock.allocated_to_dirty(& dirty_state);

      /*
        Copy the diagnostics area outcome into the event row.
        NOTE(review): the full MYSQL_ERRMSG_SIZE buffer is copied;
        assumes da->message_text() is at least that large — standard
        Diagnostics_area sizing, confirm if this code is reused.
      */
      switch(da->status())
      {
        case Diagnostics_area::DA_EMPTY:
          break;
        case Diagnostics_area::DA_OK:
          memcpy(pfs->m_message_text, da->message_text(),
                 MYSQL_ERRMSG_SIZE);
          pfs->m_message_text[MYSQL_ERRMSG_SIZE]= 0;
          pfs->m_rows_affected= da->affected_rows();
          pfs->m_warning_count= da->last_statement_cond_count();
          memcpy(pfs->m_sqlstate, "00000", SQLSTATE_LENGTH);
          break;
        case Diagnostics_area::DA_EOF:
          pfs->m_warning_count= da->last_statement_cond_count();
          break;
        case Diagnostics_area::DA_ERROR:
          memcpy(pfs->m_message_text, da->message_text(),
                 MYSQL_ERRMSG_SIZE);
          pfs->m_message_text[MYSQL_ERRMSG_SIZE]= 0;
          pfs->m_sql_errno= da->mysql_errno();
          memcpy(pfs->m_sqlstate, da->returned_sqlstate(), SQLSTATE_LENGTH);
          pfs->m_error_count++;
          break;
        case Diagnostics_area::DA_DISABLED:
          break;
      }

      pfs->m_timer_end= timer_end;
      pfs->m_end_event_id= thread->m_event_id;

      if (digest_storage != NULL)
      {
        /*
          The following columns in events_statement_current:
          - DIGEST,
          - DIGEST_TEXT
          are computed from the digest storage.
        */
        pfs->m_digest_storage.copy(digest_storage);
      }

      pfs_program= reinterpret_cast<PFS_program*>(state->m_parent_sp_share);
      pfs_prepared_stmt= reinterpret_cast<PFS_prepared_stmt*>(state->m_parent_prepared_stmt);

      if (thread->m_flag_events_statements_history)
        insert_events_statements_history(thread, pfs);
      if (thread->m_flag_events_statements_history_long)
        insert_events_statements_history_long(pfs);

      /* Pop the statement stack, then clear the dirty mark. */
      assert(thread->m_events_statements_count > 0);
      thread->m_events_statements_count--;
      thread->m_stmt_lock.dirty_to_allocated(& dirty_state);
    }
  }
  else
  {
    if (flags & STATE_FLAG_DIGEST)
    {
      PFS_thread *thread= my_thread_get_THR_PFS();

      /* An instrumented thread is required, for LF_PINS. */
      if (thread != NULL)
      {
        /* Set digest stat. */
        digest_storage= state->m_digest;

        if (digest_storage != NULL)
        {
          /* Populate statements_digest_stat with computed digest information. */
          digest_stat= find_or_create_digest(thread, digest_storage,
                                             state->m_schema_name,
                                             state->m_schema_name_length);
        }
      }
    }

    event_name_array= global_instr_class_statements_array;
    /* Aggregate to EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME */
    stat= & event_name_array[index];
  }

  stat->mark_used();

  if (flags & STATE_FLAG_TIMED)
  {
    /* Aggregate to EVENTS_STATEMENTS_SUMMARY_..._BY_EVENT_NAME (timed) */
    stat->aggregate_value(wait_time);
  }
  else
  {
    /* Aggregate to EVENTS_STATEMENTS_SUMMARY_..._BY_EVENT_NAME (counted) */
    stat->aggregate_counted();
  }

  /* Add the per-statement counters to the event name summary. */
  stat->m_lock_time+= state->m_lock_time;
  stat->m_rows_sent+= state->m_rows_sent;
  stat->m_rows_examined+= state->m_rows_examined;
  stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
  stat->m_created_tmp_tables+= state->m_created_tmp_tables;
  stat->m_select_full_join+= state->m_select_full_join;
  stat->m_select_full_range_join+= state->m_select_full_range_join;
  stat->m_select_range+= state->m_select_range;
  stat->m_select_range_check+= state->m_select_range_check;
  stat->m_select_scan+= state->m_select_scan;
  stat->m_sort_merge_passes+= state->m_sort_merge_passes;
  stat->m_sort_range+= state->m_sort_range;
  stat->m_sort_rows+= state->m_sort_rows;
  stat->m_sort_scan+= state->m_sort_scan;
  stat->m_no_index_used+= state->m_no_index_used;
  stat->m_no_good_index_used+= state->m_no_good_index_used;

  /* Same aggregation, to the digest summary. */
  if (digest_stat != NULL)
  {
    digest_stat->mark_used();

    if (flags & STATE_FLAG_TIMED)
    {
      digest_stat->aggregate_value(wait_time);
    }
    else
    {
      digest_stat->aggregate_counted();
    }

    digest_stat->m_lock_time+= state->m_lock_time;
    digest_stat->m_rows_sent+= state->m_rows_sent;
    digest_stat->m_rows_examined+= state->m_rows_examined;
    digest_stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
    digest_stat->m_created_tmp_tables+= state->m_created_tmp_tables;
    digest_stat->m_select_full_join+= state->m_select_full_join;
    digest_stat->m_select_full_range_join+= state->m_select_full_range_join;
    digest_stat->m_select_range+= state->m_select_range;
    digest_stat->m_select_range_check+= state->m_select_range_check;
    digest_stat->m_select_scan+= state->m_select_scan;
    digest_stat->m_sort_merge_passes+= state->m_sort_merge_passes;
    digest_stat->m_sort_range+= state->m_sort_range;
    digest_stat->m_sort_rows+= state->m_sort_rows;
    digest_stat->m_sort_scan+= state->m_sort_scan;
    digest_stat->m_no_index_used+= state->m_no_index_used;
    digest_stat->m_no_good_index_used+= state->m_no_good_index_used;
  }

  /* Same aggregation, to the parent stored program statistics. */
  if(pfs_program != NULL)
  {
    PFS_statement_stat *sub_stmt_stat= NULL;
    sub_stmt_stat= &pfs_program->m_stmt_stat;
    if(sub_stmt_stat != NULL)
    {
      sub_stmt_stat->mark_used();

      if (flags & STATE_FLAG_TIMED)
      {
        sub_stmt_stat->aggregate_value(wait_time);
      }
      else
      {
        sub_stmt_stat->aggregate_counted();
      }

      sub_stmt_stat->m_lock_time+= state->m_lock_time;
      sub_stmt_stat->m_rows_sent+= state->m_rows_sent;
      sub_stmt_stat->m_rows_examined+= state->m_rows_examined;
      sub_stmt_stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
      sub_stmt_stat->m_created_tmp_tables+= state->m_created_tmp_tables;
      sub_stmt_stat->m_select_full_join+= state->m_select_full_join;
      sub_stmt_stat->m_select_full_range_join+= state->m_select_full_range_join;
      sub_stmt_stat->m_select_range+= state->m_select_range;
      sub_stmt_stat->m_select_range_check+= state->m_select_range_check;
      sub_stmt_stat->m_select_scan+= state->m_select_scan;
      sub_stmt_stat->m_sort_merge_passes+= state->m_sort_merge_passes;
      sub_stmt_stat->m_sort_range+= state->m_sort_range;
      sub_stmt_stat->m_sort_rows+= state->m_sort_rows;
      sub_stmt_stat->m_sort_scan+= state->m_sort_scan;
      sub_stmt_stat->m_no_index_used+= state->m_no_index_used;
      sub_stmt_stat->m_no_good_index_used+= state->m_no_good_index_used;
    }
  }

  /*
    Aggregate to the prepared statement statistics:
    either the PREPARE phase (timing only) or the EXECUTE phase.
  */
  if (pfs_prepared_stmt != NULL)
  {
    if(state->m_in_prepare)
    {
      PFS_single_stat *prepared_stmt_stat= NULL;
      prepared_stmt_stat= &pfs_prepared_stmt->m_prepare_stat;
      if(prepared_stmt_stat != NULL)
      {
        if (flags & STATE_FLAG_TIMED)
        {
          prepared_stmt_stat->aggregate_value(wait_time);
        }
        else
        {
          prepared_stmt_stat->aggregate_counted();
        }
      }
    }
    else
    {
      PFS_statement_stat *prepared_stmt_stat= NULL;
      prepared_stmt_stat= &pfs_prepared_stmt->m_execute_stat;
      if(prepared_stmt_stat != NULL)
      {
        if (flags & STATE_FLAG_TIMED)
        {
          prepared_stmt_stat->aggregate_value(wait_time);
        }
        else
        {
          prepared_stmt_stat->aggregate_counted();
        }

        prepared_stmt_stat->m_lock_time+= state->m_lock_time;
        prepared_stmt_stat->m_rows_sent+= state->m_rows_sent;
        prepared_stmt_stat->m_rows_examined+= state->m_rows_examined;
        prepared_stmt_stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
        prepared_stmt_stat->m_created_tmp_tables+= state->m_created_tmp_tables;
        prepared_stmt_stat->m_select_full_join+= state->m_select_full_join;
        prepared_stmt_stat->m_select_full_range_join+= state->m_select_full_range_join;
        prepared_stmt_stat->m_select_range+= state->m_select_range;
        prepared_stmt_stat->m_select_range_check+= state->m_select_range_check;
        prepared_stmt_stat->m_select_scan+= state->m_select_scan;
        prepared_stmt_stat->m_sort_merge_passes+= state->m_sort_merge_passes;
        prepared_stmt_stat->m_sort_range+= state->m_sort_range;
        prepared_stmt_stat->m_sort_rows+= state->m_sort_rows;
        prepared_stmt_stat->m_sort_scan+= state->m_sort_scan;
        prepared_stmt_stat->m_no_index_used+= state->m_no_index_used;
        prepared_stmt_stat->m_no_good_index_used+= state->m_no_good_index_used;
      }
    }
  }

  /* Recompute the targets for the diagnostics-area based counters below. */
  PFS_statement_stat *sub_stmt_stat= NULL;
  if (pfs_program != NULL)
    sub_stmt_stat= &pfs_program->m_stmt_stat;

  PFS_statement_stat *prepared_stmt_stat= NULL;
  if (pfs_prepared_stmt != NULL && !state->m_in_prepare)
    prepared_stmt_stat= &pfs_prepared_stmt->m_execute_stat;

  /* Propagate affected rows / warnings / errors to every summary. */
  switch (da->status())
  {
    case Diagnostics_area::DA_EMPTY:
      break;
    case Diagnostics_area::DA_OK:
      stat->m_rows_affected+= da->affected_rows();
      stat->m_warning_count+= da->last_statement_cond_count();
      if (digest_stat != NULL)
      {
        digest_stat->m_rows_affected+= da->affected_rows();
        digest_stat->m_warning_count+= da->last_statement_cond_count();
      }
      if(sub_stmt_stat != NULL)
      {
        sub_stmt_stat->m_rows_affected+= da->affected_rows();
        sub_stmt_stat->m_warning_count+= da->last_statement_cond_count();
      }
      if (prepared_stmt_stat != NULL)
      {
        prepared_stmt_stat->m_rows_affected+= da->affected_rows();
        prepared_stmt_stat->m_warning_count+= da->last_statement_cond_count();
      }
      break;
    case Diagnostics_area::DA_EOF:
      stat->m_warning_count+= da->last_statement_cond_count();
      if (digest_stat != NULL)
      {
        digest_stat->m_warning_count+= da->last_statement_cond_count();
      }
      if(sub_stmt_stat != NULL)
      {
        sub_stmt_stat->m_warning_count+= da->last_statement_cond_count();
      }
      if (prepared_stmt_stat != NULL)
      {
        prepared_stmt_stat->m_warning_count+= da->last_statement_cond_count();
      }
      break;
    case Diagnostics_area::DA_ERROR:
      stat->m_error_count++;
      if (digest_stat != NULL)
      {
        digest_stat->m_error_count++;
      }
      if (sub_stmt_stat != NULL)
      {
        sub_stmt_stat->m_error_count++;
      }
      if (prepared_stmt_stat != NULL)
      {
        prepared_stmt_stat->m_error_count++;
      }
      break;
    case Diagnostics_area::DA_DISABLED:
      break;
  }
}
5750
sp_type_to_object_type(uint sp_type)5751 static inline enum_object_type sp_type_to_object_type(uint sp_type)
5752 {
5753 enum enum_sp_type value= static_cast<enum enum_sp_type> (sp_type);
5754
5755 switch (value)
5756 {
5757 case SP_TYPE_FUNCTION:
5758 return OBJECT_TYPE_FUNCTION;
5759 case SP_TYPE_PROCEDURE:
5760 return OBJECT_TYPE_PROCEDURE;
5761 case SP_TYPE_TRIGGER:
5762 return OBJECT_TYPE_TRIGGER;
5763 case SP_TYPE_EVENT:
5764 return OBJECT_TYPE_EVENT;
5765 default:
5766 assert(false);
5767 /* Dead code */
5768 return NO_OBJECT_TYPE;
5769 }
5770 }
5771
5772 /**
5773 Implementation of the stored program instrumentation interface.
5774 @sa PSI_v1::get_sp_share.
5775 */
pfs_get_sp_share_v1(uint sp_type,const char * schema_name,uint schema_name_length,const char * object_name,uint object_name_length)5776 PSI_sp_share *pfs_get_sp_share_v1(uint sp_type,
5777 const char* schema_name,
5778 uint schema_name_length,
5779 const char* object_name,
5780 uint object_name_length)
5781 {
5782
5783 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
5784 if (unlikely(pfs_thread == NULL))
5785 return NULL;
5786
5787 if (object_name_length > COL_OBJECT_NAME_SIZE)
5788 object_name_length= COL_OBJECT_NAME_SIZE;
5789 if (schema_name_length > COL_OBJECT_SCHEMA_SIZE)
5790 schema_name_length= COL_OBJECT_SCHEMA_SIZE;
5791
5792 PFS_program *pfs_program;
5793 pfs_program= find_or_create_program(pfs_thread,
5794 sp_type_to_object_type(sp_type),
5795 object_name,
5796 object_name_length,
5797 schema_name,
5798 schema_name_length);
5799
5800 return reinterpret_cast<PSI_sp_share *>(pfs_program);
5801 }
5802
pfs_release_sp_share_v1(PSI_sp_share * sp_share)5803 void pfs_release_sp_share_v1(PSI_sp_share* sp_share)
5804 {
5805 /* Unused */
5806 return;
5807 }
5808
pfs_start_sp_v1(PSI_sp_locker_state * state,PSI_sp_share * sp_share)5809 PSI_sp_locker* pfs_start_sp_v1(PSI_sp_locker_state *state,
5810 PSI_sp_share *sp_share)
5811 {
5812 assert(state != NULL);
5813 if (! flag_global_instrumentation)
5814 return NULL;
5815
5816 if (flag_thread_instrumentation)
5817 {
5818 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
5819 if (unlikely(pfs_thread == NULL))
5820 return NULL;
5821 if (! pfs_thread->m_enabled)
5822 return NULL;
5823 }
5824
5825 /*
5826 sp share might be null in case when stat array is full and no new
5827 stored program stats are being inserted into it.
5828 */
5829 PFS_program *pfs_program= reinterpret_cast<PFS_program*>(sp_share);
5830 if (pfs_program == NULL || !pfs_program->m_enabled)
5831 return NULL;
5832
5833 state->m_flags= 0;
5834
5835 if(pfs_program->m_timed)
5836 {
5837 state->m_flags|= STATE_FLAG_TIMED;
5838 state->m_timer_start= get_timer_raw_value_and_function(statement_timer,
5839 & state->m_timer);
5840 }
5841
5842 state->m_sp_share= sp_share;
5843
5844 return reinterpret_cast<PSI_sp_locker*> (state);
5845 }
5846
pfs_end_sp_v1(PSI_sp_locker * locker)5847 void pfs_end_sp_v1(PSI_sp_locker *locker)
5848 {
5849 PSI_sp_locker_state *state= reinterpret_cast<PSI_sp_locker_state*> (locker);
5850 assert(state != NULL);
5851
5852 ulonglong timer_end;
5853 ulonglong wait_time;
5854
5855 PFS_program *pfs_program= reinterpret_cast<PFS_program *>(state->m_sp_share);
5856 PFS_sp_stat *stat= &pfs_program->m_sp_stat;
5857
5858 if (state->m_flags & STATE_FLAG_TIMED)
5859 {
5860 timer_end= state->m_timer();
5861 wait_time= timer_end - state->m_timer_start;
5862
5863 /* Now use this timer_end and wait_time for timing information. */
5864 stat->aggregate_value(wait_time);
5865 }
5866 else
5867 {
5868 stat->aggregate_counted();
5869 }
5870 }
5871
pfs_drop_sp_v1(uint sp_type,const char * schema_name,uint schema_name_length,const char * object_name,uint object_name_length)5872 void pfs_drop_sp_v1(uint sp_type,
5873 const char* schema_name,
5874 uint schema_name_length,
5875 const char* object_name,
5876 uint object_name_length)
5877 {
5878 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
5879 if (unlikely(pfs_thread == NULL))
5880 return;
5881
5882 if (object_name_length > COL_OBJECT_NAME_SIZE)
5883 object_name_length= COL_OBJECT_NAME_SIZE;
5884 if (schema_name_length > COL_OBJECT_SCHEMA_SIZE)
5885 schema_name_length= COL_OBJECT_SCHEMA_SIZE;
5886
5887 drop_program(pfs_thread,
5888 sp_type_to_object_type(sp_type),
5889 object_name, object_name_length,
5890 schema_name, schema_name_length);
5891 }
5892
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::get_thread_transaction_locker.
  Arms a transaction locker for the current thread, or returns NULL when
  instrumentation is disabled at the global, class or thread level.
  @param state            locker state to initialize, owned by the caller
  @param xid              optional XA transaction id (may be NULL)
  @param trxid            optional storage engine transaction id (may be NULL)
  @param isolation_level  transaction isolation level
  @param read_only        true for a read-only transaction
  @param autocommit       true when autocommit is in effect
  @return the armed locker, or NULL when not instrumented
*/
PSI_transaction_locker*
pfs_get_thread_transaction_locker_v1(PSI_transaction_locker_state *state,
                                     const void *xid,
                                     const ulonglong *trxid,
                                     int isolation_level,
                                     my_bool read_only,
                                     my_bool autocommit)
{
  assert(state != NULL);

  if (!flag_global_instrumentation)
    return NULL;

  if (!global_transaction_class.m_enabled)
    return NULL;

  uint flags;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (!pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (global_transaction_class.m_timed)
      flags|= STATE_FLAG_TIMED;

    if (flag_events_transactions_current)
    {
      /* Allocate an event id for the new transaction event. */
      ulonglong event_id= pfs_thread->m_event_id++;

      /* Initialize this thread's EVENTS_TRANSACTIONS_CURRENT row. */
      PFS_events_transactions *pfs= &pfs_thread->m_transaction_current;
      pfs->m_thread_internal_id = pfs_thread->m_thread_internal_id;
      pfs->m_event_id= event_id;
      pfs->m_event_type= EVENT_TYPE_TRANSACTION;
      pfs->m_end_event_id= 0;
      pfs->m_class= &global_transaction_class;
      pfs->m_timer_start= 0;
      pfs->m_timer_end= 0;
      if (xid != NULL)
        pfs->m_xid= *(PSI_xid *)xid;
      pfs->m_xa= false;
      pfs->m_xa_state= TRANS_STATE_XA_NOTR;
      pfs->m_trxid= (trxid == NULL) ? 0 : *trxid;
      pfs->m_isolation_level= (enum_isolation_level)isolation_level;
      pfs->m_read_only= read_only;
      pfs->m_autocommit= autocommit;
      pfs->m_savepoint_count= 0;
      pfs->m_rollback_to_savepoint_count= 0;
      pfs->m_release_savepoint_count= 0;

      /* Nest the transaction under the current statement, if any. */
      uint statements_count= pfs_thread->m_events_statements_count;
      if (statements_count > 0)
      {
        PFS_events_statements *pfs_statement=
          &pfs_thread->m_statement_stack[statements_count - 1];
        pfs->m_nesting_event_id= pfs_statement->m_event_id;
        pfs->m_nesting_event_type= pfs_statement->m_event_type;
      }
      else
      {
        pfs->m_nesting_event_id= 0;
        /* pfs->m_nesting_event_type not used when m_nesting_event_id is 0 */
      }

      state->m_transaction= pfs;
      flags|= STATE_FLAG_EVENT;
    }
  }
  else
  {
    if (global_transaction_class.m_timed)
      flags= STATE_FLAG_TIMED;
    else
      flags= 0;
  }

  state->m_class= &global_transaction_class;
  state->m_flags= flags;
  state->m_autocommit= autocommit;
  state->m_read_only= read_only;
  state->m_savepoint_count= 0;
  state->m_rollback_to_savepoint_count= 0;
  state->m_release_savepoint_count= 0;

  return reinterpret_cast<PSI_transaction_locker*> (state);
}
5984
pfs_start_transaction_v1(PSI_transaction_locker * locker,const char * src_file,uint src_line)5985 void pfs_start_transaction_v1(PSI_transaction_locker *locker,
5986 const char *src_file, uint src_line)
5987 {
5988 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
5989 assert(state != NULL);
5990
5991 uint flags= state->m_flags;
5992 ulonglong timer_start= 0;
5993
5994 if (flags & STATE_FLAG_TIMED)
5995 {
5996 timer_start= get_timer_raw_value_and_function(transaction_timer, &state->m_timer);
5997 state->m_timer_start= timer_start;
5998 }
5999
6000 if (flags & STATE_FLAG_EVENT)
6001 {
6002 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6003 assert(pfs != NULL);
6004
6005 pfs->m_timer_start= timer_start;
6006 pfs->m_source_file= src_file;
6007 pfs->m_source_line= src_line;
6008 pfs->m_state= TRANS_STATE_ACTIVE;
6009 pfs->m_sid.clear();
6010 pfs->m_gtid_spec.set_automatic();
6011 }
6012 }
6013
pfs_set_transaction_gtid_v1(PSI_transaction_locker * locker,const void * sid,const void * gtid_spec)6014 void pfs_set_transaction_gtid_v1(PSI_transaction_locker *locker,
6015 const void *sid,
6016 const void *gtid_spec)
6017 {
6018 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
6019 assert(state != NULL);
6020 assert(sid != NULL);
6021 assert(gtid_spec != NULL);
6022
6023 if (state->m_flags & STATE_FLAG_EVENT)
6024 {
6025 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6026 assert(pfs != NULL);
6027 pfs->m_sid= *(rpl_sid *)sid;
6028 pfs->m_gtid_spec= *(Gtid_specification *)gtid_spec;
6029 }
6030 }
6031
pfs_set_transaction_xid_v1(PSI_transaction_locker * locker,const void * xid,int xa_state)6032 void pfs_set_transaction_xid_v1(PSI_transaction_locker *locker,
6033 const void *xid,
6034 int xa_state)
6035 {
6036 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
6037 assert(state != NULL);
6038
6039 if (state->m_flags & STATE_FLAG_EVENT)
6040 {
6041 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6042 assert(pfs != NULL);
6043 assert(xid != NULL);
6044
6045 pfs->m_xid= *(PSI_xid *)xid;
6046 pfs->m_xa_state= (enum_xa_transaction_state)xa_state;
6047 pfs->m_xa= true;
6048 }
6049 return;
6050 }
6051
pfs_set_transaction_xa_state_v1(PSI_transaction_locker * locker,int xa_state)6052 void pfs_set_transaction_xa_state_v1(PSI_transaction_locker *locker,
6053 int xa_state)
6054 {
6055 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
6056 assert(state != NULL);
6057
6058 if (state->m_flags & STATE_FLAG_EVENT)
6059 {
6060 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6061 assert(pfs != NULL);
6062
6063 pfs->m_xa_state= (enum_xa_transaction_state)xa_state;
6064 pfs->m_xa= true;
6065 }
6066 return;
6067 }
6068
pfs_set_transaction_trxid_v1(PSI_transaction_locker * locker,const ulonglong * trxid)6069 void pfs_set_transaction_trxid_v1(PSI_transaction_locker *locker,
6070 const ulonglong *trxid)
6071 {
6072 assert(trxid != NULL);
6073
6074 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
6075 assert(state != NULL);
6076
6077 if (state->m_flags & STATE_FLAG_EVENT)
6078 {
6079 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6080 assert(pfs != NULL);
6081
6082 if (pfs->m_trxid == 0)
6083 pfs->m_trxid= *trxid;
6084 }
6085 }
6086
/**
  Common body for the pfs_inc_transaction_*_v1 counters.
  Increments attribute @c ATTR by @c VALUE in the locker state and,
  when an event row is attached (STATE_FLAG_EVENT), in the
  EVENTS_TRANSACTIONS_CURRENT record as well.
  Silently returns when @c LOCKER is NULL (uninstrumented call).
*/
#define INC_TRANSACTION_ATTR_BODY(LOCKER, ATTR, VALUE) \
  PSI_transaction_locker_state *state; \
  state= reinterpret_cast<PSI_transaction_locker_state*> (LOCKER); \
  if (unlikely(state == NULL)) \
    return; \
  state->ATTR+= VALUE; \
  if (state->m_flags & STATE_FLAG_EVENT) \
  { \
    PFS_events_transactions *pfs; \
    pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction); \
    assert(pfs != NULL); \
    pfs->ATTR+= VALUE; \
  } \
  return;
6101
6102
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::inc_transaction_savepoints.
*/
void pfs_inc_transaction_savepoints_v1(PSI_transaction_locker *locker,
                                       ulong count)
{
  INC_TRANSACTION_ATTR_BODY(locker, m_savepoint_count, count);
}
6108
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::inc_transaction_rollback_to_savepoint.
*/
void pfs_inc_transaction_rollback_to_savepoint_v1(PSI_transaction_locker *locker,
                                                  ulong count)
{
  INC_TRANSACTION_ATTR_BODY(locker, m_rollback_to_savepoint_count, count);
}
6114
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::inc_transaction_release_savepoint.
*/
void pfs_inc_transaction_release_savepoint_v1(PSI_transaction_locker *locker,
                                              ulong count)
{
  INC_TRANSACTION_ATTR_BODY(locker, m_release_savepoint_count, count);
}
6120
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::end_transaction.
  Aggregates the completed transaction (timed or counted) into the
  per-thread or global summary, splits the aggregation by read-only /
  read-write, and archives the event row in history when present.
  @param locker the armed transaction locker
  @param commit true on commit, false on rollback
*/
void pfs_end_transaction_v1(PSI_transaction_locker *locker, my_bool commit)
{
  PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
  assert(state != NULL);

  ulonglong timer_end= 0;
  ulonglong wait_time= 0;
  uint flags= state->m_flags;

  if (flags & STATE_FLAG_TIMED)
  {
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;
  }

  PFS_transaction_stat *stat;

  if (flags & STATE_FLAG_THREAD)
  {
    PFS_thread *pfs_thread= reinterpret_cast<PFS_thread *> (state->m_thread);
    assert(pfs_thread != NULL);

    /* Aggregate to EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME */
    stat= &pfs_thread->write_instr_class_transactions_stats()[GLOBAL_TRANSACTION_INDEX];

    if (flags & STATE_FLAG_EVENT)
    {
      PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
      assert(pfs != NULL);

      /* events_transactions_current may have been cleared while the transaction was active */
      if (unlikely(pfs->m_class == NULL))
        return;

      pfs->m_timer_end= timer_end;
      pfs->m_end_event_id= pfs_thread->m_event_id;

      pfs->m_state= (commit ? TRANS_STATE_COMMITTED : TRANS_STATE_ROLLED_BACK);

      if (pfs->m_xa)
        pfs->m_xa_state= (commit ? TRANS_STATE_XA_COMMITTED : TRANS_STATE_XA_ROLLBACK_ONLY);

      /* Archive the finished event in the history tables, if enabled. */
      if (pfs_thread->m_flag_events_transactions_history)
        insert_events_transactions_history(pfs_thread, pfs);
      if (pfs_thread->m_flag_events_transactions_history_long)
        insert_events_transactions_history_long(pfs);
    }
  }
  else
  {
    /* Aggregate to EVENTS_TRANSACTIONS_SUMMARY_GLOBAL_BY_EVENT_NAME */
    stat= &global_transaction_stat;
  }

  if (flags & STATE_FLAG_TIMED)
  {
    /* Aggregate to EVENTS_TRANSACTIONS_SUMMARY_..._BY_EVENT_NAME (timed) */
    if(state->m_read_only)
      stat->m_read_only_stat.aggregate_value(wait_time);
    else
      stat->m_read_write_stat.aggregate_value(wait_time);
  }
  else
  {
    /* Aggregate to EVENTS_TRANSACTIONS_SUMMARY_..._BY_EVENT_NAME (counted) */
    if(state->m_read_only)
      stat->m_read_only_stat.aggregate_counted();
    else
      stat->m_read_write_stat.aggregate_counted();
  }

  /* Fold the savepoint counters collected during the transaction. */
  stat->m_savepoint_count+= state->m_savepoint_count;
  stat->m_rollback_to_savepoint_count+= state->m_rollback_to_savepoint_count;
  stat->m_release_savepoint_count+= state->m_release_savepoint_count;
}
6196
6197
6198 /**
6199 Implementation of the socket instrumentation interface.
6200 @sa PSI_v1::end_socket_wait.
6201 */
void pfs_end_socket_wait_v1(PSI_socket_locker *locker, size_t byte_count)
{
  PSI_socket_locker_state *state= reinterpret_cast<PSI_socket_locker_state*> (locker);
  assert(state != NULL);

  PFS_socket *socket= reinterpret_cast<PFS_socket *>(state->m_socket);
  assert(socket != NULL);

  ulonglong timer_end= 0;
  ulonglong wait_time= 0;
  PFS_byte_stat *byte_stat;
  uint flags= state->m_flags;
  /*
    NOTE(review): the int cast treats a -1 error result passed as size_t
    as zero bytes; byte counts above INT_MAX would also be zeroed --
    confirm callers never pass such sizes.
  */
  size_t bytes= ((int)byte_count > -1 ? byte_count : 0);

  /* Route the wait to the read, write or misc statistic of the socket. */
  switch (state->m_operation)
  {
    /* Group read operations */
    case PSI_SOCKET_RECV:
    case PSI_SOCKET_RECVFROM:
    case PSI_SOCKET_RECVMSG:
      byte_stat= &socket->m_socket_stat.m_io_stat.m_read;
      break;
    /* Group write operations */
    case PSI_SOCKET_SEND:
    case PSI_SOCKET_SENDTO:
    case PSI_SOCKET_SENDMSG:
      byte_stat= &socket->m_socket_stat.m_io_stat.m_write;
      break;
    /* Group remaining operations as miscellaneous */
    case PSI_SOCKET_CONNECT:
    case PSI_SOCKET_CREATE:
    case PSI_SOCKET_BIND:
    case PSI_SOCKET_SEEK:
    case PSI_SOCKET_OPT:
    case PSI_SOCKET_STAT:
    case PSI_SOCKET_SHUTDOWN:
    case PSI_SOCKET_SELECT:
    case PSI_SOCKET_CLOSE:
      byte_stat= &socket->m_socket_stat.m_io_stat.m_misc;
      break;
    default:
      assert(false);
      byte_stat= NULL;
      break;
  }

  /* Aggregation for EVENTS_WAITS_SUMMARY_BY_INSTANCE */
  if (flags & STATE_FLAG_TIMED)
  {
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;

    /* Aggregate to the socket instrument for now (timed) */
    byte_stat->aggregate(wait_time, bytes);
  }
  else
  {
    /* Aggregate to the socket instrument (event count and byte count) */
    byte_stat->aggregate_counted(bytes);
  }

  /* Aggregate to EVENTS_WAITS_HISTORY and EVENTS_WAITS_HISTORY_LONG */
  if (flags & STATE_FLAG_EVENT)
  {
    PFS_thread *thread= reinterpret_cast<PFS_thread *>(state->m_thread);
    assert(thread != NULL);
    PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
    assert(wait != NULL);

    wait->m_timer_end= timer_end;
    wait->m_end_event_id= thread->m_event_id;
    wait->m_number_of_bytes= bytes;

    if (thread->m_flag_events_waits_history)
      insert_events_waits_history(thread, wait);
    if (thread->m_flag_events_waits_history_long)
      insert_events_waits_history_long(wait);
    /* Pop the wait from the per-thread wait stack. */
    thread->m_events_waits_current--;

    assert(wait == thread->m_events_waits_current);
  }
}
6284
pfs_set_socket_state_v1(PSI_socket * socket,PSI_socket_state state)6285 void pfs_set_socket_state_v1(PSI_socket *socket, PSI_socket_state state)
6286 {
6287 assert((state == PSI_SOCKET_STATE_IDLE) || (state == PSI_SOCKET_STATE_ACTIVE));
6288 PFS_socket *pfs= reinterpret_cast<PFS_socket*>(socket);
6289 assert(pfs != NULL);
6290 assert(pfs->m_idle || (state == PSI_SOCKET_STATE_IDLE));
6291 assert(!pfs->m_idle || (state == PSI_SOCKET_STATE_ACTIVE));
6292 pfs->m_idle= (state == PSI_SOCKET_STATE_IDLE);
6293 }
6294
6295 /**
6296 Set socket descriptor and address info.
6297 */
pfs_set_socket_info_v1(PSI_socket * socket,const my_socket * fd,const struct sockaddr * addr,socklen_t addr_len)6298 void pfs_set_socket_info_v1(PSI_socket *socket,
6299 const my_socket *fd,
6300 const struct sockaddr *addr,
6301 socklen_t addr_len)
6302 {
6303 PFS_socket *pfs= reinterpret_cast<PFS_socket*>(socket);
6304 assert(pfs != NULL);
6305
6306 /** Set socket descriptor */
6307 if (fd != NULL)
6308 pfs->m_fd= (uint)*fd;
6309
6310 /** Set raw socket address and length */
6311 if (likely(addr != NULL && addr_len > 0))
6312 {
6313 pfs->m_addr_len= addr_len;
6314
6315 /** Restrict address length to size of struct */
6316 if (unlikely(pfs->m_addr_len > sizeof(sockaddr_storage)))
6317 pfs->m_addr_len= sizeof(struct sockaddr_storage);
6318
6319 memcpy(&pfs->m_sock_addr, addr, pfs->m_addr_len);
6320 }
6321 }
6322
/**
  Implementation of the socket instrumentation interface.
  @sa PSI_v1::set_socket_thread_owner.
*/
pfs_set_socket_thread_owner_v1(PSI_socket * socket)6327 void pfs_set_socket_thread_owner_v1(PSI_socket *socket)
6328 {
6329 PFS_socket *pfs_socket= reinterpret_cast<PFS_socket*>(socket);
6330 assert(pfs_socket != NULL);
6331 pfs_socket->m_thread_owner= my_thread_get_THR_PFS();
6332 }
6333
6334 struct PSI_digest_locker*
pfs_digest_start_v1(PSI_statement_locker * locker)6335 pfs_digest_start_v1(PSI_statement_locker *locker)
6336 {
6337 PSI_statement_locker_state *statement_state;
6338 statement_state= reinterpret_cast<PSI_statement_locker_state*> (locker);
6339 assert(statement_state != NULL);
6340
6341 if (statement_state->m_discarded)
6342 return NULL;
6343
6344 if (statement_state->m_flags & STATE_FLAG_DIGEST)
6345 {
6346 return reinterpret_cast<PSI_digest_locker*> (locker);
6347 }
6348
6349 return NULL;
6350 }
6351
pfs_digest_end_v1(PSI_digest_locker * locker,const sql_digest_storage * digest)6352 void pfs_digest_end_v1(PSI_digest_locker *locker, const sql_digest_storage *digest)
6353 {
6354 PSI_statement_locker_state *statement_state;
6355 statement_state= reinterpret_cast<PSI_statement_locker_state*> (locker);
6356 assert(statement_state != NULL);
6357 assert(digest != NULL);
6358
6359 if (statement_state->m_discarded)
6360 return;
6361
6362 if (statement_state->m_flags & STATE_FLAG_DIGEST)
6363 {
6364 statement_state->m_digest= digest;
6365 }
6366 }
6367
6368 PSI_prepared_stmt*
pfs_create_prepared_stmt_v1(void * identity,uint stmt_id,PSI_statement_locker * locker,const char * stmt_name,size_t stmt_name_length,const char * sql_text,size_t sql_text_length)6369 pfs_create_prepared_stmt_v1(void *identity, uint stmt_id,
6370 PSI_statement_locker *locker,
6371 const char *stmt_name, size_t stmt_name_length,
6372 const char *sql_text, size_t sql_text_length)
6373 {
6374 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
6375 PFS_events_statements *pfs_stmt= reinterpret_cast<PFS_events_statements*> (state->m_statement);
6376 PFS_program *pfs_program= reinterpret_cast<PFS_program *>(state->m_parent_sp_share);
6377
6378 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
6379 if (unlikely(pfs_thread == NULL))
6380 return NULL;
6381
6382 if (sql_text_length > COL_INFO_SIZE)
6383 sql_text_length= COL_INFO_SIZE;
6384
6385 PFS_prepared_stmt *pfs= create_prepared_stmt(identity,
6386 pfs_thread, pfs_program,
6387 pfs_stmt, stmt_id,
6388 stmt_name, stmt_name_length,
6389 sql_text, sql_text_length);
6390
6391 state->m_parent_prepared_stmt= reinterpret_cast<PSI_prepared_stmt*>(pfs);
6392 state->m_in_prepare= true;
6393
6394 return reinterpret_cast<PSI_prepared_stmt*>(pfs);
6395 }
6396
pfs_execute_prepared_stmt_v1(PSI_statement_locker * locker,PSI_prepared_stmt * ps)6397 void pfs_execute_prepared_stmt_v1 (PSI_statement_locker *locker,
6398 PSI_prepared_stmt* ps)
6399 {
6400 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
6401 assert(state != NULL);
6402
6403 state->m_parent_prepared_stmt= ps;
6404 state->m_in_prepare= false;
6405 }
6406
pfs_destroy_prepared_stmt_v1(PSI_prepared_stmt * prepared_stmt)6407 void pfs_destroy_prepared_stmt_v1(PSI_prepared_stmt* prepared_stmt)
6408 {
6409 PFS_prepared_stmt *pfs_prepared_stmt= reinterpret_cast<PFS_prepared_stmt*>(prepared_stmt);
6410 delete_prepared_stmt(pfs_prepared_stmt);
6411 return;
6412 }
6413
pfs_reprepare_prepared_stmt_v1(PSI_prepared_stmt * prepared_stmt)6414 void pfs_reprepare_prepared_stmt_v1(PSI_prepared_stmt* prepared_stmt)
6415 {
6416 PFS_prepared_stmt *pfs_prepared_stmt= reinterpret_cast<PFS_prepared_stmt*>(prepared_stmt);
6417 PFS_single_stat *prepared_stmt_stat= &pfs_prepared_stmt->m_reprepare_stat;
6418
6419 if (prepared_stmt_stat != NULL)
6420 prepared_stmt_stat->aggregate_counted();
6421 return;
6422 }
6423
pfs_set_prepared_stmt_text_v1(PSI_prepared_stmt * prepared_stmt,const char * text,uint text_len)6424 void pfs_set_prepared_stmt_text_v1(PSI_prepared_stmt *prepared_stmt,
6425 const char *text,
6426 uint text_len)
6427 {
6428 PFS_prepared_stmt *pfs_prepared_stmt =
6429 reinterpret_cast<PFS_prepared_stmt *>(prepared_stmt);
6430 assert(pfs_prepared_stmt != NULL);
6431
6432 uint max_len = COL_INFO_SIZE;
6433 if (text_len > max_len)
6434 {
6435 text_len = max_len;
6436 }
6437
6438 memcpy(pfs_prepared_stmt->m_sqltext, text, text_len);
6439 pfs_prepared_stmt->m_sqltext_length = text_len;
6440
6441 return;
6442 }
6443
6444 /**
6445 Implementation of the thread attribute connection interface
6446 @sa PSI_v1::set_thread_connect_attr.
6447 */
int pfs_set_thread_connect_attrs_v1(const char *buffer, uint length,
                                    const void *from_cs)
{
  PFS_thread *thd= my_thread_get_THR_PFS();

  assert(buffer != NULL);

  if (likely(thd != NULL) && session_connect_attrs_size_per_thread > 0)
  {
    pfs_dirty_state dirty_state;
    const CHARSET_INFO *cs = static_cast<const CHARSET_INFO *> (from_cs);

    /* copy from the input buffer as much as we can fit */
    uint copy_size= (uint)(length < session_connect_attrs_size_per_thread ?
                           length : session_connect_attrs_size_per_thread);
    /*
      Publish the new attributes under the session lock, so readers
      never observe a partially updated buffer.
    */
    thd->m_session_lock.allocated_to_dirty(& dirty_state);
    memcpy(thd->m_session_connect_attrs, buffer, copy_size);
    thd->m_session_connect_attrs_length= copy_size;
    thd->m_session_connect_attrs_cs_number= cs->number;
    thd->m_session_lock.dirty_to_allocated(& dirty_state);

    if (copy_size == length)
      return 0;

    /* The buffer was truncated: account the loss and report it. */
    session_connect_attrs_lost++;
    return 1;
  }
  return 0;
}
6477
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::register_memory.
  Registers each PSI_memory_info entry as a memory instrument class.
*/
void pfs_register_memory_v1(const char *category,
                            PSI_memory_info_v1 *info,
                            int count)
{
  REGISTER_BODY_V1(PSI_memory_key,
                   memory_instrument_prefix,
                   register_memory_class)
}
6486
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::memory_alloc.
  Accounts an allocation of @c size bytes against instrument @c key, and
  records in *owner the thread that owns the allocation (NULL when the
  allocation is accounted globally or not instrumented).
  @return key when instrumented, PSI_NOT_INSTRUMENTED otherwise
*/
PSI_memory_key pfs_memory_alloc_v1(PSI_memory_key key, size_t size, PSI_thread **owner)
{
  PFS_thread ** owner_thread= reinterpret_cast<PFS_thread**>(owner);
  assert(owner_thread != NULL);

  if (! flag_global_instrumentation)
  {
    *owner_thread= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  PFS_memory_class *klass= find_memory_class(key);
  if (klass == NULL)
  {
    *owner_thread= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  if (! klass->m_enabled)
  {
    *owner_thread= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  PFS_memory_stat *event_name_array;
  PFS_memory_stat *stat;
  uint index= klass->m_event_name_index;
  PFS_memory_stat_delta delta_buffer;
  PFS_memory_stat_delta *delta;

  if (flag_thread_instrumentation && ! klass->is_global())
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
    {
      *owner_thread= NULL;
      return PSI_NOT_INSTRUMENTED;
    }
    if (! pfs_thread->m_enabled)
    {
      *owner_thread= NULL;
      return PSI_NOT_INSTRUMENTED;
    }

    /* Aggregate to MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME */
    event_name_array= pfs_thread->write_instr_class_memory_stats();
    stat= & event_name_array[index];
    delta= stat->count_alloc(size, &delta_buffer);

    /* Propagate the change to higher-level aggregates, if any. */
    if (delta != NULL)
    {
      pfs_thread->carry_memory_stat_delta(delta, index);
    }

    /* Flag this memory as owned by the current thread. */
    *owner_thread= pfs_thread;
  }
  else
  {
    /* Aggregate to MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME */
    event_name_array= global_instr_class_memory_array;
    stat= & event_name_array[index];
    (void) stat->count_alloc(size, &delta_buffer);

    *owner_thread= NULL;
  }

  return key;
}
6556
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::memory_realloc.
  Re-accounts a block from @c old_size to @c new_size bytes. If the
  instrument became disabled since the allocation, the block is accounted
  as freed and the ownership is dropped.
  @return key when still instrumented, PSI_NOT_INSTRUMENTED otherwise
*/
PSI_memory_key pfs_memory_realloc_v1(PSI_memory_key key, size_t old_size, size_t new_size, PSI_thread **owner)
{
  PFS_thread ** owner_thread_hdl= reinterpret_cast<PFS_thread**>(owner);
  assert(owner != NULL);

  PFS_memory_class *klass= find_memory_class(key);
  if (klass == NULL)
  {
    *owner_thread_hdl= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  PFS_memory_stat *event_name_array;
  PFS_memory_stat *stat;
  uint index= klass->m_event_name_index;
  PFS_memory_stat_delta delta_buffer;
  PFS_memory_stat_delta *delta;

  if (flag_thread_instrumentation && ! klass->is_global())
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (likely(pfs_thread != NULL))
    {
#ifdef PFS_PARANOID
      /* Diagnose reallocations performed by a thread that does not own the block. */
      PFS_thread *owner_thread= *owner_thread_hdl;
      if (owner_thread != pfs_thread)
      {
        owner_thread= sanitize_thread(owner_thread);
        if (owner_thread != NULL)
        {
          report_memory_accounting_error("pfs_memory_realloc_v1",
            pfs_thread, old_size, klass, owner_thread);
        }
      }
#endif /* PFS_PARANOID */

      /* Aggregate to MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME */
      event_name_array= pfs_thread->write_instr_class_memory_stats();
      stat= & event_name_array[index];

      if (flag_global_instrumentation && klass->m_enabled)
      {
        delta= stat->count_realloc(old_size, new_size, &delta_buffer);
        *owner_thread_hdl= pfs_thread;
      }
      else
      {
        /* Instrument disabled since the alloc: account as a free. */
        delta= stat->count_free(old_size, &delta_buffer);
        *owner_thread_hdl= NULL;
        key= PSI_NOT_INSTRUMENTED;
      }

      /* Propagate the change to higher-level aggregates, if any. */
      if (delta != NULL)
      {
        pfs_thread->carry_memory_stat_delta(delta, index);
      }
      return key;
    }
  }

  /* Aggregate to MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME */
  event_name_array= global_instr_class_memory_array;
  stat= & event_name_array[index];

  if (flag_global_instrumentation && klass->m_enabled)
  {
    (void) stat->count_realloc(old_size, new_size, &delta_buffer);
  }
  else
  {
    (void) stat->count_free(old_size, &delta_buffer);
    key= PSI_NOT_INSTRUMENTED;
  }

  *owner_thread_hdl= NULL;
  return key;
}
6634
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::memory_claim.
  Transfers accounting of @c size bytes from the previous owner thread to
  the current thread (free on the old owner, alloc on the new one).
  @return key, unchanged
*/
PSI_memory_key pfs_memory_claim_v1(PSI_memory_key key, size_t size, PSI_thread **owner)
{
  PFS_thread ** owner_thread= reinterpret_cast<PFS_thread**>(owner);
  assert(owner_thread != NULL);

  PFS_memory_class *klass= find_memory_class(key);
  if (klass == NULL)
  {
    *owner_thread= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  /*
    Do not check klass->m_enabled.
    Do not check flag_global_instrumentation.
    If a memory alloc was instrumented,
    the corresponding free must be instrumented.
  */

  PFS_memory_stat *event_name_array;
  PFS_memory_stat *stat;
  uint index= klass->m_event_name_index;
  PFS_memory_stat_delta delta_buffer;
  PFS_memory_stat_delta *delta;

  if (flag_thread_instrumentation)
  {
    PFS_thread *old_thread= sanitize_thread(*owner_thread);
    PFS_thread *new_thread= my_thread_get_THR_PFS();
    if (old_thread != new_thread)
    {
      /* Un-account the bytes from the previous owner, if still valid. */
      if (old_thread != NULL)
      {
        event_name_array= old_thread->write_instr_class_memory_stats();
        stat= & event_name_array[index];
        delta= stat->count_free(size, &delta_buffer);

        if (delta != NULL)
        {
          old_thread->carry_memory_stat_delta(delta, index);
        }
      }

      /* Account the bytes to the claiming thread. */
      if (new_thread != NULL)
      {
        event_name_array= new_thread->write_instr_class_memory_stats();
        stat= & event_name_array[index];
        delta= stat->count_alloc(size, &delta_buffer);

        if (delta != NULL)
        {
          new_thread->carry_memory_stat_delta(delta, index);
        }
      }

      *owner_thread= new_thread;
    }

    return key;
  }

  *owner_thread= NULL;
  return key;
}
6699
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::memory_free.
  Un-accounts @c size bytes from instrument @c key, against the current
  thread when thread instrumentation applies, otherwise globally.
*/
void pfs_memory_free_v1(PSI_memory_key key, size_t size, PSI_thread *owner)
{
  PFS_memory_class *klass= find_memory_class(key);
  if (klass == NULL)
    return;

  /*
    Do not check klass->m_enabled.
    Do not check flag_global_instrumentation.
    If a memory alloc was instrumented,
    the corresponding free must be instrumented.
  */

  PFS_memory_stat *event_name_array;
  PFS_memory_stat *stat;
  uint index= klass->m_event_name_index;
  PFS_memory_stat_delta delta_buffer;
  PFS_memory_stat_delta *delta;

  if (flag_thread_instrumentation && ! klass->is_global())
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (likely(pfs_thread != NULL))
    {
#ifdef PFS_PARANOID
      /* Diagnose frees performed by a thread that does not own the block. */
      PFS_thread *owner_thread= reinterpret_cast<PFS_thread*>(owner);

      if (owner_thread != pfs_thread)
      {
        owner_thread= sanitize_thread(owner_thread);
        if (owner_thread != NULL)
        {
          report_memory_accounting_error("pfs_memory_free_v1",
            pfs_thread, size, klass, owner_thread);
        }
      }
#endif /* PFS_PARANOID */

      /*
        Do not check pfs_thread->m_enabled.
        If a memory alloc was instrumented,
        the corresponding free must be instrumented.
      */
      /* Aggregate to MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME */
      event_name_array= pfs_thread->write_instr_class_memory_stats();
      stat= & event_name_array[index];
      delta= stat->count_free(size, &delta_buffer);

      /* Propagate the change to higher-level aggregates, if any. */
      if (delta != NULL)
      {
        pfs_thread->carry_memory_stat_delta(delta, index);
      }
      return;
    }
  }

  /* Aggregate to MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME */
  event_name_array= global_instr_class_memory_array;
  if (event_name_array)
  {
    stat= & event_name_array[index];
    (void) stat->count_free(size, &delta_buffer);
  }
  return;
}
6765
pfs_unlock_table_v1(PSI_table * table)6766 void pfs_unlock_table_v1(PSI_table *table)
6767 {
6768 PFS_table *pfs_table= reinterpret_cast<PFS_table*> (table);
6769
6770 assert(pfs_table != NULL);
6771
6772 pfs_table->m_internal_lock= PFS_TL_NONE;
6773 return;
6774 }
6775
6776 PSI_metadata_lock *
pfs_create_metadata_lock_v1(void * identity,const MDL_key * mdl_key,opaque_mdl_type mdl_type,opaque_mdl_duration mdl_duration,opaque_mdl_status mdl_status,const char * src_file,uint src_line)6777 pfs_create_metadata_lock_v1(
6778 void *identity,
6779 const MDL_key *mdl_key,
6780 opaque_mdl_type mdl_type,
6781 opaque_mdl_duration mdl_duration,
6782 opaque_mdl_status mdl_status,
6783 const char *src_file,
6784 uint src_line)
6785 {
6786 if (! flag_global_instrumentation)
6787 return NULL;
6788
6789 if (! global_metadata_class.m_enabled)
6790 return NULL;
6791
6792 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
6793 if (pfs_thread == NULL)
6794 return NULL;
6795
6796 PFS_metadata_lock *pfs;
6797 pfs= create_metadata_lock(identity, mdl_key,
6798 mdl_type, mdl_duration, mdl_status,
6799 src_file, src_line);
6800
6801 if (pfs != NULL)
6802 {
6803 pfs->m_owner_thread_id= pfs_thread->m_thread_internal_id;
6804 pfs->m_owner_event_id= pfs_thread->m_event_id;
6805 }
6806
6807 return reinterpret_cast<PSI_metadata_lock *> (pfs);
6808 }
6809
6810 void
pfs_set_metadata_lock_status_v1(PSI_metadata_lock * lock,opaque_mdl_status mdl_status)6811 pfs_set_metadata_lock_status_v1(PSI_metadata_lock *lock, opaque_mdl_status mdl_status)
6812 {
6813 PFS_metadata_lock *pfs= reinterpret_cast<PFS_metadata_lock*> (lock);
6814 assert(pfs != NULL);
6815 pfs->m_mdl_status= mdl_status;
6816 }
6817
6818 void
pfs_destroy_metadata_lock_v1(PSI_metadata_lock * lock)6819 pfs_destroy_metadata_lock_v1(PSI_metadata_lock *lock)
6820 {
6821 PFS_metadata_lock *pfs= reinterpret_cast<PFS_metadata_lock*> (lock);
6822 assert(pfs != NULL);
6823 destroy_metadata_lock(pfs);
6824 }
6825
/**
  Implementation of the metadata lock wait start instrumentation.
  Builds a metadata wait locker in @c state, optionally starting a timer
  and pushing an event on the per-thread wait stack.
  @param state     locker state to initialize, owned by the caller
  @param lock      the instrumented metadata lock being waited on
  @param src_file  source file of the caller
  @param src_line  source line of the caller
  @return the locker, or NULL when this wait is not instrumented
*/
PSI_metadata_locker *
pfs_start_metadata_wait_v1(PSI_metadata_locker_state *state,
                           PSI_metadata_lock *lock,
                           const char *src_file,
                           uint src_line)
{
  PFS_metadata_lock *pfs_lock= reinterpret_cast<PFS_metadata_lock*> (lock);
  assert(state != NULL);
  assert(pfs_lock != NULL);

  /* Nothing to record when this lock instance is not enabled. */
  if (! pfs_lock->m_enabled)
    return NULL;

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    /* Per-thread instrumentation: requires an enabled PFS thread. */
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_lock->m_timed)
    {
      /* Capture the start time and the timer function for the end call. */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Guard against wait stack overflow; count the lost locker. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* Link this wait to its parent event (the previous stack slot). */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= &global_metadata_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_lock->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      /* Weak reference: the lock may be destroyed before the wait ends,
         so record its version to validate the reference later. */
      wait->m_weak_metadata_lock= pfs_lock;
      wait->m_weak_version= pfs_lock->get_version();
      wait->m_operation= OPERATION_TYPE_METADATA;
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_METADATA;

      /* Push the event on the wait stack. */
      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_lock->m_timed)
    {
      /* Timed but not attributed to a thread. */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
      state->m_thread= NULL;
    }
    else
    {
      /*
        Complete shortcut.
      */
      /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (counted) */
      global_metadata_stat.aggregate_counted();
      return NULL;
    }
  }

  state->m_flags= flags;
  state->m_metadata_lock= lock;
  return reinterpret_cast<PSI_metadata_locker*> (state);
}
6917
/**
  Implementation of the metadata lock wait end instrumentation.
  Computes the wait time when timed, aggregates the wait to the
  per-thread or global metadata statistics, and completes (and pops)
  the wait event when one was started.
  @param locker  the locker returned by pfs_start_metadata_wait_v1
  @param rc      wait outcome from the caller (currently not aggregated here)
*/
void
pfs_end_metadata_wait_v1(PSI_metadata_locker *locker,
                         int rc)
{
  PSI_metadata_locker_state *state= reinterpret_cast<PSI_metadata_locker_state*> (locker);
  assert(state != NULL);

  ulonglong timer_end= 0;
  ulonglong wait_time= 0;

  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);

  uint flags= state->m_flags;

  if (flags & STATE_FLAG_TIMED)
  {
    /* Read the end time with the same timer captured at start. */
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;
  }

  if (flags & STATE_FLAG_THREAD)
  {
    PFS_single_stat *event_name_array;
    event_name_array= thread->write_instr_class_waits_stats();

    if (flags & STATE_FLAG_TIMED)
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
      event_name_array[GLOBAL_METADATA_EVENT_INDEX].aggregate_value(wait_time);
    }
    else
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
      event_name_array[GLOBAL_METADATA_EVENT_INDEX].aggregate_counted();
    }

    if (flags & STATE_FLAG_EVENT)
    {
      PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
      assert(wait != NULL);

      /* Close the wait event, then pop it from the wait stack. */
      wait->m_timer_end= timer_end;
      wait->m_end_event_id= thread->m_event_id;
      if (thread->m_flag_events_waits_history)
        insert_events_waits_history(thread, wait);
      if (thread->m_flag_events_waits_history_long)
        insert_events_waits_history_long(wait);
      thread->m_events_waits_current--;

      /* The popped slot must be the event started by this locker. */
      assert(wait == thread->m_events_waits_current);
    }
  }
  else
  {
    if (flags & STATE_FLAG_TIMED)
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (timed) */
      global_metadata_stat.aggregate_value(wait_time);
    }
    else
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (counted) */
      global_metadata_stat.aggregate_counted();
    }
  }
}
6984
6985 /**
6986 Implementation of the instrumentation interface.
6987 @sa PSI_v1.
6988 */
6989 PSI_v1 PFS_v1=
6990 {
6991 pfs_register_mutex_v1,
6992 pfs_register_rwlock_v1,
6993 pfs_register_cond_v1,
6994 pfs_register_thread_v1,
6995 pfs_register_file_v1,
6996 pfs_register_stage_v1,
6997 pfs_register_statement_v1,
6998 pfs_register_socket_v1,
6999 pfs_init_mutex_v1,
7000 pfs_destroy_mutex_v1,
7001 pfs_init_rwlock_v1,
7002 pfs_destroy_rwlock_v1,
7003 pfs_init_cond_v1,
7004 pfs_destroy_cond_v1,
7005 pfs_init_socket_v1,
7006 pfs_destroy_socket_v1,
7007 pfs_get_table_share_v1,
7008 pfs_release_table_share_v1,
7009 pfs_drop_table_share_v1,
7010 pfs_open_table_v1,
7011 pfs_unbind_table_v1,
7012 pfs_rebind_table_v1,
7013 pfs_close_table_v1,
7014 pfs_create_file_v1,
7015 pfs_spawn_thread_v1,
7016 pfs_new_thread_v1,
7017 pfs_set_thread_id_v1,
7018 pfs_set_thread_THD_v1,
7019 pfs_set_thread_os_id_v1,
7020 pfs_get_thread_v1,
7021 pfs_set_thread_user_v1,
7022 pfs_set_thread_account_v1,
7023 pfs_set_thread_db_v1,
7024 pfs_set_thread_command_v1,
7025 pfs_set_connection_type_v1,
7026 pfs_set_thread_start_time_v1,
7027 pfs_set_thread_state_v1,
7028 pfs_set_thread_info_v1,
7029 pfs_set_thread_v1,
7030 pfs_delete_current_thread_v1,
7031 pfs_delete_thread_v1,
7032 pfs_get_thread_file_name_locker_v1,
7033 pfs_get_thread_file_stream_locker_v1,
7034 pfs_get_thread_file_descriptor_locker_v1,
7035 pfs_unlock_mutex_v1,
7036 pfs_unlock_rwlock_v1,
7037 pfs_signal_cond_v1,
7038 pfs_broadcast_cond_v1,
7039 pfs_start_idle_wait_v1,
7040 pfs_end_idle_wait_v1,
7041 pfs_start_mutex_wait_v1,
7042 pfs_end_mutex_wait_v1,
7043 pfs_start_rwlock_rdwait_v1,
7044 pfs_end_rwlock_rdwait_v1,
7045 pfs_start_rwlock_wrwait_v1,
7046 pfs_end_rwlock_wrwait_v1,
7047 pfs_start_cond_wait_v1,
7048 pfs_end_cond_wait_v1,
7049 pfs_start_table_io_wait_v1,
7050 pfs_end_table_io_wait_v1,
7051 pfs_start_table_lock_wait_v1,
7052 pfs_end_table_lock_wait_v1,
7053 pfs_start_file_open_wait_v1,
7054 pfs_end_file_open_wait_v1,
7055 pfs_end_file_open_wait_and_bind_to_descriptor_v1,
7056 pfs_end_temp_file_open_wait_and_bind_to_descriptor_v1,
7057 pfs_start_file_wait_v1,
7058 pfs_end_file_wait_v1,
7059 pfs_start_file_close_wait_v1,
7060 pfs_end_file_close_wait_v1,
7061 pfs_end_file_rename_wait_v1,
7062 pfs_start_stage_v1,
7063 pfs_get_current_stage_progress_v1,
7064 pfs_end_stage_v1,
7065 pfs_get_thread_statement_locker_v1,
7066 pfs_refine_statement_v1,
7067 pfs_start_statement_v1,
7068 pfs_set_statement_text_v1,
7069 pfs_set_statement_lock_time_v1,
7070 pfs_set_statement_rows_sent_v1,
7071 pfs_set_statement_rows_examined_v1,
7072 pfs_inc_statement_created_tmp_disk_tables_v1,
7073 pfs_inc_statement_created_tmp_tables_v1,
7074 pfs_inc_statement_select_full_join_v1,
7075 pfs_inc_statement_select_full_range_join_v1,
7076 pfs_inc_statement_select_range_v1,
7077 pfs_inc_statement_select_range_check_v1,
7078 pfs_inc_statement_select_scan_v1,
7079 pfs_inc_statement_sort_merge_passes_v1,
7080 pfs_inc_statement_sort_range_v1,
7081 pfs_inc_statement_sort_rows_v1,
7082 pfs_inc_statement_sort_scan_v1,
7083 pfs_set_statement_no_index_used_v1,
7084 pfs_set_statement_no_good_index_used_v1,
7085 pfs_end_statement_v1,
7086 pfs_get_thread_transaction_locker_v1,
7087 pfs_start_transaction_v1,
7088 pfs_set_transaction_xid_v1,
7089 pfs_set_transaction_xa_state_v1,
7090 pfs_set_transaction_gtid_v1,
7091 pfs_set_transaction_trxid_v1,
7092 pfs_inc_transaction_savepoints_v1,
7093 pfs_inc_transaction_rollback_to_savepoint_v1,
7094 pfs_inc_transaction_release_savepoint_v1,
7095 pfs_end_transaction_v1,
7096 pfs_start_socket_wait_v1,
7097 pfs_end_socket_wait_v1,
7098 pfs_set_socket_state_v1,
7099 pfs_set_socket_info_v1,
7100 pfs_set_socket_thread_owner_v1,
7101 pfs_create_prepared_stmt_v1,
7102 pfs_destroy_prepared_stmt_v1,
7103 pfs_reprepare_prepared_stmt_v1,
7104 pfs_execute_prepared_stmt_v1,
7105 pfs_set_prepared_stmt_text_v1,
7106 pfs_digest_start_v1,
7107 pfs_digest_end_v1,
7108 pfs_set_thread_connect_attrs_v1,
7109 pfs_start_sp_v1,
7110 pfs_end_sp_v1,
7111 pfs_drop_sp_v1,
7112 pfs_get_sp_share_v1,
7113 pfs_release_sp_share_v1,
7114 pfs_register_memory_v1,
7115 pfs_memory_alloc_v1,
7116 pfs_memory_realloc_v1,
7117 pfs_memory_claim_v1,
7118 pfs_memory_free_v1,
7119 pfs_unlock_table_v1,
7120 pfs_create_metadata_lock_v1,
7121 pfs_set_metadata_lock_status_v1,
7122 pfs_destroy_metadata_lock_v1,
7123 pfs_start_metadata_wait_v1,
7124 pfs_end_metadata_wait_v1
7125 };
7126
get_interface(int version)7127 static void* get_interface(int version)
7128 {
7129 switch (version)
7130 {
7131 case PSI_VERSION_1:
7132 return &PFS_v1;
7133 default:
7134 return NULL;
7135 }
7136 }
7137
7138 C_MODE_END
7139
/** Bootstrap structure, exposing the interface lookup entry point. */
struct PSI_bootstrap PFS_bootstrap=
{
  get_interface
};
7144