1 /* Copyright (c) 2008, 2021, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software Foundation,
21 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
22
23 /**
24 @file storage/perfschema/pfs.cc
25 The performance schema implementation of all instruments.
26 */
27 #include "my_global.h"
28 #include "thr_lock.h"
29
30 /* Make sure exported prototypes match the implementation. */
31 #include "pfs_file_provider.h"
32 #include "pfs_idle_provider.h"
33 #include "pfs_memory_provider.h"
34 #include "pfs_metadata_provider.h"
35 #include "pfs_socket_provider.h"
36 #include "pfs_stage_provider.h"
37 #include "pfs_statement_provider.h"
38 #include "pfs_table_provider.h"
39 #include "pfs_thread_provider.h"
40 #include "pfs_transaction_provider.h"
41
42 #include "mysql/psi/psi.h"
43 #include "mysql/psi/mysql_thread.h"
44 #include "my_thread.h"
45 #include "sql_const.h"
46 #include "pfs.h"
47 #include "pfs_instr_class.h"
48 #include "pfs_instr.h"
49 #include "pfs_host.h"
50 #include "pfs_user.h"
51 #include "pfs_account.h"
52 #include "pfs_global.h"
53 #include "pfs_column_values.h"
54 #include "pfs_timer.h"
55 #include "pfs_events_waits.h"
56 #include "pfs_events_stages.h"
57 #include "pfs_events_statements.h"
58 #include "pfs_events_transactions.h"
59 #include "pfs_setup_actor.h"
60 #include "pfs_setup_object.h"
61 #include "sql_error.h"
62 #include "sql_acl.h"
63 #include "sp_head.h"
64 #include "mdl.h" /* mdl_key_init */
65 #include "pfs_digest.h"
66 #include "pfs_program.h"
67 #include "pfs_prepared_stmt.h"
68
69 using std::min;
70
71 /*
72 This is a development tool to investigate memory statistics,
73 do not use in production.
74 */
75 #undef PFS_PARANOID
76
77 #ifdef PFS_PARANOID
report_memory_accounting_error(const char * api_name,PFS_thread * new_thread,size_t size,PFS_memory_class * klass,PFS_thread * old_thread)78 static void report_memory_accounting_error(
79 const char *api_name,
80 PFS_thread *new_thread,
81 size_t size,
82 PFS_memory_class *klass,
83 PFS_thread *old_thread)
84 {
85 pfs_print_error("%s "
86 "thread <%d> of class <%s> "
87 "not owner of <%d> bytes in class <%s> "
88 "allocated by thread <%d> of class <%s>\n",
89 api_name,
90 new_thread->m_thread_internal_id,
91 new_thread->m_class->m_name,
92 size, klass->m_name,
93 old_thread->m_thread_internal_id,
94 old_thread->m_class->m_name);
95
96 assert(strcmp(new_thread->m_class->m_name, "thread/sql/event_worker") != 0);
97 assert(strcmp(new_thread->m_class->m_name, "thread/sql/event_scheduler") != 0);
98 assert(strcmp(new_thread->m_class->m_name, "thread/sql/one_connection") != 0);
99 }
100 #endif /* PFS_PARANOID */
101
102 /**
103 @page PAGE_PERFORMANCE_SCHEMA The Performance Schema main page
104 MySQL PERFORMANCE_SCHEMA implementation.
105
106 @section INTRO Introduction
107 The PERFORMANCE_SCHEMA is a way to introspect the internal execution of
108 the server at runtime.
109 The performance schema focuses primarily on performance data,
110 as opposed to the INFORMATION_SCHEMA whose purpose is to inspect metadata.
111
112 From a user point of view, the performance schema consists of:
113 - a dedicated database schema, named PERFORMANCE_SCHEMA,
114 - SQL tables, used to query the server internal state or change
115 configuration settings.
116
117 From an implementation point of view, the performance schema is a dedicated
118 Storage Engine which exposes data collected by 'Instrumentation Points'
119 placed in the server code.
120
121 @section INTERFACES Multiple interfaces
122
123 The performance schema exposes many different interfaces,
124 for different components, and for different purposes.
125
126 @subsection INT_INSTRUMENTING Instrumenting interface
127
128 All the data representing the server internal state exposed
129 in the performance schema must be first collected:
130 this is the role of the instrumenting interface.
131 The instrumenting interface is a coding interface provided
132 by implementors (of the performance schema) to implementors
133 (of the server or server components).
134
135 This interface is available to:
136 - C implementations
137 - C++ implementations
138 - the core SQL layer (/sql)
139 - the mysys library (/mysys)
140 - MySQL plugins, including storage engines,
141 - third party plugins, including third party storage engines.
142
143 For details, see the @ref PAGE_INSTRUMENTATION_INTERFACE
144 "instrumentation interface page".
145
146 @subsection INT_COMPILING Compiling interface
147
148 The implementation of the performance schema can be enabled or disabled at
149 build time, when building MySQL from the source code.
150
151 When building with the performance schema code, some compilation flags
152 are available to change the default values used in the code, if required.
153
154 For more details, see:
155 @verbatim ./configure --help @endverbatim
156
157 To compile with the performance schema:
158 @verbatim ./configure --with-perfschema @endverbatim
159
160 The implementation of all the compiling options is located in
161 @verbatim ./storage/perfschema/plug.in @endverbatim
162
163 @subsection INT_STARTUP Server startup interface
164
165 The server startup interface consists of the "./mysqld ..."
166 command line used to start the server.
167 When the performance schema is compiled in the server binary,
168 extra command line options are available.
169
170 These extra start options allow the DBA to:
171 - enable or disable the performance schema
172 - specify some sizing parameters.
173
174 To see help for the performance schema startup options, see:
175 @verbatim ./sql/mysqld --verbose --help @endverbatim
176
177 The implementation of all the startup options is located in
178 @verbatim ./sql/mysqld.cc, my_long_options[] @endverbatim
179
180 @subsection INT_BOOTSTRAP Server bootstrap interface
181
182 The bootstrap interface is a private interface exposed by
183 the performance schema, and used by the SQL layer.
184 Its role is to advertise all the SQL tables natively
185 supported by the performance schema to the SQL server.
186 The code consists of creating MySQL tables for the
187 performance schema itself, and is used in './mysqld --bootstrap'
188 mode when a server is installed.
189
190 The implementation of the database creation script is located in
191 @verbatim ./scripts/mysql_system_tables.sql @endverbatim
192
193 @subsection INT_CONFIG Runtime configuration interface
194
195 When the performance schema is used at runtime, various configuration
196 parameters can be used to specify what kind of data is collected,
197 what kind of aggregations are computed, what kind of timers are used,
198 what events are timed, etc.
199
200 For all these capabilities, not a single statement or special syntax
201 was introduced in the parser.
202 Instead of new SQL statements, the interface consists of DML
203 (SELECT, INSERT, UPDATE, DELETE) against special "SETUP" tables.
204
205 For example:
206 @verbatim mysql> update performance_schema.SETUP_INSTRUMENTS
207 set ENABLED='YES', TIMED='YES';
208 Query OK, 234 rows affected (0.00 sec)
209 Rows matched: 234 Changed: 234 Warnings: 0 @endverbatim
210
211 @subsection INT_STATUS Internal audit interface
212
213 The internal audit interface is provided to the DBA to inspect if the
214 performance schema code itself is functioning properly.
215 This interface is necessary because a failure caused while
216 instrumenting code in the server should not cause failures in the
217 MySQL server itself, so that the performance schema implementation
218 never raises errors during runtime execution.
219
220 This auditing interface consists of:
221 @verbatim SHOW ENGINE PERFORMANCE_SCHEMA STATUS; @endverbatim
222 It displays data related to the memory usage of the performance schema,
223 as well as statistics about lost events, if any.
224
225 The SHOW STATUS command is implemented in
226 @verbatim ./storage/perfschema/pfs_engine_table.cc @endverbatim
227
228 @subsection INT_QUERY Query interface
229
230 The query interface is used to query the internal state of a running server.
231 It is provided as SQL tables.
232
233 For example:
234 @verbatim mysql> select * from performance_schema.EVENTS_WAITS_CURRENT;
235 @endverbatim
236
237 @section DESIGN_PRINCIPLES Design principles
238
239 @subsection PRINCIPLE_BEHAVIOR No behavior changes
240
241 The primary goal of the performance schema is to measure (instrument) the
242 execution of the server. A good measure should not cause any change
243 in behavior.
244
245 To achieve this, the overall design of the performance schema complies
246 with the following very severe design constraints:
247
248 The parser is unchanged. There are no new keywords, no new statements.
249 This guarantees that existing applications will run the same way with or
250 without the performance schema.
251
252 All the instrumentation points return "void", there are no error codes.
253 Even if the performance schema internally fails, execution of the server
254 code will proceed.
255
256 None of the instrumentation points allocate memory.
257 All the memory used by the performance schema is pre-allocated at startup,
258 and is considered "static" during the server life time.
259
260 None of the instrumentation points use any pthread_mutex, pthread_rwlock,
261 or pthread_cond (or platform equivalents).
262 Executing the instrumentation point should not cause thread scheduling to
263 change in the server.
264
265 In other words, the implementation of the instrumentation points,
266 including all the code called by the instrumentation points, is:
267 - malloc free
268 - mutex free
269 - rwlock free
270
271 TODO: All the code located in storage/perfschema is malloc free,
272 but unfortunately the usage of LF_HASH introduces some memory allocation.
273 This should be revised if possible, to use a lock-free,
274 malloc-free hash code table.
275
276 @subsection PRINCIPLE_PERFORMANCE No performance hit
277
278 The instrumentation of the server should be as fast as possible.
279 In cases when there are choices between:
280 - doing some processing when recording the performance data
281 in the instrumentation,
282 - doing some processing when retrieving the performance data,
283
284 priority is given in the design to make the instrumentation faster,
285 pushing some complexity to data retrieval.
286
287 As a result, some parts of the design, related to:
288 - the setup code path,
289 - the query code path,
290
291 might appear to be sub-optimal.
292
293 The criterion used here is to optimize primarily the critical path (data
294 collection), possibly at the expense of non-critical code paths.
295
296 @subsection PRINCIPLE_NOT_INTRUSIVE Unintrusive instrumentation
297
298 For the performance schema in general to be successful, the barrier
299 of entry for a developer should be low, so it's easy to instrument code.
300
301 In particular, the instrumentation interface:
302 - is available for C and C++ code (so it's a C interface),
303 - does not require parameters that the calling code can't easily provide,
304 - supports partial instrumentation (for example, instrumenting mutexes does
305 not require that every mutex is instrumented)
306
307 @subsection PRINCIPLE_EXTENDABLE Extendable instrumentation
308
309 As the content of the performance schema improves,
310 with more tables exposed and more data collected,
311 the instrumentation interface will also be augmented
312 to support instrumenting new concepts.
313 Existing instrumentations should not be affected when additional
314 instrumentation is made available, and making a new instrumentation
315 available should not require existing instrumented code to support it.
316
317 @subsection PRINCIPLE_VERSIONED Versioned instrumentation
318
319 Given that the instrumentation offered by the performance schema will
320 be augmented with time, when more features are implemented,
321 the interface itself should be versioned, to keep compatibility
322 with previous instrumented code.
323
324 For example, after both plugin-A and plugin-B have been instrumented for
325 mutexes, read write locks and conditions, using the instrumentation
326 interface, we can anticipate that the instrumentation interface
327 is expanded to support file based operations.
328
329 Plugin-A, a file based storage engine, will most likely use the expanded
330 interface and instrument its file usage, using the version 2
331 interface, while Plugin-B, a network based storage engine, will not change
332 its code and not release a new binary.
333
334 When later the instrumentation interface is expanded to support network
335 based operations (which will define interface version 3), the Plugin-B code
336 can then be changed to make use of it.
337
338 Note, this is just an example to illustrate the design concept here.
339 Both mutexes and file instrumentation are already available
340 since version 1 of the instrumentation interface.
341
342 @subsection PRINCIPLE_DEPLOYMENT Easy deployment
343
344 Internally, we might want every plugin implementation to upgrade the
345 instrumented code to the latest available, but this will cause additional
346 work and this is not practical if the code change is monolithic.
347
348 Externally, for third party plugin implementors, asking implementors to
349 always stay aligned to the latest instrumentation and make new releases,
350 even when the change does not provide new functionality for them,
351 is a bad idea.
352
353 For example, requiring a network based engine to re-release because the
354 instrumentation interface changed for file based operations, will create
355 too many deployment issues.
356
357 So, the performance schema implementation must support concurrently,
358 in the same deployment, multiple versions of the instrumentation
359 interface, and ensure binary compatibility with each version.
360
361 In addition to this, the performance schema can be included or excluded
362 from the server binary, using build time configuration options.
363
364 Regardless, the following types of deployment are valid:
365 - a server supporting the performance schema + a storage engine
366 that is not instrumented
367 - a server not supporting the performance schema + a storage engine
368 that is instrumented
369 */
370
371 /**
372 @page PAGE_INSTRUMENTATION_INTERFACE Performance schema: instrumentation interface page.
373 MySQL performance schema instrumentation interface.
374
375 @section INTRO Introduction
376
377 The instrumentation interface consists of two layers:
378 - a raw ABI (Application Binary Interface) layer, that exposes the primitive
379 instrumentation functions exported by the performance schema instrumentation
380 - an API (Application Programming Interface) layer,
381 that provides many helpers for a developer instrumenting some code,
382 to make the instrumentation as easy as possible.
383
384 The ABI layer consists of:
385 @code
386 #include "mysql/psi/psi.h"
387 @endcode
388
389 The API layer consists of:
390 @code
391 #include "mysql/psi/mysql_mutex.h"
392 #include "mysql/psi/mysql_file.h"
393 @endcode
394
395 The first helper is for mutexes, rwlocks and conditions,
396 the second for file io.
397
398 The API layer exposes C macros and typedefs which will expand:
399 - either to non-instrumented code, when compiled without the performance
400 schema instrumentation
401 - or to instrumented code, that will issue the raw calls to the ABI layer
402 so that the implementation can collect data.
403
404 Note that all the names introduced (for example, @c mysql_mutex_lock) do not
405 collide with any other namespace.
406 In particular, the macro @c mysql_mutex_lock is on purpose not named
407 @c pthread_mutex_lock.
408 This is to:
409 - avoid overloading @c pthread_mutex_lock with yet another macro,
410 which is dangerous as it can affect user code and pollute
411 the end-user namespace.
412 - allow the developer instrumenting code to selectively instrument
413 some code but not all.
414
415 @section PRINCIPLES Design principles
416
417 The ABI part is designed as a facade, that exposes basic primitives.
418 The expectation is that each primitive will be very stable over time,
419 but the list will constantly grow when more instruments are supported.
420 To support binary compatibility with plugins compiled with a different
421 version of the instrumentation, the ABI itself is versioned
422 (see @c PSI_v1, @c PSI_v2).
423
424 For a given instrumentation point in the API, the basic coding pattern
425 used is:
426 - (a) notify the performance schema of the operation
427 about to be performed.
428 - (b) execute the instrumented code.
429 - (c) notify the performance schema that the operation
430 is completed.
431
432 An opaque "locker" pointer is returned by (a), that is given to (c).
433 This pointer helps the implementation to keep context, for performance.
434
435 The following code fragment is annotated to show how in detail this pattern
436 is implemented, when the instrumentation is compiled in:
437
438 @verbatim
439 static inline int mysql_mutex_lock(
440 mysql_mutex_t *that, myf flags, const char *src_file, uint src_line)
441 {
442 int result;
443 struct PSI_mutex_locker_state state;
444 struct PSI_mutex_locker *locker= NULL;
445
446 ............... (a)
447 locker= PSI_MUTEX_CALL(start_mutex_wait)(&state, that->p_psi, PSI_MUTEX_LOCK,
448 locker, src_file, src_line);
449
450 ............... (b)
451 result= pthread_mutex_lock(&that->m_mutex);
452
453 ............... (c)
454 PSI_MUTEX_CALL(end_mutex_wait)(locker, result);
455
456 return result;
457 }
458 @endverbatim
459
460 When the performance schema instrumentation is not compiled in,
461 the code becomes simply a wrapper, expanded in line by the compiler:
462
463 @verbatim
464 static inline int mysql_mutex_lock(...)
465 {
466 int result;
467
468 ............... (b)
469 result= pthread_mutex_lock(&that->m_mutex);
470
471 return result;
472 }
473 @endverbatim
474
475 When the performance schema instrumentation is compiled in,
476 and when the code compiled is internal to the server implementation,
477 PSI_MUTEX_CALL expands directly to functions calls in the performance schema,
478 to make (a) and (c) calls as efficient as possible.
479
480 @verbatim
481 static inline int mysql_mutex_lock(...)
482 {
483 int result;
484 struct PSI_mutex_locker_state state;
485 struct PSI_mutex_locker *locker= NULL;
486
487 ............... (a)
488 locker= pfs_start_mutex_wait_v1(&state, that->p_psi, PSI_MUTEX_LOCK,
489 locker, src_file, src_line);
490
491 ............... (b)
492 result= pthread_mutex_lock(&that->m_mutex);
493
494 ............... (c)
495 pfs_end_mutex_wait_v1(locker, result);
496
497 return result;
498 }
499 @endverbatim
500
501 When the performance schema instrumentation is compiled in,
502 and when the code compiled is external to the server implementation
503 (typically, a dynamic plugin),
504 PSI_MUTEX_CALL expands to dynamic calls to the underlying implementation,
505 using the PSI_server entry point.
506 This makes (a) and (c) slower, as a function pointer is used instead of a static call,
507 but also independent of the implementation, for binary compatibility.
508
509 @verbatim
510 static inline int mysql_mutex_lock(...)
511 {
512 int result;
513 struct PSI_mutex_locker_state state;
514 struct PSI_mutex_locker *locker= NULL;
515
516 ............... (a)
517 locker= PSI_server->start_mutex_wait(&state, that->p_psi, PSI_MUTEX_LOCK,
518 locker, src_file, src_line);
519
520 ............... (b)
521 result= pthread_mutex_lock(&that->m_mutex);
522
523 ............... (c)
524 PSI_server->end_mutex_wait(locker, result);
525
526 return result;
527 }
528 @endverbatim
529
530 */
531
532 /**
533 @page PAGE_AGGREGATES Performance schema: the aggregates page.
534 Performance schema aggregates.
535
536 @section INTRO Introduction
537
538 Aggregates tables are tables that can be formally defined as
539 SELECT ... from EVENTS_WAITS_HISTORY_INFINITE ... group by 'group clause'.
540
541 Each group clause defines a different kind of aggregate, and corresponds to
542 a different table exposed by the performance schema.
543
544 Aggregates can be either:
545 - computed on the fly,
546 - computed on demand, based on other available data.
547
548 'EVENTS_WAITS_HISTORY_INFINITE' is a table that does not exist,
549 the best approximation is EVENTS_WAITS_HISTORY_LONG.
550 Aggregates computed on the fly in fact are based on EVENTS_WAITS_CURRENT,
551 while aggregates computed on demand are based on other
552 EVENTS_WAITS_SUMMARY_BY_xxx tables.
553
554 To better understand the implementation itself, a bit of math is
555 required first, to understand the model behind the code:
556 the code is deceptively simple, the real complexity resides
557 in the flyweight of pointers between various performance schema buffers.
558
559 @section DIMENSION Concept of dimension
560
561 An event measured by the instrumentation has many attributes.
562 An event is represented as a data point P(x1, x2, ..., xN),
563 where each x_i coordinate represents a given attribute value.
564
565 Examples of attributes are:
566 - the time waited
567 - the object waited on
568 - the instrument waited on
569 - the thread that waited
570 - the operation performed
571 - per object or per operation additional attributes, such as spins,
572 number of bytes, etc.
573
574 Computing an aggregate per thread is fundamentally different from
575 computing an aggregate by instrument, so the "_BY_THREAD" and
576 "_BY_EVENT_NAME" aggregates are different dimensions,
577 operating on different x_i and x_j coordinates.
578 These aggregates are "orthogonal".
579
580 @section PROJECTION Concept of projection
581
582 A given x_i attribute value can convey either just one basic information,
583 such as a number of bytes, or can convey implied information,
584 such as an object fully qualified name.
585
586 For example, from the value "test.t1", the name of the object schema
587 "test" can be separated from the object name "t1", so that now aggregates
588 by object schema can be implemented.
589
590 In math terms, that corresponds to defining a function:
591 F_i (x): x --> y
592 Applying this function to our point P gives another point P':
593
594 F_i (P):
595 P(x1, x2, ..., x{i-1}, x_i, x{i+1}, ..., x_N)
596 --> P' (x1, x2, ..., x{i-1}, f_i(x_i), x{i+1}, ..., x_N)
597
598 That function defines in fact an aggregate !
599 In SQL terms, this aggregate would look like the following table:
600
601 @verbatim
602 CREATE VIEW EVENTS_WAITS_SUMMARY_BY_Func_i AS
603 SELECT col_1, col_2, ..., col_{i-1},
604 Func_i(col_i),
605 COUNT(col_i),
606 MIN(col_i), AVG(col_i), MAX(col_i), -- if col_i is a numeric value
607 col_{i+1}, ..., col_N
608 FROM EVENTS_WAITS_HISTORY_INFINITE
609 group by col_1, col_2, ..., col_{i-1}, col{i+1}, ..., col_N.
610 @endverbatim
611
612 Note that not all columns have to be included,
613 in particular some columns that are dependent on the x_i column should
614 be removed, so that in practice, MySQL's aggregation method tends to
615 remove many attributes at each aggregation steps.
616
617 For example, when aggregating wait events by object instances,
618 - the wait_time and number_of_bytes can be summed,
619 and sum(wait_time) now becomes an object instance attribute.
620 - the source, timer_start, timer_end columns are not in the
621 _BY_INSTANCE table, because these attributes are only
622 meaningful for a wait.
623
624 @section COMPOSITION Concept of composition
625
626 Now, the "test.t1" --> "test" example was purely theory,
627 just to explain the concept, and does not lead very far.
628 Let's look at a more interesting example of data that can be derived
629 from the row event.
630
631 An event creates a transient object, PFS_wait_locker, per operation.
632 This object's life cycle is extremely short: it's created just
633 before the start_wait() instrumentation call, and is destroyed in
634 the end_wait() call.
635
636 The wait locker itself contains a pointer to the object instance
637 waited on.
638 That allows to implement a wait_locker --> object instance projection,
639 with m_target.
640 The object instance life cycle depends on _init and _destroy calls
641 from the code, such as mysql_mutex_init()
642 and mysql_mutex_destroy() for a mutex.
643
644 The object instance waited on contains a pointer to the object class,
645 which is represented by the instrument name.
646 That allows to implement an object instance --> object class projection.
647 The object class life cycle is permanent, as instruments are loaded in
648 the server and never removed.
649
650 The object class is named in such a way
651 (for example, "wait/sync/mutex/sql/LOCK_open",
652 "wait/io/file/maria/data_file") that the component ("sql", "maria")
653 that it belongs to can be inferred.
654 That allows to implement an object class --> server component projection.
655
656 Back to math again, we have, for example for mutexes:
657
658 F1 (l) : PFS_wait_locker l --> PFS_mutex m = l->m_target.m_mutex
659
660 F1_to_2 (m) : PFS_mutex m --> PFS_mutex_class i = m->m_class
661
662 F2_to_3 (i) : PFS_mutex_class i --> const char *component =
663 substring(i->m_name, ...)
664
665 Per components aggregates are not implemented, this is just an illustration.
666
667 F1 alone defines this aggregate:
668
669 EVENTS_WAITS_HISTORY_INFINITE --> EVENTS_WAITS_SUMMARY_BY_INSTANCE
670 (or MUTEX_INSTANCE)
671
672 F1_to_2 alone could define this aggregate:
673
674 EVENTS_WAITS_SUMMARY_BY_INSTANCE --> EVENTS_WAITS_SUMMARY_BY_EVENT_NAME
675
676 Alternatively, using function composition, with
677 F2 = F1_to_2 o F1, F2 defines:
678
679 EVENTS_WAITS_HISTORY_INFINITE --> EVENTS_WAITS_SUMMARY_BY_EVENT_NAME
680
681 Likewise, F_2_to_3 defines:
682
683 EVENTS_WAITS_SUMMARY_BY_EVENT_NAME --> EVENTS_WAITS_SUMMARY_BY_COMPONENT
684
685 and F3 = F_2_to_3 o F_1_to_2 o F1 defines:
686
687 EVENTS_WAITS_HISTORY_INFINITE --> EVENTS_WAITS_SUMMARY_BY_COMPONENT
688
689 What has all this to do with the code ?
690
691 Functions (or aggregates) such as F_3 are not implemented as is.
692 Instead, they are decomposed into F_2_to_3 o F_1_to_2 o F1,
693 and each intermediate aggregate is stored into an internal buffer.
694 This allows to support every F1, F2, F3 aggregates from shared
695 internal buffers, where computation already performed to compute F2
696 is reused when computing F3.
697
698 @section OBJECT_GRAPH Object graph
699
700 In terms of object instances, or records, pointers between
701 different buffers define an object instance graph.
702
703 For example, assuming the following scenario:
704 - A mutex class "M" is instrumented, the instrument name
705 is "wait/sync/mutex/sql/M"
706 - This mutex instrument has been instantiated twice,
707 mutex instances are noted M-1 and M-2
708 - Threads T-A and T-B are locking mutex instance M-1
709 - Threads T-C and T-D are locking mutex instance M-2
710
711 The performance schema will record the following data:
712 - EVENTS_WAITS_CURRENT has 4 rows, one for each mutex locker
713 - EVENTS_WAITS_SUMMARY_BY_INSTANCE shows 2 rows, for M-1 and M-2
714 - EVENTS_WAITS_SUMMARY_BY_EVENT_NAME shows 1 row, for M
715
716 The graph of structures will look like:
717
718 @verbatim
719 PFS_wait_locker (T-A, M-1) ----------
720 |
721 v
722 PFS_mutex (M-1)
723 - m_wait_stat ------------
724 ^ |
725 | |
726 PFS_wait_locker (T-B, M-1) ---------- |
727 v
728 PFS_mutex_class (M)
729 - m_wait_stat
730 PFS_wait_locker (T-C, M-2) ---------- ^
731 | |
732 v |
733 PFS_mutex (M-2) |
734 - m_wait_stat ------------
735 ^
736 |
737 PFS_wait_locker (T-D, M-2) ----------
738
739 || || ||
740 || || ||
741 vv vv vv
742
743 EVENTS_WAITS_CURRENT ..._SUMMARY_BY_INSTANCE ..._SUMMARY_BY_EVENT_NAME
744 @endverbatim
745
746 @section ON_THE_FLY On the fly aggregates
747
748 'On the fly' aggregates are computed during the code execution.
749 This is necessary because the data the aggregate is based on is volatile,
750 and can not be kept indefinitely.
751
752 With on the fly aggregates:
753 - the writer thread does all the computation
754 - the reader thread accesses the result directly
755
756 This model is to be avoided if possible, due to the overhead
757 caused when instrumenting code.
758
759 @section HIGHER_LEVEL Higher level aggregates
760
761 'Higher level' aggregates are implemented on demand only.
762 The code executing a SELECT from the aggregate table is
763 collecting data from multiple internal buffers to produce the result.
764
765 With higher level aggregates:
766 - the reader thread does all the computation
767 - the writer thread has no overhead.
768
769 @section MIXED Mixed level aggregates
770
771 The 'Mixed' model is a compromise between 'On the fly' and 'Higher level'
772 aggregates, for internal buffers that are not permanent.
773
774 While an object is present in a buffer, the higher level model is used.
775 When an object is about to be destroyed, statistics are saved into
776 a 'parent' buffer with a longer life cycle, to follow the on the fly model.
777
778 With mixed aggregates:
779 - the reader thread does a lot of complex computation,
780 - the writer thread has minimal overhead, on destroy events.
781
782 @section IMPL_WAIT Implementation for waits aggregates
783
784 For waits, the tables that contain aggregated wait data are:
785 - EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
786 - EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME
787 - EVENTS_WAITS_SUMMARY_BY_INSTANCE
788 - EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
789 - EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME
790 - EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME
791 - FILE_SUMMARY_BY_EVENT_NAME
792 - FILE_SUMMARY_BY_INSTANCE
793 - SOCKET_SUMMARY_BY_INSTANCE
794 - SOCKET_SUMMARY_BY_EVENT_NAME
795 - OBJECTS_SUMMARY_GLOBAL_BY_TYPE
796
797 The instrumented code that generates wait events consists of:
798 - mutexes (mysql_mutex_t)
799 - rwlocks (mysql_rwlock_t)
800 - conditions (mysql_cond_t)
801 - file io (MYSQL_FILE)
802 - socket io (MYSQL_SOCKET)
803 - table io
804 - table lock
805 - idle
806
807 The flow of data between aggregates tables varies for each instrumentation.
808
809 @subsection IMPL_WAIT_MUTEX Mutex waits
810
811 @verbatim
812 mutex_locker(T, M)
813 |
814 | [1]
815 |
816 |-> pfs_mutex(M) =====>> [B], [C]
817 | |
818 | | [2]
819 | |
820 | |-> pfs_mutex_class(M.class) =====>> [C]
821 |
822 |-> pfs_thread(T).event_name(M) =====>> [A], [D], [E], [F]
823 |
824 | [3]
825 |
826 3a |-> pfs_account(U, H).event_name(M) =====>> [D], [E], [F]
827 . |
828 . | [4-RESET]
829 . |
830 3b .....+-> pfs_user(U).event_name(M) =====>> [E]
831 . |
832 3c .....+-> pfs_host(H).event_name(M) =====>> [F]
833 @endverbatim
834
835 How to read this diagram:
836 - events that occur during the instrumented code execution are noted with numbers,
837 as in [1]. Code executed by these events has an impact on overhead.
838 - events that occur during TRUNCATE TABLE operations are noted with numbers,
839 followed by "-RESET", as in [4-RESET].
840 Code executed by these events has no impact on overhead,
841 since they are executed by independent monitoring sessions.
842 - events that occur when a reader extracts data from a performance schema table
843 are noted with letters, as in [A]. The name of the table involved,
844 and the method that builds a row are documented. Code executed by these events
845 has no impact on the instrumentation overhead. Note that the table
846 implementation may pull data from different buffers.
847 - nominal code paths are in plain lines. A "nominal" code path corresponds to
848 cases where the performance schema buffers are sized so that no records are lost.
849 - degenerated code paths are in dotted lines. A "degenerated" code path corresponds
850 to edge cases where parent buffers are full, which forces the code to aggregate to
851 grand parents directly.
852
853 Implemented as:
854 - [1] @c start_mutex_wait_v1(), @c end_mutex_wait_v1()
855 - [2] @c destroy_mutex_v1()
856 - [3] @c aggregate_thread_waits()
857 - [4] @c PFS_account::aggregate_waits()
858 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
859 @c table_ews_by_thread_by_event_name::make_row()
860 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
861 @c table_events_waits_summary_by_instance::make_mutex_row()
862 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
863 @c table_ews_global_by_event_name::make_mutex_row()
864 - [D] EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
865 @c table_ews_by_account_by_event_name::make_row()
866 - [E] EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME,
867 @c table_ews_by_user_by_event_name::make_row()
868 - [F] EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME,
869 @c table_ews_by_host_by_event_name::make_row()
870
871 Table EVENTS_WAITS_SUMMARY_BY_INSTANCE is a 'on the fly' aggregate,
872 because the data is collected on the fly by (1) and stored into a buffer,
873 pfs_mutex. The table implementation [B] simply reads the results directly
874 from this buffer.
875
876 Table EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME is a 'mixed' aggregate,
877 because some data is collected on the fly (1),
878 some data is preserved with (2) at a later time in the life cycle,
879 and two different buffers pfs_mutex and pfs_mutex_class are used to store the
880 statistics collected. The table implementation [C] is more complex, since
881 it reads from two buffers pfs_mutex and pfs_mutex_class.
882
883 @subsection IMPL_WAIT_RWLOCK Rwlock waits
884
885 @verbatim
886 rwlock_locker(T, R)
887 |
888 | [1]
889 |
890 |-> pfs_rwlock(R) =====>> [B], [C]
891 | |
892 | | [2]
893 | |
894 | |-> pfs_rwlock_class(R.class) =====>> [C]
895 |
896 |-> pfs_thread(T).event_name(R) =====>> [A]
897 |
898 ...
899 @endverbatim
900
901 Implemented as:
902 - [1] @c start_rwlock_rdwait_v1(), @c end_rwlock_rdwait_v1(), ...
903 - [2] @c destroy_rwlock_v1()
904 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
905 @c table_ews_by_thread_by_event_name::make_row()
906 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
907 @c table_events_waits_summary_by_instance::make_rwlock_row()
908 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
909 @c table_ews_global_by_event_name::make_rwlock_row()
910
911 @subsection IMPL_WAIT_COND Cond waits
912
913 @verbatim
914 cond_locker(T, C)
915 |
916 | [1]
917 |
918 |-> pfs_cond(C) =====>> [B], [C]
919 | |
920 | | [2]
921 | |
922 | |-> pfs_cond_class(C.class) =====>> [C]
923 |
924 |-> pfs_thread(T).event_name(C) =====>> [A]
925 |
926 ...
927 @endverbatim
928
929 Implemented as:
930 - [1] @c start_cond_wait_v1(), @c end_cond_wait_v1()
931 - [2] @c destroy_cond_v1()
932 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
933 @c table_ews_by_thread_by_event_name::make_row()
934 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
935 @c table_events_waits_summary_by_instance::make_cond_row()
936 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
937 @c table_ews_global_by_event_name::make_cond_row()
938
939 @subsection IMPL_WAIT_FILE File waits
940
941 @verbatim
942 file_locker(T, F)
943 |
944 | [1]
945 |
946 |-> pfs_file(F) =====>> [B], [C], [D], [E]
947 | |
948 | | [2]
949 | |
950 | |-> pfs_file_class(F.class) =====>> [C], [D]
951 |
952 |-> pfs_thread(T).event_name(F) =====>> [A]
953 |
954 ...
955 @endverbatim
956
957 Implemented as:
958 - [1] @c get_thread_file_name_locker_v1(), @c start_file_wait_v1(),
959 @c end_file_wait_v1(), ...
960 - [2] @c close_file_v1()
961 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
962 @c table_ews_by_thread_by_event_name::make_row()
963 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
964 @c table_events_waits_summary_by_instance::make_file_row()
965 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
966 @c table_ews_global_by_event_name::make_file_row()
967 - [D] FILE_SUMMARY_BY_EVENT_NAME,
968 @c table_file_summary_by_event_name::make_row()
969 - [E] FILE_SUMMARY_BY_INSTANCE,
970 @c table_file_summary_by_instance::make_row()
971
972 @subsection IMPL_WAIT_SOCKET Socket waits
973
974 @verbatim
975 socket_locker(T, S)
976 |
977 | [1]
978 |
979 |-> pfs_socket(S) =====>> [A], [B], [C], [D], [E]
980 |
981 | [2]
982 |
983 |-> pfs_socket_class(S.class) =====>> [C], [D]
984 |
985 |-> pfs_thread(T).event_name(S) =====>> [A]
986 |
987 | [3]
988 |
989 3a |-> pfs_account(U, H).event_name(S) =====>> [F], [G], [H]
990 . |
991 . | [4-RESET]
992 . |
993 3b .....+-> pfs_user(U).event_name(S) =====>> [G]
994 . |
995 3c .....+-> pfs_host(H).event_name(S) =====>> [H]
996 @endverbatim
997
998 Implemented as:
999 - [1] @c start_socket_wait_v1(), @c end_socket_wait_v1().
1000 - [2] @c close_socket_v1()
1001 - [3] @c aggregate_thread_waits()
1002 - [4] @c PFS_account::aggregate_waits()
1003 - [5] @c PFS_host::aggregate_waits()
1004 - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1005 @c table_ews_by_thread_by_event_name::make_row()
1006 - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
1007 @c table_events_waits_summary_by_instance::make_socket_row()
1008 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
1009 @c table_ews_global_by_event_name::make_socket_row()
1010 - [D] SOCKET_SUMMARY_BY_EVENT_NAME,
1011 @c table_socket_summary_by_event_name::make_row()
1012 - [E] SOCKET_SUMMARY_BY_INSTANCE,
1013 @c table_socket_summary_by_instance::make_row()
1014 - [F] EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
1015 @c table_ews_by_account_by_event_name::make_row()
1016 - [G] EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME,
1017 @c table_ews_by_user_by_event_name::make_row()
1018 - [H] EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME,
1019 @c table_ews_by_host_by_event_name::make_row()
1020
1021 @subsection IMPL_WAIT_TABLE Table waits
1022
1023 @verbatim
1024 table_locker(Thread Th, Table Tb, Event = io or lock)
1025 |
1026 | [1]
1027 |
1028 1a |-> pfs_table(Tb) =====>> [A], [B], [C]
1029 | |
1030 | | [2]
1031 | |
1032 | |-> pfs_table_share(Tb.share) =====>> [B], [C]
1033 | |
1034 | | [3]
1035 | |
1036 | |-> global_table_io_stat =====>> [C]
1037 | |
1038 | |-> global_table_lock_stat =====>> [C]
1039 |
1040 1b |-> pfs_thread(Th).event_name(E) =====>> [D], [E], [F], [G]
1041 | |
   |            |   [4-RESET]
1043 | |
1044 | |-> pfs_account(U, H).event_name(E) =====>> [E], [F], [G]
1045 | . |
1046 | . | [5-RESET]
1047 | . |
1048 | .....+-> pfs_user(U).event_name(E) =====>> [F]
1049 | . |
1050 | .....+-> pfs_host(H).event_name(E) =====>> [G]
1051 |
1052 1c |-> pfs_thread(Th).waits_current(W) =====>> [H]
1053 |
1054 1d |-> pfs_thread(Th).waits_history(W) =====>> [I]
1055 |
1056 1e |-> waits_history_long(W) =====>> [J]
1057 @endverbatim
1058
1059 Implemented as:
1060 - [1] @c start_table_io_wait_v1(), @c end_table_io_wait_v1()
1061 - [2] @c close_table_v1()
1062 - [3] @c drop_table_share_v1()
1063 - [4] @c TRUNCATE TABLE EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1064 - [5] @c TRUNCATE TABLE EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1065 - [A] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
1066 @c table_events_waits_summary_by_instance::make_table_row()
1067 - [B] OBJECTS_SUMMARY_GLOBAL_BY_TYPE,
1068 @c table_os_global_by_type::make_row()
1069 - [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
1070 @c table_ews_global_by_event_name::make_table_io_row(),
1071 @c table_ews_global_by_event_name::make_table_lock_row()
1072 - [D] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1073 @c table_ews_by_thread_by_event_name::make_row()
  - [E] EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
        @c table_ews_by_account_by_event_name::make_row()
1076 - [F] EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME,
1077 @c table_ews_by_user_by_event_name::make_row()
1078 - [G] EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME,
1079 @c table_ews_by_host_by_event_name::make_row()
1080 - [H] EVENTS_WAITS_CURRENT,
1081 @c table_events_waits_common::make_row()
1082 - [I] EVENTS_WAITS_HISTORY,
1083 @c table_events_waits_common::make_row()
1084 - [J] EVENTS_WAITS_HISTORY_LONG,
1085 @c table_events_waits_common::make_row()
1086
1087 @section IMPL_STAGE Implementation for stages aggregates
1088
1089 For stages, the tables that contains aggregated data are:
1090 - EVENTS_STAGES_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1091 - EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME
1092 - EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
1093 - EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME
1094 - EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME
1095
1096 @verbatim
1097 start_stage(T, S)
1098 |
1099 | [1]
1100 |
1101 1a |-> pfs_thread(T).event_name(S) =====>> [A], [B], [C], [D], [E]
1102 | |
1103 | | [2]
1104 | |
1105 | 2a |-> pfs_account(U, H).event_name(S) =====>> [B], [C], [D], [E]
1106 | . |
1107 | . | [3-RESET]
1108 | . |
1109 | 2b .....+-> pfs_user(U).event_name(S) =====>> [C]
1110 | . |
1111 | 2c .....+-> pfs_host(H).event_name(S) =====>> [D], [E]
1112 | . . |
1113 | . . | [4-RESET]
1114 | 2d . . |
1115 1b |----+----+----+-> pfs_stage_class(S) =====>> [E]
1116
1117 @endverbatim
1118
1119 Implemented as:
1120 - [1] @c start_stage_v1()
1121 - [2] @c delete_thread_v1(), @c aggregate_thread_stages()
1122 - [3] @c PFS_account::aggregate_stages()
1123 - [4] @c PFS_host::aggregate_stages()
1124 - [A] EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1125 @c table_esgs_by_thread_by_event_name::make_row()
1126 - [B] EVENTS_STAGES_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
1127 @c table_esgs_by_account_by_event_name::make_row()
1128 - [C] EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME,
1129 @c table_esgs_by_user_by_event_name::make_row()
1130 - [D] EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME,
1131 @c table_esgs_by_host_by_event_name::make_row()
1132 - [E] EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME,
1133 @c table_esgs_global_by_event_name::make_row()
1134
1135 @section IMPL_STATEMENT Implementation for statements consumers
1136
1137 For statements, the tables that contains individual event data are:
1138 - EVENTS_STATEMENTS_CURRENT
1139 - EVENTS_STATEMENTS_HISTORY
1140 - EVENTS_STATEMENTS_HISTORY_LONG
1141
1142 For statements, the tables that contains aggregated data are:
1143 - EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1144 - EVENTS_STATEMENTS_SUMMARY_BY_HOST_BY_EVENT_NAME
1145 - EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1146 - EVENTS_STATEMENTS_SUMMARY_BY_USER_BY_EVENT_NAME
1147 - EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME
1148 - EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
1149
1150 @verbatim
1151 statement_locker(T, S)
1152 |
1153 | [1]
1154 |
1155 1a |-> pfs_thread(T).event_name(S) =====>> [A], [B], [C], [D], [E]
1156 | |
1157 | | [2]
1158 | |
1159 | 2a |-> pfs_account(U, H).event_name(S) =====>> [B], [C], [D], [E]
1160 | . |
1161 | . | [3-RESET]
1162 | . |
1163 | 2b .....+-> pfs_user(U).event_name(S) =====>> [C]
1164 | . |
1165 | 2c .....+-> pfs_host(H).event_name(S) =====>> [D], [E]
1166 | . . |
1167 | . . | [4-RESET]
1168 | 2d . . |
1169 1b |----+----+----+-> pfs_statement_class(S) =====>> [E]
1170 |
1171 1c |-> pfs_thread(T).statement_current(S) =====>> [F]
1172 |
1173 1d |-> pfs_thread(T).statement_history(S) =====>> [G]
1174 |
1175 1e |-> statement_history_long(S) =====>> [H]
1176 |
1177 1f |-> statement_digest(S) =====>> [I]
1178
1179 @endverbatim
1180
1181 Implemented as:
1182 - [1] @c start_statement_v1(), end_statement_v1()
1183 (1a, 1b) is an aggregation by EVENT_NAME,
1184 (1c, 1d, 1e) is an aggregation by TIME,
1185 (1f) is an aggregation by DIGEST
1186 all of these are orthogonal,
1187 and implemented in end_statement_v1().
1188 - [2] @c delete_thread_v1(), @c aggregate_thread_statements()
1189 - [3] @c PFS_account::aggregate_statements()
1190 - [4] @c PFS_host::aggregate_statements()
1191 - [A] EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1192 @c table_esms_by_thread_by_event_name::make_row()
1193 - [B] EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
1194 @c table_esms_by_account_by_event_name::make_row()
1195 - [C] EVENTS_STATEMENTS_SUMMARY_BY_USER_BY_EVENT_NAME,
1196 @c table_esms_by_user_by_event_name::make_row()
1197 - [D] EVENTS_STATEMENTS_SUMMARY_BY_HOST_BY_EVENT_NAME,
1198 @c table_esms_by_host_by_event_name::make_row()
1199 - [E] EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME,
1200 @c table_esms_global_by_event_name::make_row()
1201 - [F] EVENTS_STATEMENTS_CURRENT,
1202 @c table_events_statements_current::rnd_next(),
1203 @c table_events_statements_common::make_row()
1204 - [G] EVENTS_STATEMENTS_HISTORY,
1205 @c table_events_statements_history::rnd_next(),
1206 @c table_events_statements_common::make_row()
1207 - [H] EVENTS_STATEMENTS_HISTORY_LONG,
1208 @c table_events_statements_history_long::rnd_next(),
1209 @c table_events_statements_common::make_row()
1210 - [I] EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
1211 @c table_esms_by_digest::make_row()
1212
1213 @section IMPL_TRANSACTION Implementation for transactions consumers
1214
1215 For transactions, the tables that contains individual event data are:
1216 - EVENTS_TRANSACTIONS_CURRENT
1217 - EVENTS_TRANSACTIONS_HISTORY
1218 - EVENTS_TRANSACTIONS_HISTORY_LONG
1219
1220 For transactions, the tables that contains aggregated data are:
1221 - EVENTS_TRANSACTIONS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1222 - EVENTS_TRANSACTIONS_SUMMARY_BY_HOST_BY_EVENT_NAME
1223 - EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1224 - EVENTS_TRANSACTIONS_SUMMARY_BY_USER_BY_EVENT_NAME
1225 - EVENTS_TRANSACTIONS_SUMMARY_GLOBAL_BY_EVENT_NAME
1226
1227 @verbatim
1228 transaction_locker(T, TX)
1229 |
1230 | [1]
1231 |
1232 1a |-> pfs_thread(T).event_name(TX) =====>> [A], [B], [C], [D], [E]
1233 | |
1234 | | [2]
1235 | |
1236 | 2a |-> pfs_account(U, H).event_name(TX) =====>> [B], [C], [D], [E]
1237 | . |
1238 | . | [3-RESET]
1239 | . |
1240 | 2b .....+-> pfs_user(U).event_name(TX) =====>> [C]
1241 | . |
1242 | 2c .....+-> pfs_host(H).event_name(TX) =====>> [D], [E]
1243 | . . |
1244 | . . | [4-RESET]
1245 | 2d . . |
1246 1b |----+----+----+-> pfs_transaction_class(TX) =====>> [E]
1247 |
1248 1c |-> pfs_thread(T).transaction_current(TX) =====>> [F]
1249 |
1250 1d |-> pfs_thread(T).transaction_history(TX) =====>> [G]
1251 |
1252 1e |-> transaction_history_long(TX) =====>> [H]
1253
1254 @endverbatim
1255
1256 Implemented as:
1257 - [1] @c start_transaction_v1(), end_transaction_v1()
1258 (1a, 1b) is an aggregation by EVENT_NAME,
1259 (1c, 1d, 1e) is an aggregation by TIME,
1260 all of these are orthogonal,
1261 and implemented in end_transaction_v1().
1262 - [2] @c delete_thread_v1(), @c aggregate_thread_transactions()
1263 - [3] @c PFS_account::aggregate_transactions()
1264 - [4] @c PFS_host::aggregate_transactions()
1265
1266 - [A] EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
1267 @c table_ets_by_thread_by_event_name::make_row()
1268 - [B] EVENTS_TRANSACTIONS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
1269 @c table_ets_by_account_by_event_name::make_row()
1270 - [C] EVENTS_TRANSACTIONS_SUMMARY_BY_USER_BY_EVENT_NAME,
1271 @c table_ets_by_user_by_event_name::make_row()
1272 - [D] EVENTS_TRANSACTIONS_SUMMARY_BY_HOST_BY_EVENT_NAME,
1273 @c table_ets_by_host_by_event_name::make_row()
1274 - [E] EVENTS_TRANSACTIONS_SUMMARY_GLOBAL_BY_EVENT_NAME,
1275 @c table_ets_global_by_event_name::make_row()
1276 - [F] EVENTS_TRANSACTIONS_CURRENT,
1277 @c table_events_transactions_current::rnd_next(),
1278 @c table_events_transactions_common::make_row()
1279 - [G] EVENTS_TRANSACTIONS_HISTORY,
1280 @c table_events_transactions_history::rnd_next(),
1281 @c table_events_transactions_common::make_row()
1282 - [H] EVENTS_TRANSACTIONS_HISTORY_LONG,
1283 @c table_events_transactions_history_long::rnd_next(),
1284 @c table_events_transactions_common::make_row()
1285
1286 @section IMPL_MEMORY Implementation for memory instruments
1287
1288 For memory, there are no tables that contains individual event data.
1289
1290 For memory, the tables that contains aggregated data are:
1291 - MEMORY_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
1292 - MEMORY_SUMMARY_BY_HOST_BY_EVENT_NAME
1293 - MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME
1294 - MEMORY_SUMMARY_BY_USER_BY_EVENT_NAME
1295 - MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME
1296
1297 @verbatim
1298 memory_event(T, S)
1299 |
1300 | [1]
1301 |
1302 1a |-> pfs_thread(T).event_name(S) =====>> [A], [B], [C], [D], [E]
1303 | |
1304 | | [2]
1305 | |
1306 1+ | 2a |-> pfs_account(U, H).event_name(S) =====>> [B], [C], [D], [E]
1307 | . |
1308 | . | [3-RESET]
1309 | . |
1310 1+ | 2b .....+-> pfs_user(U).event_name(S) =====>> [C]
1311 | . |
1312 1+ | 2c .....+-> pfs_host(H).event_name(S) =====>> [D], [E]
1313 | . . |
1314 | . . | [4-RESET]
1315 | 2d . . |
1316 1b |----+----+----+-> global.event_name(S) =====>> [E]
1317
1318 @endverbatim
1319
1320 Implemented as:
1321 - [1] @c pfs_memory_alloc_v1(),
1322 @c pfs_memory_realloc_v1(),
1323 @c pfs_memory_free_v1().
1324 - [1+] are overflows that can happen during [1a],
1325 implemented with @c carry_memory_stat_delta()
1326 - [2] @c delete_thread_v1(), @c aggregate_thread_memory()
1327 - [3] @c PFS_account::aggregate_memory()
1328 - [4] @c PFS_host::aggregate_memory()
  - [A] MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME,
        @c table_mems_by_thread_by_event_name::make_row()
  - [B] MEMORY_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
        @c table_mems_by_account_by_event_name::make_row()
  - [C] MEMORY_SUMMARY_BY_USER_BY_EVENT_NAME,
        @c table_mems_by_user_by_event_name::make_row()
  - [D] MEMORY_SUMMARY_BY_HOST_BY_EVENT_NAME,
        @c table_mems_by_host_by_event_name::make_row()
  - [E] MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME,
        @c table_mems_global_by_event_name::make_row()
1339
1340 */
1341
1342 /**
1343 @defgroup Performance_schema Performance Schema
1344 The performance schema component.
1345 For details, see the
1346 @ref PAGE_PERFORMANCE_SCHEMA "performance schema main page".
1347
1348 @defgroup Performance_schema_implementation Performance Schema Implementation
1349 @ingroup Performance_schema
1350
1351 @defgroup Performance_schema_tables Performance Schema Tables
1352 @ingroup Performance_schema_implementation
1353 */
1354
/**
  Thread-local storage (TLS) keys.
  THR_PFS holds the per-thread PFS_thread instrumentation pointer,
  accessed via my_thread_get_THR_PFS() / my_thread_set_THR_PFS().
  NOTE(review): the remaining keys appear to hold per-thread state for the
  variable/status tables named in the trailing comments -- confirm with
  the code that creates these keys.
*/
thread_local_key_t THR_PFS;
thread_local_key_t THR_PFS_VG;   // global_variables
thread_local_key_t THR_PFS_SV;   // session_variables
thread_local_key_t THR_PFS_VBT;  // variables_by_thread
thread_local_key_t THR_PFS_SG;   // global_status
thread_local_key_t THR_PFS_SS;   // session_status
thread_local_key_t THR_PFS_SBT;  // status_by_thread
thread_local_key_t THR_PFS_SBU;  // status_by_user
thread_local_key_t THR_PFS_SBH;  // status_by_host
thread_local_key_t THR_PFS_SBA;  // status_by_account

/** True once the TLS keys are usable; asserted by the accessors below. */
bool THR_PFS_initialized= false;
1367
1368 static inline PFS_thread*
my_thread_get_THR_PFS()1369 my_thread_get_THR_PFS()
1370 {
1371 assert(THR_PFS_initialized);
1372 PFS_thread *thread= static_cast<PFS_thread*>(my_get_thread_local(THR_PFS));
1373 assert(thread == NULL || sanitize_thread(thread) != NULL);
1374 return thread;
1375 }
1376
1377 static inline void
my_thread_set_THR_PFS(PFS_thread * pfs)1378 my_thread_set_THR_PFS(PFS_thread *pfs)
1379 {
1380 assert(THR_PFS_initialized);
1381 my_set_thread_local(THR_PFS, pfs);
1382 }
1383
/**
  Conversion map from PSI_mutex_operation to enum_operation_type.
  Indexed by enum PSI_mutex_operation; entry order must stay in sync
  with that enum.
*/
static enum_operation_type mutex_operation_map[]=
{
  OPERATION_TYPE_LOCK,
  OPERATION_TYPE_TRYLOCK
};

/**
  Conversion map from PSI_rwlock_operation to enum_operation_type.
  Indexed by enum PSI_rwlock_operation; entry order must stay in sync
  with that enum.
*/
static enum_operation_type rwlock_operation_map[]=
{
  /* Classic read-write lock operations. */
  OPERATION_TYPE_READLOCK,
  OPERATION_TYPE_WRITELOCK,
  OPERATION_TYPE_TRYREADLOCK,
  OPERATION_TYPE_TRYWRITELOCK,

  /* Shared-exclusive (sx) lock operations. */
  OPERATION_TYPE_SHAREDLOCK,
  OPERATION_TYPE_SHAREDEXCLUSIVELOCK,
  OPERATION_TYPE_EXCLUSIVELOCK,
  OPERATION_TYPE_TRYSHAREDLOCK,
  OPERATION_TYPE_TRYSHAREDEXCLUSIVELOCK,
  OPERATION_TYPE_TRYEXCLUSIVELOCK,
};

/**
  Conversion map from PSI_cond_operation to enum_operation_type.
  Indexed by enum PSI_cond_operation; entry order must stay in sync
  with that enum.
*/
static enum_operation_type cond_operation_map[]=
{
  OPERATION_TYPE_WAIT,
  OPERATION_TYPE_TIMEDWAIT
};

/**
  Conversion map from PSI_file_operation to enum_operation_type.
  Indexed by enum PSI_file_operation; entry order must stay in sync
  with that enum.
*/
static enum_operation_type file_operation_map[]=
{
  OPERATION_TYPE_FILECREATE,
  OPERATION_TYPE_FILECREATETMP,
  OPERATION_TYPE_FILEOPEN,
  OPERATION_TYPE_FILESTREAMOPEN,
  OPERATION_TYPE_FILECLOSE,
  OPERATION_TYPE_FILESTREAMCLOSE,
  OPERATION_TYPE_FILEREAD,
  OPERATION_TYPE_FILEWRITE,
  OPERATION_TYPE_FILESEEK,
  OPERATION_TYPE_FILETELL,
  OPERATION_TYPE_FILEFLUSH,
  OPERATION_TYPE_FILESTAT,
  OPERATION_TYPE_FILEFSTAT,
  OPERATION_TYPE_FILECHSIZE,
  OPERATION_TYPE_FILEDELETE,
  OPERATION_TYPE_FILERENAME,
  OPERATION_TYPE_FILESYNC
};

/**
  Conversion map from PSI_table_operation to enum_operation_type.
  Indexed by enum PSI_table_io_operation; entry order must stay in sync
  with that enum.
*/
static enum_operation_type table_io_operation_map[]=
{
  OPERATION_TYPE_TABLE_FETCH,
  OPERATION_TYPE_TABLE_WRITE_ROW,
  OPERATION_TYPE_TABLE_UPDATE_ROW,
  OPERATION_TYPE_TABLE_DELETE_ROW
};

/**
  Conversion map from enum PFS_TL_LOCK_TYPE to enum_operation_type.
  Indexed by enum PFS_TL_LOCK_TYPE; entry order must stay in sync
  with that enum.
*/
static enum_operation_type table_lock_operation_map[]=
{
  OPERATION_TYPE_TL_READ_NORMAL, /* PFS_TL_READ */
  OPERATION_TYPE_TL_READ_WITH_SHARED_LOCKS, /* PFS_TL_READ_WITH_SHARED_LOCKS */
  OPERATION_TYPE_TL_READ_HIGH_PRIORITY, /* PFS_TL_READ_HIGH_PRIORITY */
  OPERATION_TYPE_TL_READ_NO_INSERTS, /* PFS_TL_READ_NO_INSERT */
  OPERATION_TYPE_TL_WRITE_ALLOW_WRITE, /* PFS_TL_WRITE_ALLOW_WRITE */
  OPERATION_TYPE_TL_WRITE_CONCURRENT_INSERT, /* PFS_TL_WRITE_CONCURRENT_INSERT */
  OPERATION_TYPE_TL_WRITE_LOW_PRIORITY, /* PFS_TL_WRITE_LOW_PRIORITY */
  OPERATION_TYPE_TL_WRITE_NORMAL, /* PFS_TL_WRITE */
  OPERATION_TYPE_TL_READ_EXTERNAL, /* PFS_TL_READ_EXTERNAL */
  OPERATION_TYPE_TL_WRITE_EXTERNAL /* PFS_TL_WRITE_EXTERNAL */
};

/**
  Conversion map from PSI_socket_operation to enum_operation_type.
  Indexed by enum PSI_socket_operation; entry order must stay in sync
  with that enum.
*/
static enum_operation_type socket_operation_map[]=
{
  OPERATION_TYPE_SOCKETCREATE,
  OPERATION_TYPE_SOCKETCONNECT,
  OPERATION_TYPE_SOCKETBIND,
  OPERATION_TYPE_SOCKETCLOSE,
  OPERATION_TYPE_SOCKETSEND,
  OPERATION_TYPE_SOCKETRECV,
  OPERATION_TYPE_SOCKETSENDTO,
  OPERATION_TYPE_SOCKETRECVFROM,
  OPERATION_TYPE_SOCKETSENDMSG,
  OPERATION_TYPE_SOCKETRECVMSG,
  OPERATION_TYPE_SOCKETSEEK,
  OPERATION_TYPE_SOCKETOPT,
  OPERATION_TYPE_SOCKETSTAT,
  OPERATION_TYPE_SOCKETSHUTDOWN,
  OPERATION_TYPE_SOCKETSELECT
};
1500
1501 /**
1502 Build the prefix name of a class of instruments in a category.
1503 For example, this function builds the string 'wait/sync/mutex/sql/' from
1504 a prefix 'wait/sync/mutex' and a category 'sql'.
1505 This prefix is used later to build each instrument name, such as
1506 'wait/sync/mutex/sql/LOCK_open'.
1507 @param prefix Prefix for this class of instruments
1508 @param category Category name
1509 @param [out] output Buffer of length PFS_MAX_INFO_NAME_LENGTH.
1510 @param [out] output_length Length of the resulting output string.
1511 @return 0 for success, non zero for errors
1512 */
build_prefix(const LEX_STRING * prefix,const char * category,char * output,size_t * output_length)1513 static int build_prefix(const LEX_STRING *prefix, const char *category,
1514 char *output, size_t *output_length)
1515 {
1516 size_t len= strlen(category);
1517 char *out_ptr= output;
1518 size_t prefix_length= prefix->length;
1519
1520 if (unlikely((prefix_length + len + 1) >=
1521 PFS_MAX_FULL_PREFIX_NAME_LENGTH))
1522 {
1523 pfs_print_error("build_prefix: prefix+category is too long <%s> <%s>\n",
1524 prefix->str, category);
1525 return 1;
1526 }
1527
1528 if (unlikely(strchr(category, '/') != NULL))
1529 {
1530 pfs_print_error("build_prefix: invalid category <%s>\n",
1531 category);
1532 return 1;
1533 }
1534
1535 /* output = prefix + category + '/' */
1536 memcpy(out_ptr, prefix->str, prefix_length);
1537 out_ptr+= prefix_length;
1538 if (len > 0)
1539 {
1540 memcpy(out_ptr, category, len);
1541 out_ptr+= len;
1542 *out_ptr= '/';
1543 out_ptr++;
1544 }
1545 *output_length= int(out_ptr - output);
1546
1547 return 0;
1548 }
1549
/**
  Common body for the simple instrument registration functions
  (mutex, cond, thread, file, socket).
  For each entry of the @c info array of size @c count (both taken from
  the enclosing function scope), builds the full instrument name from
  PREFIX and the caller's @c category, registers it with REGISTER_FUNC,
  and stores the resulting key in *info->m_key.
  On any failure (bad prefix, pfs not initialized, name too long),
  the key is set to 0, which means "not instrumented".
*/
#define REGISTER_BODY_V1(KEY_T, PREFIX, REGISTER_FUNC)                \
  KEY_T key;                                                          \
  char formatted_name[PFS_MAX_INFO_NAME_LENGTH];                      \
  size_t prefix_length;                                               \
  size_t len;                                                         \
  size_t full_length;                                                 \
                                                                      \
  assert(category != NULL);                                           \
  assert(info != NULL);                                               \
  if (unlikely(build_prefix(&PREFIX, category,                        \
                   formatted_name, &prefix_length)) ||                \
      ! pfs_initialized)                                              \
  {                                                                   \
    for (; count>0; count--, info++)                                  \
      *(info->m_key)= 0;                                              \
    return ;                                                          \
  }                                                                   \
                                                                      \
  for (; count>0; count--, info++)                                    \
  {                                                                   \
    assert(info->m_key != NULL);                                      \
    assert(info->m_name != NULL);                                     \
    len= strlen(info->m_name);                                        \
    full_length= prefix_length + len;                                 \
    if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))              \
    {                                                                 \
      memcpy(formatted_name + prefix_length, info->m_name, len);      \
      key= REGISTER_FUNC(formatted_name, (uint)full_length, info->m_flags); \
    }                                                                 \
    else                                                              \
    {                                                                 \
      pfs_print_error("REGISTER_BODY_V1: name too long <%s> <%s>\n",  \
                      category, info->m_name);                        \
      key= 0;                                                         \
    }                                                                 \
                                                                      \
    *(info->m_key)= key;                                              \
  }                                                                   \
  return;
1589
1590 /* Use C linkage for the interface functions. */
1591
1592 C_MODE_START
1593
/**
  Implementation of the mutex instrumentation interface.
  Registers a batch of @c count mutex instruments under @c category,
  writing the resulting keys into each info->m_key (0 on failure).
  @sa PSI_v1::register_mutex.
*/
void pfs_register_mutex_v1(const char *category,
                           PSI_mutex_info_v1 *info,
                           int count)
{
  REGISTER_BODY_V1(PSI_mutex_key,
                   mutex_instrument_prefix,
                   register_mutex_class)
}
1606
1607 /**
1608 Implementation of the rwlock instrumentation interface.
1609 @sa PSI_v1::register_rwlock.
1610 */
pfs_register_rwlock_v1(const char * category,PSI_rwlock_info_v1 * info,int count)1611 void pfs_register_rwlock_v1(const char *category,
1612 PSI_rwlock_info_v1 *info,
1613 int count)
1614 {
1615 PSI_rwlock_key key;
1616 char rw_formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1617 char sx_formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1618 size_t rw_prefix_length;
1619 size_t sx_prefix_length;
1620 size_t len;
1621 size_t full_length;
1622
1623 assert(category != NULL);
1624 assert(info != NULL);
1625 if (build_prefix(&rwlock_instrument_prefix, category,
1626 rw_formatted_name, &rw_prefix_length) ||
1627 build_prefix(&sxlock_instrument_prefix, category,
1628 sx_formatted_name, &sx_prefix_length) ||
1629 ! pfs_initialized)
1630 {
1631 for (; count>0; count--, info++)
1632 *(info->m_key)= 0;
1633 return ;
1634 }
1635
1636 for (; count>0; count--, info++)
1637 {
1638 assert(info->m_key != NULL);
1639 assert(info->m_name != NULL);
1640 len= strlen(info->m_name);
1641
1642 if (info->m_flags & PSI_RWLOCK_FLAG_SX)
1643 {
1644 full_length= sx_prefix_length + len;
1645 if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1646 {
1647 memcpy(sx_formatted_name + sx_prefix_length, info->m_name, len);
1648 key= register_rwlock_class(sx_formatted_name, (uint)full_length, info->m_flags);
1649 }
1650 else
1651 {
1652 pfs_print_error("REGISTER_BODY_V1: (sx) name too long <%s> <%s>\n",
1653 category, info->m_name);
1654 key= 0;
1655 }
1656 }
1657 else
1658 {
1659 full_length= rw_prefix_length + len;
1660 if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1661 {
1662 memcpy(rw_formatted_name + rw_prefix_length, info->m_name, len);
1663 key= register_rwlock_class(rw_formatted_name, (uint)full_length, info->m_flags);
1664 }
1665 else
1666 {
1667 pfs_print_error("REGISTER_BODY_V1: (rw) name too long <%s> <%s>\n",
1668 category, info->m_name);
1669 key= 0;
1670 }
1671 }
1672
1673 *(info->m_key)= key;
1674 }
1675 return;
1676 }
1677
/**
  Implementation of the cond instrumentation interface.
  Registers a batch of @c count condition-variable instruments under
  @c category, writing the resulting keys into each info->m_key.
  @sa PSI_v1::register_cond.
*/
void pfs_register_cond_v1(const char *category,
                          PSI_cond_info_v1 *info,
                          int count)
{
  REGISTER_BODY_V1(PSI_cond_key,
                   cond_instrument_prefix,
                   register_cond_class)
}
1690
/**
  Implementation of the thread instrumentation interface.
  Registers a batch of @c count thread instruments under @c category,
  writing the resulting keys into each info->m_key.
  @sa PSI_v1::register_thread.
*/
void pfs_register_thread_v1(const char *category,
                            PSI_thread_info_v1 *info,
                            int count)
{
  REGISTER_BODY_V1(PSI_thread_key,
                   thread_instrument_prefix,
                   register_thread_class)
}
1703
/**
  Implementation of the file instrumentation interface.
  Registers a batch of @c count file instruments under @c category,
  writing the resulting keys into each info->m_key.
  @sa PSI_v1::register_file.
*/
void pfs_register_file_v1(const char *category,
                          PSI_file_info_v1 *info,
                          int count)
{
  REGISTER_BODY_V1(PSI_file_key,
                   file_instrument_prefix,
                   register_file_class)
}
1716
pfs_register_stage_v1(const char * category,PSI_stage_info_v1 ** info_array,int count)1717 void pfs_register_stage_v1(const char *category,
1718 PSI_stage_info_v1 **info_array,
1719 int count)
1720 {
1721 char formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1722 size_t prefix_length;
1723 size_t len;
1724 size_t full_length;
1725 PSI_stage_info_v1 *info;
1726
1727 assert(category != NULL);
1728 assert(info_array != NULL);
1729 if (unlikely(build_prefix(&stage_instrument_prefix, category,
1730 formatted_name, &prefix_length)) ||
1731 ! pfs_initialized)
1732 {
1733 for (; count>0; count--, info_array++)
1734 (*info_array)->m_key= 0;
1735 return ;
1736 }
1737
1738 for (; count>0; count--, info_array++)
1739 {
1740 info= *info_array;
1741 assert(info != NULL);
1742 assert(info->m_name != NULL);
1743 len= strlen(info->m_name);
1744 full_length= prefix_length + len;
1745 if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1746 {
1747 memcpy(formatted_name + prefix_length, info->m_name, len);
1748 info->m_key= register_stage_class(formatted_name,
1749 (uint)prefix_length,
1750 (uint)full_length,
1751 info->m_flags);
1752 }
1753 else
1754 {
1755 pfs_print_error("register_stage_v1: name too long <%s> <%s>\n",
1756 category, info->m_name);
1757 info->m_key= 0;
1758 }
1759 }
1760 return;
1761 }
1762
pfs_register_statement_v1(const char * category,PSI_statement_info_v1 * info,int count)1763 void pfs_register_statement_v1(const char *category,
1764 PSI_statement_info_v1 *info,
1765 int count)
1766 {
1767 char formatted_name[PFS_MAX_INFO_NAME_LENGTH];
1768 size_t prefix_length;
1769 size_t len;
1770 size_t full_length;
1771
1772 assert(category != NULL);
1773 assert(info != NULL);
1774 if (unlikely(build_prefix(&statement_instrument_prefix,
1775 category, formatted_name, &prefix_length)) ||
1776 ! pfs_initialized)
1777 {
1778 for (; count>0; count--, info++)
1779 info->m_key= 0;
1780 return ;
1781 }
1782
1783 for (; count>0; count--, info++)
1784 {
1785 assert(info->m_name != NULL);
1786 len= strlen(info->m_name);
1787 full_length= prefix_length + len;
1788 if (likely(full_length <= PFS_MAX_INFO_NAME_LENGTH))
1789 {
1790 memcpy(formatted_name + prefix_length, info->m_name, len);
1791 info->m_key= register_statement_class(formatted_name, (uint)full_length, info->m_flags);
1792 }
1793 else
1794 {
1795 pfs_print_error("register_statement_v1: name too long <%s>\n",
1796 info->m_name);
1797 info->m_key= 0;
1798 }
1799 }
1800 return;
1801 }
1802
/**
  Implementation of the socket instrumentation interface.
  @sa PSI_v1::register_socket.
*/
void pfs_register_socket_v1(const char *category,
                            PSI_socket_info_v1 *info,
                            int count)
{
  /* Prefix each instrument name with "socket/<category>/" and register it. */
  REGISTER_BODY_V1(PSI_socket_key,
                   socket_instrument_prefix,
                   register_socket_class)
}
1811
/**
  Shared body for the init_<instrument> implementations
  (mutex / rwlock / cond).
  Looks up the instrument class registered under KEY; returns NULL when
  the key is unknown, otherwise creates an instance attached to ID and
  returns it as the corresponding opaque PSI_<T> pointer.
*/
#define INIT_BODY_V1(T, KEY, ID) \
  PFS_##T##_class *klass; \
  PFS_##T *pfs; \
  klass= find_##T##_class(KEY); \
  if (unlikely(klass == NULL)) \
    return NULL; \
  pfs= create_##T(klass, ID); \
  return reinterpret_cast<PSI_##T *> (pfs)
1820
1821 /**
1822 Implementation of the mutex instrumentation interface.
1823 @sa PSI_v1::init_mutex.
1824 */
1825 PSI_mutex*
pfs_init_mutex_v1(PSI_mutex_key key,const void * identity)1826 pfs_init_mutex_v1(PSI_mutex_key key, const void *identity)
1827 {
1828 INIT_BODY_V1(mutex, key, identity);
1829 }
1830
1831 /**
1832 Implementation of the mutex instrumentation interface.
1833 @sa PSI_v1::destroy_mutex.
1834 */
pfs_destroy_mutex_v1(PSI_mutex * mutex)1835 void pfs_destroy_mutex_v1(PSI_mutex* mutex)
1836 {
1837 PFS_mutex *pfs= reinterpret_cast<PFS_mutex*> (mutex);
1838
1839 assert(pfs != NULL);
1840
1841 destroy_mutex(pfs);
1842 }
1843
1844 /**
1845 Implementation of the rwlock instrumentation interface.
1846 @sa PSI_v1::init_rwlock.
1847 */
1848 PSI_rwlock*
pfs_init_rwlock_v1(PSI_rwlock_key key,const void * identity)1849 pfs_init_rwlock_v1(PSI_rwlock_key key, const void *identity)
1850 {
1851 INIT_BODY_V1(rwlock, key, identity);
1852 }
1853
1854 /**
1855 Implementation of the rwlock instrumentation interface.
1856 @sa PSI_v1::destroy_rwlock.
1857 */
pfs_destroy_rwlock_v1(PSI_rwlock * rwlock)1858 void pfs_destroy_rwlock_v1(PSI_rwlock* rwlock)
1859 {
1860 PFS_rwlock *pfs= reinterpret_cast<PFS_rwlock*> (rwlock);
1861
1862 assert(pfs != NULL);
1863
1864 destroy_rwlock(pfs);
1865 }
1866
1867 /**
1868 Implementation of the cond instrumentation interface.
1869 @sa PSI_v1::init_cond.
1870 */
1871 PSI_cond*
pfs_init_cond_v1(PSI_cond_key key,const void * identity)1872 pfs_init_cond_v1(PSI_cond_key key, const void *identity)
1873 {
1874 INIT_BODY_V1(cond, key, identity);
1875 }
1876
1877 /**
1878 Implementation of the cond instrumentation interface.
1879 @sa PSI_v1::destroy_cond.
1880 */
pfs_destroy_cond_v1(PSI_cond * cond)1881 void pfs_destroy_cond_v1(PSI_cond* cond)
1882 {
1883 PFS_cond *pfs= reinterpret_cast<PFS_cond*> (cond);
1884
1885 assert(pfs != NULL);
1886
1887 destroy_cond(pfs);
1888 }
1889
1890 /**
1891 Implementation of the table instrumentation interface.
1892 @sa PSI_v1::get_table_share.
1893 */
1894 PSI_table_share*
pfs_get_table_share_v1(my_bool temporary,TABLE_SHARE * share)1895 pfs_get_table_share_v1(my_bool temporary, TABLE_SHARE *share)
1896 {
1897 /* Ignore temporary tables and views. */
1898 if (temporary || share->is_view)
1899 return NULL;
1900 /* An instrumented thread is required, for LF_PINS. */
1901 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
1902 if (unlikely(pfs_thread == NULL))
1903 return NULL;
1904 PFS_table_share* pfs_share;
1905 pfs_share= find_or_create_table_share(pfs_thread, temporary, share);
1906 return reinterpret_cast<PSI_table_share*> (pfs_share);
1907 }
1908
1909 /**
1910 Implementation of the table instrumentation interface.
1911 @sa PSI_v1::release_table_share.
1912 */
pfs_release_table_share_v1(PSI_table_share * share)1913 void pfs_release_table_share_v1(PSI_table_share* share)
1914 {
1915 PFS_table_share* pfs= reinterpret_cast<PFS_table_share*> (share);
1916
1917 if (unlikely(pfs == NULL))
1918 return;
1919
1920 release_table_share(pfs);
1921 }
1922
1923 /**
1924 Implementation of the table instrumentation interface.
1925 @sa PSI_v1::drop_table_share.
1926 */
1927 void
pfs_drop_table_share_v1(my_bool temporary,const char * schema_name,int schema_name_length,const char * table_name,int table_name_length)1928 pfs_drop_table_share_v1(my_bool temporary,
1929 const char *schema_name, int schema_name_length,
1930 const char *table_name, int table_name_length)
1931 {
1932 /* Ignore temporary tables. */
1933 if (temporary)
1934 return;
1935 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
1936 if (unlikely(pfs_thread == NULL))
1937 return;
1938 /* TODO: temporary tables */
1939 drop_table_share(pfs_thread, temporary, schema_name, schema_name_length,
1940 table_name, table_name_length);
1941 }
1942
1943 /**
1944 Implementation of the table instrumentation interface.
1945 @sa PSI_v1::open_table.
1946 */
1947 PSI_table*
pfs_open_table_v1(PSI_table_share * share,const void * identity)1948 pfs_open_table_v1(PSI_table_share *share, const void *identity)
1949 {
1950 PFS_table_share *pfs_table_share= reinterpret_cast<PFS_table_share*> (share);
1951
1952 if (unlikely(pfs_table_share == NULL))
1953 return NULL;
1954
1955 /* This object is not to be instrumented. */
1956 if (! pfs_table_share->m_enabled)
1957 return NULL;
1958
1959 /* This object is instrumented, but all table instruments are disabled. */
1960 if (! global_table_io_class.m_enabled && ! global_table_lock_class.m_enabled)
1961 return NULL;
1962
1963 /*
1964 When the performance schema is off, do not instrument anything.
1965 Table handles have short life cycle, instrumentation will happen
1966 again if needed during the next open().
1967 */
1968 if (! flag_global_instrumentation)
1969 return NULL;
1970
1971 PFS_thread *thread= my_thread_get_THR_PFS();
1972 if (unlikely(thread == NULL))
1973 return NULL;
1974
1975 PFS_table *pfs_table= create_table(pfs_table_share, thread, identity);
1976 return reinterpret_cast<PSI_table *> (pfs_table);
1977 }
1978
1979 /**
1980 Implementation of the table instrumentation interface.
1981 @sa PSI_v1::unbind_table.
1982 */
pfs_unbind_table_v1(PSI_table * table)1983 void pfs_unbind_table_v1(PSI_table *table)
1984 {
1985 PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
1986 if (likely(pfs != NULL))
1987 {
1988 pfs->m_thread_owner= NULL;
1989 pfs->m_owner_event_id= 0;
1990 }
1991 }
1992
1993 /**
1994 Implementation of the table instrumentation interface.
1995 @sa PSI_v1::rebind_table.
1996 */
PSI_table *
pfs_rebind_table_v1(PSI_table_share *share, const void *identity, PSI_table *table)
{
  PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
  if (likely(pfs != NULL))
  {
    /* The handle must have been unbound first (see unbind_table_v1). */
    assert(pfs->m_thread_owner == NULL);

    /* The share got disabled since the handle was created: drop it. */
    if (unlikely(! pfs->m_share->m_enabled))
    {
      destroy_table(pfs);
      return NULL;
    }

    /* All table instruments got disabled: drop it as well. */
    if (unlikely(! global_table_io_class.m_enabled && ! global_table_lock_class.m_enabled))
    {
      destroy_table(pfs);
      return NULL;
    }

    /* The performance schema itself got turned off: drop it as well. */
    if (unlikely(! flag_global_instrumentation))
    {
      destroy_table(pfs);
      return NULL;
    }

    /* The table handle was already instrumented, reuse it for this thread. */
    PFS_thread *thread= my_thread_get_THR_PFS();
    pfs->m_thread_owner= thread;
    if (thread != NULL)
      pfs->m_owner_event_id= thread->m_event_id;
    else
      pfs->m_owner_event_id= 0;
    return table;
  }

  /* No existing handle: create one from scratch, same checks as open. */
  /* See open_table_v1() */

  PFS_table_share *pfs_table_share= reinterpret_cast<PFS_table_share*> (share);

  if (unlikely(pfs_table_share == NULL))
    return NULL;

  if (! pfs_table_share->m_enabled)
    return NULL;

  if (! global_table_io_class.m_enabled && ! global_table_lock_class.m_enabled)
    return NULL;

  if (! flag_global_instrumentation)
    return NULL;

  PFS_thread *thread= my_thread_get_THR_PFS();
  if (unlikely(thread == NULL))
    return NULL;

  PFS_table *pfs_table= create_table(pfs_table_share, thread, identity);
  return reinterpret_cast<PSI_table *> (pfs_table);
}
2056
2057 /**
2058 Implementation of the table instrumentation interface.
2059 @sa PSI_v1::close_table.
2060 */
pfs_close_table_v1(TABLE_SHARE * server_share,PSI_table * table)2061 void pfs_close_table_v1(TABLE_SHARE *server_share, PSI_table *table)
2062 {
2063 PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
2064 if (unlikely(pfs == NULL))
2065 return;
2066 pfs->aggregate(server_share);
2067 destroy_table(pfs);
2068 }
2069
2070 PSI_socket*
pfs_init_socket_v1(PSI_socket_key key,const my_socket * fd,const struct sockaddr * addr,socklen_t addr_len)2071 pfs_init_socket_v1(PSI_socket_key key, const my_socket *fd,
2072 const struct sockaddr *addr, socklen_t addr_len)
2073 {
2074 PFS_socket_class *klass;
2075 PFS_socket *pfs;
2076 klass= find_socket_class(key);
2077 if (unlikely(klass == NULL))
2078 return NULL;
2079 pfs= create_socket(klass, fd, addr, addr_len);
2080 return reinterpret_cast<PSI_socket *> (pfs);
2081 }
2082
pfs_destroy_socket_v1(PSI_socket * socket)2083 void pfs_destroy_socket_v1(PSI_socket *socket)
2084 {
2085 PFS_socket *pfs= reinterpret_cast<PFS_socket*> (socket);
2086
2087 assert(pfs != NULL);
2088
2089 destroy_socket(pfs);
2090 }
2091
2092 /**
2093 Implementation of the file instrumentation interface.
2094 @sa PSI_v1::create_file.
2095 */
void pfs_create_file_v1(PSI_file_key key, const char *name, File file)
{
  if (! flag_global_instrumentation)
    return;
  /* The file descriptor doubles as the index into file_handle_array. */
  int index= (int) file;
  if (unlikely(index < 0))
    return;
  PFS_file_class *klass= find_file_class(key);
  if (unlikely(klass == NULL))
    return;
  if (! klass->m_enabled)
    return;

  /* A thread is needed for LF_PINS */
  PFS_thread *pfs_thread= my_thread_get_THR_PFS();
  if (unlikely(pfs_thread == NULL))
    return;

  if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
    return;

  /*
    We want this check after pfs_thread->m_enabled,
    to avoid reporting false loss.
  */
  if (unlikely(index >= file_handle_max))
  {
    file_handle_lost++;
    return;
  }

  uint len= (uint)strlen(name);
  PFS_file *pfs_file= find_or_create_file(pfs_thread, klass, name, len, true);

  /* Map the descriptor to its instrumentation. */
  file_handle_array[index]= pfs_file;
}
2132
2133 /**
2134 Arguments given from a parent to a child thread, packaged in one structure.
2135 This data is used when spawning a new instrumented thread.
2136 @sa pfs_spawn_thread.
2137 */
struct PFS_spawn_thread_arg
{
  /*
    Parent thread attributes, copied by value so the child can inherit
    them even after the parent instrumentation is destroyed.
  */
  ulonglong m_thread_internal_id;
  char m_username[USERNAME_LENGTH];
  uint m_username_length;
  char m_hostname[HOSTNAME_LENGTH];
  uint m_hostname_length;

  /* Instrument key and identity used to create the child instrumentation. */
  PSI_thread_key m_child_key;
  const void *m_child_identity;
  /* User routine and argument, invoked once instrumentation is attached. */
  void *(*m_user_start_routine)(void*);
  void *m_user_arg;
};
2151
/**
  Trampoline running inside the newly created thread.
  Attaches performance schema instrumentation to the child thread,
  frees the spawn argument, then invokes the user start routine.
  @param arg a PFS_spawn_thread_arg allocated by pfs_spawn_thread_v1()
  @return NULL
*/
extern "C" void* pfs_spawn_thread(void *arg)
{
  PFS_spawn_thread_arg *typed_arg= (PFS_spawn_thread_arg*) arg;
  void *user_arg;
  void *(*user_start_routine)(void*);

  PFS_thread *pfs;

  /* First, attach instrumentation to this newly created pthread. */
  PFS_thread_class *klass= find_thread_class(typed_arg->m_child_key);
  if (likely(klass != NULL))
  {
    pfs= create_thread(klass, typed_arg->m_child_identity, 0);
    if (likely(pfs != NULL))
    {
      pfs->m_thread_os_id= my_thread_os_id();
      clear_thread_account(pfs);

      /* Inherit the identity captured from the parent thread. */
      pfs->m_parent_thread_internal_id= typed_arg->m_thread_internal_id;

      memcpy(pfs->m_username, typed_arg->m_username, sizeof(pfs->m_username));
      pfs->m_username_length= typed_arg->m_username_length;

      memcpy(pfs->m_hostname, typed_arg->m_hostname, sizeof(pfs->m_hostname));
      pfs->m_hostname_length= typed_arg->m_hostname_length;

      set_thread_account(pfs);
    }
  }
  else
  {
    pfs= NULL;
  }
  my_thread_set_THR_PFS(pfs);

  /*
    Secondly, free the memory allocated in spawn_thread_v1().
    It is preferable to do this before invoking the user
    routine, to avoid memory leaks at shutdown, in case
    the server exits without waiting for this thread.
  */
  user_start_routine= typed_arg->m_user_start_routine;
  user_arg= typed_arg->m_user_arg;
  my_free(typed_arg);

  /* Then, execute the user code for this thread. */
  (*user_start_routine)(user_arg);

  return NULL;
}
2202
2203 /**
2204 Implementation of the thread instrumentation interface.
2205 @sa PSI_v1::spawn_thread.
2206 */
int pfs_spawn_thread_v1(PSI_thread_key key,
                        my_thread_handle *thread, const my_thread_attr_t *attr,
                        void *(*start_routine)(void*), void *arg)
{
  PFS_spawn_thread_arg *psi_arg;
  PFS_thread *parent;

  /* psi_arg can not be global, and can not be a local variable. */
  psi_arg= (PFS_spawn_thread_arg*) my_malloc(PSI_NOT_INSTRUMENTED,
                                             sizeof(PFS_spawn_thread_arg),
                                             MYF(MY_WME));
  if (unlikely(psi_arg == NULL))
    return EAGAIN;

  psi_arg->m_child_key= key;
  psi_arg->m_child_identity= (arg ? arg : thread);
  psi_arg->m_user_start_routine= start_routine;
  psi_arg->m_user_arg= arg;

  parent= my_thread_get_THR_PFS();
  if (parent != NULL)
  {
    /*
      Make a copy of the parent attributes.
      This is required, because instrumentation for this thread (the parent)
      may be destroyed before the child thread instrumentation is created.
    */
    psi_arg->m_thread_internal_id= parent->m_thread_internal_id;

    memcpy(psi_arg->m_username, parent->m_username, sizeof(psi_arg->m_username));
    psi_arg->m_username_length= parent->m_username_length;

    memcpy(psi_arg->m_hostname, parent->m_hostname, sizeof(psi_arg->m_hostname));
    psi_arg->m_hostname_length= parent->m_hostname_length;
  }
  else
  {
    /* No instrumented parent: the child starts with an empty identity. */
    psi_arg->m_thread_internal_id= 0;
    psi_arg->m_username_length= 0;
    psi_arg->m_hostname_length= 0;
  }

  /* On success, ownership of psi_arg passes to pfs_spawn_thread(). */
  int result= my_thread_create(thread, attr, pfs_spawn_thread, psi_arg);
  if (unlikely(result != 0))
    my_free(psi_arg); /* the child never ran: free the argument here */
  return result;
}
2254
2255 /**
2256 Implementation of the thread instrumentation interface.
2257 @sa PSI_v1::new_thread.
2258 */
2259 PSI_thread*
pfs_new_thread_v1(PSI_thread_key key,const void * identity,ulonglong processlist_id)2260 pfs_new_thread_v1(PSI_thread_key key, const void *identity, ulonglong processlist_id)
2261 {
2262 PFS_thread *pfs;
2263
2264 PFS_thread_class *klass= find_thread_class(key);
2265 if (likely(klass != NULL))
2266 pfs= create_thread(klass, identity, processlist_id);
2267 else
2268 pfs= NULL;
2269
2270 return reinterpret_cast<PSI_thread*> (pfs);
2271 }
2272
2273 /**
2274 Implementation of the thread instrumentation interface.
2275 @sa PSI_v1::set_thread_id.
2276 */
pfs_set_thread_id_v1(PSI_thread * thread,ulonglong processlist_id)2277 void pfs_set_thread_id_v1(PSI_thread *thread, ulonglong processlist_id)
2278 {
2279 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2280 if (unlikely(pfs == NULL))
2281 return;
2282 pfs->m_processlist_id= (ulong)processlist_id;
2283 }
2284
2285 /**
2286 Implementation of the thread instrumentation interface.
2287 @sa PSI_v1::set_thread_THD.
2288 */
pfs_set_thread_THD_v1(PSI_thread * thread,THD * thd)2289 void pfs_set_thread_THD_v1(PSI_thread *thread, THD *thd)
2290 {
2291 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2292 if (unlikely(pfs == NULL))
2293 return;
2294 pfs->m_thd= thd;
2295 }
2296
2297 /**
2298 Implementation of the thread instrumentation interface.
2299 @sa PSI_v1::set_thread_os_thread_id.
2300 */
pfs_set_thread_os_id_v1(PSI_thread * thread)2301 void pfs_set_thread_os_id_v1(PSI_thread *thread)
2302 {
2303 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2304 if (unlikely(pfs == NULL))
2305 return;
2306 pfs->m_thread_os_id= my_thread_os_id();
2307 }
2308
2309 /**
2310 Implementation of the thread instrumentation interface.
2311 @sa PSI_v1::get_thread_id.
2312 */
2313 PSI_thread*
pfs_get_thread_v1(void)2314 pfs_get_thread_v1(void)
2315 {
2316 PFS_thread *pfs= my_thread_get_THR_PFS();
2317 return reinterpret_cast<PSI_thread*> (pfs);
2318 }
2319
2320 /**
2321 Implementation of the thread instrumentation interface.
2322 @sa PSI_v1::set_thread_user.
2323 */
void pfs_set_thread_user_v1(const char *user, int user_len)
{
  pfs_dirty_state dirty_state;
  PFS_thread *pfs= my_thread_get_THR_PFS();

  assert((user != NULL) || (user_len == 0));
  assert(user_len >= 0);
  /* sizeof() does not evaluate its operand: safe even if pfs is NULL. */
  assert((uint) user_len <= sizeof(pfs->m_username));

  if (unlikely(pfs == NULL))
    return;

  /* Flush statistics collected so far to the old account/user/host buckets. */
  aggregate_thread(pfs, pfs->m_account, pfs->m_user, pfs->m_host);

  /* Mark the session attributes dirty while they are being rewritten. */
  pfs->m_session_lock.allocated_to_dirty(& dirty_state);

  clear_thread_account(pfs);

  if (user_len > 0)
    memcpy(pfs->m_username, user, user_len);
  pfs->m_username_length= user_len;

  set_thread_account(pfs);

  /* Recompute the effective enabled/history flags for the new identity. */
  bool enabled;
  bool history;
  if (pfs->m_account != NULL)
  {
    /* The account row caches the setup_actors decision. */
    enabled= pfs->m_account->m_enabled;
    history= pfs->m_account->m_history;
  }
  else
  {
    if ((pfs->m_username_length > 0) && (pfs->m_hostname_length > 0))
    {
      lookup_setup_actor(pfs,
                         pfs->m_username, pfs->m_username_length,
                         pfs->m_hostname, pfs->m_hostname_length,
                         &enabled, &history);
    }
    else
    {
      /* There is no setting for background threads */
      enabled= true;
      history= true;
    }
  }
  pfs->set_enabled(enabled);
  pfs->set_history(history);

  pfs->m_session_lock.dirty_to_allocated(& dirty_state);
}
2376
2377 /**
2378 Implementation of the thread instrumentation interface.
2379 @sa PSI_v1::set_thread_account.
2380 */
void pfs_set_thread_account_v1(const char *user, int user_len,
                               const char *host, int host_len)
{
  pfs_dirty_state dirty_state;
  PFS_thread *pfs= my_thread_get_THR_PFS();

  assert((user != NULL) || (user_len == 0));
  assert(user_len >= 0);
  /* sizeof() does not evaluate its operand: safe even if pfs is NULL. */
  assert((uint) user_len <= sizeof(pfs->m_username));
  assert((host != NULL) || (host_len == 0));
  assert(host_len >= 0);

  /* Clamp the host name to the buffer size (release-build safety). */
  host_len= min<size_t>(host_len, sizeof(pfs->m_hostname));
  if (unlikely(pfs == NULL))
    return;

  /* Mark the session attributes dirty while they are being rewritten. */
  pfs->m_session_lock.allocated_to_dirty(& dirty_state);

  clear_thread_account(pfs);

  if (acl_is_utility_user(user, host, NULL)) {
    /* We do not want the utility user to show up in any PFS statistics,
       so we keep this pfs session dirty. This fixes many, but not all tables.
       The remaining seems to honor m_enabled, so we also set that to false. */
    /* Intentionally returns without dirty_to_allocated: see comment above. */
    pfs->m_enabled= false;
    pfs->m_disable_instrumentation = true;
    return;
  }

  if (host_len > 0)
    memcpy(pfs->m_hostname, host, host_len);
  pfs->m_hostname_length= host_len;

  if (user_len > 0)
    memcpy(pfs->m_username, user, user_len);
  pfs->m_username_length= user_len;

  set_thread_account(pfs);

  /* Recompute the effective enabled/history flags for the new identity. */
  bool enabled;
  bool history;
  if (pfs->m_account != NULL)
  {
    /* The account row caches the setup_actors decision. */
    enabled= pfs->m_account->m_enabled;
    history= pfs->m_account->m_history;
  }
  else
  {
    if ((pfs->m_username_length > 0) && (pfs->m_hostname_length > 0))
    {
      lookup_setup_actor(pfs,
                         pfs->m_username, pfs->m_username_length,
                         pfs->m_hostname, pfs->m_hostname_length,
                         &enabled, &history);
    }
    else
    {
      /* There is no setting for background threads */
      enabled= true;
      history= true;
    }
  }
  pfs->set_enabled(enabled);
  pfs->set_history(history);

  pfs->m_session_lock.dirty_to_allocated(& dirty_state);
}
2448
2449 /**
2450 Implementation of the thread instrumentation interface.
2451 @sa PSI_v1::set_thread_db.
2452 */
pfs_set_thread_db_v1(const char * db,int db_len)2453 void pfs_set_thread_db_v1(const char* db, int db_len)
2454 {
2455 PFS_thread *pfs= my_thread_get_THR_PFS();
2456
2457 assert((db != NULL) || (db_len == 0));
2458 assert(db_len >= 0);
2459 assert((uint) db_len <= sizeof(pfs->m_dbname));
2460
2461 if (likely(pfs != NULL))
2462 {
2463 pfs_dirty_state dirty_state;
2464 pfs->m_stmt_lock.allocated_to_dirty(& dirty_state);
2465 if (db_len > 0)
2466 memcpy(pfs->m_dbname, db, db_len);
2467 pfs->m_dbname_length= db_len;
2468 pfs->m_stmt_lock.dirty_to_allocated(& dirty_state);
2469 }
2470 }
2471
2472 /**
2473 Implementation of the thread instrumentation interface.
2474 @sa PSI_v1::set_thread_command.
2475 */
pfs_set_thread_command_v1(int command)2476 void pfs_set_thread_command_v1(int command)
2477 {
2478 PFS_thread *pfs= my_thread_get_THR_PFS();
2479
2480 assert(command >= 0);
2481 assert(command <= (int) COM_END);
2482
2483 if (likely(pfs != NULL))
2484 {
2485 pfs->m_command= command;
2486 }
2487 }
2488
2489 /**
2490 Implementation of the thread instrumentation interface.
2491 @sa PSI_v1::set_thread_connection_type.
2492 */
pfs_set_connection_type_v1(opaque_vio_type conn_type)2493 void pfs_set_connection_type_v1(opaque_vio_type conn_type)
2494 {
2495 PFS_thread *pfs= my_thread_get_THR_PFS();
2496
2497 assert(conn_type >= FIRST_VIO_TYPE);
2498 assert(conn_type <= LAST_VIO_TYPE);
2499
2500 if (likely(pfs != NULL))
2501 {
2502 pfs->m_connection_type= static_cast<enum_vio_type> (conn_type);
2503 }
2504 }
2505
2506
2507 /**
2508 Implementation of the thread instrumentation interface.
2509 @sa PSI_v1::set_thread_start_time.
2510 */
pfs_set_thread_start_time_v1(time_t start_time)2511 void pfs_set_thread_start_time_v1(time_t start_time)
2512 {
2513 PFS_thread *pfs= my_thread_get_THR_PFS();
2514
2515 if (likely(pfs != NULL))
2516 {
2517 pfs->m_start_time= start_time;
2518 }
2519 }
2520
2521 /**
2522 Implementation of the thread instrumentation interface.
2523 @sa PSI_v1::set_thread_state.
2524 */
void pfs_set_thread_state_v1(const char* state)
{
  /* DEPRECATED. Kept only to satisfy the PSI_v1 interface; no-op. */
}
2529
2530 /**
2531 Implementation of the thread instrumentation interface.
2532 @sa PSI_v1::set_thread_info.
2533 */
pfs_set_thread_info_v1(const char * info,uint info_len)2534 void pfs_set_thread_info_v1(const char* info, uint info_len)
2535 {
2536 pfs_dirty_state dirty_state;
2537 PFS_thread *pfs= my_thread_get_THR_PFS();
2538
2539 assert((info != NULL) || (info_len == 0));
2540
2541 if (likely(pfs != NULL))
2542 {
2543 if ((info != NULL) && (info_len > 0))
2544 {
2545 if (info_len > sizeof(pfs->m_processlist_info))
2546 info_len= sizeof(pfs->m_processlist_info);
2547
2548 pfs->m_stmt_lock.allocated_to_dirty(& dirty_state);
2549 memcpy(pfs->m_processlist_info, info, info_len);
2550 pfs->m_processlist_info_length= info_len;
2551 pfs->m_stmt_lock.dirty_to_allocated(& dirty_state);
2552 }
2553 else
2554 {
2555 pfs->m_stmt_lock.allocated_to_dirty(& dirty_state);
2556 pfs->m_processlist_info_length= 0;
2557 pfs->m_stmt_lock.dirty_to_allocated(& dirty_state);
2558 }
2559 }
2560 }
2561
2562 /**
2563 Implementation of the thread instrumentation interface.
2564 @sa PSI_v1::set_thread.
2565 */
pfs_set_thread_v1(PSI_thread * thread)2566 void pfs_set_thread_v1(PSI_thread* thread)
2567 {
2568 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2569 my_thread_set_THR_PFS(pfs);
2570 }
2571
2572 /**
2573 Implementation of the thread instrumentation interface.
2574 @sa PSI_v1::delete_current_thread.
2575 */
pfs_delete_current_thread_v1(void)2576 void pfs_delete_current_thread_v1(void)
2577 {
2578 PFS_thread *thread= my_thread_get_THR_PFS();
2579 if (thread != NULL)
2580 {
2581 aggregate_thread(thread, thread->m_account, thread->m_user, thread->m_host);
2582 my_thread_set_THR_PFS(NULL);
2583 destroy_thread(thread);
2584 }
2585 }
2586
2587 /**
2588 Implementation of the thread instrumentation interface.
2589 @sa PSI_v1::delete_thread.
2590 */
pfs_delete_thread_v1(PSI_thread * thread)2591 void pfs_delete_thread_v1(PSI_thread *thread)
2592 {
2593 PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
2594
2595 if (pfs != NULL)
2596 {
2597 aggregate_thread(pfs, pfs->m_account, pfs->m_user, pfs->m_host);
2598 destroy_thread(pfs);
2599 }
2600 }
2601
2602 /**
2603 Implementation of the mutex instrumentation interface.
2604 @sa PSI_v1::start_mutex_wait.
2605 */
PSI_mutex_locker*
pfs_start_mutex_wait_v1(PSI_mutex_locker_state *state,
                        PSI_mutex *mutex, PSI_mutex_operation op,
                        const char *src_file, uint src_line)
{
  PFS_mutex *pfs_mutex= reinterpret_cast<PFS_mutex*> (mutex);
  assert((int) op >= 0);
  assert((uint) op < array_elements(mutex_operation_map));
  assert(state != NULL);

  assert(pfs_mutex != NULL);
  assert(pfs_mutex->m_class != NULL);

  /* Instrument disabled: do not create a locker. */
  if (! pfs_mutex->m_enabled)
    return NULL;

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    /* Take a start timestamp only when this instrument is timed. */
    if (pfs_mutex->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Guard against wait events nested deeper than the per-thread stack. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* Link this wait to the enclosing event (previous stack slot). */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= pfs_mutex->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_mutex->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= mutex_operation_map[(int) op];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_MUTEX;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    /* No thread instrumentation: only per-instance timing is possible. */
    if (pfs_mutex->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
      state->m_thread= NULL;
    }
    else
    {
      /*
        Complete shortcut.
      */
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
      pfs_mutex->m_mutex_stat.m_wait_stat.aggregate_counted();
      return NULL;
    }
  }

  state->m_flags= flags;
  state->m_mutex= mutex;
  return reinterpret_cast<PSI_mutex_locker*> (state);
}
2698
2699 /**
2700 Implementation of the rwlock instrumentation interface.
2701 @sa PSI_v1::start_rwlock_rdwait
2702 @sa PSI_v1::start_rwlock_wrwait
2703 */
PSI_rwlock_locker*
pfs_start_rwlock_wait_v1(PSI_rwlock_locker_state *state,
                         PSI_rwlock *rwlock,
                         PSI_rwlock_operation op,
                         const char *src_file, uint src_line)
{
  PFS_rwlock *pfs_rwlock= reinterpret_cast<PFS_rwlock*> (rwlock);
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(rwlock_operation_map));
  assert(state != NULL);
  assert(pfs_rwlock != NULL);
  assert(pfs_rwlock->m_class != NULL);

  /* Operations supported for READ WRITE LOCK */

  assert( pfs_rwlock->m_class->is_shared_exclusive()
          || (op == PSI_RWLOCK_READLOCK)
          || (op == PSI_RWLOCK_WRITELOCK)
          || (op == PSI_RWLOCK_TRYREADLOCK)
          || (op == PSI_RWLOCK_TRYWRITELOCK)
          );

  /* Operations supported for SHARED EXCLUSIVE LOCK */

  assert( ! pfs_rwlock->m_class->is_shared_exclusive()
          || (op == PSI_RWLOCK_SHAREDLOCK)
          || (op == PSI_RWLOCK_SHAREDEXCLUSIVELOCK)
          || (op == PSI_RWLOCK_EXCLUSIVELOCK)
          || (op == PSI_RWLOCK_TRYSHAREDLOCK)
          || (op == PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK)
          || (op == PSI_RWLOCK_TRYEXCLUSIVELOCK)
          );

  /* Instrument disabled: do not create a locker. */
  if (! pfs_rwlock->m_enabled)
    return NULL;

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    /* Take a start timestamp only when this instrument is timed. */
    if (pfs_rwlock->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Guard against wait events nested deeper than the per-thread stack. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* Link this wait to the enclosing event (previous stack slot). */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= pfs_rwlock->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_rwlock->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= rwlock_operation_map[static_cast<int> (op)];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_RWLOCK;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    /* No thread instrumentation: only per-instance timing is possible. */
    if (pfs_rwlock->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
      state->m_thread= NULL;
    }
    else
    {
      /*
        Complete shortcut.
      */
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
      pfs_rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
      return NULL;
    }
  }

  state->m_flags= flags;
  state->m_rwlock= rwlock;
  state->m_operation= op;
  return reinterpret_cast<PSI_rwlock_locker*> (state);
}
2817
2818 PSI_rwlock_locker*
pfs_start_rwlock_rdwait_v1(PSI_rwlock_locker_state * state,PSI_rwlock * rwlock,PSI_rwlock_operation op,const char * src_file,uint src_line)2819 pfs_start_rwlock_rdwait_v1(PSI_rwlock_locker_state *state,
2820 PSI_rwlock *rwlock,
2821 PSI_rwlock_operation op,
2822 const char *src_file, uint src_line)
2823 {
2824 assert((op == PSI_RWLOCK_READLOCK) ||
2825 (op == PSI_RWLOCK_TRYREADLOCK) ||
2826 (op == PSI_RWLOCK_SHAREDLOCK) ||
2827 (op == PSI_RWLOCK_TRYSHAREDLOCK));
2828
2829 return pfs_start_rwlock_wait_v1(state, rwlock, op, src_file, src_line);
2830 }
2831
2832 PSI_rwlock_locker*
pfs_start_rwlock_wrwait_v1(PSI_rwlock_locker_state * state,PSI_rwlock * rwlock,PSI_rwlock_operation op,const char * src_file,uint src_line)2833 pfs_start_rwlock_wrwait_v1(PSI_rwlock_locker_state *state,
2834 PSI_rwlock *rwlock,
2835 PSI_rwlock_operation op,
2836 const char *src_file, uint src_line)
2837 {
2838 assert((op == PSI_RWLOCK_WRITELOCK) ||
2839 (op == PSI_RWLOCK_TRYWRITELOCK) ||
2840 (op == PSI_RWLOCK_SHAREDEXCLUSIVELOCK) ||
2841 (op == PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK) ||
2842 (op == PSI_RWLOCK_EXCLUSIVELOCK) ||
2843 (op == PSI_RWLOCK_TRYEXCLUSIVELOCK));
2844
2845 return pfs_start_rwlock_wait_v1(state, rwlock, op, src_file, src_line);
2846 }
2847
2848 /**
2849 Implementation of the cond instrumentation interface.
2850 @sa PSI_v1::start_cond_wait.
2851 */
2852 PSI_cond_locker*
pfs_start_cond_wait_v1(PSI_cond_locker_state * state,PSI_cond * cond,PSI_mutex * mutex,PSI_cond_operation op,const char * src_file,uint src_line)2853 pfs_start_cond_wait_v1(PSI_cond_locker_state *state,
2854 PSI_cond *cond, PSI_mutex *mutex,
2855 PSI_cond_operation op,
2856 const char *src_file, uint src_line)
2857 {
2858 /*
2859 Note about the unused PSI_mutex *mutex parameter:
2860 In the pthread library, a call to pthread_cond_wait()
2861 causes an unlock() + lock() on the mutex associated with the condition.
2862 This mutex operation is not instrumented, so the mutex will still
2863 appear as locked when a thread is waiting on a condition.
2864 This has no impact now, as unlock_mutex() is not recording events.
2865 When unlock_mutex() is implemented by later work logs,
2866 this parameter here will be used to adjust the mutex state,
2867 in start_cond_wait_v1() and end_cond_wait_v1().
2868 */
2869 PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);
2870 assert(static_cast<int> (op) >= 0);
2871 assert(static_cast<uint> (op) < array_elements(cond_operation_map));
2872 assert(state != NULL);
2873 assert(pfs_cond != NULL);
2874 assert(pfs_cond->m_class != NULL);
2875
2876 if (! pfs_cond->m_enabled)
2877 return NULL;
2878
2879 uint flags;
2880 ulonglong timer_start= 0;
2881
2882 if (flag_thread_instrumentation)
2883 {
2884 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
2885 if (unlikely(pfs_thread == NULL))
2886 return NULL;
2887 if (! pfs_thread->m_enabled)
2888 return NULL;
2889 state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
2890 flags= STATE_FLAG_THREAD;
2891
2892 if (pfs_cond->m_timed)
2893 {
2894 timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
2895 state->m_timer_start= timer_start;
2896 flags|= STATE_FLAG_TIMED;
2897 }
2898
2899 if (flag_events_waits_current)
2900 {
2901 if (unlikely(pfs_thread->m_events_waits_current >=
2902 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
2903 {
2904 locker_lost++;
2905 return NULL;
2906 }
2907 PFS_events_waits *wait= pfs_thread->m_events_waits_current;
2908 state->m_wait= wait;
2909 flags|= STATE_FLAG_EVENT;
2910
2911 PFS_events_waits *parent_event= wait - 1;
2912 wait->m_event_type= EVENT_TYPE_WAIT;
2913 wait->m_nesting_event_id= parent_event->m_event_id;
2914 wait->m_nesting_event_type= parent_event->m_event_type;
2915
2916 wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
2917 wait->m_class= pfs_cond->m_class;
2918 wait->m_timer_start= timer_start;
2919 wait->m_timer_end= 0;
2920 wait->m_object_instance_addr= pfs_cond->m_identity;
2921 wait->m_event_id= pfs_thread->m_event_id++;
2922 wait->m_end_event_id= 0;
2923 wait->m_operation= cond_operation_map[static_cast<int> (op)];
2924 wait->m_source_file= src_file;
2925 wait->m_source_line= src_line;
2926 wait->m_wait_class= WAIT_CLASS_COND;
2927
2928 pfs_thread->m_events_waits_current++;
2929 }
2930 }
2931 else
2932 {
2933 if (pfs_cond->m_timed)
2934 {
2935 timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
2936 state->m_timer_start= timer_start;
2937 flags= STATE_FLAG_TIMED;
2938 }
2939 else
2940 {
2941 /*
2942 Complete shortcut.
2943 */
2944 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
2945 pfs_cond->m_cond_stat.m_wait_stat.aggregate_counted();
2946 return NULL;
2947 }
2948 }
2949
2950 state->m_flags= flags;
2951 state->m_cond= cond;
2952 state->m_mutex= mutex;
2953 return reinterpret_cast<PSI_cond_locker*> (state);
2954 }
2955
lock_flags_to_lock_type(uint flags)2956 static inline PFS_TL_LOCK_TYPE lock_flags_to_lock_type(uint flags)
2957 {
2958 enum thr_lock_type value= static_cast<enum thr_lock_type> (flags);
2959
2960 switch (value)
2961 {
2962 case TL_READ:
2963 return PFS_TL_READ;
2964 case TL_READ_WITH_SHARED_LOCKS:
2965 return PFS_TL_READ_WITH_SHARED_LOCKS;
2966 case TL_READ_HIGH_PRIORITY:
2967 return PFS_TL_READ_HIGH_PRIORITY;
2968 case TL_READ_NO_INSERT:
2969 return PFS_TL_READ_NO_INSERT;
2970 case TL_WRITE_ALLOW_WRITE:
2971 return PFS_TL_WRITE_ALLOW_WRITE;
2972 case TL_WRITE_CONCURRENT_INSERT:
2973 return PFS_TL_WRITE_CONCURRENT_INSERT;
2974 case TL_WRITE_LOW_PRIORITY:
2975 return PFS_TL_WRITE_LOW_PRIORITY;
2976 case TL_WRITE:
2977 return PFS_TL_WRITE;
2978
2979 case TL_WRITE_ONLY:
2980 case TL_IGNORE:
2981 case TL_UNLOCK:
2982 case TL_READ_DEFAULT:
2983 case TL_WRITE_DEFAULT:
2984 case TL_WRITE_CONCURRENT_DEFAULT:
2985 default:
2986 assert(false);
2987 }
2988
2989 /* Dead code */
2990 return PFS_TL_READ;
2991 }
2992
external_lock_flags_to_lock_type(uint flags)2993 static inline PFS_TL_LOCK_TYPE external_lock_flags_to_lock_type(uint flags)
2994 {
2995 assert(flags == F_RDLCK || flags == F_WRLCK);
2996 return (flags == F_RDLCK ? PFS_TL_READ_EXTERNAL : PFS_TL_WRITE_EXTERNAL);
2997 }
2998
2999 /**
3000 Implementation of the table instrumentation interface.
3001 @sa PSI_v1::start_table_io_wait_v1
3002 */
3003 PSI_table_locker*
pfs_start_table_io_wait_v1(PSI_table_locker_state * state,PSI_table * table,PSI_table_io_operation op,uint index,const char * src_file,uint src_line)3004 pfs_start_table_io_wait_v1(PSI_table_locker_state *state,
3005 PSI_table *table,
3006 PSI_table_io_operation op,
3007 uint index,
3008 const char *src_file, uint src_line)
3009 {
3010 assert(static_cast<int> (op) >= 0);
3011 assert(static_cast<uint> (op) < array_elements(table_io_operation_map));
3012 assert(state != NULL);
3013 PFS_table *pfs_table= reinterpret_cast<PFS_table*> (table);
3014 assert(pfs_table != NULL);
3015 assert(pfs_table->m_share != NULL);
3016
3017 if (! pfs_table->m_io_enabled)
3018 return NULL;
3019
3020 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
3021
3022 uint flags;
3023 ulonglong timer_start= 0;
3024
3025 if (flag_thread_instrumentation)
3026 {
3027 if (pfs_thread == NULL)
3028 return NULL;
3029 if (! pfs_thread->m_enabled)
3030 return NULL;
3031 state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
3032 flags= STATE_FLAG_THREAD;
3033
3034 if (pfs_table->m_io_timed)
3035 {
3036 timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
3037 state->m_timer_start= timer_start;
3038 flags|= STATE_FLAG_TIMED;
3039 }
3040
3041 if (flag_events_waits_current)
3042 {
3043 if (unlikely(pfs_thread->m_events_waits_current >=
3044 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
3045 {
3046 locker_lost++;
3047 return NULL;
3048 }
3049 PFS_events_waits *wait= pfs_thread->m_events_waits_current;
3050 state->m_wait= wait;
3051 flags|= STATE_FLAG_EVENT;
3052
3053 PFS_events_waits *parent_event= wait - 1;
3054 wait->m_event_type= EVENT_TYPE_WAIT;
3055 wait->m_nesting_event_id= parent_event->m_event_id;
3056 wait->m_nesting_event_type= parent_event->m_event_type;
3057
3058 PFS_table_share *share= pfs_table->m_share;
3059 wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
3060 wait->m_class= &global_table_io_class;
3061 wait->m_timer_start= timer_start;
3062 wait->m_timer_end= 0;
3063 wait->m_object_instance_addr= pfs_table->m_identity;
3064 wait->m_event_id= pfs_thread->m_event_id++;
3065 wait->m_end_event_id= 0;
3066 wait->m_operation= table_io_operation_map[static_cast<int> (op)];
3067 wait->m_flags= 0;
3068 wait->m_object_type= share->get_object_type();
3069 wait->m_weak_table_share= share;
3070 wait->m_weak_version= share->get_version();
3071 wait->m_index= index;
3072 wait->m_source_file= src_file;
3073 wait->m_source_line= src_line;
3074 wait->m_wait_class= WAIT_CLASS_TABLE;
3075
3076 pfs_thread->m_events_waits_current++;
3077 }
3078 }
3079 else
3080 {
3081 if (pfs_table->m_io_timed)
3082 {
3083 timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
3084 state->m_timer_start= timer_start;
3085 flags= STATE_FLAG_TIMED;
3086 }
3087 else
3088 {
3089 /* TODO: consider a shortcut here */
3090 flags= 0;
3091 }
3092 }
3093
3094 state->m_flags= flags;
3095 state->m_table= table;
3096 state->m_io_operation= op;
3097 state->m_index= index;
3098 return reinterpret_cast<PSI_table_locker*> (state);
3099 }
3100
3101 /**
3102 Implementation of the table instrumentation interface.
3103 @sa PSI_v1::start_table_lock_wait.
3104 */
3105 PSI_table_locker*
pfs_start_table_lock_wait_v1(PSI_table_locker_state * state,PSI_table * table,PSI_table_lock_operation op,ulong op_flags,const char * src_file,uint src_line)3106 pfs_start_table_lock_wait_v1(PSI_table_locker_state *state,
3107 PSI_table *table,
3108 PSI_table_lock_operation op,
3109 ulong op_flags,
3110 const char *src_file, uint src_line)
3111 {
3112 assert(state != NULL);
3113 assert((op == PSI_TABLE_LOCK) || (op == PSI_TABLE_EXTERNAL_LOCK));
3114
3115 PFS_table *pfs_table= reinterpret_cast<PFS_table*> (table);
3116
3117 assert(pfs_table != NULL);
3118 assert(pfs_table->m_share != NULL);
3119
3120 if (! pfs_table->m_lock_enabled)
3121 return NULL;
3122
3123 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
3124
3125 PFS_TL_LOCK_TYPE lock_type;
3126
3127 switch (op)
3128 {
3129 case PSI_TABLE_LOCK:
3130 lock_type= lock_flags_to_lock_type(op_flags);
3131 pfs_table->m_internal_lock= lock_type;
3132 break;
3133 case PSI_TABLE_EXTERNAL_LOCK:
3134 /*
3135 See the handler::external_lock() API design,
3136 there is no handler::external_unlock().
3137 */
3138 if (op_flags == F_UNLCK)
3139 {
3140 pfs_table->m_external_lock= PFS_TL_NONE;
3141 return NULL;
3142 }
3143 lock_type= external_lock_flags_to_lock_type(op_flags);
3144 pfs_table->m_external_lock= lock_type;
3145 break;
3146 default:
3147 lock_type= PFS_TL_READ;
3148 assert(false);
3149 }
3150
3151 assert((uint) lock_type < array_elements(table_lock_operation_map));
3152
3153 uint flags;
3154 ulonglong timer_start= 0;
3155
3156 if (flag_thread_instrumentation)
3157 {
3158 if (pfs_thread == NULL)
3159 return NULL;
3160 if (! pfs_thread->m_enabled)
3161 return NULL;
3162 state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
3163 flags= STATE_FLAG_THREAD;
3164
3165 if (pfs_table->m_lock_timed)
3166 {
3167 timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
3168 state->m_timer_start= timer_start;
3169 flags|= STATE_FLAG_TIMED;
3170 }
3171
3172 if (flag_events_waits_current)
3173 {
3174 if (unlikely(pfs_thread->m_events_waits_current >=
3175 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
3176 {
3177 locker_lost++;
3178 return NULL;
3179 }
3180 PFS_events_waits *wait= pfs_thread->m_events_waits_current;
3181 state->m_wait= wait;
3182 flags|= STATE_FLAG_EVENT;
3183
3184 PFS_events_waits *parent_event= wait - 1;
3185 wait->m_event_type= EVENT_TYPE_WAIT;
3186 wait->m_nesting_event_id= parent_event->m_event_id;
3187 wait->m_nesting_event_type= parent_event->m_event_type;
3188
3189 PFS_table_share *share= pfs_table->m_share;
3190 wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
3191 wait->m_class= &global_table_lock_class;
3192 wait->m_timer_start= timer_start;
3193 wait->m_timer_end= 0;
3194 wait->m_object_instance_addr= pfs_table->m_identity;
3195 wait->m_event_id= pfs_thread->m_event_id++;
3196 wait->m_end_event_id= 0;
3197 wait->m_operation= table_lock_operation_map[lock_type];
3198 wait->m_flags= 0;
3199 wait->m_object_type= share->get_object_type();
3200 wait->m_weak_table_share= share;
3201 wait->m_weak_version= share->get_version();
3202 wait->m_index= 0;
3203 wait->m_source_file= src_file;
3204 wait->m_source_line= src_line;
3205 wait->m_wait_class= WAIT_CLASS_TABLE;
3206
3207 pfs_thread->m_events_waits_current++;
3208 }
3209 }
3210 else
3211 {
3212 if (pfs_table->m_lock_timed)
3213 {
3214 timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
3215 state->m_timer_start= timer_start;
3216 flags= STATE_FLAG_TIMED;
3217 }
3218 else
3219 {
3220 /* TODO: consider a shortcut here */
3221 flags= 0;
3222 }
3223 }
3224
3225 state->m_flags= flags;
3226 state->m_table= table;
3227 state->m_index= lock_type;
3228 return reinterpret_cast<PSI_table_locker*> (state);
3229 }
3230
3231 /**
3232 Implementation of the file instrumentation interface.
3233 @sa PSI_v1::get_thread_file_name_locker.
3234 */
3235 PSI_file_locker*
pfs_get_thread_file_name_locker_v1(PSI_file_locker_state * state,PSI_file_key key,PSI_file_operation op,const char * name,const void * identity)3236 pfs_get_thread_file_name_locker_v1(PSI_file_locker_state *state,
3237 PSI_file_key key,
3238 PSI_file_operation op,
3239 const char *name, const void *identity)
3240 {
3241 assert(static_cast<int> (op) >= 0);
3242 assert(static_cast<uint> (op) < array_elements(file_operation_map));
3243 assert(state != NULL);
3244
3245 if (! flag_global_instrumentation)
3246 return NULL;
3247 PFS_file_class *klass= find_file_class(key);
3248 if (unlikely(klass == NULL))
3249 return NULL;
3250 if (! klass->m_enabled)
3251 return NULL;
3252
3253 /* Needed for the LF_HASH */
3254 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
3255 if (unlikely(pfs_thread == NULL))
3256 return NULL;
3257
3258 if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
3259 return NULL;
3260
3261 uint flags;
3262
3263 state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
3264 flags= STATE_FLAG_THREAD;
3265
3266 if (klass->m_timed)
3267 flags|= STATE_FLAG_TIMED;
3268
3269 if (flag_events_waits_current)
3270 {
3271 if (unlikely(pfs_thread->m_events_waits_current >=
3272 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
3273 {
3274 locker_lost++;
3275 return NULL;
3276 }
3277 PFS_events_waits *wait= pfs_thread->m_events_waits_current;
3278 state->m_wait= wait;
3279 flags|= STATE_FLAG_EVENT;
3280
3281 PFS_events_waits *parent_event= wait - 1;
3282 wait->m_event_type= EVENT_TYPE_WAIT;
3283 wait->m_nesting_event_id= parent_event->m_event_id;
3284 wait->m_nesting_event_type= parent_event->m_event_type;
3285
3286 wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
3287 wait->m_class= klass;
3288 wait->m_timer_start= 0;
3289 wait->m_timer_end= 0;
3290 wait->m_object_instance_addr= NULL;
3291 wait->m_weak_file= NULL;
3292 wait->m_weak_version= 0;
3293 wait->m_event_id= pfs_thread->m_event_id++;
3294 wait->m_end_event_id= 0;
3295 wait->m_operation= file_operation_map[static_cast<int> (op)];
3296 wait->m_wait_class= WAIT_CLASS_FILE;
3297
3298 pfs_thread->m_events_waits_current++;
3299 }
3300
3301 state->m_flags= flags;
3302 state->m_file= NULL;
3303 state->m_name= name;
3304 state->m_class= klass;
3305 state->m_operation= op;
3306 return reinterpret_cast<PSI_file_locker*> (state);
3307 }
3308
3309 /**
3310 Implementation of the file instrumentation interface.
3311 @sa PSI_v1::get_thread_file_stream_locker.
3312 */
3313 PSI_file_locker*
pfs_get_thread_file_stream_locker_v1(PSI_file_locker_state * state,PSI_file * file,PSI_file_operation op)3314 pfs_get_thread_file_stream_locker_v1(PSI_file_locker_state *state,
3315 PSI_file *file, PSI_file_operation op)
3316 {
3317 PFS_file *pfs_file= reinterpret_cast<PFS_file*> (file);
3318 assert(static_cast<int> (op) >= 0);
3319 assert(static_cast<uint> (op) < array_elements(file_operation_map));
3320 assert(state != NULL);
3321
3322 if (unlikely(pfs_file == NULL))
3323 return NULL;
3324 assert(pfs_file->m_class != NULL);
3325 PFS_file_class *klass= pfs_file->m_class;
3326
3327 if (! pfs_file->m_enabled)
3328 return NULL;
3329
3330 /* Needed for the LF_HASH */
3331 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
3332 if (unlikely(pfs_thread == NULL))
3333 return NULL;
3334
3335 uint flags;
3336
3337 /* Always populated */
3338 state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
3339
3340 if (flag_thread_instrumentation)
3341 {
3342 if (! pfs_thread->m_enabled)
3343 return NULL;
3344 flags= STATE_FLAG_THREAD;
3345
3346 if (pfs_file->m_timed)
3347 flags|= STATE_FLAG_TIMED;
3348
3349 if (flag_events_waits_current)
3350 {
3351 if (unlikely(pfs_thread->m_events_waits_current >=
3352 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
3353 {
3354 locker_lost++;
3355 return NULL;
3356 }
3357 PFS_events_waits *wait= pfs_thread->m_events_waits_current;
3358 state->m_wait= wait;
3359 flags|= STATE_FLAG_EVENT;
3360
3361 PFS_events_waits *parent_event= wait - 1;
3362 wait->m_event_type= EVENT_TYPE_WAIT;
3363 wait->m_nesting_event_id= parent_event->m_event_id;
3364 wait->m_nesting_event_type= parent_event->m_event_type;
3365
3366 wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
3367 wait->m_class= klass;
3368 wait->m_timer_start= 0;
3369 wait->m_timer_end= 0;
3370 wait->m_object_instance_addr= pfs_file;
3371 wait->m_weak_file= pfs_file;
3372 wait->m_weak_version= pfs_file->get_version();
3373 wait->m_event_id= pfs_thread->m_event_id++;
3374 wait->m_end_event_id= 0;
3375 wait->m_operation= file_operation_map[static_cast<int> (op)];
3376 wait->m_wait_class= WAIT_CLASS_FILE;
3377
3378 pfs_thread->m_events_waits_current++;
3379 }
3380 }
3381 else
3382 {
3383 if (pfs_file->m_timed)
3384 {
3385 flags= STATE_FLAG_TIMED;
3386 }
3387 else
3388 {
3389 /* TODO: consider a shortcut. */
3390 flags= 0;
3391 }
3392 }
3393
3394 state->m_flags= flags;
3395 state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
3396 state->m_operation= op;
3397 state->m_name= NULL;
3398 state->m_class= klass;
3399 return reinterpret_cast<PSI_file_locker*> (state);
3400 }
3401
3402 /**
3403 Implementation of the file instrumentation interface.
3404 @sa PSI_v1::get_thread_file_descriptor_locker.
3405 */
3406 PSI_file_locker*
pfs_get_thread_file_descriptor_locker_v1(PSI_file_locker_state * state,File file,PSI_file_operation op)3407 pfs_get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state,
3408 File file, PSI_file_operation op)
3409 {
3410 int index= static_cast<int> (file);
3411 assert(static_cast<int> (op) >= 0);
3412 assert(static_cast<uint> (op) < array_elements(file_operation_map));
3413 assert(state != NULL);
3414
3415 if (unlikely((index < 0) || (index >= file_handle_max)))
3416 return NULL;
3417
3418 PFS_file *pfs_file= file_handle_array[index];
3419 if (unlikely(pfs_file == NULL))
3420 return NULL;
3421
3422 /*
3423 We are about to close a file by descriptor number,
3424 and the calling code still holds the descriptor.
3425 Cleanup the file descriptor <--> file instrument association.
3426 Remove the instrumentation *before* the close to avoid race
3427 conditions with another thread opening a file
3428 (that could be given the same descriptor).
3429 */
3430 if (op == PSI_FILE_CLOSE)
3431 file_handle_array[index]= NULL;
3432
3433 if (! pfs_file->m_enabled)
3434 return NULL;
3435
3436 assert(pfs_file->m_class != NULL);
3437 PFS_file_class *klass= pfs_file->m_class;
3438
3439 /* Needed for the LF_HASH */
3440 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
3441 if (unlikely(pfs_thread == NULL))
3442 return NULL;
3443
3444 uint flags;
3445
3446 /* Always populated */
3447 state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
3448
3449 if (flag_thread_instrumentation)
3450 {
3451 if (! pfs_thread->m_enabled)
3452 return NULL;
3453 flags= STATE_FLAG_THREAD;
3454
3455 if (pfs_file->m_timed)
3456 flags|= STATE_FLAG_TIMED;
3457
3458 if (flag_events_waits_current)
3459 {
3460 if (unlikely(pfs_thread->m_events_waits_current >=
3461 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
3462 {
3463 locker_lost++;
3464 return NULL;
3465 }
3466 PFS_events_waits *wait= pfs_thread->m_events_waits_current;
3467 state->m_wait= wait;
3468 flags|= STATE_FLAG_EVENT;
3469
3470 PFS_events_waits *parent_event= wait - 1;
3471 wait->m_event_type= EVENT_TYPE_WAIT;
3472 wait->m_nesting_event_id= parent_event->m_event_id;
3473 wait->m_nesting_event_type= parent_event->m_event_type;
3474
3475 wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
3476 wait->m_class= klass;
3477 wait->m_timer_start= 0;
3478 wait->m_timer_end= 0;
3479 wait->m_object_instance_addr= pfs_file;
3480 wait->m_weak_file= pfs_file;
3481 wait->m_weak_version= pfs_file->get_version();
3482 wait->m_event_id= pfs_thread->m_event_id++;
3483 wait->m_end_event_id= 0;
3484 wait->m_operation= file_operation_map[static_cast<int> (op)];
3485 wait->m_wait_class= WAIT_CLASS_FILE;
3486
3487 pfs_thread->m_events_waits_current++;
3488 }
3489 }
3490 else
3491 {
3492 if (pfs_file->m_timed)
3493 {
3494 flags= STATE_FLAG_TIMED;
3495 }
3496 else
3497 {
3498 /* TODO: consider a shortcut. */
3499 flags= 0;
3500 }
3501 }
3502
3503 state->m_flags= flags;
3504 state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
3505 state->m_operation= op;
3506 state->m_name= NULL;
3507 state->m_class= klass;
3508 return reinterpret_cast<PSI_file_locker*> (state);
3509 }
3510
/**
  Implementation of the socket instrumentation interface.
  Build a wait locker for a socket operation, or return NULL
  when this wait should not be instrumented.
  @param state locker state, allocated by the caller, filled in here
  @param socket the instrumented socket
  @param op the socket operation performed
  @param count the number of bytes requested, or 0 if not applicable
  @param src_file caller source file name
  @param src_line caller source line number
  @return a locker, or NULL if the wait is not instrumented
  @sa PSI_v1::start_socket_wait.
*/
PSI_socket_locker*
pfs_start_socket_wait_v1(PSI_socket_locker_state *state,
                         PSI_socket *socket,
                         PSI_socket_operation op,
                         size_t count,
                         const char *src_file, uint src_line)
{
  assert(static_cast<int> (op) >= 0);
  assert(static_cast<uint> (op) < array_elements(socket_operation_map));
  assert(state != NULL);
  PFS_socket *pfs_socket= reinterpret_cast<PFS_socket*> (socket);

  assert(pfs_socket != NULL);
  assert(pfs_socket->m_class != NULL);

  /* Skip if the instrument is disabled, or the socket is in idle state. */
  if (!pfs_socket->m_enabled || pfs_socket->m_idle)
    return NULL;

  uint flags= 0;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    /*
      Do not use pfs_socket->m_thread_owner here,
      as different threads may use concurrently the same socket,
      for example during a KILL.
    */
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();

    if (unlikely(pfs_thread == NULL))
      return NULL;

    if (!pfs_thread->m_enabled)
      return NULL;

    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_socket->m_timed)
    {
      /* Capture the wait start time; the end API computes the duration. */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Refuse to overflow the per-thread wait event stack. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* Link this wait to its parent event, one slot down the stack. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;
      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= pfs_socket->m_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_socket->m_identity;
      /* Weak reference (socket + version), re-validated when the event is read. */
      wait->m_weak_socket= pfs_socket;
      wait->m_weak_version= pfs_socket->get_version();
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      wait->m_operation= socket_operation_map[static_cast<int>(op)];
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_number_of_bytes= count;
      wait->m_wait_class= WAIT_CLASS_SOCKET;

      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_socket->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
    }
    else
    {
      /*
        Even if timing is disabled, end_socket_wait() still needs a locker to
        capture the number of bytes sent or received by the socket operation.
        For operations that do not have a byte count, then just increment the
        event counter and return a NULL locker.
      */
      switch (op)
      {
        case PSI_SOCKET_CONNECT:
        case PSI_SOCKET_CREATE:
        case PSI_SOCKET_BIND:
        case PSI_SOCKET_SEEK:
        case PSI_SOCKET_OPT:
        case PSI_SOCKET_STAT:
        case PSI_SOCKET_SHUTDOWN:
        case PSI_SOCKET_CLOSE:
        case PSI_SOCKET_SELECT:
          pfs_socket->m_socket_stat.m_io_stat.m_misc.aggregate_counted();
          return NULL;
        default:
          break;
      }
    }
  }

  state->m_flags= flags;
  state->m_socket= socket;
  state->m_operation= op;
  return reinterpret_cast<PSI_socket_locker*> (state);
}
3633
3634 /**
3635 Implementation of the mutex instrumentation interface.
3636 @sa PSI_v1::unlock_mutex.
3637 */
pfs_unlock_mutex_v1(PSI_mutex * mutex)3638 void pfs_unlock_mutex_v1(PSI_mutex *mutex)
3639 {
3640 PFS_mutex *pfs_mutex= reinterpret_cast<PFS_mutex*> (mutex);
3641
3642 assert(pfs_mutex != NULL);
3643
3644 /*
3645 Note that this code is still protected by the instrumented mutex,
3646 and therefore is thread safe. See inline_mysql_mutex_unlock().
3647 */
3648
3649 /* Always update the instrumented state */
3650 pfs_mutex->m_owner= NULL;
3651 pfs_mutex->m_last_locked= 0;
3652
3653 #ifdef LATER_WL2333
3654 /*
3655 See WL#2333: SHOW ENGINE ... LOCK STATUS.
3656 PFS_mutex::m_lock_stat is not exposed in user visible tables
3657 currently, so there is no point spending time computing it.
3658 */
3659 if (! pfs_mutex->m_enabled)
3660 return;
3661
3662 if (! pfs_mutex->m_timed)
3663 return;
3664
3665 ulonglong locked_time;
3666 locked_time= get_timer_pico_value(wait_timer) - pfs_mutex->m_last_locked;
3667 pfs_mutex->m_mutex_stat.m_lock_stat.aggregate_value(locked_time);
3668 #endif
3669 }
3670
3671 /**
3672 Implementation of the rwlock instrumentation interface.
3673 @sa PSI_v1::unlock_rwlock.
3674 */
pfs_unlock_rwlock_v1(PSI_rwlock * rwlock)3675 void pfs_unlock_rwlock_v1(PSI_rwlock *rwlock)
3676 {
3677 PFS_rwlock *pfs_rwlock= reinterpret_cast<PFS_rwlock*> (rwlock);
3678 assert(pfs_rwlock != NULL);
3679 assert(pfs_rwlock == sanitize_rwlock(pfs_rwlock));
3680 assert(pfs_rwlock->m_class != NULL);
3681 assert(pfs_rwlock->m_lock.is_populated());
3682
3683 bool last_writer= false;
3684 bool last_reader= false;
3685
3686 /*
3687 Note that this code is still protected by the instrumented rwlock,
3688 and therefore is:
3689 - thread safe for write locks
3690 - almost thread safe for read locks (pfs_rwlock->m_readers is unsafe).
3691 See inline_mysql_rwlock_unlock()
3692 */
3693
3694 /* Always update the instrumented state */
3695 if (pfs_rwlock->m_writer != NULL)
3696 {
3697 /* Nominal case, a writer is unlocking. */
3698 last_writer= true;
3699 pfs_rwlock->m_writer= NULL;
3700 /* Reset the readers stats, they could be off */
3701 pfs_rwlock->m_readers= 0;
3702 }
3703 else if (likely(pfs_rwlock->m_readers > 0))
3704 {
3705 /* Nominal case, a reader is unlocking. */
3706 if (--(pfs_rwlock->m_readers) == 0)
3707 last_reader= true;
3708 }
3709 else
3710 {
3711 /*
3712 Edge case, we have no writer and no readers,
3713 on an unlock event.
3714 This is possible for:
3715 - partial instrumentation
3716 - instrumentation disabled at runtime,
3717 see when get_thread_rwlock_locker_v1() returns NULL
3718 No further action is taken here, the next
3719 write lock will put the statistics is a valid state.
3720 */
3721 }
3722
3723 #ifdef LATER_WL2333
3724 /* See WL#2333: SHOW ENGINE ... LOCK STATUS. */
3725
3726 if (! pfs_rwlock->m_enabled)
3727 return;
3728
3729 if (! pfs_rwlock->m_timed)
3730 return;
3731
3732 ulonglong locked_time;
3733 if (last_writer)
3734 {
3735 locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_written;
3736 pfs_rwlock->m_rwlock_stat.m_write_lock_stat.aggregate_value(locked_time);
3737 }
3738 else if (last_reader)
3739 {
3740 locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_read;
3741 pfs_rwlock->m_rwlock_stat.m_read_lock_stat.aggregate_value(locked_time);
3742 }
3743 #else
3744 (void) last_reader;
3745 (void) last_writer;
3746 #endif
3747 }
3748
3749 /**
3750 Implementation of the cond instrumentation interface.
3751 @sa PSI_v1::signal_cond.
3752 */
pfs_signal_cond_v1(PSI_cond * cond)3753 void pfs_signal_cond_v1(PSI_cond* cond)
3754 {
3755 #ifdef PFS_LATER
3756 PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);
3757
3758 assert(pfs_cond != NULL);
3759
3760 pfs_cond->m_cond_stat.m_signal_count++;
3761 #endif
3762 }
3763
3764 /**
3765 Implementation of the cond instrumentation interface.
3766 @sa PSI_v1::broadcast_cond.
3767 */
pfs_broadcast_cond_v1(PSI_cond * cond)3768 void pfs_broadcast_cond_v1(PSI_cond* cond)
3769 {
3770 #ifdef PFS_LATER
3771 PFS_cond *pfs_cond= reinterpret_cast<PFS_cond*> (cond);
3772
3773 assert(pfs_cond != NULL);
3774
3775 pfs_cond->m_cond_stat.m_broadcast_count++;
3776 #endif
3777 }
3778
3779 /**
3780 Implementation of the idle instrumentation interface.
3781 @sa PSI_v1::start_idle_wait.
3782 */
3783 PSI_idle_locker*
pfs_start_idle_wait_v1(PSI_idle_locker_state * state,const char * src_file,uint src_line)3784 pfs_start_idle_wait_v1(PSI_idle_locker_state* state, const char *src_file, uint src_line)
3785 {
3786 assert(state != NULL);
3787
3788 if (!flag_global_instrumentation)
3789 return NULL;
3790
3791 if (!global_idle_class.m_enabled)
3792 return NULL;
3793
3794 uint flags= 0;
3795 ulonglong timer_start= 0;
3796
3797 if (flag_thread_instrumentation)
3798 {
3799 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
3800 if (unlikely(pfs_thread == NULL))
3801 return NULL;
3802 if (!pfs_thread->m_enabled)
3803 return NULL;
3804 state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
3805 flags= STATE_FLAG_THREAD;
3806
3807 assert(pfs_thread->m_events_statements_count == 0);
3808
3809 if (global_idle_class.m_timed)
3810 {
3811 timer_start= get_timer_raw_value_and_function(idle_timer, &state->m_timer);
3812 state->m_timer_start= timer_start;
3813 flags|= STATE_FLAG_TIMED;
3814 }
3815
3816 if (flag_events_waits_current)
3817 {
3818 if (unlikely(pfs_thread->m_events_waits_current >=
3819 & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
3820 {
3821 locker_lost++;
3822 return NULL;
3823 }
3824 PFS_events_waits *wait= pfs_thread->m_events_waits_current;
3825 state->m_wait= wait;
3826 flags|= STATE_FLAG_EVENT;
3827
3828 wait->m_event_type= EVENT_TYPE_WAIT;
3829 /*
3830 IDLE events are waits, but by definition we know that
3831 such waits happen outside of any STAGE and STATEMENT,
3832 so they have no parents.
3833 */
3834 wait->m_nesting_event_id= 0;
3835 /* no need to set wait->m_nesting_event_type */
3836
3837 wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
3838 wait->m_class= &global_idle_class;
3839 wait->m_timer_start= timer_start;
3840 wait->m_timer_end= 0;
3841 wait->m_event_id= pfs_thread->m_event_id++;
3842 wait->m_end_event_id= 0;
3843 wait->m_operation= OPERATION_TYPE_IDLE;
3844 wait->m_source_file= src_file;
3845 wait->m_source_line= src_line;
3846 wait->m_wait_class= WAIT_CLASS_IDLE;
3847
3848 pfs_thread->m_events_waits_current++;
3849 }
3850 }
3851 else
3852 {
3853 if (global_idle_class.m_timed)
3854 {
3855 timer_start= get_timer_raw_value_and_function(idle_timer, &state->m_timer);
3856 state->m_timer_start= timer_start;
3857 flags= STATE_FLAG_TIMED;
3858 }
3859 }
3860
3861 state->m_flags= flags;
3862 return reinterpret_cast<PSI_idle_locker*> (state);
3863 }
3864
3865 /**
3866 Implementation of the mutex instrumentation interface.
3867 @sa PSI_v1::end_idle_wait.
3868 */
pfs_end_idle_wait_v1(PSI_idle_locker * locker)3869 void pfs_end_idle_wait_v1(PSI_idle_locker* locker)
3870 {
3871 PSI_idle_locker_state *state= reinterpret_cast<PSI_idle_locker_state*> (locker);
3872 assert(state != NULL);
3873 ulonglong timer_end= 0;
3874 ulonglong wait_time= 0;
3875
3876 uint flags= state->m_flags;
3877
3878 if (flags & STATE_FLAG_TIMED)
3879 {
3880 timer_end= state->m_timer();
3881 wait_time= timer_end - state->m_timer_start;
3882 }
3883
3884 if (flags & STATE_FLAG_THREAD)
3885 {
3886 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
3887 PFS_single_stat *event_name_array;
3888 event_name_array= thread->write_instr_class_waits_stats();
3889
3890 if (flags & STATE_FLAG_TIMED)
3891 {
3892 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
3893 event_name_array[GLOBAL_IDLE_EVENT_INDEX].aggregate_value(wait_time);
3894 }
3895 else
3896 {
3897 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
3898 event_name_array[GLOBAL_IDLE_EVENT_INDEX].aggregate_counted();
3899 }
3900
3901 if (flags & STATE_FLAG_EVENT)
3902 {
3903 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
3904 assert(wait != NULL);
3905
3906 wait->m_timer_end= timer_end;
3907 wait->m_end_event_id= thread->m_event_id;
3908 if (thread->m_flag_events_waits_history)
3909 insert_events_waits_history(thread, wait);
3910 if (thread->m_flag_events_waits_history_long)
3911 insert_events_waits_history_long(wait);
3912 thread->m_events_waits_current--;
3913
3914 assert(wait == thread->m_events_waits_current);
3915 }
3916 }
3917
3918 if (flags & STATE_FLAG_TIMED)
3919 {
3920 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (timed) */
3921 global_idle_stat.aggregate_value(wait_time);
3922 }
3923 else
3924 {
3925 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (counted) */
3926 global_idle_stat.aggregate_counted();
3927 }
3928 }
3929
3930 /**
3931 Implementation of the mutex instrumentation interface.
3932 @sa PSI_v1::end_mutex_wait.
3933 */
pfs_end_mutex_wait_v1(PSI_mutex_locker * locker,int rc)3934 void pfs_end_mutex_wait_v1(PSI_mutex_locker* locker, int rc)
3935 {
3936 PSI_mutex_locker_state *state= reinterpret_cast<PSI_mutex_locker_state*> (locker);
3937 assert(state != NULL);
3938
3939 ulonglong timer_end= 0;
3940 ulonglong wait_time= 0;
3941
3942 PFS_mutex *mutex= reinterpret_cast<PFS_mutex *> (state->m_mutex);
3943 assert(mutex != NULL);
3944 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
3945
3946 uint flags= state->m_flags;
3947
3948 if (flags & STATE_FLAG_TIMED)
3949 {
3950 timer_end= state->m_timer();
3951 wait_time= timer_end - state->m_timer_start;
3952 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
3953 mutex->m_mutex_stat.m_wait_stat.aggregate_value(wait_time);
3954 }
3955 else
3956 {
3957 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
3958 mutex->m_mutex_stat.m_wait_stat.aggregate_counted();
3959 }
3960
3961 if (likely(rc == 0))
3962 {
3963 mutex->m_owner= thread;
3964 mutex->m_last_locked= timer_end;
3965 }
3966
3967 if (flags & STATE_FLAG_THREAD)
3968 {
3969 PFS_single_stat *event_name_array;
3970 event_name_array= thread->write_instr_class_waits_stats();
3971 uint index= mutex->m_class->m_event_name_index;
3972
3973 assert(index <= wait_class_max);
3974 assert(sanitize_thread(thread) != NULL);
3975
3976 if (flags & STATE_FLAG_TIMED)
3977 {
3978 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
3979 event_name_array[index].aggregate_value(wait_time);
3980 }
3981 else
3982 {
3983 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
3984 event_name_array[index].aggregate_counted();
3985 }
3986
3987 if (flags & STATE_FLAG_EVENT)
3988 {
3989 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
3990 assert(wait != NULL);
3991
3992 wait->m_timer_end= timer_end;
3993 wait->m_end_event_id= thread->m_event_id;
3994 if (thread->m_flag_events_waits_history)
3995 insert_events_waits_history(thread, wait);
3996 if (thread->m_flag_events_waits_history_long)
3997 insert_events_waits_history_long(wait);
3998 thread->m_events_waits_current--;
3999
4000 assert(wait == thread->m_events_waits_current);
4001 }
4002 }
4003 }
4004
4005 /**
4006 Implementation of the rwlock instrumentation interface.
4007 @sa PSI_v1::end_rwlock_rdwait.
4008 */
pfs_end_rwlock_rdwait_v1(PSI_rwlock_locker * locker,int rc)4009 void pfs_end_rwlock_rdwait_v1(PSI_rwlock_locker* locker, int rc)
4010 {
4011 PSI_rwlock_locker_state *state= reinterpret_cast<PSI_rwlock_locker_state*> (locker);
4012 assert(state != NULL);
4013
4014 ulonglong timer_end= 0;
4015 ulonglong wait_time= 0;
4016
4017 PFS_rwlock *rwlock= reinterpret_cast<PFS_rwlock *> (state->m_rwlock);
4018 assert(rwlock != NULL);
4019
4020 if (state->m_flags & STATE_FLAG_TIMED)
4021 {
4022 timer_end= state->m_timer();
4023 wait_time= timer_end - state->m_timer_start;
4024 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4025 rwlock->m_rwlock_stat.m_wait_stat.aggregate_value(wait_time);
4026 }
4027 else
4028 {
4029 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4030 rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
4031 }
4032
4033 if (rc == 0)
4034 {
4035 /*
4036 Warning:
4037 Multiple threads can execute this section concurrently
4038 (since multiple readers can execute in parallel).
4039 The statistics generated are not safe, which is why they are
4040 just statistics, not facts.
4041 */
4042 if (rwlock->m_readers == 0)
4043 rwlock->m_last_read= timer_end;
4044 rwlock->m_writer= NULL;
4045 rwlock->m_readers++;
4046 }
4047
4048 if (state->m_flags & STATE_FLAG_THREAD)
4049 {
4050 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4051 assert(thread != NULL);
4052
4053 PFS_single_stat *event_name_array;
4054 event_name_array= thread->write_instr_class_waits_stats();
4055 uint index= rwlock->m_class->m_event_name_index;
4056
4057 if (state->m_flags & STATE_FLAG_TIMED)
4058 {
4059 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4060 event_name_array[index].aggregate_value(wait_time);
4061 }
4062 else
4063 {
4064 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4065 event_name_array[index].aggregate_counted();
4066 }
4067
4068 if (state->m_flags & STATE_FLAG_EVENT)
4069 {
4070 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4071 assert(wait != NULL);
4072
4073 wait->m_timer_end= timer_end;
4074 wait->m_end_event_id= thread->m_event_id;
4075 if (thread->m_flag_events_waits_history)
4076 insert_events_waits_history(thread, wait);
4077 if (thread->m_flag_events_waits_history_long)
4078 insert_events_waits_history_long(wait);
4079 thread->m_events_waits_current--;
4080
4081 assert(wait == thread->m_events_waits_current);
4082 }
4083 }
4084 }
4085
4086 /**
4087 Implementation of the rwlock instrumentation interface.
4088 @sa PSI_v1::end_rwlock_wrwait.
4089 */
pfs_end_rwlock_wrwait_v1(PSI_rwlock_locker * locker,int rc)4090 void pfs_end_rwlock_wrwait_v1(PSI_rwlock_locker* locker, int rc)
4091 {
4092 PSI_rwlock_locker_state *state= reinterpret_cast<PSI_rwlock_locker_state*> (locker);
4093 assert(state != NULL);
4094
4095 ulonglong timer_end= 0;
4096 ulonglong wait_time= 0;
4097
4098 PFS_rwlock *rwlock= reinterpret_cast<PFS_rwlock *> (state->m_rwlock);
4099 assert(rwlock != NULL);
4100 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4101
4102 if (state->m_flags & STATE_FLAG_TIMED)
4103 {
4104 timer_end= state->m_timer();
4105 wait_time= timer_end - state->m_timer_start;
4106 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4107 rwlock->m_rwlock_stat.m_wait_stat.aggregate_value(wait_time);
4108 }
4109 else
4110 {
4111 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4112 rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
4113 }
4114
4115 if (likely(rc == 0))
4116 {
4117 /* Thread safe : we are protected by the instrumented rwlock */
4118 rwlock->m_writer= thread;
4119 rwlock->m_last_written= timer_end;
4120
4121 if ((state->m_operation != PSI_RWLOCK_SHAREDEXCLUSIVELOCK) &&
4122 (state->m_operation != PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK))
4123 {
4124 /* Reset the readers stats, they could be off */
4125 rwlock->m_readers= 0;
4126 rwlock->m_last_read= 0;
4127 }
4128 }
4129
4130 if (state->m_flags & STATE_FLAG_THREAD)
4131 {
4132 PFS_single_stat *event_name_array;
4133 event_name_array= thread->write_instr_class_waits_stats();
4134 uint index= rwlock->m_class->m_event_name_index;
4135
4136 if (state->m_flags & STATE_FLAG_TIMED)
4137 {
4138 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4139 event_name_array[index].aggregate_value(wait_time);
4140 }
4141 else
4142 {
4143 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4144 event_name_array[index].aggregate_counted();
4145 }
4146
4147 if (state->m_flags & STATE_FLAG_EVENT)
4148 {
4149 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4150 assert(wait != NULL);
4151
4152 wait->m_timer_end= timer_end;
4153 wait->m_end_event_id= thread->m_event_id;
4154 if (thread->m_flag_events_waits_history)
4155 insert_events_waits_history(thread, wait);
4156 if (thread->m_flag_events_waits_history_long)
4157 insert_events_waits_history_long(wait);
4158 thread->m_events_waits_current--;
4159
4160 assert(wait == thread->m_events_waits_current);
4161 }
4162 }
4163 }
4164
4165 /**
4166 Implementation of the cond instrumentation interface.
4167 @sa PSI_v1::end_cond_wait.
4168 */
pfs_end_cond_wait_v1(PSI_cond_locker * locker,int rc)4169 void pfs_end_cond_wait_v1(PSI_cond_locker* locker, int rc)
4170 {
4171 PSI_cond_locker_state *state= reinterpret_cast<PSI_cond_locker_state*> (locker);
4172 assert(state != NULL);
4173
4174 ulonglong timer_end= 0;
4175 ulonglong wait_time= 0;
4176
4177 PFS_cond *cond= reinterpret_cast<PFS_cond *> (state->m_cond);
4178 /* PFS_mutex *mutex= reinterpret_cast<PFS_mutex *> (state->m_mutex); */
4179
4180 if (state->m_flags & STATE_FLAG_TIMED)
4181 {
4182 timer_end= state->m_timer();
4183 wait_time= timer_end - state->m_timer_start;
4184 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4185 cond->m_cond_stat.m_wait_stat.aggregate_value(wait_time);
4186 }
4187 else
4188 {
4189 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4190 cond->m_cond_stat.m_wait_stat.aggregate_counted();
4191 }
4192
4193 if (state->m_flags & STATE_FLAG_THREAD)
4194 {
4195 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4196 assert(thread != NULL);
4197
4198 PFS_single_stat *event_name_array;
4199 event_name_array= thread->write_instr_class_waits_stats();
4200 uint index= cond->m_class->m_event_name_index;
4201
4202 if (state->m_flags & STATE_FLAG_TIMED)
4203 {
4204 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4205 event_name_array[index].aggregate_value(wait_time);
4206 }
4207 else
4208 {
4209 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4210 event_name_array[index].aggregate_counted();
4211 }
4212
4213 if (state->m_flags & STATE_FLAG_EVENT)
4214 {
4215 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4216 assert(wait != NULL);
4217
4218 wait->m_timer_end= timer_end;
4219 wait->m_end_event_id= thread->m_event_id;
4220 if (thread->m_flag_events_waits_history)
4221 insert_events_waits_history(thread, wait);
4222 if (thread->m_flag_events_waits_history_long)
4223 insert_events_waits_history_long(wait);
4224 thread->m_events_waits_current--;
4225
4226 assert(wait == thread->m_events_waits_current);
4227 }
4228 }
4229 }
4230
4231 /**
4232 Implementation of the table instrumentation interface.
4233 @sa PSI_v1::end_table_io_wait.
4234 */
pfs_end_table_io_wait_v1(PSI_table_locker * locker,ulonglong numrows)4235 void pfs_end_table_io_wait_v1(PSI_table_locker* locker, ulonglong numrows)
4236 {
4237 PSI_table_locker_state *state= reinterpret_cast<PSI_table_locker_state*> (locker);
4238 assert(state != NULL);
4239
4240 ulonglong timer_end= 0;
4241 ulonglong wait_time= 0;
4242
4243 PFS_table *table= reinterpret_cast<PFS_table *> (state->m_table);
4244 assert(table != NULL);
4245
4246 PFS_single_stat *stat;
4247 PFS_table_io_stat *table_io_stat;
4248
4249 assert((state->m_index < table->m_share->m_key_count) ||
4250 (state->m_index == MAX_INDEXES));
4251
4252 table_io_stat= & table->m_table_stat.m_index_stat[state->m_index];
4253 table_io_stat->m_has_data= true;
4254
4255 switch (state->m_io_operation)
4256 {
4257 case PSI_TABLE_FETCH_ROW:
4258 stat= & table_io_stat->m_fetch;
4259 break;
4260 case PSI_TABLE_WRITE_ROW:
4261 stat= & table_io_stat->m_insert;
4262 break;
4263 case PSI_TABLE_UPDATE_ROW:
4264 stat= & table_io_stat->m_update;
4265 break;
4266 case PSI_TABLE_DELETE_ROW:
4267 stat= & table_io_stat->m_delete;
4268 break;
4269 default:
4270 assert(false);
4271 stat= NULL;
4272 break;
4273 }
4274
4275 uint flags= state->m_flags;
4276
4277 if (flags & STATE_FLAG_TIMED)
4278 {
4279 timer_end= state->m_timer();
4280 wait_time= timer_end - state->m_timer_start;
4281 stat->aggregate_many_value(wait_time, numrows);
4282 }
4283 else
4284 {
4285 stat->aggregate_counted(numrows);
4286 }
4287
4288 if (flags & STATE_FLAG_THREAD)
4289 {
4290 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4291 assert(thread != NULL);
4292
4293 PFS_single_stat *event_name_array;
4294 event_name_array= thread->write_instr_class_waits_stats();
4295
4296 /*
4297 Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
4298 (for wait/io/table/sql/handler)
4299 */
4300 if (flags & STATE_FLAG_TIMED)
4301 {
4302 event_name_array[GLOBAL_TABLE_IO_EVENT_INDEX].aggregate_many_value(wait_time, numrows);
4303 }
4304 else
4305 {
4306 event_name_array[GLOBAL_TABLE_IO_EVENT_INDEX].aggregate_counted(numrows);
4307 }
4308
4309 if (flags & STATE_FLAG_EVENT)
4310 {
4311 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4312 assert(wait != NULL);
4313
4314 wait->m_timer_end= timer_end;
4315 wait->m_end_event_id= thread->m_event_id;
4316 wait->m_number_of_bytes= static_cast<size_t>(numrows);
4317 if (thread->m_flag_events_waits_history)
4318 insert_events_waits_history(thread, wait);
4319 if (thread->m_flag_events_waits_history_long)
4320 insert_events_waits_history_long(wait);
4321 thread->m_events_waits_current--;
4322
4323 assert(wait == thread->m_events_waits_current);
4324 }
4325 }
4326
4327 table->m_has_io_stats= true;
4328 }
4329
4330 /**
4331 Implementation of the table instrumentation interface.
4332 @sa PSI_v1::end_table_lock_wait.
4333 */
pfs_end_table_lock_wait_v1(PSI_table_locker * locker)4334 void pfs_end_table_lock_wait_v1(PSI_table_locker* locker)
4335 {
4336 PSI_table_locker_state *state= reinterpret_cast<PSI_table_locker_state*> (locker);
4337 assert(state != NULL);
4338
4339 ulonglong timer_end= 0;
4340 ulonglong wait_time= 0;
4341
4342 PFS_table *table= reinterpret_cast<PFS_table *> (state->m_table);
4343 assert(table != NULL);
4344
4345 PFS_single_stat *stat= & table->m_table_stat.m_lock_stat.m_stat[state->m_index];
4346
4347 uint flags= state->m_flags;
4348
4349 if (flags & STATE_FLAG_TIMED)
4350 {
4351 timer_end= state->m_timer();
4352 wait_time= timer_end - state->m_timer_start;
4353 stat->aggregate_value(wait_time);
4354 }
4355 else
4356 {
4357 stat->aggregate_counted();
4358 }
4359
4360 if (flags & STATE_FLAG_THREAD)
4361 {
4362 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4363 assert(thread != NULL);
4364
4365 PFS_single_stat *event_name_array;
4366 event_name_array= thread->write_instr_class_waits_stats();
4367
4368 /*
4369 Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
4370 (for wait/lock/table/sql/handler)
4371 */
4372 if (flags & STATE_FLAG_TIMED)
4373 {
4374 event_name_array[GLOBAL_TABLE_LOCK_EVENT_INDEX].aggregate_value(wait_time);
4375 }
4376 else
4377 {
4378 event_name_array[GLOBAL_TABLE_LOCK_EVENT_INDEX].aggregate_counted();
4379 }
4380
4381 if (flags & STATE_FLAG_EVENT)
4382 {
4383 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4384 assert(wait != NULL);
4385
4386 wait->m_timer_end= timer_end;
4387 wait->m_end_event_id= thread->m_event_id;
4388 if (thread->m_flag_events_waits_history)
4389 insert_events_waits_history(thread, wait);
4390 if (thread->m_flag_events_waits_history_long)
4391 insert_events_waits_history_long(wait);
4392 thread->m_events_waits_current--;
4393
4394 assert(wait == thread->m_events_waits_current);
4395 }
4396 }
4397
4398 table->m_has_lock_stats= true;
4399 }
4400
4401 void pfs_start_file_wait_v1(PSI_file_locker *locker,
4402 size_t count,
4403 const char *src_file,
4404 uint src_line);
4405
4406 void pfs_end_file_wait_v1(PSI_file_locker *locker,
4407 size_t count);
4408
4409 /**
4410 Implementation of the file instrumentation interface.
4411 @sa PSI_v1::start_file_open_wait.
4412 */
pfs_start_file_open_wait_v1(PSI_file_locker * locker,const char * src_file,uint src_line)4413 void pfs_start_file_open_wait_v1(PSI_file_locker *locker,
4414 const char *src_file,
4415 uint src_line)
4416 {
4417 pfs_start_file_wait_v1(locker, 0, src_file, src_line);
4418
4419 return;
4420 }
4421
4422 /**
4423 Implementation of the file instrumentation interface.
4424 @sa PSI_v1::end_file_open_wait.
4425 */
4426 PSI_file*
pfs_end_file_open_wait_v1(PSI_file_locker * locker,void * result)4427 pfs_end_file_open_wait_v1(PSI_file_locker *locker,
4428 void *result)
4429 {
4430 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4431 assert(state != NULL);
4432
4433 switch (state->m_operation)
4434 {
4435 case PSI_FILE_STAT:
4436 case PSI_FILE_RENAME:
4437 break;
4438 case PSI_FILE_STREAM_OPEN:
4439 case PSI_FILE_CREATE:
4440 case PSI_FILE_OPEN:
4441 if (result != NULL)
4442 {
4443 PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
4444 PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4445 const char *name= state->m_name;
4446 uint len= (uint)strlen(name);
4447 PFS_file *pfs_file= find_or_create_file(thread, klass, name, len, true);
4448 state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
4449 }
4450 break;
4451 default:
4452 assert(false);
4453 break;
4454 }
4455
4456 pfs_end_file_wait_v1(locker, 0);
4457
4458 return state->m_file;
4459 }
4460
4461 /**
4462 Implementation of the file instrumentation interface.
4463 @sa PSI_v1::end_file_open_wait_and_bind_to_descriptor.
4464 */
pfs_end_file_open_wait_and_bind_to_descriptor_v1(PSI_file_locker * locker,File file)4465 void pfs_end_file_open_wait_and_bind_to_descriptor_v1
4466 (PSI_file_locker *locker, File file)
4467 {
4468 PFS_file *pfs_file= NULL;
4469 int index= (int) file;
4470 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4471 assert(state != NULL);
4472
4473 if (index >= 0)
4474 {
4475 PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
4476 PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4477 const char *name= state->m_name;
4478 uint len= (uint)strlen(name);
4479 pfs_file= find_or_create_file(thread, klass, name, len, true);
4480 state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
4481 }
4482
4483 pfs_end_file_wait_v1(locker, 0);
4484
4485 if (likely(index >= 0))
4486 {
4487 if (likely(index < file_handle_max))
4488 file_handle_array[index]= pfs_file;
4489 else
4490 {
4491 if (pfs_file != NULL)
4492 release_file(pfs_file);
4493 file_handle_lost++;
4494 }
4495 }
4496 }
4497
4498 /**
4499 Implementation of the file instrumentation interface.
4500 @sa PSI_v1::end_temp_file_open_wait_and_bind_to_descriptor.
4501 */
pfs_end_temp_file_open_wait_and_bind_to_descriptor_v1(PSI_file_locker * locker,File file,const char * filename)4502 void pfs_end_temp_file_open_wait_and_bind_to_descriptor_v1
4503 (PSI_file_locker *locker, File file, const char *filename)
4504 {
4505 assert(filename != NULL);
4506 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4507 assert(state != NULL);
4508
4509 /* Set filename that was generated during creation of temporary file. */
4510 state->m_name= filename;
4511 pfs_end_file_open_wait_and_bind_to_descriptor_v1(locker, file);
4512
4513 PFS_file *pfs_file= reinterpret_cast<PFS_file *> (state->m_file);
4514 if (pfs_file != NULL)
4515 {
4516 pfs_file->m_temporary= true;
4517 }
4518 }
4519
4520
4521 /**
4522 Implementation of the file instrumentation interface.
4523 @sa PSI_v1::start_file_wait.
4524 */
pfs_start_file_wait_v1(PSI_file_locker * locker,size_t count,const char * src_file,uint src_line)4525 void pfs_start_file_wait_v1(PSI_file_locker *locker,
4526 size_t count,
4527 const char *src_file,
4528 uint src_line)
4529 {
4530 ulonglong timer_start= 0;
4531 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4532 assert(state != NULL);
4533
4534 uint flags= state->m_flags;
4535
4536 if (flags & STATE_FLAG_TIMED)
4537 {
4538 timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
4539 state->m_timer_start= timer_start;
4540 }
4541
4542 if (flags & STATE_FLAG_EVENT)
4543 {
4544 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4545 assert(wait != NULL);
4546
4547 wait->m_timer_start= timer_start;
4548 wait->m_source_file= src_file;
4549 wait->m_source_line= src_line;
4550 wait->m_number_of_bytes= count;
4551 }
4552 }
4553
4554 /**
4555 Implementation of the file instrumentation interface.
4556 @sa PSI_v1::end_file_wait.
4557 */
pfs_end_file_wait_v1(PSI_file_locker * locker,size_t byte_count)4558 void pfs_end_file_wait_v1(PSI_file_locker *locker,
4559 size_t byte_count)
4560 {
4561 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4562 assert(state != NULL);
4563 PFS_file *file= reinterpret_cast<PFS_file *> (state->m_file);
4564 PFS_file_class *klass= reinterpret_cast<PFS_file_class *> (state->m_class);
4565 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4566
4567 ulonglong timer_end= 0;
4568 ulonglong wait_time= 0;
4569 PFS_byte_stat *byte_stat;
4570 uint flags= state->m_flags;
4571 size_t bytes= ((int)byte_count > -1 ? byte_count : 0);
4572
4573 PFS_file_stat *file_stat;
4574
4575 if (file != NULL)
4576 {
4577 file_stat= & file->m_file_stat;
4578 }
4579 else
4580 {
4581 file_stat= & klass->m_file_stat;
4582 }
4583
4584 switch (state->m_operation)
4585 {
4586 /* Group read operations */
4587 case PSI_FILE_READ:
4588 byte_stat= &file_stat->m_io_stat.m_read;
4589 break;
4590 /* Group write operations */
4591 case PSI_FILE_WRITE:
4592 byte_stat= &file_stat->m_io_stat.m_write;
4593 break;
4594 /* Group remaining operations as miscellaneous */
4595 case PSI_FILE_CREATE:
4596 case PSI_FILE_CREATE_TMP:
4597 case PSI_FILE_OPEN:
4598 case PSI_FILE_STREAM_OPEN:
4599 case PSI_FILE_STREAM_CLOSE:
4600 case PSI_FILE_SEEK:
4601 case PSI_FILE_TELL:
4602 case PSI_FILE_FLUSH:
4603 case PSI_FILE_FSTAT:
4604 case PSI_FILE_CHSIZE:
4605 case PSI_FILE_DELETE:
4606 case PSI_FILE_RENAME:
4607 case PSI_FILE_SYNC:
4608 case PSI_FILE_STAT:
4609 case PSI_FILE_CLOSE:
4610 byte_stat= &file_stat->m_io_stat.m_misc;
4611 break;
4612 default:
4613 assert(false);
4614 byte_stat= NULL;
4615 break;
4616 }
4617
4618 /* Aggregation for EVENTS_WAITS_SUMMARY_BY_INSTANCE */
4619 if (flags & STATE_FLAG_TIMED)
4620 {
4621 timer_end= state->m_timer();
4622 wait_time= timer_end - state->m_timer_start;
4623 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
4624 byte_stat->aggregate(wait_time, bytes);
4625 }
4626 else
4627 {
4628 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
4629 byte_stat->aggregate_counted(bytes);
4630 }
4631
4632 if (flags & STATE_FLAG_THREAD)
4633 {
4634 assert(thread != NULL);
4635
4636 PFS_single_stat *event_name_array;
4637 event_name_array= thread->write_instr_class_waits_stats();
4638 uint index= klass->m_event_name_index;
4639
4640 if (flags & STATE_FLAG_TIMED)
4641 {
4642 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4643 event_name_array[index].aggregate_value(wait_time);
4644 }
4645 else
4646 {
4647 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4648 event_name_array[index].aggregate_counted();
4649 }
4650
4651 if (state->m_flags & STATE_FLAG_EVENT)
4652 {
4653 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
4654 assert(wait != NULL);
4655
4656 wait->m_timer_end= timer_end;
4657 wait->m_number_of_bytes= bytes;
4658 wait->m_end_event_id= thread->m_event_id;
4659 wait->m_object_instance_addr= file;
4660 wait->m_weak_file= file;
4661 wait->m_weak_version= (file ? file->get_version() : 0);
4662
4663 if (thread->m_flag_events_waits_history)
4664 insert_events_waits_history(thread, wait);
4665 if (thread->m_flag_events_waits_history_long)
4666 insert_events_waits_history_long(wait);
4667 thread->m_events_waits_current--;
4668
4669 assert(wait == thread->m_events_waits_current);
4670 }
4671 }
4672 }
4673
4674 /**
4675 Implementation of the file instrumentation interface.
4676 @sa PSI_v1::start_file_close_wait.
4677 */
pfs_start_file_close_wait_v1(PSI_file_locker * locker,const char * src_file,uint src_line)4678 void pfs_start_file_close_wait_v1(PSI_file_locker *locker,
4679 const char *src_file,
4680 uint src_line)
4681 {
4682 PFS_thread *thread;
4683 const char *name;
4684 uint len;
4685 PFS_file *pfs_file;
4686 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4687 assert(state != NULL);
4688
4689 switch (state->m_operation)
4690 {
4691 case PSI_FILE_DELETE:
4692 thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4693 name= state->m_name;
4694 len= (uint)strlen(name);
4695 pfs_file= find_or_create_file(thread, NULL, name, len, false);
4696 state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
4697 break;
4698 case PSI_FILE_STREAM_CLOSE:
4699 case PSI_FILE_CLOSE:
4700 break;
4701 default:
4702 assert(false);
4703 break;
4704 }
4705
4706 pfs_start_file_wait_v1(locker, 0, src_file, src_line);
4707
4708 return;
4709 }
4710
4711 /**
4712 Implementation of the file instrumentation interface.
4713 @sa PSI_v1::end_file_close_wait.
4714 */
pfs_end_file_close_wait_v1(PSI_file_locker * locker,int rc)4715 void pfs_end_file_close_wait_v1(PSI_file_locker *locker, int rc)
4716 {
4717 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4718 assert(state != NULL);
4719
4720 pfs_end_file_wait_v1(locker, 0);
4721
4722 if (rc == 0)
4723 {
4724 PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
4725 PFS_file *file= reinterpret_cast<PFS_file*> (state->m_file);
4726
4727 /* Release or destroy the file if necessary */
4728 switch(state->m_operation)
4729 {
4730 case PSI_FILE_CLOSE:
4731 if (file != NULL)
4732 {
4733 if (file->m_temporary)
4734 {
4735 assert(file->m_file_stat.m_open_count <= 1);
4736 destroy_file(thread, file);
4737 }
4738 else
4739 release_file(file);
4740 }
4741 break;
4742 case PSI_FILE_STREAM_CLOSE:
4743 if (file != NULL)
4744 release_file(file);
4745 break;
4746 case PSI_FILE_DELETE:
4747 if (file != NULL)
4748 destroy_file(thread, file);
4749 break;
4750 default:
4751 assert(false);
4752 break;
4753 }
4754 }
4755 return;
4756 }
4757
4758 /**
4759 Implementation of the file instrumentation interface.
4760 @sa PSI_v1::end_file_rename_wait.
4761 */
pfs_end_file_rename_wait_v1(PSI_file_locker * locker,const char * old_name,const char * new_name,int rc)4762 void pfs_end_file_rename_wait_v1(PSI_file_locker *locker, const char *old_name,
4763 const char *new_name, int rc)
4764 {
4765 PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
4766 assert(state != NULL);
4767 assert(state->m_operation == PSI_FILE_RENAME);
4768
4769 if (rc == 0)
4770 {
4771 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
4772
4773 uint old_len= (uint)strlen(old_name);
4774 uint new_len= (uint)strlen(new_name);
4775
4776 find_and_rename_file(thread, old_name, old_len, new_name, new_len);
4777 }
4778
4779 pfs_end_file_wait_v1(locker, 0);
4780 return;
4781 }
4782
4783 PSI_stage_progress*
pfs_start_stage_v1(PSI_stage_key key,const char * src_file,int src_line)4784 pfs_start_stage_v1(PSI_stage_key key, const char *src_file, int src_line)
4785 {
4786 ulonglong timer_value= 0;
4787
4788 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
4789 if (unlikely(pfs_thread == NULL))
4790 return NULL;
4791
4792 /* Always update column threads.processlist_state. */
4793 pfs_thread->m_stage= key;
4794 /* Default value when the stage is not instrumented for progress */
4795 pfs_thread->m_stage_progress= NULL;
4796
4797 if (! flag_global_instrumentation)
4798 return NULL;
4799
4800 if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
4801 return NULL;
4802
4803 PFS_events_stages *pfs= & pfs_thread->m_stage_current;
4804 PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
4805 PFS_events_statements *parent_statement= & pfs_thread->m_statement_stack[0];
4806
4807 PFS_instr_class *old_class= pfs->m_class;
4808 if (old_class != NULL)
4809 {
4810 PFS_stage_stat *event_name_array;
4811 event_name_array= pfs_thread->write_instr_class_stages_stats();
4812 uint index= old_class->m_event_name_index;
4813
4814 /* Finish old event */
4815 if (old_class->m_timed)
4816 {
4817 timer_value= get_timer_raw_value(stage_timer);;
4818 pfs->m_timer_end= timer_value;
4819
4820 /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4821 ulonglong stage_time= timer_value - pfs->m_timer_start;
4822 event_name_array[index].aggregate_value(stage_time);
4823 }
4824 else
4825 {
4826 /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4827 event_name_array[index].aggregate_counted();
4828 }
4829
4830 if (flag_events_stages_current)
4831 {
4832 pfs->m_end_event_id= pfs_thread->m_event_id;
4833 if (pfs_thread->m_flag_events_stages_history)
4834 insert_events_stages_history(pfs_thread, pfs);
4835 if (pfs_thread->m_flag_events_stages_history_long)
4836 insert_events_stages_history_long(pfs);
4837 }
4838
4839 /* This stage event is now complete. */
4840 pfs->m_class= NULL;
4841
4842 /* New waits will now be attached directly to the parent statement. */
4843 child_wait->m_event_id= parent_statement->m_event_id;
4844 child_wait->m_event_type= parent_statement->m_event_type;
4845 /* See below for new stages, that may overwrite this. */
4846 }
4847
4848 /* Start new event */
4849
4850 PFS_stage_class *new_klass= find_stage_class(key);
4851 if (unlikely(new_klass == NULL))
4852 return NULL;
4853
4854 if (! new_klass->m_enabled)
4855 return NULL;
4856
4857 pfs->m_class= new_klass;
4858 if (new_klass->m_timed)
4859 {
4860 /*
4861 Do not call the timer again if we have a
4862 TIMER_END for the previous stage already.
4863 */
4864 if (timer_value == 0)
4865 timer_value= get_timer_raw_value(stage_timer);
4866 pfs->m_timer_start= timer_value;
4867 }
4868 else
4869 pfs->m_timer_start= 0;
4870 pfs->m_timer_end= 0;
4871
4872 if (flag_events_stages_current)
4873 {
4874 pfs->m_thread_internal_id= pfs_thread->m_thread_internal_id;
4875 pfs->m_event_id= pfs_thread->m_event_id++;
4876 pfs->m_end_event_id= 0;
4877 pfs->m_source_file= src_file;
4878 pfs->m_source_line= src_line;
4879
4880 /* New wait events will have this new stage as parent. */
4881 child_wait->m_event_id= pfs->m_event_id;
4882 child_wait->m_event_type= EVENT_TYPE_STAGE;
4883 }
4884
4885 if (new_klass->is_progress())
4886 {
4887 pfs_thread->m_stage_progress= & pfs->m_progress;
4888 pfs->m_progress.m_work_completed= 0;
4889 pfs->m_progress.m_work_estimated= 0;
4890 }
4891
4892 return pfs_thread->m_stage_progress;
4893 }
4894
4895 PSI_stage_progress*
pfs_get_current_stage_progress_v1()4896 pfs_get_current_stage_progress_v1()
4897 {
4898 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
4899 if (unlikely(pfs_thread == NULL))
4900 return NULL;
4901
4902 return pfs_thread->m_stage_progress;
4903 }
4904
pfs_end_stage_v1()4905 void pfs_end_stage_v1()
4906 {
4907 ulonglong timer_value= 0;
4908
4909 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
4910 if (unlikely(pfs_thread == NULL))
4911 return;
4912
4913 pfs_thread->m_stage= 0;
4914 pfs_thread->m_stage_progress= NULL;
4915
4916 if (! flag_global_instrumentation)
4917 return;
4918
4919 if (flag_thread_instrumentation && ! pfs_thread->m_enabled)
4920 return;
4921
4922 PFS_events_stages *pfs= & pfs_thread->m_stage_current;
4923
4924 PFS_instr_class *old_class= pfs->m_class;
4925 if (old_class != NULL)
4926 {
4927 PFS_stage_stat *event_name_array;
4928 event_name_array= pfs_thread->write_instr_class_stages_stats();
4929 uint index= old_class->m_event_name_index;
4930
4931 /* Finish old event */
4932 if (old_class->m_timed)
4933 {
4934 timer_value= get_timer_raw_value(stage_timer);;
4935 pfs->m_timer_end= timer_value;
4936
4937 /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
4938 ulonglong stage_time= timer_value - pfs->m_timer_start;
4939 event_name_array[index].aggregate_value(stage_time);
4940 }
4941 else
4942 {
4943 /* Aggregate to EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
4944 event_name_array[index].aggregate_counted();
4945 }
4946
4947 if (flag_events_stages_current)
4948 {
4949 pfs->m_end_event_id= pfs_thread->m_event_id;
4950 if (pfs_thread->m_flag_events_stages_history)
4951 insert_events_stages_history(pfs_thread, pfs);
4952 if (pfs_thread->m_flag_events_stages_history_long)
4953 insert_events_stages_history_long(pfs);
4954 }
4955
4956 /* New waits will now be attached directly to the parent statement. */
4957 PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
4958 PFS_events_statements *parent_statement= & pfs_thread->m_statement_stack[0];
4959 child_wait->m_event_id= parent_statement->m_event_id;
4960 child_wait->m_event_type= parent_statement->m_event_type;
4961
4962 /* This stage is completed */
4963 pfs->m_class= NULL;
4964 }
4965 }
4966
/**
  Implementation of the statement instrumentation interface.
  @sa PSI_v1::get_thread_statement_locker.
  Set up a locker for a new statement, honoring the global, per-thread
  and per-instrument enabled/timed flags. When statement events are
  instrumented, a new row is pushed on the calling thread's statement
  stack and becomes the parent of subsequent stage and wait events.
  @param state    locker state to initialize (owned by the caller)
  @param key      instrument key of the statement class
  @param charset  character set of the statement text (CHARSET_INFO*)
  @param sp_share parent stored program share, or NULL
  @return the locker, or NULL when the statement is not instrumented
*/
PSI_statement_locker*
pfs_get_thread_statement_locker_v1(PSI_statement_locker_state *state,
                                   PSI_statement_key key,
                                   const void *charset, PSI_sp_share *sp_share)
{
  assert(state != NULL);
  assert(charset != NULL);
  if (! flag_global_instrumentation)
    return NULL;
  PFS_statement_class *klass= find_statement_class(key);
  if (unlikely(klass == NULL))
    return NULL;
  if (! klass->m_enabled)
    return NULL;

  uint flags;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (klass->m_timed)
      flags|= STATE_FLAG_TIMED;

    if (flag_events_statements_current)
    {
      ulonglong event_id= pfs_thread->m_event_id++;

      /* Refuse to nest deeper than the statement stack allows. */
      if (pfs_thread->m_events_statements_count >= statement_stack_max)
      {
        nested_statement_lost++;
        return NULL;
      }

      /*
        Mark the statement stack dirty while the new row is initialized,
        so concurrent readers of EVENTS_STATEMENTS_CURRENT skip it.
      */
      pfs_dirty_state dirty_state;
      pfs_thread->m_stmt_lock.allocated_to_dirty(& dirty_state);
      PFS_events_statements *pfs= & pfs_thread->m_statement_stack[pfs_thread->m_events_statements_count];
      pfs->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      pfs->m_event_id= event_id;
      pfs->m_event_type= EVENT_TYPE_STATEMENT;
      pfs->m_end_event_id= 0;
      pfs->m_class= klass;
      pfs->m_timer_start= 0;
      pfs->m_timer_end= 0;
      pfs->m_lock_time= 0;
      pfs->m_current_schema_name_length= 0;
      pfs->m_sqltext_length= 0;
      pfs->m_sqltext_truncated= false;
      pfs->m_sqltext_cs_number= system_charset_info->number; /* default */

      /* Reset the diagnostics columns of the event row. */
      pfs->m_message_text[0]= '\0';
      pfs->m_sql_errno= 0;
      pfs->m_sqlstate[0]= '\0';
      pfs->m_error_count= 0;
      pfs->m_warning_count= 0;
      pfs->m_rows_affected= 0;

      /* Reset the optimizer/executor counters of the event row. */
      pfs->m_rows_sent= 0;
      pfs->m_rows_examined= 0;
      pfs->m_created_tmp_disk_tables= 0;
      pfs->m_created_tmp_tables= 0;
      pfs->m_select_full_join= 0;
      pfs->m_select_full_range_join= 0;
      pfs->m_select_range= 0;
      pfs->m_select_range_check= 0;
      pfs->m_select_scan= 0;
      pfs->m_sort_merge_passes= 0;
      pfs->m_sort_range= 0;
      pfs->m_sort_rows= 0;
      pfs->m_sort_scan= 0;
      pfs->m_no_index_used= 0;
      pfs->m_no_good_index_used= 0;
      pfs->m_digest_storage.reset();

      /* New stages will have this statement as parent */
      PFS_events_stages *child_stage= & pfs_thread->m_stage_current;
      child_stage->m_nesting_event_id= event_id;
      child_stage->m_nesting_event_type= EVENT_TYPE_STATEMENT;

      /* New waits will have this statement as parent, if no stage is instrumented */
      PFS_events_waits *child_wait= & pfs_thread->m_events_waits_stack[0];
      child_wait->m_event_id= event_id;
      child_wait->m_event_type= EVENT_TYPE_STATEMENT;

      /*
        Compute this event's parent: the previous statement on the stack
        if nested, possibly superseded by an active transaction that
        started more recently.
      */
      PFS_events_statements *parent_statement= NULL;
      PFS_events_transactions *parent_transaction= &pfs_thread->m_transaction_current;
      ulonglong parent_event= 0;
      enum_event_type parent_type= EVENT_TYPE_STATEMENT;
      uint parent_level= 0;

      if (pfs_thread->m_events_statements_count > 0)
      {
        parent_statement= pfs - 1;
        parent_event= parent_statement->m_event_id;
        parent_type= parent_statement->m_event_type;
        parent_level= parent_statement->m_nesting_event_level + 1;
      }

      if (parent_transaction->m_state == TRANS_STATE_ACTIVE &&
          parent_transaction->m_event_id > parent_event)
      {
        parent_event= parent_transaction->m_event_id;
        parent_type= parent_transaction->m_event_type;
      }

      pfs->m_nesting_event_id= parent_event;
      pfs->m_nesting_event_type= parent_type;
      pfs->m_nesting_event_level= parent_level;

      /* Set parent Stored Procedure information for this statement. */
      if(sp_share)
      {
        PFS_program *parent_sp= reinterpret_cast<PFS_program*>(sp_share);
        pfs->m_sp_type= parent_sp->m_type;
        memcpy(pfs->m_schema_name, parent_sp->m_schema_name,
               parent_sp->m_schema_name_length);
        pfs->m_schema_name_length= parent_sp->m_schema_name_length;
        memcpy(pfs->m_object_name, parent_sp->m_object_name,
               parent_sp->m_object_name_length);
        pfs->m_object_name_length= parent_sp->m_object_name_length;
      }
      else
      {
        pfs->m_sp_type= NO_OBJECT_TYPE;
        pfs->m_schema_name_length= 0;
        pfs->m_object_name_length= 0;
      }

      state->m_statement= pfs;
      flags|= STATE_FLAG_EVENT;

      /* Publish the row: push it and mark the stack clean again. */
      pfs_thread->m_events_statements_count++;
      pfs_thread->m_stmt_lock.dirty_to_allocated(& dirty_state);
    }
    else
    {
      state->m_statement= NULL;
    }
  }
  else
  {
    state->m_statement= NULL;

    if (klass->m_timed)
      flags= STATE_FLAG_TIMED;
    else
      flags= 0;
  }

  if (flag_statements_digest)
  {
    flags|= STATE_FLAG_DIGEST;
  }

  state->m_discarded= false;
  state->m_class= klass;
  state->m_flags= flags;

  /* Reset the per-execution counters accumulated in the locker state. */
  state->m_lock_time= 0;
  state->m_rows_sent= 0;
  state->m_rows_examined= 0;
  state->m_created_tmp_disk_tables= 0;
  state->m_created_tmp_tables= 0;
  state->m_select_full_join= 0;
  state->m_select_full_range_join= 0;
  state->m_select_range= 0;
  state->m_select_range_check= 0;
  state->m_select_scan= 0;
  state->m_sort_merge_passes= 0;
  state->m_sort_range= 0;
  state->m_sort_rows= 0;
  state->m_sort_scan= 0;
  state->m_no_index_used= 0;
  state->m_no_good_index_used= 0;

  state->m_digest= NULL;
  state->m_cs_number= ((CHARSET_INFO *)charset)->number;

  state->m_schema_name_length= 0;
  state->m_parent_sp_share= sp_share;
  state->m_parent_prepared_stmt= NULL;

  return reinterpret_cast<PSI_statement_locker*> (state);
}
5157
5158 PSI_statement_locker*
pfs_refine_statement_v1(PSI_statement_locker * locker,PSI_statement_key key)5159 pfs_refine_statement_v1(PSI_statement_locker *locker,
5160 PSI_statement_key key)
5161 {
5162 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
5163 if (state == NULL)
5164 return NULL;
5165 assert(state->m_class != NULL);
5166 PFS_statement_class *klass;
5167 /* Only refine statements for mutable instrumentation */
5168 klass= reinterpret_cast<PFS_statement_class*> (state->m_class);
5169 assert(klass->is_mutable());
5170 klass= find_statement_class(key);
5171
5172 uint flags= state->m_flags;
5173
5174 if (unlikely(klass == NULL) || !klass->m_enabled)
5175 {
5176 /* pop statement stack */
5177 if (flags & STATE_FLAG_THREAD)
5178 {
5179 PFS_thread *pfs_thread= reinterpret_cast<PFS_thread *> (state->m_thread);
5180 assert(pfs_thread != NULL);
5181 if (pfs_thread->m_events_statements_count > 0)
5182 pfs_thread->m_events_statements_count--;
5183 }
5184
5185 state->m_discarded= true;
5186 return NULL;
5187 }
5188
5189 if ((flags & STATE_FLAG_TIMED) && ! klass->m_timed)
5190 flags= flags & ~STATE_FLAG_TIMED;
5191
5192 if (flags & STATE_FLAG_EVENT)
5193 {
5194 PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
5195 assert(pfs != NULL);
5196
5197 /* mutate EVENTS_STATEMENTS_CURRENT.EVENT_NAME */
5198 pfs->m_class= klass;
5199 }
5200
5201 state->m_class= klass;
5202 state->m_flags= flags;
5203 return reinterpret_cast<PSI_statement_locker*> (state);
5204 }
5205
pfs_start_statement_v1(PSI_statement_locker * locker,const char * db,uint db_len,const char * src_file,uint src_line)5206 void pfs_start_statement_v1(PSI_statement_locker *locker,
5207 const char *db, uint db_len,
5208 const char *src_file, uint src_line)
5209 {
5210 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
5211 assert(state != NULL);
5212
5213 uint flags= state->m_flags;
5214 ulonglong timer_start= 0;
5215
5216 if (flags & STATE_FLAG_TIMED)
5217 {
5218 timer_start= get_timer_raw_value_and_function(statement_timer, & state->m_timer);
5219 state->m_timer_start= timer_start;
5220 }
5221
5222 compile_time_assert(PSI_SCHEMA_NAME_LEN == NAME_LEN);
5223 assert(db_len <= sizeof(state->m_schema_name));
5224
5225 if (db_len > 0)
5226 memcpy(state->m_schema_name, db, db_len);
5227 state->m_schema_name_length= db_len;
5228
5229 if (flags & STATE_FLAG_EVENT)
5230 {
5231 PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
5232 assert(pfs != NULL);
5233
5234 pfs->m_timer_start= timer_start;
5235 pfs->m_source_file= src_file;
5236 pfs->m_source_line= src_line;
5237
5238 assert(db_len <= sizeof(pfs->m_current_schema_name));
5239 if (db_len > 0)
5240 memcpy(pfs->m_current_schema_name, db, db_len);
5241 pfs->m_current_schema_name_length= db_len;
5242 }
5243 }
5244
pfs_set_statement_text_v1(PSI_statement_locker * locker,const char * text,uint text_len)5245 void pfs_set_statement_text_v1(PSI_statement_locker *locker,
5246 const char *text, uint text_len)
5247 {
5248 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
5249 assert(state != NULL);
5250
5251 if (state->m_discarded)
5252 return;
5253
5254 if (state->m_flags & STATE_FLAG_EVENT)
5255 {
5256 PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
5257 assert(pfs != NULL);
5258 if (text_len > pfs_max_sqltext)
5259 {
5260 text_len= (uint)pfs_max_sqltext;
5261 pfs->m_sqltext_truncated= true;
5262 }
5263 if (text_len)
5264 memcpy(pfs->m_sqltext, text, text_len);
5265 pfs->m_sqltext_length= text_len;
5266 pfs->m_sqltext_cs_number= state->m_cs_number;
5267 }
5268
5269 return;
5270 }
5271
/**
  Shared body of the pfs_set_statement_*_v1 functions:
  assign VALUE to member ATTR of the locker state and, when a full
  statement event is instrumented (STATE_FLAG_EVENT), to the same
  member of the EVENTS_STATEMENTS_CURRENT row.
  Returns silently for a NULL or discarded locker.
*/
#define SET_STATEMENT_ATTR_BODY(LOCKER, ATTR, VALUE) \
  PSI_statement_locker_state *state; \
  state= reinterpret_cast<PSI_statement_locker_state*> (LOCKER); \
  if (unlikely(state == NULL)) \
    return; \
  if (state->m_discarded) \
    return; \
  state->ATTR= VALUE; \
  if (state->m_flags & STATE_FLAG_EVENT) \
  { \
    PFS_events_statements *pfs; \
    pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement); \
    assert(pfs != NULL); \
    pfs->ATTR= VALUE; \
  } \
  return;
5288
/**
  Shared body of the pfs_inc_statement_*_v1 functions:
  add VALUE to member ATTR of the locker state and, when a full
  statement event is instrumented (STATE_FLAG_EVENT), to the same
  member of the EVENTS_STATEMENTS_CURRENT row.
  Returns silently for a NULL or discarded locker.
*/
#define INC_STATEMENT_ATTR_BODY(LOCKER, ATTR, VALUE) \
  PSI_statement_locker_state *state; \
  state= reinterpret_cast<PSI_statement_locker_state*> (LOCKER); \
  if (unlikely(state == NULL)) \
    return; \
  if (state->m_discarded) \
    return; \
  state->ATTR+= VALUE; \
  if (state->m_flags & STATE_FLAG_EVENT) \
  { \
    PFS_events_statements *pfs; \
    pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement); \
    assert(pfs != NULL); \
    pfs->ATTR+= VALUE; \
  } \
  return;
5305
/** Set the statement's lock time on the locker state and current event row. */
void pfs_set_statement_lock_time_v1(PSI_statement_locker *locker,
                                    ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_lock_time, count);
}
5311
/** Set the statement's rows-sent count on the locker state and current event row. */
void pfs_set_statement_rows_sent_v1(PSI_statement_locker *locker,
                                    ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_rows_sent, count);
}
5317
/** Set the statement's rows-examined count on the locker state and current event row. */
void pfs_set_statement_rows_examined_v1(PSI_statement_locker *locker,
                                        ulonglong count)
{
  SET_STATEMENT_ATTR_BODY(locker, m_rows_examined, count);
}
5323
/** Add to the statement's created-tmp-disk-tables counter (state and event row). */
void pfs_inc_statement_created_tmp_disk_tables_v1(PSI_statement_locker *locker,
                                                  ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_created_tmp_disk_tables, count);
}
5329
/** Add to the statement's created-tmp-tables counter (state and event row). */
void pfs_inc_statement_created_tmp_tables_v1(PSI_statement_locker *locker,
                                             ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_created_tmp_tables, count);
}
5335
/** Add to the statement's select-full-join counter (state and event row). */
void pfs_inc_statement_select_full_join_v1(PSI_statement_locker *locker,
                                           ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_full_join, count);
}
5341
/** Add to the statement's select-full-range-join counter (state and event row). */
void pfs_inc_statement_select_full_range_join_v1(PSI_statement_locker *locker,
                                                 ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_full_range_join, count);
}
5347
/** Add to the statement's select-range counter (state and event row). */
void pfs_inc_statement_select_range_v1(PSI_statement_locker *locker,
                                       ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_range, count);
}
5353
/** Add to the statement's select-range-check counter (state and event row). */
void pfs_inc_statement_select_range_check_v1(PSI_statement_locker *locker,
                                             ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_range_check, count);
}
5359
/** Add to the statement's select-scan counter (state and event row). */
void pfs_inc_statement_select_scan_v1(PSI_statement_locker *locker,
                                      ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_select_scan, count);
}
5365
/** Add to the statement's sort-merge-passes counter (state and event row). */
void pfs_inc_statement_sort_merge_passes_v1(PSI_statement_locker *locker,
                                            ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_merge_passes, count);
}
5371
/** Add to the statement's sort-range counter (state and event row). */
void pfs_inc_statement_sort_range_v1(PSI_statement_locker *locker,
                                     ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_range, count);
}
5377
/** Add to the statement's sort-rows counter (state and event row). */
void pfs_inc_statement_sort_rows_v1(PSI_statement_locker *locker,
                                    ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_rows, count);
}
5383
/** Add to the statement's sort-scan counter (state and event row). */
void pfs_inc_statement_sort_scan_v1(PSI_statement_locker *locker,
                                    ulong count)
{
  INC_STATEMENT_ATTR_BODY(locker, m_sort_scan, count);
}
5389
/** Flag the statement as having used no index (state and event row). */
void pfs_set_statement_no_index_used_v1(PSI_statement_locker *locker)
{
  SET_STATEMENT_ATTR_BODY(locker, m_no_index_used, 1);
}
5394
/** Flag the statement as having used no good index (state and event row). */
void pfs_set_statement_no_good_index_used_v1(PSI_statement_locker *locker)
{
  SET_STATEMENT_ATTR_BODY(locker, m_no_good_index_used, 1);
}
5399
pfs_end_statement_v1(PSI_statement_locker * locker,void * stmt_da)5400 void pfs_end_statement_v1(PSI_statement_locker *locker, void *stmt_da)
5401 {
5402 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
5403 Diagnostics_area *da= reinterpret_cast<Diagnostics_area*> (stmt_da);
5404 assert(state != NULL);
5405 assert(da != NULL);
5406
5407 if (state->m_discarded)
5408 return;
5409
5410 PFS_statement_class *klass= reinterpret_cast<PFS_statement_class *> (state->m_class);
5411 assert(klass != NULL);
5412
5413 ulonglong timer_end= 0;
5414 ulonglong wait_time= 0;
5415 uint flags= state->m_flags;
5416
5417 if (flags & STATE_FLAG_TIMED)
5418 {
5419 timer_end= state->m_timer();
5420 wait_time= timer_end - state->m_timer_start;
5421 }
5422
5423 PFS_statement_stat *event_name_array;
5424 uint index= klass->m_event_name_index;
5425 PFS_statement_stat *stat;
5426
5427 /*
5428 Capture statement stats by digest.
5429 */
5430 const sql_digest_storage *digest_storage= NULL;
5431 PFS_statement_stat *digest_stat= NULL;
5432 PFS_program *pfs_program= NULL;
5433 PFS_prepared_stmt *pfs_prepared_stmt= NULL;
5434
5435 if (flags & STATE_FLAG_THREAD)
5436 {
5437 PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
5438 assert(thread != NULL);
5439 event_name_array= thread->write_instr_class_statements_stats();
5440 /* Aggregate to EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME */
5441 stat= & event_name_array[index];
5442
5443 if (flags & STATE_FLAG_DIGEST)
5444 {
5445 digest_storage= state->m_digest;
5446
5447 if (digest_storage != NULL)
5448 {
5449 /* Populate PFS_statements_digest_stat with computed digest information.*/
5450 digest_stat= find_or_create_digest(thread, digest_storage,
5451 state->m_schema_name,
5452 state->m_schema_name_length);
5453 }
5454 }
5455
5456 if (flags & STATE_FLAG_EVENT)
5457 {
5458 PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
5459 assert(pfs != NULL);
5460
5461 pfs_dirty_state dirty_state;
5462 thread->m_stmt_lock.allocated_to_dirty(& dirty_state);
5463
5464 switch(da->status())
5465 {
5466 case Diagnostics_area::DA_EMPTY:
5467 break;
5468 case Diagnostics_area::DA_OK:
5469 memcpy(pfs->m_message_text, da->message_text(),
5470 MYSQL_ERRMSG_SIZE);
5471 pfs->m_message_text[MYSQL_ERRMSG_SIZE]= 0;
5472 pfs->m_rows_affected= da->affected_rows();
5473 pfs->m_warning_count= da->last_statement_cond_count();
5474 memcpy(pfs->m_sqlstate, "00000", SQLSTATE_LENGTH);
5475 break;
5476 case Diagnostics_area::DA_EOF:
5477 pfs->m_warning_count= da->last_statement_cond_count();
5478 break;
5479 case Diagnostics_area::DA_ERROR:
5480 memcpy(pfs->m_message_text, da->message_text(),
5481 MYSQL_ERRMSG_SIZE);
5482 pfs->m_message_text[MYSQL_ERRMSG_SIZE]= 0;
5483 pfs->m_sql_errno= da->mysql_errno();
5484 memcpy(pfs->m_sqlstate, da->returned_sqlstate(), SQLSTATE_LENGTH);
5485 pfs->m_error_count++;
5486 break;
5487 case Diagnostics_area::DA_DISABLED:
5488 break;
5489 }
5490
5491 pfs->m_timer_end= timer_end;
5492 pfs->m_end_event_id= thread->m_event_id;
5493
5494 if (digest_storage != NULL)
5495 {
5496 /*
5497 The following columns in events_statement_current:
5498 - DIGEST,
5499 - DIGEST_TEXT
5500 are computed from the digest storage.
5501 */
5502 pfs->m_digest_storage.copy(digest_storage);
5503 }
5504
5505 pfs_program= reinterpret_cast<PFS_program*>(state->m_parent_sp_share);
5506 pfs_prepared_stmt= reinterpret_cast<PFS_prepared_stmt*>(state->m_parent_prepared_stmt);
5507
5508 if (thread->m_flag_events_statements_history)
5509 insert_events_statements_history(thread, pfs);
5510 if (thread->m_flag_events_statements_history_long)
5511 insert_events_statements_history_long(pfs);
5512
5513 assert(thread->m_events_statements_count > 0);
5514 thread->m_events_statements_count--;
5515 thread->m_stmt_lock.dirty_to_allocated(& dirty_state);
5516 }
5517 }
5518 else
5519 {
5520 if (flags & STATE_FLAG_DIGEST)
5521 {
5522 PFS_thread *thread= my_thread_get_THR_PFS();
5523
5524 /* An instrumented thread is required, for LF_PINS. */
5525 if (thread != NULL)
5526 {
5527 /* Set digest stat. */
5528 digest_storage= state->m_digest;
5529
5530 if (digest_storage != NULL)
5531 {
5532 /* Populate statements_digest_stat with computed digest information. */
5533 digest_stat= find_or_create_digest(thread, digest_storage,
5534 state->m_schema_name,
5535 state->m_schema_name_length);
5536 }
5537 }
5538 }
5539
5540 event_name_array= global_instr_class_statements_array;
5541 /* Aggregate to EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME */
5542 stat= & event_name_array[index];
5543 }
5544
5545 stat->mark_used();
5546
5547 if (flags & STATE_FLAG_TIMED)
5548 {
5549 /* Aggregate to EVENTS_STATEMENTS_SUMMARY_..._BY_EVENT_NAME (timed) */
5550 stat->aggregate_value(wait_time);
5551 }
5552 else
5553 {
5554 /* Aggregate to EVENTS_STATEMENTS_SUMMARY_..._BY_EVENT_NAME (counted) */
5555 stat->aggregate_counted();
5556 }
5557
5558 stat->m_lock_time+= state->m_lock_time;
5559 stat->m_rows_sent+= state->m_rows_sent;
5560 stat->m_rows_examined+= state->m_rows_examined;
5561 stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
5562 stat->m_created_tmp_tables+= state->m_created_tmp_tables;
5563 stat->m_select_full_join+= state->m_select_full_join;
5564 stat->m_select_full_range_join+= state->m_select_full_range_join;
5565 stat->m_select_range+= state->m_select_range;
5566 stat->m_select_range_check+= state->m_select_range_check;
5567 stat->m_select_scan+= state->m_select_scan;
5568 stat->m_sort_merge_passes+= state->m_sort_merge_passes;
5569 stat->m_sort_range+= state->m_sort_range;
5570 stat->m_sort_rows+= state->m_sort_rows;
5571 stat->m_sort_scan+= state->m_sort_scan;
5572 stat->m_no_index_used+= state->m_no_index_used;
5573 stat->m_no_good_index_used+= state->m_no_good_index_used;
5574
5575 if (digest_stat != NULL)
5576 {
5577 digest_stat->mark_used();
5578
5579 if (flags & STATE_FLAG_TIMED)
5580 {
5581 digest_stat->aggregate_value(wait_time);
5582 }
5583 else
5584 {
5585 digest_stat->aggregate_counted();
5586 }
5587
5588 digest_stat->m_lock_time+= state->m_lock_time;
5589 digest_stat->m_rows_sent+= state->m_rows_sent;
5590 digest_stat->m_rows_examined+= state->m_rows_examined;
5591 digest_stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
5592 digest_stat->m_created_tmp_tables+= state->m_created_tmp_tables;
5593 digest_stat->m_select_full_join+= state->m_select_full_join;
5594 digest_stat->m_select_full_range_join+= state->m_select_full_range_join;
5595 digest_stat->m_select_range+= state->m_select_range;
5596 digest_stat->m_select_range_check+= state->m_select_range_check;
5597 digest_stat->m_select_scan+= state->m_select_scan;
5598 digest_stat->m_sort_merge_passes+= state->m_sort_merge_passes;
5599 digest_stat->m_sort_range+= state->m_sort_range;
5600 digest_stat->m_sort_rows+= state->m_sort_rows;
5601 digest_stat->m_sort_scan+= state->m_sort_scan;
5602 digest_stat->m_no_index_used+= state->m_no_index_used;
5603 digest_stat->m_no_good_index_used+= state->m_no_good_index_used;
5604 }
5605
5606 if(pfs_program != NULL)
5607 {
5608 PFS_statement_stat *sub_stmt_stat= NULL;
5609 sub_stmt_stat= &pfs_program->m_stmt_stat;
5610 if(sub_stmt_stat != NULL)
5611 {
5612 sub_stmt_stat->mark_used();
5613
5614 if (flags & STATE_FLAG_TIMED)
5615 {
5616 sub_stmt_stat->aggregate_value(wait_time);
5617 }
5618 else
5619 {
5620 sub_stmt_stat->aggregate_counted();
5621 }
5622
5623 sub_stmt_stat->m_lock_time+= state->m_lock_time;
5624 sub_stmt_stat->m_rows_sent+= state->m_rows_sent;
5625 sub_stmt_stat->m_rows_examined+= state->m_rows_examined;
5626 sub_stmt_stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
5627 sub_stmt_stat->m_created_tmp_tables+= state->m_created_tmp_tables;
5628 sub_stmt_stat->m_select_full_join+= state->m_select_full_join;
5629 sub_stmt_stat->m_select_full_range_join+= state->m_select_full_range_join;
5630 sub_stmt_stat->m_select_range+= state->m_select_range;
5631 sub_stmt_stat->m_select_range_check+= state->m_select_range_check;
5632 sub_stmt_stat->m_select_scan+= state->m_select_scan;
5633 sub_stmt_stat->m_sort_merge_passes+= state->m_sort_merge_passes;
5634 sub_stmt_stat->m_sort_range+= state->m_sort_range;
5635 sub_stmt_stat->m_sort_rows+= state->m_sort_rows;
5636 sub_stmt_stat->m_sort_scan+= state->m_sort_scan;
5637 sub_stmt_stat->m_no_index_used+= state->m_no_index_used;
5638 sub_stmt_stat->m_no_good_index_used+= state->m_no_good_index_used;
5639 }
5640 }
5641
5642 if (pfs_prepared_stmt != NULL)
5643 {
5644 if(state->m_in_prepare)
5645 {
5646 PFS_single_stat *prepared_stmt_stat= NULL;
5647 prepared_stmt_stat= &pfs_prepared_stmt->m_prepare_stat;
5648 if(prepared_stmt_stat != NULL)
5649 {
5650 if (flags & STATE_FLAG_TIMED)
5651 {
5652 prepared_stmt_stat->aggregate_value(wait_time);
5653 }
5654 else
5655 {
5656 prepared_stmt_stat->aggregate_counted();
5657 }
5658 }
5659 }
5660 else
5661 {
5662 PFS_statement_stat *prepared_stmt_stat= NULL;
5663 prepared_stmt_stat= &pfs_prepared_stmt->m_execute_stat;
5664 if(prepared_stmt_stat != NULL)
5665 {
5666 if (flags & STATE_FLAG_TIMED)
5667 {
5668 prepared_stmt_stat->aggregate_value(wait_time);
5669 }
5670 else
5671 {
5672 prepared_stmt_stat->aggregate_counted();
5673 }
5674
5675 prepared_stmt_stat->m_lock_time+= state->m_lock_time;
5676 prepared_stmt_stat->m_rows_sent+= state->m_rows_sent;
5677 prepared_stmt_stat->m_rows_examined+= state->m_rows_examined;
5678 prepared_stmt_stat->m_created_tmp_disk_tables+= state->m_created_tmp_disk_tables;
5679 prepared_stmt_stat->m_created_tmp_tables+= state->m_created_tmp_tables;
5680 prepared_stmt_stat->m_select_full_join+= state->m_select_full_join;
5681 prepared_stmt_stat->m_select_full_range_join+= state->m_select_full_range_join;
5682 prepared_stmt_stat->m_select_range+= state->m_select_range;
5683 prepared_stmt_stat->m_select_range_check+= state->m_select_range_check;
5684 prepared_stmt_stat->m_select_scan+= state->m_select_scan;
5685 prepared_stmt_stat->m_sort_merge_passes+= state->m_sort_merge_passes;
5686 prepared_stmt_stat->m_sort_range+= state->m_sort_range;
5687 prepared_stmt_stat->m_sort_rows+= state->m_sort_rows;
5688 prepared_stmt_stat->m_sort_scan+= state->m_sort_scan;
5689 prepared_stmt_stat->m_no_index_used+= state->m_no_index_used;
5690 prepared_stmt_stat->m_no_good_index_used+= state->m_no_good_index_used;
5691 }
5692 }
5693 }
5694
5695 PFS_statement_stat *sub_stmt_stat= NULL;
5696 if (pfs_program != NULL)
5697 sub_stmt_stat= &pfs_program->m_stmt_stat;
5698
5699 PFS_statement_stat *prepared_stmt_stat= NULL;
5700 if (pfs_prepared_stmt != NULL && !state->m_in_prepare)
5701 prepared_stmt_stat= &pfs_prepared_stmt->m_execute_stat;
5702
5703 switch (da->status())
5704 {
5705 case Diagnostics_area::DA_EMPTY:
5706 break;
5707 case Diagnostics_area::DA_OK:
5708 stat->m_rows_affected+= da->affected_rows();
5709 stat->m_warning_count+= da->last_statement_cond_count();
5710 if (digest_stat != NULL)
5711 {
5712 digest_stat->m_rows_affected+= da->affected_rows();
5713 digest_stat->m_warning_count+= da->last_statement_cond_count();
5714 }
5715 if(sub_stmt_stat != NULL)
5716 {
5717 sub_stmt_stat->m_rows_affected+= da->affected_rows();
5718 sub_stmt_stat->m_warning_count+= da->last_statement_cond_count();
5719 }
5720 if (prepared_stmt_stat != NULL)
5721 {
5722 prepared_stmt_stat->m_rows_affected+= da->affected_rows();
5723 prepared_stmt_stat->m_warning_count+= da->last_statement_cond_count();
5724 }
5725 break;
5726 case Diagnostics_area::DA_EOF:
5727 stat->m_warning_count+= da->last_statement_cond_count();
5728 if (digest_stat != NULL)
5729 {
5730 digest_stat->m_warning_count+= da->last_statement_cond_count();
5731 }
5732 if(sub_stmt_stat != NULL)
5733 {
5734 sub_stmt_stat->m_warning_count+= da->last_statement_cond_count();
5735 }
5736 if (prepared_stmt_stat != NULL)
5737 {
5738 prepared_stmt_stat->m_warning_count+= da->last_statement_cond_count();
5739 }
5740 break;
5741 case Diagnostics_area::DA_ERROR:
5742 stat->m_error_count++;
5743 if (digest_stat != NULL)
5744 {
5745 digest_stat->m_error_count++;
5746 }
5747 if (sub_stmt_stat != NULL)
5748 {
5749 sub_stmt_stat->m_error_count++;
5750 }
5751 if (prepared_stmt_stat != NULL)
5752 {
5753 prepared_stmt_stat->m_error_count++;
5754 }
5755 break;
5756 case Diagnostics_area::DA_DISABLED:
5757 break;
5758 }
5759 }
5760
sp_type_to_object_type(uint sp_type)5761 static inline enum_object_type sp_type_to_object_type(uint sp_type)
5762 {
5763 enum enum_sp_type value= static_cast<enum enum_sp_type> (sp_type);
5764
5765 switch (value)
5766 {
5767 case SP_TYPE_FUNCTION:
5768 return OBJECT_TYPE_FUNCTION;
5769 case SP_TYPE_PROCEDURE:
5770 return OBJECT_TYPE_PROCEDURE;
5771 case SP_TYPE_TRIGGER:
5772 return OBJECT_TYPE_TRIGGER;
5773 case SP_TYPE_EVENT:
5774 return OBJECT_TYPE_EVENT;
5775 default:
5776 assert(false);
5777 /* Dead code */
5778 return NO_OBJECT_TYPE;
5779 }
5780 }
5781
5782 /**
5783 Implementation of the stored program instrumentation interface.
5784 @sa PSI_v1::get_sp_share.
5785 */
pfs_get_sp_share_v1(uint sp_type,const char * schema_name,uint schema_name_length,const char * object_name,uint object_name_length)5786 PSI_sp_share *pfs_get_sp_share_v1(uint sp_type,
5787 const char* schema_name,
5788 uint schema_name_length,
5789 const char* object_name,
5790 uint object_name_length)
5791 {
5792
5793 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
5794 if (unlikely(pfs_thread == NULL))
5795 return NULL;
5796
5797 if (object_name_length > COL_OBJECT_NAME_SIZE)
5798 object_name_length= COL_OBJECT_NAME_SIZE;
5799 if (schema_name_length > COL_OBJECT_SCHEMA_SIZE)
5800 schema_name_length= COL_OBJECT_SCHEMA_SIZE;
5801
5802 PFS_program *pfs_program;
5803 pfs_program= find_or_create_program(pfs_thread,
5804 sp_type_to_object_type(sp_type),
5805 object_name,
5806 object_name_length,
5807 schema_name,
5808 schema_name_length);
5809
5810 return reinterpret_cast<PSI_sp_share *>(pfs_program);
5811 }
5812
pfs_release_sp_share_v1(PSI_sp_share * sp_share)5813 void pfs_release_sp_share_v1(PSI_sp_share* sp_share)
5814 {
5815 /* Unused */
5816 return;
5817 }
5818
/**
  Implementation of the stored program instrumentation interface.
  @sa PSI_v1::start_sp.
  Start instrumenting a stored program execution.
  @param state    caller-provided locker state storage
  @param sp_share share obtained from get_sp_share(); may be NULL
  @return an opaque locker, or NULL if not instrumented
*/
PSI_sp_locker* pfs_start_sp_v1(PSI_sp_locker_state *state,
                               PSI_sp_share *sp_share)
{
  assert(state != NULL);
  /* Global kill switch for all instrumentation. */
  if (! flag_global_instrumentation)
    return NULL;

  /* Per-thread instrumentation may be disabled for this session. */
  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
  }

  /*
    sp share might be null in case when stat array is full and no new
    stored program stats are being inserted into it.
  */
  PFS_program *pfs_program= reinterpret_cast<PFS_program*>(sp_share);
  if (pfs_program == NULL || !pfs_program->m_enabled)
    return NULL;

  state->m_flags= 0;

  if(pfs_program->m_timed)
  {
    state->m_flags|= STATE_FLAG_TIMED;
    /* Record the start tick and remember which timer to read at end_sp. */
    state->m_timer_start= get_timer_raw_value_and_function(statement_timer,
                                                           & state->m_timer);
  }

  state->m_sp_share= sp_share;

  return reinterpret_cast<PSI_sp_locker*> (state);
}
5856
pfs_end_sp_v1(PSI_sp_locker * locker)5857 void pfs_end_sp_v1(PSI_sp_locker *locker)
5858 {
5859 PSI_sp_locker_state *state= reinterpret_cast<PSI_sp_locker_state*> (locker);
5860 assert(state != NULL);
5861
5862 ulonglong timer_end;
5863 ulonglong wait_time;
5864
5865 PFS_program *pfs_program= reinterpret_cast<PFS_program *>(state->m_sp_share);
5866 PFS_sp_stat *stat= &pfs_program->m_sp_stat;
5867
5868 if (state->m_flags & STATE_FLAG_TIMED)
5869 {
5870 timer_end= state->m_timer();
5871 wait_time= timer_end - state->m_timer_start;
5872
5873 /* Now use this timer_end and wait_time for timing information. */
5874 stat->aggregate_value(wait_time);
5875 }
5876 else
5877 {
5878 stat->aggregate_counted();
5879 }
5880 }
5881
pfs_drop_sp_v1(uint sp_type,const char * schema_name,uint schema_name_length,const char * object_name,uint object_name_length)5882 void pfs_drop_sp_v1(uint sp_type,
5883 const char* schema_name,
5884 uint schema_name_length,
5885 const char* object_name,
5886 uint object_name_length)
5887 {
5888 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
5889 if (unlikely(pfs_thread == NULL))
5890 return;
5891
5892 if (object_name_length > COL_OBJECT_NAME_SIZE)
5893 object_name_length= COL_OBJECT_NAME_SIZE;
5894 if (schema_name_length > COL_OBJECT_SCHEMA_SIZE)
5895 schema_name_length= COL_OBJECT_SCHEMA_SIZE;
5896
5897 drop_program(pfs_thread,
5898 sp_type_to_object_type(sp_type),
5899 object_name, object_name_length,
5900 schema_name, schema_name_length);
5901 }
5902
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::get_thread_transaction_locker.
  Build a transaction locker for the current thread, optionally preparing
  a row in EVENTS_TRANSACTIONS_CURRENT.
  @param state           caller-provided locker state storage
  @param xid             XA transaction id, may be NULL
  @param trxid           storage engine transaction id, may be NULL
  @param isolation_level enum_isolation_level value as int
  @param read_only       true for a read-only transaction
  @param autocommit      true when autocommit is active
  @return an opaque locker, or NULL if not instrumented
*/
PSI_transaction_locker*
pfs_get_thread_transaction_locker_v1(PSI_transaction_locker_state *state,
                                     const void *xid,
                                     const ulonglong *trxid,
                                     int isolation_level,
                                     my_bool read_only,
                                     my_bool autocommit)
{
  assert(state != NULL);

  if (!flag_global_instrumentation)
    return NULL;

  if (!global_transaction_class.m_enabled)
    return NULL;

  uint flags;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (!pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (global_transaction_class.m_timed)
      flags|= STATE_FLAG_TIMED;

    if (flag_events_transactions_current)
    {
      ulonglong event_id= pfs_thread->m_event_id++;

      /* Initialize the EVENTS_TRANSACTIONS_CURRENT row for this thread. */
      PFS_events_transactions *pfs= &pfs_thread->m_transaction_current;
      pfs->m_thread_internal_id = pfs_thread->m_thread_internal_id;
      pfs->m_event_id= event_id;
      pfs->m_event_type= EVENT_TYPE_TRANSACTION;
      pfs->m_end_event_id= 0;
      pfs->m_class= &global_transaction_class;
      pfs->m_timer_start= 0;
      pfs->m_timer_end= 0;
      if (xid != NULL)
        pfs->m_xid= *(PSI_xid *)xid;
      /* Assume non-XA until set_transaction_xid() says otherwise. */
      pfs->m_xa= false;
      pfs->m_xa_state= TRANS_STATE_XA_NOTR;
      pfs->m_trxid= (trxid == NULL) ? 0 : *trxid;
      pfs->m_isolation_level= (enum_isolation_level)isolation_level;
      pfs->m_read_only= read_only;
      pfs->m_autocommit= autocommit;
      pfs->m_savepoint_count= 0;
      pfs->m_rollback_to_savepoint_count= 0;
      pfs->m_release_savepoint_count= 0;

      /* Nest the transaction under the current statement, if any. */
      uint statements_count= pfs_thread->m_events_statements_count;
      if (statements_count > 0)
      {
        PFS_events_statements *pfs_statement=
          &pfs_thread->m_statement_stack[statements_count - 1];
        pfs->m_nesting_event_id= pfs_statement->m_event_id;
        pfs->m_nesting_event_type= pfs_statement->m_event_type;
      }
      else
      {
        pfs->m_nesting_event_id= 0;
        /* pfs->m_nesting_event_type not used when m_nesting_event_id is 0 */
      }

      state->m_transaction= pfs;
      flags|= STATE_FLAG_EVENT;
    }
  }
  else
  {
    /* No per-thread instrumentation: only timing may apply. */
    if (global_transaction_class.m_timed)
      flags= STATE_FLAG_TIMED;
    else
      flags= 0;
  }

  state->m_class= &global_transaction_class;
  state->m_flags= flags;
  state->m_autocommit= autocommit;
  state->m_read_only= read_only;
  state->m_savepoint_count= 0;
  state->m_rollback_to_savepoint_count= 0;
  state->m_release_savepoint_count= 0;

  return reinterpret_cast<PSI_transaction_locker*> (state);
}
5994
pfs_start_transaction_v1(PSI_transaction_locker * locker,const char * src_file,uint src_line)5995 void pfs_start_transaction_v1(PSI_transaction_locker *locker,
5996 const char *src_file, uint src_line)
5997 {
5998 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
5999 assert(state != NULL);
6000
6001 uint flags= state->m_flags;
6002 ulonglong timer_start= 0;
6003
6004 if (flags & STATE_FLAG_TIMED)
6005 {
6006 timer_start= get_timer_raw_value_and_function(transaction_timer, &state->m_timer);
6007 state->m_timer_start= timer_start;
6008 }
6009
6010 if (flags & STATE_FLAG_EVENT)
6011 {
6012 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6013 assert(pfs != NULL);
6014
6015 pfs->m_timer_start= timer_start;
6016 pfs->m_source_file= src_file;
6017 pfs->m_source_line= src_line;
6018 pfs->m_state= TRANS_STATE_ACTIVE;
6019 pfs->m_sid.clear();
6020 pfs->m_gtid_spec.set_automatic();
6021 }
6022 }
6023
pfs_set_transaction_gtid_v1(PSI_transaction_locker * locker,const void * sid,const void * gtid_spec)6024 void pfs_set_transaction_gtid_v1(PSI_transaction_locker *locker,
6025 const void *sid,
6026 const void *gtid_spec)
6027 {
6028 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
6029 assert(state != NULL);
6030 assert(sid != NULL);
6031 assert(gtid_spec != NULL);
6032
6033 if (state->m_flags & STATE_FLAG_EVENT)
6034 {
6035 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6036 assert(pfs != NULL);
6037 pfs->m_sid= *(rpl_sid *)sid;
6038 pfs->m_gtid_spec= *(Gtid_specification *)gtid_spec;
6039 }
6040 }
6041
pfs_set_transaction_xid_v1(PSI_transaction_locker * locker,const void * xid,int xa_state)6042 void pfs_set_transaction_xid_v1(PSI_transaction_locker *locker,
6043 const void *xid,
6044 int xa_state)
6045 {
6046 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
6047 assert(state != NULL);
6048
6049 if (state->m_flags & STATE_FLAG_EVENT)
6050 {
6051 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6052 assert(pfs != NULL);
6053 assert(xid != NULL);
6054
6055 pfs->m_xid= *(PSI_xid *)xid;
6056 pfs->m_xa_state= (enum_xa_transaction_state)xa_state;
6057 pfs->m_xa= true;
6058 }
6059 return;
6060 }
6061
pfs_set_transaction_xa_state_v1(PSI_transaction_locker * locker,int xa_state)6062 void pfs_set_transaction_xa_state_v1(PSI_transaction_locker *locker,
6063 int xa_state)
6064 {
6065 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
6066 assert(state != NULL);
6067
6068 if (state->m_flags & STATE_FLAG_EVENT)
6069 {
6070 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6071 assert(pfs != NULL);
6072
6073 pfs->m_xa_state= (enum_xa_transaction_state)xa_state;
6074 pfs->m_xa= true;
6075 }
6076 return;
6077 }
6078
pfs_set_transaction_trxid_v1(PSI_transaction_locker * locker,const ulonglong * trxid)6079 void pfs_set_transaction_trxid_v1(PSI_transaction_locker *locker,
6080 const ulonglong *trxid)
6081 {
6082 assert(trxid != NULL);
6083
6084 PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
6085 assert(state != NULL);
6086
6087 if (state->m_flags & STATE_FLAG_EVENT)
6088 {
6089 PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
6090 assert(pfs != NULL);
6091
6092 if (pfs->m_trxid == 0)
6093 pfs->m_trxid= *trxid;
6094 }
6095 }
6096
/**
  Shared body of the pfs_inc_transaction_*_v1 helpers.
  Adds VALUE to attribute ATTR on the locker state, and mirrors the
  increment into the current transaction event when STATE_FLAG_EVENT
  is set.  Silently returns when LOCKER is NULL (not instrumented).
*/
#define INC_TRANSACTION_ATTR_BODY(LOCKER, ATTR, VALUE) \
  PSI_transaction_locker_state *state; \
  state= reinterpret_cast<PSI_transaction_locker_state*> (LOCKER); \
  if (unlikely(state == NULL)) \
    return; \
  state->ATTR+= VALUE; \
  if (state->m_flags & STATE_FLAG_EVENT) \
  { \
    PFS_events_transactions *pfs; \
    pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction); \
    assert(pfs != NULL); \
    pfs->ATTR+= VALUE; \
  } \
  return;
6111
6112
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::inc_transaction_savepoints.
*/
void pfs_inc_transaction_savepoints_v1(PSI_transaction_locker *locker,
                                       ulong count)
{
  INC_TRANSACTION_ATTR_BODY(locker, m_savepoint_count, count);
}
6118
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::inc_transaction_rollback_to_savepoint.
*/
void pfs_inc_transaction_rollback_to_savepoint_v1(PSI_transaction_locker *locker,
                                                  ulong count)
{
  INC_TRANSACTION_ATTR_BODY(locker, m_rollback_to_savepoint_count, count);
}
6124
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::inc_transaction_release_savepoint.
*/
void pfs_inc_transaction_release_savepoint_v1(PSI_transaction_locker *locker,
                                              ulong count)
{
  INC_TRANSACTION_ATTR_BODY(locker, m_release_savepoint_count, count);
}
6130
/**
  Implementation of the transaction instrumentation interface.
  @sa PSI_v1::end_transaction.
  Close the transaction, aggregate its statistics, and archive the event
  to the history tables when applicable.
  @param locker the locker returned by get_thread_transaction_locker
  @param commit true on commit, false on rollback
*/
void pfs_end_transaction_v1(PSI_transaction_locker *locker, my_bool commit)
{
  PSI_transaction_locker_state *state= reinterpret_cast<PSI_transaction_locker_state*> (locker);
  assert(state != NULL);

  ulonglong timer_end= 0;
  ulonglong wait_time= 0;
  uint flags= state->m_flags;

  if (flags & STATE_FLAG_TIMED)
  {
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;
  }

  PFS_transaction_stat *stat;

  if (flags & STATE_FLAG_THREAD)
  {
    PFS_thread *pfs_thread= reinterpret_cast<PFS_thread *> (state->m_thread);
    assert(pfs_thread != NULL);

    /* Aggregate to EVENTS_TRANSACTIONS_SUMMARY_BY_THREAD_BY_EVENT_NAME */
    stat= &pfs_thread->write_instr_class_transactions_stats()[GLOBAL_TRANSACTION_INDEX];

    if (flags & STATE_FLAG_EVENT)
    {
      PFS_events_transactions *pfs= reinterpret_cast<PFS_events_transactions*> (state->m_transaction);
      assert(pfs != NULL);

      /* events_transactions_current may have been cleared while the transaction was active */
      if (unlikely(pfs->m_class == NULL))
        return;

      pfs->m_timer_end= timer_end;
      pfs->m_end_event_id= pfs_thread->m_event_id;

      pfs->m_state= (commit ? TRANS_STATE_COMMITTED : TRANS_STATE_ROLLED_BACK);

      /* For XA transactions, also reflect the outcome in the XA state. */
      if (pfs->m_xa)
        pfs->m_xa_state= (commit ? TRANS_STATE_XA_COMMITTED : TRANS_STATE_XA_ROLLBACK_ONLY);

      /* Archive the completed event to the history tables, as configured. */
      if (pfs_thread->m_flag_events_transactions_history)
        insert_events_transactions_history(pfs_thread, pfs);
      if (pfs_thread->m_flag_events_transactions_history_long)
        insert_events_transactions_history_long(pfs);
    }
  }
  else
  {
    /* Aggregate to EVENTS_TRANSACTIONS_SUMMARY_GLOBAL_BY_EVENT_NAME */
    stat= &global_transaction_stat;
  }

  /* Read-only and read-write transactions are aggregated separately. */
  if (flags & STATE_FLAG_TIMED)
  {
    /* Aggregate to EVENTS_TRANSACTIONS_SUMMARY_..._BY_EVENT_NAME (timed) */
    if(state->m_read_only)
      stat->m_read_only_stat.aggregate_value(wait_time);
    else
      stat->m_read_write_stat.aggregate_value(wait_time);
  }
  else
  {
    /* Aggregate to EVENTS_TRANSACTIONS_SUMMARY_..._BY_EVENT_NAME (counted) */
    if(state->m_read_only)
      stat->m_read_only_stat.aggregate_counted();
    else
      stat->m_read_write_stat.aggregate_counted();
  }

  /* Fold the savepoint counters accumulated on the locker into the stats. */
  stat->m_savepoint_count+= state->m_savepoint_count;
  stat->m_rollback_to_savepoint_count+= state->m_rollback_to_savepoint_count;
  stat->m_release_savepoint_count+= state->m_release_savepoint_count;
}
6206
6207
6208 /**
6209 Implementation of the socket instrumentation interface.
6210 @sa PSI_v1::end_socket_wait.
6211 */
pfs_end_socket_wait_v1(PSI_socket_locker * locker,size_t byte_count)6212 void pfs_end_socket_wait_v1(PSI_socket_locker *locker, size_t byte_count)
6213 {
6214 PSI_socket_locker_state *state= reinterpret_cast<PSI_socket_locker_state*> (locker);
6215 assert(state != NULL);
6216
6217 PFS_socket *socket= reinterpret_cast<PFS_socket *>(state->m_socket);
6218 assert(socket != NULL);
6219
6220 ulonglong timer_end= 0;
6221 ulonglong wait_time= 0;
6222 PFS_byte_stat *byte_stat;
6223 uint flags= state->m_flags;
6224 size_t bytes= ((int)byte_count > -1 ? byte_count : 0);
6225
6226 switch (state->m_operation)
6227 {
6228 /* Group read operations */
6229 case PSI_SOCKET_RECV:
6230 case PSI_SOCKET_RECVFROM:
6231 case PSI_SOCKET_RECVMSG:
6232 byte_stat= &socket->m_socket_stat.m_io_stat.m_read;
6233 break;
6234 /* Group write operations */
6235 case PSI_SOCKET_SEND:
6236 case PSI_SOCKET_SENDTO:
6237 case PSI_SOCKET_SENDMSG:
6238 byte_stat= &socket->m_socket_stat.m_io_stat.m_write;
6239 break;
6240 /* Group remaining operations as miscellaneous */
6241 case PSI_SOCKET_CONNECT:
6242 case PSI_SOCKET_CREATE:
6243 case PSI_SOCKET_BIND:
6244 case PSI_SOCKET_SEEK:
6245 case PSI_SOCKET_OPT:
6246 case PSI_SOCKET_STAT:
6247 case PSI_SOCKET_SHUTDOWN:
6248 case PSI_SOCKET_SELECT:
6249 case PSI_SOCKET_CLOSE:
6250 byte_stat= &socket->m_socket_stat.m_io_stat.m_misc;
6251 break;
6252 default:
6253 assert(false);
6254 byte_stat= NULL;
6255 break;
6256 }
6257
6258 /* Aggregation for EVENTS_WAITS_SUMMARY_BY_INSTANCE */
6259 if (flags & STATE_FLAG_TIMED)
6260 {
6261 timer_end= state->m_timer();
6262 wait_time= timer_end - state->m_timer_start;
6263
6264 /* Aggregate to the socket instrument for now (timed) */
6265 byte_stat->aggregate(wait_time, bytes);
6266 }
6267 else
6268 {
6269 /* Aggregate to the socket instrument (event count and byte count) */
6270 byte_stat->aggregate_counted(bytes);
6271 }
6272
6273 /* Aggregate to EVENTS_WAITS_HISTORY and EVENTS_WAITS_HISTORY_LONG */
6274 if (flags & STATE_FLAG_EVENT)
6275 {
6276 PFS_thread *thread= reinterpret_cast<PFS_thread *>(state->m_thread);
6277 assert(thread != NULL);
6278 PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
6279 assert(wait != NULL);
6280
6281 wait->m_timer_end= timer_end;
6282 wait->m_end_event_id= thread->m_event_id;
6283 wait->m_number_of_bytes= bytes;
6284
6285 if (thread->m_flag_events_waits_history)
6286 insert_events_waits_history(thread, wait);
6287 if (thread->m_flag_events_waits_history_long)
6288 insert_events_waits_history_long(wait);
6289 thread->m_events_waits_current--;
6290
6291 assert(wait == thread->m_events_waits_current);
6292 }
6293 }
6294
pfs_set_socket_state_v1(PSI_socket * socket,PSI_socket_state state)6295 void pfs_set_socket_state_v1(PSI_socket *socket, PSI_socket_state state)
6296 {
6297 assert((state == PSI_SOCKET_STATE_IDLE) || (state == PSI_SOCKET_STATE_ACTIVE));
6298 PFS_socket *pfs= reinterpret_cast<PFS_socket*>(socket);
6299 assert(pfs != NULL);
6300 assert(pfs->m_idle || (state == PSI_SOCKET_STATE_IDLE));
6301 assert(!pfs->m_idle || (state == PSI_SOCKET_STATE_ACTIVE));
6302 pfs->m_idle= (state == PSI_SOCKET_STATE_IDLE);
6303 }
6304
6305 /**
6306 Set socket descriptor and address info.
6307 */
pfs_set_socket_info_v1(PSI_socket * socket,const my_socket * fd,const struct sockaddr * addr,socklen_t addr_len)6308 void pfs_set_socket_info_v1(PSI_socket *socket,
6309 const my_socket *fd,
6310 const struct sockaddr *addr,
6311 socklen_t addr_len)
6312 {
6313 PFS_socket *pfs= reinterpret_cast<PFS_socket*>(socket);
6314 assert(pfs != NULL);
6315
6316 /** Set socket descriptor */
6317 if (fd != NULL)
6318 pfs->m_fd= (uint)*fd;
6319
6320 /** Set raw socket address and length */
6321 if (likely(addr != NULL && addr_len > 0))
6322 {
6323 pfs->m_addr_len= addr_len;
6324
6325 /** Restrict address length to size of struct */
6326 if (unlikely(pfs->m_addr_len > sizeof(sockaddr_storage)))
6327 pfs->m_addr_len= sizeof(struct sockaddr_storage);
6328
6329 memcpy(&pfs->m_sock_addr, addr, pfs->m_addr_len);
6330 }
6331 }
6332
6333 /**
6334 Implementation of the socket instrumentation interface.
6335 @sa PSI_v1::set_socket_info.
6336 */
pfs_set_socket_thread_owner_v1(PSI_socket * socket)6337 void pfs_set_socket_thread_owner_v1(PSI_socket *socket)
6338 {
6339 PFS_socket *pfs_socket= reinterpret_cast<PFS_socket*>(socket);
6340 assert(pfs_socket != NULL);
6341 pfs_socket->m_thread_owner= my_thread_get_THR_PFS();
6342 }
6343
6344 struct PSI_digest_locker*
pfs_digest_start_v1(PSI_statement_locker * locker)6345 pfs_digest_start_v1(PSI_statement_locker *locker)
6346 {
6347 PSI_statement_locker_state *statement_state;
6348 statement_state= reinterpret_cast<PSI_statement_locker_state*> (locker);
6349 assert(statement_state != NULL);
6350
6351 if (statement_state->m_discarded)
6352 return NULL;
6353
6354 if (statement_state->m_flags & STATE_FLAG_DIGEST)
6355 {
6356 return reinterpret_cast<PSI_digest_locker*> (locker);
6357 }
6358
6359 return NULL;
6360 }
6361
pfs_digest_end_v1(PSI_digest_locker * locker,const sql_digest_storage * digest)6362 void pfs_digest_end_v1(PSI_digest_locker *locker, const sql_digest_storage *digest)
6363 {
6364 PSI_statement_locker_state *statement_state;
6365 statement_state= reinterpret_cast<PSI_statement_locker_state*> (locker);
6366 assert(statement_state != NULL);
6367 assert(digest != NULL);
6368
6369 if (statement_state->m_discarded)
6370 return;
6371
6372 if (statement_state->m_flags & STATE_FLAG_DIGEST)
6373 {
6374 statement_state->m_digest= digest;
6375 }
6376 }
6377
6378 PSI_prepared_stmt*
pfs_create_prepared_stmt_v1(void * identity,uint stmt_id,PSI_statement_locker * locker,const char * stmt_name,size_t stmt_name_length,const char * sql_text,size_t sql_text_length)6379 pfs_create_prepared_stmt_v1(void *identity, uint stmt_id,
6380 PSI_statement_locker *locker,
6381 const char *stmt_name, size_t stmt_name_length,
6382 const char *sql_text, size_t sql_text_length)
6383 {
6384 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
6385 PFS_events_statements *pfs_stmt= reinterpret_cast<PFS_events_statements*> (state->m_statement);
6386 PFS_program *pfs_program= reinterpret_cast<PFS_program *>(state->m_parent_sp_share);
6387
6388 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
6389 if (unlikely(pfs_thread == NULL))
6390 return NULL;
6391
6392 if (sql_text_length > COL_INFO_SIZE)
6393 sql_text_length= COL_INFO_SIZE;
6394
6395 PFS_prepared_stmt *pfs= create_prepared_stmt(identity,
6396 pfs_thread, pfs_program,
6397 pfs_stmt, stmt_id,
6398 stmt_name, stmt_name_length,
6399 sql_text, sql_text_length);
6400
6401 state->m_parent_prepared_stmt= reinterpret_cast<PSI_prepared_stmt*>(pfs);
6402 state->m_in_prepare= true;
6403
6404 return reinterpret_cast<PSI_prepared_stmt*>(pfs);
6405 }
6406
pfs_execute_prepared_stmt_v1(PSI_statement_locker * locker,PSI_prepared_stmt * ps)6407 void pfs_execute_prepared_stmt_v1 (PSI_statement_locker *locker,
6408 PSI_prepared_stmt* ps)
6409 {
6410 PSI_statement_locker_state *state= reinterpret_cast<PSI_statement_locker_state*> (locker);
6411 assert(state != NULL);
6412
6413 state->m_parent_prepared_stmt= ps;
6414 state->m_in_prepare= false;
6415 }
6416
pfs_destroy_prepared_stmt_v1(PSI_prepared_stmt * prepared_stmt)6417 void pfs_destroy_prepared_stmt_v1(PSI_prepared_stmt* prepared_stmt)
6418 {
6419 PFS_prepared_stmt *pfs_prepared_stmt= reinterpret_cast<PFS_prepared_stmt*>(prepared_stmt);
6420 delete_prepared_stmt(pfs_prepared_stmt);
6421 return;
6422 }
6423
pfs_reprepare_prepared_stmt_v1(PSI_prepared_stmt * prepared_stmt)6424 void pfs_reprepare_prepared_stmt_v1(PSI_prepared_stmt* prepared_stmt)
6425 {
6426 PFS_prepared_stmt *pfs_prepared_stmt= reinterpret_cast<PFS_prepared_stmt*>(prepared_stmt);
6427 PFS_single_stat *prepared_stmt_stat= &pfs_prepared_stmt->m_reprepare_stat;
6428
6429 if (prepared_stmt_stat != NULL)
6430 prepared_stmt_stat->aggregate_counted();
6431 return;
6432 }
6433
pfs_set_prepared_stmt_text_v1(PSI_prepared_stmt * prepared_stmt,const char * text,uint text_len)6434 void pfs_set_prepared_stmt_text_v1(PSI_prepared_stmt *prepared_stmt,
6435 const char *text,
6436 uint text_len)
6437 {
6438 PFS_prepared_stmt *pfs_prepared_stmt =
6439 reinterpret_cast<PFS_prepared_stmt *>(prepared_stmt);
6440 assert(pfs_prepared_stmt != NULL);
6441
6442 uint max_len = COL_INFO_SIZE;
6443 if (text_len > max_len)
6444 {
6445 text_len = max_len;
6446 }
6447
6448 memcpy(pfs_prepared_stmt->m_sqltext, text, text_len);
6449 pfs_prepared_stmt->m_sqltext_length = text_len;
6450
6451 return;
6452 }
6453
6454 /**
6455 Implementation of the thread attribute connection interface
6456 @sa PSI_v1::set_thread_connect_attr.
6457 */
/**
  Implementation of the thread attribute connection interface
  @sa PSI_v1::set_thread_connect_attr.
  Copy the session connection attributes into the current thread's
  instrumentation record.
  @param buffer  serialized connection attributes
  @param length  size of buffer in bytes
  @param from_cs character set of the buffer (a CHARSET_INFO pointer)
  @return 0 on success (or when not instrumented), 1 when truncated
*/
int pfs_set_thread_connect_attrs_v1(const char *buffer, uint length,
                                    const void *from_cs)
{
  PFS_thread *thd= my_thread_get_THR_PFS();

  assert(buffer != NULL);

  if (likely(thd != NULL) && session_connect_attrs_size_per_thread > 0)
  {
    pfs_dirty_state dirty_state;
    const CHARSET_INFO *cs = static_cast<const CHARSET_INFO *> (from_cs);

    /* copy from the input buffer as much as we can fit */
    uint copy_size= (uint)(length < session_connect_attrs_size_per_thread ?
                           length : session_connect_attrs_size_per_thread);
    /*
      Publish under the session lock's dirty/allocated protocol, so readers
      never observe a half-written attribute buffer.
    */
    thd->m_session_lock.allocated_to_dirty(& dirty_state);
    memcpy(thd->m_session_connect_attrs, buffer, copy_size);
    thd->m_session_connect_attrs_length= copy_size;
    thd->m_session_connect_attrs_cs_number= cs->number;
    thd->m_session_lock.dirty_to_allocated(& dirty_state);

    if (copy_size == length)
      return 0;

    /* Attributes did not fit: count the loss and report truncation. */
    session_connect_attrs_lost++;
    return 1;
  }
  return 0;
}
6487
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::register_memory.
*/
void pfs_register_memory_v1(const char *category,
                            PSI_memory_info_v1 *info,
                            int count)
{
  /* Expands to the shared registration loop for this instrument family. */
  REGISTER_BODY_V1(PSI_memory_key,
                   memory_instrument_prefix,
                   register_memory_class)
}
6496
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::memory_alloc.
  Account an allocation of @c size bytes against instrument @c key.
  @param[out] owner set to the owning PSI_thread when the allocation is
              accounted per thread, NULL otherwise
  @return the key to remember for the matching free, or
          PSI_NOT_INSTRUMENTED when the allocation is not accounted
*/
PSI_memory_key pfs_memory_alloc_v1(PSI_memory_key key, size_t size, PSI_thread **owner)
{
  PFS_thread ** owner_thread= reinterpret_cast<PFS_thread**>(owner);
  assert(owner_thread != NULL);

  if (! flag_global_instrumentation)
  {
    *owner_thread= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  PFS_memory_class *klass= find_memory_class(key);
  if (klass == NULL)
  {
    *owner_thread= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  if (! klass->m_enabled)
  {
    *owner_thread= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  PFS_memory_stat *event_name_array;
  PFS_memory_stat *stat;
  uint index= klass->m_event_name_index;
  PFS_memory_stat_delta delta_buffer;
  PFS_memory_stat_delta *delta;

  if (flag_thread_instrumentation && ! klass->is_global())
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
    {
      *owner_thread= NULL;
      return PSI_NOT_INSTRUMENTED;
    }
    if (! pfs_thread->m_enabled)
    {
      *owner_thread= NULL;
      return PSI_NOT_INSTRUMENTED;
    }

    /* Aggregate to MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME */
    event_name_array= pfs_thread->write_instr_class_memory_stats();
    stat= & event_name_array[index];
    delta= stat->count_alloc(size, &delta_buffer);

    /* A non-NULL delta asks for propagation to higher-level aggregates. */
    if (delta != NULL)
    {
      pfs_thread->carry_memory_stat_delta(delta, index);
    }

    /* Flag this memory as owned by the current thread. */
    *owner_thread= pfs_thread;
  }
  else
  {
    /* Aggregate to MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME */
    event_name_array= global_instr_class_memory_array;
    stat= & event_name_array[index];
    (void) stat->count_alloc(size, &delta_buffer);

    *owner_thread= NULL;
  }

  return key;
}
6566
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::memory_realloc.
  Account a reallocation from @c old_size to @c new_size for instrument
  @c key.  When instrumentation was disabled since the allocation, only
  the free side is accounted and PSI_NOT_INSTRUMENTED is returned.
  @param[in,out] owner owning thread recorded at alloc time; updated
  @return the key to remember for the matching free
*/
PSI_memory_key pfs_memory_realloc_v1(PSI_memory_key key, size_t old_size, size_t new_size, PSI_thread **owner)
{
  PFS_thread ** owner_thread_hdl= reinterpret_cast<PFS_thread**>(owner);
  assert(owner != NULL);

  PFS_memory_class *klass= find_memory_class(key);
  if (klass == NULL)
  {
    *owner_thread_hdl= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  PFS_memory_stat *event_name_array;
  PFS_memory_stat *stat;
  uint index= klass->m_event_name_index;
  PFS_memory_stat_delta delta_buffer;
  PFS_memory_stat_delta *delta;

  if (flag_thread_instrumentation && ! klass->is_global())
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (likely(pfs_thread != NULL))
    {
#ifdef PFS_PARANOID
      /* Diagnose memory accounted to one thread but reallocated by another. */
      PFS_thread *owner_thread= *owner_thread_hdl;
      if (owner_thread != pfs_thread)
      {
        owner_thread= sanitize_thread(owner_thread);
        if (owner_thread != NULL)
        {
          report_memory_accounting_error("pfs_memory_realloc_v1",
            pfs_thread, old_size, klass, owner_thread);
        }
      }
#endif /* PFS_PARANOID */

      /* Aggregate to MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME */
      event_name_array= pfs_thread->write_instr_class_memory_stats();
      stat= & event_name_array[index];

      if (flag_global_instrumentation && klass->m_enabled)
      {
        delta= stat->count_realloc(old_size, new_size, &delta_buffer);
        *owner_thread_hdl= pfs_thread;
      }
      else
      {
        /* Instrumentation was turned off: account the free side only. */
        delta= stat->count_free(old_size, &delta_buffer);
        *owner_thread_hdl= NULL;
        key= PSI_NOT_INSTRUMENTED;
      }

      /* A non-NULL delta asks for propagation to higher-level aggregates. */
      if (delta != NULL)
      {
        pfs_thread->carry_memory_stat_delta(delta, index);
      }
      return key;
    }
  }

  /* Aggregate to MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME */
  event_name_array= global_instr_class_memory_array;
  stat= & event_name_array[index];

  if (flag_global_instrumentation && klass->m_enabled)
  {
    (void) stat->count_realloc(old_size, new_size, &delta_buffer);
  }
  else
  {
    /* Instrumentation was turned off: account the free side only. */
    (void) stat->count_free(old_size, &delta_buffer);
    key= PSI_NOT_INSTRUMENTED;
  }

  *owner_thread_hdl= NULL;
  return key;
}
6644
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::memory_claim.
  Transfer ownership of an instrumented allocation to the current thread:
  the bytes are un-accounted from the previous owner and re-accounted to
  the claimer.
  @param[in,out] owner previous owning thread; updated to the new owner
  @return @c key, unchanged
*/
PSI_memory_key pfs_memory_claim_v1(PSI_memory_key key, size_t size, PSI_thread **owner)
{
  PFS_thread ** owner_thread= reinterpret_cast<PFS_thread**>(owner);
  assert(owner_thread != NULL);

  PFS_memory_class *klass= find_memory_class(key);
  if (klass == NULL)
  {
    *owner_thread= NULL;
    return PSI_NOT_INSTRUMENTED;
  }

  /*
    Do not check klass->m_enabled.
    Do not check flag_global_instrumentation.
    If a memory alloc was instrumented,
    the corresponding free must be instrumented.
  */

  PFS_memory_stat *event_name_array;
  PFS_memory_stat *stat;
  uint index= klass->m_event_name_index;
  PFS_memory_stat_delta delta_buffer;
  PFS_memory_stat_delta *delta;

  if (flag_thread_instrumentation)
  {
    PFS_thread *old_thread= sanitize_thread(*owner_thread);
    PFS_thread *new_thread= my_thread_get_THR_PFS();
    if (old_thread != new_thread)
    {
      /* Un-account the bytes from the previous owner, if still valid. */
      if (old_thread != NULL)
      {
        event_name_array= old_thread->write_instr_class_memory_stats();
        stat= & event_name_array[index];
        delta= stat->count_free(size, &delta_buffer);

        if (delta != NULL)
        {
          old_thread->carry_memory_stat_delta(delta, index);
        }
      }

      /* Re-account the same bytes to the claiming thread. */
      if (new_thread != NULL)
      {
        event_name_array= new_thread->write_instr_class_memory_stats();
        stat= & event_name_array[index];
        delta= stat->count_alloc(size, &delta_buffer);

        if (delta != NULL)
        {
          new_thread->carry_memory_stat_delta(delta, index);
        }
      }

      *owner_thread= new_thread;
    }

    return key;
  }

  *owner_thread= NULL;
  return key;
}
6709
/**
  Implementation of the memory instrumentation interface.
  @sa PSI_v1::memory_free.
  Account a free of @c size bytes against instrument @c key, matching a
  previously accounted allocation.
  @param owner the owning thread recorded at alloc time (diagnostic only)
*/
void pfs_memory_free_v1(PSI_memory_key key, size_t size, PSI_thread *owner)
{
  PFS_memory_class *klass= find_memory_class(key);
  if (klass == NULL)
    return;

  /*
    Do not check klass->m_enabled.
    Do not check flag_global_instrumentation.
    If a memory alloc was instrumented,
    the corresponding free must be instrumented.
  */

  PFS_memory_stat *event_name_array;
  PFS_memory_stat *stat;
  uint index= klass->m_event_name_index;
  PFS_memory_stat_delta delta_buffer;
  PFS_memory_stat_delta *delta;

  if (flag_thread_instrumentation && ! klass->is_global())
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (likely(pfs_thread != NULL))
    {
#ifdef PFS_PARANOID
      /* Diagnose memory accounted to one thread but freed by another. */
      PFS_thread *owner_thread= reinterpret_cast<PFS_thread*>(owner);

      if (owner_thread != pfs_thread)
      {
        owner_thread= sanitize_thread(owner_thread);
        if (owner_thread != NULL)
        {
          report_memory_accounting_error("pfs_memory_free_v1",
            pfs_thread, size, klass, owner_thread);
        }
      }
#endif /* PFS_PARANOID */

      /*
        Do not check pfs_thread->m_enabled.
        If a memory alloc was instrumented,
        the corresponding free must be instrumented.
      */
      /* Aggregate to MEMORY_SUMMARY_BY_THREAD_BY_EVENT_NAME */
      event_name_array= pfs_thread->write_instr_class_memory_stats();
      stat= & event_name_array[index];
      delta= stat->count_free(size, &delta_buffer);

      /* A non-NULL delta asks for propagation to higher-level aggregates. */
      if (delta != NULL)
      {
        pfs_thread->carry_memory_stat_delta(delta, index);
      }
      return;
    }
  }

  /* Aggregate to MEMORY_SUMMARY_GLOBAL_BY_EVENT_NAME */
  event_name_array= global_instr_class_memory_array;
  if (event_name_array)
  {
    stat= & event_name_array[index];
    (void) stat->count_free(size, &delta_buffer);
  }
  return;
}
6775
pfs_unlock_table_v1(PSI_table * table)6776 void pfs_unlock_table_v1(PSI_table *table)
6777 {
6778 PFS_table *pfs_table= reinterpret_cast<PFS_table*> (table);
6779
6780 assert(pfs_table != NULL);
6781
6782 pfs_table->m_internal_lock= PFS_TL_NONE;
6783 return;
6784 }
6785
6786 PSI_metadata_lock *
pfs_create_metadata_lock_v1(void * identity,const MDL_key * mdl_key,opaque_mdl_type mdl_type,opaque_mdl_duration mdl_duration,opaque_mdl_status mdl_status,const char * src_file,uint src_line)6787 pfs_create_metadata_lock_v1(
6788 void *identity,
6789 const MDL_key *mdl_key,
6790 opaque_mdl_type mdl_type,
6791 opaque_mdl_duration mdl_duration,
6792 opaque_mdl_status mdl_status,
6793 const char *src_file,
6794 uint src_line)
6795 {
6796 if (! flag_global_instrumentation)
6797 return NULL;
6798
6799 if (! global_metadata_class.m_enabled)
6800 return NULL;
6801
6802 PFS_thread *pfs_thread= my_thread_get_THR_PFS();
6803 if (pfs_thread == NULL)
6804 return NULL;
6805
6806 PFS_metadata_lock *pfs;
6807 pfs= create_metadata_lock(identity, mdl_key,
6808 mdl_type, mdl_duration, mdl_status,
6809 src_file, src_line);
6810
6811 if (pfs != NULL)
6812 {
6813 pfs->m_owner_thread_id= pfs_thread->m_thread_internal_id;
6814 pfs->m_owner_event_id= pfs_thread->m_event_id;
6815 }
6816
6817 return reinterpret_cast<PSI_metadata_lock *> (pfs);
6818 }
6819
6820 void
pfs_set_metadata_lock_status_v1(PSI_metadata_lock * lock,opaque_mdl_status mdl_status)6821 pfs_set_metadata_lock_status_v1(PSI_metadata_lock *lock, opaque_mdl_status mdl_status)
6822 {
6823 PFS_metadata_lock *pfs= reinterpret_cast<PFS_metadata_lock*> (lock);
6824 assert(pfs != NULL);
6825 pfs->m_mdl_status= mdl_status;
6826 }
6827
6828 void
pfs_destroy_metadata_lock_v1(PSI_metadata_lock * lock)6829 pfs_destroy_metadata_lock_v1(PSI_metadata_lock *lock)
6830 {
6831 PFS_metadata_lock *pfs= reinterpret_cast<PFS_metadata_lock*> (lock);
6832 assert(pfs != NULL);
6833 destroy_metadata_lock(pfs);
6834 }
6835
/**
  Implementation of the metadata lock instrumentation interface.
  Start a wait event on a metadata lock.
  @param state    locker state to initialize, owned by the caller
  @param lock     the instrumented metadata lock waited on
  @param src_file source file of the wait
  @param src_line source line of the wait
  @return a metadata locker, or NULL when the wait is not instrumented
  @sa PSI_v1::start_metadata_wait.
*/
PSI_metadata_locker *
pfs_start_metadata_wait_v1(PSI_metadata_locker_state *state,
                           PSI_metadata_lock *lock,
                           const char *src_file,
                           uint src_line)
{
  PFS_metadata_lock *pfs_lock= reinterpret_cast<PFS_metadata_lock*> (lock);
  assert(state != NULL);
  assert(pfs_lock != NULL);

  /* This lock instance is not instrumented: no locker, no statistics. */
  if (! pfs_lock->m_enabled)
    return NULL;

  uint flags;
  ulonglong timer_start= 0;

  if (flag_thread_instrumentation)
  {
    PFS_thread *pfs_thread= my_thread_get_THR_PFS();
    if (unlikely(pfs_thread == NULL))
      return NULL;
    if (! pfs_thread->m_enabled)
      return NULL;
    state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
    flags= STATE_FLAG_THREAD;

    if (pfs_lock->m_timed)
    {
      /* Capture the raw timer now; the end event computes the delta. */
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags|= STATE_FLAG_TIMED;
    }

    if (flag_events_waits_current)
    {
      /* Wait stack full: count the lost locker and skip instrumentation. */
      if (unlikely(pfs_thread->m_events_waits_current >=
                   & pfs_thread->m_events_waits_stack[WAIT_STACK_SIZE]))
      {
        locker_lost++;
        return NULL;
      }
      PFS_events_waits *wait= pfs_thread->m_events_waits_current;
      state->m_wait= wait;
      flags|= STATE_FLAG_EVENT;

      /* Link this wait to the enclosing event on the wait stack. */
      PFS_events_waits *parent_event= wait - 1;
      wait->m_event_type= EVENT_TYPE_WAIT;
      wait->m_nesting_event_id= parent_event->m_event_id;
      wait->m_nesting_event_type= parent_event->m_event_type;

      wait->m_thread_internal_id= pfs_thread->m_thread_internal_id;
      wait->m_class= &global_metadata_class;
      wait->m_timer_start= timer_start;
      wait->m_timer_end= 0;
      wait->m_object_instance_addr= pfs_lock->m_identity;
      wait->m_event_id= pfs_thread->m_event_id++;
      wait->m_end_event_id= 0;
      /* Weak reference: the lock can be destroyed before the wait ends,
         so the version is saved to detect stale references. */
      wait->m_weak_metadata_lock= pfs_lock;
      wait->m_weak_version= pfs_lock->get_version();
      wait->m_operation= OPERATION_TYPE_METADATA;
      wait->m_source_file= src_file;
      wait->m_source_line= src_line;
      wait->m_wait_class= WAIT_CLASS_METADATA;

      /* Push this wait on the per-thread wait stack. */
      pfs_thread->m_events_waits_current++;
    }
  }
  else
  {
    if (pfs_lock->m_timed)
    {
      timer_start= get_timer_raw_value_and_function(wait_timer, & state->m_timer);
      state->m_timer_start= timer_start;
      flags= STATE_FLAG_TIMED;
      state->m_thread= NULL;
    }
    else
    {
      /*
        Complete shortcut.
      */
      /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (counted) */
      global_metadata_stat.aggregate_counted();
      return NULL;
    }
  }

  state->m_flags= flags;
  state->m_metadata_lock= lock;
  return reinterpret_cast<PSI_metadata_locker*> (state);
}
6927
/**
  Implementation of the metadata lock instrumentation interface.
  End a wait event on a metadata lock, and aggregate the statistics.
  @param locker the metadata locker returned by start_metadata_wait
  @param rc     the wait operation result (not used by this implementation)
  @sa PSI_v1::end_metadata_wait.
*/
void
pfs_end_metadata_wait_v1(PSI_metadata_locker *locker,
                         int rc)
{
  PSI_metadata_locker_state *state= reinterpret_cast<PSI_metadata_locker_state*> (locker);
  assert(state != NULL);

  ulonglong timer_end= 0;
  ulonglong wait_time= 0;

  PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);

  uint flags= state->m_flags;

  if (flags & STATE_FLAG_TIMED)
  {
    /* Compute the wait duration from the timer captured at start. */
    timer_end= state->m_timer();
    wait_time= timer_end - state->m_timer_start;
  }

  if (flags & STATE_FLAG_THREAD)
  {
    PFS_single_stat *event_name_array;
    event_name_array= thread->write_instr_class_waits_stats();

    if (flags & STATE_FLAG_TIMED)
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
      event_name_array[GLOBAL_METADATA_EVENT_INDEX].aggregate_value(wait_time);
    }
    else
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
      event_name_array[GLOBAL_METADATA_EVENT_INDEX].aggregate_counted();
    }

    if (flags & STATE_FLAG_EVENT)
    {
      PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
      assert(wait != NULL);

      /* Complete the wait record, then flush it to the history tables. */
      wait->m_timer_end= timer_end;
      wait->m_end_event_id= thread->m_event_id;
      if (thread->m_flag_events_waits_history)
        insert_events_waits_history(thread, wait);
      if (thread->m_flag_events_waits_history_long)
        insert_events_waits_history_long(wait);
      /* Pop this wait from the per-thread wait stack. */
      thread->m_events_waits_current--;

      assert(wait == thread->m_events_waits_current);
    }
  }
  else
  {
    if (flags & STATE_FLAG_TIMED)
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (timed) */
      global_metadata_stat.aggregate_value(wait_time);
    }
    else
    {
      /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (counted) */
      global_metadata_stat.aggregate_counted();
    }
  }
}
6994
/**
  Implementation of the instrumentation interface.
  Positional initializer: the member order must match the PSI_v1
  structure declaration exactly, so do not reorder entries.
  @sa PSI_v1.
*/
PSI_v1 PFS_v1=
{
  /* Instrument registration */
  pfs_register_mutex_v1,
  pfs_register_rwlock_v1,
  pfs_register_cond_v1,
  pfs_register_thread_v1,
  pfs_register_file_v1,
  pfs_register_stage_v1,
  pfs_register_statement_v1,
  pfs_register_socket_v1,
  /* Instance creation / destruction */
  pfs_init_mutex_v1,
  pfs_destroy_mutex_v1,
  pfs_init_rwlock_v1,
  pfs_destroy_rwlock_v1,
  pfs_init_cond_v1,
  pfs_destroy_cond_v1,
  pfs_init_socket_v1,
  pfs_destroy_socket_v1,
  /* Table instrumentation */
  pfs_get_table_share_v1,
  pfs_release_table_share_v1,
  pfs_drop_table_share_v1,
  pfs_open_table_v1,
  pfs_unbind_table_v1,
  pfs_rebind_table_v1,
  pfs_close_table_v1,
  pfs_create_file_v1,
  /* Thread instrumentation */
  pfs_spawn_thread_v1,
  pfs_new_thread_v1,
  pfs_set_thread_id_v1,
  pfs_set_thread_THD_v1,
  pfs_set_thread_os_id_v1,
  pfs_get_thread_v1,
  pfs_set_thread_user_v1,
  pfs_set_thread_account_v1,
  pfs_set_thread_db_v1,
  pfs_set_thread_command_v1,
  pfs_set_connection_type_v1,
  pfs_set_thread_start_time_v1,
  pfs_set_thread_state_v1,
  pfs_set_thread_info_v1,
  pfs_set_thread_v1,
  pfs_delete_current_thread_v1,
  pfs_delete_thread_v1,
  /* File instrumentation */
  pfs_get_thread_file_name_locker_v1,
  pfs_get_thread_file_stream_locker_v1,
  pfs_get_thread_file_descriptor_locker_v1,
  /* Synchronization object operations */
  pfs_unlock_mutex_v1,
  pfs_unlock_rwlock_v1,
  pfs_signal_cond_v1,
  pfs_broadcast_cond_v1,
  /* Wait events */
  pfs_start_idle_wait_v1,
  pfs_end_idle_wait_v1,
  pfs_start_mutex_wait_v1,
  pfs_end_mutex_wait_v1,
  pfs_start_rwlock_rdwait_v1,
  pfs_end_rwlock_rdwait_v1,
  pfs_start_rwlock_wrwait_v1,
  pfs_end_rwlock_wrwait_v1,
  pfs_start_cond_wait_v1,
  pfs_end_cond_wait_v1,
  pfs_start_table_io_wait_v1,
  pfs_end_table_io_wait_v1,
  pfs_start_table_lock_wait_v1,
  pfs_end_table_lock_wait_v1,
  pfs_start_file_open_wait_v1,
  pfs_end_file_open_wait_v1,
  pfs_end_file_open_wait_and_bind_to_descriptor_v1,
  pfs_end_temp_file_open_wait_and_bind_to_descriptor_v1,
  pfs_start_file_wait_v1,
  pfs_end_file_wait_v1,
  pfs_start_file_close_wait_v1,
  pfs_end_file_close_wait_v1,
  pfs_end_file_rename_wait_v1,
  /* Stage events */
  pfs_start_stage_v1,
  pfs_get_current_stage_progress_v1,
  pfs_end_stage_v1,
  /* Statement events */
  pfs_get_thread_statement_locker_v1,
  pfs_refine_statement_v1,
  pfs_start_statement_v1,
  pfs_set_statement_text_v1,
  pfs_set_statement_lock_time_v1,
  pfs_set_statement_rows_sent_v1,
  pfs_set_statement_rows_examined_v1,
  pfs_inc_statement_created_tmp_disk_tables_v1,
  pfs_inc_statement_created_tmp_tables_v1,
  pfs_inc_statement_select_full_join_v1,
  pfs_inc_statement_select_full_range_join_v1,
  pfs_inc_statement_select_range_v1,
  pfs_inc_statement_select_range_check_v1,
  pfs_inc_statement_select_scan_v1,
  pfs_inc_statement_sort_merge_passes_v1,
  pfs_inc_statement_sort_range_v1,
  pfs_inc_statement_sort_rows_v1,
  pfs_inc_statement_sort_scan_v1,
  pfs_set_statement_no_index_used_v1,
  pfs_set_statement_no_good_index_used_v1,
  pfs_end_statement_v1,
  /* Transaction events */
  pfs_get_thread_transaction_locker_v1,
  pfs_start_transaction_v1,
  pfs_set_transaction_xid_v1,
  pfs_set_transaction_xa_state_v1,
  pfs_set_transaction_gtid_v1,
  pfs_set_transaction_trxid_v1,
  pfs_inc_transaction_savepoints_v1,
  pfs_inc_transaction_rollback_to_savepoint_v1,
  pfs_inc_transaction_release_savepoint_v1,
  pfs_end_transaction_v1,
  /* Socket events */
  pfs_start_socket_wait_v1,
  pfs_end_socket_wait_v1,
  pfs_set_socket_state_v1,
  pfs_set_socket_info_v1,
  pfs_set_socket_thread_owner_v1,
  /* Prepared statements */
  pfs_create_prepared_stmt_v1,
  pfs_destroy_prepared_stmt_v1,
  pfs_reprepare_prepared_stmt_v1,
  pfs_execute_prepared_stmt_v1,
  pfs_set_prepared_stmt_text_v1,
  /* Statement digests */
  pfs_digest_start_v1,
  pfs_digest_end_v1,
  pfs_set_thread_connect_attrs_v1,
  /* Stored programs */
  pfs_start_sp_v1,
  pfs_end_sp_v1,
  pfs_drop_sp_v1,
  pfs_get_sp_share_v1,
  pfs_release_sp_share_v1,
  /* Memory instrumentation */
  pfs_register_memory_v1,
  pfs_memory_alloc_v1,
  pfs_memory_realloc_v1,
  pfs_memory_claim_v1,
  pfs_memory_free_v1,
  pfs_unlock_table_v1,
  /* Metadata locks */
  pfs_create_metadata_lock_v1,
  pfs_set_metadata_lock_status_v1,
  pfs_destroy_metadata_lock_v1,
  pfs_start_metadata_wait_v1,
  pfs_end_metadata_wait_v1
};
7136
get_interface(int version)7137 static void* get_interface(int version)
7138 {
7139 switch (version)
7140 {
7141 case PSI_VERSION_1:
7142 return &PFS_v1;
7143 default:
7144 return NULL;
7145 }
7146 }
7147
7148 C_MODE_END
7149
/**
  Instrumentation interface bootstrap entry point,
  exposing get_interface() to the instrumented code.
  @sa PSI_bootstrap.
*/
struct PSI_bootstrap PFS_bootstrap=
{
  get_interface
};
7154