1 /* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software Foundation,
21 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
22
23 /**
24 @file storage/perfschema/pfs_events_waits.cc
25 Events waits data structures (implementation).
26 */
27
28 #include "my_global.h"
29 #include "my_sys.h"
30 #include "pfs_global.h"
31 #include "pfs_instr_class.h"
32 #include "pfs_instr.h"
33 #include "pfs_user.h"
34 #include "pfs_host.h"
35 #include "pfs_account.h"
36 #include "pfs_events_waits.h"
37 #include "pfs_atomic.h"
38 #include "m_string.h"
39
/** Size of the EVENTS_WAITS_HISTORY_LONG circular buffer, in records. */
ulong events_waits_history_long_size= 0;
/** Consumer flag for table EVENTS_WAITS_CURRENT. */
bool flag_events_waits_current= false;
/** Consumer flag for table EVENTS_WAITS_HISTORY. */
bool flag_events_waits_history= false;
/** Consumer flag for table EVENTS_WAITS_HISTORY_LONG. */
bool flag_events_waits_history_long= false;
/** Consumer flag for the global instrumentation. */
bool flag_global_instrumentation= false;
/** Consumer flag for the per thread instrumentation. */
bool flag_thread_instrumentation= false;

/** True if EVENTS_WAITS_HISTORY_LONG circular buffer is full. */
bool events_waits_history_long_full= false;
/** Index in EVENTS_WAITS_HISTORY_LONG circular buffer. */
volatile uint32 events_waits_history_long_index= 0;
/** EVENTS_WAITS_HISTORY_LONG circular buffer. */
PFS_events_waits *events_waits_history_long_array= NULL;
58
59 /**
60 Initialize table EVENTS_WAITS_HISTORY_LONG.
61 @param events_waits_history_long_sizing table sizing
62 */
init_events_waits_history_long(uint events_waits_history_long_sizing)63 int init_events_waits_history_long(uint events_waits_history_long_sizing)
64 {
65 events_waits_history_long_size= events_waits_history_long_sizing;
66 events_waits_history_long_full= false;
67 PFS_atomic::store_u32(&events_waits_history_long_index, 0);
68
69 if (events_waits_history_long_size == 0)
70 return 0;
71
72 events_waits_history_long_array=
73 PFS_MALLOC_ARRAY(events_waits_history_long_size, sizeof(PFS_events_waits),
74 PFS_events_waits, MYF(MY_ZEROFILL));
75
76 return (events_waits_history_long_array ? 0 : 1);
77 }
78
79 /** Cleanup table EVENTS_WAITS_HISTORY_LONG. */
cleanup_events_waits_history_long(void)80 void cleanup_events_waits_history_long(void)
81 {
82 pfs_free(events_waits_history_long_array);
83 events_waits_history_long_array= NULL;
84 }
85
/**
  Copy a wait event record.
  Records are copied byte-wise (assumed plain-old-data).
  @param dest    destination record
  @param source  record to copy
*/
static inline void copy_events_waits(PFS_events_waits *dest,
                                     const PFS_events_waits *source)
{
  memcpy(dest, source, sizeof(PFS_events_waits));
}
91
92 /**
93 Insert a wait record in table EVENTS_WAITS_HISTORY.
94 @param thread thread that executed the wait
95 @param wait record to insert
96 */
insert_events_waits_history(PFS_thread * thread,PFS_events_waits * wait)97 void insert_events_waits_history(PFS_thread *thread, PFS_events_waits *wait)
98 {
99 if (unlikely(events_waits_history_per_thread == 0))
100 return;
101
102 uint index= thread->m_waits_history_index;
103
104 /*
105 A concurrent thread executing TRUNCATE TABLE EVENTS_WAITS_CURRENT
106 could alter the data that this thread is inserting,
107 causing a potential race condition.
108 We are not testing for this and insert a possibly empty record,
109 to make this thread (the writer) faster.
110 This is ok, the readers of m_waits_history will filter this out.
111 */
112 copy_events_waits(&thread->m_waits_history[index], wait);
113
114 index++;
115 if (index >= events_waits_history_per_thread)
116 {
117 index= 0;
118 thread->m_waits_history_full= true;
119 }
120 thread->m_waits_history_index= index;
121 }
122
123 /**
124 Insert a wait record in table EVENTS_WAITS_HISTORY_LONG.
125 @param wait record to insert
126 */
insert_events_waits_history_long(PFS_events_waits * wait)127 void insert_events_waits_history_long(PFS_events_waits *wait)
128 {
129 if (unlikely(events_waits_history_long_size == 0))
130 return;
131
132 uint index= PFS_atomic::add_u32(&events_waits_history_long_index, 1);
133
134 index= index % events_waits_history_long_size;
135 if (index == 0)
136 events_waits_history_long_full= true;
137
138 /* See related comment in insert_events_waits_history. */
139 copy_events_waits(&events_waits_history_long_array[index], wait);
140 }
141
142 /** Reset table EVENTS_WAITS_CURRENT data. */
reset_events_waits_current(void)143 void reset_events_waits_current(void)
144 {
145 PFS_thread *pfs_thread= thread_array;
146 PFS_thread *pfs_thread_last= thread_array + thread_max;
147
148 for ( ; pfs_thread < pfs_thread_last; pfs_thread++)
149 {
150 PFS_events_waits *pfs_wait= pfs_thread->m_events_waits_stack;
151 PFS_events_waits *pfs_wait_last= pfs_wait + WAIT_STACK_SIZE;
152
153 for ( ; pfs_wait < pfs_wait_last; pfs_wait++)
154 pfs_wait->m_wait_class= NO_WAIT_CLASS;
155 }
156 }
157
158 /** Reset table EVENTS_WAITS_HISTORY data. */
reset_events_waits_history(void)159 void reset_events_waits_history(void)
160 {
161 PFS_thread *pfs_thread= thread_array;
162 PFS_thread *pfs_thread_last= thread_array + thread_max;
163
164 for ( ; pfs_thread < pfs_thread_last; pfs_thread++)
165 {
166 PFS_events_waits *wait= pfs_thread->m_waits_history;
167 PFS_events_waits *wait_last= wait + events_waits_history_per_thread;
168
169 pfs_thread->m_waits_history_index= 0;
170 pfs_thread->m_waits_history_full= false;
171 for ( ; wait < wait_last; wait++)
172 wait->m_wait_class= NO_WAIT_CLASS;
173 }
174 }
175
176 /** Reset table EVENTS_WAITS_HISTORY_LONG data. */
reset_events_waits_history_long(void)177 void reset_events_waits_history_long(void)
178 {
179 PFS_atomic::store_u32(&events_waits_history_long_index, 0);
180 events_waits_history_long_full= false;
181
182 PFS_events_waits *wait= events_waits_history_long_array;
183 PFS_events_waits *wait_last= wait + events_waits_history_long_size;
184 for ( ; wait < wait_last; wait++)
185 wait->m_wait_class= NO_WAIT_CLASS;
186 }
187
188 /** Reset table EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME data. */
reset_events_waits_by_thread()189 void reset_events_waits_by_thread()
190 {
191 PFS_thread *thread= thread_array;
192 PFS_thread *thread_last= thread_array + thread_max;
193 PFS_account *account;
194 PFS_user *user;
195 PFS_host *host;
196
197 for ( ; thread < thread_last; thread++)
198 {
199 if (thread->m_lock.is_populated())
200 {
201 account= sanitize_account(thread->m_account);
202 user= sanitize_user(thread->m_user);
203 host= sanitize_host(thread->m_host);
204 aggregate_thread_waits(thread, account, user, host);
205 }
206 }
207 }
208
209 /** Reset table EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME data. */
reset_events_waits_by_account()210 void reset_events_waits_by_account()
211 {
212 PFS_account *pfs= account_array;
213 PFS_account *pfs_last= account_array + account_max;
214 PFS_user *user;
215 PFS_host *host;
216
217 for ( ; pfs < pfs_last; pfs++)
218 {
219 if (pfs->m_lock.is_populated())
220 {
221 user= sanitize_user(pfs->m_user);
222 host= sanitize_host(pfs->m_host);
223 pfs->aggregate_waits(user, host);
224 }
225 }
226 }
227
228 /** Reset table EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME data. */
reset_events_waits_by_user()229 void reset_events_waits_by_user()
230 {
231 PFS_user *pfs= user_array;
232 PFS_user *pfs_last= user_array + user_max;
233
234 for ( ; pfs < pfs_last; pfs++)
235 {
236 if (pfs->m_lock.is_populated())
237 pfs->aggregate_waits();
238 }
239 }
240
241 /** Reset table EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME data. */
reset_events_waits_by_host()242 void reset_events_waits_by_host()
243 {
244 PFS_host *pfs= host_array;
245 PFS_host *pfs_last= host_array + host_max;
246
247 for ( ; pfs < pfs_last; pfs++)
248 {
249 if (pfs->m_lock.is_populated())
250 pfs->aggregate_waits();
251 }
252 }
253
reset_table_waits_by_table()254 void reset_table_waits_by_table()
255 {
256 PFS_table_share *pfs= table_share_array;
257 PFS_table_share *pfs_last= pfs + table_share_max;
258
259 for ( ; pfs < pfs_last; pfs++)
260 {
261 if (pfs->m_lock.is_populated())
262 pfs->aggregate();
263 }
264 }
265
reset_table_io_waits_by_table()266 void reset_table_io_waits_by_table()
267 {
268 PFS_table_share *pfs= table_share_array;
269 PFS_table_share *pfs_last= pfs + table_share_max;
270
271 for ( ; pfs < pfs_last; pfs++)
272 {
273 if (pfs->m_lock.is_populated())
274 pfs->aggregate_io();
275 }
276 }
277
reset_table_lock_waits_by_table()278 void reset_table_lock_waits_by_table()
279 {
280 PFS_table_share *pfs= table_share_array;
281 PFS_table_share *pfs_last= pfs + table_share_max;
282
283 for ( ; pfs < pfs_last; pfs++)
284 {
285 if (pfs->m_lock.is_populated())
286 pfs->aggregate_lock();
287 }
288 }
289
reset_table_waits_by_table_handle()290 void reset_table_waits_by_table_handle()
291 {
292 PFS_table *pfs= table_array;
293 PFS_table *pfs_last= pfs + table_max;
294
295 for ( ; pfs < pfs_last; pfs++)
296 {
297 if (pfs->m_lock.is_populated())
298 pfs->sanitized_aggregate();
299 }
300 }
301
reset_table_io_waits_by_table_handle()302 void reset_table_io_waits_by_table_handle()
303 {
304 PFS_table *pfs= table_array;
305 PFS_table *pfs_last= pfs + table_max;
306
307 for ( ; pfs < pfs_last; pfs++)
308 {
309 if (pfs->m_lock.is_populated())
310 pfs->sanitized_aggregate_io();
311 }
312 }
313
reset_table_lock_waits_by_table_handle()314 void reset_table_lock_waits_by_table_handle()
315 {
316 PFS_table *pfs= table_array;
317 PFS_table *pfs_last= pfs + table_max;
318
319 for ( ; pfs < pfs_last; pfs++)
320 {
321 if (pfs->m_lock.is_populated())
322 pfs->sanitized_aggregate_lock();
323 }
324 }
325
326