1 /* Copyright (c) 2007, 2012, Oracle and/or its affiliates.
2 Copyright (c) 2020, MariaDB
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; version 2 of the License.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12
13 You should have received a copy of the GNU General Public License
14 along with this program; if not, write to the Free Software Foundation,
15 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
16
17
18 #include "mariadb.h"
19 #include "sql_class.h"
20 #include "debug_sync.h"
21 #include "sql_array.h"
22 #include "rpl_rli.h"
23 #include <lf.h>
24 #include "unireg.h"
25 #include <mysql/plugin.h>
26 #include <mysql/service_thd_wait.h>
27 #include <mysql/psi/mysql_stage.h>
28 #include <tpool.h>
29 #include <pfs_metadata_provider.h>
30 #include <mysql/psi/mysql_mdl.h>
31 #include <algorithm>
32 #include <array>
33
/* Instrumented memory key for the array allocated by MDL_context::acquire_locks. */
static PSI_memory_key key_memory_MDL_context_acquire_locks;

#ifdef HAVE_PSI_INTERFACE
/* Performance-schema keys for the MDL synchronization primitives. */
static PSI_mutex_key key_MDL_wait_LOCK_wait_status;

static PSI_mutex_info all_mdl_mutexes[]=
{
  { &key_MDL_wait_LOCK_wait_status, "MDL_wait::LOCK_wait_status", 0}
};

static PSI_rwlock_key key_MDL_lock_rwlock;
static PSI_rwlock_key key_MDL_context_LOCK_waiting_for;

static PSI_rwlock_info all_mdl_rwlocks[]=
{
  { &key_MDL_lock_rwlock, "MDL_lock::rwlock", 0},
  { &key_MDL_context_LOCK_waiting_for, "MDL_context::LOCK_waiting_for", 0}
};

static PSI_cond_key key_MDL_wait_COND_wait_status;

static PSI_cond_info all_mdl_conds[]=
{
  { &key_MDL_wait_COND_wait_status, "MDL_context::COND_wait_status", 0}
};

static PSI_memory_info all_mdl_memory[]=
{
  { &key_memory_MDL_context_acquire_locks, "MDL_context::acquire_locks", 0}
};
64
65 /**
66 Initialise all the performance schema instrumentation points
67 used by the MDL subsystem.
68 */
init_mdl_psi_keys(void)69 static void init_mdl_psi_keys(void)
70 {
71 int count;
72
73 count= array_elements(all_mdl_mutexes);
74 mysql_mutex_register("sql", all_mdl_mutexes, count);
75
76 count= array_elements(all_mdl_rwlocks);
77 mysql_rwlock_register("sql", all_mdl_rwlocks, count);
78
79 count= array_elements(all_mdl_conds);
80 mysql_cond_register("sql", all_mdl_conds, count);
81
82 count= array_elements(all_mdl_memory);
83 mysql_memory_register("sql", all_mdl_memory, count);
84
85 MDL_key::init_psi_keys();
86 }
87 #endif /* HAVE_PSI_INTERFACE */
88
89
90 /**
91 Thread state names to be used in case when we have to wait on resource
92 belonging to certain namespace.
93 */
94
95 PSI_stage_info MDL_key::m_namespace_to_wait_state_name[NAMESPACE_END]=
96 {
97 {0, "Waiting for backup lock", 0},
98 {0, "Waiting for schema metadata lock", 0},
99 {0, "Waiting for table metadata lock", 0},
100 {0, "Waiting for stored function metadata lock", 0},
101 {0, "Waiting for stored procedure metadata lock", 0},
102 {0, "Waiting for stored package body metadata lock", 0},
103 {0, "Waiting for trigger metadata lock", 0},
104 {0, "Waiting for event metadata lock", 0},
105 {0, "User lock", 0} /* Be compatible with old status. */
106 };
107
108
/**
  Human-readable names for enum_mdl_type values; the order of entries
  must match the order of that enum.
*/
static const LEX_STRING lock_types[]=
{
  { C_STRING_WITH_LEN("MDL_INTENTION_EXCLUSIVE") },
  { C_STRING_WITH_LEN("MDL_SHARED") },
  { C_STRING_WITH_LEN("MDL_SHARED_HIGH_PRIO") },
  { C_STRING_WITH_LEN("MDL_SHARED_READ") },
  { C_STRING_WITH_LEN("MDL_SHARED_WRITE") },
  { C_STRING_WITH_LEN("MDL_SHARED_UPGRADABLE") },
  { C_STRING_WITH_LEN("MDL_SHARED_READ_ONLY") },
  { C_STRING_WITH_LEN("MDL_SHARED_NO_WRITE") },
  { C_STRING_WITH_LEN("MDL_SHARED_NO_READ_WRITE") },
  { C_STRING_WITH_LEN("MDL_EXCLUSIVE") },
};
122
123
/**
  Human-readable names for the lock types of the BACKUP namespace;
  the order of entries must match the corresponding enum.
*/
static const LEX_STRING backup_lock_types[]=
{
  { C_STRING_WITH_LEN("MDL_BACKUP_START") },
  { C_STRING_WITH_LEN("MDL_BACKUP_FLUSH") },
  { C_STRING_WITH_LEN("MDL_BACKUP_WAIT_FLUSH") },
  { C_STRING_WITH_LEN("MDL_BACKUP_WAIT_DDL") },
  { C_STRING_WITH_LEN("MDL_BACKUP_WAIT_COMMIT") },
  { C_STRING_WITH_LEN("MDL_BACKUP_FTWRL1") },
  { C_STRING_WITH_LEN("MDL_BACKUP_FTWRL2") },
  { C_STRING_WITH_LEN("MDL_BACKUP_DML") },
  { C_STRING_WITH_LEN("MDL_BACKUP_TRANS_DML") },
  { C_STRING_WITH_LEN("MDL_BACKUP_SYS_DML") },
  { C_STRING_WITH_LEN("MDL_BACKUP_DDL") },
  { C_STRING_WITH_LEN("MDL_BACKUP_BLOCK_DDL") },
  { C_STRING_WITH_LEN("MDL_BACKUP_ALTER_COPY") },
  { C_STRING_WITH_LEN("MDL_BACKUP_COMMIT") }
};
141
142
143 #ifdef HAVE_PSI_INTERFACE
init_psi_keys()144 void MDL_key::init_psi_keys()
145 {
146 int i;
147 int count;
148 PSI_stage_info *info __attribute__((unused));
149
150 count= array_elements(MDL_key::m_namespace_to_wait_state_name);
151 for (i= 0; i<count; i++)
152 {
153 /* mysql_stage_register wants an array of pointers, registering 1 by 1. */
154 info= & MDL_key::m_namespace_to_wait_state_name[i];
155 mysql_stage_register("sql", &info, 1);
156 }
157 }
158 #endif
159
/** TRUE after mdl_init() has run; reset again by mdl_destroy(). */
static bool mdl_initialized= 0;
161
162
163 /**
164 A collection of all MDL locks. A singleton,
165 there is only one instance of the map in the server.
166 */
167
168 class MDL_map
169 {
170 public:
171 void init();
172 void destroy();
173 MDL_lock *find_or_insert(LF_PINS *pins, const MDL_key *key);
174 unsigned long get_lock_owner(LF_PINS *pins, const MDL_key *key);
175 void remove(LF_PINS *pins, MDL_lock *lock);
get_pins()176 LF_PINS *get_pins() { return lf_hash_get_pins(&m_locks); }
177 private:
178 LF_HASH m_locks; /**< All acquired locks in the server. */
179 /** Pre-allocated MDL_lock object for BACKUP namespace. */
180 MDL_lock *m_backup_lock;
181 friend int mdl_iterate(mdl_iterator_callback, void *);
182 };
183
184
185 /**
186 A context of the recursive traversal through all contexts
187 in all sessions in search for deadlock.
188 */
189
190 class Deadlock_detection_visitor: public MDL_wait_for_graph_visitor
191 {
192 public:
Deadlock_detection_visitor(MDL_context * start_node_arg)193 Deadlock_detection_visitor(MDL_context *start_node_arg)
194 : m_start_node(start_node_arg),
195 m_victim(NULL),
196 m_current_search_depth(0),
197 m_found_deadlock(FALSE)
198 {}
199 virtual bool enter_node(MDL_context *node);
200 virtual void leave_node(MDL_context *node);
201
202 virtual bool inspect_edge(MDL_context *dest);
203
get_victim() const204 MDL_context *get_victim() const { return m_victim; }
205 private:
206 /**
207 Change the deadlock victim to a new one if it has lower deadlock
208 weight.
209 */
210 void opt_change_victim_to(MDL_context *new_victim);
211 private:
212 /**
213 The context which has initiated the search. There
214 can be multiple searches happening in parallel at the same time.
215 */
216 MDL_context *m_start_node;
217 /** If a deadlock is found, the context that identifies the victim. */
218 MDL_context *m_victim;
219 /** Set to the 0 at start. Increased whenever
220 we descend into another MDL context (aka traverse to the next
221 wait-for graph node). When MAX_SEARCH_DEPTH is reached, we
222 assume that a deadlock is found, even if we have not found a
223 loop.
224 */
225 uint m_current_search_depth;
226 /** TRUE if we found a deadlock. */
227 bool m_found_deadlock;
228 /**
229 Maximum depth for deadlock searches. After this depth is
230 achieved we will unconditionally declare that there is a
231 deadlock.
232
233 @note This depth should be small enough to avoid stack
234 being exhausted by recursive search algorithm.
235
236 TODO: Find out what is the optimal value for this parameter.
237 Current value is safe, but probably sub-optimal,
238 as there is an anecdotal evidence that real-life
239 deadlocks are even shorter typically.
240 */
241 static const uint MAX_SEARCH_DEPTH= 32;
242 };
243
244 #ifndef DBUG_OFF
245
246 /*
247 Print a list of all locks to DBUG trace to help with debugging
248 */
249
mdl_dbug_print_lock(MDL_ticket * mdl_ticket,void * arg,bool granted)250 static int mdl_dbug_print_lock(MDL_ticket *mdl_ticket, void *arg, bool granted)
251 {
252 String *tmp= (String*) arg;
253 char buffer[128];
254 MDL_key *mdl_key= mdl_ticket->get_key();
255 size_t length;
256 length= my_snprintf(buffer, sizeof(buffer)-1,
257 "\nname: %s db: %.*s key_name: %.*s (%s)",
258 mdl_ticket->get_type_name()->str,
259 (int) mdl_key->db_name_length(), mdl_key->db_name(),
260 (int) mdl_key->name_length(), mdl_key->name(),
261 granted ? "granted" : "waiting");
262 tmp->append(buffer, length);
263 return 0;
264 }
265
mdl_dbug_print_locks()266 const char *mdl_dbug_print_locks()
267 {
268 static String tmp;
269 mdl_iterate(mdl_dbug_print_lock, (void*) &tmp);
270 return tmp.c_ptr();
271 }
272 #endif /* DBUG_OFF */
273
274 /**
275 Enter a node of a wait-for graph. After
276 a node is entered, inspect_edge() will be called
277 for all wait-for destinations of this node. Then
278 leave_node() will be called.
279 We call "enter_node()" for all nodes we inspect,
280 including the starting node.
281
282 @retval TRUE Maximum search depth exceeded.
283 @retval FALSE OK.
284 */
285
enter_node(MDL_context * node)286 bool Deadlock_detection_visitor::enter_node(MDL_context *node)
287 {
288 m_found_deadlock= ++m_current_search_depth >= MAX_SEARCH_DEPTH;
289 if (m_found_deadlock)
290 {
291 DBUG_ASSERT(! m_victim);
292 opt_change_victim_to(node);
293 }
294 return m_found_deadlock;
295 }
296
297
298 /**
299 Done inspecting this node. Decrease the search
300 depth. If a deadlock is found, and we are
301 backtracking to the start node, optionally
302 change the deadlock victim to one with lower
303 deadlock weight.
304 */
305
leave_node(MDL_context * node)306 void Deadlock_detection_visitor::leave_node(MDL_context *node)
307 {
308 --m_current_search_depth;
309 if (m_found_deadlock)
310 opt_change_victim_to(node);
311 }
312
313
314 /**
315 Inspect a wait-for graph edge from one MDL context to another.
316
317 @retval TRUE A loop is found.
318 @retval FALSE No loop is found.
319 */
320
inspect_edge(MDL_context * node)321 bool Deadlock_detection_visitor::inspect_edge(MDL_context *node)
322 {
323 m_found_deadlock= node == m_start_node;
324 return m_found_deadlock;
325 }
326
327
328 /**
329 Change the deadlock victim to a new one if it has lower deadlock
330 weight.
331
332 @retval new_victim Victim is not changed.
333 @retval !new_victim New victim became the current.
334 */
335
336 void
opt_change_victim_to(MDL_context * new_victim)337 Deadlock_detection_visitor::opt_change_victim_to(MDL_context *new_victim)
338 {
339 if (m_victim == NULL ||
340 m_victim->get_deadlock_weight() >= new_victim->get_deadlock_weight())
341 {
342 /* Swap victims, unlock the old one. */
343 MDL_context *tmp= m_victim;
344 m_victim= new_victim;
345 m_victim->lock_deadlock_victim();
346 if (tmp)
347 tmp->unlock_deadlock_victim();
348 }
349 }
350
351
352 /**
353 Get a bit corresponding to enum_mdl_type value in a granted/waiting bitmaps
354 and compatibility matrices.
355 */
356
357 /**
358 The lock context. Created internally for an acquired lock.
359 For a given name, there exists only one MDL_lock instance,
360 and it exists only when the lock has been granted.
361 Can be seen as an MDL subsystem's version of TABLE_SHARE.
362
363 This is an abstract class which lacks information about
364 compatibility rules for lock types. They should be specified
365 in its descendants.
366 */
367
class MDL_lock
{
public:
  typedef mdl_bitmap_t bitmap_t;

  /**
    A list of tickets (granted or waiting) attached to an MDL_lock,
    together with a bitmap summarizing which lock types are present.
  */
  class Ticket_list
  {
    using List= ilist<MDL_ticket>;
  public:
    Ticket_list() :m_bitmap(0) { m_type_counters.fill(0); }

    void add_ticket(MDL_ticket *ticket);
    void remove_ticket(MDL_ticket *ticket);
    bool is_empty() const { return m_list.empty(); }
    bitmap_t bitmap() const { return m_bitmap; }
    List::const_iterator begin() const { return m_list.begin(); }
    List::const_iterator end() const { return m_list.end(); }
  private:
    /** List of tickets. */
    List m_list;
    /** Bitmap of types of tickets in this list. */
    bitmap_t m_bitmap;
    std::array<uint32_t, MDL_BACKUP_END> m_type_counters; // hash table
  };


  /**
    Helper struct which defines how different types of locks are handled
    for a specific MDL_lock. In practice we use only three strategies:
    "backup" lock strategy for locks in BACKUP namespace, "scoped" lock
    strategy for locks in SCHEMA namespace and "object" lock strategy for
    all other namespaces.
  */
  struct MDL_lock_strategy
  {
    virtual const bitmap_t *incompatible_granted_types_bitmap() const = 0;
    virtual const bitmap_t *incompatible_waiting_types_bitmap() const = 0;
    virtual bool needs_notification(const MDL_ticket *ticket) const = 0;
    virtual bool conflicting_locks(const MDL_ticket *ticket) const = 0;
    virtual bitmap_t hog_lock_types_bitmap() const = 0;
    virtual ~MDL_lock_strategy() {}
  };


  /**
    An implementation of the scoped metadata lock. The only locking modes
    which are supported at the moment are SHARED and INTENTION EXCLUSIVE
    and EXCLUSIVE.
  */
  struct MDL_scoped_lock : public MDL_lock_strategy
  {
    MDL_scoped_lock() {}
    virtual const bitmap_t *incompatible_granted_types_bitmap() const
    { return m_granted_incompatible; }
    virtual const bitmap_t *incompatible_waiting_types_bitmap() const
    { return m_waiting_incompatible; }
    virtual bool needs_notification(const MDL_ticket *ticket) const
    { return (ticket->get_type() == MDL_SHARED); }

    /**
      Notify threads holding scoped IX locks which conflict with a pending
      S lock.

      Thread which holds global IX lock can be a handler thread for
      insert delayed. We need to kill such threads in order to get
      global shared lock. We do this by calling code outside of MDL.
    */
    virtual bool conflicting_locks(const MDL_ticket *ticket) const
    { return ticket->get_type() == MDL_INTENTION_EXCLUSIVE; }

    /*
      In scoped locks, only IX lock request would starve because of X/S.
      But that is practically very rare case. So just return 0 from this
      function.
    */
    virtual bitmap_t hog_lock_types_bitmap() const
    { return 0; }
  private:
    static const bitmap_t m_granted_incompatible[MDL_TYPE_END];
    static const bitmap_t m_waiting_incompatible[MDL_TYPE_END];
  };


  /**
    An implementation of a per-object lock. Supports SHARED, SHARED_UPGRADABLE,
    SHARED HIGH PRIORITY and EXCLUSIVE locks.
  */
  struct MDL_object_lock : public MDL_lock_strategy
  {
    MDL_object_lock() {}
    virtual const bitmap_t *incompatible_granted_types_bitmap() const
    { return m_granted_incompatible; }
    virtual const bitmap_t *incompatible_waiting_types_bitmap() const
    { return m_waiting_incompatible; }
    virtual bool needs_notification(const MDL_ticket *ticket) const
    {
      return (MDL_BIT(ticket->get_type()) &
              (MDL_BIT(MDL_SHARED_NO_WRITE) |
               MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
               MDL_BIT(MDL_EXCLUSIVE)));
    }

    /**
      Notify threads holding a shared metadata locks on object which
      conflict with a pending X, SNW or SNRW lock.

      If thread which holds conflicting lock is waiting on table-level
      lock or some other non-MDL resource we might need to wake it up
      by calling code outside of MDL.
    */
    virtual bool conflicting_locks(const MDL_ticket *ticket) const
    { return ticket->get_type() < MDL_SHARED_UPGRADABLE; }

    /*
      To prevent starvation, these lock types that are only granted
      max_write_lock_count times in a row while other lock types are
      waiting.
    */
    virtual bitmap_t hog_lock_types_bitmap() const
    {
      return (MDL_BIT(MDL_SHARED_NO_WRITE) |
              MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
              MDL_BIT(MDL_EXCLUSIVE));
    }

  private:
    static const bitmap_t m_granted_incompatible[MDL_TYPE_END];
    static const bitmap_t m_waiting_incompatible[MDL_TYPE_END];
  };


  /** Strategy for the single lock of the BACKUP namespace. */
  struct MDL_backup_lock: public MDL_lock_strategy
  {
    MDL_backup_lock() {}
    virtual const bitmap_t *incompatible_granted_types_bitmap() const
    { return m_granted_incompatible; }
    virtual const bitmap_t *incompatible_waiting_types_bitmap() const
    { return m_waiting_incompatible; }
    virtual bool needs_notification(const MDL_ticket *ticket) const
    {
      return (MDL_BIT(ticket->get_type()) & MDL_BIT(MDL_BACKUP_FTWRL1));
    }

    /**
      Insert delayed threads may hold DML or TRANS_DML lock.
      We need to kill such threads in order to get lock for FTWRL
      statements. We do this by calling code outside of MDL.
    */
    virtual bool conflicting_locks(const MDL_ticket *ticket) const
    {
      return (MDL_BIT(ticket->get_type()) &
              (MDL_BIT(MDL_BACKUP_DML) |
               MDL_BIT(MDL_BACKUP_TRANS_DML)));
    }

    /*
      In backup namespace DML/DDL may starve because of concurrent FTWRL or
      BACKUP statements. This scenario is practically useless in real world,
      so we just return 0 here.
    */
    virtual bitmap_t hog_lock_types_bitmap() const
    { return 0; }
  private:
    static const bitmap_t m_granted_incompatible[MDL_BACKUP_END];
    static const bitmap_t m_waiting_incompatible[MDL_BACKUP_END];
  };

public:
  /** The key of the object (data) being protected. */
  MDL_key key;
  /**
    Read-write lock protecting this lock context.

    @note The fact that we use read-write lock prefers readers here is
          important as deadlock detector won't work correctly otherwise.

          For example, imagine that we have following waiters graph:

                       ctxA -> obj1 -> ctxB -> obj1 -|
                        ^                            |
                        |----------------------------|

          and both ctxA and ctxB start deadlock detection process:

            ctxA read-locks obj1             ctxB read-locks obj2
            ctxA goes deeper                 ctxB goes deeper

          Now ctxC comes in who wants to start waiting on obj1, also
          ctxD comes in who wants to start waiting on obj2.

            ctxC tries to write-lock obj1   ctxD tries to write-lock obj2
            ctxC is blocked                 ctxD is blocked

          Now ctxA and ctxB resume their search:

            ctxA tries to read-lock obj2    ctxB tries to read-lock obj1

          If m_rwlock prefers writes (or fair) both ctxA and ctxB would be
          blocked because of pending write locks from ctxD and ctxC
          correspondingly. Thus we will get a deadlock in deadlock detector.
          If m_wrlock prefers readers (actually ignoring pending writers is
          enough) ctxA and ctxB will continue and no deadlock will occur.
  */
  mysql_prlock_t m_rwlock;

  /** TRUE when there are neither granted nor waiting tickets. */
  bool is_empty() const
  {
    return (m_granted.is_empty() && m_waiting.is_empty());
  }

  const bitmap_t *incompatible_granted_types_bitmap() const
  { return m_strategy->incompatible_granted_types_bitmap(); }
  const bitmap_t *incompatible_waiting_types_bitmap() const
  { return m_strategy->incompatible_waiting_types_bitmap(); }

  bool has_pending_conflicting_lock(enum_mdl_type type);

  bool can_grant_lock(enum_mdl_type type, MDL_context *requstor_ctx,
                      bool ignore_lock_priority) const;

  inline unsigned long get_lock_owner() const;

  void reschedule_waiters();

  void remove_ticket(LF_PINS *pins, Ticket_list MDL_lock::*queue,
                     MDL_ticket *ticket);

  bool visit_subgraph(MDL_ticket *waiting_ticket,
                      MDL_wait_for_graph_visitor *gvisitor);

  bool needs_notification(const MDL_ticket *ticket) const
  { return m_strategy->needs_notification(ticket); }

  /**
    Ask the owners of granted tickets that conflict (per the strategy)
    with a request from @a ctx to release their resources, via
    notify_shared_lock() on the lock owner interface.
  */
  void notify_conflicting_locks(MDL_context *ctx)
  {
    for (const auto &conflicting_ticket : m_granted)
    {
      if (conflicting_ticket.get_ctx() != ctx &&
          m_strategy->conflicting_locks(&conflicting_ticket))
      {
        MDL_context *conflicting_ctx= conflicting_ticket.get_ctx();

        ctx->get_owner()->
          notify_shared_lock(conflicting_ctx->get_owner(),
                             conflicting_ctx->get_needs_thr_lock_abort());
      }
    }
  }

  bitmap_t hog_lock_types_bitmap() const
  { return m_strategy->hog_lock_types_bitmap(); }

#ifndef DBUG_OFF
  bool check_if_conflicting_replication_locks(MDL_context *ctx);
#endif

  /** List of granted tickets for this lock. */
  Ticket_list m_granted;
  /** Tickets for contexts waiting to acquire a lock. */
  Ticket_list m_waiting;

  /**
    Number of times high priority lock requests have been granted while
    low priority lock requests were waiting.
  */
  ulong m_hog_lock_count;

public:

  MDL_lock()
    : m_hog_lock_count(0),
      m_strategy(0)
  { mysql_prlock_init(key_MDL_lock_rwlock, &m_rwlock); }

  /** Construct the pre-allocated lock of the BACKUP namespace. */
  MDL_lock(const MDL_key *key_arg)
  : key(key_arg),
    m_hog_lock_count(0),
    m_strategy(&m_backup_lock_strategy)
  {
    DBUG_ASSERT(key_arg->mdl_namespace() == MDL_key::BACKUP);
    mysql_prlock_init(key_MDL_lock_rwlock, &m_rwlock);
  }

  ~MDL_lock()
  { mysql_prlock_destroy(&m_rwlock); }

  /* LF_HASH element lifetime hooks (element memory is managed by the hash). */
  static void lf_alloc_constructor(uchar *arg)
  { new (arg + LF_HASH_OVERHEAD) MDL_lock(); }

  static void lf_alloc_destructor(uchar *arg)
  { ((MDL_lock*)(arg + LF_HASH_OVERHEAD))->~MDL_lock(); }

  /** Initialize a hash element for a concrete key: copy key, pick strategy. */
  static void lf_hash_initializer(LF_HASH *hash __attribute__((unused)),
                                  MDL_lock *lock, MDL_key *key_arg)
  {
    DBUG_ASSERT(key_arg->mdl_namespace() != MDL_key::BACKUP);
    new (&lock->key) MDL_key(key_arg);
    if (key_arg->mdl_namespace() == MDL_key::SCHEMA)
      lock->m_strategy= &m_scoped_lock_strategy;
    else
      lock->m_strategy= &m_object_lock_strategy;
  }

  /* NULL means the object is being deleted; see MDL_map::remove(). */
  const MDL_lock_strategy *m_strategy;
private:
  static const MDL_backup_lock m_backup_lock_strategy;
  static const MDL_scoped_lock m_scoped_lock_strategy;
  static const MDL_object_lock m_object_lock_strategy;
};
675
676
/* Shared, immutable strategy instances; MDL_lock objects only point at them. */
const MDL_lock::MDL_backup_lock MDL_lock::m_backup_lock_strategy;
const MDL_lock::MDL_scoped_lock MDL_lock::m_scoped_lock_strategy;
const MDL_lock::MDL_object_lock MDL_lock::m_object_lock_strategy;


/* The server-wide singleton map of all MDL locks. */
static MDL_map mdl_locks;
683
684
extern "C"
{
/**
  LF_HASH key-extraction callback: return the pointer to and length of
  the MDL_key embedded in an MDL_lock record.
*/
static uchar *
mdl_locks_key(const uchar *record, size_t *length,
              my_bool not_used __attribute__((unused)))
{
  MDL_lock *lock=(MDL_lock*) record;
  *length= lock->key.length();
  return (uchar*) lock->key.ptr();
}
} /* extern "C" */
696
697
698 /**
699 Initialize the metadata locking subsystem.
700
701 This function is called at server startup.
702
703 In particular, initializes the new global mutex and
704 the associated condition variable: LOCK_mdl and COND_mdl.
705 These locking primitives are implementation details of the MDL
706 subsystem and are private to it.
707 */
708
mdl_init()709 void mdl_init()
710 {
711 DBUG_ASSERT(! mdl_initialized);
712 mdl_initialized= TRUE;
713
714 #ifdef HAVE_PSI_INTERFACE
715 init_mdl_psi_keys();
716 #endif
717
718 mdl_locks.init();
719 }
720
721
722 /**
723 Release resources of metadata locking subsystem.
724
725 Destroys the global mutex and the condition variable.
726 Called at server shutdown.
727 */
728
mdl_destroy()729 void mdl_destroy()
730 {
731 if (mdl_initialized)
732 {
733 mdl_initialized= FALSE;
734 mdl_locks.destroy();
735 }
736 }
737
738
/** Bundles the user callback and its opaque argument for mdl_iterate_lock(). */
struct mdl_iterate_arg
{
  mdl_iterator_callback callback;
  void *argument;
};
744
745
mdl_iterate_lock(MDL_lock * lock,mdl_iterate_arg * arg)746 static my_bool mdl_iterate_lock(MDL_lock *lock, mdl_iterate_arg *arg)
747 {
748 /*
749 We can skip check for m_strategy here, becase m_granted
750 must be empty for such locks anyway.
751 */
752 mysql_prlock_rdlock(&lock->m_rwlock);
753 bool res= std::any_of(lock->m_granted.begin(), lock->m_granted.end(),
754 [arg](MDL_ticket &ticket) {
755 return arg->callback(&ticket, arg->argument, true);
756 });
757 res= std::any_of(lock->m_waiting.begin(), lock->m_waiting.end(),
758 [arg](MDL_ticket &ticket) {
759 return arg->callback(&ticket, arg->argument, false);
760 });
761 mysql_prlock_unlock(&lock->m_rwlock);
762 return res;
763 }
764
765
mdl_iterate(mdl_iterator_callback callback,void * arg)766 int mdl_iterate(mdl_iterator_callback callback, void *arg)
767 {
768 DBUG_ENTER("mdl_iterate");
769 mdl_iterate_arg argument= { callback, arg };
770 LF_PINS *pins= mdl_locks.get_pins();
771 int res= 1;
772
773 if (pins)
774 {
775 res= mdl_iterate_lock(mdl_locks.m_backup_lock, &argument) ||
776 lf_hash_iterate(&mdl_locks.m_locks, pins,
777 (my_hash_walk_action) mdl_iterate_lock, &argument);
778 lf_hash_put_pins(pins);
779 }
780 DBUG_RETURN(res);
781 }
782
783
mdl_hash_function(CHARSET_INFO * cs,const uchar * key,size_t length)784 my_hash_value_type mdl_hash_function(CHARSET_INFO *cs,
785 const uchar *key, size_t length)
786 {
787 MDL_key *mdl_key= (MDL_key*) (key - offsetof(MDL_key, m_ptr));
788 return mdl_key->hash_value();
789 }
790
791
/** Initialize the container for all MDL locks. */

void MDL_map::init()
{
  MDL_key backup_lock_key(MDL_key::BACKUP, "", "");

  /*
    NOTE(review): the new(std::nothrow) result is not checked here, so an
    OOM at startup leaves m_backup_lock NULL -- presumably startup fails
    before it is dereferenced; TODO confirm.
  */
  m_backup_lock= new (std::nothrow) MDL_lock(&backup_lock_key);

  lf_hash_init(&m_locks, sizeof(MDL_lock), LF_HASH_UNIQUE, 0, 0,
               mdl_locks_key, &my_charset_bin);
  m_locks.alloc.constructor= MDL_lock::lf_alloc_constructor;
  m_locks.alloc.destructor= MDL_lock::lf_alloc_destructor;
  m_locks.initializer= (lf_hash_initializer) MDL_lock::lf_hash_initializer;
  m_locks.hash_function= mdl_hash_function;
}
807
808
809 /**
810 Destroy the container for all MDL locks.
811 @pre It must be empty.
812 */
813
destroy()814 void MDL_map::destroy()
815 {
816 delete m_backup_lock;
817
818 DBUG_ASSERT(!lf_hash_size(&m_locks));
819 lf_hash_destroy(&m_locks);
820 }
821
822
823 /**
824 Find MDL_lock object corresponding to the key, create it
825 if it does not exist.
826
827 @retval non-NULL - Success. MDL_lock instance for the key with
828 locked MDL_lock::m_rwlock.
829 @retval NULL - Failure (OOM).
830 */
831
find_or_insert(LF_PINS * pins,const MDL_key * mdl_key)832 MDL_lock* MDL_map::find_or_insert(LF_PINS *pins, const MDL_key *mdl_key)
833 {
834 MDL_lock *lock;
835
836 if (mdl_key->mdl_namespace() == MDL_key::BACKUP)
837 {
838 /*
839 Return pointer to pre-allocated MDL_lock instance. Such an optimization
840 allows to save one hash lookup for any statement changing data.
841
842 It works since this namespace contains only one element so keys
843 for them look like '<namespace-id>\0\0'.
844 */
845 DBUG_ASSERT(mdl_key->length() == 3);
846 mysql_prlock_wrlock(&m_backup_lock->m_rwlock);
847 return m_backup_lock;
848 }
849
850 retry:
851 while (!(lock= (MDL_lock*) lf_hash_search(&m_locks, pins, mdl_key->ptr(),
852 mdl_key->length())))
853 if (lf_hash_insert(&m_locks, pins, (uchar*) mdl_key) == -1)
854 return NULL;
855
856 mysql_prlock_wrlock(&lock->m_rwlock);
857 if (unlikely(!lock->m_strategy))
858 {
859 mysql_prlock_unlock(&lock->m_rwlock);
860 lf_hash_search_unpin(pins);
861 goto retry;
862 }
863 lf_hash_search_unpin(pins);
864
865 return lock;
866 }
867
868
869 /**
870 * Return thread id of the owner of the lock, if it is owned.
871 */
872
873 unsigned long
get_lock_owner(LF_PINS * pins,const MDL_key * mdl_key)874 MDL_map::get_lock_owner(LF_PINS *pins, const MDL_key *mdl_key)
875 {
876 unsigned long res= 0;
877
878 if (mdl_key->mdl_namespace() == MDL_key::BACKUP)
879 {
880 mysql_prlock_rdlock(&m_backup_lock->m_rwlock);
881 res= m_backup_lock->get_lock_owner();
882 mysql_prlock_unlock(&m_backup_lock->m_rwlock);
883 }
884 else
885 {
886 MDL_lock *lock= (MDL_lock*) lf_hash_search(&m_locks, pins, mdl_key->ptr(),
887 mdl_key->length());
888 if (lock)
889 {
890 /*
891 We can skip check for m_strategy here, becase m_granted
892 must be empty for such locks anyway.
893 */
894 mysql_prlock_rdlock(&lock->m_rwlock);
895 res= lock->get_lock_owner();
896 mysql_prlock_unlock(&lock->m_rwlock);
897 lf_hash_search_unpin(pins);
898 }
899 }
900 return res;
901 }
902
903
904 /**
905 Destroy MDL_lock object or delegate this responsibility to
906 whatever thread that holds the last outstanding reference to
907 it.
908 */
909
remove(LF_PINS * pins,MDL_lock * lock)910 void MDL_map::remove(LF_PINS *pins, MDL_lock *lock)
911 {
912 if (lock->key.mdl_namespace() == MDL_key::BACKUP)
913 {
914 /* Never destroy pre-allocated MDL_lock object in BACKUP namespace. */
915 mysql_prlock_unlock(&lock->m_rwlock);
916 return;
917 }
918
919 lock->m_strategy= 0;
920 mysql_prlock_unlock(&lock->m_rwlock);
921 lf_hash_delete(&m_locks, pins, lock->key.ptr(), lock->key.length());
922 }
923
924
925 /**
926 Initialize a metadata locking context.
927
928 This is to be called when a new server connection is created.
929 */
930
MDL_context()931 MDL_context::MDL_context()
932 :
933 m_owner(NULL),
934 m_needs_thr_lock_abort(FALSE),
935 m_waiting_for(NULL),
936 m_pins(NULL)
937 {
938 mysql_prlock_init(key_MDL_context_LOCK_waiting_for, &m_LOCK_waiting_for);
939 }
940
941
942 /**
943 Destroy metadata locking context.
944
945 Assumes and asserts that there are no active or pending locks
946 associated with this context at the time of the destruction.
947
948 Currently does nothing. Asserts that there are no pending
949 or satisfied lock requests. The pending locks must be released
950 prior to destruction. This is a new way to express the assertion
951 that all tables are closed before a connection is destroyed.
952 */
953
destroy()954 void MDL_context::destroy()
955 {
956 DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty());
957 DBUG_ASSERT(m_tickets[MDL_TRANSACTION].is_empty());
958 DBUG_ASSERT(m_tickets[MDL_EXPLICIT].is_empty());
959
960 mysql_prlock_destroy(&m_LOCK_waiting_for);
961 if (m_pins)
962 lf_hash_put_pins(m_pins);
963 }
964
965
fix_pins()966 bool MDL_context::fix_pins()
967 {
968 return m_pins ? false : (m_pins= mdl_locks.get_pins()) == 0;
969 }
970
971
972 /**
973 Initialize a lock request.
974
975 This is to be used for every lock request.
976
977 Note that initialization and allocation are split into two
978 calls. This is to allow flexible memory management of lock
979 requests. Normally a lock request is stored in statement memory
980 (e.g. is a member of struct TABLE_LIST), but we would also like
981 to allow allocation of lock requests in other memory roots,
982 for example in the grant subsystem, to lock privilege tables.
983
984 The MDL subsystem does not own or manage memory of lock requests.
985
986 @param mdl_namespace Id of namespace of object to be locked
987 @param db Name of database to which the object belongs
988 @param name Name of of the object
989 @param mdl_type The MDL lock type for the request.
990 */
991
void MDL_request::init_with_source(MDL_key::enum_mdl_namespace mdl_namespace,
                                   const char *db_arg,
                                   const char *name_arg,
                                   enum_mdl_type mdl_type_arg,
                                   enum_mdl_duration mdl_duration_arg,
                                   const char *src_file,
                                   uint src_line)
{
  key.mdl_key_init(mdl_namespace, db_arg, name_arg);
  type= mdl_type_arg;
  duration= mdl_duration_arg;
  ticket= NULL;                 // no lock has been granted for this request yet
  /* Record the requesting source location for diagnostics. */
  m_src_file= src_file;
  m_src_line= src_line;
}
1007
1008
1009 /**
1010 Initialize a lock request using pre-built MDL_key.
1011
1012 @sa MDL_request::init(namespace, db, name, type).
1013
1014 @param key_arg The pre-built MDL key for the request.
1015 @param mdl_type_arg The MDL lock type for the request.
1016 */
1017
void MDL_request::init_by_key_with_source(const MDL_key *key_arg,
                                          enum_mdl_type mdl_type_arg,
                                          enum_mdl_duration mdl_duration_arg,
                                          const char *src_file,
                                          uint src_line)
{
  /* Copy the pre-built key instead of building one from namespace/db/name. */
  key.mdl_key_init(key_arg);
  type= mdl_type_arg;
  duration= mdl_duration_arg;
  ticket= NULL;                 // no lock has been granted for this request yet
  /* Record the requesting source location for diagnostics. */
  m_src_file= src_file;
  m_src_line= src_line;
}
1031
1032
1033 /**
1034 Auxiliary functions needed for creation/destruction of MDL_ticket
1035 objects.
1036
1037 @todo This naive implementation should be replaced with one that saves
1038 on memory allocation by reusing released objects.
1039 */
1040
MDL_ticket *MDL_ticket::create(MDL_context *ctx_arg, enum_mdl_type type_arg
#ifndef DBUG_OFF
                               , enum_mdl_duration duration_arg
#endif
                               )
{
  /*
    std::nothrow: allocation failure is reported to the caller as NULL
    rather than by throwing bad_alloc.
  */
  return new (std::nothrow)
             MDL_ticket(ctx_arg, type_arg
#ifndef DBUG_OFF
                        , duration_arg
#endif
                        );
}
1054
1055
void MDL_ticket::destroy(MDL_ticket *ticket)
{
  /* Release the performance-schema instrumentation object first. */
  mysql_mdl_destroy(ticket->m_psi);
  ticket->m_psi= NULL;

  delete ticket;
}
1063
1064
1065 /**
1066 Return the 'weight' of this ticket for the
1067 victim selection algorithm. Requests with
1068 lower weight are preferred to requests
1069 with higher weight when choosing a victim.
1070 */
1071
get_deadlock_weight() const1072 uint MDL_ticket::get_deadlock_weight() const
1073 {
1074 if (m_lock->key.mdl_namespace() == MDL_key::BACKUP)
1075 {
1076 if (m_type == MDL_BACKUP_FTWRL1)
1077 return DEADLOCK_WEIGHT_FTWRL1;
1078 return DEADLOCK_WEIGHT_DDL;
1079 }
1080 return m_type >= MDL_SHARED_UPGRADABLE ?
1081 DEADLOCK_WEIGHT_DDL : DEADLOCK_WEIGHT_DML;
1082 }
1083
1084
1085 /** Construct an empty wait slot. */
1086
MDL_wait::MDL_wait()
  :m_wait_status(EMPTY)  // slot starts out unoccupied
{
  mysql_mutex_init(key_MDL_wait_LOCK_wait_status, &m_LOCK_wait_status, NULL);
  mysql_cond_init(key_MDL_wait_COND_wait_status, &m_COND_wait_status, NULL);
}
1093
1094
1095 /** Destroy system resources. */
1096
MDL_wait::~MDL_wait()
{
  mysql_mutex_destroy(&m_LOCK_wait_status);
  mysql_cond_destroy(&m_COND_wait_status);
}
1102
1103
1104 /**
1105 Set the status unless it's already set. Return FALSE if set,
1106 TRUE otherwise.
1107 */
1108
set_status(enum_wait_status status_arg)1109 bool MDL_wait::set_status(enum_wait_status status_arg)
1110 {
1111 bool was_occupied= TRUE;
1112 mysql_mutex_lock(&m_LOCK_wait_status);
1113 if (m_wait_status == EMPTY)
1114 {
1115 was_occupied= FALSE;
1116 m_wait_status= status_arg;
1117 mysql_cond_signal(&m_COND_wait_status);
1118 }
1119 mysql_mutex_unlock(&m_LOCK_wait_status);
1120 return was_occupied;
1121 }
1122
1123
1124 /** Query the current value of the wait slot. */
1125
get_status()1126 MDL_wait::enum_wait_status MDL_wait::get_status()
1127 {
1128 enum_wait_status result;
1129 mysql_mutex_lock(&m_LOCK_wait_status);
1130 result= m_wait_status;
1131 mysql_mutex_unlock(&m_LOCK_wait_status);
1132 return result;
1133 }
1134
1135
1136 /** Clear the current value of the wait slot. */
1137
void MDL_wait::reset_status()
{
  mysql_mutex_lock(&m_LOCK_wait_status);
  /* Mark the slot as unoccupied so a new wait can be started. */
  m_wait_status= EMPTY;
  mysql_mutex_unlock(&m_LOCK_wait_status);
}
1144
1145
1146 /**
1147 Wait for the status to be assigned to this wait slot.
1148
1149 @param owner MDL context owner.
1150 @param abs_timeout Absolute time after which waiting should stop.
1151 @param set_status_on_timeout TRUE - If in case of timeout waiting
1152 context should close the wait slot by
1153 sending TIMEOUT to itself.
1154 FALSE - Otherwise.
1155 @param wait_state_name Thread state name to be set for duration of wait.
1156
1157 @returns Signal posted.
1158 */
1159
MDL_wait::enum_wait_status
MDL_wait::timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout,
                     bool set_status_on_timeout,
                     const PSI_stage_info *wait_state_name)
{
  PSI_stage_info old_stage;
  enum_wait_status result;
  int wait_result= 0;
  DBUG_ENTER("MDL_wait::timed_wait");

  mysql_mutex_lock(&m_LOCK_wait_status);

  owner->ENTER_COND(&m_COND_wait_status, &m_LOCK_wait_status,
                    wait_state_name, & old_stage);
  thd_wait_begin(NULL, THD_WAIT_META_DATA_LOCK);
  tpool::tpool_wait_begin();
  /*
    Loop until a status is posted, the owner is killed, or the absolute
    deadline passes (ETIMEDOUT/ETIME from the timed condvar wait).
  */
  while (!m_wait_status && !owner->is_killed() &&
         wait_result != ETIMEDOUT && wait_result != ETIME)
  {
#ifdef WITH_WSREP
    // Allow tests to block the applier thread using the DBUG facilities
    DBUG_EXECUTE_IF("sync.wsrep_before_mdl_wait",
                 {
                   const char act[]=
                     "now "
                     "wait_for signal.wsrep_before_mdl_wait";
                   DBUG_ASSERT(!debug_sync_set_action((owner->get_thd()),
                                                      STRING_WITH_LEN(act)));
                 };);
    if (WSREP_ON && wsrep_thd_is_BF(owner->get_thd(), false))
    {
      /*
        Brute-force (high priority) threads wait without a timeout —
        NOTE(review): presumably they rely on being woken via conflict
        resolution; confirm against wsrep MDL conflict handling.
      */
      wait_result= mysql_cond_wait(&m_COND_wait_status, &m_LOCK_wait_status);
    }
    else
#endif /* WITH_WSREP */
      wait_result= mysql_cond_timedwait(&m_COND_wait_status, &m_LOCK_wait_status,
                                        abs_timeout);
  }
  tpool::tpool_wait_end();
  thd_wait_end(NULL);

  if (m_wait_status == EMPTY)
  {
    /*
      Wait has ended not due to a status being set from another
      thread but due to this connection/statement being killed or a
      time out.
      To avoid races, which may occur if another thread sets
      GRANTED status before the code which calls this method
      processes the abort/timeout, we assign the status under
      protection of the m_LOCK_wait_status, within the critical
      section. An exception is when set_status_on_timeout is
      false, which means that the caller intends to restart the
      wait.
    */
    if (owner->is_killed())
      m_wait_status= KILLED;
    else if (set_status_on_timeout)
      m_wait_status= TIMEOUT;
  }
  result= m_wait_status;

  /* EXIT_COND releases m_LOCK_wait_status and restores the thread stage. */
  owner->EXIT_COND(& old_stage);

  DBUG_RETURN(result);
}
1226
1227
1228 /**
1229 Add ticket to MDL_lock's list of waiting requests and
1230 update corresponding bitmap of lock types.
1231 */
1232
void MDL_lock::Ticket_list::add_ticket(MDL_ticket *ticket)
{
  /*
    Ticket being added to the list must have MDL_ticket::m_lock set,
    since for such tickets methods accessing this member might be
    called by other threads.
  */
  DBUG_ASSERT(ticket->get_lock());
#ifdef WITH_WSREP
  if (WSREP_ON && (this == &(ticket->get_lock()->m_waiting)) &&
      wsrep_thd_is_BF(ticket->get_ctx()->get_thd(), false))
  {
    DBUG_ASSERT(WSREP(ticket->get_ctx()->get_thd()));

    /*
      A brute-force (BF) thread's waiting ticket is inserted ahead of
      the first non-BF waiter, i.e. BF waiters jump the queue but keep
      FIFO order among themselves.
    */
    m_list.insert(std::find_if(ticket->get_lock()->m_waiting.begin(),
                               ticket->get_lock()->m_waiting.end(),
                               [](const MDL_ticket &waiting) {
                                 return !wsrep_thd_is_BF(
                                     waiting.get_ctx()->get_thd(), true);
                               }),
                  *ticket);
  }
  else
#endif /* WITH_WSREP */
  {
    /*
      Add ticket to the *back* of the queue to ensure fairness
      among requests with the same priority.
    */
    m_list.push_back(*ticket);
  }
  /* Keep the aggregate type bitmap and per-type counters in sync. */
  m_bitmap|= MDL_BIT(ticket->get_type());
  m_type_counters[ticket->get_type()]++;
}
1267
1268
1269 /**
1270 Remove ticket from MDL_lock's list of requests and
1271 update corresponding bitmap of lock types.
1272 */
1273
remove_ticket(MDL_ticket * ticket)1274 void MDL_lock::Ticket_list::remove_ticket(MDL_ticket *ticket)
1275 {
1276 m_list.remove(*ticket);
1277 /*
1278 Check if waiting queue has another ticket with the same type as
1279 one which was removed. If there is no such ticket, i.e. we have
1280 removed last ticket of particular type, then we need to update
1281 bitmap of waiting ticket's types.
1282 */
1283 if (--m_type_counters[ticket->get_type()] == 0)
1284 m_bitmap&= ~MDL_BIT(ticket->get_type());
1285 }
1286
1287
1288 /**
1289 Determine waiting contexts which requests for the lock can be
1290 satisfied, grant lock to them and wake them up.
1291
1292 @note Together with MDL_lock::add_ticket() this method implements
1293 fair scheduling among requests with the same priority.
1294 It tries to grant lock from the head of waiters list, while
1295 add_ticket() adds new requests to the back of this list.
1296
1297 */
1298
void MDL_lock::reschedule_waiters()
{
  bool skip_high_priority= false;
  bitmap_t hog_lock_types= hog_lock_types_bitmap();

  if (m_hog_lock_count >= max_write_lock_count)
  {
    /*
      If number of successively granted high-prio, strong locks has exceeded
      max_write_lock_count give a way to low-prio, weak locks to avoid their
      starvation.
    */

    if ((m_waiting.bitmap() & ~hog_lock_types) != 0)
    {
      /*
        Even though normally when m_hog_lock_count is non-0 there is
        some pending low-prio lock, we still can encounter situation
        when m_hog_lock_count is non-0 and there are no pending low-prio
        locks. This, for example, can happen when a ticket for pending
        low-prio lock was removed from waiters list due to timeout,
        and reschedule_waiters() is called after that to update the
        waiters queue. m_hog_lock_count will be reset to 0 at the
        end of this call in such case.

        Note that it is not an issue if we fail to wake up any pending
        waiters for weak locks in the loop below. This would mean that
        all of them are either killed, timed out or chosen as a victim
        by deadlock resolver, but have not managed to remove ticket
        from the waiters list yet. After tickets will be removed from
        the waiters queue there will be another call to
        reschedule_waiters() with pending bitmap updated to reflect new
        state of waiters queue.
      */
      skip_high_priority= true;
    }
  }

  /*
    Find the first (and hence the oldest) waiting request which
    can be satisfied (taking into account priority). Grant lock to it.
    Repeat the process for the remainder of waiters.
    Note we don't need to re-start iteration from the head of the
    list after satisfying the first suitable request as in our case
    all compatible types of requests have the same priority.

    TODO/FIXME: We should:
                - Either switch to scheduling without priorities
                  which will allow to stop iteration through the
                  list of waiters once we found the first ticket
                  which can't be  satisfied
                - Or implement some check using bitmaps which will
                  allow to stop iteration in cases when, e.g., we
                  grant SNRW lock and there are no pending S or
                  SH locks.
  */
  for (auto it= m_waiting.begin(); it != m_waiting.end(); ++it)
  {
    /*
      Skip high-prio, strong locks if earlier we have decided to give way to
      low-prio, weaker locks.
    */
    if (skip_high_priority &&
        ((MDL_BIT(it->get_type()) & hog_lock_types) != 0))
      continue;

    if (can_grant_lock(it->get_type(), it->get_ctx(),
                       skip_high_priority))
    {
      /* set_status() returns false only if the slot was still EMPTY. */
      if (!it->get_ctx()->m_wait.set_status(MDL_wait::GRANTED))
      {
        /*
          Satisfy the found request by updating lock structures.
          It is OK to do so even after waking up the waiter since any
          session which tries to get any information about the state of
          this lock has to acquire MDL_lock::m_rwlock first and thus,
          when manages to do so, already sees an updated state of the
          MDL_lock object.
        */
        auto prev_it= std::prev(it); // this might be begin()-- but the hack
                                     // works because list is circular
        m_waiting.remove_ticket(&*it);
        m_granted.add_ticket(&*it);

        /*
          Increase counter of successively granted high-priority strong locks,
          if we have granted one.
        */
        if ((MDL_BIT(it->get_type()) & hog_lock_types) != 0)
          m_hog_lock_count++;

        /* Step back so ++it resumes from the element after the removed one. */
        it= prev_it;
      }
      /*
        If we could not update the wait slot of the waiter,
        it can be due to fact that its connection/statement was
        killed or it has timed out (i.e. the slot is not empty).
        Since in all such cases the waiter assumes that the lock was
        not been granted, we should keep the request in the waiting
        queue and look for another request to reschedule.
      */
    }
  }

  if ((m_waiting.bitmap() & ~hog_lock_types) == 0)
  {
    /*
      Reset number of successively granted high-prio, strong locks
      if there are no pending low-prio, weak locks.
      This ensures:
      - That m_hog_lock_count is correctly reset after strong lock
        is released and weak locks are granted (or there are no
        other lock requests).
      - That situation when SNW lock is granted along with some SR
        locks, but SW locks are still blocked are handled correctly.
      - That m_hog_lock_count is zero in most cases when there are no pending
        weak locks (see comment at the start of this method for example of
        exception). This allows to save on checks at the start of this method.
    */
    m_hog_lock_count= 0;
  }
}
1421
1422
1423 /**
1424 Compatibility (or rather "incompatibility") matrices for scoped metadata
1425 lock.
1426 Scoped locks are database (or schema) locks.
1427 Arrays of bitmaps which elements specify which granted/waiting locks
1428 are incompatible with type of lock being requested.
1429
1430 The first array specifies if particular type of request can be satisfied
1431 if there is granted scoped lock of certain type.
1432
1433 (*) Since intention shared scoped locks (IS) are compatible with all other
1434 type of locks, they don't need to be implemented and there is no code
1435 for them.
1436
1437 | Type of active |
1438 Request | scoped lock |
1439 type | IS(*) IX S X |
1440 ---------+------------------+
1441 IS(*) | + + + + |
1442 IX | + + - - |
1443 S | + - + - |
1444 X | + - - - |
1445
1446 The second array specifies if particular type of request can be satisfied
1447 if there is already waiting request for the scoped lock of certain type.
1448 I.e. it specifies what is the priority of different lock types.
1449
1450 | Pending |
1451 Request | scoped lock |
1452 type | IS(*) IX S X |
1453 ---------+-----------------+
1454 IS(*) | + + + + |
1455 IX | + + - - |
1456 S | + + + - |
1457 X | + + + + |
1458
1459 Here: "+" -- means that request can be satisfied
1460 "-" -- means that request can't be satisfied and should wait
1461
1462 Note that relation between scoped locks and objects locks requested
1463 by statement is not straightforward and is therefore fully defined
1464 by SQL-layer.
1465 For example, in order to support global read lock implementation
1466 SQL-layer acquires IX lock in GLOBAL namespace for each statement
1467 that can modify metadata or data (i.e. for each statement that
1468 needs SW, SU, SNW, SNRW or X object locks). OTOH, to ensure that
1469 DROP DATABASE works correctly with concurrent DDL, IX metadata locks
1470 in SCHEMA namespace are acquired for DDL statements which can update
1471 metadata in the schema (i.e. which acquire SU, SNW, SNRW and X locks
1472 on schema objects) and aren't acquired for DML.
1473 */
1474
const MDL_lock::bitmap_t
MDL_lock::MDL_scoped_lock::m_granted_incompatible[MDL_TYPE_END]=
{
  /* IX (MDL_INTENTION_EXCLUSIVE): blocked by granted S and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED),
  /* S (MDL_SHARED): blocked by granted IX and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE),
  /* Remaining object-lock types are unused for scoped locks. */
  0, 0, 0, 0, 0, 0, 0,
  /* X (MDL_EXCLUSIVE): blocked by granted IX, S and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED) | MDL_BIT(MDL_INTENTION_EXCLUSIVE)
};
1483
const MDL_lock::bitmap_t
MDL_lock::MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END]=
{
  /* IX: yields to pending S and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED),
  /* S: yields only to pending X; X never yields to pending requests. */
  MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0, 0, 0
};
1490
1491
1492 /**
1493 Compatibility (or rather "incompatibility") matrices for per-object
1494 metadata lock. Arrays of bitmaps which elements specify which granted/
1495 waiting locks are incompatible with type of lock being requested.
1496
1497 The first array specifies if particular type of request can be satisfied
1498 if there is granted lock of certain type.
1499
1500 Request | Granted requests for lock |
1501 type | S SH SR SW SU SRO SNW SNRW X |
1502 ----------+------------------------------------+
1503 S | + + + + + + + + - |
1504 SH | + + + + + + + + - |
1505 SR | + + + + + + + - - |
1506 SW | + + + + + - - - - |
1507 SU | + + + + - + - - - |
1508 SRO | + + + - + + + - - |
1509 SNW | + + + - - + - - - |
1510 SNRW | + + - - - - - - - |
1511 X | - - - - - - - - - |
1512 SU -> X | - - - - 0 - 0 0 0 |
1513 SNW -> X | - - - 0 0 - 0 0 0 |
1514 SNRW -> X | - - 0 0 0 0 0 0 0 |
1515
1516 The second array specifies if particular type of request can be satisfied
1517 if there is waiting request for the same lock of certain type. In other
1518 words it specifies what is the priority of different lock types.
1519
1520 Request | Pending requests for lock |
1521 type | S SH SR SW SU SRO SNW SNRW X |
1522 ----------+-----------------------------------+
1523 S | + + + + + + + + - |
1524 SH | + + + + + + + + + |
1525 SR | + + + + + + + - - |
1526 SW | + + + + + + - - - |
1527 SU | + + + + + + + + - |
1528 SRO | + + + - + + + - - |
1529 SNW | + + + + + + + + - |
1530 SNRW | + + + + + + + + - |
1531 X | + + + + + + + + + |
1532 SU -> X | + + + + + + + + + |
1533 SNW -> X | + + + + + + + + + |
1534 SNRW -> X | + + + + + + + + + |
1535
1536 Here: "+" -- means that request can be satisfied
1537 "-" -- means that request can't be satisfied and should wait
1538 "0" -- means impossible situation which will trigger assert
1539
1540 @note In cases then current context already has "stronger" type
1541 of lock on the object it will be automatically granted
1542 thanks to usage of the MDL_context::find_ticket() method.
1543
1544 @note IX locks are excluded since they are not used for per-object
1545 metadata locks.
1546 */
1547
const MDL_lock::bitmap_t
MDL_lock::MDL_object_lock::m_granted_incompatible[MDL_TYPE_END]=
{
  /* IX: unused for per-object locks. */
  0,
  /* S: blocked only by granted X. */
  MDL_BIT(MDL_EXCLUSIVE),
  /* SH: blocked only by granted X. */
  MDL_BIT(MDL_EXCLUSIVE),
  /* SR: blocked by granted SNRW and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE),
  /* SW: blocked by granted SRO, SNW, SNRW and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
    MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY),
  /* SU: blocked by granted SU, SNW, SNRW and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
    MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE),
  /* SRO: blocked by granted SW, SNRW and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
    MDL_BIT(MDL_SHARED_WRITE),
  /* SNW: blocked by granted SW, SU, SNW, SNRW and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
    MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
    MDL_BIT(MDL_SHARED_WRITE),
  /* SNRW: blocked by everything granted except S and SH. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
    MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY) |
    MDL_BIT(MDL_SHARED_UPGRADABLE) | MDL_BIT(MDL_SHARED_WRITE) |
    MDL_BIT(MDL_SHARED_READ),
  /* X: blocked by any granted lock. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
    MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY) |
    MDL_BIT(MDL_SHARED_UPGRADABLE) | MDL_BIT(MDL_SHARED_WRITE) |
    MDL_BIT(MDL_SHARED_READ) | MDL_BIT(MDL_SHARED_HIGH_PRIO) |
    MDL_BIT(MDL_SHARED)
};
1574
1575
const MDL_lock::bitmap_t
MDL_lock::MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END]=
{
  /* IX: unused for per-object locks. */
  0,
  /* S: yields to pending X. */
  MDL_BIT(MDL_EXCLUSIVE),
  /* SH: never yields to pending requests. */
  0,
  /* SR: yields to pending SNRW and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE),
  /* SW: yields to pending SNW, SNRW and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
    MDL_BIT(MDL_SHARED_NO_WRITE),
  /* SU: yields to pending X. */
  MDL_BIT(MDL_EXCLUSIVE),
  /* SRO: yields to pending SW, SNRW and X. */
  MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
    MDL_BIT(MDL_SHARED_WRITE),
  /* SNW: yields to pending X. */
  MDL_BIT(MDL_EXCLUSIVE),
  /* SNRW: yields to pending X. */
  MDL_BIT(MDL_EXCLUSIVE),
  /* X: never yields to pending requests. */
  0
};
1592
1593
1594 /**
1595 Compatibility (or rather "incompatibility") matrices for backup metadata
1596 lock. Arrays of bitmaps which elements specify which granted/waiting locks
1597 are incompatible with type of lock being requested.
1598
1599 The first array specifies if particular type of request can be satisfied
1600 if there is granted backup lock of certain type.
1601
1602 Request | Type of active backup lock |
1603 type | S0 S1 S2 S3 S4 F1 F2 D TD SD DD BL AC C |
1604 ----------+---------------------------------------------------------+
1605 S0 | - - - - - + + + + + + + + + |
1606 S1 | - + + + + + + + + + + + + + |
1607 S2 | - + + + + + + - + + + + + + |
1608 S3 | - + + + + + + - + + - + + + |
1609 S4 | - + + + + + + - + - - + + - |
1610 FTWRL1 | + + + + + + + - - - - + - + |
1611 FTWRL2 | + + + + + + + - - - - + - - |
1612 D | + - - - - - - + + + + + + + |
1613 TD | + + + + + - - + + + + + + + |
1614 SD | + + + + - - - + + + + + + + |
1615 DDL | + + + - - - - + + + + - + + |
1616 BLOCK_DDL | + + + + + + + + + + - + + + |
1617 ALTER_COP | + + + + + - - + + + + + + + |
1618 COMMIT | + + + + - + - + + + + + + + |
1619
1620 The second array specifies if particular type of request can be satisfied
1621 if there is already waiting request for the backup lock of certain type.
1622 I.e. it specifies what is the priority of different lock types.
1623
1624 Request | Pending backup lock |
1625 type | S0 S1 S2 S3 S4 F1 F2 D TD SD DD BL AC C |
1626 ----------+---------------------------------------------------------+
1627 S0 | + - - - - + + + + + + + + + |
1628 S1 | + + + + + + + + + + + + + + |
1629 S2 | + + + + + + + + + + + + + + |
1630 S3 | + + + + + + + + + + + + + + |
1631 S4 | + + + + + + + + + + + + + + |
1632 FTWRL1 | + + + + + + + + + + + + + + |
1633 FTWRL2 | + + + + + + + + + + + + + + |
1634 D | + - - - - - - + + + + + + + |
1635 TD | + + + + + - - + + + + + + + |
1636 SD | + + + + - - - + + + + + + + |
1637 DDL | + + + - - - - + + + + - + + |
1638 BLOCK_DDL | + + + + + + + + + + + + + + |
1639 ALTER_COP | + + + + + - - + + + + + + + |
1640 COMMIT | + + + + - + - + + + + + + + |
1641
1642 Here: "+" -- means that request can be satisfied
1643 "-" -- means that request can't be satisfied and should wait
1644 */
1645
1646 /*
1647 NOTE: If you add a new MDL_BACKUP_XXX level lock, you have to also add it
1648 to MDL_BACKUP_START in the two arrays below!
1649 */
1650
const MDL_lock::bitmap_t
MDL_lock::MDL_backup_lock::m_granted_incompatible[MDL_BACKUP_END]=
{
  /* MDL_BACKUP_START (S0) */
  MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT),
  /* MDL_BACKUP_FLUSH (S1) */
  MDL_BIT(MDL_BACKUP_START),
  /* MDL_BACKUP_WAIT_FLUSH (S2) */
  MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_DML),
  /* MDL_BACKUP_WAIT_DDL (S3) */
  MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_DDL),
  /* MDL_BACKUP_WAIT_COMMIT (S4) */
  MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_SYS_DML) | MDL_BIT(MDL_BACKUP_DDL) | MDL_BIT(MDL_BACKUP_COMMIT),

  /* MDL_BACKUP_FTWRL1 */
  MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_TRANS_DML) | MDL_BIT(MDL_BACKUP_SYS_DML) | MDL_BIT(MDL_BACKUP_DDL) | MDL_BIT(MDL_BACKUP_ALTER_COPY),
  /* MDL_BACKUP_FTWRL2 */
  MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_TRANS_DML) | MDL_BIT(MDL_BACKUP_SYS_DML) | MDL_BIT(MDL_BACKUP_DDL) | MDL_BIT(MDL_BACKUP_ALTER_COPY) | MDL_BIT(MDL_BACKUP_COMMIT),
  /* MDL_BACKUP_DML */
  MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
  /* MDL_BACKUP_TRANS_DML */
  MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
  /* MDL_BACKUP_SYS_DML */
  MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
  /* MDL_BACKUP_DDL */
  MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2) | MDL_BIT(MDL_BACKUP_BLOCK_DDL),
  /* MDL_BACKUP_BLOCK_DDL */
  MDL_BIT(MDL_BACKUP_DDL),
  /* MDL_BACKUP_ALTER_COPY */
  MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
  /* MDL_BACKUP_COMMIT */
  MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL2)
};
1676
1677
const MDL_lock::bitmap_t
MDL_lock::MDL_backup_lock::m_waiting_incompatible[MDL_BACKUP_END]=
{
  /* MDL_BACKUP_START (S0) */
  MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT),
  /* MDL_BACKUP_FLUSH .. MDL_BACKUP_WAIT_COMMIT (S1-S4): no pending conflicts. */
  0,
  0,
  0,
  0,
  /* MDL_BACKUP_FTWRL1 / MDL_BACKUP_FTWRL2: no pending conflicts. */
  0,
  0,

  /* MDL_BACKUP_DML */
  MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
  /* MDL_BACKUP_TRANS_DML */
  MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
  /* MDL_BACKUP_SYS_DML */
  MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
  /* MDL_BACKUP_DDL */
  MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2) | MDL_BIT(MDL_BACKUP_BLOCK_DDL),
  /* MDL_BACKUP_BLOCK_DDL */
  0,
  /* MDL_BACKUP_ALTER_COPY */
  MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
  /* MDL_BACKUP_COMMIT */
  MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL2)
};
1703
1704
1705 /**
1706 Check if request for the metadata lock can be satisfied given its
1707 current state.
1708
1709 New lock request can be satisfied iff:
1710 - There are no incompatible types of satisfied requests
1711 in other contexts
1712 - There are no waiting requests which have higher priority
1713 than this request when priority was not ignored.
1714
1715 @param type_arg The requested lock type.
1716 @param requestor_ctx The MDL context of the requestor.
1717 @param ignore_lock_priority Ignore lock priority.
1718
1719 @retval TRUE Lock request can be satisfied
1720 @retval FALSE There is some conflicting lock.
1721
1722 @note In cases then current context already has "stronger" type
1723 of lock on the object it will be automatically granted
1724 thanks to usage of the MDL_context::find_ticket() method.
1725 */
1726
bool
MDL_lock::can_grant_lock(enum_mdl_type type_arg,
                         MDL_context *requestor_ctx,
                         bool ignore_lock_priority) const
{
  bitmap_t waiting_incompat_map= incompatible_waiting_types_bitmap()[type_arg];
  bitmap_t granted_incompat_map= incompatible_granted_types_bitmap()[type_arg];

#ifdef WITH_WSREP
  /*
    Approve lock request in BACKUP namespace for BF threads.
    We should get rid of this code and forbid FTWRL/BACKUP statements
    when wsrep is active.
  */
  if ((wsrep_thd_is_toi(requestor_ctx->get_thd()) ||
       wsrep_thd_is_applying(requestor_ctx->get_thd())) &&
      key.mdl_namespace() == MDL_key::BACKUP)
  {
    bool waiting_incompatible= m_waiting.bitmap() & waiting_incompat_map;
    bool granted_incompatible= m_granted.bitmap() & granted_incompat_map;
    if (waiting_incompatible || granted_incompatible)
    {
      WSREP_DEBUG("global lock granted for BF%s: %lu %s",
                  waiting_incompatible ? " (waiting queue)" : "",
                  thd_get_thread_id(requestor_ctx->get_thd()),
                  wsrep_thd_query(requestor_ctx->get_thd()));
    }
    /* Granted unconditionally for TOI/applying threads, even on conflict. */
    return true;
  }
#endif /* WITH_WSREP */

  /* Respect pending higher-priority requests unless told to ignore them. */
  if (!ignore_lock_priority && (m_waiting.bitmap() & waiting_incompat_map))
    return false;

  if (m_granted.bitmap() & granted_incompat_map)
  {
    bool can_grant= true;

    /* Check that the incompatible lock belongs to some other context. */
    for (const auto &ticket : m_granted)
    {
      if (ticket.get_ctx() != requestor_ctx &&
          ticket.is_incompatible_when_granted(type_arg))
      {
        can_grant= false;
#ifdef WITH_WSREP
        /*
          non WSREP threads must report conflict immediately
          note: RSU processing wsrep threads, have wsrep_on==OFF
        */
        if (WSREP(requestor_ctx->get_thd()) ||
            requestor_ctx->get_thd()->wsrep_cs().mode() ==
            wsrep::client_state::m_rsu)
        {
          /* Ask wsrep to resolve the conflict (may abort the holder). */
          wsrep_handle_mdl_conflict(requestor_ctx, &ticket, &key);
          if (wsrep_log_conflicts)
          {
            auto key= ticket.get_key();
            WSREP_INFO("MDL conflict db=%s table=%s ticket=%d solved by abort",
                       key->db_name(), key->name(), ticket.get_type());
          }
          /* Keep scanning so every conflicting holder is processed. */
          continue;
        }
#endif /* WITH_WSREP */
        break;
      }
    }
    return can_grant;
  }
  return true;
}
1798
1799
1800 /**
1801 Return thread id of the thread to which the first ticket was
1802 granted.
1803 */
1804
1805 inline unsigned long
get_lock_owner() const1806 MDL_lock::get_lock_owner() const
1807 {
1808 if (m_granted.is_empty())
1809 return 0;
1810
1811 return m_granted.begin()->get_ctx()->get_thread_id();
1812 }
1813
1814
1815 /** Remove a ticket from waiting or pending queue and wakeup up waiters. */
1816
void MDL_lock::remove_ticket(LF_PINS *pins, Ticket_list MDL_lock::*list,
                             MDL_ticket *ticket)
{
  mysql_prlock_wrlock(&m_rwlock);
  (this->*list).remove_ticket(ticket);
  if (is_empty())
    /*
      The lock object has no more granted or waiting tickets: drop it
      from the global lock-free hash.
      NOTE(review): m_rwlock is not unlocked on this branch here —
      presumably mdl_locks.remove() takes care of releasing/destroying
      the lock object; confirm in the MDL_map implementation.
    */
    mdl_locks.remove(pins, this);
  else
  {
    /*
      There can be some contexts waiting to acquire a lock
      which now might be able to do it. Grant the lock to
      them and wake them up!

      We always try to reschedule locks, since there is no easy way
      (i.e. by looking at the bitmaps) to find out whether it is
      required or not.
      In a general case, even when the queue's bitmap is not changed
      after removal of the ticket, there is a chance that some request
      can be satisfied (due to the fact that a granted request
      reflected in the bitmap might belong to the same context as a
      pending request).
    */
    reschedule_waiters();
    mysql_prlock_unlock(&m_rwlock);
  }
}
1844
1845
1846 /**
1847 Check if we have any pending locks which conflict with existing
1848 shared lock.
1849
1850 @pre The ticket must match an acquired lock.
1851
1852 @return TRUE if there is a conflicting lock request, FALSE otherwise.
1853 */
1854
has_pending_conflicting_lock(enum_mdl_type type)1855 bool MDL_lock::has_pending_conflicting_lock(enum_mdl_type type)
1856 {
1857 bool result;
1858
1859 mysql_prlock_rdlock(&m_rwlock);
1860 result= (m_waiting.bitmap() & incompatible_granted_types_bitmap()[type]);
1861 mysql_prlock_unlock(&m_rwlock);
1862 return result;
1863 }
1864
1865
/* Out-of-line empty destructor for the abstract visitor interface. */
MDL_wait_for_graph_visitor::~MDL_wait_for_graph_visitor()
{
}
1869
1870
/* Out-of-line empty destructor for the wait-for subgraph interface. */
MDL_wait_for_subgraph::~MDL_wait_for_subgraph()
{
}
1874
1875 /**
1876 Check if ticket represents metadata lock of "stronger" or equal type
1877 than specified one. I.e. if metadata lock represented by ticket won't
1878 allow any of locks which are not allowed by specified type of lock.
1879
1880 @return TRUE if ticket has stronger or equal type
1881 FALSE otherwise.
1882 */
1883
has_stronger_or_equal_type(enum_mdl_type type) const1884 bool MDL_ticket::has_stronger_or_equal_type(enum_mdl_type type) const
1885 {
1886 const MDL_lock::bitmap_t *
1887 granted_incompat_map= m_lock->incompatible_granted_types_bitmap();
1888
1889 return ! (granted_incompat_map[type] & ~(granted_incompat_map[m_type]));
1890 }
1891
1892
is_incompatible_when_granted(enum_mdl_type type) const1893 bool MDL_ticket::is_incompatible_when_granted(enum_mdl_type type) const
1894 {
1895 return (MDL_BIT(m_type) &
1896 m_lock->incompatible_granted_types_bitmap()[type]);
1897 }
1898
1899
is_incompatible_when_waiting(enum_mdl_type type) const1900 bool MDL_ticket::is_incompatible_when_waiting(enum_mdl_type type) const
1901 {
1902 return (MDL_BIT(m_type) &
1903 m_lock->incompatible_waiting_types_bitmap()[type]);
1904 }
1905
1906
1907 static const LEX_STRING
get_mdl_lock_name(MDL_key::enum_mdl_namespace mdl_namespace,enum_mdl_type type)1908 *get_mdl_lock_name(MDL_key::enum_mdl_namespace mdl_namespace,
1909 enum_mdl_type type)
1910 {
1911 return mdl_namespace == MDL_key::BACKUP ?
1912 &backup_lock_types[type] :
1913 &lock_types[type];
1914 }
1915
1916
get_type_name() const1917 const LEX_STRING *MDL_ticket::get_type_name() const
1918 {
1919 return get_mdl_lock_name(get_key()->mdl_namespace(), m_type);
1920 }
1921
get_type_name(enum_mdl_type type) const1922 const LEX_STRING *MDL_ticket::get_type_name(enum_mdl_type type) const
1923 {
1924 return get_mdl_lock_name(get_key()->mdl_namespace(), type);
1925 }
1926
1927
/**
  Check whether the context already holds a compatible lock ticket
  on an object.
  Start searching from list of locks for the same duration as lock
  being requested. If not look at lists for other durations.

  @param mdl_request           Lock request object for lock to be acquired
  @param[out] result_duration  Duration of lock which was found.

  @note Tickets which correspond to lock types "stronger" than one
        being requested are also considered compatible.

  @return A pointer to the lock ticket for the object or NULL otherwise.
*/

MDL_ticket *
MDL_context::find_ticket(MDL_request *mdl_request,
                         enum_mdl_duration *result_duration)
{
  MDL_ticket *ticket;
  int i;

  for (i= 0; i < MDL_DURATION_END; i++)
  {
    /*
      Start with the requested duration and wrap around, so all
      duration lists get inspected, the requested one first.
    */
    enum_mdl_duration duration= (enum_mdl_duration)((mdl_request->duration+i) %
                                                    MDL_DURATION_END);
    Ticket_iterator it(m_tickets[duration]);

    while ((ticket= it++))
    {
      /* A ticket matches if it is for the same object and is at least
         as strong as the requested type. */
      if (mdl_request->key.is_equal(&ticket->m_lock->key) &&
          ticket->has_stronger_or_equal_type(mdl_request->type))
      {
        DBUG_PRINT("info", ("Adding mdl lock %s to %s",
                            get_mdl_lock_name(mdl_request->key.mdl_namespace(),
                                              mdl_request->type)->str,
                            ticket->get_type_name()->str));
        *result_duration= duration;
        return ticket;
      }
    }
  }
  return NULL;
}
1972
1973
/**
  Try to acquire one lock.

  Unlike exclusive locks, shared locks are acquired one by
  one. This interface is chosen to simplify introduction of
  the new locking API to the system. MDL_context::try_acquire_lock()
  is currently used from open_table(), and there we have only one
  table to work with.

  This function may also be used to try to acquire an exclusive
  lock on a destination table, by ALTER TABLE ... RENAME.

  Returns immediately without any side effect if encounters a lock
  conflict. Otherwise takes the lock.

  FIXME: Compared to lock_table_name_if_not_cached() (from 5.1)
         it gives slightly more false negatives.

  @param mdl_request [in/out] Lock request object for lock to be acquired

  @retval  FALSE   Success. The lock may have not been acquired.
                   Check the ticket, if it's NULL, a conflicting lock
                   exists.
  @retval  TRUE    Out of resources, an error has been reported.
*/

bool
MDL_context::try_acquire_lock(MDL_request *mdl_request)
{
  MDL_ticket *ticket;

  if (try_acquire_lock_impl(mdl_request, &ticket))
    return TRUE;

  if (! mdl_request->ticket)
  {
    /*
      Our attempt to acquire lock without waiting has failed.
      Let us release resources which were acquired in the process.
      We can't get here if we allocated a new lock object so there
      is no need to release it.

      Note: try_acquire_lock_impl() left ticket->m_lock->m_rwlock
      write-locked on this path (see its documentation), so it must
      be unlocked before the ticket is destroyed.
    */
    DBUG_ASSERT(! ticket->m_lock->is_empty());
    mysql_prlock_unlock(&ticket->m_lock->m_rwlock);
    MDL_ticket::destroy(ticket);
  }

  return FALSE;
}
2023
2024
/**
  Auxiliary method for acquiring lock without waiting.

  @param mdl_request [in/out] Lock request object for lock to be acquired
  @param out_ticket  [out]    Ticket for the request in case when lock
                              has not been acquired.

  @retval  FALSE   Success. The lock may have not been acquired.
                   Check MDL_request::ticket, if it's NULL, a conflicting
                   lock exists. In this case "out_ticket" out parameter
                   points to ticket which was constructed for the request.
                   MDL_ticket::m_lock points to the corresponding MDL_lock
                   object and MDL_lock::m_rwlock write-locked.
  @retval  TRUE    Out of resources, an error has been reported.
*/

bool
MDL_context::try_acquire_lock_impl(MDL_request *mdl_request,
                                   MDL_ticket **out_ticket)
{
  MDL_lock *lock;
  MDL_key *key= &mdl_request->key;
  MDL_ticket *ticket;
  enum_mdl_duration found_duration;

  /* Don't take chances in production. */
  DBUG_ASSERT(mdl_request->ticket == NULL);
  mdl_request->ticket= NULL;

  /*
    Check whether the context already holds a shared lock on the object,
    and if so, grant the request.
  */
  if ((ticket= find_ticket(mdl_request, &found_duration)))
  {
    DBUG_ASSERT(ticket->m_lock);
    DBUG_ASSERT(ticket->has_stronger_or_equal_type(mdl_request->type));
    /*
      If the request is for a transactional lock, and we found
      a transactional lock, just reuse the found ticket.

      It's possible that we found a transactional lock,
      but the request is for a HANDLER lock. In that case HANDLER
      code will clone the ticket (see below why it's needed).

      If the request is for a transactional lock, and we found
      a HANDLER lock, create a copy, to make sure that when user
      does HANDLER CLOSE, the transactional lock is not released.

      If the request is for a handler lock, and we found a
      HANDLER lock, also do the clone. HANDLER CLOSE for one alias
      should not release the lock on the table HANDLER opened through
      a different alias.
    */
    mdl_request->ticket= ticket;
    if ((found_duration != mdl_request->duration ||
         mdl_request->duration == MDL_EXPLICIT) &&
        clone_ticket(mdl_request))
    {
      /* Clone failed. */
      mdl_request->ticket= NULL;
      return TRUE;
    }
    return FALSE;
  }

  /* Make sure we have LF-hash pins before touching the global lock map. */
  if (fix_pins())
    return TRUE;

  if (!(ticket= MDL_ticket::create(this, mdl_request->type
#ifndef DBUG_OFF
                                   , mdl_request->duration
#endif
                                   )))
    return TRUE;

  /* The below call implicitly locks MDL_lock::m_rwlock on success. */
  if (!(lock= mdl_locks.find_or_insert(m_pins, key)))
  {
    MDL_ticket::destroy(ticket);
    return TRUE;
  }

  /* Register the (still PENDING) ticket with performance schema. */
  DBUG_ASSERT(ticket->m_psi == NULL);
  ticket->m_psi= mysql_mdl_create(ticket,
                                  &mdl_request->key,
                                  mdl_request->type,
                                  mdl_request->duration,
                                  MDL_ticket::PENDING,
                                  mdl_request->m_src_file,
                                  mdl_request->m_src_line);

  ticket->m_lock= lock;

  if (lock->can_grant_lock(mdl_request->type, this, false))
  {
    lock->m_granted.add_ticket(ticket);

    mysql_prlock_unlock(&lock->m_rwlock);

    m_tickets[mdl_request->duration].push_front(ticket);

    mdl_request->ticket= ticket;

    mysql_mdl_set_status(ticket->m_psi, MDL_ticket::GRANTED);
  }
  else
  {
    /*
      Cannot grant now: hand the ticket back to the caller with
      lock->m_rwlock still write-locked (see @retval FALSE above).
    */
    *out_ticket= ticket;
  }

  return FALSE;
}
2136
2137
/**
  Create a copy of a granted ticket.
  This is used to make sure that HANDLER ticket
  is never shared with a ticket that belongs to
  a transaction, so that when we HANDLER CLOSE,
  we don't release a transactional ticket, and
  vice versa -- when we COMMIT, we don't mistakenly
  release a ticket for an open HANDLER.

  @pre mdl_request->ticket points to the granted ticket being cloned.
  @post mdl_request->ticket points to the new clone.

  @retval TRUE   Out of memory.
  @retval FALSE  Success.
*/

bool
MDL_context::clone_ticket(MDL_request *mdl_request)
{
  MDL_ticket *ticket;


  /*
    Since in theory we can clone ticket belonging to a different context
    we need to prepare target context for possible attempts to release
    lock and thus possible removal of MDL_lock from MDL_map container.
    So we allocate pins to be able to work with this container if they
    are not allocated already.
  */
  if (fix_pins())
    return TRUE;

  /*
    By submitting mdl_request->type to MDL_ticket::create()
    we effectively downgrade the cloned lock to the level of
    the request.
  */
  if (!(ticket= MDL_ticket::create(this, mdl_request->type
#ifndef DBUG_OFF
                                   , mdl_request->duration
#endif
                                   )))
    return TRUE;

  /* Register the clone with performance schema, initially PENDING. */
  DBUG_ASSERT(ticket->m_psi == NULL);
  ticket->m_psi= mysql_mdl_create(ticket,
                                  &mdl_request->key,
                                  mdl_request->type,
                                  mdl_request->duration,
                                  MDL_ticket::PENDING,
                                  mdl_request->m_src_file,
                                  mdl_request->m_src_line);

  /* clone() is not supposed to be used to get a stronger lock. */
  DBUG_ASSERT(mdl_request->ticket->has_stronger_or_equal_type(ticket->m_type));

  /* The clone shares the MDL_lock object with the original ticket. */
  ticket->m_lock= mdl_request->ticket->m_lock;
  mdl_request->ticket= ticket;

  /* Add the clone to the granted queue under the lock's rwlock. */
  mysql_prlock_wrlock(&ticket->m_lock->m_rwlock);
  ticket->m_lock->m_granted.add_ticket(ticket);
  mysql_prlock_unlock(&ticket->m_lock->m_rwlock);

  m_tickets[mdl_request->duration].push_front(ticket);

  mysql_mdl_set_status(ticket->m_psi, MDL_ticket::GRANTED);

  return FALSE;
}
2204
2205
/**
  Check if there is any conflicting lock that could cause this thread
  to wait for another thread which is not ready to commit.
  This is always an error, as the upper level of parallel replication
  should not allow a scheduling of a conflicting DDL until all earlier
  transactions have been committed.

  This function is only called for a slave using parallel replication
  and trying to get an exclusive lock for the table.

  @param ctx  Context of the requesting (slave) thread. Assumes
              ctx->get_thd()->rgi_slave is non-NULL, per the caller
              contract above — TODO confirm at call sites.

  @return 0 if no problematic conflict, 1 on fatal scheduling error.

  @note Compiled only in debug builds (see #ifndef DBUG_OFF).
*/

#ifndef DBUG_OFF
bool MDL_lock::check_if_conflicting_replication_locks(MDL_context *ctx)
{
  rpl_group_info *rgi_slave= ctx->get_thd()->rgi_slave;

  /* Nothing to check for threads not executing a replicated GTID. */
  if (!rgi_slave->gtid_sub_id)
    return 0;

  for (const auto &conflicting_ticket : m_granted)
  {
    if (conflicting_ticket.get_ctx() != ctx)
    {
      MDL_context *conflicting_ctx= conflicting_ticket.get_ctx();
      rpl_group_info *conflicting_rgi_slave;
      conflicting_rgi_slave= conflicting_ctx->get_thd()->rgi_slave;

      /*
        If the conflicting thread is another parallel replication
        thread for the same master and it's not in commit stage, then
        the current transaction has started too early and something is
        seriously wrong.
      */
      if (conflicting_rgi_slave &&
          conflicting_rgi_slave->gtid_sub_id &&
          conflicting_rgi_slave->rli == rgi_slave->rli &&
          conflicting_rgi_slave->current_gtid.domain_id ==
          rgi_slave->current_gtid.domain_id &&
          !conflicting_rgi_slave->did_mark_start_commit)
        return 1;                               // Fatal error
    }
  }
  return 0;
}
#endif
2251
2252
/**
  Acquire one lock with waiting for conflicting locks to go away if needed.

  @param mdl_request [in/out] Lock request object for lock to be acquired

  @param lock_wait_timeout [in] Seconds to wait before timeout.

  @retval  FALSE   Success. MDL_request::ticket points to the ticket
                   for the lock.
  @retval  TRUE    Failure (Out of resources or waiting is aborted),
*/

bool
MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
{
  MDL_lock *lock;
  MDL_ticket *ticket;
  MDL_wait::enum_wait_status wait_status;
  DBUG_ENTER("MDL_context::acquire_lock");
  DBUG_PRINT("enter", ("lock_type: %s timeout: %f",
                       get_mdl_lock_name(mdl_request->key.mdl_namespace(),
                                         mdl_request->type)->str,
                       lock_wait_timeout));

  if (try_acquire_lock_impl(mdl_request, &ticket))
    DBUG_RETURN(TRUE);

  if (mdl_request->ticket)
  {
    /*
      We have managed to acquire lock without waiting.
      MDL_lock, MDL_context and MDL_request were updated
      accordingly, so we can simply return success.
    */
    DBUG_PRINT("info", ("Got lock without waiting"));
    DBUG_RETURN(FALSE);
  }

  /*
    Our attempt to acquire lock without waiting has failed.
    As a result of this attempt we got MDL_ticket with m_lock
    member pointing to the corresponding MDL_lock object which
    has MDL_lock::m_rwlock write-locked.
  */
  lock= ticket->m_lock;

  if (lock_wait_timeout == 0)
  {
    /* Zero timeout means probe-only: fail now, without enqueueing. */
    mysql_prlock_unlock(&lock->m_rwlock);
    MDL_ticket::destroy(ticket);
    my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
    DBUG_RETURN(TRUE);
  }

  lock->m_waiting.add_ticket(ticket);

  /*
    Once we added a pending ticket to the waiting queue,
    we must ensure that our wait slot is empty, so
    that our lock request can be scheduled. Do that in the
    critical section formed by the acquired write lock on MDL_lock.
  */
  m_wait.reset_status();

  /*
    Don't break conflicting locks if timeout is 0 as 0 is used
    to check if there is any conflicting locks...
    (NOTE(review): the zero-timeout case already returned above,
    so the lock_wait_timeout check here looks redundant — confirm.)
  */
  if (lock->needs_notification(ticket) && lock_wait_timeout)
    lock->notify_conflicting_locks(this);

  /*
    Ensure that if we are trying to get an exclusive lock for a slave
    running parallel replication, then we are not blocked by another
    parallel slave thread that is not committed. This should never happen as
    the parallel replication scheduler should never schedule a DDL while
    DML's are still running.
  */
  DBUG_SLOW_ASSERT((mdl_request->type != MDL_INTENTION_EXCLUSIVE &&
                    mdl_request->type != MDL_EXCLUSIVE) ||
                   !(get_thd()->rgi_slave &&
                     get_thd()->rgi_slave->is_parallel_exec &&
                     lock->check_if_conflicting_replication_locks(this)));

  mysql_prlock_unlock(&lock->m_rwlock);

#ifdef HAVE_PSI_INTERFACE
  PSI_metadata_locker_state state __attribute__((unused));
  PSI_metadata_locker *locker= NULL;

  if (ticket->m_psi != NULL)
    locker= PSI_CALL_start_metadata_wait(&state, ticket->m_psi, __FILE__, __LINE__);
#endif

  /* Publish the edge for the deadlock detector before sleeping. */
  will_wait_for(ticket);

  /* There is a shared or exclusive lock on the object. */
  DEBUG_SYNC(get_thd(), "mdl_acquire_lock_wait");

  find_deadlock();

  struct timespec abs_timeout, abs_shortwait;
  set_timespec_nsec(abs_timeout,
                    (ulonglong)(lock_wait_timeout * 1000000000ULL));
  set_timespec(abs_shortwait, 1);
  wait_status= MDL_wait::EMPTY;

  /*
    Wait in one-second slices so that between slices we can re-notify
    conflicting lock owners and notice a disconnected client.
  */
  while (cmp_timespec(abs_shortwait, abs_timeout) <= 0)
  {
    /* abs_timeout is far away. Wait a short while and notify locks. */
    wait_status= m_wait.timed_wait(m_owner, &abs_shortwait, FALSE,
                                   mdl_request->key.get_wait_state_name());

    if (wait_status != MDL_wait::EMPTY)
      break;
    /* Check if the client is gone while we were waiting. */
    if (! thd_is_connected(m_owner->get_thd()))
    {
      /*
       * The client is disconnected. Don't wait forever:
       * assume it's the same as a wait timeout, this
       * ensures all error handling is correct.
       */
      wait_status= MDL_wait::TIMEOUT;
      break;
    }

    mysql_prlock_wrlock(&lock->m_rwlock);
    if (lock->needs_notification(ticket))
      lock->notify_conflicting_locks(this);
    mysql_prlock_unlock(&lock->m_rwlock);
    set_timespec(abs_shortwait, 1);
  }
  /* Final wait until the absolute deadline, flagged as the "last" wait. */
  if (wait_status == MDL_wait::EMPTY)
    wait_status= m_wait.timed_wait(m_owner, &abs_timeout, TRUE,
                                   mdl_request->key.get_wait_state_name());

  done_waiting_for();

#ifdef HAVE_PSI_INTERFACE
  if (locker != NULL)
    PSI_CALL_end_metadata_wait(locker, 0);
#endif

  if (wait_status != MDL_wait::GRANTED)
  {
    /* Not granted: dequeue our pending ticket and report the reason. */
    lock->remove_ticket(m_pins, &MDL_lock::m_waiting, ticket);
    MDL_ticket::destroy(ticket);
    switch (wait_status)
    {
    case MDL_wait::VICTIM:
      DBUG_LOCK_FILE;
      DBUG_PRINT("mdl_locks", ("%s", mdl_dbug_print_locks()));
      DBUG_UNLOCK_FILE;
      my_error(ER_LOCK_DEADLOCK, MYF(0));
      break;
    case MDL_wait::TIMEOUT:
      my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
      break;
    case MDL_wait::KILLED:
      get_thd()->send_kill_message();
      break;
    default:
      DBUG_ASSERT(0);
      break;
    }
    DBUG_RETURN(TRUE);
  }

  /*
    We have been granted our request.
    State of MDL_lock object is already being appropriately updated by a
    concurrent thread (@sa MDL_lock:reschedule_waiters()).
    So all we need to do is to update MDL_context and MDL_request objects.
  */
  DBUG_ASSERT(wait_status == MDL_wait::GRANTED);

  m_tickets[mdl_request->duration].push_front(ticket);

  mdl_request->ticket= ticket;

  mysql_mdl_set_status(ticket->m_psi, MDL_ticket::GRANTED);

  DBUG_RETURN(FALSE);
}
2438
2439
mdl_request_ptr_cmp(const void * ptr1,const void * ptr2)2440 extern "C" int mdl_request_ptr_cmp(const void* ptr1, const void* ptr2)
2441 {
2442 MDL_request *req1= *(MDL_request**)ptr1;
2443 MDL_request *req2= *(MDL_request**)ptr2;
2444 return req1->key.cmp(&req2->key);
2445 }
2446
2447
/**
  Acquire exclusive locks. There must be no granted locks in the
  context.

  This is a replacement of lock_table_names(). It is used in
  RENAME, DROP and other DDL SQL statements.

  @param mdl_requests List of requests for locks to be acquired.

  @param lock_wait_timeout Seconds to wait before timeout.

  @note The list of requests should not contain non-exclusive lock requests.
        There should not be any acquired locks in the context.

  @note Assumes that one already owns scoped intention exclusive lock.

  @retval FALSE  Success
  @retval TRUE   Failure
*/

bool MDL_context::acquire_locks(MDL_request_list *mdl_requests,
                                double lock_wait_timeout)
{
  MDL_request_list::Iterator it(*mdl_requests);
  MDL_request **sort_buf, **p_req;
  /* Savepoint lets us release exactly the locks taken here on failure. */
  MDL_savepoint mdl_svp= mdl_savepoint();
  ssize_t req_count= static_cast<ssize_t>(mdl_requests->elements());
  DBUG_ENTER("MDL_context::acquire_locks");

  if (req_count == 0)
    DBUG_RETURN(FALSE);

  /*
    Sort requests according to MDL_key, so locks are always acquired in
    one global order — presumably to avoid deadlocks between concurrent
    multi-lock acquisitions (TODO confirm).
  */
  if (! (sort_buf= (MDL_request **)my_malloc(key_memory_MDL_context_acquire_locks,
                                             req_count * sizeof(MDL_request*),
                                             MYF(MY_WME))))
    DBUG_RETURN(TRUE);

  for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++)
    *p_req= it++;

  my_qsort(sort_buf, req_count, sizeof(MDL_request*),
           mdl_request_ptr_cmp);

  for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++)
  {
    if (acquire_lock(*p_req, lock_wait_timeout))
      goto err;
  }
  my_free(sort_buf);
  DBUG_RETURN(FALSE);

err:
  /*
    Release locks we have managed to acquire so far.
    Use rollback_to_savepoint() since there may be duplicate
    requests that got assigned the same ticket.
  */
  rollback_to_savepoint(mdl_svp);
  /* Reset lock requests back to its initial state.
     Only the requests up to the failed one (p_req) got tickets. */
  for (req_count= p_req - sort_buf, p_req= sort_buf;
       p_req < sort_buf + req_count; p_req++)
  {
    (*p_req)->ticket= NULL;
  }
  my_free(sort_buf);
  DBUG_RETURN(TRUE);
}
2516
2517
/**
  Upgrade a shared metadata lock.

  Used in ALTER TABLE.

  @param mdl_ticket         Lock to upgrade.
  @param new_type           Lock type to upgrade to.
  @param lock_wait_timeout  Seconds to wait before timeout.

  @note In case of failure to upgrade lock (e.g. because upgrader
        was killed) leaves lock in its original state (locked in
        shared mode).

  @note There can be only one upgrader for a lock or we will have deadlock.
        This invariant is ensured by the fact that upgradeable locks SU, SNW
        and SNRW are not compatible with each other and themselves.

  @retval FALSE  Success
  @retval TRUE   Failure (thread was killed)
*/

bool
MDL_context::upgrade_shared_lock(MDL_ticket *mdl_ticket,
                                 enum_mdl_type new_type,
                                 double lock_wait_timeout)
{
  MDL_request mdl_xlock_request;
  MDL_savepoint mdl_svp= mdl_savepoint();
  bool is_new_ticket;
  DBUG_ENTER("MDL_context::upgrade_shared_lock");
  DBUG_PRINT("enter",("old_type: %s new_type: %s lock_wait_timeout: %f",
                      mdl_ticket->get_type_name()->str,
                      mdl_ticket->get_type_name(new_type)->str,
                      lock_wait_timeout));
  DEBUG_SYNC(get_thd(), "mdl_upgrade_lock");

  /*
    Do nothing if already upgraded. Used when we FLUSH TABLE under
    LOCK TABLES and a table is listed twice in LOCK TABLES list.

    In BACKUP namespace upgrade must always happen. Even though
    MDL_BACKUP_START is not stronger than MDL_BACKUP_FLUSH from
    has_stronger_or_equal_type(), the latter effectively blocks
    new MDL_BACKUP_DML while the former doesn't.
  */
  if (mdl_ticket->has_stronger_or_equal_type(new_type) &&
      mdl_ticket->get_key()->mdl_namespace() != MDL_key::BACKUP)
    DBUG_RETURN(FALSE);

  /* Acquire the stronger lock as a separate transactional request. */
  MDL_REQUEST_INIT_BY_KEY(&mdl_xlock_request, &mdl_ticket->m_lock->key,
                          new_type, MDL_TRANSACTION);

  if (acquire_lock(&mdl_xlock_request, lock_wait_timeout))
    DBUG_RETURN(TRUE);

  /* If the savepoint doesn't cover it, a genuinely new ticket was made. */
  is_new_ticket= ! has_lock(mdl_svp, mdl_xlock_request.ticket);

  /* Merge the acquired and the original lock. @todo: move to a method. */
  mysql_prlock_wrlock(&mdl_ticket->m_lock->m_rwlock);
  if (is_new_ticket)
    mdl_ticket->m_lock->m_granted.remove_ticket(mdl_xlock_request.ticket);
  /*
    Set the new type of lock in the ticket. To update state of
    MDL_lock object correctly we need to temporarily exclude
    ticket from the granted queue and then include it back.
  */
  mdl_ticket->m_lock->m_granted.remove_ticket(mdl_ticket);
  mdl_ticket->m_type= new_type;
  mdl_ticket->m_lock->m_granted.add_ticket(mdl_ticket);

  mysql_prlock_unlock(&mdl_ticket->m_lock->m_rwlock);

  if (is_new_ticket)
  {
    /* The temporary ticket is no longer needed after the merge. */
    m_tickets[MDL_TRANSACTION].remove(mdl_xlock_request.ticket);
    MDL_ticket::destroy(mdl_xlock_request.ticket);
  }

  DBUG_RETURN(FALSE);
}
2598
2599
/**
  A fragment of recursive traversal of the wait-for graph
  in search for deadlocks. Direct the deadlock visitor to all
  contexts that own the lock the current node in the wait-for
  graph is waiting for.
  As long as the initial node is remembered in the visitor,
  a deadlock is found when the same node is seen twice.

  @param waiting_ticket  Pending ticket whose blockers are inspected.
  @param gvisitor        Deadlock-detection visitor.

  @retval TRUE   A deadlock was found (or traversal was cut short by
                 enter_node()).
  @retval FALSE  No deadlock reachable through this edge.
*/

bool MDL_lock::visit_subgraph(MDL_ticket *waiting_ticket,
                              MDL_wait_for_graph_visitor *gvisitor)
{
  MDL_context *src_ctx= waiting_ticket->get_ctx();
  bool result= TRUE;

  mysql_prlock_rdlock(&m_rwlock);

  /*
    MDL_lock's waiting and granted queues and MDL_context::m_waiting_for
    member are updated by different threads when the lock is granted
    (see MDL_context::acquire_lock() and MDL_lock::reschedule_waiters()).
    As a result, here we may encounter a situation when MDL_lock data
    already reflects the fact that the lock was granted but
    m_waiting_for member has not been updated yet.

    For example, imagine that:

    thread1: Owns SNW lock on table t1.
    thread2: Attempts to acquire SW lock on t1,
             but sees an active SNW lock.
             Thus adds the ticket to the waiting queue and
             sets m_waiting_for to point to the ticket.
    thread1: Releases SNW lock, updates MDL_lock object to
             grant SW lock to thread2 (moves the ticket for
             SW from waiting to the active queue).
             Attempts to acquire a new SNW lock on t1,
             sees an active SW lock (since it is present in the
             active queue), adds ticket for SNW lock to the waiting
             queue, sets m_waiting_for to point to this ticket.

    At this point deadlock detection algorithm run by thread1 will see that:
    - Thread1 waits for SNW lock on t1 (since m_waiting_for is set).
    - SNW lock is not granted, because it conflicts with active SW lock
      owned by thread 2 (since ticket for SW is present in granted queue).
    - Thread2 waits for SW lock (since its m_waiting_for has not been
      updated yet!).
    - SW lock is not granted because there is pending SNW lock from thread1.
      Therefore deadlock should exist [sic!].

    To avoid detection of such false deadlocks we need to check the "actual"
    status of the ticket being waited for, before analyzing its blockers.
    We do this by checking the wait status of the context which is waiting
    for it. To avoid races this has to be done under protection of
    MDL_lock::m_rwlock lock.
  */
  if (src_ctx->m_wait.get_status() != MDL_wait::EMPTY)
  {
    result= FALSE;
    goto end;
  }

  /*
    To avoid visiting nodes which were already marked as victims of
    deadlock detection (or whose requests were already satisfied) we
    enter the node only after peeking at its wait status.
    This is necessary to avoid active waiting in a situation
    when previous searches for a deadlock already selected the
    node we're about to enter as a victim (see the comment
    in MDL_context::find_deadlock() for explanation why several searches
    can be performed for the same wait).
    There is no guarantee that the node isn't chosen a victim while we
    are visiting it but this is OK: in the worst case we might do some
    extra work and one more context might be chosen as a victim.
  */
  if (gvisitor->enter_node(src_ctx))
    goto end;

  /*
    We do a breadth-first search first -- that is, inspect all
    edges of the current node, and only then follow up to the next
    node. In workloads that involve wait-for graph loops this
    has proven to be a more efficient strategy [citation missing].
  */
  for (const auto& ticket : m_granted)
  {
    /* Filter out edges that point to the same node. */
    if (ticket.get_ctx() != src_ctx &&
        ticket.is_incompatible_when_granted(waiting_ticket->get_type()) &&
        gvisitor->inspect_edge(ticket.get_ctx()))
    {
      goto end_leave_node;
    }
  }

  for (const auto &ticket : m_waiting)
  {
    /* Filter out edges that point to the same node. */
    if (ticket.get_ctx() != src_ctx &&
        ticket.is_incompatible_when_waiting(waiting_ticket->get_type()) &&
        gvisitor->inspect_edge(ticket.get_ctx()))
    {
      goto end_leave_node;
    }
  }

  /* Recurse and inspect all adjacent nodes. */
  for (const auto &ticket : m_granted)
  {
    if (ticket.get_ctx() != src_ctx &&
        ticket.is_incompatible_when_granted(waiting_ticket->get_type()) &&
        ticket.get_ctx()->visit_subgraph(gvisitor))
    {
      goto end_leave_node;
    }
  }

  for (const auto &ticket : m_waiting)
  {
    if (ticket.get_ctx() != src_ctx &&
        ticket.is_incompatible_when_waiting(waiting_ticket->get_type()) &&
        ticket.get_ctx()->visit_subgraph(gvisitor))
    {
      goto end_leave_node;
    }
  }

  result= FALSE;

end_leave_node:
  gvisitor->leave_node(src_ctx);

end:
  mysql_prlock_unlock(&m_rwlock);
  return result;
}
2735
2736
2737 /**
2738 Traverse a portion of wait-for graph which is reachable
2739 through the edge represented by this ticket and search
2740 for deadlocks.
2741
2742 @retval TRUE A deadlock is found. A pointer to deadlock
2743 victim is saved in the visitor.
2744 @retval FALSE
2745 */
2746
accept_visitor(MDL_wait_for_graph_visitor * gvisitor)2747 bool MDL_ticket::accept_visitor(MDL_wait_for_graph_visitor *gvisitor)
2748 {
2749 return m_lock->visit_subgraph(this, gvisitor);
2750 }
2751
2752
2753 /**
2754 A fragment of recursive traversal of the wait-for graph of
2755 MDL contexts in the server in search for deadlocks.
2756 Assume this MDL context is a node in the wait-for graph,
2757 and direct the visitor to all adjacent nodes. As long
2758 as the starting node is remembered in the visitor, a
2759 deadlock is found when the same node is visited twice.
2760 One MDL context is connected to another in the wait-for
2761 graph if it waits on a resource that is held by the other
2762 context.
2763
2764 @retval TRUE A deadlock is found. A pointer to deadlock
2765 victim is saved in the visitor.
2766 @retval FALSE
2767 */
2768
visit_subgraph(MDL_wait_for_graph_visitor * gvisitor)2769 bool MDL_context::visit_subgraph(MDL_wait_for_graph_visitor *gvisitor)
2770 {
2771 bool result= FALSE;
2772
2773 mysql_prlock_rdlock(&m_LOCK_waiting_for);
2774
2775 if (m_waiting_for)
2776 result= m_waiting_for->accept_visitor(gvisitor);
2777
2778 mysql_prlock_unlock(&m_LOCK_waiting_for);
2779
2780 return result;
2781 }
2782
2783
/**
  Try to find a deadlock. This function produces no errors.

  @note If during deadlock resolution context which performs deadlock
        detection is chosen as a victim it will be informed about the
        fact by setting VICTIM status to its wait slot.
*/

void MDL_context::find_deadlock()
{
  while (1)
  {
    /*
      The fact that we use fresh instance of gvisitor for each
      search performed by find_deadlock() below is important,
      the code responsible for victim selection relies on this.
    */
    Deadlock_detection_visitor dvisitor(this);
    MDL_context *victim;

    if (! visit_subgraph(&dvisitor))
    {
      /* No deadlocks are found! */
      break;
    }

    victim= dvisitor.get_victim();

    /*
      Failure to change status of the victim is OK as it means
      that the victim has received some other message and is
      about to stop its waiting/to break deadlock loop.
      Even when the initiator of the deadlock search is
      chosen the victim, we need to set the respective wait
      result in order to "close" it for any attempt to
      schedule the request.
      This is needed to avoid a possible race during
      cleanup in case when the lock request on which the
      context was waiting is concurrently satisfied.
    */
    (void) victim->m_wait.set_status(MDL_wait::VICTIM);
    /* Make repeat victims less attractive for future selections. */
    victim->inc_deadlock_overweight();
    victim->unlock_deadlock_victim();

    /* If we sacrificed ourselves, the wait will end; stop searching. */
    if (victim == this)
      break;
    /*
      After adding a new edge to the waiting graph we found that it
      creates a loop (i.e. there is a deadlock). We decided to destroy
      this loop by removing an edge, but not the one that we added.
      Since this doesn't guarantee that all loops created by addition
      of the new edge are destroyed, we have to repeat the search.
    */
  }
}
2839
2840
2841 /**
2842 Release lock.
2843
2844 @param duration Lock duration.
2845 @param ticket Ticket for lock to be released.
2846
2847 */
2848
release_lock(enum_mdl_duration duration,MDL_ticket * ticket)2849 void MDL_context::release_lock(enum_mdl_duration duration, MDL_ticket *ticket)
2850 {
2851 MDL_lock *lock= ticket->m_lock;
2852 DBUG_ENTER("MDL_context::release_lock");
2853 DBUG_PRINT("enter", ("db: '%s' name: '%s'",
2854 lock->key.db_name(), lock->key.name()));
2855
2856 DBUG_ASSERT(this == ticket->get_ctx());
2857
2858 lock->remove_ticket(m_pins, &MDL_lock::m_granted, ticket);
2859
2860 m_tickets[duration].remove(ticket);
2861 MDL_ticket::destroy(ticket);
2862
2863 DBUG_VOID_RETURN;
2864 }
2865
2866
2867 /**
2868 Release lock with explicit duration.
2869
2870 @param ticket Ticket for lock to be released.
2871
2872 */
2873
void MDL_context::release_lock(MDL_ticket *ticket)
{
  /* This entry point is only valid for locks with explicit duration. */
  DBUG_SLOW_ASSERT(ticket->m_duration == MDL_EXPLICIT);

  release_lock(MDL_EXPLICIT, ticket);
}
2880
2881
2882 /**
2883 Release all locks associated with the context. If the sentinel
2884 is not NULL, do not release locks stored in the list after and
2885 including the sentinel.
2886
2887 Statement and transactional locks are added to the beginning of
2888 the corresponding lists, i.e. stored in reverse temporal order.
2889 This allows to employ this function to:
2890 - back off in case of a lock conflict.
2891 - release all locks in the end of a statement or transaction
2892 - rollback to a savepoint.
2893 */
2894
release_locks_stored_before(enum_mdl_duration duration,MDL_ticket * sentinel)2895 void MDL_context::release_locks_stored_before(enum_mdl_duration duration,
2896 MDL_ticket *sentinel)
2897 {
2898 MDL_ticket *ticket;
2899 Ticket_iterator it(m_tickets[duration]);
2900 DBUG_ENTER("MDL_context::release_locks_stored_before");
2901
2902 if (m_tickets[duration].is_empty())
2903 DBUG_VOID_RETURN;
2904
2905 while ((ticket= it++) && ticket != sentinel)
2906 {
2907 DBUG_PRINT("info", ("found lock to release ticket=%p", ticket));
2908 release_lock(duration, ticket);
2909 }
2910
2911 DBUG_VOID_RETURN;
2912 }
2913
2914
2915 /**
2916 Release all explicit locks in the context which correspond to the
2917 same name/object as this lock request.
2918
2919 @param ticket One of the locks for the name/object for which all
2920 locks should be released.
2921 */
2922
release_all_locks_for_name(MDL_ticket * name)2923 void MDL_context::release_all_locks_for_name(MDL_ticket *name)
2924 {
2925 /* Use MDL_ticket::m_lock to identify other locks for the same object. */
2926 MDL_lock *lock= name->m_lock;
2927
2928 /* Remove matching lock tickets from the context. */
2929 MDL_ticket *ticket;
2930 Ticket_iterator it_ticket(m_tickets[MDL_EXPLICIT]);
2931
2932 while ((ticket= it_ticket++))
2933 {
2934 DBUG_ASSERT(ticket->m_lock);
2935 if (ticket->m_lock == lock)
2936 release_lock(MDL_EXPLICIT, ticket);
2937 }
2938 }
2939
2940
2941 /**
2942 Downgrade an EXCLUSIVE or SHARED_NO_WRITE lock to shared metadata lock.
2943
2944 @param type Type of lock to which exclusive lock should be downgraded.
2945 */
2946
void MDL_ticket::downgrade_lock(enum_mdl_type type)
{
  /*
    Do nothing if already downgraded. Used when we FLUSH TABLE under
    LOCK TABLES and a table is listed twice in LOCK TABLES list.
    Note that this code might even try to "downgrade" a weak lock
    (e.g. SW) to a stronger one (e.g SNRW). So we can't even assert
    here that target lock is weaker than existing lock.
  */
  if (m_type == type || !has_stronger_or_equal_type(type))
    return;

  /* Only allow downgrade in some specific known cases */
  DBUG_ASSERT((get_key()->mdl_namespace() != MDL_key::BACKUP &&
               (m_type == MDL_EXCLUSIVE ||
                m_type == MDL_SHARED_NO_WRITE)) ||
              (get_key()->mdl_namespace() == MDL_key::BACKUP &&
               (m_type == MDL_BACKUP_DDL ||
                m_type == MDL_BACKUP_WAIT_FLUSH)));

  /* Serialize with other modifications of the shared lock object's state. */
  mysql_prlock_wrlock(&m_lock->m_rwlock);
  /*
    To update state of MDL_lock object correctly we need to temporarily
    exclude ticket from the granted queue and then include it back.
  */
  m_lock->m_granted.remove_ticket(this);
  m_type= type;
  m_lock->m_granted.add_ticket(this);
  /*
    The weaker type may be compatible with requests that conflicted with
    the old one, so give the waiters a chance to be granted now.
  */
  m_lock->reschedule_waiters();
  mysql_prlock_unlock(&m_lock->m_rwlock);
}
2978
2979
2980 /**
  Auxiliary function which allows one to check whether we have some kind of
  lock on an object. Returns TRUE if we have a lock of the given or a
  stronger type.
2983
2984 @param mdl_namespace Id of object namespace
2985 @param db Name of the database
2986 @param name Name of the object
2987 @param mdl_type Lock type. Pass in the weakest type to find
2988 out if there is at least some lock.
2989
2990 @return TRUE if current context contains satisfied lock for the object,
2991 FALSE otherwise.
2992 */
2993
2994 bool
is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace,const char * db,const char * name,enum_mdl_type mdl_type)2995 MDL_context::is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace,
2996 const char *db, const char *name,
2997 enum_mdl_type mdl_type)
2998 {
2999 MDL_request mdl_request;
3000 enum_mdl_duration not_unused;
3001 /* We don't care about exact duration of lock here. */
3002 MDL_REQUEST_INIT(&mdl_request, mdl_namespace, db, name, mdl_type,
3003 MDL_TRANSACTION);
3004 MDL_ticket *ticket= find_ticket(&mdl_request, ¬_unused);
3005
3006 DBUG_ASSERT(ticket == NULL || ticket->m_lock);
3007
3008 return ticket;
3009 }
3010
3011
3012 /**
3013 Return thread id of the owner of the lock or 0 if
3014 there is no owner.
3015 @note: Lock type is not considered at all, the function
3016 simply checks that there is some lock for the given key.
3017
3018 @return thread id of the owner of the lock or 0
3019 */
3020
unsigned long
MDL_context::get_lock_owner(MDL_key *key)
{
  /* Make sure this context has LF-hash pins before probing the lock map. */
  fix_pins();
  return mdl_locks.get_lock_owner(m_pins, key);
}
3027
3028
3029 /**
3030 Check if we have any pending locks which conflict with existing shared lock.
3031
3032 @pre The ticket must match an acquired lock.
3033
3034 @return TRUE if there is a conflicting lock request, FALSE otherwise.
3035 */
3036
bool MDL_ticket::has_pending_conflicting_lock() const
{
  /* Delegate to the shared lock object, which knows about all waiters. */
  return m_lock->has_pending_conflicting_lock(m_type);
}
3041
/** Return a key identifying this lock. */
MDL_key *MDL_ticket::get_key() const
{
  /* The key is stored in the shared MDL_lock object, not in the ticket. */
  return &m_lock->key;
}
3047
/**
  Releases metadata locks that were acquired after a specific savepoint.

  @note Used to release tickets acquired during a savepoint unit.
  @note It's safe to iterate over and unlock any locks taken after this
        savepoint because other statements that take other special locks
        cause an implicit commit (ie LOCK TABLES).
*/
3056
void MDL_context::rollback_to_savepoint(const MDL_savepoint &mdl_savepoint)
{
  DBUG_ENTER("MDL_context::rollback_to_savepoint");

  /* If savepoint is NULL, it is from the start of the transaction. */
  /*
    Ticket lists are kept in reverse temporal order, so everything stored
    before the savepoint's tickets was acquired after the savepoint and
    must be released.
  */
  release_locks_stored_before(MDL_STATEMENT, mdl_savepoint.m_stmt_ticket);
  release_locks_stored_before(MDL_TRANSACTION, mdl_savepoint.m_trans_ticket);

  DBUG_VOID_RETURN;
}
3067
3068
3069 /**
3070 Release locks acquired by normal statements (SELECT, UPDATE,
3071 DELETE, etc) in the course of a transaction. Do not release
3072 HANDLER locks, if there are any.
3073
3074 This method is used at the end of a transaction, in
3075 implementation of COMMIT (implicit or explicit) and ROLLBACK.
3076 */
3077
void MDL_context::release_transactional_locks(THD *thd)
{
  DBUG_ENTER("MDL_context::release_transactional_locks");
  /* Fail if there are active transactions */
  DBUG_ASSERT(!(thd->server_status &
                (SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY)));
  /* A NULL sentinel releases the whole list for the given duration. */
  release_locks_stored_before(MDL_STATEMENT, NULL);
  release_locks_stored_before(MDL_TRANSACTION, NULL);
  DBUG_VOID_RETURN;
}
3088
release_statement_locks()3089 void MDL_context::release_statement_locks()
3090 {
3091 DBUG_ENTER("MDL_context::release_transactional_locks");
3092 release_locks_stored_before(MDL_STATEMENT, NULL);
3093 DBUG_VOID_RETURN;
3094 }
3095
3096
3097 /**
3098 Does this savepoint have this lock?
3099
3100 @retval TRUE The ticket is older than the savepoint or
3101 is an LT, HA or GLR ticket. Thus it belongs
3102 to the savepoint or has explicit duration.
  @retval FALSE The ticket is newer than the savepoint
                and is not an LT, HA or GLR ticket.
3105 */
3106
has_lock(const MDL_savepoint & mdl_savepoint,MDL_ticket * mdl_ticket)3107 bool MDL_context::has_lock(const MDL_savepoint &mdl_savepoint,
3108 MDL_ticket *mdl_ticket)
3109 {
3110 MDL_ticket *ticket;
3111 /* Start from the beginning, most likely mdl_ticket's been just acquired. */
3112 MDL_context::Ticket_iterator s_it(m_tickets[MDL_STATEMENT]);
3113 MDL_context::Ticket_iterator t_it(m_tickets[MDL_TRANSACTION]);
3114
3115 while ((ticket= s_it++) && ticket != mdl_savepoint.m_stmt_ticket)
3116 {
3117 if (ticket == mdl_ticket)
3118 return FALSE;
3119 }
3120
3121 while ((ticket= t_it++) && ticket != mdl_savepoint.m_trans_ticket)
3122 {
3123 if (ticket == mdl_ticket)
3124 return FALSE;
3125 }
3126 return TRUE;
3127 }
3128
3129
3130 /**
3131 Change lock duration for transactional lock.
3132
3133 @param ticket Ticket representing lock.
3134 @param duration Lock duration to be set.
3135
3136 @note This method only supports changing duration of
3137 transactional lock to some other duration.
3138 */
3139
void MDL_context::set_lock_duration(MDL_ticket *mdl_ticket,
                                    enum_mdl_duration duration)
{
  /* Only a transactional lock may be moved, and only to another duration. */
  DBUG_SLOW_ASSERT(mdl_ticket->m_duration == MDL_TRANSACTION &&
                   duration != MDL_TRANSACTION);

  m_tickets[MDL_TRANSACTION].remove(mdl_ticket);
  m_tickets[duration].push_front(mdl_ticket);
#ifndef DBUG_OFF
  /* The per-ticket duration tag is maintained for debug builds only. */
  mdl_ticket->m_duration= duration;
#endif
}
3152
3153
3154 /**
3155 Set explicit duration for all locks in the context.
3156 */
3157
set_explicit_duration_for_all_locks()3158 void MDL_context::set_explicit_duration_for_all_locks()
3159 {
3160 int i;
3161 MDL_ticket *ticket;
3162
3163 /*
3164 In the most common case when this function is called list
3165 of transactional locks is bigger than list of locks with
3166 explicit duration. So we start by swapping these two lists
3167 and then move elements from new list of transactional
3168 locks and list of statement locks to list of locks with
3169 explicit duration.
3170 */
3171
3172 m_tickets[MDL_EXPLICIT].swap(m_tickets[MDL_TRANSACTION]);
3173
3174 for (i= 0; i < MDL_EXPLICIT; i++)
3175 {
3176 Ticket_iterator it_ticket(m_tickets[i]);
3177
3178 while ((ticket= it_ticket++))
3179 {
3180 m_tickets[i].remove(ticket);
3181 m_tickets[MDL_EXPLICIT].push_front(ticket);
3182 }
3183 }
3184
3185 #ifndef DBUG_OFF
3186 Ticket_iterator exp_it(m_tickets[MDL_EXPLICIT]);
3187
3188 while ((ticket= exp_it++))
3189 ticket->m_duration= MDL_EXPLICIT;
3190 #endif
3191 }
3192
3193
3194 /**
3195 Set transactional duration for all locks in the context.
3196 */
3197
set_transaction_duration_for_all_locks()3198 void MDL_context::set_transaction_duration_for_all_locks()
3199 {
3200 MDL_ticket *ticket;
3201
3202 /*
3203 In the most common case when this function is called list
3204 of explicit locks is bigger than two other lists (in fact,
3205 list of statement locks is always empty). So we start by
3206 swapping list of explicit and transactional locks and then
3207 move contents of new list of explicit locks to list of
3208 locks with transactional duration.
3209 */
3210
3211 DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty());
3212
3213 m_tickets[MDL_TRANSACTION].swap(m_tickets[MDL_EXPLICIT]);
3214
3215 Ticket_iterator it_ticket(m_tickets[MDL_EXPLICIT]);
3216
3217 while ((ticket= it_ticket++))
3218 {
3219 m_tickets[MDL_EXPLICIT].remove(ticket);
3220 m_tickets[MDL_TRANSACTION].push_front(ticket);
3221 }
3222
3223 #ifndef DBUG_OFF
3224 Ticket_iterator trans_it(m_tickets[MDL_TRANSACTION]);
3225
3226 while ((ticket= trans_it++))
3227 ticket->m_duration= MDL_TRANSACTION;
3228 #endif
3229 }
3230
3231
3232
void MDL_context::release_explicit_locks()
{
  /* A NULL sentinel means: release every explicit-duration lock. */
  release_locks_stored_before(MDL_EXPLICIT, NULL);
}
3237
has_explicit_locks()3238 bool MDL_context::has_explicit_locks()
3239 {
3240 MDL_ticket *ticket = NULL;
3241
3242 Ticket_iterator it(m_tickets[MDL_EXPLICIT]);
3243
3244 while ((ticket = it++))
3245 {
3246 return true;
3247 }
3248
3249 return false;
3250 }
3251
3252 #ifdef WITH_WSREP
3253 static
wsrep_get_mdl_namespace_name(MDL_key::enum_mdl_namespace ns)3254 const char *wsrep_get_mdl_namespace_name(MDL_key::enum_mdl_namespace ns)
3255 {
3256 switch (ns)
3257 {
3258 case MDL_key::BACKUP : return "BACKUP";
3259 case MDL_key::SCHEMA : return "SCHEMA";
3260 case MDL_key::TABLE : return "TABLE";
3261 case MDL_key::FUNCTION : return "FUNCTION";
3262 case MDL_key::PROCEDURE : return "PROCEDURE";
3263 case MDL_key::PACKAGE_BODY: return "PACKAGE BODY";
3264 case MDL_key::TRIGGER : return "TRIGGER";
3265 case MDL_key::EVENT : return "EVENT";
3266 case MDL_key::USER_LOCK : return "USER_LOCK";
3267 default: break;
3268 }
3269 return "UNKNOWN";
3270 }
3271
/* Dump this ticket's type, namespace, db/name and wait stage via WSREP_DEBUG. */
void MDL_ticket::wsrep_report(bool debug) const
{
  /* Only report when wsrep debug output is requested. */
  if (!debug) return;

  const PSI_stage_info *psi_stage= m_lock->key.get_wait_state_name();
  WSREP_DEBUG("MDL ticket: type: %s space: %s db: %s name: %s (%s)",
              get_type_name()->str,
              wsrep_get_mdl_namespace_name(m_lock->key.mdl_namespace()),
              m_lock->key.db_name(),
              m_lock->key.name(),
              psi_stage->m_name);
}
3284 #endif /* WITH_WSREP */
3285