/* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation. The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifndef MT_LOCK_HPP
#define MT_LOCK_HPP

#include <ndb_global.h>
#include "mt-asm.h"
#include <NdbMutex.h>

#define JAM_FILE_ID 323

struct mt_lock_stat
{
  const void * m_ptr;       // address of the lock being tracked
  char * m_name;            // name given at registration
  Uint32 m_contended_count; // number of times the lock was contended
  Uint32 m_spin_count;      // total spins while waiting for the lock
};

static void register_lock(const void * ptr, const char * name);
/**
 * We disable the use of spinlocks by default since they do not work
 * properly together with realtime settings. This also provides more
 * stable results in some environments, at the expense of a minor
 * optimisation. If optimal performance is desired without the use of
 * realtime settings, and each thread is always guaranteed to run on
 * its own processor, spinlocks can be enabled again by defining
 * NDB_USE_SPINLOCK in addition to NDB_HAVE_XCNG (see the #if below).
 */
#if defined(NDB_HAVE_XCNG) && defined(NDB_USE_SPINLOCK)
static mt_lock_stat * lookup_lock(const void * ptr);
struct thr_spin_lock
{
  thr_spin_lock(const char * name = 0)
  {
    m_lock = 0;
    register_lock(this, name);
  }

  volatile Uint32 m_lock;
};

static
ATTRIBUTE_NOINLINE
void
lock_slow(void * sl, volatile unsigned * val)
{
  mt_lock_stat* s = lookup_lock(sl); // lookup before owning lock

  /**
   * Spin (with cpu_pause) while the lock looks taken, then try to
   * grab it with an atomic exchange; if another thread won the race,
   * go back to spinning.
   */
loop:
  Uint32 spins = 0;
  do {
    spins++;
    cpu_pause();
  } while (* val == 1);

  if (unlikely(xcng(val, 1) != 0))
    goto loop;

  if (s)
  {
    s->m_spin_count += spins;
    Uint32 count = ++s->m_contended_count;
    /* Throttle reporting: print each of the first 20 contentions,
     * then every 200th up to 10000, then every 5000th. */
    Uint32 freq = (count > 10000 ? 5000 : (count > 20 ? 200 : 1));

    if ((count % freq) == 0)
      printf("%s waiting for lock, contentions: %u spins: %u\n",
             s->m_name, count, s->m_spin_count);
  }
}

94
95 static
96 inline
97 void
lock(struct thr_spin_lock * sl)98 lock(struct thr_spin_lock* sl)
99 {
100 volatile unsigned* val = &sl->m_lock;
101 if (likely(xcng(val, 1) == 0))
102 return;
103
104 lock_slow(sl, val);
105 }
106
107 static
108 inline
109 void
unlock(struct thr_spin_lock * sl)110 unlock(struct thr_spin_lock* sl)
111 {
112 /**
113 * Memory barrier here, to make sure all of our stores are visible before
114 * the lock release is.
115 *
116 * NOTE: Bug#13870457 UNNECESSARY STRONG MEMORY BARRIER ...
117 * Suggest that a 'wmb' may have been sufficient here.
118 * However, as spinlocks are not used anymore,
119 * (see fix for bug#16961971) this will not be fixed.
120 */
121 mb();
122 sl->m_lock = 0;
123 }
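
/*
 * A minimal sketch of the ordering that the mb() in unlock() provides
 * (the thread labels and 'shared_value' are hypothetical, for
 * illustration only):
 *
 *   // Thread A, holding the lock:
 *   //   shared_value = 42;  // plain store to protected data
 *   //   unlock(&sl);        // mb() orders the store before m_lock = 0
 *
 *   // Thread B:
 *   //   lock(&sl);          // once acquired, observes shared_value == 42
 *
 * Without the barrier, the store to m_lock could become visible before
 * the store to shared_value, letting thread B read stale data.
 */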

static
inline
int
trylock(struct thr_spin_lock* sl)
{
  volatile unsigned* val = &sl->m_lock;
  return xcng(val, 1);
}
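
/*
 * Example usage of the spinlock API above. This is a hypothetical
 * sketch: the lock name and the critical section are illustrative
 * only, not part of this file.
 *
 *   static struct thr_spin_lock g_job_lock("job_lock");
 *
 *   void produce_job()
 *   {
 *     lock(&g_job_lock);    // spins in lock_slow() on contention
 *     // ... critical section ...
 *     unlock(&g_job_lock);  // full barrier, then releases the lock
 *   }
 *
 *   // trylock() returns the previous lock value: 0 means the lock
 *   // was free and is now ours, non-zero means it was already held.
 *   if (trylock(&g_job_lock) == 0)
 *   {
 *     // ... critical section ...
 *     unlock(&g_job_lock);
 *   }
 */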
#else
#define thr_spin_lock thr_mutex
#endif

struct thr_mutex
{
  thr_mutex(const char * name = 0) {
    NdbMutex_Init(&m_mutex);
    register_lock(this, name);
  }

  NdbMutex m_mutex;
};

/**
 * For receive threads we have an array of thr_spin_lock; its elements
 * must all be aligned to NDB_CL (the cache line size).
 *
 * thr_aligned_spin_lock is defined as an aligned, and therefore padded,
 * version of thr_spin_lock.
 *
 * Beware of using pointer arithmetic on a thr_spin_lock pointer that
 * points to a thr_aligned_spin_lock object: although the two types look
 * logically the same, their padding differs, so the arithmetic goes
 * wrong (see the sketch below the definition).
 *
 * A proper solution would be to define thr_aligned_spin_lock as its own
 * type and do the needed refactoring of the code.
 */
struct alignas(NDB_CL) thr_aligned_spin_lock: public thr_spin_lock { };
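
/*
 * A sketch of the pointer-arithmetic hazard described above (the array
 * size and variable names are hypothetical):
 *
 *   thr_aligned_spin_lock locks[4]; // each element padded to NDB_CL
 *   thr_spin_lock* base = locks;    // legal implicit upcast
 *
 *   // WRONG: base + 1 advances by sizeof(thr_spin_lock), which is
 *   // smaller than sizeof(thr_aligned_spin_lock), so it points into
 *   // the padding of locks[0] rather than at locks[1]:
 *   //   lock(base + 1);
 *
 *   // Correct: index the array using its real element type:
 *   lock(&locks[1]);
 */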

static
inline
void
lock(struct thr_mutex* sl)
{
  NdbMutex_Lock(&sl->m_mutex);
}

static
inline
void
unlock(struct thr_mutex* sl)
{
  NdbMutex_Unlock(&sl->m_mutex);
}

static
inline
int
trylock(struct thr_mutex* sl)
{
  return NdbMutex_Trylock(&sl->m_mutex);
}
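
/*
 * When NDB_USE_SPINLOCK is not defined, thr_spin_lock is #defined to
 * thr_mutex above, so callers use the same lock()/unlock()/trylock()
 * calls in both builds. A hypothetical sketch (the lock name is
 * illustrative only):
 *
 *   static struct thr_spin_lock g_state_lock("state_lock");
 *
 *   void update_state()
 *   {
 *     lock(&g_state_lock);    // NdbMutex_Lock() in the mutex build
 *     // ... critical section ...
 *     unlock(&g_state_lock);  // NdbMutex_Unlock()
 *   }
 *
 * Note that in the mutex build trylock() forwards the result of
 * NdbMutex_Trylock(); by pthread conventions 0 means success, while a
 * non-zero value means the mutex was already held, rather than the
 * old lock word that the spinlock build returns.
 */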


#undef JAM_FILE_ID

#endif