1 /* Copyright (c) 2009, 2021, Oracle and/or its affiliates.
2 
3   This program is free software; you can redistribute it and/or modify
4   it under the terms of the GNU General Public License, version 2.0,
5   as published by the Free Software Foundation.
6 
7   This program is also distributed with certain software (including
8   but not limited to OpenSSL) that is licensed under separate terms,
9   as designated in a particular file or component or in included license
10   documentation.  The authors of MySQL hereby grant you an additional
11   permission to link the program and your derivative works with the
12   separately licensed software that they have included with MySQL.
13 
14   This program is distributed in the hope that it will be useful,
15   but WITHOUT ANY WARRANTY; without even the implied warranty of
16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17   GNU General Public License, version 2.0, for more details.
18 
19   You should have received a copy of the GNU General Public License
20   along with this program; if not, write to the Free Software Foundation,
21   51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
22 
23 #ifndef PFS_LOCK_H
24 #define PFS_LOCK_H
25 
26 /**
27   @file storage/perfschema/pfs_lock.h
28   Performance schema internal locks (declarations).
29 */
30 
31 #include "my_global.h"
32 
33 #include "pfs_atomic.h"
34 
/*
  Testing aid: uncommenting the definition below (and commenting out the
  pass-through definition that follows) forces std::memory_order_relaxed
  everywhere MEM() is used, to deliberately provoke memory-ordering bugs
  during testing.
*/
// #define MEM(X) std::memory_order_relaxed
/* Correct definition: pass the requested memory order through unchanged. */
38 #define MEM(X) X
39 
40 /**
41   @addtogroup Performance_schema_buffers
42   @{
43 */
44 
45 /**
46   State of a free record.
47   Values of a free record should not be read by a reader.
48   Writers can concurrently attempt to allocate a free record.
49 */
50 #define PFS_LOCK_FREE 0x00
51 /**
52   State of a dirty record.
53   Values of a dirty record should not be read by a reader,
54   as the record is currently being modified.
55   Only one writer, the writer which owns the record, should
56   modify the record content.
57 */
58 #define PFS_LOCK_DIRTY 0x01
59 /**
60   State of an allocated record.
61   Values of an allocated record are safe to read by a reader.
62   A writer may modify some but not all properties of the record:
63   only modifying values that can never cause the reader to crash is allowed.
64 */
65 #define PFS_LOCK_ALLOCATED 0x02
66 
/** Mask selecting the record version (the high 30 bits of m_version_state). */
#define VERSION_MASK 0xFFFFFFFC
/** Mask selecting the record state (the low 2 bits of m_version_state). */
#define STATE_MASK   0x00000003
/** Amount added to bump the version by one (1 in the high 30 bits). */
#define VERSION_INC  4
70 
/**
  Copy of a @c pfs_lock, saved at the start of an optimistic read.
  Used by pfs_lock::begin_optimistic_lock() / pfs_lock::end_optimistic_lock()
  to detect whether the record changed while it was being read.
*/
struct pfs_optimistic_state
{
  /** Saved combined version + state word. */
  uint32 m_version_state;
};
75 
/**
  Copy of a @c pfs_lock, saved when a record enters the DIRTY state.
  The owning writer passes it back when completing the transition
  (e.g. pfs_lock::dirty_to_allocated(), pfs_lock::dirty_to_free()).
*/
struct pfs_dirty_state
{
  /** Saved combined version + state word. */
  uint32 m_version_state;
};
80 
81 /**
82   A 'lock' protecting performance schema internal buffers.
83   This lock is used to mark the state of a record.
84   Access to the record is not enforced here,
85   it's up to the readers and writers to look at the record state
86   before making an actual read or write operation.
87 */
struct pfs_lock
{
  /**
    The record internal version and state
    @sa PFS_LOCK_FREE
    @sa PFS_LOCK_DIRTY
    @sa PFS_LOCK_ALLOCATED
    The version number is to transform the 'ABA' problem
    (see http://en.wikipedia.org/wiki/ABA_problem)
    into an 'A(n)BA(n + 1)' problem, where 'n' is the m_version number.
    When the performance schema instrumentation deletes a record,
    then create a different record reusing the same memory allocation,
    the version number is incremented, so that a reader can detect that
    the record was changed. Note that the version number is never
    reset to zero when a new record is created.
    The version number is stored in the high 30 bits.
    The state is stored in the low 2 bits.
  */
  uint32 m_version_state;

  /**
    Read the current version + state word, without synchronization.
    The caller must tolerate a stale or torn-in-time value; transitions
    that rely on this are performed only by the writer owning the record.
    @return a (possibly stale) copy of m_version_state
  */
  uint32 copy_version_state()
  {
    uint32 copy;

    copy= m_version_state; /* dirty read */

    return copy;
  }

  /** Returns true if the record is free. */
  bool is_free(void)
  {
    uint32 copy;

    copy= PFS_atomic::load_u32(&m_version_state);

    /* Only the low 2 state bits matter; the version is ignored here. */
    return ((copy & STATE_MASK) == PFS_LOCK_FREE);
  }

  /** Returns true if the record contains values that can be read. */
  bool is_populated(void)
  {
    uint32 copy;

    copy= PFS_atomic::load_u32(&m_version_state);

    return ((copy & STATE_MASK) == PFS_LOCK_ALLOCATED);
  }

  /**
    Execute a free to dirty transition.
    This transition is safe to execute concurrently by multiple writers.
    Only one writer will succeed to acquire the record.
    @param [out] copy_ptr on success, receives the new version + state word,
      to be passed back when completing the transition
    @return true if the operation succeed
  */
  bool free_to_dirty(pfs_dirty_state *copy_ptr)
  {
    uint32 old_val;

    old_val= PFS_atomic::load_u32(&m_version_state);

    /* Cheap early exit: no CAS attempt if the record is not FREE. */
    if ((old_val & STATE_MASK) != PFS_LOCK_FREE)
    {
      return false;
    }

    /* Keep the same version, set the DIRTY state. */
    uint32 new_val= (old_val & VERSION_MASK) + PFS_LOCK_DIRTY;
    bool pass;

    /* CAS arbitrates between concurrent writers: exactly one wins. */
    pass= PFS_atomic::cas_u32(&m_version_state, &old_val, new_val);

    if (pass)
    {
      copy_ptr->m_version_state= new_val;
    }

    return pass;
  }

  /**
    Execute an allocated to dirty transition.
    This transition should be executed by the writer that owns the record,
    before the record is modified.
    @param [out] copy_ptr receives the new version + state word,
      to be passed back when completing the transition
  */
  void allocated_to_dirty(pfs_dirty_state *copy_ptr)
  {
    uint32 copy= copy_version_state();
    /* Make sure the record was ALLOCATED. */
    assert((copy & STATE_MASK) == PFS_LOCK_ALLOCATED);
    /* Keep the same version, set the DIRTY state */
    uint32 new_val= (copy & VERSION_MASK) + PFS_LOCK_DIRTY;
    /* We own the record, no need to use compare and swap. */

    PFS_atomic::store_u32(&m_version_state, new_val);

    copy_ptr->m_version_state= new_val;
  }

  /**
    Execute a dirty to allocated transition.
    This transition should be executed by the writer that owns the record,
    after the record is in a state ready to be read.
    @param copy the dirty state saved when the record was acquired
  */
  void dirty_to_allocated(const pfs_dirty_state *copy)
  {
    /* Make sure the record was DIRTY. */
    assert((copy->m_version_state & STATE_MASK) == PFS_LOCK_DIRTY);
    /* Increment the version, set the ALLOCATED state */
    uint32 new_val= (copy->m_version_state & VERSION_MASK) + VERSION_INC + PFS_LOCK_ALLOCATED;

    /* Single owning writer: a plain atomic store suffices, no CAS needed. */
    PFS_atomic::store_u32(&m_version_state, new_val);
  }

  /**
    Initialize a lock to allocated.
    This transition should be executed by the writer that owns the record and the lock,
    after the record is in a state ready to be read.
  */
  void set_allocated(void)
  {
    /* Do not set the version to 0, read the previous value. */
    uint32 copy= copy_version_state();
    /* Increment the version, set the ALLOCATED state */
    uint32 new_val= (copy & VERSION_MASK) + VERSION_INC + PFS_LOCK_ALLOCATED;

    PFS_atomic::store_u32(&m_version_state, new_val);
  }

  /**
    Initialize a lock to dirty.
    @param [out] copy_ptr receives the new version + state word
  */
  void set_dirty(pfs_dirty_state *copy_ptr)
  {
    /* Do not set the version to 0, read the previous value. */
    uint32 copy= PFS_atomic::load_u32(&m_version_state);
    /* Increment the version, set the DIRTY state */
    uint32 new_val= (copy & VERSION_MASK) + VERSION_INC + PFS_LOCK_DIRTY;
    PFS_atomic::store_u32(&m_version_state, new_val);

    copy_ptr->m_version_state= new_val;
  }

  /**
    Execute a dirty to free transition.
    This transition should be executed by the writer that owns the record.
    @param copy the dirty state saved when the record was acquired
  */
  void dirty_to_free(const pfs_dirty_state *copy)
  {
    /* Make sure the record was DIRTY. */
    assert((copy->m_version_state & STATE_MASK) == PFS_LOCK_DIRTY);
    /* Keep the same version, set the FREE state */
    uint32 new_val= (copy->m_version_state & VERSION_MASK) + PFS_LOCK_FREE;

    PFS_atomic::store_u32(&m_version_state, new_val);
  }

  /**
    Execute an allocated to free transition.
    This transition should be executed by the writer that owns the record.
  */
  void allocated_to_free(void)
  {
    /*
      If this record is not in the ALLOCATED state and the caller is trying
      to free it, this is a bug: the caller is confused,
      and potentially damaging data owned by another thread or object.
    */
    uint32 copy= copy_version_state();
    /* Make sure the record was ALLOCATED. */
    assert(((copy & STATE_MASK) == PFS_LOCK_ALLOCATED));
    /* Keep the same version, set the FREE state */
    uint32 new_val= (copy & VERSION_MASK) + PFS_LOCK_FREE;

    PFS_atomic::store_u32(&m_version_state, new_val);
  }

  /**
    Start an optimistic read operation.
    @param [out] copy Saved lock state
    @sa end_optimistic_lock.
  */
  void begin_optimistic_lock(struct pfs_optimistic_state *copy)
  {
    copy->m_version_state= PFS_atomic::load_u32(&m_version_state);
  }

  /**
    End an optimistic read operation.
    @sa begin_optimistic_lock.
    @param copy Saved lock state
    @return true if the data read is safe to use.
  */
  bool end_optimistic_lock(const struct pfs_optimistic_state *copy)
  {
    uint32 version_state;

    /* Check there was valid data to look at. */
    if ((copy->m_version_state & STATE_MASK) != PFS_LOCK_ALLOCATED)
      return false;

    version_state= PFS_atomic::load_u32(&m_version_state);

    /*
      Check the version + state has not changed.
      Any concurrent write bumped either the state bits or the version,
      so equality proves the record was stable for the whole read.
    */
    if (copy->m_version_state != version_state)
      return false;

    return true;
  }

  /**
    Read the current record version.
    @return the version, in the high 30 bits (state bits masked out)
  */
  uint32 get_version()
  {
    uint32 version_state;

    version_state= PFS_atomic::load_u32(&m_version_state);

    return (version_state & VERSION_MASK);
  }
};
306 
307 
308 /** @} */
309 #endif
310 
311