1 /*****************************************************************************
2
3 Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License, version 2.0,
7 as published by the Free Software Foundation.
8
9 This program is also distributed with certain software (including
10 but not limited to OpenSSL) that is licensed under separate terms,
11 as designated in a particular file or component or in included license
12 documentation. The authors of MySQL hereby grant you an additional
13 permission to link the program and your derivative works with the
14 separately licensed software that they have included with MySQL.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License, version 2.0, for more details.
20
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
24
25 *****************************************************************************/
26
27 /********************************************************************//**
28 @file ha/ha0ha.cc
29 The hash table with external chains
30
31 Created 8/22/1994 Heikki Tuuri
32 *************************************************************************/
33
34 #include "ha0ha.h"
35 #ifdef UNIV_NONINL
36 #include "ha0ha.ic"
37 #endif
38
39 #ifndef UNIV_HOTBACKUP
40 #ifdef UNIV_DEBUG
41 # include "buf0buf.h"
42 #endif /* UNIV_DEBUG */
43 # include "btr0sea.h"
44 #include "page0page.h"
45
46 /*************************************************************//**
47 Creates a hash table with at least n array cells. The actual number
48 of cells is chosen to be a prime number slightly bigger than n.
49 @return own: created table */
UNIV_INTERN
hash_table_t*
ha_create_func(
/*===========*/
	ulint	n,		/*!< in: number of array cells */
#ifdef UNIV_SYNC_DEBUG
	ulint	sync_level,	/*!< in: level of the mutexes or rw_locks
				in the latching order: this is used in the
				debug version */
#endif /* UNIV_SYNC_DEBUG */
	ulint	n_sync_obj,	/*!< in: number of mutexes or rw_locks
				to protect the hash table: must be a
				power of 2, or 0 */
	ulint	type)		/*!< in: type of datastructure for which
				the memory heap is going to be used e.g.:
				MEM_HEAP_FOR_BTR_SEARCH or
				MEM_HEAP_FOR_PAGE_HASH */
{
	hash_table_t*	table;
	ulint		i;

	/* Only the two heap types used by the adaptive hash index
	and the buffer pool page hash are supported here. */
	ut_a(type == MEM_HEAP_FOR_BTR_SEARCH
	     || type == MEM_HEAP_FOR_PAGE_HASH);

	ut_ad(ut_is_2pow(n_sync_obj));
	table = hash_create(n);

	/* Creating MEM_HEAP_BTR_SEARCH type heaps can potentially fail,
	but in practice it never should in this case, hence the asserts. */

	if (n_sync_obj == 0) {
		/* Unsynchronized table: a single shared heap suffices.
		The block size is capped at MEM_MAX_ALLOC_IN_BUF so a
		heap block fits in a buffer frame if one is used. */
		table->heap = mem_heap_create_typed(
			ut_min(4096, MEM_MAX_ALLOC_IN_BUF), type);
		ut_a(table->heap);

		return(table);
	}

	if (type == MEM_HEAP_FOR_PAGE_HASH) {
		/* We create a hash table protected by rw_locks for
		buf_pool->page_hash. */
		hash_create_sync_obj(table, HASH_TABLE_SYNC_RW_LOCK,
				     n_sync_obj, sync_level);
	} else {
		hash_create_sync_obj(table, HASH_TABLE_SYNC_MUTEX,
				     n_sync_obj, sync_level);
	}

	/* One memory heap per sync object, so that each latch protects
	its own heap independently of the others. */
	table->heaps = static_cast<mem_heap_t**>(
		mem_alloc(n_sync_obj * sizeof(void*)));

	for (i = 0; i < n_sync_obj; i++) {
		table->heaps[i] = mem_heap_create_typed(4096, type);
		ut_a(table->heaps[i]);
	}

	return(table);
}
108
109 #ifdef UNIV_SYNC_DEBUG
110 /*************************************************************//**
111 Verifies that the specified hash table is a part of adaptive hash index and
112 that its corresponding latch is X-latched by the current thread. */
113 static
114 bool
ha_assert_btr_x_locked(const hash_table_t * table)115 ha_assert_btr_x_locked(
116 /*===================*/
117 const hash_table_t* table) /*!<in: hash table to check */
118 {
119 ulint i;
120
121 ut_ad(table->adaptive);
122
123 for (i = 0; i < btr_search_index_num; i++) {
124 if (btr_search_sys->hash_tables[i] == table) {
125 break;
126 }
127 }
128
129 ut_ad(i < btr_search_index_num);
130 ut_ad(rw_lock_own(&btr_search_latch_arr[i], RW_LOCK_EX));
131
132 return(true);
133 }
134 #endif /* UNIV_SYNC_DEBUG */
135
136 /*************************************************************//**
137 Empties a hash table and frees the memory heaps. */
UNIV_INTERN
void
ha_clear(
/*=====*/
	hash_table_t*	table)	/*!< in, own: hash table */
{
	ulint	i;
	ulint	n;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!table->adaptive || ha_assert_btr_x_locked(table));
#endif /* UNIV_SYNC_DEBUG */

	/* Free the memory heaps. */
	n = table->n_sync_obj;

	for (i = 0; i < n; i++) {
		mem_heap_free(table->heaps[i]);
	}

	if (table->heaps) {
		mem_free(table->heaps);
	}

	/* Free the latches protecting the table; after this the table
	is of HASH_TABLE_SYNC_NONE type. */
	switch (table->type) {
	case HASH_TABLE_SYNC_MUTEX:
		for (ulint i = 0; i < table->n_sync_obj; i++)
			mutex_free(table->sync_obj.mutexes + i);
		mem_free(table->sync_obj.mutexes);
		table->sync_obj.mutexes = NULL;
		break;

	case HASH_TABLE_SYNC_RW_LOCK:
		for (ulint i = 0; i < table->n_sync_obj; i++)
			rw_lock_free(table->sync_obj.rw_locks + i);
		mem_free(table->sync_obj.rw_locks);
		table->sync_obj.rw_locks = NULL;
		break;

	case HASH_TABLE_SYNC_NONE:
		/* do nothing */
		break;
	}

	table->n_sync_obj = 0;
	table->type = HASH_TABLE_SYNC_NONE;


	/* Clear the hash table: detach all chains. The chain nodes
	themselves were freed above together with the heaps. */
	n = hash_get_n_cells(table);

	for (i = 0; i < n; i++) {
		hash_get_nth_cell(table, i)->node = NULL;
	}
}
195
196 /*************************************************************//**
197 Inserts an entry into a hash table. If an entry with the same fold number
198 is found, its node is updated to point to the new data, and no new node
199 is inserted. If btr_search_enabled is set to FALSE, we will only allow
200 updating existing nodes, but no new node is allowed to be added.
201 @return TRUE if succeed, FALSE if no more memory could be allocated */
UNIV_INTERN
ibool
ha_insert_for_fold_func(
/*====================*/
	hash_table_t*	table,	/*!< in: hash table */
	ulint		fold,	/*!< in: folded value of data; if a node with
				the same fold value already exists, it is
				updated to point to the same data, and no new
				node is created! */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	buf_block_t*	block,	/*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	const rec_t*	data)	/*!< in: data, must not be NULL */
{
	hash_cell_t*	cell;
	ha_node_t*	node;
	ha_node_t*	prev_node;
	ulint		hash;

	ut_ad(data);
	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	ut_a(block->frame == page_align(data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	hash_assert_can_modify(table, fold);
	ut_ad(btr_search_enabled);

	hash = hash_calc_hash(fold, table);

	cell = hash_get_nth_cell(table, hash);

	prev_node = static_cast<ha_node_t*>(cell->node);

	/* First pass: look for an existing node with the same fold value;
	if found, just repoint it to the new data — no new node needed. */
	while (prev_node != NULL) {
		if (prev_node->fold == fold) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
			if (table->adaptive) {
				/* Transfer the debug pointer count from
				the old block to the new one. */
				buf_block_t* prev_block = prev_node->block;
				ut_a(prev_block->frame
				     == page_align(prev_node->data));
				ut_a(prev_block->n_pointers > 0);
				prev_block->n_pointers--;
				block->n_pointers++;
			}

			prev_node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
			prev_node->data = data;

			return(TRUE);
		}

		prev_node = prev_node->next;
	}

	/* We have to allocate a new chain node */

	node = static_cast<ha_node_t*>(
		mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t)));

	if (node == NULL) {
		/* It was a btr search type memory heap and at the moment
		no more memory could be allocated: return */

		ut_ad(hash_get_heap(table, fold)->type & MEM_HEAP_BTR_SEARCH);

		return(FALSE);
	}

	ha_node_set_data(node, block, data);

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (table->adaptive) {
		block->n_pointers++;
	}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	node->fold = fold;

	node->next = NULL;

	/* Append the new node at the end of the chain; an empty cell
	gets the node directly. */
	prev_node = static_cast<ha_node_t*>(cell->node);

	if (prev_node == NULL) {

		cell->node = node;

		return(TRUE);
	}

	while (prev_node->next != NULL) {

		prev_node = prev_node->next;
	}

	prev_node->next = node;

	return(TRUE);
}
302
303 /***********************************************************//**
304 Deletes a hash node. */
305 UNIV_INTERN
306 void
ha_delete_hash_node(hash_table_t * table,ha_node_t * del_node)307 ha_delete_hash_node(
308 /*================*/
309 hash_table_t* table, /*!< in: hash table */
310 ha_node_t* del_node) /*!< in: node to be deleted */
311 {
312 ut_ad(table);
313 ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
314 #ifdef UNIV_SYNC_DEBUG
315 ut_ad(ha_assert_btr_x_locked(table));
316 #endif /* UNIV_SYNC_DEBUG */
317 ut_ad(btr_search_enabled);
318 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
319 if (table->adaptive) {
320 ut_a(del_node->block->frame = page_align(del_node->data));
321 ut_a(del_node->block->n_pointers > 0);
322 del_node->block->n_pointers--;
323 }
324 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
325
326 HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
327 }
328
329 /*********************************************************//**
330 Looks for an element when we know the pointer to the data, and updates
331 the pointer to data, if found.
332 @return TRUE if found */
333 UNIV_INTERN
334 ibool
ha_search_and_update_if_found_func(hash_table_t * table,ulint fold,const rec_t * data,buf_block_t * new_block,const rec_t * new_data)335 ha_search_and_update_if_found_func(
336 /*===============================*/
337 hash_table_t* table, /*!< in/out: hash table */
338 ulint fold, /*!< in: folded value of the searched data */
339 const rec_t* data, /*!< in: pointer to the data */
340 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
341 buf_block_t* new_block,/*!< in: block containing new_data */
342 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
343 const rec_t* new_data)/*!< in: new pointer to the data */
344 {
345 ha_node_t* node;
346
347 ut_ad(table);
348 ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
349 hash_assert_can_modify(table, fold);
350 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
351 ut_a(new_block->frame == page_align(new_data));
352 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
353 #ifdef UNIV_SYNC_DEBUG
354 ut_ad(ha_assert_btr_x_locked(table));
355 #endif /* UNIV_SYNC_DEBUG */
356
357 if (!btr_search_enabled) {
358 return(FALSE);
359 }
360
361 node = ha_search_with_data(table, fold, data);
362
363 if (node) {
364 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
365 if (table->adaptive) {
366 ut_a(node->block->n_pointers > 0);
367 node->block->n_pointers--;
368 new_block->n_pointers++;
369 }
370
371 node->block = new_block;
372 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
373 node->data = new_data;
374
375 return(TRUE);
376 }
377
378 return(FALSE);
379 }
380
381 /*****************************************************************//**
382 Removes from the chain determined by fold all nodes whose data pointer
383 points to the page given. */
384 UNIV_INTERN
385 void
ha_remove_all_nodes_to_page(hash_table_t * table,ulint fold,const page_t * page)386 ha_remove_all_nodes_to_page(
387 /*========================*/
388 hash_table_t* table, /*!< in: hash table */
389 ulint fold, /*!< in: fold value */
390 const page_t* page) /*!< in: buffer page */
391 {
392 ha_node_t* node;
393
394 ut_ad(table);
395 ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
396 hash_assert_can_modify(table, fold);
397 ut_ad(btr_search_enabled);
398
399 node = ha_chain_get_first(table, fold);
400
401 while (node) {
402 if (page_align(ha_node_get_data(node)) == page) {
403
404 /* Remove the hash node */
405
406 ha_delete_hash_node(table, node);
407
408 /* Start again from the first node in the chain
409 because the deletion may compact the heap of
410 nodes and move other nodes! */
411
412 node = ha_chain_get_first(table, fold);
413 } else {
414 node = ha_chain_get_next(node);
415 }
416 }
417 #ifdef UNIV_DEBUG
418 /* Check that all nodes really got deleted */
419
420 node = ha_chain_get_first(table, fold);
421
422 while (node) {
423 ut_a(page_align(ha_node_get_data(node)) != page);
424
425 node = ha_chain_get_next(node);
426 }
427 #endif
428 }
429
430 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
431 /*************************************************************//**
432 Validates a given range of the cells in hash table.
433 @return TRUE if ok */
434 UNIV_INTERN
435 ibool
ha_validate(hash_table_t * table,ulint start_index,ulint end_index)436 ha_validate(
437 /*========*/
438 hash_table_t* table, /*!< in: hash table */
439 ulint start_index, /*!< in: start index */
440 ulint end_index) /*!< in: end index */
441 {
442 ibool ok = TRUE;
443 ulint i;
444
445 ut_ad(table);
446 ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
447 ut_a(start_index <= end_index);
448 ut_a(start_index < hash_get_n_cells(table));
449 ut_a(end_index < hash_get_n_cells(table));
450
451 for (i = start_index; i <= end_index; i++) {
452 ha_node_t* node;
453 hash_cell_t* cell;
454
455 cell = hash_get_nth_cell(table, i);
456
457 for (node = static_cast<ha_node_t*>(cell->node);
458 node != 0;
459 node = node->next) {
460
461 if (hash_calc_hash(node->fold, table) != i) {
462 ut_print_timestamp(stderr);
463 fprintf(stderr,
464 "InnoDB: Error: hash table node"
465 " fold value %lu does not\n"
466 "InnoDB: match the cell number %lu.\n",
467 (ulong) node->fold, (ulong) i);
468
469 ok = FALSE;
470 }
471 }
472 }
473
474 return(ok);
475 }
476 #endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
477
478 /*************************************************************//**
479 Prints info of a hash table. */
UNIV_INTERN
void
ha_print_info(
/*==========*/
	FILE*		file,	/*!< in: file where to print */
	hash_table_t*	table)	/*!< in: hash table */
{
#ifdef UNIV_DEBUG
/* Some of the code here is disabled for performance reasons in production
builds, see http://bugs.mysql.com/36941 */
#define PRINT_USED_CELLS
#endif /* UNIV_DEBUG */

#ifdef PRINT_USED_CELLS
	hash_cell_t*	cell;
	ulint		cells	= 0;
	ulint		i;
#endif /* PRINT_USED_CELLS */
	ulint		n_bufs;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef PRINT_USED_CELLS
	/* Count the cells that have at least one chain node. */
	for (i = 0; i < hash_get_n_cells(table); i++) {

		cell = hash_get_nth_cell(table, i);

		if (cell->node) {

			cells++;
		}
	}
#endif /* PRINT_USED_CELLS */

	fprintf(file, "Hash table size %lu",
		(ulong) hash_get_n_cells(table));

#ifdef PRINT_USED_CELLS
	fprintf(file, ", used cells %lu", (ulong) cells);
#endif /* PRINT_USED_CELLS */

	/* table->heap is only set for unsynchronized tables
	(n_sync_obj == 0 in ha_create_func). */
	if (table->heaps == NULL && table->heap != NULL) {

		/* This calculation is intended for the adaptive hash
		index: how many buffer frames we have reserved? */

		n_bufs = UT_LIST_GET_LEN(table->heap->base) - 1;

		if (table->heap->free_block) {
			n_bufs++;
		}

		fprintf(file, ", node heap has %lu buffer(s)\n",
			(ulong) n_bufs);
	}
}
536 #endif /* !UNIV_HOTBACKUP */
537