1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * 2 * Copyright by The HDF Group. * 3 * Copyright by the Board of Trustees of the University of Illinois. * 4 * All rights reserved. * 5 * * 6 * This file is part of HDF5. The full HDF5 copyright notice, including * 7 * terms governing use, modification, and redistribution, is contained in * 8 * the COPYING file, which can be found at the root of the source code * 9 * distribution tree, or in https://www.hdfgroup.org/licenses. * 10 * If you do not have access to either file, you may request a copy from * 11 * help@hdfgroup.org. * 12 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 13 14 /* 15 * Programmer: John Mainzer -- 10/12/04 16 * 17 * Purpose: This file contains declarations which are normally visible 18 * only within the H5C package. 19 * 20 * Source files outside the H5C package should include 21 * H5Cprivate.h instead. 22 * 23 * The one exception to this rule is test/cache.c. The test 24 * code is easier to write if it can look at the cache's 25 * internal data structures. Indeed, this is the main 26 * reason why this file was created. 27 */ 28 29 /* clang-format off */ 30 /* Maintain current format by disabling format for this file */ 31 32 #if !(defined H5C_FRIEND || defined H5C_MODULE) 33 #error "Do not include this file outside the H5C package!" 
34 #endif 35 36 #ifndef H5Cpkg_H 37 #define H5Cpkg_H 38 39 /* Get package's private header */ 40 #include "H5Cprivate.h" 41 42 /* Other private headers needed by this file */ 43 #include "H5Clog.h" /* Cache logging */ 44 #include "H5SLprivate.h" /* Skip lists */ 45 46 /**************************/ 47 /* Package Private Macros */ 48 /**************************/ 49 50 /* Number of epoch markers active */ 51 #define H5C__MAX_EPOCH_MARKERS 10 52 53 54 /* Cache configuration settings */ 55 #define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */ 56 #define H5C__H5C_T_MAGIC 0x005CAC0E 57 58 59 /* Initial allocated size of the "flush_dep_parent" array */ 60 #define H5C_FLUSH_DEP_PARENT_INIT 8 61 62 63 /* Set to TRUE to enable the slist optimization. If this field is TRUE, 64 * the slist is disabled whenever a flush is not in progress. 65 */ 66 #define H5C__SLIST_OPT_ENABLED TRUE 67 68 69 /**************************************************************************** 70 * 71 * We maintain doubly linked lists of instances of H5C_cache_entry_t for a 72 * variety of reasons -- protected list, LRU list, and the clean and dirty 73 * LRU lists at present. The following macros support linking and unlinking 74 * of instances of H5C_cache_entry_t by both their regular and auxiliary next 75 * and previous pointers. 76 * 77 * The size and length fields are also maintained. 78 * 79 * Note that the relevant pair of prev and next pointers are presumed to be 80 * NULL on entry in the insertion macros. 81 * 82 * Finally, observe that the sanity checking macros evaluate to the empty 83 * string when H5C_DO_SANITY_CHECKS is FALSE. They also contain calls 84 * to the HGOTO_ERROR macro, which may not be appropriate in all cases. 85 * If so, we will need versions of the insertion and deletion macros which 86 * do not reference the sanity checking macros. 
87 * JRM - 5/5/04 88 * 89 * Changes: 90 * 91 * - Removed the line: 92 * 93 * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) || 94 * 95 * from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the 96 * epoch markers used in the age out based cache size reduction algorithm, 97 * this invariant need not hold, as the epoch markers are of size 0. 98 * 99 * One could argue that I should have given the epoch markers a positive 100 * size, but this would break the index_size = LRU_list_size + pl_size 101 * + pel_size invariant. 102 * 103 * Alternatively, I could pass the current decr_mode in to the macro, 104 * and just skip the check whenever epoch markers may be in use. 105 * 106 * However, any size errors should be caught when the cache is flushed 107 * and destroyed. Until we are tracking such an error, this should be 108 * good enough. 109 * JRM - 12/9/04 110 * 111 * 112 * - In the H5C__DLL_PRE_INSERT_SC macro, replaced the lines: 113 * 114 * ( ( (len) == 1 ) && 115 * ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || 116 * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) 117 * ) 118 * ) || 119 * 120 * with: 121 * 122 * ( ( (len) == 1 ) && 123 * ( ( (head_ptr) != (tail_ptr) ) || 124 * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) 125 * ) 126 * ) || 127 * 128 * Epoch markers have size 0, so we can now have a non-empty list with 129 * zero size. Hence the "( (Size) <= 0 )" clause cause false failures 130 * in the sanity check. Since "Size" is typically a size_t, it can't 131 * take on negative values, and thus the revised clause "( (Size) < 0 )" 132 * caused compiler warnings. 
133 * JRM - 12/22/04 134 * 135 * - In the H5C__DLL_SC macro, replaced the lines: 136 * 137 * ( ( (len) == 1 ) && 138 * ( ( (head_ptr) != (tail_ptr) ) || ( (cache_ptr)->size <= 0 ) || 139 * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) 140 * ) 141 * ) || 142 * 143 * with 144 * 145 * ( ( (len) == 1 ) && 146 * ( ( (head_ptr) != (tail_ptr) ) || 147 * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) 148 * ) 149 * ) || 150 * 151 * Epoch markers have size 0, so we can now have a non-empty list with 152 * zero size. Hence the "( (Size) <= 0 )" clause cause false failures 153 * in the sanity check. Since "Size" is typically a size_t, it can't 154 * take on negative values, and thus the revised clause "( (Size) < 0 )" 155 * caused compiler warnings. 156 * JRM - 1/10/05 157 * 158 * - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated 159 * sanity checking macros. These macro are used to update the size of 160 * a DLL when one of its entries changes size. 161 * 162 * JRM - 9/8/05 163 * 164 * - Added macros supporting the index list -- a doubly liked list of 165 * all entries in the index. This list is necessary to reduce the 166 * cost of visiting all entries in the cache, which was previously 167 * done via a scan of the hash table. 168 * 169 * JRM - 10/15/15 170 * 171 ****************************************************************************/ 172 173 #if H5C_DO_SANITY_CHECKS 174 175 #define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 176 if ( ( (head_ptr) == NULL ) || \ 177 ( (tail_ptr) == NULL ) || \ 178 ( (entry_ptr) == NULL ) || \ 179 ( (len) <= 0 ) || \ 180 ( (Size) < (entry_ptr)->size ) || \ 181 ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \ 182 ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ 183 ( ( (len) == 1 ) && \ 184 ( ! 
( ( (head_ptr) == (entry_ptr) ) && \ 185 ( (tail_ptr) == (entry_ptr) ) && \ 186 ( (entry_ptr)->next == NULL ) && \ 187 ( (entry_ptr)->prev == NULL ) && \ 188 ( (Size) == (entry_ptr)->size ) \ 189 ) \ 190 ) \ 191 ) \ 192 ) { \ 193 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre remove SC failed") \ 194 } 195 196 #define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ 197 if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ 198 ( (head_ptr) != (tail_ptr) ) \ 199 ) || \ 200 ( (len) < 0 ) || \ 201 ( (Size) < 0 ) || \ 202 ( ( (len) == 1 ) && \ 203 ( ( (head_ptr) != (tail_ptr) ) || \ 204 ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ 205 ) \ 206 ) || \ 207 ( ( (len) >= 1 ) && \ 208 ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \ 209 ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \ 210 ) \ 211 ) \ 212 ) { \ 213 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL sanity check failed") \ 214 } 215 216 #define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 217 if ( ( (entry_ptr) == NULL ) || \ 218 ( (entry_ptr)->next != NULL ) || \ 219 ( (entry_ptr)->prev != NULL ) || \ 220 ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ 221 ( (head_ptr) != (tail_ptr) ) \ 222 ) || \ 223 ( ( (len) == 1 ) && \ 224 ( ( (head_ptr) != (tail_ptr) ) || \ 225 ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ 226 ) \ 227 ) || \ 228 ( ( (len) >= 1 ) && \ 229 ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \ 230 ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \ 231 ) \ 232 ) \ 233 ) { \ 234 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre insert SC failed") \ 235 } 236 237 #define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ 238 if ( ( (dll_len) <= 0 ) || \ 239 ( (dll_size) <= 0 ) || \ 240 ( (old_size) <= 0 ) || \ 241 ( (old_size) > (dll_size) ) || \ 242 ( (new_size) <= 0 ) || \ 243 ( ( (dll_len) == 1 ) && ( (old_size) != (dll_size) ) ) ) { \ 244 HGOTO_ERROR(H5E_CACHE, 
H5E_SYSTEM, FAIL, "DLL pre size update SC failed") \ 245 } 246 247 #define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ 248 if ( ( (new_size) > (dll_size) ) || \ 249 ( ( (dll_len) == 1 ) && ( (new_size) != (dll_size) ) ) ) { \ 250 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL post size update SC failed") \ 251 } 252 253 #else /* H5C_DO_SANITY_CHECKS */ 254 255 #define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) 256 #define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) 257 #define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) 258 #define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) 259 #define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) 260 261 #endif /* H5C_DO_SANITY_CHECKS */ 262 263 264 #define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ 265 { \ 266 H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ 267 fail_val) \ 268 if ( (head_ptr) == NULL ) \ 269 { \ 270 (head_ptr) = (entry_ptr); \ 271 (tail_ptr) = (entry_ptr); \ 272 } \ 273 else \ 274 { \ 275 (tail_ptr)->next = (entry_ptr); \ 276 (entry_ptr)->prev = (tail_ptr); \ 277 (tail_ptr) = (entry_ptr); \ 278 } \ 279 (len)++; \ 280 (Size) += (entry_ptr)->size; \ 281 } /* H5C__DLL_APPEND() */ 282 283 #define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ 284 { \ 285 H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ 286 fail_val) \ 287 if ( (head_ptr) == NULL ) \ 288 { \ 289 (head_ptr) = (entry_ptr); \ 290 (tail_ptr) = (entry_ptr); \ 291 } \ 292 else \ 293 { \ 294 (head_ptr)->prev = (entry_ptr); \ 295 (entry_ptr)->next = (head_ptr); \ 296 (head_ptr) = (entry_ptr); \ 297 } \ 298 (len)++; \ 299 (Size) += entry_ptr->size; \ 300 } /* H5C__DLL_PREPEND() */ 301 302 #define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ 303 { \ 304 H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ 305 
fail_val) \ 306 { \ 307 if ( (head_ptr) == (entry_ptr) ) \ 308 { \ 309 (head_ptr) = (entry_ptr)->next; \ 310 if ( (head_ptr) != NULL ) \ 311 (head_ptr)->prev = NULL; \ 312 } \ 313 else \ 314 (entry_ptr)->prev->next = (entry_ptr)->next; \ 315 if ( (tail_ptr) == (entry_ptr) ) \ 316 { \ 317 (tail_ptr) = (entry_ptr)->prev; \ 318 if ( (tail_ptr) != NULL ) \ 319 (tail_ptr)->next = NULL; \ 320 } \ 321 else \ 322 (entry_ptr)->next->prev = (entry_ptr)->prev; \ 323 entry_ptr->next = NULL; \ 324 entry_ptr->prev = NULL; \ 325 (len)--; \ 326 (Size) -= entry_ptr->size; \ 327 } \ 328 } /* H5C__DLL_REMOVE() */ 329 330 #define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size) \ 331 { \ 332 H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ 333 (dll_size) -= (old_size); \ 334 (dll_size) += (new_size); \ 335 H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ 336 } /* H5C__DLL_UPDATE_FOR_SIZE_CHANGE() */ 337 338 #if H5C_DO_SANITY_CHECKS 339 340 #define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ 341 if ( ( (hd_ptr) == NULL ) || \ 342 ( (tail_ptr) == NULL ) || \ 343 ( (entry_ptr) == NULL ) || \ 344 ( (len) <= 0 ) || \ 345 ( (Size) < (entry_ptr)->size ) || \ 346 ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ 347 ( ( (entry_ptr)->aux_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ 348 ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ 349 ( ( (len) == 1 ) && \ 350 ( ! 
( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ 351 ( (entry_ptr)->aux_next == NULL ) && \ 352 ( (entry_ptr)->aux_prev == NULL ) && \ 353 ( (Size) == (entry_ptr)->size ) \ 354 ) \ 355 ) \ 356 ) \ 357 ) { \ 358 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "aux DLL pre remove SC failed") \ 359 } 360 361 #define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ 362 if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ 363 ( (head_ptr) != (tail_ptr) ) \ 364 ) || \ 365 ( (len) < 0 ) || \ 366 ( (Size) < 0 ) || \ 367 ( ( (len) == 1 ) && \ 368 ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ 369 ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ 370 ) \ 371 ) || \ 372 ( ( (len) >= 1 ) && \ 373 ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) || \ 374 ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \ 375 ) \ 376 ) \ 377 ) { \ 378 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL sanity check failed") \ 379 } 380 381 #define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ 382 if ( ( (entry_ptr) == NULL ) || \ 383 ( (entry_ptr)->aux_next != NULL ) || \ 384 ( (entry_ptr)->aux_prev != NULL ) || \ 385 ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ 386 ( (hd_ptr) != (tail_ptr) ) \ 387 ) || \ 388 ( ( (len) == 1 ) && \ 389 ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ 390 ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \ 391 ) \ 392 ) || \ 393 ( ( (len) >= 1 ) && \ 394 ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->aux_prev != NULL ) || \ 395 ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \ 396 ) \ 397 ) \ 398 ) { \ 399 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL pre insert SC failed") \ 400 } 401 402 #else /* H5C_DO_SANITY_CHECKS */ 403 404 #define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) 405 #define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) 406 #define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, 
fv) 407 408 #endif /* H5C_DO_SANITY_CHECKS */ 409 410 411 #define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\ 412 { \ 413 H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ 414 fail_val) \ 415 if ( (head_ptr) == NULL ) \ 416 { \ 417 (head_ptr) = (entry_ptr); \ 418 (tail_ptr) = (entry_ptr); \ 419 } \ 420 else \ 421 { \ 422 (tail_ptr)->aux_next = (entry_ptr); \ 423 (entry_ptr)->aux_prev = (tail_ptr); \ 424 (tail_ptr) = (entry_ptr); \ 425 } \ 426 (len)++; \ 427 (Size) += entry_ptr->size; \ 428 } /* H5C__AUX_DLL_APPEND() */ 429 430 #define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 431 { \ 432 H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 433 if ( (head_ptr) == NULL ) \ 434 { \ 435 (head_ptr) = (entry_ptr); \ 436 (tail_ptr) = (entry_ptr); \ 437 } \ 438 else \ 439 { \ 440 (head_ptr)->aux_prev = (entry_ptr); \ 441 (entry_ptr)->aux_next = (head_ptr); \ 442 (head_ptr) = (entry_ptr); \ 443 } \ 444 (len)++; \ 445 (Size) += entry_ptr->size; \ 446 } /* H5C__AUX_DLL_PREPEND() */ 447 448 #define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 449 { \ 450 H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 451 { \ 452 if ( (head_ptr) == (entry_ptr) ) \ 453 { \ 454 (head_ptr) = (entry_ptr)->aux_next; \ 455 if ( (head_ptr) != NULL ) \ 456 (head_ptr)->aux_prev = NULL; \ 457 } \ 458 else \ 459 (entry_ptr)->aux_prev->aux_next = (entry_ptr)->aux_next; \ 460 if ( (tail_ptr) == (entry_ptr) ) \ 461 { \ 462 (tail_ptr) = (entry_ptr)->aux_prev; \ 463 if ( (tail_ptr) != NULL ) \ 464 (tail_ptr)->aux_next = NULL; \ 465 } \ 466 else \ 467 (entry_ptr)->aux_next->aux_prev = (entry_ptr)->aux_prev; \ 468 entry_ptr->aux_next = NULL; \ 469 entry_ptr->aux_prev = NULL; \ 470 (len)--; \ 471 (Size) -= entry_ptr->size; \ 472 } \ 473 } /* H5C__AUX_DLL_REMOVE() */ 474 475 #if H5C_DO_SANITY_CHECKS 476 477 #define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, 
tail_ptr, len, Size, fv) \ 478 if ( ( (hd_ptr) == NULL ) || \ 479 ( (tail_ptr) == NULL ) || \ 480 ( (entry_ptr) == NULL ) || \ 481 ( (len) <= 0 ) || \ 482 ( (Size) < (entry_ptr)->size ) || \ 483 ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ 484 ( ( (entry_ptr)->il_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ 485 ( ( (entry_ptr)->il_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ 486 ( ( (len) == 1 ) && \ 487 ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ 488 ( (entry_ptr)->il_next == NULL ) && \ 489 ( (entry_ptr)->il_prev == NULL ) && \ 490 ( (Size) == (entry_ptr)->size ) \ 491 ) \ 492 ) \ 493 ) \ 494 ) { \ 495 HDassert(0 && "il DLL pre remove SC failed"); \ 496 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "il DLL pre remove SC failed") \ 497 } 498 499 #define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ 500 if ( ( (entry_ptr) == NULL ) || \ 501 ( (entry_ptr)->il_next != NULL ) || \ 502 ( (entry_ptr)->il_prev != NULL ) || \ 503 ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ 504 ( (hd_ptr) != (tail_ptr) ) \ 505 ) || \ 506 ( ( (len) == 1 ) && \ 507 ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ 508 ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \ 509 ) \ 510 ) || \ 511 ( ( (len) >= 1 ) && \ 512 ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->il_prev != NULL ) || \ 513 ( (tail_ptr) == NULL ) || ( (tail_ptr)->il_next != NULL ) \ 514 ) \ 515 ) \ 516 ) { \ 517 HDassert(0 && "IL DLL pre insert SC failed"); \ 518 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL pre insert SC failed") \ 519 } 520 521 #define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ 522 if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ 523 ( (head_ptr) != (tail_ptr) ) \ 524 ) || \ 525 ( ( (len) == 1 ) && \ 526 ( ( (head_ptr) != (tail_ptr) ) || \ 527 ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ 528 ) \ 529 ) || \ 530 ( ( (len) >= 1 ) && \ 531 ( ( (head_ptr) == NULL ) || ( 
(head_ptr)->il_prev != NULL ) || \ 532 ( (tail_ptr) == NULL ) || ( (tail_ptr)->il_next != NULL ) \ 533 ) \ 534 ) \ 535 ) { \ 536 HDassert(0 && "IL DLL sanity check failed"); \ 537 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL sanity check failed") \ 538 } 539 540 #else /* H5C_DO_SANITY_CHECKS */ 541 542 #define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) 543 #define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) 544 #define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) 545 546 #endif /* H5C_DO_SANITY_CHECKS */ 547 548 549 #define H5C__IL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\ 550 { \ 551 H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ 552 fail_val) \ 553 if ( (head_ptr) == NULL ) \ 554 { \ 555 (head_ptr) = (entry_ptr); \ 556 (tail_ptr) = (entry_ptr); \ 557 } \ 558 else \ 559 { \ 560 (tail_ptr)->il_next = (entry_ptr); \ 561 (entry_ptr)->il_prev = (tail_ptr); \ 562 (tail_ptr) = (entry_ptr); \ 563 } \ 564 (len)++; \ 565 (Size) += entry_ptr->size; \ 566 H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fail_val) \ 567 } /* H5C__IL_DLL_APPEND() */ 568 569 #define H5C__IL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 570 { \ 571 H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 572 { \ 573 if ( (head_ptr) == (entry_ptr) ) \ 574 { \ 575 (head_ptr) = (entry_ptr)->il_next; \ 576 if ( (head_ptr) != NULL ) \ 577 (head_ptr)->il_prev = NULL; \ 578 } \ 579 else \ 580 (entry_ptr)->il_prev->il_next = (entry_ptr)->il_next; \ 581 if ( (tail_ptr) == (entry_ptr) ) \ 582 { \ 583 (tail_ptr) = (entry_ptr)->il_prev; \ 584 if ( (tail_ptr) != NULL ) \ 585 (tail_ptr)->il_next = NULL; \ 586 } \ 587 else \ 588 (entry_ptr)->il_next->il_prev = (entry_ptr)->il_prev; \ 589 entry_ptr->il_next = NULL; \ 590 entry_ptr->il_prev = NULL; \ 591 (len)--; \ 592 (Size) -= entry_ptr->size; \ 593 } \ 594 H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ 595 } /* 
H5C__IL_DLL_REMOVE() */ 596 597 598 /*********************************************************************** 599 * 600 * Stats collection macros 601 * 602 * The following macros must handle stats collection when this collection 603 * is enabled, and evaluate to the empty string when it is not. 604 * 605 * The sole exception to this rule is 606 * H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as 607 * the cache hit rate stats are always collected and available. 608 * 609 ***********************************************************************/ 610 611 #define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \ 612 (cache_ptr->cache_accesses)++; \ 613 if ( hit ) { \ 614 (cache_ptr->cache_hits)++; \ 615 } \ 616 617 #if H5C_COLLECT_CACHE_STATS 618 619 #define H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ 620 if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \ 621 (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ 622 if ( (cache_ptr)->clean_index_size > \ 623 (cache_ptr)->max_clean_index_size ) \ 624 (cache_ptr)->max_clean_index_size = \ 625 (cache_ptr)->clean_index_size; \ 626 if ( (cache_ptr)->dirty_index_size > \ 627 (cache_ptr)->max_dirty_index_size ) \ 628 (cache_ptr)->max_dirty_index_size = \ 629 (cache_ptr)->dirty_index_size; 630 631 #define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) \ 632 (((cache_ptr)->dirty_pins)[(entry_ptr)->type->id])++; 633 634 #define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) \ 635 if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ 636 (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ 637 if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ 638 (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ 639 if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ 640 (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ 641 if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ 642 (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; 643 644 #define 
H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \ 645 if ( cache_ptr->flush_in_progress ) \ 646 ((cache_ptr)->cache_flush_moves[(entry_ptr)->type->id])++; \ 647 if ( entry_ptr->flush_in_progress ) \ 648 ((cache_ptr)->entry_flush_moves[(entry_ptr)->type->id])++; \ 649 (((cache_ptr)->moves)[(entry_ptr)->type->id])++; \ 650 (cache_ptr)->entries_relocated_counter++; 651 652 #define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\ 653 if ( cache_ptr->flush_in_progress ) \ 654 ((cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id])++; \ 655 if ( entry_ptr->flush_in_progress ) \ 656 ((cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id])++; \ 657 if ( (entry_ptr)->size < (new_size) ) { \ 658 ((cache_ptr)->size_increases[(entry_ptr)->type->id])++; \ 659 H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ 660 if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ 661 (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ 662 if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ 663 (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ 664 } else if ( (entry_ptr)->size > (new_size) ) { \ 665 ((cache_ptr)->size_decreases[(entry_ptr)->type->id])++; \ 666 } 667 668 #define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \ 669 (cache_ptr)->total_ht_insertions++; 670 671 #define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \ 672 (cache_ptr)->total_ht_deletions++; 673 674 #define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \ 675 if ( success ) { \ 676 (cache_ptr)->successful_ht_searches++; \ 677 (cache_ptr)->total_successful_ht_search_depth += depth; \ 678 } else { \ 679 (cache_ptr)->failed_ht_searches++; \ 680 (cache_ptr)->total_failed_ht_search_depth += depth; \ 681 } 682 683 #define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \ 684 ((cache_ptr)->unpins)[(entry_ptr)->type->id]++; 685 686 #define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) \ 687 ((cache_ptr)->slist_scan_restarts)++; 688 689 #define 
H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) \ 690 ((cache_ptr)->LRU_scan_restarts)++; 691 692 #define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) \ 693 ((cache_ptr)->index_scan_restarts)++; 694 695 #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \ 696 { \ 697 (cache_ptr)->images_created++; \ 698 } 699 700 #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \ 701 { \ 702 /* make sure image len is still good */ \ 703 HDassert((cache_ptr)->image_len > 0); \ 704 (cache_ptr)->images_read++; \ 705 } 706 707 #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \ 708 { \ 709 /* make sure image len is still good */ \ 710 HDassert((cache_ptr)->image_len > 0); \ 711 (cache_ptr)->images_loaded++; \ 712 (cache_ptr)->last_image_size = (cache_ptr)->image_len; \ 713 } 714 715 #define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \ 716 { \ 717 (cache_ptr)->prefetches++; \ 718 if ( dirty ) \ 719 (cache_ptr)->dirty_prefetches++; \ 720 } 721 722 #define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \ 723 { \ 724 (cache_ptr)->prefetch_hits++; \ 725 } 726 727 #if H5C_COLLECT_CACHE_ENTRY_STATS 728 729 #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \ 730 { \ 731 (entry_ptr)->accesses = 0; \ 732 (entry_ptr)->clears = 0; \ 733 (entry_ptr)->flushes = 0; \ 734 (entry_ptr)->pins = 0; \ 735 } 736 737 #define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ 738 { \ 739 (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ 740 if((entry_ptr)->is_pinned) \ 741 (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ 742 ((entry_ptr)->clears)++; \ 743 } 744 745 #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ 746 { \ 747 (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ 748 if((entry_ptr)->is_pinned) \ 749 (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ 750 ((entry_ptr)->flushes)++; \ 751 } 752 753 #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \ 754 { \ 755 if ( take_ownership ) \ 756 
(((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \ 757 else \ 758 (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \ 759 if ( (entry_ptr)->accesses > \ 760 ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) \ 761 ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] = \ 762 (entry_ptr)->accesses; \ 763 if ( (entry_ptr)->accesses < \ 764 ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] ) \ 765 ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] = \ 766 (entry_ptr)->accesses; \ 767 if ( (entry_ptr)->clears > \ 768 ((cache_ptr)->max_clears)[(entry_ptr)->type->id] ) \ 769 ((cache_ptr)->max_clears)[(entry_ptr)->type->id] \ 770 = (entry_ptr)->clears; \ 771 if ( (entry_ptr)->flushes > \ 772 ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] ) \ 773 ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] \ 774 = (entry_ptr)->flushes; \ 775 if ( (entry_ptr)->size > \ 776 ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ 777 ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ 778 = (entry_ptr)->size; \ 779 if ( (entry_ptr)->pins > \ 780 ((cache_ptr)->max_pins)[(entry_ptr)->type->id] ) \ 781 ((cache_ptr)->max_pins)[(entry_ptr)->type->id] \ 782 = (entry_ptr)->pins; \ 783 } 784 785 #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ 786 { \ 787 (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ 788 if ( (entry_ptr)->is_pinned ) { \ 789 (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \ 790 ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ 791 (entry_ptr)->pins++; \ 792 if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ 793 (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ 794 if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ 795 (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ 796 } \ 797 if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ 798 (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ 799 H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ 800 if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ 801 
(cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ 802 if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ 803 (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ 804 if ( (entry_ptr)->size > \ 805 ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ 806 ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ 807 = (entry_ptr)->size; \ 808 cache_ptr->entries_inserted_counter++; \ 809 } 810 811 #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ 812 { \ 813 if ( hit ) \ 814 ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ 815 else \ 816 ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ 817 if ( ! ((entry_ptr)->is_read_only) ) { \ 818 ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \ 819 } else { \ 820 ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \ 821 if ( ((entry_ptr)->ro_ref_count) > \ 822 ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \ 823 ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \ 824 ((entry_ptr)->ro_ref_count); \ 825 } \ 826 if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ 827 (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ 828 H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ 829 if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ 830 (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ 831 if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ 832 (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ 833 if ( (entry_ptr)->size > \ 834 ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ 835 ((cache_ptr)->max_size)[(entry_ptr)->type->id] = (entry_ptr)->size; \ 836 ((entry_ptr)->accesses)++; \ 837 } 838 839 #define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ 840 { \ 841 ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ 842 (entry_ptr)->pins++; \ 843 if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ 844 (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ 845 if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ 846 (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; 
\ 847 } 848 849 #else /* H5C_COLLECT_CACHE_ENTRY_STATS */ 850 851 #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) 852 853 #define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ 854 { \ 855 (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ 856 if((entry_ptr)->is_pinned) \ 857 (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ 858 } 859 860 #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ 861 { \ 862 (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ 863 if ( (entry_ptr)->is_pinned ) \ 864 (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ 865 } 866 867 #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \ 868 { \ 869 if ( take_ownership ) \ 870 (((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \ 871 else \ 872 (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \ 873 } 874 875 #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ 876 { \ 877 (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ 878 if ( (entry_ptr)->is_pinned ) { \ 879 (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \ 880 ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ 881 if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ 882 (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ 883 if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ 884 (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ 885 } \ 886 if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ 887 (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ 888 H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ 889 if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ 890 (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ 891 if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ 892 (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ 893 cache_ptr->entries_inserted_counter++; \ 894 } 895 896 #define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ 897 { \ 898 if ( hit ) \ 899 
        ((cache_ptr)->hits)[(entry_ptr)->type->id]++;                   \
    else                                                                \
        ((cache_ptr)->misses)[(entry_ptr)->type->id]++;                 \
    if ( ! ((entry_ptr)->is_read_only) )                                \
        ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++;         \
    else {                                                              \
        ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++;          \
        if ( ((entry_ptr)->ro_ref_count) >                              \
                ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \
            ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] =   \
                ((entry_ptr)->ro_ref_count);                            \
    }                                                                   \
    if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len )          \
        (cache_ptr)->max_index_len = (cache_ptr)->index_len;            \
    H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr)                         \
    if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len )                \
        (cache_ptr)->max_pl_len = (cache_ptr)->pl_len;                  \
    if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size )              \
        (cache_ptr)->max_pl_size = (cache_ptr)->pl_size;                \
}

/* Count a pin by entry type and update pinned-entry-list high water marks. */
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)                 \
{                                                                       \
    ((cache_ptr)->pins)[(entry_ptr)->type->id]++;                       \
    if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len )              \
        (cache_ptr)->max_pel_len = (cache_ptr)->pel_len;                \
    if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size )            \
        (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;              \
}

#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */

#else /* H5C_COLLECT_CACHE_STATS */

/* Cache stats collection is disabled: all stats macros are no-ops. */
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)
#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr)
#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr)
#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth)
#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)
#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty)
#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)

#endif /* H5C_COLLECT_CACHE_STATS */


/***********************************************************************
 *
 * Hash table access and manipulation macros:
 *
 * The following macros handle searches, insertions, and deletion in
 * the hash table.
 *
 * When modifying these macros, remember to modify the similar macros
 * in test/cache.c
 *
 * Changes:
 *
 *   - Updated existing index macros and sanity check macros to maintain
 *     the clean_index_size and dirty_index_size fields of H5C_t.  Also
 *     added macros to allow us to track entry cleans and dirties.
 *
 *                                              JRM -- 11/5/08
 *
 *   - Updated existing index macros and sanity check macros to maintain
 *     the index_ring_len, index_ring_size, clean_index_ring_size, and
 *     dirty_index_ring_size fields of H5C_t.
 *
 *                                              JRM -- 9/1/15
 *
 *   - Updated existing index macros and sanity checks macros to
 *     maintain a doubly linked list of all entries in the index.
 *     This is necessary to reduce the computational cost of visiting
 *     all entries in the index, which used to be done by scanning
 *     the hash table.
 *
 *                                              JRM -- 10/15/15
 *
 ***********************************************************************/

/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h.  It must be a power of two. */

#define H5C__HASH_MASK          ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3)

#define H5C__HASH_FCN(x)        (int)((unsigned)((x) & H5C__HASH_MASK) >> 3)

#if H5C_DO_SANITY_CHECKS

/* Verify cache- and entry-level invariants before inserting an entry in
 * the hash table: valid cache magic, defined address, detached hash chain
 * pointers, and consistent index / ring length & size bookkeeping.
 * Asserts and reports an error on any violation.
 */
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)           \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) ||                      \
     ( (entry_ptr) == NULL ) ||                                         \
     ( ! H5F_addr_defined((entry_ptr)->addr) ) ||                       \
     ( (entry_ptr)->ht_next != NULL ) ||                                \
     ( (entry_ptr)->ht_prev != NULL ) ||                                \
     ( (entry_ptr)->size <= 0 ) ||                                      \
     ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) ||                        \
     ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) ||     \
     ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size +                                 \
        (cache_ptr)->dirty_index_size) ) ||                             \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||   \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||   \
     ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) ||                     \
     ( (entry_ptr)->ring >= H5C_RING_NTYPES ) ||                        \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                 \
       (cache_ptr)->index_len ) ||                                      \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                \
       (cache_ptr)->index_size ) ||                                     \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=               \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +         \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ||     \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) ||               \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) {            \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
}

/* Verify cache-level invariants immediately after a hash table insertion. */
#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)          \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) ||                      \
     ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size +                                 \
        (cache_ptr)->dirty_index_size) ) ||                             \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||   \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||   \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 ) ||         \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                 \
       (cache_ptr)->index_len ) ||                                      \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                \
       (cache_ptr)->index_size ) ||                                     \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=               \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +         \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ||     \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) ||               \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size) ) {             \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
}

/* Verify cache- and entry-level invariants before removing an entry from
 * the hash table -- in particular that the entry is actually reachable
 * from its hash bucket (head of chain iff ht_prev is NULL).
 */
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)                     \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) ||                      \
     ( (cache_ptr)->index_len < 1 ) ||                                  \
     ( (entry_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->index_size < (entry_ptr)->size ) ||                 \
     ( !
H5F_addr_defined((entry_ptr)->addr) ) ||                                \
     ( (entry_ptr)->size <= 0 ) ||                                      \
     ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) ||                        \
     ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) ||     \
     ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))]         \
       == NULL ) ||                                                     \
     ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))]       \
       != (entry_ptr) ) &&                                              \
       ( (entry_ptr)->ht_prev == NULL ) ) ||                            \
     ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] ==    \
       (entry_ptr) ) &&                                                 \
       ( (entry_ptr)->ht_prev != NULL ) ) ||                            \
     ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size +                                 \
        (cache_ptr)->dirty_index_size) ) ||                             \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||   \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||   \
     ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) ||                     \
     ( (entry_ptr)->ring >= H5C_RING_NTYPES ) ||                        \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) ||         \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                 \
       (cache_ptr)->index_len ) ||                                      \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] <                \
       (entry_ptr)->size ) ||                                           \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                \
       (cache_ptr)->index_size ) ||                                     \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=               \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +         \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ||     \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) ||               \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) {            \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \
}

/* Verify cache- and entry-level invariants immediately after removing an
 * entry from the hash table.  Both hash chain pointers of the removed
 * entry must have been reset to NULL (H5C__DELETE_FROM_INDEX does this).
 *
 * FIX: the original macro tested (entry_ptr)->ht_prev twice and never
 * tested ht_next -- an obvious copy/paste error.  The first test is
 * corrected to ht_next so a stale forward chain pointer is caught too.
 */
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr)                    \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) ||                      \
     ( (entry_ptr) == NULL ) ||                                         \
     ( ! H5F_addr_defined((entry_ptr)->addr) ) ||                       \
     ( (entry_ptr)->size <= 0 ) ||                                      \
     ( (entry_ptr)->ht_next != NULL ) ||                                \
     ( (entry_ptr)->ht_prev != NULL ) ||                                \
     ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size +                                 \
        (cache_ptr)->dirty_index_size) ) ||                             \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||   \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||   \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                 \
       (cache_ptr)->index_len ) ||                                      \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                \
       (cache_ptr)->index_size ) ||                                     \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=               \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +         \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ||     \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) ||               \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) {            \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT remove SC failed") \
}

/* Verify cache-level invariants before a hash table search.
 * (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK)
 */
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)                \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) ||                      \
     ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
     ( !
H5F_addr_defined(Addr) ) ||                                             \
     ( H5C__HASH_FCN(Addr) < 0 ) ||                                     \
     ( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) {                 \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT search SC failed") \
}

/* Verify entry and hash chain invariants after a successful search.
 * (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK)
 */
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val)   \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) ||                      \
     ( (cache_ptr)->index_len < 1 ) ||                                  \
     ( (entry_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->index_size < (entry_ptr)->size ) ||                 \
     ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
     ( (entry_ptr)->size <= 0 ) ||                                      \
     ( ((cache_ptr)->index)[k] == NULL ) ||                             \
     ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) &&                    \
       ( (entry_ptr)->ht_prev == NULL ) ) ||                            \
     ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) &&                    \
       ( (entry_ptr)->ht_prev != NULL ) ) ||                            \
     ( ( (entry_ptr)->ht_prev != NULL ) &&                              \
       ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) ||            \
     ( ( (entry_ptr)->ht_next != NULL ) &&                              \
       ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) {           \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \
}

/* Verify the found entry was moved to the head of its hash chain.
 * (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK)
 */
#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)  \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( ((cache_ptr)->index)[k] != (entry_ptr) ) ||                      \
     ( (entry_ptr)->ht_prev != NULL ) ) {                               \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \
}

/* Verify index bookkeeping invariants before applying an entry size
 * change; was_clean selects which of the clean/dirty totals must still
 * cover old_size.
 */
#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                         entry_ptr, was_clean)          \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->index_len <= 0 ) ||                                 \
     ( (cache_ptr)->index_size <= 0 ) ||                                \
     ( (new_size) <= 0 ) ||                                             \
     ( (old_size) > (cache_ptr)->index_size ) ||                        \
     ( ( (cache_ptr)->index_len == 1 ) &&                               \
       ( (cache_ptr)->index_size != (old_size) ) ) ||                   \
     ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size +                                 \
        (cache_ptr)->dirty_index_size) ) ||                             \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||   \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||   \
     ( ( !( was_clean ) ||                                              \
         ( (cache_ptr)->clean_index_size < (old_size) ) ) &&            \
       ( ( (was_clean) ) ||                                             \
         ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) ||          \
     ( (entry_ptr) == NULL ) ||                                         \
     ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) ||                     \
     ( (entry_ptr)->ring >= H5C_RING_NTYPES ) ||                        \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) ||         \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                 \
       (cache_ptr)->index_len ) ||                                      \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                \
       (cache_ptr)->index_size ) ||                                     \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=               \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +         \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ||     \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) ||               \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) {            \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \
}

/* Verify index bookkeeping invariants after applying an entry size
 * change; the entry's current is_dirty flag selects which of the
 * clean/dirty totals must now cover new_size.
 */
#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                          entry_ptr)                    \
if ( ( (cache_ptr) == NULL ) ||                                         \
     ( (cache_ptr)->index_len <= 0 ) ||                                 \
     ( (cache_ptr)->index_size <= 0 ) ||                                \
     ( (new_size) > (cache_ptr)->index_size ) ||                        \
     ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size +                                 \
        (cache_ptr)->dirty_index_size) ) ||                             \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||   \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||   \
     ( ( !((entry_ptr)->is_dirty ) ||                                   \
         ( (cache_ptr)->dirty_index_size < (new_size) ) ) &&            \
       ( ( ((entry_ptr)->is_dirty) ) ||                                 \
         ( (cache_ptr)->clean_index_size < (new_size) ) ) ) ||          \
     ( ( (cache_ptr)->index_len == 1 ) &&                               \
       ( (cache_ptr)->index_size != (new_size) ) ) ||                   \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                 \
       (cache_ptr)->index_len ) ||                                      \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                \
       (cache_ptr)->index_size ) ||                                     \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=               \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +         \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ||     \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) ||               \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) {            \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \
}

/* Verify invariants before moving an entry's size from the dirty to the
 * clean index totals.  Note the check requires is_dirty == FALSE while
 * the dirty totals still cover the entry -- i.e. the entry's flag has
 * been cleared but the index size bookkeeping not yet updated.
 */
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)     \
if (                                                                    \
    ( (cache_ptr) == NULL ) ||                                          \
    ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) ||                       \
    ( (cache_ptr)->index_len <= 0 ) ||                                  \
    ( (entry_ptr) == NULL ) ||                                          \
    ( (entry_ptr)->is_dirty != FALSE ) ||                               \
    ( (cache_ptr)->index_size < (entry_ptr)->size ) ||                  \
    ( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) ||            \
    ( (cache_ptr)->index_size !=                                        \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
    ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||    \
    ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||    \
    ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) ||                      \
    ( (entry_ptr)->ring >= H5C_RING_NTYPES ) ||                         \
    ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) ||          \
    ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                  \
      (cache_ptr)->index_len ) ||                                       \
    ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                 \
      (cache_ptr)->index_size ) ||                                      \
    ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=                \
      ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +          \
       (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) {     \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \
}

/* Verify invariants before moving an entry's size from the clean to the
 * dirty index totals (mirror image of the "clean" macro above: is_dirty
 * must already be TRUE while the clean totals still cover the entry).
 */
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)     \
if (                                                                    \
    ( (cache_ptr) == NULL ) ||                                          \
    ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) ||                       \
    ( (cache_ptr)->index_len <= 0 ) ||                                  \
    ( (entry_ptr) == NULL ) ||                                          \
    ( (entry_ptr)->is_dirty != TRUE ) ||                                \
    ( (cache_ptr)->index_size < (entry_ptr)->size ) ||                  \
    ( (cache_ptr)->clean_index_size < (entry_ptr)->size ) ||            \
    ( (cache_ptr)->index_size !=                                        \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
    ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||    \
    ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||    \
    ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) ||                      \
    ( (entry_ptr)->ring >= H5C_RING_NTYPES ) ||                         \
    ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) ||          \
    ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                  \
      (cache_ptr)->index_len ) ||                                       \
    ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                 \
      (cache_ptr)->index_size ) ||                                      \
    ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=                \
      ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +          \
       (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) {     \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \
}

/* Verify clean/dirty bookkeeping consistency after the clean update. */
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)    \
if ( ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||   \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||   \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                 \
       (cache_ptr)->index_len ) ||                                      \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                \
       (cache_ptr)->index_size ) ||                                     \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=               \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +         \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) {    \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \
}

/* Verify clean/dirty bookkeeping consistency after the dirty update. */
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)    \
if ( ( (cache_ptr)->index_size !=                                       \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) ||   \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ||   \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] >                 \
       (cache_ptr)->index_len ) ||                                      \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] >                \
       (cache_ptr)->index_size ) ||                                     \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] !=               \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] +         \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) {    \
    HDassert(FALSE);                                                    \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \
}

#else /* H5C_DO_SANITY_CHECKS */

/* Sanity checking disabled: all hash table SC macros are no-ops. */
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val)
#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)
#define
H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                         entry_ptr, was_clean)
#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                          entry_ptr)
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)

#endif /* H5C_DO_SANITY_CHECKS */


/* Insert an entry at the head of its hash bucket chain, update index
 * length/size totals (overall, per-ring, and clean/dirty), track
 * flush_me_last entries, and append the entry to the index list (il).
 */
#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val)            \
{                                                                       \
    int k;                                                              \
    H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)               \
    k = H5C__HASH_FCN((entry_ptr)->addr);                               \
    if(((cache_ptr)->index)[k] != NULL) {                               \
        (entry_ptr)->ht_next = ((cache_ptr)->index)[k];                 \
        (entry_ptr)->ht_next->ht_prev = (entry_ptr);                    \
    }                                                                   \
    ((cache_ptr)->index)[k] = (entry_ptr);                              \
    (cache_ptr)->index_len++;                                           \
    (cache_ptr)->index_size += (entry_ptr)->size;                       \
    ((cache_ptr)->index_ring_len[entry_ptr->ring])++;                   \
    ((cache_ptr)->index_ring_size[entry_ptr->ring])                     \
        += (entry_ptr)->size;                                           \
    if((entry_ptr)->is_dirty) {                                         \
        (cache_ptr)->dirty_index_size += (entry_ptr)->size;             \
        ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])           \
            += (entry_ptr)->size;                                       \
    } else {                                                            \
        (cache_ptr)->clean_index_size += (entry_ptr)->size;             \
        ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])           \
            += (entry_ptr)->size;                                       \
    }                                                                   \
    if((entry_ptr)->flush_me_last) {                                    \
        (cache_ptr)->num_last_entries++;                                \
        HDassert((cache_ptr)->num_last_entries <= 2);                   \
    }                                                                   \
    H5C__IL_DLL_APPEND((entry_ptr), (cache_ptr)->il_head,               \
                       (cache_ptr)->il_tail, (cache_ptr)->il_len,       \
                       (cache_ptr)->il_size, fail_val)                  \
    H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr)                       \
    H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)              \
}

/* Unlink an entry from its hash bucket chain, NULL its chain pointers,
 * update all index totals (mirror image of H5C__INSERT_IN_INDEX), and
 * remove the entry from the index list (il).
 */
#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, fail_val)          \
{                                                                       \
    int k;                                                              \
    H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)                         \
    k = H5C__HASH_FCN((entry_ptr)->addr);                               \
    if((entry_ptr)->ht_next)                                            \
        (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev;           \
    if((entry_ptr)->ht_prev)                                            \
        (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next;           \
    if(((cache_ptr)->index)[k] == (entry_ptr))                          \
        ((cache_ptr)->index)[k] = (entry_ptr)->ht_next;                 \
    (entry_ptr)->ht_next = NULL;                                        \
    (entry_ptr)->ht_prev = NULL;                                        \
    (cache_ptr)->index_len--;                                           \
    (cache_ptr)->index_size -= (entry_ptr)->size;                       \
    ((cache_ptr)->index_ring_len[entry_ptr->ring])--;                   \
    ((cache_ptr)->index_ring_size[entry_ptr->ring])                     \
        -= (entry_ptr)->size;                                           \
    if((entry_ptr)->is_dirty) {                                         \
        (cache_ptr)->dirty_index_size -= (entry_ptr)->size;             \
        ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])           \
            -= (entry_ptr)->size;                                       \
    } else {                                                            \
        (cache_ptr)->clean_index_size -= (entry_ptr)->size;             \
        ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])           \
            -= (entry_ptr)->size;                                       \
    }                                                                   \
    if((entry_ptr)->flush_me_last) {                                    \
        (cache_ptr)->num_last_entries--;                                \
        HDassert((cache_ptr)->num_last_entries <= 1);                   \
    }                                                                   \
    H5C__IL_DLL_REMOVE((entry_ptr), (cache_ptr)->il_head,               \
                       (cache_ptr)->il_tail, (cache_ptr)->il_len,       \
                       (cache_ptr)->il_size, fail_val)                  \
    H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr)                        \
    H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr)                        \
}

/* Search the hash table for Addr.  On a hit, move the entry to the head
 * of its chain (move-to-front heuristic) and update search stats with
 * the chain depth reached; entry_ptr is set to NULL on a miss.
 */
#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val)         \
{                                                                       \
    int k;                                                              \
    int depth = 0;                                                      \
    H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)                    \
    k = H5C__HASH_FCN(Addr);                                            \
    entry_ptr = ((cache_ptr)->index)[k];                                \
    while(entry_ptr) {                                                  \
        if(H5F_addr_eq(Addr, (entry_ptr)->addr)) {                      \
            H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
            if(entry_ptr != ((cache_ptr)->index)[k]) {                  \
                if((entry_ptr)->ht_next)                                \
                    (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
                HDassert((entry_ptr)->ht_prev != NULL);                 \
                (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next;   \
                ((cache_ptr)->index)[k]->ht_prev = (entry_ptr);         \
                (entry_ptr)->ht_next = ((cache_ptr)->index)[k];         \
                (entry_ptr)->ht_prev = NULL;                            \
                ((cache_ptr)->index)[k] = (entry_ptr);                  \
                H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
            }                                                           \
            break;                                                      \
        }                                                               \
        (entry_ptr) = (entry_ptr)->ht_next;                             \
        (depth)++;                                                      \
    }                                                                   \
    H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \
}

/* Identical to H5C__SEARCH_INDEX except that no search statistics are
 * collected (no depth counter, no stats update).
 */
#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \
{                                                                       \
    int k;                                                              \
    H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)                    \
    k = H5C__HASH_FCN(Addr);                                            \
    entry_ptr = ((cache_ptr)->index)[k];                                \
    while(entry_ptr) {                                                  \
        if(H5F_addr_eq(Addr, (entry_ptr)->addr)) {                      \
            H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
            if(entry_ptr != ((cache_ptr)->index)[k]) {                  \
                if((entry_ptr)->ht_next)                                \
                    (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
                HDassert((entry_ptr)->ht_prev != NULL);                 \
                (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next;   \
                ((cache_ptr)->index)[k]->ht_prev = (entry_ptr);         \
                (entry_ptr)->ht_next = ((cache_ptr)->index)[k];         \
                (entry_ptr)->ht_prev = NULL;                            \
                ((cache_ptr)->index)[k] = (entry_ptr);                  \
                H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
            }                                                           \
            break;                                                      \
        }                                                               \
        (entry_ptr) = (entry_ptr)->ht_next;                             \
    }                                                                   \
}

/* Move an entry's size from the dirty to the clean index totals
 * (overall and per-ring).  The entry's is_dirty flag itself is managed
 * by the caller.
 */
#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr)         \
{                                                                       \
    H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr);        \
    (cache_ptr)->dirty_index_size -= (entry_ptr)->size;                 \
    ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])               \
        -= (entry_ptr)->size;                                           \
    (cache_ptr)->clean_index_size += (entry_ptr)->size;                 \
    ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])               \
        += (entry_ptr)->size;                                           \
    H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr);       \
}

/* Move an entry's size from the clean to the dirty index totals
 * (mirror image of H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN).
 */
#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)         \
{                                                                       \
    H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr);        \
    (cache_ptr)->clean_index_size -= (entry_ptr)->size;                 \
    ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])               \
        -= (entry_ptr)->size;                                           \
    (cache_ptr)->dirty_index_size += (entry_ptr)->size;                 \
    ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])               \
        += (entry_ptr)->size;                                           \
    H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr);       \
}

/* Apply an entry size change to all index totals: overall, per-ring,
 * clean/dirty (was_clean selects where old_size is removed, the entry's
 * current is_dirty flag selects where new_size is added), and the index
 * list length/size.
 */
#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
                                          entry_ptr, was_clean)         \
{                                                                       \
    H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size,     \
                                     entry_ptr, was_clean)              \
    (cache_ptr)->index_size -= (old_size);                              \
    (cache_ptr)->index_size += (new_size);                              \
    ((cache_ptr)->index_ring_size[entry_ptr->ring]) -= (old_size);      \
    ((cache_ptr)->index_ring_size[entry_ptr->ring]) += (new_size);      \
    if(was_clean) {                                                     \
        (cache_ptr)->clean_index_size -= (old_size);                    \
        ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])-= (old_size); \
    } else {                                                            \
        (cache_ptr)->dirty_index_size -= (old_size);                    \
        ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])-= (old_size); \
    }                                                                   \
    if((entry_ptr)->is_dirty) {                                         \
        (cache_ptr)->dirty_index_size += (new_size);                    \
        ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])+= (new_size); \
    } else {                                                            \
        (cache_ptr)->clean_index_size += (new_size);                    \
        ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])+= (new_size); \
    }                                                                   \
    H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->il_len,                \
                                    (cache_ptr)->il_size,               \
                                    (old_size), (new_size))             \
    H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size,    \
                                      entry_ptr)                        \
}


/**************************************************************************
 *
 * Skip list insertion and deletion macros:
 *
 * These used to be functions, but I
converted them to macros to avoid some
 * function call overhead.
 *
 **************************************************************************/

/*-------------------------------------------------------------------------
 *
 * Macro:       H5C__INSERT_ENTRY_IN_SLIST
 *
 * Purpose:     Insert the specified instance of H5C_cache_entry_t into
 *              the skip list in the specified instance of H5C_t.  Update
 *              the associated length and size fields.
 *
 * Return:      N/A
 *
 * Programmer:  John Mainzer, 5/10/04
 *
 * Modifications:
 *
 *    JRM -- 7/21/04
 *    Updated function to set the in_tree flag when inserting
 *    an entry into the tree.  Also modified the function to
 *    update the tree size and len fields instead of the similar
 *    index fields.
 *
 *    All of this is part of the modifications to support the
 *    hash table.
 *
 *    JRM -- 7/27/04
 *    Converted the function H5C_insert_entry_in_tree() into
 *    the macro H5C__INSERT_ENTRY_IN_TREE in the hopes of
 *    wringing a little more speed out of the cache.
 *
 *    Note that we don't bother to check if the entry is already
 *    in the tree -- if it is, H5SL_insert() will fail.
 *
 *    QAK -- 11/27/04
 *    Switched over to using skip list routines.
 *
 *    JRM -- 6/27/06
 *    Added fail_val parameter.
 *
 *    JRM -- 8/25/06
 *    Added the H5C_DO_SANITY_CHECKS version of the macro.
 *
 *    This version maintains the slist_len_increase and
 *    slist_size_increase fields that are used in sanity
 *    checks in the flush routines.
 *
 *    All this is needed as the fractal heap needs to be
 *    able to dirty, resize and/or move entries during the
 *    flush.
 *
 *    JRM -- 12/13/14
 *    Added code to set cache_ptr->slist_changed to TRUE
 *    when an entry is inserted in the slist.
 *
 *    JRM -- 9/1/15
 *    Added code to maintain the cache_ptr->slist_ring_len
 *    and cache_ptr->slist_ring_size arrays.
 *
 *    JRM -- 4/29/20
 *    Reworked macro to support the slist_enabled field
 *    of H5C_t.  If slist_enabled == TRUE, the macro
 *    functions as before.  Otherwise, the macro is a no-op,
 *    and the slist must be empty.
 *
 *-------------------------------------------------------------------------
 */

/* NOTE: The H5C__INSERT_ENTRY_IN_SLIST() macro is set up so that
 *
 *           H5C_DO_SANITY_CHECKS
 *
 *       and
 *
 *           H5C_DO_SLIST_SANITY_CHECKS
 *
 *       can be selected independently.  This is easy to miss as the
 *       two #defines are easy to confuse.
 */

#if H5C_DO_SLIST_SANITY_CHECKS

/* Expensive check: scan the skip list for the entry. */
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) \
    H5C_entry_in_skip_list((cache_ptr), (entry_ptr))

#else /* H5C_DO_SLIST_SANITY_CHECKS */

/* Cheap stand-in when slist sanity checks are disabled. */
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE

#endif /* H5C_DO_SLIST_SANITY_CHECKS */


#if H5C_DO_SANITY_CHECKS

/* Sanity-checking version: also maintains slist_len_increase and
 * slist_size_increase for the flush-routine consistency checks.
 * No-op (with the slist required to be empty) when slist_enabled
 * is FALSE.
 */
#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val)      \
{                                                                       \
    HDassert( (cache_ptr) );                                            \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                 \
                                                                        \
    if ( (cache_ptr)->slist_enabled ) {                                 \
                                                                        \
        HDassert( (entry_ptr) );                                        \
        HDassert( (entry_ptr)->size > 0 );                              \
        HDassert( H5F_addr_defined((entry_ptr)->addr) );                \
        HDassert( !((entry_ptr)->in_slist) );                           \
        HDassert( !
ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) );                             \
        HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED );             \
        HDassert( (entry_ptr)->ring < H5C_RING_NTYPES );                \
        HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <=     \
                  (cache_ptr)->slist_len );                             \
        HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <=    \
                  (cache_ptr)->slist_size );                            \
                                                                        \
        if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr,             \
                         &((entry_ptr)->addr)) < 0)                     \
            HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val),            \
                        "can't insert entry in skip list")              \
                                                                        \
        (entry_ptr)->in_slist = TRUE;                                   \
        (cache_ptr)->slist_changed = TRUE;                              \
        (cache_ptr)->slist_len++;                                       \
        (cache_ptr)->slist_size += (entry_ptr)->size;                   \
        ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++;             \
        ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
        (cache_ptr)->slist_len_increase++;                              \
        (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \
                                                                        \
        HDassert( (cache_ptr)->slist_len > 0 );                         \
        HDassert( (cache_ptr)->slist_size > 0 );                        \
                                                                        \
    } else { /* slist disabled */                                       \
                                                                        \
        HDassert( (cache_ptr)->slist_len == 0 );                        \
        HDassert( (cache_ptr)->slist_size == 0 );                       \
    }                                                                   \
} /* H5C__INSERT_ENTRY_IN_SLIST */

#else /* H5C_DO_SANITY_CHECKS */

/* Non-sanity-checking version: same insertion and bookkeeping, but
 * without the slist_len_increase / slist_size_increase tracking.
 */
#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val)      \
{                                                                       \
    HDassert( (cache_ptr) );                                            \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                 \
                                                                        \
    if ( (cache_ptr)->slist_enabled ) {                                 \
                                                                        \
        HDassert( (entry_ptr) );                                        \
        HDassert( (entry_ptr)->size > 0 );                              \
        HDassert( ! ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) );         \
        HDassert( H5F_addr_defined((entry_ptr)->addr) );                \
        HDassert( !((entry_ptr)->in_slist) );                           \
        HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED );             \
        HDassert( (entry_ptr)->ring < H5C_RING_NTYPES );                \
        HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <=     \
                  (cache_ptr)->slist_len );                             \
        HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <=    \
                  (cache_ptr)->slist_size );                            \
        HDassert( (cache_ptr)->slist_ptr );                             \
                                                                        \
        if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr,             \
                         &((entry_ptr)->addr)) < 0)                     \
            HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val),            \
                        "can't insert entry in skip list")              \
                                                                        \
        (entry_ptr)->in_slist = TRUE;                                   \
        (cache_ptr)->slist_changed = TRUE;                              \
        (cache_ptr)->slist_len++;                                       \
        (cache_ptr)->slist_size += (entry_ptr)->size;                   \
        ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++;             \
        ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
                                                                        \
        HDassert( (cache_ptr)->slist_len > 0 );                         \
        HDassert( (cache_ptr)->slist_size > 0 );                        \
                                                                        \
    } else { /* slist disabled */                                       \
                                                                        \
        HDassert( (cache_ptr)->slist_len == 0 );                        \
        HDassert( (cache_ptr)->slist_size == 0 );                       \
    }                                                                   \
} /* H5C__INSERT_ENTRY_IN_SLIST */

#endif /* H5C_DO_SANITY_CHECKS */


/*-------------------------------------------------------------------------
 *
 * Function:    H5C__REMOVE_ENTRY_FROM_SLIST
 *
 * Purpose:     Remove the specified instance of H5C_cache_entry_t from the
 *              index skip list in the specified instance of H5C_t.  Update
 *              the associated length and size fields.
 *
 * Return:      N/A
 *
 * Programmer:  John Mainzer, 5/10/04
 *
 * Modifications:
 *
 *    JRM -- 7/21/04
 *    Updated function for the addition of the hash table.
1724 * 1725 * JRM - 7/27/04 1726 * Converted from the function H5C_remove_entry_from_tree() 1727 * to the macro H5C__REMOVE_ENTRY_FROM_TREE in the hopes of 1728 * wringing a little more performance out of the cache. 1729 * 1730 * QAK -- 11/27/04 1731 * Switched over to using skip list routines. 1732 * 1733 * JRM -- 3/28/07 1734 * Updated sanity checks for the new is_read_only and 1735 * ro_ref_count fields in H5C_cache_entry_t. 1736 * 1737 * JRM -- 12/13/14 1738 * Added code to set cache_ptr->slist_changed to TRUE 1739 * when an entry is removed from the slist. 1740 * 1741 * JRM -- 4/29/20 1742 * Reworked macro to support the slist_enabled field 1743 * of H5C_t. If slist_enabled == TRUE, the macro 1744 * functions as before. Otherwise, the macro is a no-op, 1745 * and the slist must be empty. 1746 * 1747 *------------------------------------------------------------------------- 1748 */ 1749 1750 #if H5C_DO_SANITY_CHECKS 1751 #define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \ 1752 { \ 1753 HDassert( (cache_ptr) ); \ 1754 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 1755 \ 1756 if ( (cache_ptr)->slist_enabled ) { \ 1757 \ 1758 HDassert( (entry_ptr) ); \ 1759 HDassert( !((entry_ptr)->is_read_only) ); \ 1760 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 1761 HDassert( (entry_ptr)->size > 0 ); \ 1762 HDassert( (entry_ptr)->in_slist ); \ 1763 HDassert( (cache_ptr)->slist_ptr ); \ 1764 HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ 1765 HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ 1766 HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ 1767 (cache_ptr)->slist_len ); \ 1768 HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ 1769 (cache_ptr)->slist_size ); \ 1770 HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ 1771 \ 1772 if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \ 1773 != (entry_ptr) ) \ 1774 HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \ 1775 "can't delete entry from skip 
list") \ 1776 \ 1777 HDassert( (cache_ptr)->slist_len > 0 ); \ 1778 if(!(during_flush)) \ 1779 (cache_ptr)->slist_changed = TRUE; \ 1780 (cache_ptr)->slist_len--; \ 1781 HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ 1782 (cache_ptr)->slist_size -= (entry_ptr)->size; \ 1783 ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \ 1784 HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \ 1785 (entry_ptr)->size ); \ 1786 ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\ 1787 (cache_ptr)->slist_len_increase--; \ 1788 (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \ 1789 (entry_ptr)->in_slist = FALSE; \ 1790 \ 1791 } else { /* slist disabled */ \ 1792 \ 1793 HDassert( (cache_ptr)->slist_len == 0 ); \ 1794 HDassert( (cache_ptr)->slist_size == 0 ); \ 1795 } \ 1796 } /* H5C__REMOVE_ENTRY_FROM_SLIST */ 1797 1798 #else /* H5C_DO_SANITY_CHECKS */ 1799 1800 #define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \ 1801 { \ 1802 HDassert( (cache_ptr) ); \ 1803 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 1804 \ 1805 if ( (cache_ptr)->slist_enabled ) { \ 1806 \ 1807 HDassert( (entry_ptr) ); \ 1808 HDassert( !((entry_ptr)->is_read_only) ); \ 1809 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 1810 HDassert( (entry_ptr)->in_slist ); \ 1811 HDassert( (cache_ptr)->slist_ptr ); \ 1812 HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ 1813 HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ 1814 HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ 1815 (cache_ptr)->slist_len ); \ 1816 HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ 1817 (cache_ptr)->slist_size ); \ 1818 \ 1819 if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \ 1820 != (entry_ptr) ) \ 1821 HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \ 1822 "can't delete entry from skip list") \ 1823 \ 1824 HDassert( (cache_ptr)->slist_len > 0 ); \ 1825 if(!(during_flush)) \ 1826 (cache_ptr)->slist_changed = 
TRUE; \ 1827 (cache_ptr)->slist_len--; \ 1828 HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ 1829 (cache_ptr)->slist_size -= (entry_ptr)->size; \ 1830 ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \ 1831 HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \ 1832 (entry_ptr)->size ); \ 1833 ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\ 1834 (entry_ptr)->in_slist = FALSE; \ 1835 \ 1836 } else { /* slist disabled */ \ 1837 \ 1838 HDassert( (cache_ptr)->slist_len == 0 ); \ 1839 HDassert( (cache_ptr)->slist_size == 0 ); \ 1840 } \ 1841 } /* H5C__REMOVE_ENTRY_FROM_SLIST */ 1842 1843 #endif /* H5C_DO_SANITY_CHECKS */ 1844 1845 1846 /*------------------------------------------------------------------------- 1847 * 1848 * Function: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE 1849 * 1850 * Purpose: Update cache_ptr->slist_size for a change in the size of 1851 * and entry in the slist. 1852 * 1853 * Return: N/A 1854 * 1855 * Programmer: John Mainzer, 9/07/05 1856 * 1857 * Modifications: 1858 * 1859 * JRM -- 8/27/06 1860 * Added the H5C_DO_SANITY_CHECKS version of the macro. 1861 * 1862 * This version maintains the slist_size_increase field 1863 * that are used in sanity checks in the flush routines. 1864 * 1865 * All this is needed as the fractal heap needs to be 1866 * able to dirty, resize and/or move entries during the 1867 * flush. 1868 * 1869 * JRM -- 12/13/14 1870 * Note that we do not set cache_ptr->slist_changed to TRUE 1871 * in this case, as the structure of the slist is not 1872 * modified. 1873 * 1874 * JRM -- 9/1/15 1875 * Added code to maintain the cache_ptr->slist_ring_len 1876 * and cache_ptr->slist_ring_size arrays. 1877 * 1878 * JRM -- 4/29/20 1879 * Reworked macro to support the slist_enabled field 1880 * of H5C_t. If slist_enabled == TRUE, the macro 1881 * functions as before. Otherwise, the macro is a no-op, 1882 * and the slist must be empty. 
1883 * 1884 *------------------------------------------------------------------------- 1885 */ 1886 1887 #if H5C_DO_SANITY_CHECKS 1888 1889 #define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ 1890 { \ 1891 HDassert( (cache_ptr) ); \ 1892 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 1893 \ 1894 if ( (cache_ptr)->slist_enabled ) { \ 1895 \ 1896 HDassert( (old_size) > 0 ); \ 1897 HDassert( (new_size) > 0 ); \ 1898 HDassert( (old_size) <= (cache_ptr)->slist_size ); \ 1899 HDassert( (cache_ptr)->slist_len > 0 ); \ 1900 HDassert( ((cache_ptr)->slist_len > 1) || \ 1901 ( (cache_ptr)->slist_size == (old_size) ) ); \ 1902 HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ 1903 HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ 1904 HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ 1905 (cache_ptr)->slist_len ); \ 1906 HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ 1907 (cache_ptr)->slist_size ); \ 1908 \ 1909 (cache_ptr)->slist_size -= (old_size); \ 1910 (cache_ptr)->slist_size += (new_size); \ 1911 \ 1912 HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] \ 1913 >= (old_size) ); \ 1914 \ 1915 ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \ 1916 ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \ 1917 \ 1918 (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \ 1919 (cache_ptr)->slist_size_increase += (int64_t)(new_size); \ 1920 \ 1921 HDassert( (new_size) <= (cache_ptr)->slist_size ); \ 1922 HDassert( ( (cache_ptr)->slist_len > 1 ) || \ 1923 ( (cache_ptr)->slist_size == (new_size) ) ); \ 1924 \ 1925 } else { /* slist disabled */ \ 1926 \ 1927 HDassert( (cache_ptr)->slist_len == 0 ); \ 1928 HDassert( (cache_ptr)->slist_size == 0 ); \ 1929 } \ 1930 } /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ 1931 1932 #else /* H5C_DO_SANITY_CHECKS */ 1933 1934 #define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ 1935 { \ 1936 HDassert( (cache_ptr) ); \ 1937 
HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 1938 \ 1939 if ( (cache_ptr)->slist_enabled ) { \ 1940 \ 1941 HDassert( (old_size) > 0 ); \ 1942 HDassert( (new_size) > 0 ); \ 1943 HDassert( (old_size) <= (cache_ptr)->slist_size ); \ 1944 HDassert( (cache_ptr)->slist_len > 0 ); \ 1945 HDassert( ((cache_ptr)->slist_len > 1) || \ 1946 ( (cache_ptr)->slist_size == (old_size) ) ); \ 1947 HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \ 1948 HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \ 1949 HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \ 1950 (cache_ptr)->slist_len ); \ 1951 HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \ 1952 (cache_ptr)->slist_size ); \ 1953 \ 1954 (cache_ptr)->slist_size -= (old_size); \ 1955 (cache_ptr)->slist_size += (new_size); \ 1956 \ 1957 HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \ 1958 (old_size) ); \ 1959 ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \ 1960 ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \ 1961 \ 1962 HDassert( (new_size) <= (cache_ptr)->slist_size ); \ 1963 HDassert( ( (cache_ptr)->slist_len > 1 ) || \ 1964 ( (cache_ptr)->slist_size == (new_size) ) ); \ 1965 \ 1966 } else { /* slist disabled */ \ 1967 \ 1968 HDassert( (cache_ptr)->slist_len == 0 ); \ 1969 HDassert( (cache_ptr)->slist_size == 0 ); \ 1970 } \ 1971 } /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ 1972 1973 #endif /* H5C_DO_SANITY_CHECKS */ 1974 1975 1976 /************************************************************************** 1977 * 1978 * Replacement policy update macros: 1979 * 1980 * These used to be functions, but I converted them to macros to avoid some 1981 * function call overhead. 
1982 * 1983 **************************************************************************/ 1984 1985 /*------------------------------------------------------------------------- 1986 * 1987 * Macro: H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS 1988 * 1989 * Purpose: For efficiency, we sometimes change the order of flushes -- 1990 * but doing so can confuse the replacement policy. This 1991 * macro exists to allow us to specify an entry as the 1992 * most recently touched so we can repair any such 1993 * confusion. 1994 * 1995 * At present, we only support the modified LRU policy, so 1996 * this function deals with that case unconditionally. If 1997 * we ever support other replacement policies, the macro 1998 * should switch on the current policy and act accordingly. 1999 * 2000 * Return: N/A 2001 * 2002 * Programmer: John Mainzer, 10/13/05 2003 * 2004 * Modifications: 2005 * 2006 * JRM -- 3/20/06 2007 * Modified macro to ignore pinned entries. Pinned entries 2008 * do not appear in the data structures maintained by the 2009 * replacement policy code, and thus this macro has nothing 2010 * to do if called for such an entry. 2011 * 2012 * JRM -- 3/28/07 2013 * Added sanity checks using the new is_read_only and 2014 * ro_ref_count fields of struct H5C_cache_entry_t. 2015 * 2016 *------------------------------------------------------------------------- 2017 */ 2018 2019 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 2020 2021 #define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \ 2022 { \ 2023 HDassert( (cache_ptr) ); \ 2024 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2025 HDassert( (entry_ptr) ); \ 2026 HDassert( !((entry_ptr)->is_protected) ); \ 2027 HDassert( !((entry_ptr)->is_read_only) ); \ 2028 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2029 HDassert( (entry_ptr)->size > 0 ); \ 2030 \ 2031 if ( ! 
((entry_ptr)->is_pinned) ) { \ 2032 \ 2033 /* modified LRU specific code */ \ 2034 \ 2035 /* remove the entry from the LRU list, and re-insert it at the head.\ 2036 */ \ 2037 \ 2038 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2039 (cache_ptr)->LRU_tail_ptr, \ 2040 (cache_ptr)->LRU_list_len, \ 2041 (cache_ptr)->LRU_list_size, (fail_val)) \ 2042 \ 2043 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2044 (cache_ptr)->LRU_tail_ptr, \ 2045 (cache_ptr)->LRU_list_len, \ 2046 (cache_ptr)->LRU_list_size, (fail_val)) \ 2047 \ 2048 /* Use the dirty flag to infer whether the entry is on the clean or \ 2049 * dirty LRU list, and remove it. Then insert it at the head of \ 2050 * the same LRU list. \ 2051 * \ 2052 * At least initially, all entries should be clean. That may \ 2053 * change, so we may as well deal with both cases now. \ 2054 */ \ 2055 \ 2056 if ( (entry_ptr)->is_dirty ) { \ 2057 H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ 2058 (cache_ptr)->dLRU_tail_ptr, \ 2059 (cache_ptr)->dLRU_list_len, \ 2060 (cache_ptr)->dLRU_list_size, (fail_val)) \ 2061 \ 2062 H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ 2063 (cache_ptr)->dLRU_tail_ptr, \ 2064 (cache_ptr)->dLRU_list_len, \ 2065 (cache_ptr)->dLRU_list_size, (fail_val)) \ 2066 } else { \ 2067 H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ 2068 (cache_ptr)->cLRU_tail_ptr, \ 2069 (cache_ptr)->cLRU_list_len, \ 2070 (cache_ptr)->cLRU_list_size, (fail_val)) \ 2071 \ 2072 H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ 2073 (cache_ptr)->cLRU_tail_ptr, \ 2074 (cache_ptr)->cLRU_list_len, \ 2075 (cache_ptr)->cLRU_list_size, (fail_val)) \ 2076 } \ 2077 \ 2078 /* End modified LRU specific code. 
*/ \ 2079 } \ 2080 } /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ 2081 2082 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2083 2084 #define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \ 2085 { \ 2086 HDassert( (cache_ptr) ); \ 2087 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2088 HDassert( (entry_ptr) ); \ 2089 HDassert( !((entry_ptr)->is_protected) ); \ 2090 HDassert( !((entry_ptr)->is_read_only) ); \ 2091 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2092 HDassert( (entry_ptr)->size > 0 ); \ 2093 \ 2094 if ( ! ((entry_ptr)->is_pinned) ) { \ 2095 \ 2096 /* modified LRU specific code */ \ 2097 \ 2098 /* remove the entry from the LRU list, and re-insert it at the head \ 2099 */ \ 2100 \ 2101 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2102 (cache_ptr)->LRU_tail_ptr, \ 2103 (cache_ptr)->LRU_list_len, \ 2104 (cache_ptr)->LRU_list_size, (fail_val)) \ 2105 \ 2106 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2107 (cache_ptr)->LRU_tail_ptr, \ 2108 (cache_ptr)->LRU_list_len, \ 2109 (cache_ptr)->LRU_list_size, (fail_val)) \ 2110 \ 2111 /* End modified LRU specific code. */ \ 2112 } \ 2113 } /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ 2114 2115 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2116 2117 2118 /*------------------------------------------------------------------------- 2119 * 2120 * Macro: H5C__UPDATE_RP_FOR_EVICTION 2121 * 2122 * Purpose: Update the replacement policy data structures for an 2123 * eviction of the specified cache entry. 2124 * 2125 * At present, we only support the modified LRU policy, so 2126 * this function deals with that case unconditionally. If 2127 * we ever support other replacement policies, the function 2128 * should switch on the current policy and act accordingly. 2129 * 2130 * Return: Non-negative on success/Negative on failure. 
2131 * 2132 * Programmer: John Mainzer, 5/10/04 2133 * 2134 * Modifications: 2135 * 2136 * JRM - 7/27/04 2137 * Converted the function H5C_update_rp_for_eviction() to the 2138 * macro H5C__UPDATE_RP_FOR_EVICTION in an effort to squeeze 2139 * a bit more performance out of the cache. 2140 * 2141 * At least for the first cut, I am leaving the comments and 2142 * white space in the macro. If they cause difficulties with 2143 * the pre-processor, I'll have to remove them. 2144 * 2145 * JRM - 7/28/04 2146 * Split macro into two version, one supporting the clean and 2147 * dirty LRU lists, and the other not. Yet another attempt 2148 * at optimization. 2149 * 2150 * JRM - 3/20/06 2151 * Pinned entries can't be evicted, so this entry should never 2152 * be called on a pinned entry. Added assert to verify this. 2153 * 2154 * JRM -- 3/28/07 2155 * Added sanity checks for the new is_read_only and 2156 * ro_ref_count fields of struct H5C_cache_entry_t. 2157 * 2158 *------------------------------------------------------------------------- 2159 */ 2160 2161 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 2162 2163 #define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ 2164 { \ 2165 HDassert( (cache_ptr) ); \ 2166 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2167 HDassert( (entry_ptr) ); \ 2168 HDassert( !((entry_ptr)->is_protected) ); \ 2169 HDassert( !((entry_ptr)->is_read_only) ); \ 2170 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2171 HDassert( !((entry_ptr)->is_pinned) ); \ 2172 HDassert( (entry_ptr)->size > 0 ); \ 2173 \ 2174 /* modified LRU specific code */ \ 2175 \ 2176 /* remove the entry from the LRU list. */ \ 2177 \ 2178 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2179 (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ 2180 (cache_ptr)->LRU_list_size, (fail_val)) \ 2181 \ 2182 /* If the entry is clean when it is evicted, it should be on the \ 2183 * clean LRU list, if it was dirty, it should be on the dirty LRU list. 
\ 2184 * Remove it from the appropriate list according to the value of the \ 2185 * dirty flag. \ 2186 */ \ 2187 \ 2188 if ( (entry_ptr)->is_dirty ) { \ 2189 \ 2190 H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ 2191 (cache_ptr)->dLRU_tail_ptr, \ 2192 (cache_ptr)->dLRU_list_len, \ 2193 (cache_ptr)->dLRU_list_size, (fail_val)) \ 2194 } else { \ 2195 H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ 2196 (cache_ptr)->cLRU_tail_ptr, \ 2197 (cache_ptr)->cLRU_list_len, \ 2198 (cache_ptr)->cLRU_list_size, (fail_val)) \ 2199 } \ 2200 \ 2201 } /* H5C__UPDATE_RP_FOR_EVICTION */ 2202 2203 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2204 2205 #define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ 2206 { \ 2207 HDassert( (cache_ptr) ); \ 2208 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2209 HDassert( (entry_ptr) ); \ 2210 HDassert( !((entry_ptr)->is_protected) ); \ 2211 HDassert( !((entry_ptr)->is_read_only) ); \ 2212 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2213 HDassert( !((entry_ptr)->is_pinned) ); \ 2214 HDassert( (entry_ptr)->size > 0 ); \ 2215 \ 2216 /* modified LRU specific code */ \ 2217 \ 2218 /* remove the entry from the LRU list. */ \ 2219 \ 2220 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2221 (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ 2222 (cache_ptr)->LRU_list_size, (fail_val)) \ 2223 \ 2224 } /* H5C__UPDATE_RP_FOR_EVICTION */ 2225 2226 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2227 2228 2229 /*------------------------------------------------------------------------- 2230 * 2231 * Macro: H5C__UPDATE_RP_FOR_FLUSH 2232 * 2233 * Purpose: Update the replacement policy data structures for a flush 2234 * of the specified cache entry. 2235 * 2236 * At present, we only support the modified LRU policy, so 2237 * this function deals with that case unconditionally. 
If 2238 * we ever support other replacement policies, the function 2239 * should switch on the current policy and act accordingly. 2240 * 2241 * Return: N/A 2242 * 2243 * Programmer: John Mainzer, 5/6/04 2244 * 2245 * Modifications: 2246 * 2247 * JRM - 7/27/04 2248 * Converted the function H5C_update_rp_for_flush() to the 2249 * macro H5C__UPDATE_RP_FOR_FLUSH in an effort to squeeze 2250 * a bit more performance out of the cache. 2251 * 2252 * At least for the first cut, I am leaving the comments and 2253 * white space in the macro. If they cause difficulties with 2254 * pre-processor, I'll have to remove them. 2255 * 2256 * JRM - 7/28/04 2257 * Split macro into two versions, one supporting the clean and 2258 * dirty LRU lists, and the other not. Yet another attempt 2259 * at optimization. 2260 * 2261 * JRM - 3/20/06 2262 * While pinned entries can be flushed, they don't reside in 2263 * the replacement policy data structures when unprotected. 2264 * Thus I modified this macro to do nothing if the entry is 2265 * pinned. 2266 * 2267 * JRM - 3/28/07 2268 * Added sanity checks based on the new is_read_only and 2269 * ro_ref_count fields of struct H5C_cache_entry_t. 2270 * 2271 *------------------------------------------------------------------------- 2272 */ 2273 2274 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 2275 2276 #define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ 2277 { \ 2278 HDassert( (cache_ptr) ); \ 2279 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2280 HDassert( (entry_ptr) ); \ 2281 HDassert( !((entry_ptr)->is_protected) ); \ 2282 HDassert( !((entry_ptr)->is_read_only) ); \ 2283 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2284 HDassert( (entry_ptr)->size > 0 ); \ 2285 \ 2286 if ( ! ((entry_ptr)->is_pinned) ) { \ 2287 \ 2288 /* modified LRU specific code */ \ 2289 \ 2290 /* remove the entry from the LRU list, and re-insert it at the \ 2291 * head. 
\ 2292 */ \ 2293 \ 2294 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2295 (cache_ptr)->LRU_tail_ptr, \ 2296 (cache_ptr)->LRU_list_len, \ 2297 (cache_ptr)->LRU_list_size, (fail_val)) \ 2298 \ 2299 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2300 (cache_ptr)->LRU_tail_ptr, \ 2301 (cache_ptr)->LRU_list_len, \ 2302 (cache_ptr)->LRU_list_size, (fail_val)) \ 2303 \ 2304 /* since the entry is being flushed or cleared, one would think \ 2305 * that it must be dirty -- but that need not be the case. Use the \ 2306 * dirty flag to infer whether the entry is on the clean or dirty \ 2307 * LRU list, and remove it. Then insert it at the head of the \ 2308 * clean LRU list. \ 2309 * \ 2310 * The function presumes that a dirty entry will be either cleared \ 2311 * or flushed shortly, so it is OK if we put a dirty entry on the \ 2312 * clean LRU list. \ 2313 */ \ 2314 \ 2315 if ( (entry_ptr)->is_dirty ) { \ 2316 H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ 2317 (cache_ptr)->dLRU_tail_ptr, \ 2318 (cache_ptr)->dLRU_list_len, \ 2319 (cache_ptr)->dLRU_list_size, (fail_val)) \ 2320 } else { \ 2321 H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ 2322 (cache_ptr)->cLRU_tail_ptr, \ 2323 (cache_ptr)->cLRU_list_len, \ 2324 (cache_ptr)->cLRU_list_size, (fail_val)) \ 2325 } \ 2326 \ 2327 H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ 2328 (cache_ptr)->cLRU_tail_ptr, \ 2329 (cache_ptr)->cLRU_list_len, \ 2330 (cache_ptr)->cLRU_list_size, (fail_val)) \ 2331 \ 2332 /* End modified LRU specific code. 
*/ \ 2333 } \ 2334 } /* H5C__UPDATE_RP_FOR_FLUSH */ 2335 2336 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2337 2338 #define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ 2339 { \ 2340 HDassert( (cache_ptr) ); \ 2341 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2342 HDassert( (entry_ptr) ); \ 2343 HDassert( !((entry_ptr)->is_protected) ); \ 2344 HDassert( !((entry_ptr)->is_read_only) ); \ 2345 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2346 HDassert( (entry_ptr)->size > 0 ); \ 2347 \ 2348 if ( ! ((entry_ptr)->is_pinned) ) { \ 2349 \ 2350 /* modified LRU specific code */ \ 2351 \ 2352 /* remove the entry from the LRU list, and re-insert it at the \ 2353 * head. \ 2354 */ \ 2355 \ 2356 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2357 (cache_ptr)->LRU_tail_ptr, \ 2358 (cache_ptr)->LRU_list_len, \ 2359 (cache_ptr)->LRU_list_size, (fail_val)) \ 2360 \ 2361 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2362 (cache_ptr)->LRU_tail_ptr, \ 2363 (cache_ptr)->LRU_list_len, \ 2364 (cache_ptr)->LRU_list_size, (fail_val)) \ 2365 \ 2366 /* End modified LRU specific code. */ \ 2367 } \ 2368 } /* H5C__UPDATE_RP_FOR_FLUSH */ 2369 2370 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2371 2372 2373 /*------------------------------------------------------------------------- 2374 * 2375 * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND 2376 * 2377 * Purpose: Update the replacement policy data structures for an 2378 * insertion of the specified cache entry. 2379 * 2380 * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the 2381 * new entry as the LEAST recently used entry, not the 2382 * most recently used. 2383 * 2384 * For now at least, this macro should only be used in 2385 * the reconstruction of the metadata cache from a cache 2386 * image block. 2387 * 2388 * At present, we only support the modified LRU policy, so 2389 * this function deals with that case unconditionally. 
If 2390 * we ever support other replacement policies, the function 2391 * should switch on the current policy and act accordingly. 2392 * 2393 * Return: N/A 2394 * 2395 * Programmer: John Mainzer, 8/15/15 2396 * 2397 *------------------------------------------------------------------------- 2398 */ 2399 2400 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 2401 2402 #define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \ 2403 { \ 2404 HDassert( (cache_ptr) ); \ 2405 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2406 HDassert( (entry_ptr) ); \ 2407 HDassert( !((entry_ptr)->is_protected) ); \ 2408 HDassert( !((entry_ptr)->is_read_only) ); \ 2409 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2410 HDassert( (entry_ptr)->size > 0 ); \ 2411 \ 2412 if ( (entry_ptr)->is_pinned ) { \ 2413 \ 2414 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ 2415 (cache_ptr)->pel_tail_ptr, \ 2416 (cache_ptr)->pel_len, \ 2417 (cache_ptr)->pel_size, (fail_val)) \ 2418 \ 2419 } else { \ 2420 \ 2421 /* modified LRU specific code */ \ 2422 \ 2423 /* insert the entry at the tail of the LRU list. */ \ 2424 \ 2425 H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2426 (cache_ptr)->LRU_tail_ptr, \ 2427 (cache_ptr)->LRU_list_len, \ 2428 (cache_ptr)->LRU_list_size, (fail_val)) \ 2429 \ 2430 /* insert the entry at the tail of the clean or dirty LRU list as \ 2431 * appropriate. \ 2432 */ \ 2433 \ 2434 if ( entry_ptr->is_dirty ) { \ 2435 H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ 2436 (cache_ptr)->dLRU_tail_ptr, \ 2437 (cache_ptr)->dLRU_list_len, \ 2438 (cache_ptr)->dLRU_list_size, (fail_val)) \ 2439 } else { \ 2440 H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ 2441 (cache_ptr)->cLRU_tail_ptr, \ 2442 (cache_ptr)->cLRU_list_len, \ 2443 (cache_ptr)->cLRU_list_size, (fail_val)) \ 2444 } \ 2445 \ 2446 /* End modified LRU specific code. 
*/ \ 2447 } \ 2448 } 2449 2450 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2451 2452 #define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \ 2453 { \ 2454 HDassert( (cache_ptr) ); \ 2455 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2456 HDassert( (entry_ptr) ); \ 2457 HDassert( !((entry_ptr)->is_protected) ); \ 2458 HDassert( !((entry_ptr)->is_read_only) ); \ 2459 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2460 HDassert( (entry_ptr)->size > 0 ); \ 2461 \ 2462 if ( (entry_ptr)->is_pinned ) { \ 2463 \ 2464 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ 2465 (cache_ptr)->pel_tail_ptr, \ 2466 (cache_ptr)->pel_len, \ 2467 (cache_ptr)->pel_size, (fail_val)) \ 2468 \ 2469 } else { \ 2470 \ 2471 /* modified LRU specific code */ \ 2472 \ 2473 /* insert the entry at the tail of the LRU list. */ \ 2474 \ 2475 H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2476 (cache_ptr)->LRU_tail_ptr, \ 2477 (cache_ptr)->LRU_list_len, \ 2478 (cache_ptr)->LRU_list_size, (fail_val)) \ 2479 \ 2480 /* End modified LRU specific code. */ \ 2481 } \ 2482 } 2483 2484 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2485 2486 2487 /*------------------------------------------------------------------------- 2488 * 2489 * Macro: H5C__UPDATE_RP_FOR_INSERTION 2490 * 2491 * Purpose: Update the replacement policy data structures for an 2492 * insertion of the specified cache entry. 2493 * 2494 * At present, we only support the modified LRU policy, so 2495 * this function deals with that case unconditionally. If 2496 * we ever support other replacement policies, the function 2497 * should switch on the current policy and act accordingly. 
2498 * 2499 * Return: N/A 2500 * 2501 * Programmer: John Mainzer, 5/17/04 2502 * 2503 * Modifications: 2504 * 2505 * JRM - 7/27/04 2506 * Converted the function H5C_update_rp_for_insertion() to the 2507 * macro H5C__UPDATE_RP_FOR_INSERTION in an effort to squeeze 2508 * a bit more performance out of the cache. 2509 * 2510 * At least for the first cut, I am leaving the comments and 2511 * white space in the macro. If they cause difficulties with 2512 * pre-processor, I'll have to remove them. 2513 * 2514 * JRM - 7/28/04 2515 * Split macro into two version, one supporting the clean and 2516 * dirty LRU lists, and the other not. Yet another attempt 2517 * at optimization. 2518 * 2519 * JRM - 3/10/06 2520 * This macro should never be called on a pinned entry. 2521 * Inserted an assert to verify this. 2522 * 2523 * JRM - 8/9/06 2524 * Not any more. We must now allow insertion of pinned 2525 * entries. Updated macro to support this. 2526 * 2527 * JRM - 3/28/07 2528 * Added sanity checks using the new is_read_only and 2529 * ro_ref_count fields of struct H5C_cache_entry_t. 2530 * 2531 *------------------------------------------------------------------------- 2532 */ 2533 2534 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 2535 2536 #define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ 2537 { \ 2538 HDassert( (cache_ptr) ); \ 2539 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2540 HDassert( (entry_ptr) ); \ 2541 HDassert( !((entry_ptr)->is_protected) ); \ 2542 HDassert( !((entry_ptr)->is_read_only) ); \ 2543 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2544 HDassert( (entry_ptr)->size > 0 ); \ 2545 \ 2546 if ( (entry_ptr)->is_pinned ) { \ 2547 \ 2548 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ 2549 (cache_ptr)->pel_tail_ptr, \ 2550 (cache_ptr)->pel_len, \ 2551 (cache_ptr)->pel_size, (fail_val)) \ 2552 \ 2553 } else { \ 2554 \ 2555 /* modified LRU specific code */ \ 2556 \ 2557 /* insert the entry at the head of the LRU list. 
*/ \ 2558 \ 2559 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2560 (cache_ptr)->LRU_tail_ptr, \ 2561 (cache_ptr)->LRU_list_len, \ 2562 (cache_ptr)->LRU_list_size, (fail_val)) \ 2563 \ 2564 /* insert the entry at the head of the clean or dirty LRU list as \ 2565 * appropriate. \ 2566 */ \ 2567 \ 2568 if ( entry_ptr->is_dirty ) { \ 2569 H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ 2570 (cache_ptr)->dLRU_tail_ptr, \ 2571 (cache_ptr)->dLRU_list_len, \ 2572 (cache_ptr)->dLRU_list_size, (fail_val)) \ 2573 } else { \ 2574 H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ 2575 (cache_ptr)->cLRU_tail_ptr, \ 2576 (cache_ptr)->cLRU_list_len, \ 2577 (cache_ptr)->cLRU_list_size, (fail_val)) \ 2578 } \ 2579 \ 2580 /* End modified LRU specific code. */ \ 2581 } \ 2582 } 2583 2584 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2585 2586 #define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ 2587 { \ 2588 HDassert( (cache_ptr) ); \ 2589 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2590 HDassert( (entry_ptr) ); \ 2591 HDassert( !((entry_ptr)->is_protected) ); \ 2592 HDassert( !((entry_ptr)->is_read_only) ); \ 2593 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2594 HDassert( (entry_ptr)->size > 0 ); \ 2595 \ 2596 if ( (entry_ptr)->is_pinned ) { \ 2597 \ 2598 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ 2599 (cache_ptr)->pel_tail_ptr, \ 2600 (cache_ptr)->pel_len, \ 2601 (cache_ptr)->pel_size, (fail_val)) \ 2602 \ 2603 } else { \ 2604 \ 2605 /* modified LRU specific code */ \ 2606 \ 2607 /* insert the entry at the head of the LRU list. */ \ 2608 \ 2609 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2610 (cache_ptr)->LRU_tail_ptr, \ 2611 (cache_ptr)->LRU_list_len, \ 2612 (cache_ptr)->LRU_list_size, (fail_val)) \ 2613 \ 2614 /* End modified LRU specific code. 
*/ \ 2615 } \ 2616 } 2617 2618 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2619 2620 2621 /*------------------------------------------------------------------------- 2622 * 2623 * Macro: H5C__UPDATE_RP_FOR_PROTECT 2624 * 2625 * Purpose: Update the replacement policy data structures for a 2626 * protect of the specified cache entry. 2627 * 2628 * To do this, unlink the specified entry from any data 2629 * structures used by the replacement policy, and add the 2630 * entry to the protected list. 2631 * 2632 * At present, we only support the modified LRU policy, so 2633 * this function deals with that case unconditionally. If 2634 * we ever support other replacement policies, the function 2635 * should switch on the current policy and act accordingly. 2636 * 2637 * Return: N/A 2638 * 2639 * Programmer: John Mainzer, 5/17/04 2640 * 2641 * Modifications: 2642 * 2643 * JRM - 7/27/04 2644 * Converted the function H5C_update_rp_for_protect() to the 2645 * macro H5C__UPDATE_RP_FOR_PROTECT in an effort to squeeze 2646 * a bit more performance out of the cache. 2647 * 2648 * At least for the first cut, I am leaving the comments and 2649 * white space in the macro. If they cause difficulties with 2650 * pre-processor, I'll have to remove them. 2651 * 2652 * JRM - 7/28/04 2653 * Split macro into two version, one supporting the clean and 2654 * dirty LRU lists, and the other not. Yet another attempt 2655 * at optimization. 2656 * 2657 * JRM - 3/17/06 2658 * Modified macro to attempt to remove pinned entriese from 2659 * the pinned entry list instead of from the data structures 2660 * maintained by the replacement policy. 2661 * 2662 * JRM - 3/28/07 2663 * Added sanity checks based on the new is_read_only and 2664 * ro_ref_count fields of struct H5C_cache_entry_t. 
 *
 *-------------------------------------------------------------------------
 */

#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val)          \
{                                                                           \
    HDassert( (cache_ptr) );                                                \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                     \
    HDassert( (entry_ptr) );                                                \
    HDassert( !((entry_ptr)->is_protected) );                               \
    HDassert( !((entry_ptr)->is_read_only) );                               \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                           \
    HDassert( (entry_ptr)->size > 0 );                                      \
                                                                            \
    if ( (entry_ptr)->is_pinned ) {                                         \
                                                                            \
        /* Pinned entries live on the pinned entry list, not in the         \
         * replacement policy data structures -- remove from there.         \
         */                                                                 \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr,             \
                        (cache_ptr)->pel_tail_ptr,                          \
                        (cache_ptr)->pel_len,                               \
                        (cache_ptr)->pel_size, (fail_val))                  \
                                                                            \
    } else {                                                                \
                                                                            \
        /* modified LRU specific code */                                    \
                                                                            \
        /* remove the entry from the LRU list. */                           \
                                                                            \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr,             \
                        (cache_ptr)->LRU_tail_ptr,                          \
                        (cache_ptr)->LRU_list_len,                          \
                        (cache_ptr)->LRU_list_size, (fail_val))             \
                                                                            \
        /* Similarly, remove the entry from the clean or dirty LRU list    \
         * as appropriate.                                                  \
         */                                                                 \
                                                                            \
        if ( (entry_ptr)->is_dirty ) {                                      \
                                                                            \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr,    \
                                (cache_ptr)->dLRU_tail_ptr,                 \
                                (cache_ptr)->dLRU_list_len,                 \
                                (cache_ptr)->dLRU_list_size, (fail_val))    \
                                                                            \
        } else {                                                            \
                                                                            \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr,    \
                                (cache_ptr)->cLRU_tail_ptr,                 \
                                (cache_ptr)->cLRU_list_len,                 \
                                (cache_ptr)->cLRU_list_size, (fail_val))    \
        }                                                                   \
                                                                            \
        /* End modified LRU specific code. */                               \
    }                                                                       \
                                                                            \
    /* Regardless of the replacement policy, or whether the entry is        \
     * pinned, now add the entry to the protected list.                     \
     */                                                                     \
                                                                            \
    H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr,                  \
                    (cache_ptr)->pl_tail_ptr,                               \
                    (cache_ptr)->pl_len,                                    \
                    (cache_ptr)->pl_size, (fail_val))                       \
} /* H5C__UPDATE_RP_FOR_PROTECT */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val)          \
{                                                                           \
    HDassert( (cache_ptr) );                                                \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                     \
    HDassert( (entry_ptr) );                                                \
    HDassert( !((entry_ptr)->is_protected) );                               \
    HDassert( !((entry_ptr)->is_read_only) );                               \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                           \
    HDassert( (entry_ptr)->size > 0 );                                      \
                                                                            \
    if ( (entry_ptr)->is_pinned ) {                                         \
                                                                            \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr,             \
                        (cache_ptr)->pel_tail_ptr,                          \
                        (cache_ptr)->pel_len,                               \
                        (cache_ptr)->pel_size, (fail_val))                  \
                                                                            \
    } else {                                                                \
                                                                            \
        /* modified LRU specific code */                                    \
                                                                            \
        /* remove the entry from the LRU list. */                           \
                                                                            \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr,             \
                        (cache_ptr)->LRU_tail_ptr,                          \
                        (cache_ptr)->LRU_list_len,                          \
                        (cache_ptr)->LRU_list_size, (fail_val))             \
                                                                            \
        /* End modified LRU specific code. */                               \
    }                                                                       \
                                                                            \
    /* Regardless of the replacement policy, or whether the entry is        \
     * pinned, now add the entry to the protected list.                     \
     */                                                                     \
                                                                            \
    H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr,                  \
                    (cache_ptr)->pl_tail_ptr,                               \
                    (cache_ptr)->pl_len,                                    \
                    (cache_ptr)->pl_size, (fail_val))                       \
} /* H5C__UPDATE_RP_FOR_PROTECT */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */


/*-------------------------------------------------------------------------
 *
 * Macro:       H5C__UPDATE_RP_FOR_MOVE
 *
 * Purpose:     Update the replacement policy data structures for a
 *              move of the specified cache entry.
2783 * 2784 * At present, we only support the modified LRU policy, so 2785 * this function deals with that case unconditionally. If 2786 * we ever support other replacement policies, the function 2787 * should switch on the current policy and act accordingly. 2788 * 2789 * Return: N/A 2790 * 2791 * Programmer: John Mainzer, 5/17/04 2792 * 2793 *------------------------------------------------------------------------- 2794 */ 2795 2796 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 2797 2798 #define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ 2799 { \ 2800 HDassert( (cache_ptr) ); \ 2801 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2802 HDassert( (entry_ptr) ); \ 2803 HDassert( !((entry_ptr)->is_read_only) ); \ 2804 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2805 HDassert( (entry_ptr)->size > 0 ); \ 2806 \ 2807 if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) { \ 2808 \ 2809 /* modified LRU specific code */ \ 2810 \ 2811 /* remove the entry from the LRU list, and re-insert it at the head. 
\ 2812 */ \ 2813 \ 2814 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2815 (cache_ptr)->LRU_tail_ptr, \ 2816 (cache_ptr)->LRU_list_len, \ 2817 (cache_ptr)->LRU_list_size, (fail_val)) \ 2818 \ 2819 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2820 (cache_ptr)->LRU_tail_ptr, \ 2821 (cache_ptr)->LRU_list_len, \ 2822 (cache_ptr)->LRU_list_size, (fail_val)) \ 2823 \ 2824 /* remove the entry from either the clean or dirty LUR list as \ 2825 * indicated by the was_dirty parameter \ 2826 */ \ 2827 if ( was_dirty ) { \ 2828 \ 2829 H5C__AUX_DLL_REMOVE((entry_ptr), \ 2830 (cache_ptr)->dLRU_head_ptr, \ 2831 (cache_ptr)->dLRU_tail_ptr, \ 2832 (cache_ptr)->dLRU_list_len, \ 2833 (cache_ptr)->dLRU_list_size, \ 2834 (fail_val)) \ 2835 \ 2836 } else { \ 2837 \ 2838 H5C__AUX_DLL_REMOVE((entry_ptr), \ 2839 (cache_ptr)->cLRU_head_ptr, \ 2840 (cache_ptr)->cLRU_tail_ptr, \ 2841 (cache_ptr)->cLRU_list_len, \ 2842 (cache_ptr)->cLRU_list_size, \ 2843 (fail_val)) \ 2844 } \ 2845 \ 2846 /* insert the entry at the head of either the clean or dirty \ 2847 * LRU list as appropriate. \ 2848 */ \ 2849 \ 2850 if ( (entry_ptr)->is_dirty ) { \ 2851 \ 2852 H5C__AUX_DLL_PREPEND((entry_ptr), \ 2853 (cache_ptr)->dLRU_head_ptr, \ 2854 (cache_ptr)->dLRU_tail_ptr, \ 2855 (cache_ptr)->dLRU_list_len, \ 2856 (cache_ptr)->dLRU_list_size, \ 2857 (fail_val)) \ 2858 \ 2859 } else { \ 2860 \ 2861 H5C__AUX_DLL_PREPEND((entry_ptr), \ 2862 (cache_ptr)->cLRU_head_ptr, \ 2863 (cache_ptr)->cLRU_tail_ptr, \ 2864 (cache_ptr)->cLRU_list_len, \ 2865 (cache_ptr)->cLRU_list_size, \ 2866 (fail_val)) \ 2867 } \ 2868 \ 2869 /* End modified LRU specific code. 
*/ \ 2870 } \ 2871 } /* H5C__UPDATE_RP_FOR_MOVE */ 2872 2873 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2874 2875 #define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ 2876 { \ 2877 HDassert( (cache_ptr) ); \ 2878 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2879 HDassert( (entry_ptr) ); \ 2880 HDassert( !((entry_ptr)->is_read_only) ); \ 2881 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2882 HDassert( (entry_ptr)->size > 0 ); \ 2883 \ 2884 if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) { \ 2885 \ 2886 /* modified LRU specific code */ \ 2887 \ 2888 /* remove the entry from the LRU list, and re-insert it at the head. \ 2889 */ \ 2890 \ 2891 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2892 (cache_ptr)->LRU_tail_ptr, \ 2893 (cache_ptr)->LRU_list_len, \ 2894 (cache_ptr)->LRU_list_size, (fail_val)) \ 2895 \ 2896 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 2897 (cache_ptr)->LRU_tail_ptr, \ 2898 (cache_ptr)->LRU_list_len, \ 2899 (cache_ptr)->LRU_list_size, (fail_val)) \ 2900 \ 2901 /* End modified LRU specific code. */ \ 2902 } \ 2903 } /* H5C__UPDATE_RP_FOR_MOVE */ 2904 2905 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 2906 2907 2908 /*------------------------------------------------------------------------- 2909 * 2910 * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE 2911 * 2912 * Purpose: Update the replacement policy data structures for a 2913 * size change of the specified cache entry. 2914 * 2915 * To do this, determine if the entry is pinned. If it is, 2916 * update the size of the pinned entry list. 2917 * 2918 * If it isn't pinned, the entry must handled by the 2919 * replacement policy. Update the appropriate replacement 2920 * policy data structures. 2921 * 2922 * At present, we only support the modified LRU policy, so 2923 * this function deals with that case unconditionally. 
If 2924 * we ever support other replacement policies, the function 2925 * should switch on the current policy and act accordingly. 2926 * 2927 * Return: N/A 2928 * 2929 * Programmer: John Mainzer, 8/23/06 2930 * 2931 * Modifications: 2932 * 2933 * JRM -- 3/28/07 2934 * Added sanity checks based on the new is_read_only and 2935 * ro_ref_count fields of struct H5C_cache_entry_t. 2936 * 2937 *------------------------------------------------------------------------- 2938 */ 2939 2940 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 2941 2942 #define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ 2943 { \ 2944 HDassert( (cache_ptr) ); \ 2945 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 2946 HDassert( (entry_ptr) ); \ 2947 HDassert( !((entry_ptr)->is_protected) ); \ 2948 HDassert( !((entry_ptr)->is_read_only) ); \ 2949 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 2950 HDassert( (entry_ptr)->size > 0 ); \ 2951 HDassert( new_size > 0 ); \ 2952 \ 2953 if ( (entry_ptr)->coll_access ) { \ 2954 \ 2955 H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \ 2956 (cache_ptr)->coll_list_size, \ 2957 (entry_ptr)->size, \ 2958 (new_size)); \ 2959 \ 2960 } \ 2961 \ 2962 if ( (entry_ptr)->is_pinned ) { \ 2963 \ 2964 H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ 2965 (cache_ptr)->pel_size, \ 2966 (entry_ptr)->size, \ 2967 (new_size)); \ 2968 \ 2969 } else { \ 2970 \ 2971 /* modified LRU specific code */ \ 2972 \ 2973 /* Update the size of the LRU list */ \ 2974 \ 2975 H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ 2976 (cache_ptr)->LRU_list_size, \ 2977 (entry_ptr)->size, \ 2978 (new_size)); \ 2979 \ 2980 /* Similarly, update the size of the clean or dirty LRU list as \ 2981 * appropriate. At present, the entry must be clean, but that \ 2982 * could change. 
\ 2983 */ \ 2984 \ 2985 if ( (entry_ptr)->is_dirty ) { \ 2986 \ 2987 H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \ 2988 (cache_ptr)->dLRU_list_size, \ 2989 (entry_ptr)->size, \ 2990 (new_size)); \ 2991 \ 2992 } else { \ 2993 \ 2994 H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \ 2995 (cache_ptr)->cLRU_list_size, \ 2996 (entry_ptr)->size, \ 2997 (new_size)); \ 2998 } \ 2999 \ 3000 /* End modified LRU specific code. */ \ 3001 } \ 3002 \ 3003 } /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */ 3004 3005 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 3006 3007 #define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ 3008 { \ 3009 HDassert( (cache_ptr) ); \ 3010 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 3011 HDassert( (entry_ptr) ); \ 3012 HDassert( !((entry_ptr)->is_protected) ); \ 3013 HDassert( !((entry_ptr)->is_read_only) ); \ 3014 HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ 3015 HDassert( (entry_ptr)->size > 0 ); \ 3016 HDassert( new_size > 0 ); \ 3017 \ 3018 if ( (entry_ptr)->is_pinned ) { \ 3019 \ 3020 H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ 3021 (cache_ptr)->pel_size, \ 3022 (entry_ptr)->size, \ 3023 (new_size)); \ 3024 \ 3025 } else { \ 3026 \ 3027 /* modified LRU specific code */ \ 3028 \ 3029 /* Update the size of the LRU list */ \ 3030 \ 3031 H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ 3032 (cache_ptr)->LRU_list_size, \ 3033 (entry_ptr)->size, \ 3034 (new_size)); \ 3035 \ 3036 /* End modified LRU specific code. */ \ 3037 } \ 3038 \ 3039 } /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */ 3040 3041 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 3042 3043 3044 /*------------------------------------------------------------------------- 3045 * 3046 * Macro: H5C__UPDATE_RP_FOR_UNPIN 3047 * 3048 * Purpose: Update the replacement policy data structures for an 3049 * unpin of the specified cache entry. 
 *
 *              To do this, unlink the specified entry from the pinned
 *              entry list, and re-insert it in the data structures used
 *              by the current replacement policy.
 *
 *              At present, we only support the modified LRU policy, so
 *              this function deals with that case unconditionally.  If
 *              we ever support other replacement policies, the macro
 *              should switch on the current policy and act accordingly.
 *
 * Return:      N/A
 *
 * Programmer:  John Mainzer, 3/22/06
 *
 * Modifications:
 *
 *              JRM -- 3/28/07
 *              Added sanity checks based on the new is_read_only and
 *              ro_ref_count fields of struct H5C_cache_entry_t.
 *
 *-------------------------------------------------------------------------
 */

#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val)            \
{                                                                           \
    HDassert( (cache_ptr) );                                                \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                     \
    HDassert( (entry_ptr) );                                                \
    HDassert( !((entry_ptr)->is_protected) );                               \
    HDassert( !((entry_ptr)->is_read_only) );                               \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                           \
    HDassert( (entry_ptr)->is_pinned);                                      \
    HDassert( (entry_ptr)->size > 0 );                                      \
                                                                            \
    /* Regardless of the replacement policy, remove the entry from the      \
     * pinned entry list.                                                   \
     */                                                                     \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr,                 \
                    (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len,        \
                    (cache_ptr)->pel_size, (fail_val))                      \
                                                                            \
    /* modified LRU specific code */                                        \
                                                                            \
    /* insert the entry at the head of the LRU list. */                     \
                                                                            \
    H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,                \
                     (cache_ptr)->LRU_tail_ptr,                             \
                     (cache_ptr)->LRU_list_len,                             \
                     (cache_ptr)->LRU_list_size, (fail_val))                \
                                                                            \
    /* Similarly, insert the entry at the head of either the clean          \
     * or dirty LRU list as appropriate.                                    \
     */                                                                     \
                                                                            \
    if ( (entry_ptr)->is_dirty ) {                                          \
                                                                            \
        H5C__AUX_DLL_PREPEND((entry_ptr),                                   \
                             (cache_ptr)->dLRU_head_ptr,                    \
                             (cache_ptr)->dLRU_tail_ptr,                    \
                             (cache_ptr)->dLRU_list_len,                    \
                             (cache_ptr)->dLRU_list_size,                   \
                             (fail_val))                                    \
                                                                            \
    } else {                                                                \
                                                                            \
        H5C__AUX_DLL_PREPEND((entry_ptr),                                   \
                             (cache_ptr)->cLRU_head_ptr,                    \
                             (cache_ptr)->cLRU_tail_ptr,                    \
                             (cache_ptr)->cLRU_list_len,                    \
                             (cache_ptr)->cLRU_list_size,                   \
                             (fail_val))                                    \
    }                                                                       \
                                                                            \
    /* End modified LRU specific code. */                                   \
                                                                            \
} /* H5C__UPDATE_RP_FOR_UNPIN */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val)            \
{                                                                           \
    HDassert( (cache_ptr) );                                                \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                     \
    HDassert( (entry_ptr) );                                                \
    HDassert( !((entry_ptr)->is_protected) );                               \
    HDassert( !((entry_ptr)->is_read_only) );                               \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                           \
    HDassert( (entry_ptr)->is_pinned);                                      \
    HDassert( (entry_ptr)->size > 0 );                                      \
                                                                            \
    /* Regardless of the replacement policy, remove the entry from the      \
     * pinned entry list.                                                   \
     */                                                                     \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr,                 \
                    (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len,        \
                    (cache_ptr)->pel_size, (fail_val))                      \
                                                                            \
    /* modified LRU specific code */                                        \
                                                                            \
    /* insert the entry at the head of the LRU list. */                     \
                                                                            \
    H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,                \
                     (cache_ptr)->LRU_tail_ptr,                             \
                     (cache_ptr)->LRU_list_len,                             \
                     (cache_ptr)->LRU_list_size, (fail_val))                \
                                                                            \
    /* End modified LRU specific code. */                                   \
                                                                            \
} /* H5C__UPDATE_RP_FOR_UNPIN */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */


/*-------------------------------------------------------------------------
 *
 * Macro:       H5C__UPDATE_RP_FOR_UNPROTECT
 *
 * Purpose:     Update the replacement policy data structures for an
 *              unprotect of the specified cache entry.
 *
 *              To do this, unlink the specified entry from the protected
 *              list, and re-insert it in the data structures used by the
 *              current replacement policy.
 *
 *              At present, we only support the modified LRU policy, so
 *              this function deals with that case unconditionally.  If
 *              we ever support other replacement policies, the function
 *              should switch on the current policy and act accordingly.
 *
 * Return:      N/A
 *
 * Programmer:  John Mainzer, 5/19/04
 *
 * Modifications:
 *
 *              JRM - 7/27/04
 *              Converted the function H5C_update_rp_for_unprotect() to
 *              the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to
 *              squeeze a bit more performance out of the cache.
 *
 *              At least for the first cut, I am leaving the comments and
 *              white space in the macro.  If they cause difficulties with
 *              pre-processor, I'll have to remove them.
 *
 *              JRM - 7/28/04
 *              Split macro into two versions, one supporting the clean and
 *              dirty LRU lists, and the other not.  Yet another attempt
 *              at optimization.
 *
 *              JRM - 3/17/06
 *              Modified macro to put pinned entries on the pinned entry
 *              list instead of inserting them in the data structures
 *              maintained by the replacement policy.
3205 * 3206 *------------------------------------------------------------------------- 3207 */ 3208 3209 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 3210 3211 #define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ 3212 { \ 3213 HDassert( (cache_ptr) ); \ 3214 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 3215 HDassert( (entry_ptr) ); \ 3216 HDassert( (entry_ptr)->is_protected); \ 3217 HDassert( (entry_ptr)->size > 0 ); \ 3218 \ 3219 /* Regardless of the replacement policy, remove the entry from the \ 3220 * protected list. \ 3221 */ \ 3222 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ 3223 (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ 3224 (cache_ptr)->pl_size, (fail_val)) \ 3225 \ 3226 if ( (entry_ptr)->is_pinned ) { \ 3227 \ 3228 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ 3229 (cache_ptr)->pel_tail_ptr, \ 3230 (cache_ptr)->pel_len, \ 3231 (cache_ptr)->pel_size, (fail_val)) \ 3232 \ 3233 } else { \ 3234 \ 3235 /* modified LRU specific code */ \ 3236 \ 3237 /* insert the entry at the head of the LRU list. */ \ 3238 \ 3239 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 3240 (cache_ptr)->LRU_tail_ptr, \ 3241 (cache_ptr)->LRU_list_len, \ 3242 (cache_ptr)->LRU_list_size, (fail_val)) \ 3243 \ 3244 /* Similarly, insert the entry at the head of either the clean or \ 3245 * dirty LRU list as appropriate. \ 3246 */ \ 3247 \ 3248 if ( (entry_ptr)->is_dirty ) { \ 3249 \ 3250 H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ 3251 (cache_ptr)->dLRU_tail_ptr, \ 3252 (cache_ptr)->dLRU_list_len, \ 3253 (cache_ptr)->dLRU_list_size, (fail_val)) \ 3254 \ 3255 } else { \ 3256 \ 3257 H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ 3258 (cache_ptr)->cLRU_tail_ptr, \ 3259 (cache_ptr)->cLRU_list_len, \ 3260 (cache_ptr)->cLRU_list_size, (fail_val)) \ 3261 } \ 3262 \ 3263 /* End modified LRU specific code. 
*/ \ 3264 } \ 3265 \ 3266 } /* H5C__UPDATE_RP_FOR_UNPROTECT */ 3267 3268 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 3269 3270 #define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ 3271 { \ 3272 HDassert( (cache_ptr) ); \ 3273 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 3274 HDassert( (entry_ptr) ); \ 3275 HDassert( (entry_ptr)->is_protected); \ 3276 HDassert( (entry_ptr)->size > 0 ); \ 3277 \ 3278 /* Regardless of the replacement policy, remove the entry from the \ 3279 * protected list. \ 3280 */ \ 3281 H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ 3282 (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ 3283 (cache_ptr)->pl_size, (fail_val)) \ 3284 \ 3285 if ( (entry_ptr)->is_pinned ) { \ 3286 \ 3287 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ 3288 (cache_ptr)->pel_tail_ptr, \ 3289 (cache_ptr)->pel_len, \ 3290 (cache_ptr)->pel_size, (fail_val)) \ 3291 \ 3292 } else { \ 3293 \ 3294 /* modified LRU specific code */ \ 3295 \ 3296 /* insert the entry at the head of the LRU list. */ \ 3297 \ 3298 H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ 3299 (cache_ptr)->LRU_tail_ptr, \ 3300 (cache_ptr)->LRU_list_len, \ 3301 (cache_ptr)->LRU_list_size, (fail_val)) \ 3302 \ 3303 /* End modified LRU specific code. */ \ 3304 } \ 3305 } /* H5C__UPDATE_RP_FOR_UNPROTECT */ 3306 3307 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ 3308 3309 #ifdef H5_HAVE_PARALLEL 3310 3311 #if H5C_DO_SANITY_CHECKS 3312 3313 #define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ 3314 if ( ( (hd_ptr) == NULL ) || \ 3315 ( (tail_ptr) == NULL ) || \ 3316 ( (entry_ptr) == NULL ) || \ 3317 ( (len) <= 0 ) || \ 3318 ( (Size) < (entry_ptr)->size ) || \ 3319 ( ( (Size) == (entry_ptr)->size ) && ( ! 
( (len) == 1 ) ) ) || \ 3320 ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ 3321 ( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ 3322 ( ( (len) == 1 ) && \ 3323 ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ 3324 ( (entry_ptr)->coll_next == NULL ) && \ 3325 ( (entry_ptr)->coll_prev == NULL ) && \ 3326 ( (Size) == (entry_ptr)->size ) \ 3327 ) \ 3328 ) \ 3329 ) \ 3330 ) { \ 3331 HDassert(0 && "coll DLL pre remove SC failed"); \ 3332 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \ 3333 } 3334 3335 #define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ 3336 if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ 3337 ( (head_ptr) != (tail_ptr) ) \ 3338 ) || \ 3339 ( (len) < 0 ) || \ 3340 ( (Size) < 0 ) || \ 3341 ( ( (len) == 1 ) && \ 3342 ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ 3343 ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ 3344 ) \ 3345 ) || \ 3346 ( ( (len) >= 1 ) && \ 3347 ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \ 3348 ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \ 3349 ) \ 3350 ) \ 3351 ) { \ 3352 HDassert(0 && "COLL DLL sanity check failed"); \ 3353 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL sanity check failed") \ 3354 } 3355 3356 #define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ 3357 if ( ( (entry_ptr) == NULL ) || \ 3358 ( (entry_ptr)->coll_next != NULL ) || \ 3359 ( (entry_ptr)->coll_prev != NULL ) || \ 3360 ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ 3361 ( (hd_ptr) != (tail_ptr) ) \ 3362 ) || \ 3363 ( ( (len) == 1 ) && \ 3364 ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ 3365 ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \ 3366 ) \ 3367 ) || \ 3368 ( ( (len) >= 1 ) && \ 3369 ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \ 3370 ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != 
NULL ) \ 3371 ) \ 3372 ) \ 3373 ) { \ 3374 HDassert(0 && "COLL DLL pre insert SC failed"); \ 3375 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \ 3376 } 3377 3378 #else /* H5C_DO_SANITY_CHECKS */ 3379 3380 #define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) 3381 #define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) 3382 #define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) 3383 3384 #endif /* H5C_DO_SANITY_CHECKS */ 3385 3386 3387 #define H5C__COLL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ 3388 { \ 3389 H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ 3390 fail_val) \ 3391 if ( (head_ptr) == NULL ) \ 3392 { \ 3393 (head_ptr) = (entry_ptr); \ 3394 (tail_ptr) = (entry_ptr); \ 3395 } \ 3396 else \ 3397 { \ 3398 (tail_ptr)->coll_next = (entry_ptr); \ 3399 (entry_ptr)->coll_prev = (tail_ptr); \ 3400 (tail_ptr) = (entry_ptr); \ 3401 } \ 3402 (len)++; \ 3403 (Size) += entry_ptr->size; \ 3404 } /* H5C__COLL_DLL_APPEND() */ 3405 3406 #define H5C__COLL_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 3407 { \ 3408 H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)\ 3409 if ( (head_ptr) == NULL ) \ 3410 { \ 3411 (head_ptr) = (entry_ptr); \ 3412 (tail_ptr) = (entry_ptr); \ 3413 } \ 3414 else \ 3415 { \ 3416 (head_ptr)->coll_prev = (entry_ptr); \ 3417 (entry_ptr)->coll_next = (head_ptr); \ 3418 (head_ptr) = (entry_ptr); \ 3419 } \ 3420 (len)++; \ 3421 (Size) += entry_ptr->size; \ 3422 } /* H5C__COLL_DLL_PREPEND() */ 3423 3424 #define H5C__COLL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ 3425 { \ 3426 H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)\ 3427 { \ 3428 if ( (head_ptr) == (entry_ptr) ) \ 3429 { \ 3430 (head_ptr) = (entry_ptr)->coll_next; \ 3431 if ( (head_ptr) != NULL ) \ 3432 (head_ptr)->coll_prev = NULL; \ 3433 } \ 3434 else \ 3435 { \ 3436 
(entry_ptr)->coll_prev->coll_next = (entry_ptr)->coll_next; \ 3437 } \ 3438 if ( (tail_ptr) == (entry_ptr) ) \ 3439 { \ 3440 (tail_ptr) = (entry_ptr)->coll_prev; \ 3441 if ( (tail_ptr) != NULL ) \ 3442 (tail_ptr)->coll_next = NULL; \ 3443 } \ 3444 else \ 3445 (entry_ptr)->coll_next->coll_prev = (entry_ptr)->coll_prev; \ 3446 entry_ptr->coll_next = NULL; \ 3447 entry_ptr->coll_prev = NULL; \ 3448 (len)--; \ 3449 (Size) -= entry_ptr->size; \ 3450 } \ 3451 } /* H5C__COLL_DLL_REMOVE() */ 3452 3453 3454 /*------------------------------------------------------------------------- 3455 * 3456 * Macro: H5C__INSERT_IN_COLL_LIST 3457 * 3458 * Purpose: Insert entry into collective entries list 3459 * 3460 * Return: N/A 3461 * 3462 * Programmer: Mohamad Chaarawi 3463 * 3464 *------------------------------------------------------------------------- 3465 */ 3466 3467 #define H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ 3468 { \ 3469 HDassert( (cache_ptr) ); \ 3470 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 3471 HDassert( (entry_ptr) ); \ 3472 \ 3473 /* insert the entry at the head of the list. */ \ 3474 \ 3475 H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \ 3476 (cache_ptr)->coll_tail_ptr, \ 3477 (cache_ptr)->coll_list_len, \ 3478 (cache_ptr)->coll_list_size, \ 3479 (fail_val)) \ 3480 \ 3481 } /* H5C__INSERT_IN_COLL_LIST */ 3482 3483 3484 /*------------------------------------------------------------------------- 3485 * 3486 * Macro: H5C__REMOVE_FROM_COLL_LIST 3487 * 3488 * Purpose: Remove entry from collective entries list 3489 * 3490 * Return: N/A 3491 * 3492 * Programmer: Mohamad Chaarawi 3493 * 3494 *------------------------------------------------------------------------- 3495 */ 3496 3497 #define H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ 3498 { \ 3499 HDassert( (cache_ptr) ); \ 3500 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 3501 HDassert( (entry_ptr) ); \ 3502 \ 3503 /* remove the entry from the list. 
*/ \ 3504 \ 3505 H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr, \ 3506 (cache_ptr)->coll_tail_ptr, \ 3507 (cache_ptr)->coll_list_len, \ 3508 (cache_ptr)->coll_list_size, \ 3509 (fail_val)) \ 3510 \ 3511 } /* H5C__REMOVE_FROM_COLL_LIST */ 3512 3513 3514 /*------------------------------------------------------------------------- 3515 * 3516 * Macro: H5C__MOVE_TO_TOP_IN_COLL_LIST 3517 * 3518 * Purpose: Update entry position in collective entries list 3519 * 3520 * Return: N/A 3521 * 3522 * Programmer: Mohamad Chaarawi 3523 * 3524 *------------------------------------------------------------------------- 3525 */ 3526 3527 #define H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \ 3528 { \ 3529 HDassert( (cache_ptr) ); \ 3530 HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ 3531 HDassert( (entry_ptr) ); \ 3532 \ 3533 /* Remove entry and insert at the head of the list. */ \ 3534 H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr, \ 3535 (cache_ptr)->coll_tail_ptr, \ 3536 (cache_ptr)->coll_list_len, \ 3537 (cache_ptr)->coll_list_size, \ 3538 (fail_val)) \ 3539 \ 3540 H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \ 3541 (cache_ptr)->coll_tail_ptr, \ 3542 (cache_ptr)->coll_list_len, \ 3543 (cache_ptr)->coll_list_size, \ 3544 (fail_val)) \ 3545 \ 3546 } /* H5C__MOVE_TO_TOP_IN_COLL_LIST */ 3547 #endif /* H5_HAVE_PARALLEL */ 3548 3549 3550 /****************************/ 3551 /* Package Private Typedefs */ 3552 /****************************/ 3553 3554 /**************************************************************************** 3555 * 3556 * structure H5C_tag_info_t 3557 * 3558 * Structure about each set of tagged entries for an object in the file. 3559 * 3560 * Each H5C_tag_info_t struct corresponds to a particular object in the file. 3561 * 3562 * Each H5C_cache_entry struct in the linked list of entries for this tag 3563 * also contains a pointer back to the H5C_tag_info_t struct for the 3564 * overall object. 
 *
 *
 * The fields of this structure are discussed individually below:
 *
 * tag:         Address (i.e. "tag") of the object header for all the entries
 *              corresponding to parts of that object.
 *
 * head:        Head of doubly-linked list of all entries belonging to the tag.
 *
 * entry_cnt:   Number of entries on linked list of entries for this tag.
 *
 * corked:      Boolean flag indicating whether entries for this object can be
 *              evicted.
 *
 ****************************************************************************/
typedef struct H5C_tag_info_t {
    haddr_t tag;                /* Tag (address) of the entries (must be first, for skiplist) */
    H5C_cache_entry_t *head;    /* Head of the list of entries for this tag */
    size_t entry_cnt;           /* Number of entries on list */
    hbool_t corked;             /* Whether this object is corked */
} H5C_tag_info_t;


/****************************************************************************
 *
 * structure H5C_t
 *
 * Catchall structure for all variables specific to an instance of the cache.
 *
 * While the individual fields of the structure are discussed below, the
 * following overview may be helpful.
 *
 * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
 * the entry's disk address.  While the H5TB_TREE is less efficient than a
 * hash table, it keeps the entries in address sorted order.  As flushes
 * in parallel mode are more efficient if they are issued in increasing
 * address order, this is a significant benefit.  Also the H5TB_TREE code
 * was readily available, which reduced development time.
 *
 * While the cache was designed with multiple replacement policies in mind,
 * at present only a modified form of LRU is supported.
 *
 *                                              JRM - 4/26/04
 *
 * Profiling has indicated that searches in the instance of H5TB_TREE are
 * too expensive.  To deal with this issue, I have augmented the cache
To deal with this issue, I have augmented the cache 3611 * with a hash table in which all entries will be stored. Given the 3612 * advantages of flushing entries in increasing address order, the TBBT 3613 * is retained, but only dirty entries are stored in it. At least for 3614 * now, we will leave entries in the TBBT after they are flushed. 3615 * 3616 * Note that index_size and index_len now refer to the total size of 3617 * and number of entries in the hash table. 3618 * 3619 * JRM - 7/19/04 3620 * 3621 * The TBBT has since been replaced with a skip list. This change 3622 * greatly predates this note. 3623 * 3624 * JRM - 9/26/05 3625 * 3626 * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. 3627 * This field is used to validate pointers to instances of 3628 * H5C_t. 3629 * 3630 * flush_in_progress: Boolean flag indicating whether a flush is in 3631 * progress. 3632 * 3633 * log_info: Information used by the MDC logging functionality. 3634 * Described in H5Clog.h. 3635 * 3636 * aux_ptr: Pointer to void used to allow wrapper code to associate 3637 * its data with an instance of H5C_t. The H5C cache code 3638 * sets this field to NULL, and otherwise leaves it alone. 3639 * 3640 * max_type_id: Integer field containing the maximum type id number assigned 3641 * to a type of entry in the cache. All type ids from 0 to 3642 * max_type_id inclusive must be defined. The names of the 3643 * types are stored in the type_name_table discussed below, and 3644 * indexed by the ids. 3645 * 3646 * class_table_ptr: Pointer to an array of H5C_class_t of length 3647 * max_type_id + 1. Entry classes for the cache. 3648 * 3649 * max_cache_size: Nominal maximum number of bytes that may be stored in the 3650 * cache. This value should be viewed as a soft limit, as the 3651 * cache can exceed this value under the following circumstances: 3652 * 3653 * a) All entries in the cache are protected, and the cache is 3654 * asked to insert a new entry. 
In this case the new entry 3655 * will be created. If this causes the cache to exceed 3656 * max_cache_size, it will do so. The cache will attempt 3657 * to reduce its size as entries are unprotected. 3658 * 3659 * b) When running in parallel mode, the cache may not be 3660 * permitted to flush a dirty entry in response to a read. 3661 * If there are no clean entries available to evict, the 3662 * cache will exceed its maximum size. Again the cache 3663 * will attempt to reduce its size to the max_cache_size 3664 * limit on the next cache write. 3665 * 3666 * c) When an entry increases in size, the cache may exceed 3667 * the max_cache_size limit until the next time the cache 3668 * attempts to load or insert an entry. 3669 * 3670 * d) When the evictions_enabled field is false (see below), 3671 * the cache size will increase without limit until the 3672 * field is set to true. 3673 * 3674 * min_clean_size: Nominal minimum number of clean bytes in the cache. 3675 * The cache attempts to maintain this number of bytes of 3676 * clean data so as to avoid case b) above. Again, this is 3677 * a soft limit. 3678 * 3679 * close_warning_received: Boolean flag indicating that a file closing 3680 * warning has been received. 3681 * 3682 * 3683 * In addition to the call back functions required for each entry, the 3684 * cache requires the following call back functions for this instance of 3685 * the cache as a whole: 3686 * 3687 * check_write_permitted: In certain applications, the cache may not 3688 * be allowed to write to disk at certain time. If specified, 3689 * the check_write_permitted function is used to determine if 3690 * a write is permissible at any given point in time. 3691 * 3692 * If no such function is specified (i.e. this field is NULL), 3693 * the cache uses the following write_permitted field to 3694 * determine whether writes are permitted. 
 *
 * write_permitted:  If check_write_permitted is NULL, this boolean flag
 *              indicates whether writes are permitted.
 *
 * log_flush:   If provided, this function is called whenever a dirty
 *              entry is flushed to disk.
 *
 *
 * In cases where memory is plentiful, and performance is an issue, it may
 * be useful to disable all cache evictions, and thereby postpone metadata
 * writes.  The following field is used to implement this.
 *
 * evictions_enabled:  Boolean flag that is initialized to TRUE.  When
 *              this flag is set to FALSE, the metadata cache will not
 *              attempt to evict entries to make space for newly protected
 *              entries, and instead the cache will grow without limit.
 *
 *              Needless to say, this feature must be used with care.
 *
 *
 * The cache requires an index to facilitate searching for entries.  The
 * following fields support that index.
 *
 * Addendum:  JRM -- 10/14/15
 *
 * We sometimes need to visit all entries in the cache.  In the past, this
 * was done by scanning the hash table.  However, this is expensive, and
 * we have come to scan the hash table often enough that it has become a
 * performance issue.  To repair this, I have added code to maintain a
 * list of all entries in the index -- call this list the index list.
 *
 * The index list is maintained by the same macros that maintain the
 * index, and must have the same length and size as the index proper.
 *
 * index_len:   Number of entries currently in the hash table used to index
 *              the cache.
 *
 * index_size:  Number of bytes of cache entries currently stored in the
 *              hash table used to index the cache.
 *
 *              This value should not be mistaken for the footprint of the
 *              cache in memory.  The average cache entry is small, and
 *              the cache has a considerable overhead.
Multiplying the 3738 * index_size by three should yield a conservative estimate 3739 * of the cache's memory footprint. 3740 * 3741 * index_ring_len: Array of integer of length H5C_RING_NTYPES used to 3742 * maintain a count of entries in the index by ring. Note 3743 * that the sum of all the cells in this array must equal 3744 * the value stored in index_len above. 3745 * 3746 * index_ring_size: Array of size_t of length H5C_RING_NTYPES used to 3747 * maintain the sum of the sizes of all entries in the index 3748 * by ring. Note that the sum of all cells in this array must 3749 * equal the value stored in index_size above. 3750 * 3751 * clean_index_size: Number of bytes of clean entries currently stored in 3752 * the hash table. Note that the index_size field (above) 3753 * is also the sum of the sizes of all entries in the cache. 3754 * Thus we should have the invariant that clean_index_size + 3755 * dirty_index_size == index_size. 3756 * 3757 * WARNING: 3758 * 3759 * The value of the clean_index_size must not be mistaken 3760 * for the current clean size of the cache. Rather, the 3761 * clean size of the cache is the current value of 3762 * clean_index_size plus the amount of empty space (if any) 3763 * in the cache. 3764 * 3765 * clean_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to 3766 * maintain the sum of the sizes of all clean entries in the 3767 * index by ring. Note that the sum of all cells in this array 3768 * must equal the value stored in clean_index_size above. 3769 * 3770 * dirty_index_size: Number of bytes of dirty entries currently stored in 3771 * the hash table. Note that the index_size field (above) 3772 * is also the sum of the sizes of all entries in the cache. 3773 * Thus we should have the invariant that clean_index_size + 3774 * dirty_index_size == index_size. 
3775 * 3776 * dirty_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to 3777 * maintain the sum of the sizes of all dirty entries in the 3778 * index by ring. Note that the sum of all cells in this array 3779 * must equal the value stored in dirty_index_size above. 3780 * 3781 * index: Array of pointer to H5C_cache_entry_t of size 3782 * H5C__HASH_TABLE_LEN. At present, this value is a power 3783 * of two, not the usual prime number. 3784 * 3785 * I hope that the variable size of cache elements, the large 3786 * hash table size, and the way in which HDF5 allocates space 3787 * will combine to avoid problems with periodicity. If so, we 3788 * can use a trivial hash function (a bit-and and a 3 bit left 3789 * shift) with some small savings. 3790 * 3791 * If not, it will become evident in the statistics. Changing 3792 * to the usual prime number length hash table will require 3793 * changing the H5C__HASH_FCN macro and the deletion of the 3794 * H5C__HASH_MASK #define. No other changes should be required. 3795 * 3796 * il_len: Number of entries on the index list. 3797 * 3798 * This must always be equal to index_len. As such, this 3799 * field is redundant. However, the existing linked list 3800 * management macros expect to maintain a length field, so 3801 * this field exists primarily to avoid adding complexity to 3802 * these macros. 3803 * 3804 * il_size: Number of bytes of cache entries currently stored in the 3805 * index list. 3806 * 3807 * This must always be equal to index_size. As such, this 3808 * field is redundant. However, the existing linked list 3809 * management macros expect to maintain a size field, so 3810 * this field exists primarily to avoid adding complexity to 3811 * these macros. 3812 * 3813 * il_head: Pointer to the head of the doubly linked list of entries in 3814 * the index list. Note that cache entries on this list are 3815 * linked by their il_next and il_prev fields. 3816 * 3817 * This field is NULL if the index is empty. 
3818 * 3819 * il_tail: Pointer to the tail of the doubly linked list of entries in 3820 * the index list. Note that cache entries on this list are 3821 * linked by their il_next and il_prev fields. 3822 * 3823 * This field is NULL if the index is empty. 3824 * 3825 * 3826 * With the addition of the take ownership flag, it is possible that 3827 * an entry may be removed from the cache as the result of the flush of 3828 * a second entry. In general, this causes little trouble, but it is 3829 * possible that the entry removed may be the next entry in the scan of 3830 * a list. In this case, we must be able to detect the fact that the 3831 * entry has been removed, so that the scan doesn't attempt to proceed with 3832 * an entry that is no longer in the cache. 3833 * 3834 * The following fields are maintained to facilitate this. 3835 * 3836 * entries_removed_counter: Counter that is incremented each time an 3837 * entry is removed from the cache by any means (eviction, 3838 * expungement, or take ownership at this point in time). 3839 * Functions that perform scans on lists may set this field 3840 * to zero prior to calling H5C__flush_single_entry(). 3841 * Unexpected changes to the counter indicate that an entry 3842 * was removed from the cache as a side effect of the flush. 3843 * 3844 * last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t 3845 * which contained the last entry to be removed from the cache, 3846 * or NULL if there either is no such entry, or if a function 3847 * performing a scan of a list has set this field to NULL prior 3848 * to calling H5C__flush_single_entry(). 3849 * 3850 * WARNING!!! This field must NEVER be dereferenced. It is 3851 * maintained to allow functions that perform scans of lists 3852 * to compare this pointer with their pointers to next, thus 3853 * allowing them to avoid unnecessary restarts of scans if the 3854 * pointers don't match, and if entries_removed_counter is 3855 * one. 
 *
 * entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t
 *              which contains the 'next' entry for an iteration.  Removing
 *              this entry must trigger a rescan of the iteration, so each
 *              entry removed from the cache is compared against this pointer
 *              and the pointer is reset to NULL if the watched entry is
 *              removed.
 *              (This functions similarly to a "dead man's switch")
 *
 *
 * When we flush the cache, we need to write entries out in increasing
 * address order.  An instance of a skip list is used to store dirty entries in
 * sorted order.  Whether it is cheaper to sort the dirty entries as needed,
 * or to maintain the list is an open question.  At a guess, it depends
 * on how frequently the cache is flushed.  We will see how it goes.
 *
 * For now at least, I will not remove dirty entries from the list as they
 * are flushed. (this has been changed -- dirty entries are now removed from
 * the skip list as they are flushed.  JRM - 10/25/05)
 *
 * Update 4/21/20:
 *
 * Profiling indicates that the cost of maintaining the skip list is
 * significant.  As it is only used on flush and close, maintaining it
 * only when needed is an obvious optimization.
 *
 * To do this, we add a flag to control maintenance of the skip list.
 * This flag is initially set to FALSE, which disables all operations
 * on the skip list.
 *
 * At the beginning of either flush or close, we scan the index list,
 * insert all dirty entries in the skip list, and enable operations
 * on the skip list by setting the above control flag to true.
 *
 * At the end of a complete flush, we verify that the skip list is empty,
 * and set the control flag back to false, so as to avoid skip list
 * maintenance overhead until the next flush or close.
 *
 * In the case of a partial flush (i.e.
flush marked entries), we remove
 * all remaining entries from the skip list, and then set the control flag
 * back to false -- again avoiding skip list maintenance overhead until
 * the next flush or close.
 *
 * slist_enabled: Boolean flag used to control operation of the skip
 *              list.  If this field is FALSE, operations on the
 *              slist are no-ops, and the slist must be empty.  If
 *              it is TRUE, operations on the slist proceed as usual,
 *              and all dirty entries in the metadata cache must be
 *              listed in the slist.
 *
 * slist_changed: Boolean flag used to indicate whether the contents of
 *              the slist have changed since the last time this flag was
 *              reset.  This is used in the cache flush code to detect
 *              conditions in which pre-serialize or serialize callbacks
 *              have modified the slist -- which obliges us to restart
 *              the scan of the slist from the beginning.
 *
 * slist_len:   Number of entries currently in the skip list
 *              used to maintain a sorted list of dirty entries in the
 *              cache.
 *
 * slist_size:  Number of bytes of cache entries currently stored in the
 *              skip list used to maintain a sorted list of
 *              dirty entries in the cache.
 *
 * slist_ring_len: Array of integer of length H5C_RING_NTYPES used to
 *              maintain a count of entries in the slist by ring.  Note
 *              that the sum of all the cells in this array must equal
 *              the value stored in slist_len above.
 *
 * slist_ring_size: Array of size_t of length H5C_RING_NTYPES used to
 *              maintain the sum of the sizes of all entries in the
 *              slist by ring.  Note that the sum of all cells in this
 *              array must equal the value stored in slist_size above.
 *
 * slist_ptr:   pointer to the instance of H5SL_t used to maintain a sorted
 *              list of dirty entries in the cache.
This sorted list has 3933 * two uses: 3934 * 3935 * a) It allows us to flush dirty entries in increasing address 3936 * order, which results in significant savings. 3937 * 3938 * b) It facilitates checking for adjacent dirty entries when 3939 * attempting to evict entries from the cache. While we 3940 * don't use this at present, I hope that this will allow 3941 * some optimizations when I get to it. 3942 * 3943 * num_last_entries: The number of entries in the cache that can only be 3944 * flushed after all other entries in the cache have 3945 * been flushed. At this time, this will only ever be 3946 * one entry (the superblock), and the code has been 3947 * protected with HDasserts to enforce this. This restraint 3948 * can certainly be relaxed in the future if the need for 3949 * multiple entries being flushed last arises, though 3950 * explicit tests for that case should be added when said 3951 * HDasserts are removed. 3952 * 3953 * Update: There are now two possible last entries 3954 * (superblock and file driver info message). This 3955 * number will probably increase as we add superblock 3956 * messages. JRM -- 11/18/14 3957 * 3958 * With the addition of the fractal heap, the cache must now deal with 3959 * the case in which entries may be dirtied, moved, or have their sizes 3960 * changed during a flush. To allow sanity checks in this situation, the 3961 * following two fields have been added. They are only compiled in when 3962 * H5C_DO_SANITY_CHECKS is TRUE. 3963 * 3964 * slist_len_increase: Number of entries that have been added to the 3965 * slist since the last time this field was set to zero. 3966 * Note that this value can be negative. 3967 * 3968 * slist_size_increase: Total size of all entries that have been added 3969 * to the slist since the last time this field was set to 3970 * zero. Note that this value can be negative. 
3971 * 3972 * Cache entries belonging to a particular object are "tagged" with that 3973 * object's base object header address. 3974 * 3975 * The following fields are maintained to facilitate this. 3976 * 3977 * tag_list: A skip list to track entries that belong to an object. 3978 * Each H5C_tag_info_t struct on the tag list corresponds to 3979 * a particular object in the file. Tagged entries can be 3980 * flushed or evicted as a group, or corked to prevent entries 3981 * from being evicted from the cache. 3982 * 3983 * "Global" entries, like the superblock and the file's 3984 * freelist, as well as shared entries like global 3985 * heaps and shared object header messages, are not tagged. 3986 * 3987 * ignore_tags: Boolean flag to disable tag validation during entry insertion. 3988 * 3989 * num_objs_corked: Unsigned integer field containing the number of objects 3990 * that are "corked". The "corked" status of an object is 3991 * found by searching the "tag_list". This field is added 3992 * for optimization so that the skip list search on "tag_list" 3993 * can be skipped if this field is zero, i.e. no "corked" 3994 * objects. 3995 * 3996 * When a cache entry is protected, it must be removed from the LRU 3997 * list(s) as it cannot be either flushed or evicted until it is unprotected. 3998 * The following fields are used to implement the protected list (pl). 3999 * 4000 * pl_len: Number of entries currently residing on the protected list. 4001 * 4002 * pl_size: Number of bytes of cache entries currently residing on the 4003 * protected list. 4004 * 4005 * pl_head_ptr: Pointer to the head of the doubly linked list of protected 4006 * entries. Note that cache entries on this list are linked 4007 * by their next and prev fields. 4008 * 4009 * This field is NULL if the list is empty. 4010 * 4011 * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected 4012 * entries. Note that cache entries on this list are linked 4013 * by their next and prev fields. 
4014 * 4015 * This field is NULL if the list is empty. 4016 * 4017 * 4018 * For very frequently used entries, the protect/unprotect overhead can 4019 * become burdensome. To avoid this overhead, I have modified the cache 4020 * to allow entries to be "pinned". A pinned entry is similar to a 4021 * protected entry, in the sense that it cannot be evicted, and that 4022 * the entry can be modified at any time. 4023 * 4024 * Pinning an entry has the following implications: 4025 * 4026 * 1) A pinned entry cannot be evicted. Thus unprotected 4027 * pinned entries reside in the pinned entry list, instead 4028 * of the LRU list(s) (or other lists maintained by the current 4029 * replacement policy code). 4030 * 4031 * 2) A pinned entry can be accessed or modified at any time. 4032 * This places an additional burden on the associated pre-serialize 4033 * and serialize callbacks, which must ensure the the entry is in 4034 * a consistent state before creating an image of it. 4035 * 4036 * 3) A pinned entry can be marked as dirty (and possibly 4037 * change size) while it is unprotected. 4038 * 4039 * 4) The flush-destroy code must allow pinned entries to 4040 * be unpinned (and possibly unprotected) during the 4041 * flush. 4042 * 4043 * Since pinned entries cannot be evicted, they must be kept on a pinned 4044 * entry list (pel), instead of being entrusted to the replacement policy 4045 * code. 4046 * 4047 * Maintaining the pinned entry list requires the following fields: 4048 * 4049 * pel_len: Number of entries currently residing on the pinned 4050 * entry list. 4051 * 4052 * pel_size: Number of bytes of cache entries currently residing on 4053 * the pinned entry list. 4054 * 4055 * pel_head_ptr: Pointer to the head of the doubly linked list of pinned 4056 * but not protected entries. Note that cache entries on 4057 * this list are linked by their next and prev fields. 4058 * 4059 * This field is NULL if the list is empty. 
4060 * 4061 * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned 4062 * but not protected entries. Note that cache entries on 4063 * this list are linked by their next and prev fields. 4064 * 4065 * This field is NULL if the list is empty. 4066 * 4067 * 4068 * The cache must have a replacement policy, and the fields supporting this 4069 * policy must be accessible from this structure. 4070 * 4071 * While there has been interest in several replacement policies for 4072 * this cache, the initial development schedule is tight. Thus I have 4073 * elected to support only a modified LRU (least recently used) policy 4074 * for the first cut. 4075 * 4076 * To further simplify matters, I have simply included the fields needed 4077 * by the modified LRU in this structure. When and if we add support for 4078 * other policies, it will probably be easiest to just add the necessary 4079 * fields to this structure as well -- we only create one instance of this 4080 * structure per file, so the overhead is not excessive. 4081 * 4082 * 4083 * Fields supporting the modified LRU policy: 4084 * 4085 * See most any OS text for a discussion of the LRU replacement policy. 4086 * 4087 * When operating in parallel mode, we must ensure that a read does not 4088 * cause a write. If it does, the process will hang, as the write will 4089 * be collective and the other processes will not know to participate. 4090 * 4091 * To deal with this issue, I have modified the usual LRU policy by adding 4092 * clean and dirty LRU lists to the usual LRU list. In general, these 4093 * lists are only exist in parallel builds. 4094 * 4095 * The clean LRU list is simply the regular LRU list with all dirty cache 4096 * entries removed. 4097 * 4098 * Similarly, the dirty LRU list is the regular LRU list with all the clean 4099 * cache entries removed. 4100 * 4101 * When reading in parallel mode, we evict from the clean LRU list only. 
4102 * This implies that we must try to ensure that the clean LRU list is 4103 * reasonably well stocked at all times. 4104 * 4105 * We attempt to do this by trying to flush enough entries on each write 4106 * to keep the cLRU_list_size >= min_clean_size. 4107 * 4108 * Even if we start with a completely clean cache, a sequence of protects 4109 * without unprotects can empty the clean LRU list. In this case, the 4110 * cache must grow temporarily. At the next sync point, we will attempt to 4111 * evict enough entries to reduce index_size to less than max_cache_size. 4112 * While this will usually be possible, all bets are off if enough entries 4113 * are protected. 4114 * 4115 * Discussions of the individual fields used by the modified LRU replacement 4116 * policy follow: 4117 * 4118 * LRU_list_len: Number of cache entries currently on the LRU list. 4119 * 4120 * Observe that LRU_list_len + pl_len + pel_len must always 4121 * equal index_len. 4122 * 4123 * LRU_list_size: Number of bytes of cache entries currently residing on the 4124 * LRU list. 4125 * 4126 * Observe that LRU_list_size + pl_size + pel_size must always 4127 * equal index_size. 4128 * 4129 * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache 4130 * entries on this list are linked by their next and prev fields. 4131 * 4132 * This field is NULL if the list is empty. 4133 * 4134 * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache 4135 * entries on this list are linked by their next and prev fields. 4136 * 4137 * This field is NULL if the list is empty. 4138 * 4139 * cLRU_list_len: Number of cache entries currently on the clean LRU list. 4140 * 4141 * Observe that cLRU_list_len + dLRU_list_len must always 4142 * equal LRU_list_len. 4143 * 4144 * cLRU_list_size: Number of bytes of cache entries currently residing on 4145 * the clean LRU list. 4146 * 4147 * Observe that cLRU_list_size + dLRU_list_size must always 4148 * equal LRU_list_size. 
 *
 * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
 *              Cache entries on this list are linked by their aux_next and
 *              aux_prev fields.
 *
 *              This field is NULL if the list is empty.
 *
 * cLRU_tail_ptr:  Pointer to the tail of the doubly linked clean LRU list.
 *              Cache entries on this list are linked by their aux_next and
 *              aux_prev fields.
 *
 *              This field is NULL if the list is empty.
 *
 * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
 *
 *              Observe that cLRU_list_len + dLRU_list_len must always
 *              equal LRU_list_len.
 *
 * dLRU_list_size:  Number of bytes of cache entries currently residing on
 *              the dirty LRU list.
 *
 *              Observe that cLRU_list_size + dLRU_list_size must always
 *              equal LRU_list_size.
 *
 * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
 *              Cache entries on this list are linked by their aux_next and
 *              aux_prev fields.
 *
 *              This field is NULL if the list is empty.
 *
 * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
 *              Cache entries on this list are linked by their aux_next and
 *              aux_prev fields.
 *
 *              This field is NULL if the list is empty.
 *
 *
 * Automatic cache size adjustment:
 *
 * While the default cache size is adequate for most cases, we can run into
 * cases where the default is too small.  Ideally, we will let the user
 * adjust the cache size as required.  However, this is not possible in all
 * cases.  Thus I have added automatic cache size adjustment code.
 *
 * The configuration for the automatic cache size adjustment is stored in
 * the structure described below:
 *
 * size_increase_possible:  Depending on the configuration data given
 *              in the resize_ctl field, it may or may not be possible
 *              to increase the size of the cache.
Rather than test for 4198 * all the ways this can happen, we simply set this flag when 4199 * we receive a new configuration. 4200 * 4201 * flash_size_increase_possible: Depending on the configuration data given 4202 * in the resize_ctl field, it may or may not be possible 4203 * for a flash size increase to occur. We set this flag 4204 * whenever we receive a new configuration so as to avoid 4205 * repeated calculations. 4206 * 4207 * flash_size_increase_threshold: If a flash cache size increase is possible, 4208 * this field is used to store the minimum size of a new entry 4209 * or size increase needed to trigger a flash cache size 4210 * increase. Note that this field must be updated whenever 4211 * the size of the cache is changed. 4212 * 4213 * size_decrease_possible: Depending on the configuration data given 4214 * in the resize_ctl field, it may or may not be possible 4215 * to decrease the size of the cache. Rather than test for 4216 * all the ways this can happen, we simply set this flag when 4217 * we receive a new configuration. 4218 * 4219 * resize_enabled: This is another convenience flag which is set whenever 4220 * a new set of values for resize_ctl are provided. Very 4221 * simply, 4222 * 4223 * resize_enabled = size_increase_possible || 4224 * size_decrease_possible; 4225 * 4226 * cache_full: Boolean flag used to keep track of whether the cache is 4227 * full, so we can refrain from increasing the size of a 4228 * cache which hasn't used up the space allotted to it. 4229 * 4230 * The field is initialized to FALSE, and then set to TRUE 4231 * whenever we attempt to make space in the cache. 4232 * 4233 * size_decreased: Boolean flag set to TRUE whenever the maximum cache 4234 * size is decreased. The flag triggers a call to 4235 * H5C__make_space_in_cache() on the next call to H5C_protect(). 
4236 * 4237 * resize_in_progress: As the metadata cache has become re-entrant, it is 4238 * possible that a protect may trigger a call to 4239 * H5C__auto_adjust_cache_size(), which may trigger a flush, 4240 * which may trigger a protect, which will result in another 4241 * call to H5C__auto_adjust_cache_size(). 4242 * 4243 * The resize_in_progress boolean flag is used to detect this, 4244 * and to prevent the infinite recursion that would otherwise 4245 * occur. 4246 * 4247 * Note that this issue is not hypothetical -- this field 4248 * was added 12/29/15 to fix a bug exposed in the testing 4249 * of changes to the file driver info superblock extension 4250 * management code needed to support rings. 4251 * 4252 * msic_in_progress: As the metadata cache has become re-entrant, and as 4253 * the free space manager code has become more tightly 4254 * integrated with the metadata cache, it is possible that 4255 * a call to H5C_insert_entry() may trigger a call to 4256 * H5C_make_space_in_cache(), which, via H5C__flush_single_entry() 4257 * and client callbacks, may trigger an infinite regression 4258 * of calls to H5C_make_space_in_cache(). 4259 * 4260 * The msic_in_progress boolean flag is used to detect this, 4261 * and prevent the infinite regression that would otherwise 4262 * occur. 4263 * 4264 * Note that this is issue is not hypothetical -- this field 4265 * was added 2/16/17 to address this issue when it was 4266 * exposed by modifications to test/fheap.c to cause it to 4267 * use paged allocation. 4268 * 4269 * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration 4270 * data for automatic cache resizing. 4271 * 4272 * epoch_markers_active: Integer field containing the number of epoch 4273 * markers currently in use in the LRU list. This value 4274 * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1]. 4275 * 4276 * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS. 
4277 * This array is used to track which epoch markers are currently 4278 * in use. 4279 * 4280 * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1. 4281 * 4282 * To manage the epoch marker cache entries, it is necessary 4283 * to track their order in the LRU list. This is done with 4284 * epoch_marker_ringbuf. When markers are inserted at the 4285 * head of the LRU list, the index of the marker in the 4286 * epoch_markers array is inserted at the tail of the ring 4287 * buffer. When it becomes the epoch_marker_active'th marker 4288 * in the LRU list, it will have worked its way to the head 4289 * of the ring buffer as well. This allows us to remove it 4290 * without scanning the LRU list if such is required. 4291 * 4292 * epoch_marker_ringbuf_first: Integer field containing the index of the 4293 * first entry in the ring buffer. 4294 * 4295 * epoch_marker_ringbuf_last: Integer field containing the index of the 4296 * last entry in the ring buffer. 4297 * 4298 * epoch_marker_ringbuf_size: Integer field containing the number of entries 4299 * in the ring buffer. 4300 * 4301 * epoch_markers: Array of instances of H5C_cache_entry_t of length 4302 * H5C__MAX_EPOCH_MARKERS. The entries are used as markers 4303 * in the LRU list to identify cache entries that haven't 4304 * been accessed for some (small) specified number of 4305 * epochs. These entries (if any) can then be evicted and 4306 * the cache size reduced -- ideally without evicting any 4307 * of the current working set. Needless to say, the epoch 4308 * length and the number of epochs before an unused entry 4309 * must be chosen so that all, or almost all, the working 4310 * set will be accessed before the limit. 4311 * 4312 * Epoch markers only appear in the LRU list, never in 4313 * the index or slist. While they are of type 4314 * H5C__EPOCH_MARKER_TYPE, and have associated class 4315 * functions, these functions should never be called. 
 *
 * The addr fields of these instances of H5C_cache_entry_t
 * are set to the index of the instance in the epoch_markers
 * array, the size is set to 0, and the type field points
 * to the constant structure epoch_marker_class defined
 * in H5C.c. The next and prev fields are used as usual
 * to link the entry into the LRU list.
 *
 * All other fields are unused.
 *
 *
 * Cache hit rate collection fields:
 *
 * We supply the current cache hit rate on request, so we must keep a
 * simple cache hit rate computation regardless of whether statistics
 * collection is enabled. The following fields support this capability.
 *
 * cache_hits: Number of cache hits since the last time the cache hit
 *      rate statistics were reset. Note that when automatic cache
 *      re-sizing is enabled, this field will be reset every automatic
 *      resize epoch.
 *
 * cache_accesses: Number of times the cache has been accessed since
 *      the last time the cache hit rate statistics were reset.
 *      Note that when automatic cache re-sizing is enabled,
 *      this field will be reset every automatic resize epoch.
 *
 *
 * Metadata cache image management related fields.
 *
 * image_ctl: Instance of H5C_cache_image_ctl_t containing configuration
 *      data for generation of a cache image on file close.
 *
 * serialization_in_progress: Boolean field that is set to TRUE iff
 *      the cache is in the process of being serialized. This
 *      field is needed to support the H5C_serialization_in_progress()
 *      call, which is in turn required for sanity checks in some
 *      cache clients.
 *
 * load_image: Boolean flag indicating that the metadata cache image
 *      superblock extension message exists and should be
 *      read, and the image block read and decoded on the next
 *      call to H5C_protect().
4359 * 4360 * image_loaded: Boolean flag indicating that the metadata cache has 4361 * loaded the metadata cache image as directed by the 4362 * MDC cache image superblock extension message. 4363 * 4364 * delete_image: Boolean flag indicating whether the metadata cache image 4365 * superblock message should be deleted and the cache image 4366 * file space freed after they have been read and decoded. 4367 * 4368 * This flag should be set to TRUE iff the file is opened 4369 * R/W and there is a cache image to be read. 4370 * 4371 * image_addr: haddr_t containing the base address of the on disk 4372 * metadata cache image, or HADDR_UNDEF if that value is 4373 * undefined. Note that this field is used both in the 4374 * construction and write, and the read and decode of 4375 * metadata cache image blocks. 4376 * 4377 * image_len: hsize_t containing the size of the on disk metadata cache 4378 * image, or zero if that value is undefined. Note that this 4379 * field is used both in the construction and write, and the 4380 * read and decode of metadata cache image blocks. 4381 * 4382 * image_data_len: size_t containing the number of bytes of data in the 4383 * on disk metadata cache image, or zero if that value is 4384 * undefined. 4385 * 4386 * In most cases, this value is the same as the image_len 4387 * above. It exists to allow for metadata cache image blocks 4388 * that are larger than the actual image. Thus in all 4389 * cases image_data_len <= image_len. 4390 * 4391 * To create the metadata cache image, we must first serialize all the 4392 * entries in the metadata cache. This is done by a scan of the index. 4393 * As entries must be serialized in increasing flush dependency height 4394 * order, we scan the index repeatedly, once for each flush dependency 4395 * height in increasing order. 
 *
 * This operation is complicated by the fact that entries other than the
 * target may be inserted, loaded, relocated, or removed from the cache
 * (either by eviction or the take ownership flag) as the result of a
 * pre_serialize or serialize callback. While entry removals are not
 * a problem for the scan of the index, insertions, loads, and relocations
 * are. Hence the entries loaded, inserted, and relocated counters
 * listed below have been implemented to allow these conditions to be
 * detected and dealt with by restarting the scan.
 *
 * The serialization operation is further complicated by the fact that
 * the flush dependency height of a given entry may increase (as the
 * result of an entry load or insert) or decrease (as the result of an
 * entry removal -- via either eviction or the take ownership flag). The
 * entry_fd_height_change_counter field is maintained to allow detection
 * of this condition, and a restart of the scan when it occurs.
 *
 * Note that all these new fields would work just as well as booleans.
 *
 * entries_loaded_counter: Number of entries loaded into the cache
 *      since the last time this field was reset.
 *
 * entries_inserted_counter: Number of entries inserted into the cache
 *      since the last time this field was reset.
 *
 * entries_relocated_counter: Number of entries whose base address has
 *      been changed since the last time this field was reset.
 *
 * entry_fd_height_change_counter: Number of entries whose flush dependency
 *      height has changed since the last time this field was reset.
 *
 * The following fields are used to assemble the cache image prior to
 * writing it to disk.
 *
 * num_entries_in_image: Unsigned integer field containing the number of entries
 *      to be copied into the metadata cache image.
Note that
 *      this value will be less than the number of entries in
 *      the cache, as the superblock and its related entries
 *      are not written to the metadata cache image.
 *
 * image_entries: Pointer to a dynamically allocated array of instances of
 *      H5C_image_entry_t of length num_entries_in_image, or NULL
 *      if that array does not exist. This array is used to
 *      assemble entry data to be included in the image, and to
 *      sort them by flush dependency height and LRU rank.
 *
 * image_buffer: Pointer to the dynamically allocated buffer of length
 *      image_len in which the metadata cache image is assembled,
 *      or NULL if that buffer does not exist.
 *
 *
 * Free Space Manager Related fields:
 *
 * The free space managers must be informed when we are about to close
 * or flush the file so that they order themselves accordingly. This used
 * to be done much later in the close process, but with cache image and
 * page buffering, this is no longer viable, as we must finalize the on
 * disk image of all metadata much sooner.
 *
 * This is handled by the H5MF_settle_raw_data_fsm() and
 * H5MF_settle_meta_data_FSM() routines. As these calls are expensive,
 * the following fields are used to track whether the target free space
 * managers are clean.
 *
 * They are also used in sanity checking, as once a free space manager is
 * settled, it should not become unsettled (i.e. be asked to allocate or
 * free file space) either ever (in the case of a file close) or until the
 * flush is complete.
 *
 * rdfsm_settled: Boolean flag indicating whether the raw data free space
 *      manager is settled -- i.e. whether the correct space has
 *      been allocated for it in the file.
 *
 *      Note that the name of this field is deceptive.
In the 4470 * multi file case, the flag applies to all free space 4471 * managers that are not involved in allocating space for 4472 * free space manager metadata. 4473 * 4474 * mdfsm_settled: Boolean flag indicating whether the meta data free space 4475 * manager is settled -- i.e. whether the correct space has 4476 * been allocated for it in the file. 4477 * 4478 * Note that the name of this field is deceptive. In the 4479 * multi file case, the flag applies only to free space 4480 * managers that are involved in allocating space for free 4481 * space managers. 4482 * 4483 * 4484 * Statistics collection fields: 4485 * 4486 * When enabled, these fields are used to collect statistics as described 4487 * below. The first set are collected only when H5C_COLLECT_CACHE_STATS 4488 * is true. 4489 * 4490 * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4491 * are used to record the number of times an entry with type id 4492 * equal to the array index has been in cache when requested in 4493 * the current epoch. 4494 * 4495 * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4496 * are used to record the number of times an entry with type id 4497 * equal to the array index has not been in cache when 4498 * requested in the current epoch. 4499 * 4500 * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The 4501 * cells are used to record the number of times an entry with 4502 * type id equal to the array index has been write protected 4503 * in the current epoch. 4504 * 4505 * Observe that (hits + misses) = (write_protects + read_protects). 4506 * 4507 * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The 4508 * cells are used to record the number of times an entry with 4509 * type id equal to the array index has been read protected in 4510 * the current epoch. 4511 * 4512 * Observe that (hits + misses) = (write_protects + read_protects). 
 *
 * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
 *      The cells are used to record the maximum number of simultaneous
 *      read protects on any entry with type id equal to the array index
 *      in the current epoch.
 *
 * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
 *      are used to record the number of times an entry with type
 *      id equal to the array index has been inserted into the
 *      cache in the current epoch.
 *
 * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
 *      The cells are used to record the number of times an entry
 *      with type id equal to the array index has been inserted
 *      pinned into the cache in the current epoch.
 *
 * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
 *      are used to record the number of times a dirty entry with type
 *      id equal to the array index has been cleared in the current
 *      epoch.
 *
 * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
 *      are used to record the number of times an entry with type id
 *      equal to the array index has been written to disk in the
 *      current epoch.
 *
 * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
 *      are used to record the number of times an entry with type id
 *      equal to the array index has been evicted from the cache in
 *      the current epoch.
 *
 * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
 *      cells are used to record the number of times an entry with
 *      type id equal to the array index has been removed from the
 *      cache via the H5C__TAKE_OWNERSHIP_FLAG in the current epoch.
 *
 * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
 *      are used to record the number of times an entry with type
 *      id equal to the array index has been moved in the current
 *      epoch.
4553 * 4554 * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. 4555 * The cells are used to record the number of times an entry 4556 * with type id equal to the array index has been moved 4557 * during its pre-serialize callback in the current epoch. 4558 * 4559 * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. 4560 * The cells are used to record the number of times an entry 4561 * with type id equal to the array index has been moved 4562 * during a cache flush in the current epoch. 4563 * 4564 * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4565 * are used to record the number of times an entry with type 4566 * id equal to the array index has been pinned in the current 4567 * epoch. 4568 * 4569 * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4570 * are used to record the number of times an entry with type 4571 * id equal to the array index has been unpinned in the current 4572 * epoch. 4573 * 4574 * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4575 * are used to record the number of times an entry with type 4576 * id equal to the array index has been marked dirty while pinned 4577 * in the current epoch. 4578 * 4579 * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The 4580 * cells are used to record the number of times an entry 4581 * with type id equal to the array index has been flushed while 4582 * pinned in the current epoch. 4583 * 4584 * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The 4585 * cells are used to record the number of times an entry 4586 * with type id equal to the array index has been cleared while 4587 * pinned in the current epoch. 4588 * 4589 * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. 4590 * The cells are used to record the number of times an entry 4591 * with type id equal to the array index has increased in 4592 * size in the current epoch. 
4593 * 4594 * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. 4595 * The cells are used to record the number of times an entry 4596 * with type id equal to the array index has decreased in 4597 * size in the current epoch. 4598 * 4599 * entry_flush_size_changes: Array of int64 of length 4600 * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record 4601 * the number of times an entry with type id equal to the 4602 * array index has changed size while in its pre-serialize 4603 * callback. 4604 * 4605 * cache_flush_size_changes: Array of int64 of length 4606 * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record 4607 * the number of times an entry with type id equal to the 4608 * array index has changed size during a cache flush 4609 * 4610 * total_ht_insertions: Number of times entries have been inserted into the 4611 * hash table in the current epoch. 4612 * 4613 * total_ht_deletions: Number of times entries have been deleted from the 4614 * hash table in the current epoch. 4615 * 4616 * successful_ht_searches: int64 containing the total number of successful 4617 * searches of the hash table in the current epoch. 4618 * 4619 * total_successful_ht_search_depth: int64 containing the total number of 4620 * entries other than the targets examined in successful 4621 * searches of the hash table in the current epoch. 4622 * 4623 * failed_ht_searches: int64 containing the total number of unsuccessful 4624 * searches of the hash table in the current epoch. 4625 * 4626 * total_failed_ht_search_depth: int64 containing the total number of 4627 * entries examined in unsuccessful searches of the hash 4628 * table in the current epoch. 4629 * 4630 * max_index_len: Largest value attained by the index_len field in the 4631 * current epoch. 4632 * 4633 * max_index_size: Largest value attained by the index_size field in the 4634 * current epoch. 
4635 * 4636 * max_clean_index_size: Largest value attained by the clean_index_size field 4637 * in the current epoch. 4638 * 4639 * max_dirty_index_size: Largest value attained by the dirty_index_size field 4640 * in the current epoch. 4641 * 4642 * max_slist_len: Largest value attained by the slist_len field in the 4643 * current epoch. 4644 * 4645 * max_slist_size: Largest value attained by the slist_size field in the 4646 * current epoch. 4647 * 4648 * max_pl_len: Largest value attained by the pl_len field in the 4649 * current epoch. 4650 * 4651 * max_pl_size: Largest value attained by the pl_size field in the 4652 * current epoch. 4653 * 4654 * max_pel_len: Largest value attained by the pel_len field in the 4655 * current epoch. 4656 * 4657 * max_pel_size: Largest value attained by the pel_size field in the 4658 * current epoch. 4659 * 4660 * calls_to_msic: Total number of calls to H5C__make_space_in_cache 4661 * 4662 * total_entries_skipped_in_msic: Number of clean entries skipped while 4663 * enforcing the min_clean_fraction in H5C__make_space_in_cache(). 4664 * 4665 * total_dirty_pf_entries_skipped_in_msic: Number of dirty prefetched entries 4666 * skipped in H5C__make_space_in_cache(). Note that this can 4667 * only occur when a file is opened R/O with a cache image 4668 * containing dirty entries. 4669 * 4670 * total_entries_scanned_in_msic: Number of clean entries skipped while 4671 * enforcing the min_clean_fraction in H5C__make_space_in_cache(). 4672 * 4673 * max_entries_skipped_in_msic: Maximum number of clean entries skipped 4674 * in any one call to H5C__make_space_in_cache(). 4675 * 4676 * max_dirty_pf_entries_skipped_in_msic: Maximum number of dirty prefetched 4677 * entries skipped in any one call to H5C__make_space_in_cache(). 4678 * Note that this can only occur when the file is opened 4679 * R/O with a cache image containing dirty entries. 
4680 * 4681 * max_entries_scanned_in_msic: Maximum number of entries scanned over 4682 * in any one call to H5C__make_space_in_cache(). 4683 * 4684 * entries_scanned_to_make_space: Number of entries scanned only when looking 4685 * for entries to evict in order to make space in cache. 4686 * 4687 * 4688 * The following fields track statistics on cache images. 4689 * 4690 * images_created: Integer field containing the number of cache images 4691 * created since the last time statistics were reset. 4692 * 4693 * At present, this field must always be either 0 or 1. 4694 * Further, since cache images are only created at file 4695 * close, this field should only be set at that time. 4696 * 4697 * images_read: Integer field containing the number of cache images 4698 * read from file. Note that reading an image is different 4699 * from loading it -- reading the image means just that, 4700 * while loading the image refers to decoding it and loading 4701 * it into the metadata cache. 4702 * 4703 * In the serial case, image_read should always equal 4704 * images_loaded. However, in the parallel case, the 4705 * image should only be read by process 0. All other 4706 * processes should receive the cache image via a broadcast 4707 * from process 0. 4708 * 4709 * images_loaded: Integer field containing the number of cache images 4710 * loaded since the last time statistics were reset. 4711 * 4712 * At present, this field must always be either 0 or 1. 4713 * Further, since cache images are only loaded at the 4714 * time of the first protect or on file close, this value 4715 * should only change on those events. 4716 * 4717 * last_image_size: Size of the most recently loaded metadata cache image 4718 * loaded into the cache, or zero if no image has been 4719 * loaded. 
 *
 * At present, at most one cache image can be loaded into
 * the metadata cache for any given file, and this image
 * will be loaded either on the first protect, or on file
 * close if no entry is protected before then.
 *
 *
 * Fields for tracking prefetched entries. Note that flushes and evictions
 * of prefetched entries are tracked in the flushes and evictions arrays
 * discussed above.
 *
 * prefetches: Number of prefetched entries that are loaded to the
 *      cache.
 *
 * dirty_prefetches: Number of dirty prefetched entries that are loaded
 *      into the cache.
 *
 * prefetch_hits: Number of prefetched entries that are actually used.
 *
 *
 * As entries are now capable of moving, loading, dirtying, and deleting
 * other entries in their pre_serialize and serialize callbacks, it has
 * been necessary to insert code to restart scans of lists so as to avoid
 * improper behavior if the next entry in the list is the target of one of
 * these operations.
 *
 * The following fields are used to count such occurrences. They are used
 * both in tests (to verify that the scan has been restarted), and to
 * obtain estimates of how frequently these restarts occur.
 *
 * slist_scan_restarts: Number of times a scan of the slist (that contains
 *      calls to H5C__flush_single_entry()) has been restarted to
 *      avoid potential issues with change of status of the next
 *      entry in the scan.
 *
 * LRU_scan_restarts: Number of times a scan of the LRU list (that contains
 *      calls to H5C__flush_single_entry()) has been restarted to
 *      avoid potential issues with change of status of the next
 *      entry in the scan.
4759 * 4760 * index_scan_restarts: Number of times a scan of the index has been 4761 * restarted to avoid potential issues with load, insertion 4762 * or change in flush dependency height of an entry other 4763 * than the target entry as the result of call(s) to the 4764 * pre_serialize or serialize callbacks. 4765 * 4766 * Note that at present, this condition can only be triggered 4767 * by a call to H5C_serialize_single_entry(). 4768 * 4769 * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS 4770 * and H5C_COLLECT_CACHE_ENTRY_STATS are true. 4771 * 4772 * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4773 * are used to record the maximum number of times any single 4774 * entry with type id equal to the array index has been 4775 * accessed in the current epoch. 4776 * 4777 * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4778 * are used to record the minimum number of times any single 4779 * entry with type id equal to the array index has been 4780 * accessed in the current epoch. 4781 * 4782 * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4783 * are used to record the maximum number of times any single 4784 * entry with type id equal to the array index has been cleared 4785 * in the current epoch. 4786 * 4787 * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4788 * are used to record the maximum number of times any single 4789 * entry with type id equal to the array index has been 4790 * flushed in the current epoch. 4791 * 4792 * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells 4793 * are used to record the maximum size of any single entry 4794 * with type id equal to the array index that has resided in 4795 * the cache in the current epoch. 4796 * 4797 * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. 
The cells
 *      are used to record the maximum number of times that any single
 *      entry with type id equal to the array index has been
 *      marked as pinned in the cache in the current epoch.
 *
 *
 * Fields supporting testing:
 *
 * prefix: Array of char used to prefix debugging output. The
 *      field is intended to allow marking of output with the
 *      process's MPI rank.
 *
 * get_entry_ptr_from_addr_counter: Counter used to track the number of
 *      times the H5C_get_entry_ptr_from_addr() function has been
 *      called successfully. This field is only defined when
 *      NDEBUG is not #defined.
 *
 ****************************************************************************/

/* The metadata cache proper.  Every field is documented in detail in the
 * comment block immediately above.
 */
struct H5C_t {
    /* General configuration and state */
    uint32_t                    magic;              /* sanity-check value -- see H5C__H5C_T_MAGIC */
    hbool_t                     flush_in_progress;
    H5C_log_info_t             *log_info;           /* cache logging state (H5Clog) */
    void                       *aux_ptr;
    int32_t                     max_type_id;
    const H5C_class_t * const  *class_table_ptr;
    size_t                      max_cache_size;
    size_t                      min_clean_size;
    H5C_write_permitted_func_t  check_write_permitted;
    hbool_t                     write_permitted;
    H5C_log_flush_func_t        log_flush;
    hbool_t                     evictions_enabled;
    hbool_t                     close_warning_received;

    /* Fields for maintaining the [hash table] index of entries */
    uint32_t                    index_len;
    size_t                      index_size;
    uint32_t                    index_ring_len[H5C_RING_NTYPES];
    size_t                      index_ring_size[H5C_RING_NTYPES];
    size_t                      clean_index_size;
    size_t                      clean_index_ring_size[H5C_RING_NTYPES];
    size_t                      dirty_index_size;
    size_t                      dirty_index_ring_size[H5C_RING_NTYPES];
    H5C_cache_entry_t          *index[H5C__HASH_TABLE_LEN];
    uint32_t                    il_len;
    size_t                      il_size;
    H5C_cache_entry_t          *il_head;
    H5C_cache_entry_t          *il_tail;

    /* Fields to detect entries removed during scans */
    int64_t                     entries_removed_counter;
    H5C_cache_entry_t          *last_entry_removed_ptr;
    H5C_cache_entry_t          *entry_watched_for_removal;

    /* Fields for maintaining list of in-order entries, for flushing */
    hbool_t                     slist_enabled;
    hbool_t                     slist_changed;
    uint32_t                    slist_len;
    size_t                      slist_size;
    uint32_t                    slist_ring_len[H5C_RING_NTYPES];
    size_t                      slist_ring_size[H5C_RING_NTYPES];
    H5SL_t                     *slist_ptr;
    uint32_t                    num_last_entries;
#if H5C_DO_SANITY_CHECKS
    int32_t                     slist_len_increase;
    int64_t                     slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */

    /* Fields for maintaining list of tagged entries */
    H5SL_t                     *tag_list;
    hbool_t                     ignore_tags;
    uint32_t                    num_objs_corked;

    /* Fields for tracking protected entries */
    uint32_t                    pl_len;
    size_t                      pl_size;
    H5C_cache_entry_t          *pl_head_ptr;
    H5C_cache_entry_t          *pl_tail_ptr;

    /* Fields for tracking pinned entries */
    uint32_t                    pel_len;
    size_t                      pel_size;
    H5C_cache_entry_t          *pel_head_ptr;
    H5C_cache_entry_t          *pel_tail_ptr;

    /* Fields for complete LRU list of entries */
    uint32_t                    LRU_list_len;
    size_t                      LRU_list_size;
    H5C_cache_entry_t          *LRU_head_ptr;
    H5C_cache_entry_t          *LRU_tail_ptr;

#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
    /* Fields for clean LRU list of entries */
    uint32_t                    cLRU_list_len;
    size_t                      cLRU_list_size;
    H5C_cache_entry_t          *cLRU_head_ptr;
    H5C_cache_entry_t          *cLRU_tail_ptr;

    /* Fields for dirty LRU list of entries */
    uint32_t                    dLRU_list_len;
    size_t                      dLRU_list_size;
    H5C_cache_entry_t          *dLRU_head_ptr;
    H5C_cache_entry_t          *dLRU_tail_ptr;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

#ifdef H5_HAVE_PARALLEL
    /* Fields for collective metadata reads */
    uint32_t                    coll_list_len;
    size_t                      coll_list_size;
    H5C_cache_entry_t          *coll_head_ptr;
    H5C_cache_entry_t          *coll_tail_ptr;

    /* Fields for collective metadata writes */
    H5SL_t                     *coll_write_list;
#endif /* H5_HAVE_PARALLEL */

    /* Fields for automatic cache size adjustment */
    hbool_t                     size_increase_possible;
    hbool_t                     flash_size_increase_possible;
    size_t                      flash_size_increase_threshold;
    hbool_t                     size_decrease_possible;
    hbool_t                     resize_enabled;
    hbool_t                     cache_full;
    hbool_t                     size_decreased;
    hbool_t                     resize_in_progress; /* guards against re-entrant resizes */
    hbool_t                     msic_in_progress;   /* guards against re-entrant make-space-in-cache */
    H5C_auto_size_ctl_t         resize_ctl;

    /* Fields for epoch markers used in automatic cache size adjustment */
    int32_t                     epoch_markers_active;
    hbool_t                     epoch_marker_active[H5C__MAX_EPOCH_MARKERS];
    int32_t                     epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1];
    int32_t                     epoch_marker_ringbuf_first;
    int32_t                     epoch_marker_ringbuf_last;
    int32_t                     epoch_marker_ringbuf_size;
    H5C_cache_entry_t           epoch_markers[H5C__MAX_EPOCH_MARKERS];

    /* Fields for cache hit rate collection */
    int64_t                     cache_hits;
    int64_t                     cache_accesses;

    /* fields supporting generation of a cache image on file close */
    H5C_cache_image_ctl_t       image_ctl;
    hbool_t                     serialization_in_progress;
    hbool_t                     load_image;
    hbool_t                     image_loaded;
    hbool_t                     delete_image;
    haddr_t                     image_addr;
    hsize_t                     image_len;
    hsize_t                     image_data_len;
    int64_t                     entries_loaded_counter;
    int64_t                     entries_inserted_counter;
    int64_t                     entries_relocated_counter;
    int64_t                     entry_fd_height_change_counter;
    uint32_t                    num_entries_in_image;
    H5C_image_entry_t          *image_entries;
    void                       *image_buffer;

    /* Free Space Manager Related fields */
    hbool_t                     rdfsm_settled;
    hbool_t                     mdfsm_settled;

#if H5C_COLLECT_CACHE_STATS
    /* stats fields -- all indexed by entry type id, except as noted */
    int64_t                     hits[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     misses[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
    int32_t                     max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     insertions[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     clears[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     flushes[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     evictions[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     take_ownerships[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     moves[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     pins[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     unpins[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     size_increases[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     size_decreases[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
    int64_t                     cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];

    /* Fields for hash table operations */
    int64_t                     total_ht_insertions;
    int64_t                     total_ht_deletions;
    int64_t                     successful_ht_searches;
    int64_t                     total_successful_ht_search_depth;
    int64_t                     failed_ht_searches;
    int64_t                     total_failed_ht_search_depth;
    uint32_t                    max_index_len;
    size_t                      max_index_size;
    size_t                      max_clean_index_size;
    size_t                      max_dirty_index_size;

    /* Fields for in-order skip list */
    uint32_t                    max_slist_len;
    size_t                      max_slist_size;

    /* Fields for protected entry list */
    uint32_t                    max_pl_len;
    size_t                      max_pl_size;

    /* Fields for pinned entry list */
    uint32_t                    max_pel_len;
    size_t                      max_pel_size;

    /* Fields for tracking 'make space in cache' (msic) operations */
    int64_t                     calls_to_msic;
    int64_t                     total_entries_skipped_in_msic;
    int64_t                     total_dirty_pf_entries_skipped_in_msic;
    int64_t                     total_entries_scanned_in_msic;
    int32_t                     max_entries_skipped_in_msic;
    int32_t                     max_dirty_pf_entries_skipped_in_msic;
    int32_t                     max_entries_scanned_in_msic;
    int64_t                     entries_scanned_to_make_space;

    /* Fields for tracking skip list scan restarts */
    int64_t                     slist_scan_restarts;
    int64_t                     LRU_scan_restarts;
    int64_t                     index_scan_restarts;

    /* Fields for tracking cache image operations */
    int32_t                     images_created;
    int32_t                     images_read;
    int32_t                     images_loaded;
    hsize_t                     last_image_size;

    /* Fields for tracking prefetched entries */
    int64_t                     prefetches;
    int64_t                     dirty_prefetches;
    int64_t                     prefetch_hits;

#if H5C_COLLECT_CACHE_ENTRY_STATS
    /* Per-entry stats -- also indexed by entry type id */
    int32_t                     max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
    int32_t                     min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
    int32_t                     max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
    int32_t                     max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
    size_t                      max_size[H5C__MAX_NUM_TYPE_IDS + 1];
    int32_t                     max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
#endif /* H5C_COLLECT_CACHE_STATS */

    /* Prefix for debugging output (see field documentation above) */
    char                        prefix[H5C__PREFIX_LEN];

#ifndef NDEBUG
    /* Count of successful H5C_get_entry_ptr_from_addr() calls -- debug builds only */
    int64_t                     get_entry_ptr_from_addr_counter;
#endif /* NDEBUG */

}; /* H5C_t */

/* Define typedef for tagged cache entry iteration callbacks */
typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);


/*****************************/
/* Package Private Variables */
/*****************************/


/******************************/
/* Package Private Prototypes */
/******************************/

/* Cache image routines */
H5_DLL herr_t H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated);
H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t * cache_ptr,
    H5C_cache_entry_t** entry_ptr_ptr, const H5C_class_t * type, haddr_t addr,
    void * udata);

/* General routines */
H5_DLL herr_t H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr,
    unsigned flags);
H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr);
H5_DLL herr_t H5C__load_cache_image(H5F_t *f);
H5_DLL herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr);
H5_DLL herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr);
H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed,
    hbool_t write_permitted);
H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f);
H5_DLL herr_t H5C__serialize_cache(H5F_t *f);
H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
    H5C_tag_iter_cb_t cb, void *cb_ctx);

/* Routines for operating on entry tags */
H5_DLL herr_t H5C__tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr);
H5_DLL herr_t H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry);

/* Testing functions */
#ifdef H5C_TESTING
H5_DLL herr_t H5C__verify_cork_tag_test(hid_t fid, H5O_token_t tag_token, hbool_t status);
#endif /* H5C_TESTING */

#endif /* H5Cpkg_H */
/* clang-format on */