/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by The HDF Group.                                               *
 * Copyright by the Board of Trustees of the University of Illinois.         *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5.  The full HDF5 copyright notice, including     *
 * terms governing use, modification, and redistribution, is contained in    *
 * the COPYING file, which can be found at the root of the source code       *
 * distribution tree, or in https://www.hdfgroup.org/licenses.               *
 * If you do not have access to either file, you may request a copy from     *
 * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*-------------------------------------------------------------------------
 *
 * Created:     H5C.c
 *              June 1 2004
 *              John Mainzer
 *
 * Purpose:     Functions in this file implement a generic cache for
 *              things which exist on disk, and which may be
 *              unambiguously referenced by their disk addresses.
 *
 *              For a detailed overview of the cache, please see the
 *              header comment for H5C_t in H5Cpkg.h.
 *
 *-------------------------------------------------------------------------
 */

/**************************************************************************
 *
 *                To Do:
 *
 *    Code Changes:
 *
 *     - Change protect/unprotect to lock/unlock.
 *
 *     - Flush entries in increasing address order in
 *       H5C__make_space_in_cache().
 *
 *     - Also in H5C__make_space_in_cache(), use high and low water marks
 *       to reduce the number of I/O calls.
 *
 *     - When flushing, attempt to combine contiguous entries to reduce
 *       I/O overhead.  Can't do this just yet as some entries are not
 *       contiguous.  Do this in parallel only or in serial as well?
 *
 *     - Fix nodes in memory to point directly to the skip list node from
 *       the LRU list, eliminating skip list lookups when evicting objects
 *       from the cache.
 *
 **************************************************************************/

/****************/
/* Module Setup */
/****************/

#include "H5Cmodule.h" /* This source code file is part of the H5C module */
#define H5F_FRIEND     /* suppress error about including H5Fpkg  */

/***********/
/* Headers */
/***********/
#include "H5private.h"   /* Generic Functions */
#include "H5Cpkg.h"      /* Cache */
#include "H5CXprivate.h" /* API Contexts */
#include "H5Eprivate.h"  /* Error handling */
#include "H5Fpkg.h"      /* Files */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h"  /* IDs */
#include "H5MFprivate.h" /* File memory management */
#include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h"  /* Property lists */

/****************/
/* Local Macros */
/****************/
#if H5C_DO_MEMORY_SANITY_CHECKS
#define H5C_IMAGE_EXTRA_SPACE  8
#define H5C_IMAGE_SANITY_VALUE "DeadBeef"
#else /* H5C_DO_MEMORY_SANITY_CHECKS */
#define H5C_IMAGE_EXTRA_SPACE 0
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */

/******************/
/* Local Typedefs */
/******************/

/* Alias for pointer to cache entry, for use when allocating sequences of them */
typedef H5C_cache_entry_t *H5C_cache_entry_ptr_t;

/********************/
/* Local Prototypes */
/********************/

static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);

static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp);

static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp);

static herr_t H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted);

static herr_t H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
                                      size_t *new_max_cache_size_ptr, hbool_t write_permitted);

static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr);

static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted);

static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr);

static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr);

static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr);

static herr_t H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size);

static herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags);

static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);

static herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);

static void *H5C__load_entry(H5F_t *f,
#ifdef H5_HAVE_PARALLEL
                             hbool_t coll_access,
#endif /* H5_HAVE_PARALLEL */
                             const H5C_class_t *type, haddr_t addr, void *udata);

static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry);

static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry);

static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring);
static herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len,
                                  hbool_t actual);

#if H5C_DO_SLIST_SANITY_CHECKS
static hbool_t H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr);
#endif /* H5C_DO_SLIST_SANITY_CHECKS */

#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t H5C__validate_lru_list(H5C_t *cache_ptr);
static herr_t H5C__validate_pinned_entry_list(H5C_t *cache_ptr);
static herr_t H5C__validate_protected_entry_list(H5C_t *cache_ptr);
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

#ifndef NDEBUG
static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry,
                                          const H5C_cache_entry_t *base_entry);
#endif /* NDEBUG */

/*********************/
/* Package Variables */
/*********************/

/* Package initialization variable */
hbool_t H5_PKG_INIT_VAR = FALSE;

/* Declare a free list to manage the tag info struct */
H5FL_DEFINE(H5C_tag_info_t);

/*****************************/
/* Library Private Variables */
/*****************************/

/*******************/
/* Local Variables */
/*******************/

/* Declare a free list to manage the H5C_t struct */
H5FL_DEFINE_STATIC(H5C_t);

/* Declare a free list to manage arrays of cache entries */
H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t);

/*-------------------------------------------------------------------------
 * Function:    H5C_create
 *
 * Purpose:     Allocate, initialize, and return the address of a new
 *              instance of H5C_t.
 *
 *              In general, the max_cache_size parameter must be positive,
 *              and the min_clean_size parameter must lie in the closed
 *              interval [0, max_cache_size].
 *
 *              The check_write_permitted parameter must either be NULL,
 *              or point to a function of type H5C_write_permitted_func_t.
 *              If it is NULL, the cache will use the write_permitted
 *              flag to determine whether writes are permitted.
 *
 * Return:      Success:        Pointer to the new instance.
 *
 *              Failure:        NULL
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 * Modifications:
 *
 *              JRM -- 7/20/04
 *              Updated for the addition of the hash table.
 *
 *              JRM -- 10/5/04
 *              Added call to H5C_reset_cache_hit_rate_stats().  Also
 *              added initialization for cache_is_full flag and for
 *              resize_ctl.
 *
 *              JRM -- 11/12/04
 *              Added initialization for the new size_decreased field.
 *
 *              JRM -- 11/17/04
 *              Added/updated initialization for the automatic cache
 *              size control data structures.
 *
 *              JRM -- 6/24/05
 *              Added support for the new write_permitted field of
 *              the H5C_t structure.
 *
 *              JRM -- 7/5/05
 *              Added the new log_flush parameter and supporting code.
 *
 *              JRM -- 9/21/05
 *              Added the new aux_ptr parameter and supporting code.
 *
 *              JRM -- 1/20/06
 *              Added initialization of the new prefix field in H5C_t.
 *
 *              JRM -- 3/16/06
 *              Added initialization for the pinned entry related fields.
 *
 *              JRM -- 5/31/06
 *              Added initialization for the trace_file_ptr field.
 *
 *              JRM -- 8/19/06
 *              Added initialization for the flush_in_progress field.
 *
 *              JRM -- 8/25/06
 *              Added initialization for the slist_len_increase and
 *              slist_size_increase fields.  These fields are used
 *              for sanity checking in the flush process, and are not
 *              compiled in unless H5C_DO_SANITY_CHECKS is TRUE.
 *
 *              JRM -- 3/28/07
 *              Added initialization for the new is_read_only and
 *              ro_ref_count fields.
 *
 *              JRM -- 7/27/07
 *              Added initialization for the new evictions_enabled
 *              field of H5C_t.
 *
 *              JRM -- 12/31/07
 *              Added initialization for the new flash cache size increase
 *              related fields of H5C_t.
 *
 *              JRM -- 11/5/08
 *              Added initialization for the new clean_index_size and
 *              dirty_index_size fields of H5C_t.
 *
 *              Missing entries?
 *
 *              JRM -- 4/20/20
 *              Added initialization for the slist_enabled field.  Recall
 *              that the slist is used to flush metadata cache entries
 *              in (roughly) increasing address order.  While this is
 *              needed at flush and close, it is not used elsewhere.
 *              The slist_enabled field exists to allow us to construct
 *              the slist when needed, and leave it empty otherwise -- thus
 *              avoiding the overhead of maintaining it.
 *
 *                                               JRM -- 4/29/20
 *
 *-------------------------------------------------------------------------
 */
H5C_t *
H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id,
           const H5C_class_t *const *class_table_ptr, H5C_write_permitted_func_t check_write_permitted,
           hbool_t write_permitted, H5C_log_flush_func_t log_flush, void *aux_ptr)
{
    int    i;
    H5C_t *cache_ptr = NULL;
    H5C_t *ret_value = NULL; /* Return value */

    FUNC_ENTER_NOAPI(NULL)

    HDassert(max_cache_size >= H5C__MIN_MAX_CACHE_SIZE);
    HDassert(max_cache_size <= H5C__MAX_MAX_CACHE_SIZE);
    HDassert(min_clean_size <= max_cache_size);

    HDassert(max_type_id >= 0);
    HDassert(max_type_id < H5C__MAX_NUM_TYPE_IDS);
    HDassert(class_table_ptr);

    for (i = 0; i <= max_type_id; i++) {
        HDassert((class_table_ptr)[i]);
        HDassert(HDstrlen((class_table_ptr)[i]->name) > 0);
    } /* end for */

    if (NULL == (cache_ptr = H5FL_CALLOC(H5C_t)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

    if (NULL == (cache_ptr->slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list")

    if (NULL == (cache_ptr->tag_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list for tagged entry addresses")

    /* If we get this far, we should succeed.  Go ahead and initialize all
     * the fields.
     */

    cache_ptr->magic = H5C__H5C_T_MAGIC;

    cache_ptr->flush_in_progress = FALSE;

    if (NULL == (cache_ptr->log_info = (H5C_log_info_t *)H5MM_calloc(sizeof(H5C_log_info_t))))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed")

    cache_ptr->aux_ptr = aux_ptr;

    cache_ptr->max_type_id = max_type_id;

    cache_ptr->class_table_ptr = class_table_ptr;

    cache_ptr->max_cache_size = max_cache_size;
    cache_ptr->min_clean_size = min_clean_size;

    cache_ptr->check_write_permitted = check_write_permitted;
    cache_ptr->write_permitted       = write_permitted;

    cache_ptr->log_flush = log_flush;

    cache_ptr->evictions_enabled      = TRUE;
    cache_ptr->close_warning_received = FALSE;

    cache_ptr->index_len        = 0;
    cache_ptr->index_size       = (size_t)0;
    cache_ptr->clean_index_size = (size_t)0;
    cache_ptr->dirty_index_size = (size_t)0;

    for (i = 0; i < H5C_RING_NTYPES; i++) {
        cache_ptr->index_ring_len[i]        = 0;
        cache_ptr->index_ring_size[i]       = (size_t)0;
        cache_ptr->clean_index_ring_size[i] = (size_t)0;
        cache_ptr->dirty_index_ring_size[i] = (size_t)0;

        cache_ptr->slist_ring_len[i]  = 0;
        cache_ptr->slist_ring_size[i] = (size_t)0;
    } /* end for */

    for (i = 0; i < H5C__HASH_TABLE_LEN; i++)
        (cache_ptr->index)[i] = NULL;

    cache_ptr->il_len  = 0;
    cache_ptr->il_size = (size_t)0;
    cache_ptr->il_head = NULL;
    cache_ptr->il_tail = NULL;

    /* Tagging Field Initializations */
    cache_ptr->ignore_tags     = FALSE;
    cache_ptr->num_objs_corked = 0;

    /* slist field initializations */
    cache_ptr->slist_enabled = !H5C__SLIST_OPT_ENABLED;
    cache_ptr->slist_changed = FALSE;
    cache_ptr->slist_len     = 0;
    cache_ptr->slist_size    = (size_t)0;

    /* slist_ring_len, slist_ring_size, and
     * slist_ptr initialized above.
     */

#if H5C_DO_SANITY_CHECKS
    cache_ptr->slist_len_increase  = 0;
    cache_ptr->slist_size_increase = 0;
#endif /* H5C_DO_SANITY_CHECKS */

    cache_ptr->entries_removed_counter   = 0;
    cache_ptr->last_entry_removed_ptr    = NULL;
    cache_ptr->entry_watched_for_removal = NULL;

    cache_ptr->pl_len      = 0;
    cache_ptr->pl_size     = (size_t)0;
    cache_ptr->pl_head_ptr = NULL;
    cache_ptr->pl_tail_ptr = NULL;

    cache_ptr->pel_len      = 0;
    cache_ptr->pel_size     = (size_t)0;
    cache_ptr->pel_head_ptr = NULL;
    cache_ptr->pel_tail_ptr = NULL;

    cache_ptr->LRU_list_len  = 0;
    cache_ptr->LRU_list_size = (size_t)0;
    cache_ptr->LRU_head_ptr  = NULL;
    cache_ptr->LRU_tail_ptr  = NULL;

#ifdef H5_HAVE_PARALLEL
    cache_ptr->coll_list_len   = 0;
    cache_ptr->coll_list_size  = (size_t)0;
    cache_ptr->coll_head_ptr   = NULL;
    cache_ptr->coll_tail_ptr   = NULL;
    cache_ptr->coll_write_list = NULL;
#endif /* H5_HAVE_PARALLEL */

#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
    cache_ptr->cLRU_list_len  = 0;
    cache_ptr->cLRU_list_size = (size_t)0;
    cache_ptr->cLRU_head_ptr  = NULL;
    cache_ptr->cLRU_tail_ptr  = NULL;

    cache_ptr->dLRU_list_len  = 0;
    cache_ptr->dLRU_list_size = (size_t)0;
    cache_ptr->dLRU_head_ptr  = NULL;
    cache_ptr->dLRU_tail_ptr  = NULL;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

    cache_ptr->size_increase_possible        = FALSE;
    cache_ptr->flash_size_increase_possible  = FALSE;
    cache_ptr->flash_size_increase_threshold = 0;
    cache_ptr->size_decrease_possible        = FALSE;
    cache_ptr->resize_enabled                = FALSE;
    cache_ptr->cache_full                    = FALSE;
    cache_ptr->size_decreased                = FALSE;
    cache_ptr->resize_in_progress            = FALSE;
    cache_ptr->msic_in_progress              = FALSE;

    (cache_ptr->resize_ctl).version            = H5C__CURR_AUTO_SIZE_CTL_VER;
    (cache_ptr->resize_ctl).rpt_fcn            = NULL;
    (cache_ptr->resize_ctl).set_initial_size   = FALSE;
    (cache_ptr->resize_ctl).initial_size       = H5C__DEF_AR_INIT_SIZE;
    (cache_ptr->resize_ctl).min_clean_fraction = H5C__DEF_AR_MIN_CLEAN_FRAC;
    (cache_ptr->resize_ctl).max_size           = H5C__DEF_AR_MAX_SIZE;
    (cache_ptr->resize_ctl).min_size           = H5C__DEF_AR_MIN_SIZE;
    (cache_ptr->resize_ctl).epoch_length       = H5C__DEF_AR_EPOCH_LENGTH;

    (cache_ptr->resize_ctl).incr_mode           = H5C_incr__off;
    (cache_ptr->resize_ctl).lower_hr_threshold  = H5C__DEF_AR_LOWER_THRESHHOLD;
    (cache_ptr->resize_ctl).increment           = H5C__DEF_AR_INCREMENT;
    (cache_ptr->resize_ctl).apply_max_increment = TRUE;
    (cache_ptr->resize_ctl).max_increment       = H5C__DEF_AR_MAX_INCREMENT;

    (cache_ptr->resize_ctl).flash_incr_mode = H5C_flash_incr__off;
    (cache_ptr->resize_ctl).flash_multiple  = 1.0;
    (cache_ptr->resize_ctl).flash_threshold = 0.25;

    (cache_ptr->resize_ctl).decr_mode              = H5C_decr__off;
    (cache_ptr->resize_ctl).upper_hr_threshold     = H5C__DEF_AR_UPPER_THRESHHOLD;
    (cache_ptr->resize_ctl).decrement              = H5C__DEF_AR_DECREMENT;
    (cache_ptr->resize_ctl).apply_max_decrement    = TRUE;
    (cache_ptr->resize_ctl).max_decrement          = H5C__DEF_AR_MAX_DECREMENT;
    (cache_ptr->resize_ctl).epochs_before_eviction = H5C__DEF_AR_EPCHS_B4_EVICT;
    (cache_ptr->resize_ctl).apply_empty_reserve    = TRUE;
    (cache_ptr->resize_ctl).empty_reserve          = H5C__DEF_AR_EMPTY_RESERVE;

    cache_ptr->epoch_markers_active = 0;

    /* no need to initialize the ring buffer itself */
    cache_ptr->epoch_marker_ringbuf_first = 1;
    cache_ptr->epoch_marker_ringbuf_last  = 0;
    cache_ptr->epoch_marker_ringbuf_size  = 0;

    /* Initialize all epoch marker entries' fields to zero/FALSE/NULL */
    HDmemset(cache_ptr->epoch_markers, 0, sizeof(cache_ptr->epoch_markers));

    /* Set non-zero/FALSE/NULL fields for epoch markers */
    for (i = 0; i < H5C__MAX_EPOCH_MARKERS; i++) {
        ((cache_ptr->epoch_markers)[i]).magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
        ((cache_ptr->epoch_markers)[i]).addr  = (haddr_t)i;
        ((cache_ptr->epoch_markers)[i]).type  = H5AC_EPOCH_MARKER;
    }

    /* Initialize cache image generation on file close related fields.
     * Initial value of image_ctl must match H5C__DEFAULT_CACHE_IMAGE_CTL
     * in H5Cprivate.h.
     */
    cache_ptr->image_ctl.version            = H5C__CURR_CACHE_IMAGE_CTL_VER;
    cache_ptr->image_ctl.generate_image     = FALSE;
    cache_ptr->image_ctl.save_resize_status = FALSE;
    cache_ptr->image_ctl.entry_ageout       = -1;
    cache_ptr->image_ctl.flags              = H5C_CI__ALL_FLAGS;

    cache_ptr->serialization_in_progress = FALSE;
    cache_ptr->load_image                = FALSE;
    cache_ptr->image_loaded              = FALSE;
    cache_ptr->delete_image              = FALSE;
    cache_ptr->image_addr                = HADDR_UNDEF;
    cache_ptr->image_len                 = 0;
    cache_ptr->image_data_len            = 0;

    cache_ptr->entries_loaded_counter         = 0;
    cache_ptr->entries_inserted_counter       = 0;
    cache_ptr->entries_relocated_counter      = 0;
    cache_ptr->entry_fd_height_change_counter = 0;

    cache_ptr->num_entries_in_image = 0;
    cache_ptr->image_entries        = NULL;
    cache_ptr->image_buffer         = NULL;

    /* initialize free space manager related fields: */
    cache_ptr->rdfsm_settled = FALSE;
    cache_ptr->mdfsm_settled = FALSE;

    if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
        /* this should be impossible... */
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "H5C_reset_cache_hit_rate_stats failed")

    H5C_stats__reset(cache_ptr);

    cache_ptr->prefix[0] = '\0'; /* empty string */

#ifndef NDEBUG
    cache_ptr->get_entry_ptr_from_addr_counter = 0;
#endif /* NDEBUG */

    /* Set return value */
    ret_value = cache_ptr;

done:
    if (NULL == ret_value) {
        if (cache_ptr != NULL) {
            if (cache_ptr->slist_ptr != NULL)
                H5SL_close(cache_ptr->slist_ptr);

            if (cache_ptr->tag_list != NULL)
                H5SL_close(cache_ptr->tag_list);

            if (cache_ptr->log_info != NULL)
                H5MM_xfree(cache_ptr->log_info);

            cache_ptr->magic = 0;
            cache_ptr        = H5FL_FREE(H5C_t, cache_ptr);
        } /* end if */
    }     /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_create() */

/*-------------------------------------------------------------------------
 * Function:    H5C_def_auto_resize_rpt_fcn
 *
 * Purpose:     Print results of an automatic cache resize.
 *
 *              This function should only be used where HDprintf() behaves
 *              well -- i.e. not on Windows.
 *
 * Return:      void
 *
 * Programmer:  John Mainzer
 *              10/27/04
 *
 *-------------------------------------------------------------------------
 */
void
H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr,
#ifndef NDEBUG
                            int32_t version,
#else  /* NDEBUG */
                            int32_t H5_ATTR_UNUSED version,
#endif /* NDEBUG */
                            double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size,
                            size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size)
{
    HDassert(cache_ptr != NULL);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER);

    switch (status) {
        case in_spec:
            HDfprintf(stdout, "%sAuto cache resize -- no change. (hit rate = %lf)\n", cache_ptr->prefix,
                      hit_rate);
            break;

        case increase:
            HDassert(hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold);
            HDassert(old_max_cache_size < new_max_cache_size);

            HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
                      cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).lower_hr_threshold);

            HDfprintf(stdout, "%scache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
                      old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
            break;

        case flash_increase:
            HDassert(old_max_cache_size < new_max_cache_size);

            HDfprintf(stdout, "%sflash cache resize(%d) -- size threshold = %zu.\n", cache_ptr->prefix,
                      (int)((cache_ptr->resize_ctl).flash_incr_mode),
                      cache_ptr->flash_size_increase_threshold);

            HDfprintf(stdout, "%s cache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
                      old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
            break;

        case decrease:
            HDassert(old_max_cache_size > new_max_cache_size);

            switch ((cache_ptr->resize_ctl).decr_mode) {
                case H5C_decr__off:
                    HDfprintf(stdout, "%sAuto cache resize -- decrease off.  HR = %lf\n", cache_ptr->prefix,
                              hit_rate);
                    break;

                case H5C_decr__threshold:
                    HDassert(hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold);

                    HDfprintf(stdout, "%sAuto cache resize -- decrease by threshold.  HR = %lf > %6.5lf\n",
                              cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).upper_hr_threshold);

                    HDfprintf(stdout, "%sout of bounds high (%6.5lf).\n", cache_ptr->prefix,
                              (cache_ptr->resize_ctl).upper_hr_threshold);
                    break;

                case H5C_decr__age_out:
                    HDfprintf(stdout, "%sAuto cache resize -- decrease by ageout.  HR = %lf\n",
                              cache_ptr->prefix, hit_rate);
                    break;

                case H5C_decr__age_out_with_threshold:
                    HDassert(hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold);

                    HDfprintf(stdout,
                              "%sAuto cache resize -- decrease by ageout with threshold. HR = %lf > %6.5lf\n",
                              cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).upper_hr_threshold);
                    break;

                default:
                    HDfprintf(stdout, "%sAuto cache resize -- decrease by unknown mode.  HR = %lf\n",
                              cache_ptr->prefix, hit_rate);
            }

            HDfprintf(stdout, "%s    cache size decreased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
                      old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
            break;

        case at_max_size:
            HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
                      cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).lower_hr_threshold);
            HDfprintf(stdout, "%s    cache already at maximum size so no change.\n", cache_ptr->prefix);
            break;

        case at_min_size:
            HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n", cache_ptr->prefix,
                      hit_rate);
            HDfprintf(stdout, "%s    cache already at minimum size.\n", cache_ptr->prefix);
            break;

        case increase_disabled:
            HDfprintf(stdout, "%sAuto cache resize -- increase disabled -- HR = %lf.\n", cache_ptr->prefix,
                      hit_rate);
            break;

        case decrease_disabled:
            HDfprintf(stdout, "%sAuto cache resize -- decrease disabled -- HR = %lf.\n", cache_ptr->prefix,
                      hit_rate);
            break;

        case not_full:
            HDassert(hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold);

            HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
                      cache_ptr->prefix, hit_rate, (cache_ptr->resize_ctl).lower_hr_threshold);
            HDfprintf(stdout, "%s    cache not full so no increase in size.\n", cache_ptr->prefix);
            break;

        default:
            HDfprintf(stdout, "%sAuto cache resize -- unknown status code.\n", cache_ptr->prefix);
            break;
    }
} /* H5C_def_auto_resize_rpt_fcn() */
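
/* Wiring up this report function (a hypothetical sketch -- in normal use the
 * resize configuration is set through H5C_set_cache_auto_resize_config(),
 * not by assigning the field directly; H5C_create() initializes rpt_fcn to
 * NULL):
 *
 *     (cache_ptr->resize_ctl).rpt_fcn = H5C_def_auto_resize_rpt_fcn;
 *
 * With a report function installed, the auto-resize code can report the old
 * and new max cache / min clean sizes whenever it adjusts the cache.
 */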

/*-------------------------------------------------------------------------
 * Function:    H5C__free_tag_list_cb
 *
 * Purpose:     Callback function to free tag nodes from the skip list.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Vailin Choi
 *              January 2014
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__free_tag_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
{
    H5C_tag_info_t *tag_info = (H5C_tag_info_t *)_item;

    FUNC_ENTER_STATIC_NOERR

    HDassert(tag_info);

    /* Release the item */
    tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);

    FUNC_LEAVE_NOAPI(0)
} /* H5C__free_tag_list_cb() */

/*-------------------------------------------------------------------------
 *
 * Function:    H5C_prep_for_file_close
 *
 * Purpose:     This function should be called just prior to the cache
 *              flushes at file close.  There should be no protected
 *              entries in the cache at this point.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              7/3/15
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_prep_for_file_close(H5F_t *f)
{
    H5C_t * cache_ptr;
    hbool_t image_generated = FALSE;   /* Whether a cache image was generated */
    herr_t  ret_value       = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(f);
    HDassert(f->shared);
    HDassert(f->shared->cache);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);

    /* For now at least, it is possible to receive the
     * close warning more than once -- the following
     * if statement handles this.
     */
    if (cache_ptr->close_warning_received)
        HGOTO_DONE(SUCCEED)
    cache_ptr->close_warning_received = TRUE;

    /* Make certain there aren't any protected entries */
    HDassert(cache_ptr->pl_len == 0);

    /* Prepare cache image */
    if (H5C__prep_image_for_file_close(f, &image_generated) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image")

#ifdef H5_HAVE_PARALLEL
    if ((H5F_INTENT(f) & H5F_ACC_RDWR) && (!image_generated) && (cache_ptr->aux_ptr != NULL) &&
        (f->shared->fs_persist)) {
        /* If persistent free space managers are enabled, flushing the
         * metadata cache may result in the deletion, insertion, and/or
         * dirtying of entries.
         *
         * This is a problem in PHDF5, as it breaks two invariants of
         * our management of the metadata cache across all processes:
         *
         * 1) Entries will not be dirtied, deleted, inserted, or moved
         *    during flush in the parallel case.
         *
         * 2) All processes contain the same set of dirty metadata
         *    entries on entry to a sync point.
         *
         * To solve this problem for the persistent free space managers,
         * serialize the metadata cache on all processes prior to the
         * first sync point on file shutdown.  The shutdown warning is
         * a convenient location for this call.
         *
         * This is sufficient since:
         *
         * 1) FSM settle routines are only invoked on file close.  Since
         *    serialization makes the same settle calls as flush on file
         *    close, and since the close warning is issued after all
         *    non FSM related space allocations and just before the
         *    first sync point on close, this call will leave the caches
         *    in a consistent state across the processes if they were
         *    consistent before.
         *
         * 2) Since the FSM settle routines are only invoked once during
         *    file close, invoking them now will prevent their invocation
         *    during a flush, and thus avoid any resulting entry dirtying,
         *    deletion, insertion, or movement during the flush.
         */
        if (H5C__serialize_cache(f) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialization of the cache failed")
    }  /* end if */
#endif /* H5_HAVE_PARALLEL */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_prep_for_file_close() */
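
/* Sketch of where this call sits in the close sequence (simplified from the
 * comments above; the actual call chain runs through the H5AC layer and the
 * H5F file-close code, so the direct calls below are only indicative):
 *
 *     if (H5C_prep_for_file_close(f) < 0)   // issue the close warning
 *         // handle error
 *     ...                                   // FSM settling / sync points
 *     if (H5C_dest(f) < 0)                  // final flush and destroy
 *         // handle error
 */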

/*-------------------------------------------------------------------------
 * Function:    H5C_dest
 *
 * Purpose:     Flush all data to disk and destroy the cache.
 *
 *              This function fails if any objects are protected since the
 *              resulting file might not be consistent.
 *
 *              Note that *cache_ptr has been freed upon successful return.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 * Modifications:
 *
 *              JRM -- 5/15/20
 *              Updated the function to enable the slist prior to the
 *              call to H5C__flush_invalidate_cache().
 *
 *              Arguably, it shouldn't be necessary to re-enable the
 *              slist after the call to H5C__flush_invalidate_cache(), as
 *              the metadata cache should be discarded.  However, in the
 *              test code, we make multiple calls to H5C_dest().  Thus
 *              we re-enable the slist on failure if it and the cache
 *              still exist.
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_dest(H5F_t *f)
{
    H5C_t *cache_ptr = f->shared->cache;
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity check */
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->close_warning_received);

#if H5AC_DUMP_IMAGE_STATS_ON_CLOSE
    if (H5C_image_stats(cache_ptr, TRUE) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats")
#endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */

    /* Enable the slist, as it is needed in the flush */
    if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")

    /* Flush and invalidate all cache entries */
    if (H5C__flush_invalidate_cache(f, H5C__NO_FLAGS_SET) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")

    /* Generate & write cache image if requested */
    if (cache_ptr->image_ctl.generate_image) {

        if (H5C__generate_cache_image(f, cache_ptr) < 0)

            HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't generate metadata cache image")
    }

    /* Question: Is it possible for cache_ptr->slist_ptr to be non-NULL at
     *           this point?  If not, shouldn't this if statement be an
     *           assert?
     */
    if (cache_ptr->slist_ptr != NULL) {

        HDassert(cache_ptr->slist_len == 0);
        HDassert(cache_ptr->slist_size == 0);

        H5SL_close(cache_ptr->slist_ptr);

        cache_ptr->slist_ptr = NULL;

    } /* end if */

    if (cache_ptr->tag_list != NULL) {

        H5SL_destroy(cache_ptr->tag_list, H5C__free_tag_list_cb, NULL);

        cache_ptr->tag_list = NULL;

    } /* end if */

    if (cache_ptr->log_info != NULL) {

        H5MM_xfree(cache_ptr->log_info);
    }

#ifndef NDEBUG
#if H5C_DO_SANITY_CHECKS

    if (cache_ptr->get_entry_ptr_from_addr_counter > 0) {

        HDfprintf(stdout, "*** %" PRId64 " calls to H5C_get_entry_ptr_from_addr(). ***\n",
                  cache_ptr->get_entry_ptr_from_addr_counter);
    }
#endif /* H5C_DO_SANITY_CHECKS */

    cache_ptr->magic = 0;
#endif /* NDEBUG */

    cache_ptr = H5FL_FREE(H5C_t, cache_ptr);

done:

    if ((ret_value < 0) && (cache_ptr) && (cache_ptr->slist_ptr)) {

        /* need this for test code -- see change note for details */

        if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0)

            HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist on flush dest failure failed")
    }

    FUNC_LEAVE_NOAPI(ret_value)

} /* H5C_dest() */

/*-------------------------------------------------------------------------
 * Function:    H5C_evict
 *
 * Purpose:     Evict all except pinned entries in the cache
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Vailin Choi
 *              Dec 2013
 *
 * Modifications:
 *
 *              JRM -- 5/5/20
 *              Added code to enable the skip list prior to the call
 *              to H5C__flush_invalidate_cache(), and disable it
 *              afterwards.
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_evict(H5F_t *f)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity check */
    HDassert(f);

    /* Enable the slist, as it is needed in the flush */
    if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")

    /* Flush and invalidate all cache entries except the pinned entries */
    if (H5C__flush_invalidate_cache(f, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache")

    /* Disable the slist */
    if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist disabled failed")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_evict() */
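
/* Usage sketch (hypothetical -- in the library itself this entry point is
 * normally reached through the H5AC layer when all evictable entries must
 * be dropped without closing the file):
 *
 *     if (H5C_evict(f) < 0)   // evict everything except pinned entries
 *         // handle error
 */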

/*-------------------------------------------------------------------------
 * Function:    H5C_expunge_entry
 *
 * Purpose:     Use this function to tell the cache to expunge an entry
 *              from the cache without writing it to disk even if it is
 *              dirty.  The entry may not be either pinned or protected.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              6/29/06
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags)
{
    H5C_t *            cache_ptr;
    H5C_cache_entry_t *entry_ptr   = NULL;
    unsigned           flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG);
    herr_t             ret_value   = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(type);
    HDassert(H5F_addr_defined(addr));

#if H5C_DO_EXTREME_SANITY_CHECKS
    if (H5C__validate_lru_list(cache_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    /* Look for entry in cache */
    H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
    if ((entry_ptr == NULL) || (entry_ptr->type != type))
        /* the target doesn't exist in the cache, so we are done. */
        HGOTO_DONE(SUCCEED)

    HDassert(entry_ptr->addr == addr);
    HDassert(entry_ptr->type == type);

    /* Check for entry being pinned or protected */
    if (entry_ptr->is_protected)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected")
    if (entry_ptr->is_pinned)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned")

    /* If we get this far, call H5C__flush_single_entry() with the
     * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG.
     * This will clear the entry, and then delete it from the cache.
     */

    /* Pass along 'free file space' flag */
    flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG);

    /* Delete the entry from the skip list on destroy */
    flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;

    if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry")

done:
#if H5C_DO_EXTREME_SANITY_CHECKS
    if (H5C__validate_lru_list(cache_ptr) < 0)
        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_expunge_entry() */
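
/* Usage sketch for H5C_expunge_entry() (hypothetical -- `type' and `addr'
 * are placeholders for a real client class and entry address):
 *
 *     // Discard an unpinned, unprotected entry without writing it to
 *     // disk, and also release its file space:
 *     if (H5C_expunge_entry(f, type, addr, H5C__FREE_FILE_SPACE_FLAG) < 0)
 *         // handle error
 */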

/*-------------------------------------------------------------------------
 * Function:    H5C_flush_cache
 *
 * Purpose:     Flush (and possibly destroy) the entries contained in the
 *              specified cache.
 *
 *              If the cache contains protected entries, the function will
 *              fail, as protected entries cannot be flushed.  However,
 *              all unprotected entries should be flushed before the
 *              function returns failure.
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *              a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 * Changes:     Modified function to test for slist changes in
 *              pre_serialize and serialize callbacks, and re-start
 *              scans through the slist when such changes occur.
 *
 *              This has been a potential problem for some time,
 *              and there has been code in this function to deal
 *              with elements of this issue.  However the shift
 *              to the V3 cache in combination with the activities
 *              of some of the cache clients (in particular the
 *              free space manager and the fractal heap) have
 *              made this re-work necessary.
 *
 *                                               JRM -- 12/13/14
 *
 *              Modified function to support rings.  Basic idea is that
 *              every entry in the cache is assigned to a ring.  Entries
 *              in the outermost ring are flushed first, followed by
 *              those in the next outermost ring, and so on until the
 *              innermost ring is flushed.  See header comment on
 *              H5C_ring_t in H5Cprivate.h for a more detailed
 *              discussion.
 *
 *                                               JRM -- 8/30/15
 *
 *              Modified function to call the free space manager
 *              settling functions.
 *                                               JRM -- 6/9/16
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_flush_cache(H5F_t *f, unsigned flags)
{
#if H5C_DO_SANITY_CHECKS
    int      i;
    uint32_t index_len        = 0;
    size_t   index_size       = (size_t)0;
    size_t   clean_index_size = (size_t)0;
    size_t   dirty_index_size = (size_t)0;
    size_t   slist_size       = (size_t)0;
    uint32_t slist_len        = 0;
#endif /* H5C_DO_SANITY_CHECKS */
    H5C_ring_t ring;
    H5C_t *    cache_ptr;
    hbool_t    destroy;
    herr_t     ret_value = SUCCEED;

    FUNC_ENTER_NOAPI(FAIL)

    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->slist_ptr);

#if H5C_DO_SANITY_CHECKS
    HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
    HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
    HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);

    for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
        index_len += cache_ptr->index_ring_len[i];
        index_size += cache_ptr->index_ring_size[i];
        clean_index_size += cache_ptr->clean_index_ring_size[i];
        dirty_index_size += cache_ptr->dirty_index_ring_size[i];

        slist_len += cache_ptr->slist_ring_len[i];
        slist_size += cache_ptr->slist_ring_size[i];
    } /* end for */

    HDassert(cache_ptr->index_len == index_len);
    HDassert(cache_ptr->index_size == index_size);
    HDassert(cache_ptr->clean_index_size == clean_index_size);
    HDassert(cache_ptr->dirty_index_size == dirty_index_size);
    HDassert(cache_ptr->slist_len == slist_len);
    HDassert(cache_ptr->slist_size == slist_size);
#endif /* H5C_DO_SANITY_CHECKS */

#if H5C_DO_EXTREME_SANITY_CHECKS
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
    HDassert(!(destroy && ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0)));
    HDassert(!(cache_ptr->flush_in_progress));

    cache_ptr->flush_in_progress = TRUE;

    if (destroy) {
        if (H5C__flush_invalidate_cache(f, flags) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed")
    } /* end if */
    else {
        /* flush each ring, starting from the outermost ring and
         * working inward.
         */
        ring = H5C_RING_USER;
        while (ring < H5C_RING_NTYPES) {

            /* Only call the free space manager settle routines when close
             * warning has been received.
             */
            if (cache_ptr->close_warning_received) {
                switch (ring) {
                    case H5C_RING_USER:
                        break;

                    case H5C_RING_RDFSM:
                        /* Settle raw data FSM */
                        if (!cache_ptr->rdfsm_settled)
                            if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
                                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
                        break;

                    case H5C_RING_MDFSM:
                        /* Settle metadata FSM */
                        if (!cache_ptr->mdfsm_settled)
                            if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
                                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
                        break;

                    case H5C_RING_SBE:
                    case H5C_RING_SB:
                        break;

                    default:
                        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
                        break;
                } /* end switch */
            }     /* end if */

            if (H5C__flush_ring(f, ring, flags) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed")
            ring++;
        } /* end while */
    }     /* end else */

done:
    cache_ptr->flush_in_progress = FALSE;

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_flush_cache() */
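
/* Flag usage sketch for H5C_flush_cache() (based on the flags tested in the
 * function above; H5C__NO_FLAGS_SET requests a plain flush):
 *
 *     // Flush all dirty entries but keep them cached:
 *     if (H5C_flush_cache(f, H5C__NO_FLAGS_SET) < 0)
 *         // handle error
 *
 *     // Flush and evict every entry (the destroy case above):
 *     if (H5C_flush_cache(f, H5C__FLUSH_INVALIDATE_FLAG) < 0)
 *         // handle error
 */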

/*-------------------------------------------------------------------------
 * Function:    H5C_flush_to_min_clean
 *
 * Purpose:     Flush dirty entries until the cache's min clean size is
 *              attained.
 *
 *              This function is used in the implementation of the
 *              metadata cache in PHDF5.  To avoid "messages from the
 *              future", the cache on process 0 can't be allowed to
 *              flush entries until the other processes have reached
 *              the same point in the calculation.  If this constraint
 *              is not met, it is possible that the other processes will
 *              read metadata generated at a future point in the
 *              computation.
 *
 * Return:      Non-negative on success/Negative on failure or if
 *              write is not permitted.
 *
 * Programmer:  John Mainzer
 *              9/16/05
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_flush_to_min_clean(H5F_t *f)
{
    H5C_t * cache_ptr;
    hbool_t write_permitted;
    herr_t  ret_value = SUCCEED;

    FUNC_ENTER_NOAPI(FAIL)

    HDassert(f);
    HDassert(f->shared);

    cache_ptr = f->shared->cache;

    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);

    if (cache_ptr->check_write_permitted != NULL) {
        if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't get write_permitted")
    } /* end if */
    else
        write_permitted = cache_ptr->write_permitted;

    if (!write_permitted)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cache write is not permitted!?!")

    if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__make_space_in_cache failed")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_flush_to_min_clean() */

/*-------------------------------------------------------------------------
 * Function:    H5C_insert_entry
 *
 * Purpose:     Adds the specified thing to the cache.  The thing need not
 *              exist on disk yet, but it must have an address and disk
 *              space reserved.
 *
 *              Observe that this function cannot occasion a read.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags)
{
    H5C_t *     cache_ptr;
    H5AC_ring_t ring = H5C_RING_UNDEFINED;
    hbool_t     insert_pinned;
    hbool_t     flush_last;
#ifdef H5_HAVE_PARALLEL
    hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
#endif                           /* H5_HAVE_PARALLEL */
    hbool_t            set_flush_marker;
    hbool_t            write_permitted = TRUE;
    size_t             empty_space;
    H5C_cache_entry_t *entry_ptr = NULL;
    H5C_cache_entry_t *test_entry_ptr;
    hbool_t            entry_tagged = FALSE;
    herr_t             ret_value    = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    HDassert(f);
    HDassert(f->shared);

    cache_ptr = f->shared->cache;

    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(type);
    HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
    HDassert(type->image_len);
    HDassert(H5F_addr_defined(addr));
    HDassert(thing);

#if H5C_DO_EXTREME_SANITY_CHECKS
    /* no need to verify that entry is not already in the index as */
    /* we already make that check below.                           */
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
    insert_pinned    = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
    flush_last       = ((flags & H5C__FLUSH_LAST_FLAG) != 0);

    /* Get the ring type from the API context */
    ring = H5CX_get_ring();

    entry_ptr = (H5C_cache_entry_t *)thing;

    /* verify that the new entry isn't already in the hash table -- scream
     * and die if it is.
     */

    H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)

    if (test_entry_ptr != NULL) {
        if (test_entry_ptr == entry_ptr)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache")
        else
            HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache")
    } /* end if */

    entry_ptr->magic     = H5C__H5C_CACHE_ENTRY_T_MAGIC;
    entry_ptr->cache_ptr = cache_ptr;
    entry_ptr->addr      = addr;
    entry_ptr->type      = type;

    entry_ptr->image_ptr        = NULL;
    entry_ptr->image_up_to_date = FALSE;

    entry_ptr->is_protected = FALSE;
    entry_ptr->is_read_only = FALSE;
    entry_ptr->ro_ref_count = 0;

    entry_ptr->is_pinned          = insert_pinned;
    entry_ptr->pinned_from_client = insert_pinned;
    entry_ptr->pinned_from_cache  = FALSE;
    entry_ptr->flush_me_last      = flush_last;

    /* newly inserted entries are assumed to be dirty */
    entry_ptr->is_dirty = TRUE;

    /* not protected, so can't be dirtied */
    entry_ptr->dirtied = FALSE;

    /* Retrieve the size of the thing */
    if ((type->image_len)(thing, &(entry_ptr->size)) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get size of thing")
    HDassert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE);

    entry_ptr->in_slist = FALSE;

#ifdef H5_HAVE_PARALLEL
    entry_ptr->clear_on_unprotect = FALSE;
    entry_ptr->flush_immediately  = FALSE;
#endif /* H5_HAVE_PARALLEL */

    entry_ptr->flush_in_progress   = FALSE;
    entry_ptr->destroy_in_progress = FALSE;

    entry_ptr->ring = ring;

    /* Initialize flush dependency fields */
    entry_ptr->flush_dep_parent          = NULL;
    entry_ptr->flush_dep_nparents        = 0;
    entry_ptr->flush_dep_parent_nalloc   = 0;
    entry_ptr->flush_dep_nchildren       = 0;
    entry_ptr->flush_dep_ndirty_children = 0;
    entry_ptr->flush_dep_nunser_children = 0;

    entry_ptr->ht_next = NULL;
    entry_ptr->ht_prev = NULL;
    entry_ptr->il_next = NULL;
    entry_ptr->il_prev = NULL;

    entry_ptr->next = NULL;
    entry_ptr->prev = NULL;

#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
    entry_ptr->aux_next = NULL;
    entry_ptr->aux_prev = NULL;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

#ifdef H5_HAVE_PARALLEL
    entry_ptr->coll_next = NULL;
    entry_ptr->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */

    /* initialize cache image related fields */
    entry_ptr->include_in_image     = FALSE;
    entry_ptr->lru_rank             = 0;
    entry_ptr->image_dirty          = FALSE;
    entry_ptr->fd_parent_count      = 0;
    entry_ptr->fd_parent_addrs      = NULL;
    entry_ptr->fd_child_count       = 0;
    entry_ptr->fd_dirty_child_count = 0;
    entry_ptr->image_fd_height      = 0;
    entry_ptr->prefetched           = FALSE;
    entry_ptr->prefetch_type_id     = 0;
    entry_ptr->age                  = 0;
    entry_ptr->prefetched_dirty     = FALSE;
#ifndef NDEBUG /* debugging field */
    entry_ptr->serialization_count = 0;
#endif /* NDEBUG */

    entry_ptr->tl_next  = NULL;
    entry_ptr->tl_prev  = NULL;
    entry_ptr->tag_info = NULL;

    /* Apply tag to newly inserted entry */
    if (H5C__tag_entry(cache_ptr, entry_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
    entry_tagged = TRUE;

    H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)

    if (cache_ptr->flash_size_increase_possible &&
        (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
        if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed")

    if (cache_ptr->index_size >= cache_ptr->max_cache_size)
        empty_space = 0;
    else
        empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;

    if (cache_ptr->evictions_enabled &&
        (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
1453          (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) {
1454         size_t space_needed;
1455 
1456         if (empty_space <= entry_ptr->size)
1457             cache_ptr->cache_full = TRUE;
1458 
1459         if (cache_ptr->check_write_permitted != NULL) {
1460             if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
1461                 HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
1462         } /* end if */
1463         else
1464             write_permitted = cache_ptr->write_permitted;
1465 
1466         HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
1467         space_needed = entry_ptr->size;
1468         if (space_needed > cache_ptr->max_cache_size)
1469             space_needed = cache_ptr->max_cache_size;
1470 
1471         /* Note that space_needed is just the amount of space
1472          * needed to insert the new entry without exceeding the cache
1473          * size limit.  The subsequent call to H5C__make_space_in_cache()
1474          * may evict the entries required to free more or less space
1475          * depending on conditions.  It MAY be less if the cache is
1476          * currently undersized, or more if the cache is oversized.
1477          *
1478          * The cache can exceed its maximum size limit via the following
1479          * mechanisms:
1480          *
1481          * First, it is possible for the cache to grow without
1482          * bound as long as entries are protected and not unprotected.
1483          *
1484          * Second, when writes are not permitted it is also possible
1485          * for the cache to grow without bound.
1486          *
1487          * Finally, we usually don't check to see if the cache is
1488          * oversized at the end of an unprotect.  As a result, it is
1489          * possible to have a vastly oversized cache with no protected
1490          * entries as long as all the protects precede the unprotects.
1491          *
1492          * Since items 1 and 2 are not changing any time soon, I see
1493          * no point in worrying about the third.
1494          */
1495 
1496         if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
1497             HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
1498     } /* end if */
1499 
1500     H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
1501 
1502     /* New entries are presumed to be dirty */
1503     HDassert(entry_ptr->is_dirty);
1504     entry_ptr->flush_marker = set_flush_marker;
1505     H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
1506     H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
1507 
1508 #if H5C_DO_EXTREME_SANITY_CHECKS
1509     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
1510         (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
1511         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
1512 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1513 
1514     /* If the entry's type has a 'notify' callback, send an 'after insertion'
1515      * notice now that the entry is fully integrated into the cache.
1516      */
1517     if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0)
1518         HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache")
1519 
1520     H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
1521 
1522 #ifdef H5_HAVE_PARALLEL
1523     if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
1524         coll_access = H5CX_get_coll_metadata_read();
1525 
1526     entry_ptr->coll_access = coll_access;
1527     if (coll_access) {
1528         H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL)
1529 
1530         /* Make sure the size of the collective entries in the cache remains in check */
1531         if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
1532             if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
1533                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
1534     } /* end if */
1535 #endif
1536 
1537 done:
1538 #if H5C_DO_EXTREME_SANITY_CHECKS
1539     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
1540         (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
1541         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
1542 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1543 
1544     if (ret_value < 0 && entry_tagged)
1545         if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
1546             HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
1547 
1548     FUNC_LEAVE_NOAPI(ret_value)
1549 } /* H5C_insert_entry() */
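
/* Usage sketch for H5C_insert_entry() (illustrative only -- `my_class`,
 * `new_addr`, and `my_thing` are hypothetical caller-side names, not
 * symbols defined in this file):
 *
 *     if (H5C_insert_entry(f, my_class, new_addr, my_thing,
 *                          H5C__NO_FLAGS_SET) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "can't insert entry")
 *
 * On success, the entry is in the cache, dirty, and unprotected; pass
 * H5C__PIN_ENTRY_FLAG instead to insert it pinned.
 */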
1550 
1551 /*-------------------------------------------------------------------------
1552  * Function:    H5C_mark_entry_dirty
1553  *
1554  * Purpose:    Mark a pinned or protected entry as dirty.  The target entry
1555  *         MUST be either pinned or protected, and MAY be both.
1556  *
1557  *         In the protected case, this call is the functional
1558  *         equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
1559  *         call.
1560  *
1561  *         In the pinned but not protected case, if the entry is not
1562  *         already dirty, the function marks the entry dirty and
1563  *         places it on the skip list.
1564  *
1565  * Return:      Non-negative on success/Negative on failure
1566  *
1567  * Programmer:  John Mainzer
1568  *              5/15/06
1569  *
1570  *         JRM -- 11/5/08
1571  *         Added call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY() to
1572  *         update the new clean_index_size and dirty_index_size
1573  *         fields of H5C_t in the case that the entry was clean
1574  *         prior to this call, and is pinned and not protected.
1575  *
1576  *-------------------------------------------------------------------------
1577  */
1578 herr_t
1579 H5C_mark_entry_dirty(void *thing)
1580 {
1581     H5C_t *            cache_ptr;
1582     H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing;
1583     herr_t             ret_value = SUCCEED; /* Return value */
1584 
1585     FUNC_ENTER_NOAPI(FAIL)
1586 
1587     /* Sanity checks */
1588     HDassert(entry_ptr);
1589     HDassert(H5F_addr_defined(entry_ptr->addr));
1590     cache_ptr = entry_ptr->cache_ptr;
1591     HDassert(cache_ptr);
1592     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
1593 
1594     if (entry_ptr->is_protected) {
1595         HDassert(!((entry_ptr)->is_read_only));
1596 
1597         /* set the dirtied flag */
1598         entry_ptr->dirtied = TRUE;
1599 
1600         /* reset image_up_to_date */
1601         if (entry_ptr->image_up_to_date) {
1602             entry_ptr->image_up_to_date = FALSE;
1603 
1604             if (entry_ptr->flush_dep_nparents > 0)
1605                 if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
1606                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
1607                                 "Can't propagate serialization status to fd parents")
1608         } /* end if */
1609     }     /* end if */
1610     else if (entry_ptr->is_pinned) {
1611         hbool_t was_clean; /* Whether the entry was previously clean */
1612         hbool_t image_was_up_to_date;
1613 
1614         /* Remember previous dirty status */
1615         was_clean = !entry_ptr->is_dirty;
1616 
1617         /* Check if image is up to date */
1618         image_was_up_to_date = entry_ptr->image_up_to_date;
1619 
1620         /* Mark the entry as dirty if it isn't already */
1621         entry_ptr->is_dirty         = TRUE;
1622         entry_ptr->image_up_to_date = FALSE;
1623 
1624         /* Modify cache data structures */
1625         if (was_clean)
1626             H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
1627         if (!entry_ptr->in_slist)
1628             H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
1629 
1630         /* Update stats for entry being marked dirty */
1631         H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
1632 
1633         /* Check for entry changing status and do notifications, etc. */
1634         if (was_clean) {
1635             /* If the entry's type has a 'notify' callback, send an 'entry dirtied'
1636              * notice now that the entry is fully integrated into the cache.
1637              */
1638             if (entry_ptr->type->notify &&
1639                 (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
1640                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
1641 
1642             /* Propagate the dirty flag up the flush dependency chain if appropriate */
1643             if (entry_ptr->flush_dep_nparents > 0)
1644                 if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
1645                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
1646         } /* end if */
1647         if (image_was_up_to_date)
1648             if (entry_ptr->flush_dep_nparents > 0)
1649                 if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
1650                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
1651                                 "Can't propagate serialization status to fd parents")
1652     } /* end if */
1653     else
1654         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??")
1655 
1656 done:
1657     FUNC_LEAVE_NOAPI(ret_value)
1658 } /* H5C_mark_entry_dirty() */
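
/* Usage sketch for H5C_mark_entry_dirty() (illustrative; `my_thing` stands
 * for a pointer previously returned by H5C_protect(), or pinned via
 * H5C_pin_protected_entry() -- it is not defined in this file):
 *
 *     if (H5C_mark_entry_dirty(my_thing) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't mark dirty")
 *
 * For a protected entry this has the same effect as passing the
 * H5C__DIRTIED_FLAG on the eventual unprotect call.
 */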
1659 
1660 /*-------------------------------------------------------------------------
1661  * Function:    H5C_mark_entry_clean
1662  *
1663  * Purpose:    Mark a pinned entry as clean.  The target entry MUST be pinned.
1664  *
1665  *         If the entry is not
1666  *         already clean, the function marks the entry clean and
1667  *         removes it from the skip list.
1668  *
1669  * Return:      Non-negative on success/Negative on failure
1670  *
1671  * Programmer:  Quincey Koziol
1672  *              7/23/16
1673  *
1674  *-------------------------------------------------------------------------
1675  */
1676 herr_t
1677 H5C_mark_entry_clean(void *_thing)
1678 {
1679     H5C_t *            cache_ptr;
1680     H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_thing;
1681     herr_t             ret_value = SUCCEED; /* Return value */
1682 
1683     FUNC_ENTER_NOAPI(FAIL)
1684 
1685     /* Sanity checks */
1686     HDassert(entry_ptr);
1687     HDassert(H5F_addr_defined(entry_ptr->addr));
1688     cache_ptr = entry_ptr->cache_ptr;
1689     HDassert(cache_ptr);
1690     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
1691 
1692     /* Operate on pinned entry */
1693     if (entry_ptr->is_protected)
1694         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected")
1695     else if (entry_ptr->is_pinned) {
1696         hbool_t was_dirty; /* Whether the entry was previously dirty */
1697 
1698         /* Remember previous dirty status */
1699         was_dirty = entry_ptr->is_dirty;
1700 
1701         /* Mark the entry as clean if it isn't already */
1702         entry_ptr->is_dirty = FALSE;
1703 
1704         /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */
1705         entry_ptr->flush_marker = FALSE;
1706 
1707         /* Modify cache data structures */
1708         if (was_dirty)
1709             H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr)
1710         if (entry_ptr->in_slist)
1711             H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE)
1712 
1713         /* Update stats for entry being marked clean */
1714         H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
1715 
1716         /* Check for entry changing status and do notifications, etc. */
1717         if (was_dirty) {
1718             /* If the entry's type has a 'notify' callback, send an 'entry cleaned'
1719              * notice now that the entry is fully integrated into the cache.
1720              */
1721             if (entry_ptr->type->notify &&
1722                 (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
1723                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
1724                             "can't notify client about entry dirty flag cleared")
1725 
1726             /* Propagate the clean up the flush dependency chain, if appropriate */
1727             if (entry_ptr->flush_dep_nparents > 0)
1728                 if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
1729                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean")
1730         } /* end if */
1731     }     /* end if */
1732     else
1733         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??")
1734 
1735 done:
1736     FUNC_LEAVE_NOAPI(ret_value)
1737 } /* H5C_mark_entry_clean() */
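
/* Usage sketch for H5C_mark_entry_clean() (illustrative; `my_thing` must
 * refer to a pinned, unprotected entry -- the name is hypothetical):
 *
 *     if (H5C_mark_entry_clean(my_thing) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "can't mark clean")
 *
 * The entry is removed from the skip list and will not be written out on
 * the next flush.
 */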
1738 
1739 /*-------------------------------------------------------------------------
1740  * Function:    H5C_mark_entry_unserialized
1741  *
1742  * Purpose:    Mark a pinned or protected entry as unserialized.  The target
1743  *             entry MUST be either pinned or protected, and MAY be both.
1744  *
1745  * Return:      Non-negative on success/Negative on failure
1746  *
1747  * Programmer:  Quincey Koziol
1748  *              12/23/16
1749  *
1750  *-------------------------------------------------------------------------
1751  */
1752 herr_t
1753 H5C_mark_entry_unserialized(void *thing)
1754 {
1755     H5C_cache_entry_t *entry     = (H5C_cache_entry_t *)thing;
1756     herr_t             ret_value = SUCCEED; /* Return value */
1757 
1758     FUNC_ENTER_NOAPI(FAIL)
1759 
1760     /* Sanity checks */
1761     HDassert(entry);
1762     HDassert(H5F_addr_defined(entry->addr));
1763 
1764     if (entry->is_protected || entry->is_pinned) {
1765         HDassert(!entry->is_read_only);
1766 
1767         /* Reset image_up_to_date */
1768         if (entry->image_up_to_date) {
1769             entry->image_up_to_date = FALSE;
1770 
1771             if (entry->flush_dep_nparents > 0)
1772                 if (H5C__mark_flush_dep_unserialized(entry) < 0)
1773                     HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL,
1774                                 "Can't propagate serialization status to fd parents")
1775         } /* end if */
1776     }     /* end if */
1777     else
1778         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL,
1779                     "Entry to unserialize is neither pinned nor protected??")
1780 
1781 done:
1782     FUNC_LEAVE_NOAPI(ret_value)
1783 } /* H5C_mark_entry_unserialized() */
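
/* Usage sketch for H5C_mark_entry_unserialized() (illustrative; `my_thing`
 * is a hypothetical pinned or protected entry):
 *
 *     if (H5C_mark_entry_unserialized(my_thing) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't mark unserialized")
 *
 * This invalidates the entry's cached on-disk image without marking the
 * entry itself dirty.
 */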
1784 
1785 /*-------------------------------------------------------------------------
1786  * Function:    H5C_mark_entry_serialized
1787  *
1788  * Purpose:    Mark a pinned entry as serialized.  The target entry MUST be
1789  *             pinned.
1790  *
1791  * Return:      Non-negative on success/Negative on failure
1792  *
1793  * Programmer:  Quincey Koziol
1794  *              12/23/16
1795  *
1796  *-------------------------------------------------------------------------
1797  */
1798 herr_t
1799 H5C_mark_entry_serialized(void *_thing)
1800 {
1801     H5C_cache_entry_t *entry     = (H5C_cache_entry_t *)_thing;
1802     herr_t             ret_value = SUCCEED; /* Return value */
1803 
1804     FUNC_ENTER_NOAPI(FAIL)
1805 
1806     /* Sanity checks */
1807     HDassert(entry);
1808     HDassert(H5F_addr_defined(entry->addr));
1809 
1810     /* Operate on pinned entry */
1811     if (entry->is_protected)
1812         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected")
1813     else if (entry->is_pinned) {
1814         /* Check for entry changing status and do notifications, etc. */
1815         if (!entry->image_up_to_date) {
1816             /* Set the image_up_to_date flag */
1817             entry->image_up_to_date = TRUE;
1818 
1819             /* Propagate the serialize up the flush dependency chain, if appropriate */
1820             if (entry->flush_dep_nparents > 0)
1821                 if (H5C__mark_flush_dep_serialized(entry) < 0)
1822                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL,
1823                                 "Can't propagate flush dep serialize")
1824         } /* end if */
1825     }     /* end if */
1826     else
1827         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??")
1828 
1829 done:
1830     FUNC_LEAVE_NOAPI(ret_value)
1831 } /* H5C_mark_entry_serialized() */
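
/* Usage sketch for H5C_mark_entry_serialized() (illustrative; `my_thing`
 * is a hypothetical pinned entry whose on-disk image has just been
 * regenerated by the caller):
 *
 *     if (H5C_mark_entry_serialized(my_thing) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL,
 *                     "can't mark serialized")
 */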
1832 
1833 /*-------------------------------------------------------------------------
1834  *
1835  * Function:    H5C_move_entry
1836  *
1837  * Purpose:     Use this function to notify the cache that an entry's
1838  *              file address changed.
1839  *
1840  * Return:      Non-negative on success/Negative on failure
1841  *
1842  * Programmer:  John Mainzer
1843  *              6/2/04
1844  *
1845  *-------------------------------------------------------------------------
1846  */
1847 herr_t
1848 H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, haddr_t new_addr)
1849 {
1850     H5C_cache_entry_t *entry_ptr      = NULL;
1851     H5C_cache_entry_t *test_entry_ptr = NULL;
1852     herr_t             ret_value      = SUCCEED; /* Return value */
1853 
1854     FUNC_ENTER_NOAPI(FAIL)
1855 
1856     HDassert(cache_ptr);
1857     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
1858     HDassert(type);
1859     HDassert(H5F_addr_defined(old_addr));
1860     HDassert(H5F_addr_defined(new_addr));
1861     HDassert(H5F_addr_ne(old_addr, new_addr));
1862 
1863 #if H5C_DO_EXTREME_SANITY_CHECKS
1864     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
1865         (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
1866         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
1867 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1868 
1869     H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL)
1870 
1871     if (entry_ptr == NULL || entry_ptr->type != type)
1872         /* the old item doesn't exist in the cache, so we are done. */
1873         HGOTO_DONE(SUCCEED)
1874 
1875     HDassert(entry_ptr->addr == old_addr);
1876     HDassert(entry_ptr->type == type);
1877 
1878     /* Check for R/W status, otherwise error */
1879     /* (Moving a R/O entry would mark it dirty, which shouldn't
1880      *  happen. QAK - 2016/12/02)
1881      */
1882     if (entry_ptr->is_read_only)
1883         HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move R/O entry")
1884 
1885     H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL)
1886 
1887     if (test_entry_ptr != NULL) { /* we are hosed */
1888         if (test_entry_ptr->type == type)
1889             HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???")
1890         else
1891             HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?")
1892     } /* end if */
1893 
1894     /* If we get this far we have work to do.  Remove *entry_ptr from
1895      * the hash table (and skip list if necessary), change its address to the
1896      * new address, mark it as dirty (if it isn't already) and then re-insert.
1897      *
1898      * Update the replacement policy for a hit to avoid an eviction before
1899      * the moved entry is touched.  Update stats for a move.
1900      *
1901      * Note that we do not check the size of the cache, or evict anything.
1902      * Since this is a simple rename, cache size should be unaffected.
1903      *
1904      * Check to see if the target entry is in the process of being destroyed
1905      * before we delete from the index, etc.  If it is, all we do is
1906      * change the addr.  If the entry is only in the process of being flushed,
1907      * don't mark it as dirty either, lest we confuse the flush callback.
1908      */
1909     if (!entry_ptr->destroy_in_progress) {
1910         H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
1911 
1912         if (entry_ptr->in_slist) {
1913             HDassert(cache_ptr->slist_ptr);
1914             H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE)
1915         } /* end if */
1916     }     /* end if */
1917 
1918     entry_ptr->addr = new_addr;
1919 
1920     if (!entry_ptr->destroy_in_progress) {
1921         hbool_t was_dirty; /* Whether the entry was previously dirty */
1922 
1923         /* Remember previous dirty status */
1924         was_dirty = entry_ptr->is_dirty;
1925 
1926         /* Mark the entry as dirty if it isn't already */
1927         entry_ptr->is_dirty = TRUE;
1928 
1929         /* This shouldn't be needed, but it keeps the test code happy */
1930         if (entry_ptr->image_up_to_date) {
1931             entry_ptr->image_up_to_date = FALSE;
1932             if (entry_ptr->flush_dep_nparents > 0)
1933                 if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
1934                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
1935                                 "Can't propagate serialization status to fd parents")
1936         } /* end if */
1937 
1938         /* Modify cache data structures */
1939         H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
1940         H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
1941 
1942         /* Skip some actions if we're in the middle of flushing the entry */
1943         if (!entry_ptr->flush_in_progress) {
1944             /* Update the replacement policy for the entry */
1945             H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL)
1946 
1947             /* Check for entry changing status and do notifications, etc. */
1948             if (!was_dirty) {
1949                 /* If the entry's type has a 'notify' callback, send an 'entry dirtied'
1950                  * notice now that the entry is fully integrated into the cache.
1951                  */
1952                 if (entry_ptr->type->notify &&
1953                     (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
1954                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
1955                                 "can't notify client about entry dirty flag set")
1956 
1957                 /* Propagate the dirty flag up the flush dependency chain if appropriate */
1958                 if (entry_ptr->flush_dep_nparents > 0)
1959                     if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
1960                         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL,
1961                                     "Can't propagate flush dep dirty flag")
1962             } /* end if */
1963         }     /* end if */
1964     }         /* end if */
1965 
1966     H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
1967 
1968 done:
1969 #if H5C_DO_EXTREME_SANITY_CHECKS
1970     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
1971         (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
1972         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
1973 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1974 
1975     FUNC_LEAVE_NOAPI(ret_value)
1976 } /* H5C_move_entry() */
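
/* Usage sketch for H5C_move_entry() (illustrative; `old_addr` and
 * `new_addr` would typically come from file-space reallocation and are
 * hypothetical here, as is `my_class`):
 *
 *     if (H5C_move_entry(f->shared->cache, my_class, old_addr, new_addr) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move entry")
 *
 * Note that the move dirties the entry -- the metadata must be rewritten
 * at its new address -- so the caller need not mark it dirty separately.
 */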
1977 
1978 /*-------------------------------------------------------------------------
1979  * Function:    H5C_resize_entry
1980  *
1981  * Purpose:    Resize a pinned or protected entry.
1982  *
1983  *         Resizing an entry dirties it, so if the entry is not
1984  *         already dirty, the function places the entry on the
1985  *         skip list.
1986  *
1987  * Return:      Non-negative on success/Negative on failure
1988  *
1989  * Programmer:  John Mainzer
1990  *              7/5/06
1991  *
1992  *-------------------------------------------------------------------------
1993  */
1994 herr_t
1995 H5C_resize_entry(void *thing, size_t new_size)
1996 {
1997     H5C_t *            cache_ptr;
1998     H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing;
1999     herr_t             ret_value = SUCCEED; /* Return value */
2000 
2001     FUNC_ENTER_NOAPI(FAIL)
2002 
2003     /* Sanity checks */
2004     HDassert(entry_ptr);
2005     HDassert(H5F_addr_defined(entry_ptr->addr));
2006     cache_ptr = entry_ptr->cache_ptr;
2007     HDassert(cache_ptr);
2008     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
2009 
2010     /* Check for usage errors */
2011     if (new_size <= 0)
2012         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive")
2013     if (!(entry_ptr->is_pinned || entry_ptr->is_protected))
2014         HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
2015 
2016 #if H5C_DO_EXTREME_SANITY_CHECKS
2017     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
2018         (H5C__validate_pinned_entry_list(cache_ptr) < 0))
2019         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
2020 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2021 
2022     /* update for change in entry size if necessary */
2023     if (entry_ptr->size != new_size) {
2024         hbool_t was_clean;
2025 
2026         /* make note of whether the entry was clean to begin with */
2027         was_clean = !entry_ptr->is_dirty;
2028 
2029         /* mark the entry as dirty if it isn't already */
2030         entry_ptr->is_dirty = TRUE;
2031 
2032         /* Reset the image up-to-date status */
2033         if (entry_ptr->image_up_to_date) {
2034             entry_ptr->image_up_to_date = FALSE;
2035             if (entry_ptr->flush_dep_nparents > 0)
2036                 if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
2037                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
2038                                 "Can't propagate serialization status to fd parents")
2039         } /* end if */
2040 
2041         /* Release the current image */
2042         if (entry_ptr->image_ptr)
2043             entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
2044 
2045         /* do a flash cache size increase if appropriate */
2046         if (cache_ptr->flash_size_increase_possible) {
2047 
2048             if (new_size > entry_ptr->size) {
2049                 size_t size_increase;
2050 
2051                 size_increase = new_size - entry_ptr->size;
2052 
2053                 if (size_increase >= cache_ptr->flash_size_increase_threshold) {
2054                     if (H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0)
2055                         HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "flash cache increase failed")
2056                 }
2057             }
2058         }
2059 
2060         /* update the pinned and/or protected entry list */
2061         if (entry_ptr->is_pinned) {
2062             H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), (cache_ptr->pel_size), (entry_ptr->size),
2063                                             (new_size))
2064         } /* end if */
2065         if (entry_ptr->is_protected) {
2066             H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), (cache_ptr->pl_size), (entry_ptr->size),
2067                                             (new_size))
2068         } /* end if */
2069 
2070 #ifdef H5_HAVE_PARALLEL
2071         if (entry_ptr->coll_access) {
2072             H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->coll_list_len), (cache_ptr->coll_list_size),
2073                                             (entry_ptr->size), (new_size))
2074         } /* end if */
2075 #endif    /* H5_HAVE_PARALLEL */
2076 
2077         /* update statistics just before changing the entry size */
2078         H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size);
2079 
2080         /* update the hash table */
2081         H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean);
2082 
2083         /* if the entry is in the skip list, update that too */
2084         if (entry_ptr->in_slist)
2085             H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size);
2086 
2087         /* finally, update the entry size proper */
2088         entry_ptr->size = new_size;
2089 
2090         if (!entry_ptr->in_slist)
2091             H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
2092 
2093         if (entry_ptr->is_pinned)
2094             H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
2095 
2096         /* Check for entry changing status and do notifications, etc. */
2097         if (was_clean) {
2098             /* If the entry's type has a 'notify' callback, send an 'entry dirtied'
2099              * notice now that the entry is fully integrated into the cache.
2100              */
2101             if (entry_ptr->type->notify &&
2102                 (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
2103                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
2104 
2105             /* Propagate the dirty flag up the flush dependency chain if appropriate */
2106             if (entry_ptr->flush_dep_nparents > 0)
2107                 if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
2108                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
2109         } /* end if */
2110     }     /* end if */
2111 
2112 done:
2113 #if H5C_DO_EXTREME_SANITY_CHECKS
2114     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
2115         (H5C__validate_pinned_entry_list(cache_ptr) < 0))
2116         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
2117 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2118 
2119     FUNC_LEAVE_NOAPI(ret_value)
2120 } /* H5C_resize_entry() */
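
/* Usage sketch for H5C_resize_entry() (illustrative; `my_class` and
 * `my_thing` are hypothetical, and the image_len callback is the same one
 * used by H5C_insert_entry() above to size new entries):
 *
 *     size_t new_len = 0;
 *
 *     if (my_class->image_len(my_thing, &new_len) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get new size")
 *     if (H5C_resize_entry(my_thing, new_len) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")
 */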
2121 
2122 /*-------------------------------------------------------------------------
2123  * Function:    H5C_pin_protected_entry()
2124  *
2125  * Purpose:    Pin a protected cache entry.  The entry must be protected
2126  *             at the time of call, and must be unpinned.
2127  *
2128  * Return:      Non-negative on success/Negative on failure
2129  *
2130  * Programmer:  John Mainzer
2131  *              4/26/06
2132  *
2133  * Changes:    Added extreme sanity checks on entry and exit.
2134  *                                          JRM -- 4/26/14
2135  *
2136  *-------------------------------------------------------------------------
2137  */
2138 herr_t
2139 H5C_pin_protected_entry(void *thing)
2140 {
2141     H5C_t *            cache_ptr;
2142     H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; /* Pointer to entry to pin */
2143     herr_t             ret_value = SUCCEED;                    /* Return value */
2144 
2145     FUNC_ENTER_NOAPI(FAIL)
2146 
2147     /* Sanity checks */
2148     HDassert(entry_ptr);
2149     HDassert(H5F_addr_defined(entry_ptr->addr));
2150     cache_ptr = entry_ptr->cache_ptr;
2151     HDassert(cache_ptr);
2152     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
2153 
2154 #if H5C_DO_EXTREME_SANITY_CHECKS
2155     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
2156         (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
2157         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
2158 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2159 
2160     /* Only protected entries can be pinned */
2161     if (!entry_ptr->is_protected)
2162         HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected")
2163 
2164     /* Pin the entry from a client */
2165     if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
2166         HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
2167 
2168 done:
2169 #if H5C_DO_EXTREME_SANITY_CHECKS
2170     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
2171         (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
2172         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
2173 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2174 
2175     FUNC_LEAVE_NOAPI(ret_value)
2176 } /* H5C_pin_protected_entry() */
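
/* Usage sketch for H5C_pin_protected_entry() (illustrative; `my_thing` is
 * a hypothetical pointer obtained from H5C_protect()).  Pinning while
 * protected lets the entry survive the subsequent unprotect without being
 * evicted:
 *
 *     if (H5C_pin_protected_entry(my_thing) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "can't pin entry")
 */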
2177 
2178 /*-------------------------------------------------------------------------
2179  * Function:    H5C_protect
2180  *
2181  * Purpose:     If the target entry is not in the cache, load it.  If
2182  *        necessary, attempt to evict one or more entries to keep
2183  *        the cache within its maximum size.
2184  *
2185  *        Mark the target entry as protected, and return its address
2186  *        to the caller.  The caller must call H5C_unprotect() when
2187  *        finished with the entry.
2188  *
2189  *        While it is protected, the entry may not be either evicted
2190  *        or flushed -- nor may it be accessed by another call to
2191  *        H5C_protect.  Any attempt to do so will result in a failure.
2192  *
2193  * Return:      Success:        Ptr to the desired entry
2194  *              Failure:        NULL
2195  *
2196  * Programmer:  John Mainzer -  6/2/04
2197  *
2198  *-------------------------------------------------------------------------
2199  */
2200 void *
2201 H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags)
2202 {
2203     H5C_t *     cache_ptr;
2204     H5AC_ring_t ring = H5C_RING_UNDEFINED;
2205     hbool_t     hit;
2206     hbool_t     have_write_permitted = FALSE;
2207     hbool_t     read_only            = FALSE;
2208     hbool_t     flush_last;
2209 #ifdef H5_HAVE_PARALLEL
2210     hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
2211 #endif                           /* H5_HAVE_PARALLEL */
2212     hbool_t            write_permitted = FALSE;
2213     hbool_t            was_loaded      = FALSE; /* Whether the entry was loaded as a result of the protect */
2214     size_t             empty_space;
2215     void *             thing;
2216     H5C_cache_entry_t *entry_ptr;
2217     void *             ret_value = NULL; /* Return value */
2218 
2219     FUNC_ENTER_NOAPI(NULL)
2220 
2221     /* check args */
2222     HDassert(f);
2223     HDassert(f->shared);
2224 
2225     cache_ptr = f->shared->cache;
2226 
2227     HDassert(cache_ptr);
2228     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
2229     HDassert(type);
2230     HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
2231     HDassert(H5F_addr_defined(addr));
2232 
2233 #if H5C_DO_EXTREME_SANITY_CHECKS
2234     if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
2235         (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
2236         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry")
2237 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2238 
2239     /* Load the cache image, if requested */
2240     if (cache_ptr->load_image) {
2241         cache_ptr->load_image = FALSE;
2242         if (H5C__load_cache_image(f) < 0)
2243             HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image")
2244     } /* end if */
2245 
2246     read_only  = ((flags & H5C__READ_ONLY_FLAG) != 0);
2247     flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0);
2248 
2249     /* Get the ring type from the API context */
2250     ring = H5CX_get_ring();
2251 
2252 #ifdef H5_HAVE_PARALLEL
2253     if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
2254         coll_access = H5CX_get_coll_metadata_read();
2255 #endif /* H5_HAVE_PARALLEL */
2256 
2257     /* first check to see if the target is in cache */
2258     H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
2259 
2260     if (entry_ptr != NULL) {
2261         if (entry_ptr->ring != ring)
2262             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry")
2263 
2264         HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
2265 
2266         if (entry_ptr->prefetched) {
2267             /* This call removes the prefetched entry from the cache,
2268              * and replaces it with an entry deserialized from the
2269              * image of the prefetched entry.
2270              */
2271             if (H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0)
2272                 HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry")
2273 
2274             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
2275             HDassert(!entry_ptr->prefetched);
2276             HDassert(entry_ptr->addr == addr);
2277         } /* end if */
2278 
2279         /* Check for trying to load the wrong type of entry from an address */
2280         if (entry_ptr->type != type)
2281             HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type")
2282 
2283             /* If this is a collective metadata read, and the entry is
2284                clean but not marked as collective, it is possible that
2285                other processes will not have it in their caches and will
2286                expect a bcast of the entry from process 0.  So process 0
2287                will bcast the entry to all other ranks.  Ranks that _do_
2288                have the entry in their cache still have to participate in
2289                the bcast. */
2290 #ifdef H5_HAVE_PARALLEL
2291         if (coll_access) {
2292             if (!(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) {
2293                 MPI_Comm comm;     /* File MPI Communicator */
2294                 int      mpi_code; /* MPI error code */
2295                 int      buf_size;
2296 
2297                 if (MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f)))
2298                     HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
2299 
2300                 if (entry_ptr->image_ptr == NULL) {
2301                     int mpi_rank;
2302 
2303                     if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
2304                         HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
2305 
2306                     if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
2307                         HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL,
2308                                     "memory allocation failed for on disk image buffer")
2309 #if H5C_DO_MEMORY_SANITY_CHECKS
2310                     H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
2311                                 H5C_IMAGE_EXTRA_SPACE);
2312 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
2313                     if (0 == mpi_rank)
2314                         if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
2315                             HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image")
2316                 } /* end if */
2317                 HDassert(entry_ptr->image_ptr);
2318 
2319                 H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t);
2320                 if (MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm)))
2321                     HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
2322 
2323                 /* Mark the entry as collective and insert into the collective list */
2324                 entry_ptr->coll_access = TRUE;
2325                 H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
2326             } /* end if */
2327             else if (entry_ptr->coll_access) {
2328                 H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
2329             } /* end else-if */
2330         }     /* end if */
2331 #endif        /* H5_HAVE_PARALLEL */
2332 
2333 #if H5C_DO_TAGGING_SANITY_CHECKS
2334         {
2335             /* Verify tag value */
2336             if (cache_ptr->ignore_tags != TRUE) {
2337                 haddr_t tag; /* Tag value */
2338 
2339                 /* The entry is already in the cache, but make sure that the tag value
2340                  * is still legal. This will ensure that had the entry NOT been in the
2341                  * cache, tagging was still set up correctly and it would have received
2342                  * a legal tag value after getting loaded from disk.
2343                  */
2344 
2345                 /* Get the tag */
2346                 tag = H5CX_get_tag();
2347 
2348                 if (H5C_verify_tag(entry_ptr->type->id, tag) < 0)
2349                     HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed")
2350             } /* end if */
2351         }
2352 #endif
2353 
2354         hit   = TRUE;
2355         thing = (void *)entry_ptr;
2356     }
2357     else {
2358 
2359         /* must try to load the entry from disk. */
2360 
2361         hit = FALSE;
2362 
2363         if (NULL == (thing = H5C__load_entry(f,
2364 #ifdef H5_HAVE_PARALLEL
2365                                              coll_access,
2366 #endif /* H5_HAVE_PARALLEL */
2367                                              type, addr, udata)))
2368             HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
2369 
2370         entry_ptr = (H5C_cache_entry_t *)thing;
2371         cache_ptr->entries_loaded_counter++;
2372 
2373         entry_ptr->ring = ring;
2374 #ifdef H5_HAVE_PARALLEL
2375         if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access)
2376             H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
2377 #endif /* H5_HAVE_PARALLEL */
2378 
2379         /* Apply tag to newly protected entry */
2380         if (H5C__tag_entry(cache_ptr, entry_ptr) < 0)
2381             HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry")
2382 
2383         /* If the entry is very large, and we are configured to allow it,
2384          * we may wish to perform a flash cache size increase.
2385          */
2386         if ((cache_ptr->flash_size_increase_possible) &&
2387             (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) {
2388 
2389             if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
2390                 HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
2391         }
2392 
2393         if (cache_ptr->index_size >= cache_ptr->max_cache_size)
2394             empty_space = 0;
2395         else
2396             empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
2397 
2398         /* Try to free up space if necessary and if evictions are permitted.  Note
2399          * that if evictions are enabled, we will call H5C__make_space_in_cache()
2400          * even when only the min_clean_size requirement is not met.
2401          */
2402         if ((cache_ptr->evictions_enabled) &&
2403             (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
2404              ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) {
2405 
2406             size_t space_needed;
2407 
2408             if (empty_space <= entry_ptr->size)
2409                 cache_ptr->cache_full = TRUE;
2410 
2411             if (cache_ptr->check_write_permitted != NULL) {
2412                 if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
2413                     HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
2414                 else
2415                     have_write_permitted = TRUE;
2416             } /* end if */
2417             else {
2418                 write_permitted      = cache_ptr->write_permitted;
2419                 have_write_permitted = TRUE;
2420             } /* end else */
2421 
2422             HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
2423             space_needed = entry_ptr->size;
2424             if (space_needed > cache_ptr->max_cache_size)
2425                 space_needed = cache_ptr->max_cache_size;
2426 
2427             /* Note that space_needed is just the amount of space
2428              * needed to insert the new entry without exceeding the cache
2429              * size limit.  The subsequent call to H5C__make_space_in_cache()
2430              * may evict the entries required to free more or less space
2431              * depending on conditions.  It MAY be less if the cache is
2432              * currently undersized, or more if the cache is oversized.
2433              *
2434              * The cache can exceed its maximum size limit via the following
2435              * mechanisms:
2436              *
2437              * First, it is possible for the cache to grow without
2438              * bound as long as entries are protected and not unprotected.
2439              *
2440              * Second, when writes are not permitted it is also possible
2441              * for the cache to grow without bound.
2442              *
2443              * Third, the user may choose to disable evictions -- causing
2444              * the cache to grow without bound until evictions are
2445              * re-enabled.
2446              *
2447              * Finally, we usually don't check to see if the cache is
2448              * oversized at the end of an unprotect.  As a result, it is
2449              * possible to have a vastly oversized cache with no protected
2450              * entries as long as all the protects precede the unprotects.
2451              *
2452              * Since items 1, 2, and 3 are not changing any time soon, I
2453              * see no point in worrying about the fourth.
2454              */
2455 
2456             if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
2457                 HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
2458         } /* end if */
2459 
2460         /* Insert the entry in the hash table.  It can't be dirty yet, so
2461          * we don't even check to see if it should go in the skip list.
2462          *
2463          * This is no longer true -- due to a bug fix, we may modify
2464          * data on load to repair a file.
2465          *
2466          *   *******************************************
2467          *
2468          * Set the flush_last field
2469          * of the newly loaded entry before inserting it into the
2470          * index.  Must do this, as the index tracks the number of
2471          * entries with the flush_last field set, and assumes that
2472          * the field will not change after insertion into the index.
2473          *
2474          * Note that this means that the H5C__FLUSH_LAST_FLAG flag
2475          * is ignored if the entry is already in cache.
2476          */
2477         entry_ptr->flush_me_last = flush_last;
2478 
2479         H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL)
2480 
2481         if ((entry_ptr->is_dirty) && (!(entry_ptr->in_slist))) {
2482 
2483             H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL)
2484         }
2485 
2486         /* insert the entry in the data structures used by the replacement
2487          * policy.  We are just going to take it out again when we update
2488          * the replacement policy for a protect, but this simplifies the
2489          * code.  If we do this often enough, we may want to optimize this.
2490          */
2491         H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
2492 
2493         /* Record that the entry was loaded, to trigger a notify callback later */
2494         /* (After the entry is fully added to the cache) */
2495         was_loaded = TRUE;
2496     } /* end else */
2497 
2498     HDassert(entry_ptr->addr == addr);
2499     HDassert(entry_ptr->type == type);
2500 
2501     if (entry_ptr->is_protected) {
2502         if (read_only && entry_ptr->is_read_only) {
2503             HDassert(entry_ptr->ro_ref_count > 0);
2504             (entry_ptr->ro_ref_count)++;
2505         } /* end if */
2506         else
2507             HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?")
2508     } /* end if */
2509     else {
2510         H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL)
2511 
2512         entry_ptr->is_protected = TRUE;
2513 
2514         if (read_only) {
2515             entry_ptr->is_read_only = TRUE;
2516             entry_ptr->ro_ref_count = 1;
2517         } /* end if */
2518 
2519         entry_ptr->dirtied = FALSE;
2520     } /* end else */
2521 
2522     H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit)
2523 
2524     H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
2525 
2526     ret_value = thing;
2527 
2528     if ((cache_ptr->evictions_enabled) &&
2529         ((cache_ptr->size_decreased) ||
2530          ((cache_ptr->resize_enabled) &&
2531           (cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length)))) {
2532 
2533         if (!have_write_permitted) {
2534 
2535             if (cache_ptr->check_write_permitted != NULL) {
2536                 if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
2537                     HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted")
2538                 else
2539                     have_write_permitted = TRUE;
2540             }
2541             else {
2542 
2543                 write_permitted = cache_ptr->write_permitted;
2544 
2545                 have_write_permitted = TRUE;
2546             }
2547         }
2548 
2549         if (cache_ptr->resize_enabled &&
2550             (cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length)) {
2551 
2552             if (H5C__auto_adjust_cache_size(f, write_permitted) < 0)
2553                 HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed")
2554         } /* end if */
2555 
2556         if (cache_ptr->size_decreased) {
2557             cache_ptr->size_decreased = FALSE;
2558 
2559             /* check to see if the cache is now oversized due to the cache
2560              * size reduction.  If it is, try to evict enough entries to
2561              * bring the cache size down to the current maximum cache size.
2562              *
2563              * Also, if the min_clean_size requirement is not met, we
2564              * should call H5C__make_space_in_cache() to bring us
2565              * into compliance.
2566              */
2567 
2568             if (cache_ptr->index_size >= cache_ptr->max_cache_size)
2569                 empty_space = 0;
2570             else
2571                 empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
2572 
2573             if ((cache_ptr->index_size > cache_ptr->max_cache_size) ||
2574                 ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)) {
2575 
2576                 if (cache_ptr->index_size > cache_ptr->max_cache_size)
2577                     cache_ptr->cache_full = TRUE;
2578 
2579                 if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0)
2580                     HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
2581             }
2582         } /* end if */
2583     }
2584 
2585     /* If we loaded the entry and the entry's type has a 'notify' callback, send
2586      * an 'after load' notice now that the entry is fully integrated into
2587      * the cache and protected.  We must wait until it is protected so it is not
2588      * evicted during the notify callback.
2589      */
2590     if (was_loaded) {
2591         /* If the entry's type has a 'notify' callback, send an 'after load'
2592          * notice now that the entry is fully integrated into the cache.
2593          */
2594         if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0)
2595             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL,
2596                         "can't notify client about entry inserted into cache")
2597     } /* end if */
2598 
2599 #ifdef H5_HAVE_PARALLEL
2600     /* Make sure the size of the collective entries in the cache remains in check */
2601     if (coll_access)
2602         if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
2603             if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
2604                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
#endif /* H5_HAVE_PARALLEL */

done:
#if H5C_DO_EXTREME_SANITY_CHECKS
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_protect() */

/*-------------------------------------------------------------------------
 *
 * Function:    H5C_reset_cache_hit_rate_stats()
 *
 * Purpose:     Reset the cache hit rate computation fields.
 *
 * Return:      SUCCEED on success, and FAIL on failure.
 *
 * Programmer:  John Mainzer, 10/5/04
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")

    cache_ptr->cache_hits     = 0;
    cache_ptr->cache_accesses = 0;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_reset_cache_hit_rate_stats() */
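
/* Usage sketch (illustrative only, not part of the library): measuring the
 * hit rate over a phase of computation by resetting the stats at the start
 * of the phase and sampling at the end.  Assumes H5C_get_cache_hit_rate(),
 * which reports cache_hits / cache_accesses for the current epoch; the
 * function name below is a hypothetical placeholder.
 */
#if 0
static void
example_measure_hit_rate(H5C_t *cache_ptr)
{
    double hit_rate = 0.0;

    /* start a fresh measurement interval */
    if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
        return;

    /* ... perform the metadata operations of interest ... */

    /* sample the hit rate accumulated since the reset */
    if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) >= 0)
        HDfprintf(stderr, "hit rate = %f\n", hit_rate);
}
#endif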

/*-------------------------------------------------------------------------
 * Function:    H5C_set_cache_auto_resize_config
 *
 * Purpose:    Set the cache automatic resize configuration to the
 *        provided values if they are in range, and fail if they
 *        are not.
 *
 *        If the new configuration enables automatic cache resizing,
 *        coerce the cache max size and min clean size into agreement
 *        with the new policy and re-set the full cache hit rate
 *        stats.
 *
 * Return:      SUCCEED on success, and FAIL on failure.
 *
 * Programmer:  John Mainzer
 *        10/8/04
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr)
{
    size_t new_max_cache_size;
    size_t new_min_clean_size;
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")
    if (config_ptr == NULL)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
    if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version")

    /* check general configuration section of the config: */
    if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0)
        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config")

    /* check size increase control fields of the config: */
    if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0)
        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config")

    /* check size decrease control fields of the config: */
    if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0)
        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config")

    /* check for conflicts between size increase and size decrease controls: */
    if (H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0)
        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config")

    /* will set the increase possible fields to FALSE later if needed */
    cache_ptr->size_increase_possible       = TRUE;
    cache_ptr->flash_size_increase_possible = TRUE;
    cache_ptr->size_decrease_possible       = TRUE;

    switch (config_ptr->incr_mode) {
        case H5C_incr__off:
            cache_ptr->size_increase_possible = FALSE;
            break;

        case H5C_incr__threshold:
            if ((config_ptr->lower_hr_threshold <= 0.0) || (config_ptr->increment <= 1.0) ||
                ((config_ptr->apply_max_increment) && (config_ptr->max_increment <= 0)))
                cache_ptr->size_increase_possible = FALSE;
            break;

        default: /* should be unreachable */
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?")
    } /* end switch */

    /* logically, this is where configuration for flash cache size increases
     * should go.  However, this configuration depends on max_cache_size, so
     * we wait until the end of the function, when this field is set.
     */

    switch (config_ptr->decr_mode) {
        case H5C_decr__off:
            cache_ptr->size_decrease_possible = FALSE;
            break;

        case H5C_decr__threshold:
            if ((config_ptr->upper_hr_threshold >= 1.0) || (config_ptr->decrement >= 1.0) ||
                ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0)))
                cache_ptr->size_decrease_possible = FALSE;
            break;

        case H5C_decr__age_out:
            if (((config_ptr->apply_empty_reserve) && (config_ptr->empty_reserve >= 1.0)) ||
                ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0)))
                cache_ptr->size_decrease_possible = FALSE;
            break;

        case H5C_decr__age_out_with_threshold:
            if (((config_ptr->apply_empty_reserve) && (config_ptr->empty_reserve >= 1.0)) ||
                ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0)) ||
                (config_ptr->upper_hr_threshold >= 1.0))
                cache_ptr->size_decrease_possible = FALSE;
            break;

        default: /* should be unreachable */
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?")
    } /* end switch */

    if (config_ptr->max_size == config_ptr->min_size) {
        cache_ptr->size_increase_possible       = FALSE;
        cache_ptr->flash_size_increase_possible = FALSE;
        cache_ptr->size_decrease_possible       = FALSE;
    } /* end if */

    /* flash_size_increase_possible is intentionally omitted from the
     * following:
     */
    cache_ptr->resize_enabled = cache_ptr->size_increase_possible || cache_ptr->size_decrease_possible;

    cache_ptr->resize_ctl = *config_ptr;

    /* Resize the cache to the supplied initial value if requested, or as
     * necessary to force it within the bounds of the current automatic
     * cache resizing configuration.
     *
     * Note that the min_clean_fraction may have changed, so we
     * go through the exercise even if the current size is within
     * range and an initial size has not been provided.
     */
    if (cache_ptr->resize_ctl.set_initial_size)
        new_max_cache_size = cache_ptr->resize_ctl.initial_size;
    else if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.max_size)
        new_max_cache_size = cache_ptr->resize_ctl.max_size;
    else if (cache_ptr->max_cache_size < cache_ptr->resize_ctl.min_size)
        new_max_cache_size = cache_ptr->resize_ctl.min_size;
    else
        new_max_cache_size = cache_ptr->max_cache_size;

    new_min_clean_size = (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));

    /* since new_min_clean_size is of type size_t, we have
     *
     *     ( 0 <= new_min_clean_size )
     *
     * by definition.
     */
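    /* Worked example (illustrative numbers only): with
     * new_max_cache_size = 4 MiB and min_clean_fraction = 0.5, the line
     * above yields new_min_clean_size = 2 MiB, i.e. at least half of the
     * cache must be kept clean (or empty) at all times.
     */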
    HDassert(new_min_clean_size <= new_max_cache_size);
    HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size);
    HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size);

    if (new_max_cache_size < cache_ptr->max_cache_size)
        cache_ptr->size_decreased = TRUE;

    cache_ptr->max_cache_size = new_max_cache_size;
    cache_ptr->min_clean_size = new_min_clean_size;

    if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
        /* this should be impossible... */
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")

    /* remove excess epoch markers if any */
    if ((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) ||
        (config_ptr->decr_mode == H5C_decr__age_out)) {
        if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction)
            if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
    } /* end if */
    else if (cache_ptr->epoch_markers_active > 0) {
        if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
    }

    /* configure flash size increase facility.  We wait until the
     * end of the function, as we need the max_cache_size set before
     * we start to keep things simple.
     *
     * If we haven't already ruled out flash cache size increases above,
     * go ahead and configure it.
     */

    if (cache_ptr->flash_size_increase_possible) {
        switch (config_ptr->flash_incr_mode) {
            case H5C_flash_incr__off:
                cache_ptr->flash_size_increase_possible = FALSE;
                break;

            case H5C_flash_incr__add_space:
                cache_ptr->flash_size_increase_possible  = TRUE;
                cache_ptr->flash_size_increase_threshold = (size_t)(
                    ((double)(cache_ptr->max_cache_size)) * ((cache_ptr->resize_ctl).flash_threshold));
                break;

            default: /* should be unreachable */
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
                break;
        } /* end switch */
    }     /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_set_cache_auto_resize_config() */
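
/* Usage sketch (illustrative only, not part of the library): building a
 * minimal auto-resize configuration and applying it.  The field values are
 * examples, not recommendations; see H5C_auto_size_ctl_t in H5Cprivate.h
 * for the full set of fields and their meanings.  The function name below
 * is a hypothetical placeholder.
 */
#if 0
static herr_t
example_configure_auto_resize(H5C_t *cache_ptr)
{
    H5C_auto_size_ctl_t config;

    HDmemset(&config, 0, sizeof(config));

    config.version            = H5C__CURR_AUTO_SIZE_CTL_VER;
    config.rpt_fcn            = NULL;
    config.set_initial_size   = TRUE;
    config.initial_size       = 1 * 1024 * 1024;  /* 1 MiB            */
    config.min_clean_fraction = 0.5;
    config.max_size           = 16 * 1024 * 1024; /* 16 MiB           */
    config.min_size           = 1 * 1024 * 1024;  /* 1 MiB            */
    config.epoch_length       = 50000;            /* accesses / epoch */

    /* grow when the hit rate falls below 90% ... */
    config.incr_mode           = H5C_incr__threshold;
    config.lower_hr_threshold  = 0.9;
    config.increment           = 2.0;
    config.apply_max_increment = FALSE;
    config.max_increment       = 0;
    config.flash_incr_mode     = H5C_flash_incr__off;
    config.flash_multiple      = 1.0;
    config.flash_threshold     = 0.25;

    /* ... and shrink when it exceeds 99.9% */
    config.decr_mode              = H5C_decr__threshold;
    config.upper_hr_threshold     = 0.999;
    config.decrement              = 0.9;
    config.apply_max_decrement    = FALSE;
    config.max_decrement          = 0;
    config.epochs_before_eviction = 3;
    config.apply_empty_reserve    = FALSE;
    config.empty_reserve          = 0.1;

    return H5C_set_cache_auto_resize_config(cache_ptr, &config);
}
#endif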

/*-------------------------------------------------------------------------
 * Function:    H5C_set_evictions_enabled()
 *
 * Purpose:     Set cache_ptr->evictions_enabled to the value of the
 *              evictions enabled parameter.
 *
 * Return:      SUCCEED on success, and FAIL on failure.
 *
 * Programmer:  John Mainzer
 *              7/27/07
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")

    /* There is no fundamental reason why we should not permit
     * evictions to be disabled while automatic resize is enabled.
     * However, I can't think of any good reason why one would
     * want to, and allowing it would greatly complicate testing
     * the feature.  Hence the following:
     */
    if ((evictions_enabled != TRUE) && ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
                                        (cache_ptr->resize_ctl.decr_mode != H5C_decr__off)))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled")

    cache_ptr->evictions_enabled = evictions_enabled;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_set_evictions_enabled() */
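
/* Usage sketch (illustrative only): per the check above, evictions can only
 * be disabled while both automatic resize modes are off, so a caller that
 * wants a "keep everything resident" phase must switch resizing off first
 * (via H5C_set_cache_auto_resize_config()).  The function name below is a
 * hypothetical placeholder.
 */
#if 0
static herr_t
example_pin_phase(H5C_t *cache_ptr)
{
    /* fails if either auto-resize mode is still enabled */
    if (H5C_set_evictions_enabled(cache_ptr, FALSE) < 0)
        return FAIL;

    /* ... access entries that must stay resident ... */

    return H5C_set_evictions_enabled(cache_ptr, TRUE);
}
#endif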

/*-------------------------------------------------------------------------
 *
 * Function:    H5C_set_slist_enabled()
 *
 * Purpose:     Enable or disable the slist as directed.
 *
 *              The slist (skip list) is an address ordered list of
 *              dirty entries in the metadata cache.  However, this
 *              list is only needed during flush and close, where we
 *              use it to write entries in more or less increasing
 *              address order.
 *
 *              This function sets up and enables further operations
 *              on the slist, or disables the slist.  This in turn
 *              allows us to avoid the overhead of maintaining the
 *              slist when it is not needed.
 *
 *
 *              If the slist_enabled parameter is TRUE, the function
 *
 *              1) Verifies that the slist is empty.
 *
 *              2) Scans the index list, and inserts all dirty entries
 *                 into the slist.
 *
 *              3) Sets cache_ptr->slist_enabled = TRUE.
 *
 *              Note that the clear_slist parameter is ignored if
 *              the slist_enabled parameter is TRUE.
 *
 *
 *              If the slist_enabled parameter is FALSE, the function
 *              shuts down the slist.
 *
 *              Normally the slist will be empty at this point, however
 *              that need not be the case if H5C_flush_cache() has been
 *              called with the H5C__FLUSH_MARKED_ENTRIES_FLAG.
 *
 *              Thus shutdown proceeds as follows:
 *
 *              1) Test to see if the slist is empty.  If it is, proceed
 *                 to step 3.
 *
 *              2) Test to see if the clear_slist parameter is TRUE.
 *
 *                 If it is, remove all entries from the slist.
 *
 *                 If it isn't, throw an error.
 *
 *              3) Set cache_ptr->slist_enabled = FALSE.
 *
 * Return:      SUCCEED on success, and FAIL on failure.
 *
 * Programmer:  John Mainzer
 *              5/1/20
 *
 * Modifications:
 *
 *              None.
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_slist)
{
    H5C_cache_entry_t *entry_ptr;
    herr_t             ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")

#if H5C__SLIST_OPT_ENABLED

    if (slist_enabled) {

        if (cache_ptr->slist_enabled) {

            HDassert(FALSE);
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?")
        }

        if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) {

            HDassert(FALSE);
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty (1)?")
        }

        /* set cache_ptr->slist_enabled to TRUE so that the slist
         * maintenance macros will be enabled.
         */
        cache_ptr->slist_enabled = TRUE;

        /* scan the index list and insert all dirty entries in the slist */
        entry_ptr = cache_ptr->il_head;

        while (entry_ptr != NULL) {

            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);

            if (entry_ptr->is_dirty) {

                H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
            }

            entry_ptr = entry_ptr->il_next;
        }

        /* we don't maintain a dirty index len, so we can't do a cross
         * check against it.  Note that there is no point in cross checking
         * against the dirty LRU size, as the dirty LRU may not be maintained,
         * and in any case, there is no requirement that all dirty entries
         * will reside on the dirty LRU.
         */
        HDassert(cache_ptr->dirty_index_size == cache_ptr->slist_size);
    }
    else { /* take down the skip list */

        if (!cache_ptr->slist_enabled) {

            HDassert(FALSE);
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?")
        }

        if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) {

            if (clear_slist) {

                H5SL_node_t *node_ptr;

                node_ptr = H5SL_first(cache_ptr->slist_ptr);

                while (node_ptr != NULL) {

                    entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);

                    H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);

                    node_ptr = H5SL_first(cache_ptr->slist_ptr);
                }
            }
            else {

                HDassert(FALSE);
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty (2)?")
            }
        }

        cache_ptr->slist_enabled = FALSE;

        HDassert(0 == cache_ptr->slist_len);
        HDassert(0 == cache_ptr->slist_size);
    }

#else /* H5C__SLIST_OPT_ENABLED is FALSE */

    HDassert(cache_ptr->slist_enabled);

#endif /* H5C__SLIST_OPT_ENABLED is FALSE */

done:

    FUNC_LEAVE_NOAPI(ret_value)

} /* H5C_set_slist_enabled() */
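
/* Usage sketch (illustrative only): the pattern used at flush/close time.
 * The slist is normally disabled to avoid maintenance overhead; it is
 * enabled just before a flush (which populates it from the index list) and
 * disabled again afterwards.  Assumes the H5C_flush_cache() signature of
 * this version of the library; the function name below is a hypothetical
 * placeholder.
 */
#if 0
static herr_t
example_flush_with_slist(H5F_t *f, H5C_t *cache_ptr)
{
    /* build the address-ordered list of dirty entries */
    if (H5C_set_slist_enabled(cache_ptr, TRUE, FALSE) < 0)
        return FAIL;

    /* write dirty entries in (roughly) increasing address order */
    if (H5C_flush_cache(f, H5C__NO_FLAGS_SET) < 0)
        return FAIL;

    /* the flush leaves the slist empty, so no clear is needed */
    return H5C_set_slist_enabled(cache_ptr, FALSE, FALSE);
}
#endif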

/*-------------------------------------------------------------------------
 * Function:    H5C_unpin_entry()
 *
 * Purpose:    Unpin a cache entry.  The entry can be either protected or
 *             unprotected at the time of call, but must be pinned.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              3/22/06
 *
 * Changes:     Added extreme sanity checks on entry and exit.
 *                                      JRM -- 4/26/14
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_unpin_entry(void *_entry_ptr)
{
    H5C_t *            cache_ptr;
    H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_entry_ptr; /* Pointer to entry to unpin */
    herr_t             ret_value = SUCCEED;                         /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity check */
    HDassert(entry_ptr);
    cache_ptr = entry_ptr->cache_ptr;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);

#if H5C_DO_EXTREME_SANITY_CHECKS
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    /* Unpin the entry */
    if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, TRUE) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client")

done:
#if H5C_DO_EXTREME_SANITY_CHECKS
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_unpin_entry() */
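
/* Usage sketch (illustrative only): a client pins an entry while it is
 * protected (via H5C_pin_protected_entry()) so it survives in the cache
 * after unprotect, and unpins it when the long-lived reference is no
 * longer needed.  The function name below is a hypothetical placeholder.
 */
#if 0
static herr_t
example_pin_unpin(void *thing)
{
    /* 'thing' must be protected when the pin is requested */
    if (H5C_pin_protected_entry(thing) < 0)
        return FAIL;

    /* ... unprotect 'thing'; it remains resident while pinned ... */

    return H5C_unpin_entry(thing);
}
#endif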

/*-------------------------------------------------------------------------
 * Function:    H5C_unprotect
 *
 * Purpose:    Undo an H5C_protect() call -- specifically, mark the
 *        entry as unprotected, remove it from the protected list,
 *        and give it back to the replacement policy.
 *
 *        The ADDR argument must be the same as that in the
 *        corresponding call to H5C_protect() and the THING
 *        argument must be the value returned by that call to
 *        H5C_protect().
 *
 *        If the deleted flag is TRUE, simply remove the target entry
 *        from the cache, clear it, and free it without writing it to
 *        disk.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 * Modifications:
 *
 *              JRM -- 7/21/04
 *              Updated for the addition of the hash table.
 *
 *              JRM -- 10/28/04
 *              Added code to set cache_full to TRUE whenever we try to
 *              make space in the cache.
 *
 *              JRM -- 11/12/04
 *              Added code to call to H5C_make_space_in_cache() after the
 *              call to H5C__auto_adjust_cache_size() if that function
 *              sets the size_decreased flag to TRUE.
 *
 *              JRM -- 4/25/05
 *              The size_decreased flag can also be set to TRUE in
 *              H5C_set_cache_auto_resize_config() if a new configuration
 *              forces an immediate reduction in cache size.  Modified
 *              the code to deal with this eventuality.
 *
 *              JRM -- 6/24/05
 *              Added support for the new write_permitted field of H5C_t.
 *
 *              JRM -- 10/22/05
 *              Hand optimizations.
 *
 *              JRM -- 5/3/06
 *              Added code to set the new dirtied field in
 *              H5C_cache_entry_t to FALSE prior to return.
 *
 *              JRM -- 6/23/06
 *              Modified code to allow dirty entries to be loaded from
 *              disk.  This is necessary as a bug fix in the object
 *              header code requires us to modify a header as it is read.
 *
 *              JRM -- 3/28/07
 *              Added the flags parameter and supporting code.  At least
 *              for now, this parameter is used to allow the entry to
 *              be protected read only, thus allowing multiple protects.
 *
 *              Also added code to allow multiple read only protects
 *              of cache entries.
 *
 *              JRM -- 7/27/07
 *              Added code supporting the new evictions_enabled field
 *              in H5C_t.
 *
 *              JRM -- 1/3/08
 *              Added to do a flash cache size increase if appropriate
 *              when a large entry is loaded.
 *
 *              JRM -- 11/13/08
 *              Modified function to call H5C_make_space_in_cache() when
 *              the min_clean_size is violated, not just when there isn't
 *              enough space for an entry that has just been loaded.
 *
 *              The purpose of this modification is to avoid "metadata
 *              blizzards" in the write only case.  In such instances,
 *              the cache was allowed to fill with dirty metadata.  When
 *              we finally needed to evict an entry to make space, we had
 *              to flush out a whole cache full of metadata -- which has
 *              interesting performance effects.  We hope to avoid (or
 *              perhaps more accurately hide) this effect by maintaining
 *              the min_clean_size, which should force us to start flushing
 *              entries long before we actually have to evict something
 *              to make space.
 *
 *
 *              Missing entries?
 *
 *
 *              JRM -- 5/8/20
 *              Updated for the possibility that the slist will be
 *              disabled.
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
{
    H5C_t * cache_ptr;
    hbool_t deleted;
    hbool_t dirtied;
    hbool_t set_flush_marker;
    hbool_t pin_entry;
    hbool_t unpin_entry;
    hbool_t free_file_space;
    hbool_t take_ownership;
    hbool_t was_clean;
#ifdef H5_HAVE_PARALLEL
    hbool_t clear_entry = FALSE;
#endif /* H5_HAVE_PARALLEL */
    H5C_cache_entry_t *entry_ptr;
    H5C_cache_entry_t *test_entry_ptr;
    herr_t             ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    deleted          = ((flags & H5C__DELETED_FLAG) != 0);
    dirtied          = ((flags & H5C__DIRTIED_FLAG) != 0);
    set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
    pin_entry        = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
    unpin_entry      = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
    free_file_space  = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
    take_ownership   = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);

    HDassert(f);
    HDassert(f->shared);

    cache_ptr = f->shared->cache;

    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(H5F_addr_defined(addr));
    HDassert(thing);
    HDassert(!(pin_entry && unpin_entry));

    /* deleted flag must accompany free_file_space */
    HDassert((!free_file_space) || (deleted));

    /* deleted flag must accompany take_ownership */
    HDassert((!take_ownership) || (deleted));

    /* can't have both free_file_space & take_ownership */
    HDassert(!(free_file_space && take_ownership));

    entry_ptr = (H5C_cache_entry_t *)thing;

    HDassert(entry_ptr->addr == addr);

    /* also set the dirtied variable if the dirtied field is set in
     * the entry.
     */
    dirtied |= entry_ptr->dirtied;
    was_clean = !(entry_ptr->is_dirty);

#if H5C_DO_EXTREME_SANITY_CHECKS
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    /* if the entry has multiple read only protects, just decrement
     * the ro_ref_counter.  Don't actually unprotect until the ref count
     * drops to zero.
     */
    if (entry_ptr->ro_ref_count > 1) {

        /* Sanity check */
        HDassert(entry_ptr->is_protected);
        HDassert(entry_ptr->is_read_only);

        if (dirtied)

            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")

        /* Reduce the RO ref count */
        (entry_ptr->ro_ref_count)--;

        /* Pin or unpin the entry as requested. */
        if (pin_entry) {

            /* Pin the entry from a client */
            if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
        }
        else if (unpin_entry) {

            /* Unpin the entry from a client */
            if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")

        } /* end if */
    }
    else {

        if (entry_ptr->is_read_only) {

            /* Sanity check */
            HDassert(entry_ptr->ro_ref_count == 1);

            if (dirtied)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")

            entry_ptr->is_read_only = FALSE;
            entry_ptr->ro_ref_count = 0;

        } /* end if */

#ifdef H5_HAVE_PARALLEL
        /* When the H5C code is used to implement the metadata cache in the
         * PHDF5 case, only the cache on process 0 is allowed to write to file.
         * All the other metadata caches must hold dirty entries until they
         * are told that the entries are clean.
         *
         * The clear_on_unprotect flag in the H5C_cache_entry_t structure
         * exists to deal with the case in which an entry is protected when
         * its cache receives word that the entry is now clean.  In this case,
         * the clear_on_unprotect flag is set, and the entry is flushed with
         * the H5C__FLUSH_CLEAR_ONLY_FLAG.
         *
         * All this is a bit awkward, but until the metadata cache entries
         * are contiguous, with only one dirty flag, we have to let the supplied
         * functions deal with resetting the is_dirty flag.
         */
        if (entry_ptr->clear_on_unprotect) {
            /* Sanity check */
            HDassert(entry_ptr->is_dirty);

            entry_ptr->clear_on_unprotect = FALSE;
            if (!dirtied)
                clear_entry = TRUE;
        } /* end if */
#endif    /* H5_HAVE_PARALLEL */

        if (!entry_ptr->is_protected)

            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??")

        /* Mark the entry as dirty if appropriate */
        entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);

        if (dirtied) {

            if (entry_ptr->image_up_to_date) {

                entry_ptr->image_up_to_date = FALSE;

                if (entry_ptr->flush_dep_nparents > 0) {

                    if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)

                        HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
                                    "Can't propagate serialization status to fd parents")

                } /* end if */
            }     /* end if */
        }         /* end if */

        /* Check for newly dirtied entry */
        if (was_clean && entry_ptr->is_dirty) {

            /* Update index for newly dirtied entry */
            H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)

            /* If the entry's type has a 'notify' callback send an
             * 'entry dirtied' notice now that the entry is fully
             * integrated into the cache.
             */
            if ((entry_ptr->type->notify) &&
                ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0))

                HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")

            /* Propagate the flush dep dirty flag up the flush dependency chain
             * if appropriate
             */
            if (entry_ptr->flush_dep_nparents > 0) {

                if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)

                    HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
            }
        } /* end if */
        /* Check for newly clean entry */
        else if (!was_clean && !entry_ptr->is_dirty) {

            /* If the entry's type has a 'notify' callback send an
             * 'entry cleaned' notice now that the entry is fully
             * integrated into the cache.
             */
            if ((entry_ptr->type->notify) &&
                ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))

                HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
                            "can't notify client about entry dirty flag cleared")

            /* Propagate the flush dep clean flag up the flush dependency chain
             * if appropriate
             */
            if (entry_ptr->flush_dep_nparents > 0) {

                if (H5C__mark_flush_dep_clean(entry_ptr) < 0)

                    HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
            }
        } /* end else-if */

        /* Pin or unpin the entry as requested. */
        if (pin_entry) {

            /* Pin the entry from a client */
            if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
        }
        else if (unpin_entry) {

            /* Unpin the entry from a client */
            if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
        } /* end if */

        /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
         * the pinned entry list if entry_ptr->is_pinned is TRUE.
         */
        H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL)

        entry_ptr->is_protected = FALSE;

        /* if the entry is dirty, 'or' its flush_marker with the set flush flag,
         * and then add it to the skip list if it isn't there already.
         */
        if (entry_ptr->is_dirty) {

            entry_ptr->flush_marker |= set_flush_marker;

            if (!entry_ptr->in_slist) {

                /* this is a no-op if cache_ptr->slist_enabled is FALSE */
                H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
            }
        } /* end if */

        /* this implementation of the "deleted" option is a bit inefficient, as
         * we re-insert the entry to be deleted into the replacement policy
         * data structures, only to remove them again.  Depending on how often
         * we do this, we may want to optimize a bit.
         *
         * On the other hand, this implementation is reasonably clean, and
         * makes good use of existing code.
         *                                             JRM - 5/19/04
         */
        if (deleted) {

            unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG);

            /* verify that the target entry is in the cache. */
            H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)

            if (test_entry_ptr == NULL)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")

            else if (test_entry_ptr != entry_ptr)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
                            "hash table contains multiple entries for addr?!?")

            /* Set the 'free file space' flag for the flush, if needed */
            if (free_file_space) {

                flush_flags |= H5C__FREE_FILE_SPACE_FLAG;
            }

            /* Set the "take ownership" flag for the flush, if needed */
            if (take_ownership) {

                flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;
            }

            /* Delete the entry from the skip list on destroy */
            flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;

            HDassert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist)));

            if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")

        } /* end if */
#ifdef H5_HAVE_PARALLEL
        else if (clear_entry) {

            /* verify that the target entry is in the cache. */
            H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)

            if (test_entry_ptr == NULL)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")

            else if (test_entry_ptr != entry_ptr)

                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
                            "hash table contains multiple entries for addr?!?")

            if (H5C__flush_single_entry(f, entry_ptr,
                                        H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")

        } /* end else if */
#endif    /* H5_HAVE_PARALLEL */
    }

    H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)

done:

#if H5C_DO_EXTREME_SANITY_CHECKS
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))

        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    FUNC_LEAVE_NOAPI(ret_value)

} /* H5C_unprotect() */
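
/* Usage sketch (illustrative only): the protect/unprotect pairing.  The
 * H5C_protect() signature shown is assumed to match this version of the
 * library; the type, address, and udata values are hypothetical
 * placeholders, as is the function name below.
 */
#if 0
static herr_t
example_modify_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata)
{
    void *thing;

    /* acquire exclusive access; loads the entry from disk if necessary */
    if (NULL == (thing = H5C_protect(f, type, addr, udata, H5C__NO_FLAGS_SET)))
        return FAIL;

    /* ... modify the in-memory entry ... */

    /* release it, telling the cache it is now dirty */
    return H5C_unprotect(f, addr, thing, H5C__DIRTIED_FLAG);
}
#endif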

/*-------------------------------------------------------------------------
 *
 * Function:    H5C_unsettle_entry_ring
 *
 * Purpose:     Advise the metadata cache that the specified entry's free space
 *              manager ring is no longer settled (if it was on entry).
 *
 *              If the target free space manager ring is already
 *              unsettled, do nothing, and return SUCCEED.
 *
 *              If the target free space manager ring is settled, and
 *              we are not in the process of a file shutdown, mark
 *              the ring as unsettled, and return SUCCEED.
 *
 *              If the target free space manager is settled, and we
 *              are in the process of a file shutdown, post an error
 *              message, and return FAIL.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              January 3, 2017
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_unsettle_entry_ring(void *_entry)
{
    H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */
    H5C_t *            cache;                               /* Cache for file */
    herr_t             ret_value = SUCCEED;                 /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(entry);
    HDassert(entry->ring != H5C_RING_UNDEFINED);
    HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) ||
             (H5C_RING_MDFSM == entry->ring));
    cache = entry->cache_ptr;
    HDassert(cache);
    HDassert(cache->magic == H5C__H5C_T_MAGIC);

    switch (entry->ring) {
        case H5C_RING_USER:
            /* Do nothing */
            break;

        case H5C_RING_RDFSM:
            if (cache->rdfsm_settled) {
                if (cache->flush_in_progress || cache->close_warning_received)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
                cache->rdfsm_settled = FALSE;
            } /* end if */
            break;

        case H5C_RING_MDFSM:
            if (cache->mdfsm_settled) {
                if (cache->flush_in_progress || cache->close_warning_received)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
                cache->mdfsm_settled = FALSE;
            } /* end if */
            break;

        default:
            HDassert(FALSE); /* this should be un-reachable */
            break;
    } /* end switch */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_unsettle_entry_ring() */

/*-------------------------------------------------------------------------
 * Function:    H5C_unsettle_ring()
 *
 * Purpose:     Advise the metadata cache that the specified free space
 *              manager ring is no longer settled (if it was on entry).
 *
 *              If the target free space manager ring is already
 *              unsettled, do nothing, and return SUCCEED.
 *
 *              If the target free space manager ring is settled, and
 *              we are not in the process of a file shutdown, mark
 *              the ring as unsettled, and return SUCCEED.
 *
 *              If the target free space manager is settled, and we
 *              are in the process of a file shutdown, post an error
 *              message, and return FAIL.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              10/15/16
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring)
{
    H5C_t *cache_ptr;
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(f);
    HDassert(f->shared);
    HDassert(f->shared->cache);
    HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring));
    cache_ptr = f->shared->cache;
    HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic);

    switch (ring) {
        case H5C_RING_RDFSM:
            if (cache_ptr->rdfsm_settled) {
                if (cache_ptr->close_warning_received)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
                cache_ptr->rdfsm_settled = FALSE;
            } /* end if */
            break;

        case H5C_RING_MDFSM:
            if (cache_ptr->mdfsm_settled) {
                if (cache_ptr->close_warning_received)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
                cache_ptr->mdfsm_settled = FALSE;
            } /* end if */
            break;

        default:
            HDassert(FALSE); /* this should be un-reachable */
            break;
    } /* end switch */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_unsettle_ring() */

/*-------------------------------------------------------------------------
 * Function:    H5C_validate_resize_config()
 *
 * Purpose:    Run a sanity check on the specified sections of the
 *             provided instance of struct H5C_auto_size_ctl_t.
 *
 *        Do nothing and return SUCCEED if no errors are detected,
 *        and flag an error and return FAIL otherwise.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              3/23/05
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    if (config_ptr == NULL)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")

    if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")

    if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {

        if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")

        if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small")

        if (config_ptr->min_size > config_ptr->max_size)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")

        if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) ||
                                             (config_ptr->initial_size > config_ptr->max_size)))
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
                        "initial_size must be in the interval [min_size, max_size]")

        if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0))
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]")

        if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")

        if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
    } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */

    if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
        if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold))
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")

        if (config_ptr->incr_mode == H5C_incr__threshold) {
            if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0))
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
                            "lower_hr_threshold must be in the range [0.0, 1.0]")

            if (config_ptr->increment < 1.0)
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0")

            /* no need to check max_increment, as it is a size_t,
             * and thus must be non-negative.
             */
        } /* H5C_incr__threshold */

        switch (config_ptr->flash_incr_mode) {
            case H5C_flash_incr__off:
                /* nothing to do here */
                break;

            case H5C_flash_incr__add_space:
                if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0))
                    HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
                                "flash_multiple must be in the range [0.1, 10.0]")
                if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0))
                    HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
                                "flash_threshold must be in the range [0.1, 1.0]")
                break;

            default:
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode")
                break;
        } /* end switch */
    }     /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */

    if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) {

        if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) &&
            (config_ptr->decr_mode != H5C_decr__age_out) &&
            (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) {

            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode")
        }

        if (config_ptr->decr_mode == H5C_decr__threshold) {
            if (config_ptr->upper_hr_threshold > 1.0)
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0")

            if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0))
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]")

            /* no need to check max_decrement as it is a size_t
             * and thus must be non-negative.
             */
        } /* H5C_decr__threshold */

        if ((config_ptr->decr_mode == H5C_decr__age_out) ||
            (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) {

            if (config_ptr->epochs_before_eviction < 1)
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive")
            if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS)
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big")

            if ((config_ptr->apply_empty_reserve) &&
                ((config_ptr->empty_reserve > 1.0) || (config_ptr->empty_reserve < 0.0)))
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]")

            /* no need to check max_decrement as it is a size_t
             * and thus must be non-negative.
             */
        } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */

        if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) {
            if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0))
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
                            "upper_hr_threshold must be in the interval [0.0, 1.0]")
        } /* H5C_decr__age_out_with_threshold */
    }     /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */

    if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) {
        if ((config_ptr->incr_mode == H5C_incr__threshold) &&
            ((config_ptr->decr_mode == H5C_decr__threshold) ||
             (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) &&
            (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold))
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config")
    } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_validate_resize_config() */
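
/* Usage sketch (illustrative only): validating a configuration before
 * applying it.  Assumes H5C_RESIZE_CFG__VALIDATE_ALL, which combines the
 * general, increment, decrement, and interaction checks; the function name
 * below is a hypothetical placeholder.
 */
#if 0
static herr_t
example_validate(H5C_auto_size_ctl_t *config)
{
    /* e.g., lower_hr_threshold >= upper_hr_threshold would fail here */
    return H5C_validate_resize_config(config, H5C_RESIZE_CFG__VALIDATE_ALL);
}
#endif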
3826 
3827 /*-------------------------------------------------------------------------
3828  * Function:    H5C_create_flush_dependency()
3829  *
3830  * Purpose:     Initiates a parent<->child entry flush dependency.  The parent
3831  *              entry must be pinned or protected at the time of call, and must
3832  *              have all dependencies removed before the cache can shut down.
3833  *
3834  * Note:        Flush dependencies in the cache indicate that a child entry
3835  *              must be flushed to the file before its parent.  (This is
3836  *              currently used to implement Single-Writer/Multiple-Reader (SWMR)
3837  *              I/O access for data structures in the file).
3838  *
3839  *              Creating a flush dependency between two entries will also pin
3840  *              the parent entry.
3841  *
3842  * Return:      Non-negative on success/Negative on failure
3843  *
3844  * Programmer:  Quincey Koziol
3845  *              3/05/09
3846  *
3847  *-------------------------------------------------------------------------
3848  */
3849 herr_t
H5C_create_flush_dependency(void * parent_thing,void * child_thing)3850 H5C_create_flush_dependency(void *parent_thing, void *child_thing)
3851 {
3852     H5C_t *            cache_ptr;
3853     H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
3854     H5C_cache_entry_t *child_entry  = (H5C_cache_entry_t *)child_thing;  /* Ptr to child thing's entry */
3855     herr_t             ret_value    = SUCCEED;                           /* Return value */
3856 
3857     FUNC_ENTER_NOAPI(FAIL)
3858 
3859     /* Sanity checks */
3860     HDassert(parent_entry);
3861     HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
3862     HDassert(H5F_addr_defined(parent_entry->addr));
3863     HDassert(child_entry);
3864     HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
3865     HDassert(H5F_addr_defined(child_entry->addr));
3866     cache_ptr = parent_entry->cache_ptr;
3867     HDassert(cache_ptr);
3868     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
3869     HDassert(cache_ptr == child_entry->cache_ptr);
3870 #ifndef NDEBUG
3871     /* Make sure the parent is not already a parent */
3872     {
3873         unsigned u;
3874 
3875         for (u = 0; u < child_entry->flush_dep_nparents; u++)
3876             HDassert(child_entry->flush_dep_parent[u] != parent_entry);
3877     }  /* end block */
3878 #endif /* NDEBUG */
3879 
3880     /* More sanity checks */
3881     if (child_entry == parent_entry)
3882         HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
3883     if (!(parent_entry->is_protected || parent_entry->is_pinned))
3884         HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
3885 
3886     /* Check for parent not pinned */
3887     if (!parent_entry->is_pinned) {
3888         /* Sanity check */
3889         HDassert(parent_entry->flush_dep_nchildren == 0);
3890         HDassert(!parent_entry->pinned_from_client);
3891         HDassert(!parent_entry->pinned_from_cache);
3892 
3893         /* Pin the parent entry */
3894         parent_entry->is_pinned = TRUE;
3895         H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
3896     } /* end else */
3897 
3898     /* Mark the entry as pinned from the cache's action (possibly redundantly) */
3899     parent_entry->pinned_from_cache = TRUE;
3900 
3901     /* Check if we need to resize the child's parent array */
3902     if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
3903         if (child_entry->flush_dep_parent_nalloc == 0) {
3904             /* Array does not exist yet, allocate it */
3905             HDassert(!child_entry->flush_dep_parent);
3906 
3907             if (NULL == (child_entry->flush_dep_parent =
3908                              H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT)))
3909                 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
3910                             "memory allocation failed for flush dependency parent list")
3911             child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
3912         } /* end if */
3913         else {
3914             /* Resize existing array */
3915             HDassert(child_entry->flush_dep_parent);
3916 
3917             if (NULL == (child_entry->flush_dep_parent =
3918                              H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
3919                                               2 * child_entry->flush_dep_parent_nalloc)))
3920                 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
3921                             "memory allocation failed for flush dependency parent list")
3922             child_entry->flush_dep_parent_nalloc *= 2;
3923         } /* end else */
3924         cache_ptr->entry_fd_height_change_counter++;
3925     } /* end if */
3926 
3927     /* Add the dependency to the child's parent array */
3928     child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
3929     child_entry->flush_dep_nparents++;
3930 
3931     /* Increment parent's number of children */
3932     parent_entry->flush_dep_nchildren++;
3933 
3934     /* Adjust the number of dirty children */
3935     if (child_entry->is_dirty) {
3936         /* Sanity check */
3937         HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
3938 
3939         parent_entry->flush_dep_ndirty_children++;
3940 
3941         /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
3942         if (parent_entry->type->notify &&
3943             (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
3944             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
3945                         "can't notify parent about child entry dirty flag set")
3946     } /* end if */
3947 
3948     /* Adjust the parent's number of unserialized children.  Note
3949      * that it is possible for an entry to be clean and unserialized.
3950      */
3951     if (!child_entry->image_up_to_date) {
3952         HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
3953 
3954         parent_entry->flush_dep_nunser_children++;
3955 
3956         /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
3957         if (parent_entry->type->notify &&
3958             (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
3959             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
3960                         "can't notify parent about child entry serialized flag reset")
3961     } /* end if */
3962 
3963     /* Post-conditions, for successful operation */
3964     HDassert(parent_entry->is_pinned);
3965     HDassert(parent_entry->flush_dep_nchildren > 0);
3966     HDassert(child_entry->flush_dep_parent);
3967     HDassert(child_entry->flush_dep_nparents > 0);
3968     HDassert(child_entry->flush_dep_parent_nalloc > 0);
3969 #ifndef NDEBUG
3970     H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
3971 #endif /* NDEBUG */
3972 
3973 done:
3974     FUNC_LEAVE_NOAPI(ret_value)
3975 } /* H5C_create_flush_dependency() */
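
/* Usage sketch (illustrative only, not code from this file): a cache
 * client that keeps an on-disk tree consistent under SWMR access might
 * tie a child node to its parent so that the child always reaches disk
 * first.  Here "parent" and "child" are assumed to be pointers to
 * in-cache things, as described in the header comment above.
 *
 *     if (H5C_create_flush_dependency(parent, child) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "can't create flush dependency")
 *
 *     (child is now flushed before parent, and parent stays pinned
 *      until the dependency is torn down)
 *
 *     if (H5C_destroy_flush_dependency(parent, child) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy flush dependency")
 */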
3976 
3977 /*-------------------------------------------------------------------------
3978  * Function:    H5C_destroy_flush_dependency()
3979  *
3980  * Purpose:     Terminates a parent <-> child entry flush dependency.  The
3981  *              parent entry must be pinned.
3982  *
3983  * Return:      Non-negative on success/Negative on failure
3984  *
3985  * Programmer:  Quincey Koziol
3986  *              3/05/09
3987  *
3988  *-------------------------------------------------------------------------
3989  */
3990 herr_t
3991 H5C_destroy_flush_dependency(void *parent_thing, void *child_thing)
3992 {
3993     H5C_t *            cache_ptr;
3994     H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
3995     H5C_cache_entry_t *child_entry  = (H5C_cache_entry_t *)child_thing;  /* Ptr to child entry */
3996     unsigned           u;                                                /* Local index variable */
3997     herr_t             ret_value = SUCCEED;                              /* Return value */
3998 
3999     FUNC_ENTER_NOAPI(FAIL)
4000 
4001     /* Sanity checks */
4002     HDassert(parent_entry);
4003     HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
4004     HDassert(H5F_addr_defined(parent_entry->addr));
4005     HDassert(child_entry);
4006     HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
4007     HDassert(H5F_addr_defined(child_entry->addr));
4008     cache_ptr = parent_entry->cache_ptr;
4009     HDassert(cache_ptr);
4010     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
4011     HDassert(cache_ptr == child_entry->cache_ptr);
4012 
4013     /* Usage checks */
4014     if (!parent_entry->is_pinned)
4015         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
4016     if (NULL == child_entry->flush_dep_parent)
4017         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
4018                     "Child entry doesn't have a flush dependency parent array")
4019     if (0 == parent_entry->flush_dep_nchildren)
4020         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
4021                     "Parent entry has no flush dependency children")
4022 
4023     /* Search for parent in child's parent array.  This is a linear search
4024      * because we do not expect large numbers of parents.  If this changes, we
4025      * may wish to change the parent array to a skip list */
4026     for (u = 0; u < child_entry->flush_dep_nparents; u++)
4027         if (child_entry->flush_dep_parent[u] == parent_entry)
4028             break;
4029     if (u == child_entry->flush_dep_nparents)
4030         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
4031                     "Parent entry isn't a flush dependency parent for child entry")
4032 
4033     /* Remove parent entry from child's parent array */
4034     if (u < (child_entry->flush_dep_nparents - 1))
4035         HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1],
4036                   (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
4037     child_entry->flush_dep_nparents--;
4038 
4039     /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
4040     parent_entry->flush_dep_nchildren--;
4041     if (0 == parent_entry->flush_dep_nchildren) {
4042         /* Sanity check */
4043         HDassert(parent_entry->pinned_from_cache);
4044 
4045         /* Check if we should unpin parent entry now */
4046         if (!parent_entry->pinned_from_client)
4047             if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0)
4048                 HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry")
4049 
4050         /* Mark the entry as unpinned from the cache's action */
4051         parent_entry->pinned_from_cache = FALSE;
4052     } /* end if */
4053 
4054     /* Adjust parent entry's ndirty_children */
4055     if (child_entry->is_dirty) {
4056         /* Sanity check */
4057         HDassert(parent_entry->flush_dep_ndirty_children > 0);
4058 
4059         parent_entry->flush_dep_ndirty_children--;
4060 
4061         /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
4062         if (parent_entry->type->notify &&
4063             (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0)
4064             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
4065                         "can't notify parent about child entry dirty flag reset")
4066     } /* end if */
4067 
4068     /* Adjust parent entry's number of unserialized children */
4069     if (!child_entry->image_up_to_date) {
4070         HDassert(parent_entry->flush_dep_nunser_children > 0);
4071 
4072         parent_entry->flush_dep_nunser_children--;
4073 
4074         /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
4075         if (parent_entry->type->notify &&
4076             (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
4077             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
4078                         "can't notify parent about child entry serialized flag set")
4079     } /* end if */
4080 
4081     /* Shrink or free the parent array if appropriate */
4082     if (child_entry->flush_dep_nparents == 0) {
4083         child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent);
4084         child_entry->flush_dep_parent_nalloc = 0;
4085     } /* end if */
4086     else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT &&
4087              child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) {
4088         if (NULL == (child_entry->flush_dep_parent =
4089                          H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
4090                                           child_entry->flush_dep_parent_nalloc / 4)))
4091             HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
4092                         "memory allocation failed for flush dependency parent list")
4093         child_entry->flush_dep_parent_nalloc /= 4;
4094     } /* end else-if */
4095 
4096 done:
4097     FUNC_LEAVE_NOAPI(ret_value)
4098 } /* H5C_destroy_flush_dependency() */
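
/* Note on the parent array sizing policy (worked example, assuming
 * H5C_FLUSH_DEP_PARENT_INIT is 8 for illustration): the array is doubled
 * on demand in H5C_create_flush_dependency() and quartered here once it
 * is no more than 1/4 full, e.g.:
 *
 *     add parents:    nparents 0 -> 9   =>  nalloc 0 -> 8 -> 16
 *     remove parents: nparents 9 -> 4   =>  nalloc 16 -> 4
 *
 * Growing by 2x but shrinking only at the 1/4-full mark keeps the two
 * thresholds apart, so a parent count oscillating around a power of two
 * does not cause the array to be reallocated on every add/remove.
 */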
4099 
4100 /*************************************************************************/
4101 /**************************** Private Functions: *************************/
4102 /*************************************************************************/
4103 
4104 /*-------------------------------------------------------------------------
4105  * Function:    H5C__pin_entry_from_client()
4106  *
4107  * Purpose:     Internal routine to pin a cache entry from a client action.
4108  *
4109  * Return:      Non-negative on success/Negative on failure
4110  *
4111  * Programmer:  Quincey Koziol
4112  *              3/26/09
4113  *
4114  *-------------------------------------------------------------------------
4115  */
4116 #if H5C_COLLECT_CACHE_STATS
4117 static herr_t
4118 H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
4119 #else
4120 static herr_t
4121 H5C__pin_entry_from_client(H5C_t H5_ATTR_UNUSED *cache_ptr, H5C_cache_entry_t *entry_ptr)
4122 #endif
4123 {
4124     herr_t ret_value = SUCCEED; /* Return value */
4125 
4126     FUNC_ENTER_STATIC
4127 
4128     /* Sanity checks */
4129     HDassert(cache_ptr);
4130     HDassert(entry_ptr);
4131     HDassert(entry_ptr->is_protected);
4132 
4133     /* Check if the entry is already pinned */
4134     if (entry_ptr->is_pinned) {
4135         /* Check if the entry was pinned through an explicit pin from a client */
4136         if (entry_ptr->pinned_from_client)
4137             HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned")
4138     } /* end if */
4139     else {
4140         entry_ptr->is_pinned = TRUE;
4141 
4142         H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
4143     } /* end else */
4144 
4145     /* Mark that the entry was pinned through an explicit pin from a client */
4146     entry_ptr->pinned_from_client = TRUE;
4147 
4148 done:
4149     FUNC_LEAVE_NOAPI(ret_value)
4150 } /* H5C__pin_entry_from_client() */
4151 
4152 /*-------------------------------------------------------------------------
4153  * Function:    H5C__unpin_entry_real()
4154  *
4155  * Purpose:     Internal routine to unpin a cache entry.
4156  *
4157  * Return:      Non-negative on success/Negative on failure
4158  *
4159  * Programmer:  Quincey Koziol
4160  *              1/6/18
4161  *
4162  *-------------------------------------------------------------------------
4163  */
4164 static herr_t
4165 H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
4166 {
4167     herr_t ret_value = SUCCEED; /* Return value */
4168 
4169 #if H5C_DO_SANITY_CHECKS
4170     FUNC_ENTER_STATIC
4171 #else
4172     FUNC_ENTER_STATIC_NOERR
4173 #endif
4174 
4175     /* Sanity checking */
4176     HDassert(cache_ptr);
4177     HDassert(entry_ptr);
4178     HDassert(entry_ptr->is_pinned);
4179 
4180     /* If requested, update the replacement policy if the entry is not protected */
4181     if (update_rp && !entry_ptr->is_protected)
4182         H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)
4183 
4184     /* Unpin the entry now */
4185     entry_ptr->is_pinned = FALSE;
4186 
4187     /* Update the stats for an unpin operation */
4188     H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
4189 
4190 #if H5C_DO_SANITY_CHECKS
4191 done:
4192 #endif
4193     FUNC_LEAVE_NOAPI(ret_value)
4194 } /* H5C__unpin_entry_real() */
4195 
4196 /*-------------------------------------------------------------------------
4197  * Function:    H5C__unpin_entry_from_client()
4198  *
4199  * Purpose:     Internal routine to unpin a cache entry from a client action.
4200  *
4201  * Return:      Non-negative on success/Negative on failure
4202  *
4203  * Programmer:  Quincey Koziol
4204  *              3/24/09
4205  *
4206  *-------------------------------------------------------------------------
4207  */
4208 static herr_t
4209 H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
4210 {
4211     herr_t ret_value = SUCCEED; /* Return value */
4212 
4213     FUNC_ENTER_STATIC
4214 
4215     /* Sanity checking */
4216     HDassert(cache_ptr);
4217     HDassert(entry_ptr);
4218 
4219     /* Error checking (should be sanity checks?) */
4220     if (!entry_ptr->is_pinned)
4221         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
4222     if (!entry_ptr->pinned_from_client)
4223         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")
4224 
4225     /* Check if the entry is not pinned from a flush dependency */
4226     if (!entry_ptr->pinned_from_cache)
4227         if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
4228             HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
4229 
4230     /* Mark the entry as explicitly unpinned by the client */
4231     entry_ptr->pinned_from_client = FALSE;
4232 
4233 done:
4234     FUNC_LEAVE_NOAPI(ret_value)
4235 } /* H5C__unpin_entry_from_client() */
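
/* Note: an entry's pinned state is effectively the logical OR of two
 * independent sources, and the entry only becomes unpinned when both
 * are clear:
 *
 *     pinned_from_client   pinned_from_cache   =>   is_pinned
 *     ---------------------------------------------------------
 *     FALSE                FALSE                    FALSE
 *     TRUE                 FALSE                    TRUE
 *     FALSE                TRUE                     TRUE
 *     TRUE                 TRUE                     TRUE
 *
 * This routine clears only the client flag; an entry that is also
 * pinned as a flush dependency parent remains pinned until
 * H5C_destroy_flush_dependency() clears pinned_from_cache.
 */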
4236 
4237 /*-------------------------------------------------------------------------
4238  *
4239  * Function:    H5C__auto_adjust_cache_size
4240  *
4241  * Purpose:     Obtain the current full cache hit rate, and compare it
4242  *              with the hit rate thresholds for modifying cache size.
4243  *              If one of the thresholds has been crossed, adjust the
4244  *              size of the cache accordingly.
4245  *
4246  *              The function then resets the full cache hit rate
4247  *              statistics, and exits.
4248  *
4249  * Return:      Non-negative on success/Negative on failure or if there was
4250  *              an attempt to flush a protected item.
4251  *
4252  *
4253  * Programmer:  John Mainzer, 10/7/04
4254  *
4255  *-------------------------------------------------------------------------
4256  */
4257 static herr_t
4258 H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
4259 {
4260     H5C_t *                cache_ptr             = f->shared->cache;
4261     hbool_t                reentrant_call        = FALSE;
4262     hbool_t                inserted_epoch_marker = FALSE;
4263     size_t                 new_max_cache_size    = 0;
4264     size_t                 old_max_cache_size    = 0;
4265     size_t                 new_min_clean_size    = 0;
4266     size_t                 old_min_clean_size    = 0;
4267     double                 hit_rate;
4268     enum H5C_resize_status status    = in_spec; /* will change if needed */
4269     herr_t                 ret_value = SUCCEED; /* Return value */
4270 
4271     FUNC_ENTER_STATIC
4272 
4273     HDassert(f);
4274     HDassert(cache_ptr);
4275     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
4276     HDassert(cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length);
4277     HDassert(0.0 <= (cache_ptr->resize_ctl).min_clean_fraction);
4278     HDassert((cache_ptr->resize_ctl).min_clean_fraction <= 100.0);
4279 
4280     /* Check to see if cache_ptr->resize_in_progress is TRUE.  If it is, this
4281      * is a re-entrant call via a client callback called in the resize
4282      * process.  To avoid an infinite recursion, set reentrant_call to
4283      * TRUE, and goto done.
4284      */
4285     if (cache_ptr->resize_in_progress) {
4286         reentrant_call = TRUE;
4287         HGOTO_DONE(SUCCEED)
4288     } /* end if */
4289 
4290     cache_ptr->resize_in_progress = TRUE;
4291 
4292     if (!cache_ptr->resize_enabled)
4293         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
4294 
4295     HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) ||
4296              ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off));
4297 
4298     if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
4299         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
4300 
4301     HDassert((0.0 <= hit_rate) && (hit_rate <= 1.0));
4302 
4303     switch ((cache_ptr->resize_ctl).incr_mode) {
4304         case H5C_incr__off:
4305             if (cache_ptr->size_increase_possible)
4306                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
4307             break;
4308 
4309         case H5C_incr__threshold:
4310             if (hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold) {
4311 
4312                 if (!cache_ptr->size_increase_possible) {
4313 
4314                     status = increase_disabled;
4315                 }
4316                 else if (cache_ptr->max_cache_size >= (cache_ptr->resize_ctl).max_size) {
4317 
4318                     HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).max_size);
4319                     status = at_max_size;
4320                 }
4321                 else if (!cache_ptr->cache_full) {
4322 
4323                     status = not_full;
4324                 }
4325                 else {
4326 
4327                     new_max_cache_size =
4328                         (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl).increment);
4329 
4330                     /* clip to max size if necessary */
4331                     if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
4332 
4333                         new_max_cache_size = (cache_ptr->resize_ctl).max_size;
4334                     }
4335 
4336                     /* clip to max increment if necessary */
4337                     if (((cache_ptr->resize_ctl).apply_max_increment) &&
4338                         ((cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment) <
4339                          new_max_cache_size)) {
4340 
4341                         new_max_cache_size =
4342                             cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment;
4343                     }
4344 
4345                     status = increase;
4346                 }
4347             }
4348             break;
4349 
4350         default:
4351             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
4352     }
4353 
4354     /* If the decr_mode is either age out or age out with threshold, we
4355      * must run the marker maintenance code, whether we run the size
4356      * reduction code or not.  We do this in two places -- here we
4357      * insert a new marker if the number of active epoch markers is
4358      * less than the current epochs before eviction, and after
4359      * the ageout call, we cycle the markers.
4360      *
4361      * However, we can't call the ageout code or cycle the markers
4362      * unless there was a full complement of markers in place on
4363      * entry.  The inserted_epoch_marker flag is used to track this.
4364      */
4365 
4366     if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
4367          ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
4368         (cache_ptr->epoch_markers_active < (cache_ptr->resize_ctl).epochs_before_eviction)) {
4369 
4370         if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
4371             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
4372 
4373         inserted_epoch_marker = TRUE;
4374     }
4375 
4376     /* don't run the cache size decrease code unless the cache size
4377      * increase code is disabled, or the size increase code sees no need
4378      * for action.  In either case, status == in_spec at this point.
4379      */
4380 
4381     if (status == in_spec) {
4382 
4383         switch ((cache_ptr->resize_ctl).decr_mode) {
4384             case H5C_decr__off:
4385                 break;
4386 
4387             case H5C_decr__threshold:
4388                 if (hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold) {
4389 
4390                     if (!cache_ptr->size_decrease_possible) {
4391 
4392                         status = decrease_disabled;
4393                     }
4394                     else if (cache_ptr->max_cache_size <= (cache_ptr->resize_ctl).min_size) {
4395 
4396                         HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).min_size);
4397                         status = at_min_size;
4398                     }
4399                     else {
4400 
4401                         new_max_cache_size = (size_t)(((double)(cache_ptr->max_cache_size)) *
4402                                                       (cache_ptr->resize_ctl).decrement);
4403 
4404                         /* clip to min size if necessary */
4405                         if (new_max_cache_size < (cache_ptr->resize_ctl).min_size) {
4406 
4407                             new_max_cache_size = (cache_ptr->resize_ctl).min_size;
4408                         }
4409 
4410                         /* clip to max decrement if necessary */
4411                         if (((cache_ptr->resize_ctl).apply_max_decrement) &&
4412                             (((cache_ptr->resize_ctl).max_decrement + new_max_cache_size) <
4413                              cache_ptr->max_cache_size)) {
4414 
4415                             new_max_cache_size =
4416                                 cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
4417                         }
4418 
4419                         status = decrease;
4420                     }
4421                 }
4422                 break;
4423 
4424             case H5C_decr__age_out_with_threshold:
4425             case H5C_decr__age_out:
4426                 if (!inserted_epoch_marker) {
4427                     if (!cache_ptr->size_decrease_possible)
4428                         status = decrease_disabled;
4429                     else {
4430                         if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size,
4431                                                     write_permitted) < 0)
4432                             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
4433                     } /* end else */
4434                 }     /* end if */
4435                 break;
4436 
4437             default:
4438                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown decr_mode")
4439         }
4440     }
4441 
4442     /* cycle the epoch markers here if appropriate */
4443     if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
4444          ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
4445         (!inserted_epoch_marker)) {
4446 
4447         /* move last epoch marker to the head of the LRU list */
4448         if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
4449             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
4450     }
4451 
4452     if ((status == increase) || (status == decrease)) {
4453 
4454         old_max_cache_size = cache_ptr->max_cache_size;
4455         old_min_clean_size = cache_ptr->min_clean_size;
4456 
4457         new_min_clean_size =
4458             (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
4459 
4460         /* new_min_clean_size is of size_t, and thus must be non-negative.
4461          * Hence we have
4462          *
4463          *     ( 0 <= new_min_clean_size ).
4464          *
4465          * by definition.
4466          */
4467         HDassert(new_min_clean_size <= new_max_cache_size);
4468         HDassert((cache_ptr->resize_ctl).min_size <= new_max_cache_size);
4469         HDassert(new_max_cache_size <= (cache_ptr->resize_ctl).max_size);
4470 
4471         cache_ptr->max_cache_size = new_max_cache_size;
4472         cache_ptr->min_clean_size = new_min_clean_size;
4473 
4474         if (status == increase) {
4475 
4476             cache_ptr->cache_full = FALSE;
4477         }
4478         else if (status == decrease) {
4479 
4480             cache_ptr->size_decreased = TRUE;
4481         }
4482 
4483         /* update flash cache size increase fields as appropriate */
4484         if (cache_ptr->flash_size_increase_possible) {
4485 
4486             switch ((cache_ptr->resize_ctl).flash_incr_mode) {
4487                 case H5C_flash_incr__off:
4488                     HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
4489                                 "flash_size_increase_possible but H5C_flash_incr__off?!")
4490                     break;
4491 
4492                 case H5C_flash_incr__add_space:
4493                     cache_ptr->flash_size_increase_threshold = (size_t)(
4494                         ((double)(cache_ptr->max_cache_size)) * ((cache_ptr->resize_ctl).flash_threshold));
4495                     break;
4496 
4497                 default: /* should be unreachable */
4498                     HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
4499                     break;
4500             }
4501         }
4502     }
4503 
4504     if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
4505         (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status,
4506                                              old_max_cache_size, new_max_cache_size, old_min_clean_size,
4507                                              new_min_clean_size);
4508     }
4509 
4510     if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
4511         /* this should be impossible... */
4512         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
4513 
4514 done:
4515     /* Sanity checks */
4516     HDassert(cache_ptr->resize_in_progress);
4517     if (!reentrant_call)
4518         cache_ptr->resize_in_progress = FALSE;
4519     HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
4520 
4521     FUNC_LEAVE_NOAPI(ret_value)
4522 } /* H5C__auto_adjust_cache_size() */
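
/* Configuration sketch (illustrative, assuming the H5C_auto_size_ctl_t
 * fields referenced above via cache_ptr->resize_ctl): settings that
 * double the cache when the epoch hit rate falls below 90%, and halve
 * it when the hit rate exceeds 99.95%:
 *
 *     H5C_auto_size_ctl_t ctl;
 *
 *     ctl.incr_mode          = H5C_incr__threshold;
 *     ctl.lower_hr_threshold = 0.9;
 *     ctl.increment          = 2.0;
 *
 *     ctl.decr_mode          = H5C_decr__threshold;
 *     ctl.upper_hr_threshold = 0.9995;
 *     ctl.decrement          = 0.5;
 *
 * With max_cache_size == 4 MB and an epoch hit rate of 0.85, the
 * increase branch above computes new_max_cache_size = 4 MB * 2.0 = 8 MB,
 * then clips to max_size and (if apply_max_increment is set) to
 * max_cache_size + max_increment.
 */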
4523 
4524 /*-------------------------------------------------------------------------
4525  *
4526  * Function:    H5C__autoadjust__ageout
4527  *
4528  * Purpose:     Implement the ageout automatic cache size decrement
4529  *              algorithm.  Note that while this code evicts aged out
4530  *              entries, the code does not change the maximum cache size.
4531  *              Instead, the function simply computes the new value (if
4532  *              any change is indicated) and reports this value in
4533  *              *new_max_cache_size_ptr.
4534  *
4535  * Return:      Non-negative on success/Negative on failure or if there was
4536  *              an attempt to flush a protected item.
4537  *
4538  *
4539  * Programmer:  John Mainzer, 11/18/04
4540  *
4541  *-------------------------------------------------------------------------
4542  */
4543 static herr_t
4544 H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
4545                         size_t *new_max_cache_size_ptr, hbool_t write_permitted)
4546 {
4547     H5C_t *cache_ptr = f->shared->cache;
4548     size_t test_size;
4549     herr_t ret_value = SUCCEED; /* Return value */
4550 
4551     FUNC_ENTER_STATIC
4552 
4553     HDassert(f);
4554     HDassert(cache_ptr);
4555     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
4556     HDassert((status_ptr) && (*status_ptr == in_spec));
4557     HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0));
4558 
4559     /* remove excess epoch markers if any */
4560     if (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction)
4561         if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
4562             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
4563 
4564     if (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
4565         (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) &&
4566          (hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold))) {
4567 
4568         if (cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size) {
4569 
4570             /* evict aged out cache entries if appropriate... */
4571             if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0)
4572                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
4573 
4574             /* ... and then reduce cache size if appropriate */
4575             if (cache_ptr->index_size < cache_ptr->max_cache_size) {
4576 
4577                 if ((cache_ptr->resize_ctl).apply_empty_reserve) {
4578 
4579                     test_size = (size_t)(((double)cache_ptr->index_size) /
4580                                          (1 - (cache_ptr->resize_ctl).empty_reserve));
4581 
4582                     if (test_size < cache_ptr->max_cache_size) {
4583 
4584                         *status_ptr             = decrease;
4585                         *new_max_cache_size_ptr = test_size;
4586                     }
4587                 }
4588                 else {
4589 
4590                     *status_ptr             = decrease;
4591                     *new_max_cache_size_ptr = cache_ptr->index_size;
4592                 }
4593 
4594                 if (*status_ptr == decrease) {
4595 
4596                     /* clip to min size if necessary */
4597                     if (*new_max_cache_size_ptr < (cache_ptr->resize_ctl).min_size) {
4598 
4599                         *new_max_cache_size_ptr = (cache_ptr->resize_ctl).min_size;
4600                     }
4601 
4602                     /* clip to max decrement if necessary */
4603                     if (((cache_ptr->resize_ctl).apply_max_decrement) &&
4604                         (((cache_ptr->resize_ctl).max_decrement + *new_max_cache_size_ptr) <
4605                          cache_ptr->max_cache_size)) {
4606 
4607                         *new_max_cache_size_ptr =
4608                             cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
4609                     }
4610                 }
4611             }
4612         }
4613         else {
4614 
4615             *status_ptr = at_min_size;
4616         }
4617     }
4618 
4619 done:
4620 
4621     FUNC_LEAVE_NOAPI(ret_value)
4622 
4623 } /* H5C__autoadjust__ageout() */
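
/* Worked example of the empty-reserve computation above: with
 * index_size == 3 MB and empty_reserve == 0.1,
 *
 *     test_size = 3 MB / (1 - 0.1)  =  ~3.33 MB
 *
 * i.e. the cache is shrunk only to the point at which roughly 10% of it
 * would be free space, rather than all the way down to index_size.  The
 * proposed size is then clipped to min_size and, if apply_max_decrement
 * is set, to max_cache_size - max_decrement, as in the code above.
 */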
4624 
4625 /*-------------------------------------------------------------------------
4626  *
4627  * Function:    H5C__autoadjust__ageout__cycle_epoch_marker
4628  *
4629  * Purpose:     Remove the oldest epoch marker from the LRU list,
4630  *              and reinsert it at the head of the LRU list.  Also
4631  *              remove the epoch marker's index from the head of the
4632  *              ring buffer, and re-insert it at the tail of the ring
4633  *              buffer.
4634  *
4635  * Return:      SUCCEED on success/FAIL on failure.
4636  *
4637  * Programmer:  John Mainzer, 11/22/04
4638  *
4639  *-------------------------------------------------------------------------
4640  */
4641 static herr_t
4642 H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr)
4643 {
4644     int    i;
4645     herr_t ret_value = SUCCEED; /* Return value */
4646 
4647     FUNC_ENTER_STATIC
4648 
4649     HDassert(cache_ptr);
4650     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
4651 
4652     if (cache_ptr->epoch_markers_active <= 0)
4653         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
4654 
4655     /* remove the last marker from both the ring buffer and the LRU list */
4656 
4657     i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
4658 
4659     cache_ptr->epoch_marker_ringbuf_first =
4660         (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
4661 
4662     cache_ptr->epoch_marker_ringbuf_size -= 1;
4663 
4664     if (cache_ptr->epoch_marker_ringbuf_size < 0)
4665         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
4666     if ((cache_ptr->epoch_marker_active)[i] != TRUE)
4667         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
4668 
4669     H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
4670                     (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
4671 
4672     /* now, re-insert it at the head of the LRU list, and at the tail of
4673      * the ring buffer.
4674      */
4675 
4676     HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
4677     HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
4678     HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
4679 
4680     cache_ptr->epoch_marker_ringbuf_last =
4681         (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
4682 
4683     (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
4684 
4685     cache_ptr->epoch_marker_ringbuf_size += 1;
4686 
4687     if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS)
4688         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
4689 
4690     H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
4691                      (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
4692 done:
4693 
4694     FUNC_LEAVE_NOAPI(ret_value)
4695 
4696 } /* H5C__autoadjust__ageout__cycle_epoch_marker() */
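
/* Worked example of the ring buffer manipulation above (assuming
 * H5C__MAX_EPOCH_MARKERS is 10 for illustration): with marker indices
 * 3, 7, 2 queued oldest-first, cycling pops 3 from the head of the ring
 * buffer (epoch_marker_ringbuf_first advances by one, modulo 11) and
 * pushes it back at the tail (epoch_marker_ringbuf_last advances the
 * same way), leaving the queue as 7, 2, 3.  The marker's move from the
 * oldest slot of the ring buffer to the newest thus mirrors its move to
 * the head of the LRU list.
 */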
4697 
4698 /*-------------------------------------------------------------------------
4699  *
4700  * Function:    H5C__autoadjust__ageout__evict_aged_out_entries
4701  *
4702  * Purpose:     Evict clean entries in the cache that haven't
4703  *              been accessed for at least
4704  *              (cache_ptr->resize_ctl).epochs_before_eviction epochs,
4705  *              and flush dirty entries that haven't been accessed for
4706  *              that amount of time.
4707  *
4708  *              Depending on configuration, the function will either
4709  *              flush or evict all such entries, or all such entries it
4710  *              encounters until it has freed the maximum amount of space
4711  *              allowed under the maximum decrement.
4712  *
4713  *              If we are running in parallel mode, writes may not be
4714  *              permitted.  If so, the function simply skips any dirty
4715  *              entries it may encounter.
4716  *
4717  *              The function makes no attempt to maintain the minimum
4718  *              clean size, as there is no guarantee that the cache size
4719  *              will be changed.
4720  *
4721  *              If there is no cache size change, the minimum clean size
4722  *              constraint will be met through a combination of clean
4723  *              entries and free space in the cache.
4724  *
4725  *              If there is a cache size reduction, the minimum clean size
4726  *              will be re-calculated, and will be enforced the next time
4727  *              we have to make space in the cache.
4728  *
4729  *              Observe that this function cannot occasion a read.
4730  *
4731  * Return:      Non-negative on success/Negative on failure.
4732  *
4733  * Programmer:  John Mainzer, 11/22/04
4734  *
4735  *-------------------------------------------------------------------------
4736  */
4737 static herr_t
4738 H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted)
4739 {
4740     H5C_t *            cache_ptr = f->shared->cache;
4741     size_t             eviction_size_limit;
4742     size_t             bytes_evicted = 0;
4743     hbool_t            prev_is_dirty = FALSE;
4744     hbool_t            restart_scan;
4745     H5C_cache_entry_t *entry_ptr;
4746     H5C_cache_entry_t *next_ptr;
4747     H5C_cache_entry_t *prev_ptr;
4748     herr_t             ret_value = SUCCEED; /* Return value */
4749 
4750     FUNC_ENTER_STATIC
4751 
4752     HDassert(f);
4753     HDassert(cache_ptr);
4754     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
4755 
4756     /* if there is a limit on the amount by which the cache size can be decreased
4757      * in any one round of the cache size reduction algorithm, load that
4758      * limit into eviction_size_limit.  Otherwise, set eviction_size_limit
4759      * to the equivalent of infinity.  The current size of the index will
4760      * do nicely.
4761      */
4762     if ((cache_ptr->resize_ctl).apply_max_decrement) {
4763 
4764         eviction_size_limit = (cache_ptr->resize_ctl).max_decrement;
4765     }
4766     else {
4767 
4768         eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
4769     }
4770 
4771     if (write_permitted) {
4772 
4773         restart_scan = FALSE;
4774         entry_ptr    = cache_ptr->LRU_tail_ptr;
4775 
4776         while ((entry_ptr != NULL) && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
4777                (bytes_evicted < eviction_size_limit)) {
4778             hbool_t skipping_entry = FALSE;
4779 
4780             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
4781             HDassert(!(entry_ptr->is_protected));
4782             HDassert(!(entry_ptr->is_read_only));
4783             HDassert((entry_ptr->ro_ref_count) == 0);
4784 
4785             next_ptr = entry_ptr->next;
4786             prev_ptr = entry_ptr->prev;
4787 
4788             if (prev_ptr != NULL)
4789                 prev_is_dirty = prev_ptr->is_dirty;
4790 
4791             if (entry_ptr->is_dirty) {
4792                 HDassert(!entry_ptr->prefetched_dirty);
4793 
4794                 /* dirty corked entry is skipped */
4795                 if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
4796                     skipping_entry = TRUE;
4797                 else {
4798                     /* reset entries_removed_counter and
4799                      * last_entry_removed_ptr prior to the call to
4800                      * H5C__flush_single_entry() so that we can spot
4801                      * unexpected removals of entries from the cache,
4802                      * and set the restart_scan flag if proceeding
4803                      * would be likely to cause us to scan an entry
4804                      * that is no longer in the cache.
4805                      */
4806                     cache_ptr->entries_removed_counter = 0;
4807                     cache_ptr->last_entry_removed_ptr  = NULL;
4808 
4809                     if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
4810                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
4811 
4812                     if (cache_ptr->entries_removed_counter > 1 ||
4813                         cache_ptr->last_entry_removed_ptr == prev_ptr)
4814                         restart_scan = TRUE;
4815                 } /* end else */
4816             }     /* end if */
4817             else if (!entry_ptr->prefetched_dirty) {
4818 
4819                 bytes_evicted += entry_ptr->size;
4820 
4821                 if (H5C__flush_single_entry(
4822                         f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
4823                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
4824             } /* end else-if */
4825             else {
4826                 HDassert(!entry_ptr->is_dirty);
4827                 HDassert(entry_ptr->prefetched_dirty);
4828 
4829                 skipping_entry = TRUE;
4830             } /* end else */
4831 
4832             if (prev_ptr != NULL) {
4833                 if (skipping_entry)
4834                     entry_ptr = prev_ptr;
4835                 else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
4836                          (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
4837                     /* Something has happened to the LRU -- start over
4838                      * from the tail.
4839                      */
4840                     restart_scan = FALSE;
4841                     entry_ptr    = cache_ptr->LRU_tail_ptr;
4842 
4843                     H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
4844                 } /* end else-if */
4845                 else
4846                     entry_ptr = prev_ptr;
4847             } /* end if */
4848             else
4849                 entry_ptr = NULL;
4850         } /* end while */
4851 
4852         /* for now at least, don't bother to maintain the minimum clean size,
4853          * as the cache should now be less than its maximum size.  Due to
4854      * the vagaries of the cache size reduction algorithm, we may not
4855          * reduce the size of the cache.
4856          *
4857          * If we do, we will calculate a new minimum clean size, which will
4858          * be enforced the next time we try to make space in the cache.
4859          *
4860          * If we don't, no action is necessary, as we have just evicted
4861          * and/or flushed a bunch of entries and therefore the sum of the clean
4862          * and free space in the cache must be greater than or equal to the
4863          * min clean space requirement (assuming that requirement was met on
4864          * entry).
4865          */
4866 
4867     } /* end if */
4868     else /* ! write_permitted */ {
4869         /* Since we are not allowed to write, all we can do is evict
4870          * any clean entries that we may encounter before we either
4871          * hit the eviction size limit, or encounter the epoch marker.
4872          *
4873          * If we are operating read only, this isn't an issue, as there
4874          * will not be any dirty entries.
4875          *
4876          * If we are operating in R/W mode, all the dirty entries we
4877          * skip will be flushed the next time we attempt to make space
4878          * when writes are permitted.  This may have some local
4879          * performance implications, but it shouldn't cause any net
4880          * slowdown.
4881          */
4882         HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
4883         entry_ptr = cache_ptr->LRU_tail_ptr;
4884         while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
4885                (bytes_evicted < eviction_size_limit)) {
4886             HDassert(!(entry_ptr->is_protected));
4887 
4888             prev_ptr = entry_ptr->prev;
4889 
4890             if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
4891                 if (H5C__flush_single_entry(
4892                         f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
4893                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
4894 
4895             /* just skip the entry if it is dirty, as we can't do
4896              * anything with it now since we can't write.
4897              *
4898              * Since all entries are clean, serialize() will not be called,
4899              * and thus we needn't test to see if the LRU has been changed
4900              * out from under us.
4901              */
4902             entry_ptr = prev_ptr;
4903         } /* end while */
4904     }     /* end else */
4905 
4906     if (cache_ptr->index_size < cache_ptr->max_cache_size)
4907         cache_ptr->cache_full = FALSE;
4908 
4909 done:
4910     FUNC_LEAVE_NOAPI(ret_value)
4911 } /* H5C__autoadjust__ageout__evict_aged_out_entries() */
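
/* Note on the restart_scan logic above: flushing a dirty entry runs the
 * client's serialize callback, which may dirty, resize, move, or evict
 * other entries -- including prev_ptr, the entry we intended to visit
 * next.  The entries_removed_counter / last_entry_removed_ptr checks,
 * together with the follow-up sanity tests on prev_ptr, detect such
 * changes, and the scan restarts from the LRU tail rather than follow a
 * possibly stale pointer.  Repeated restarts can make the scan
 * quadratic in pathological cases, but they are expected to be rare.
 */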
4912 
4913 /*-------------------------------------------------------------------------
4914  *
4915  * Function:    H5C__autoadjust__ageout__insert_new_marker
4916  *
4917  * Purpose:     Find an unused marker cache entry, mark it as used, and
4918  *              insert it at the head of the LRU list.  Also append the
4919  *              marker's index to the tail of the ring buffer.
4920  *
4921  * Return:      SUCCEED on success/FAIL on failure.
4922  *
4923  * Programmer:  John Mainzer, 11/19/04
4924  *
4925  *-------------------------------------------------------------------------
4926  */
4927 static herr_t
4928 H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr)
4929 {
4930     int    i;
4931     herr_t ret_value = SUCCEED; /* Return value */
4932 
4933     FUNC_ENTER_STATIC
4934 
4935     HDassert(cache_ptr);
4936     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
4937 
4938     if (cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction)
4939         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
4940 
4941     /* find an unused marker */
4942     i = 0;
4943     while (i < H5C__MAX_EPOCH_MARKERS && (cache_ptr->epoch_marker_active)[i])
4944         i++;
4945 
4946     if (i >= H5C__MAX_EPOCH_MARKERS)
4947         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
4948 
4949     HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
4950     HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
4951     HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
4952 
4953     (cache_ptr->epoch_marker_active)[i] = TRUE;
4954 
4955     cache_ptr->epoch_marker_ringbuf_last =
4956         (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
4957 
4958     (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
4959 
4960     cache_ptr->epoch_marker_ringbuf_size += 1;
4961 
4962     if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS) {
4963 
4964         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
4965     }
4966 
4967     H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
4968                      (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
4969 
4970     cache_ptr->epoch_markers_active += 1;
4971 
4972 done:
4973 
4974     FUNC_LEAVE_NOAPI(ret_value)
4975 
4976 } /* H5C__autoadjust__ageout__insert_new_marker() */
4977 
4978 /*-------------------------------------------------------------------------
4979  *
4980  * Function:    H5C__autoadjust__ageout__remove_all_markers
4981  *
4982  * Purpose:     Remove all epoch markers from the LRU list and mark them
4983  *              as inactive.
4984  *
4985  * Return:      SUCCEED on success/FAIL on failure.
4986  *
4987  * Programmer:  John Mainzer, 11/22/04
4988  *
4989  *-------------------------------------------------------------------------
4990  */
4991 static herr_t
4992 H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr)
4993 {
4994     int    ring_buf_index;
4995     int    i;
4996     herr_t ret_value = SUCCEED; /* Return value */
4997 
4998     FUNC_ENTER_STATIC
4999 
5000     HDassert(cache_ptr);
5001     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
5002 
5003     while (cache_ptr->epoch_markers_active > 0) {
5004         /* get the index of the last epoch marker in the LRU list
5005          * and remove it from the ring buffer.
5006          */
5007 
5008         ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
5009         i              = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
5010 
5011         cache_ptr->epoch_marker_ringbuf_first =
5012             (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
5013 
5014         cache_ptr->epoch_marker_ringbuf_size -= 1;
5015 
5016         if (cache_ptr->epoch_marker_ringbuf_size < 0)
5017             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
5018 
5019         if ((cache_ptr->epoch_marker_active)[i] != TRUE)
5020             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
5021 
5022         /* remove the epoch marker from the LRU list */
5023         H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
5024                         (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
5025                         (FAIL))
5026 
5027         /* mark the epoch marker as unused. */
5028         (cache_ptr->epoch_marker_active)[i] = FALSE;
5029 
5030         HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
5031         HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
5032         HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
5033 
5034         /* decrement the number of active epoch markers */
5035         cache_ptr->epoch_markers_active -= 1;
5036 
5037         HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
5038     }
5039 
5040 done:
5041 
5042     FUNC_LEAVE_NOAPI(ret_value)
5043 
5044 } /* H5C__autoadjust__ageout__remove_all_markers() */
5045 
5046 /*-------------------------------------------------------------------------
5047  *
5048  * Function:    H5C__autoadjust__ageout__remove_excess_markers
5049  *
5050  * Purpose:     Remove epoch markers from the end of the LRU list and
5051  *              mark them as inactive until the number of active markers
5052  *              equals the current value of
5053  *              (cache_ptr->resize_ctl).epochs_before_eviction.
5054  *
5055  * Return:      SUCCEED on success/FAIL on failure.
5056  *
5057  * Programmer:  John Mainzer, 11/19/04
5058  *
5059  *-------------------------------------------------------------------------
5060  */
5061 static herr_t
5062 H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr)
5063 {
5064     int    ring_buf_index;
5065     int    i;
5066     herr_t ret_value = SUCCEED; /* Return value */
5067 
5068     FUNC_ENTER_STATIC
5069 
5070     HDassert(cache_ptr);
5071     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
5072 
5073     if (cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction)
5074         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
5075 
5076     while (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) {
5077         /* get the index of the last epoch marker in the LRU list
5078          * and remove it from the ring buffer.
5079          */
5080 
5081         ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
5082         i              = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
5083 
5084         cache_ptr->epoch_marker_ringbuf_first =
5085             (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
5086 
5087         cache_ptr->epoch_marker_ringbuf_size -= 1;
5088 
5089         if (cache_ptr->epoch_marker_ringbuf_size < 0)
5090             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
5091         if ((cache_ptr->epoch_marker_active)[i] != TRUE)
5092             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
5093 
5094         /* remove the epoch marker from the LRU list */
5095         H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
5096                         (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
5097                         (FAIL))
5098 
5099         /* mark the epoch marker as unused. */
5100         (cache_ptr->epoch_marker_active)[i] = FALSE;
5101 
5102         HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
5103         HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
5104         HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
5105 
5106         /* decrement the number of active epoch markers */
5107         cache_ptr->epoch_markers_active -= 1;
5108 
5109         HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
5110     }
5111 
5112 done:
5113 
5114     FUNC_LEAVE_NOAPI(ret_value)
5115 
5116 } /* H5C__autoadjust__ageout__remove_excess_markers() */
5117 
5118 /*-------------------------------------------------------------------------
5119  *
5120  * Function:    H5C__flash_increase_cache_size
5121  *
5122  * Purpose:     If there are not at least new_entry_size - old_entry_size
5123  *              bytes of free space in the cache and the current
5124  *              max_cache_size is less than (cache_ptr->resize_ctl).max_size,
5125  *              perform a flash increase in the cache size and then reset
5126  *              the full cache hit rate statistics, and exit.
5127  *
5128  * Return:      Non-negative on success/Negative on failure.
5129  *
5130  * Programmer:  John Mainzer, 12/31/07
5131  *
5132  *-------------------------------------------------------------------------
5133  */
5134 static herr_t
5135 H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size)
5136 {
5137     size_t                 new_max_cache_size = 0;
5138     size_t                 old_max_cache_size = 0;
5139     size_t                 new_min_clean_size = 0;
5140     size_t                 old_min_clean_size = 0;
5141     size_t                 space_needed;
5142     enum H5C_resize_status status = flash_increase; /* may change */
5143     double                 hit_rate;
5144     herr_t                 ret_value = SUCCEED; /* Return value */
5145 
5146     FUNC_ENTER_STATIC
5147 
5148     HDassert(cache_ptr);
5149     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
5150     HDassert(cache_ptr->flash_size_increase_possible);
5151     HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold);
5152     HDassert(old_entry_size < new_entry_size);
5153 
5154     if (old_entry_size >= new_entry_size)
5155         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
5156 
5157     space_needed = new_entry_size - old_entry_size;
5158 
5159     if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
5160         (cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size)) {
5161 
5162         /* we have work to do */
5163 
5164         switch ((cache_ptr->resize_ctl).flash_incr_mode) {
5165             case H5C_flash_incr__off:
5166                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
5167                             "flash_size_increase_possible but H5C_flash_incr__off?!")
5168                 break;
5169 
5170             case H5C_flash_incr__add_space:
5171                 if (cache_ptr->index_size < cache_ptr->max_cache_size) {
5172 
5173                     HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed);
5174                     space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size;
5175                 }
5176                 space_needed = (size_t)(((double)space_needed) * (cache_ptr->resize_ctl).flash_multiple);
5177 
5178                 new_max_cache_size = cache_ptr->max_cache_size + space_needed;
5179 
5180                 break;
5181 
5182             default: /* should be unreachable */
5183                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
5184                 break;
5185         }
5186 
5187         if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
5188 
5189             new_max_cache_size = (cache_ptr->resize_ctl).max_size;
5190         }
5191 
5192         HDassert(new_max_cache_size > cache_ptr->max_cache_size);
5193 
5194         new_min_clean_size =
5195             (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
5196 
5197         HDassert(new_min_clean_size <= new_max_cache_size);
5198 
5199         old_max_cache_size = cache_ptr->max_cache_size;
5200         old_min_clean_size = cache_ptr->min_clean_size;
5201 
5202         cache_ptr->max_cache_size = new_max_cache_size;
5203         cache_ptr->min_clean_size = new_min_clean_size;
5204 
5205         /* update flash cache size increase fields as appropriate */
5206         HDassert(cache_ptr->flash_size_increase_possible);
5207 
5208         switch ((cache_ptr->resize_ctl).flash_incr_mode) {
5209             case H5C_flash_incr__off:
5210                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
5211                             "flash_size_increase_possible but H5C_flash_incr__off?!")
5212                 break;
5213 
5214             case H5C_flash_incr__add_space:
5215                 cache_ptr->flash_size_increase_threshold = (size_t)(
5216                     ((double)(cache_ptr->max_cache_size)) * ((cache_ptr->resize_ctl).flash_threshold));
5217                 break;
5218 
5219             default: /* should be unreachable */
5220                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
5221                 break;
5222         }
5223 
5224         /* note that we don't cycle the epoch markers.  We can
5225          * argue either way as to whether we should, but for now
5226          * we don't.
5227          */
5228 
5229         if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
5230 
5231             /* get the hit rate for the reporting function.  Should still
5232              * be good as we haven't reset the hit rate statistics.
5233              */
5234             if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
5235                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
5236 
5237             (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate,
5238                                                  status, old_max_cache_size, new_max_cache_size,
5239                                                  old_min_clean_size, new_min_clean_size);
5240         }
5241 
5242         if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
5243             /* this should be impossible... */
5244             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
5245     }
5246 
5247 done:
5248 
5249     FUNC_LEAVE_NOAPI(ret_value)
5250 
5251 } /* H5C__flash_increase_cache_size() */
5252 
5253 /*-------------------------------------------------------------------------
5254  *
5255  * Function:    H5C__flush_invalidate_cache
5256  *
5257  * Purpose:    Flush and destroy the entries contained in the target
5258  *        cache.
5259  *
5260  *        If the cache contains protected entries, the function will
5261  *        fail, as protected entries cannot be either flushed or
5262  *        destroyed.  However all unprotected entries should be
5263  *        flushed and destroyed before the function returns failure.
5264  *
5265  *        While pinned entries can usually be flushed, they cannot
5266  *        be destroyed.  However, they should be unpinned when all
5267  *        the entries that reference them have been destroyed (thus
5268  *        reduding the pinned entry's reference count to 0, allowing
5269  *        it to be unpinned).
5270  *
5271  *        If pinned entries are present, the function makes repeated
5272  *        passes through the cache, flushing all dirty entries
5273  *        (including the pinned dirty entries where permitted) and
5274  *        destroying all unpinned entries.  This process is repeated
5275  *        until either the cache is empty, or the number of pinned
5276  *        entries stops decreasing on each pass.
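 *
 *              In outline, the logic is (illustrative pseudo-code only,
 *              not the literal implementation):
 *
 *                  do {
 *                      old_pel_len = cur_pel_len;
 *                      flush all dirty entries (scan of the skip list);
 *                      destroy all unpinned entries (scan of the index);
 *                      cur_pel_len = length of the pinned entry list;
 *                  } while ((cur_pel_len > 0) &&
 *                           (cur_pel_len < old_pel_len));
 *
 *                  if (cur_pel_len > 0)
 *                      fail -- the remaining pinned entries will
 *                      never unpin;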
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *              a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *              3/24/065
 *
 * Modifications:
 *
 *              To support the fractal heap, the cache must now deal with
 *              entries being dirtied, resized, and/or renamed inside
 *              flush callbacks.  Updated function to support this.
 *
 *                                                   -- JRM 8/27/06
 *
 *              Added code to detect and manage the case in which a
 *              flush callback changes the s-list out from under
 *              the function.  The only way I can think of in which this
 *              can happen is if a flush function loads an entry
 *              into the cache that isn't there already.  Quincey tells
 *              me that this will never happen, but I'm not sure I
 *              believe him.
 *
 *              Note that this is a pretty bad scenario if it ever
 *              happens.  The code I have added should allow us to
 *              handle the situation under all but the worst conditions,
 *              but one can argue that we should just scream and die if
 *              we ever detect the condition.
 *
 *                                                      -- JRM 10/13/07
 *
 *              Missing entries?
 *
 *
 *              Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
 *              This flag is used to flush and evict all entries in
 *              the metadata cache that are not pinned -- typically,
 *              everything other than the superblock.
 *
 *                                           ??? -- ??/??/??
 *
 *              Added sanity checks to verify that the skip list is
 *              enabled on entry.  On the face of it, it would make
 *              sense to enable the slist on entry, and disable it
 *              on exit, as this function is not called repeatedly.
 *              However, since this function can be called from
 *              H5C_flush_cache(), this would create cases in the test
 *              code where we would have to check the flags to determine
 *              whether we must set up and take down the slist.
 *
 *                                           JRM -- 5/5/20
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
{
    H5C_t *    cache_ptr;
    H5C_ring_t ring;
    herr_t     ret_value = SUCCEED;

    FUNC_ENTER_STATIC

    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->slist_ptr);
    HDassert(cache_ptr->slist_enabled);

#if H5C_DO_SANITY_CHECKS
    {
        int32_t  i;
        uint32_t index_len        = 0;
        uint32_t slist_len        = 0;
        size_t   index_size       = (size_t)0;
        size_t   clean_index_size = (size_t)0;
        size_t   dirty_index_size = (size_t)0;
        size_t   slist_size       = (size_t)0;

        HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
        HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
        HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
        HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
        HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
        HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);

        for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {

            index_len += cache_ptr->index_ring_len[i];
            index_size += cache_ptr->index_ring_size[i];
            clean_index_size += cache_ptr->clean_index_ring_size[i];
            dirty_index_size += cache_ptr->dirty_index_ring_size[i];

            slist_len += cache_ptr->slist_ring_len[i];
            slist_size += cache_ptr->slist_ring_size[i];

        } /* end for */

        HDassert(cache_ptr->index_len == index_len);
        HDassert(cache_ptr->index_size == index_size);
        HDassert(cache_ptr->clean_index_size == clean_index_size);
        HDassert(cache_ptr->dirty_index_size == dirty_index_size);
        HDassert(cache_ptr->slist_len == slist_len);
        HDassert(cache_ptr->slist_size == slist_size);
    }
#endif /* H5C_DO_SANITY_CHECKS */

    /* remove ageout markers if present */
    if (cache_ptr->epoch_markers_active > 0) {

        if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)

            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
    }

    /* flush invalidate each ring, starting from the outermost ring and
     * working inward.
     */
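    /* For reference, the rings are visited in the order in which they
     * appear in the H5C_ring_t enum (see H5Cprivate.h) -- at this
     * writing: H5C_RING_USER, H5C_RING_RDFSM, H5C_RING_MDFSM,
     * H5C_RING_SBE, and finally H5C_RING_SB.
     */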
    ring = H5C_RING_USER;

    while (ring < H5C_RING_NTYPES) {

        if (H5C__flush_invalidate_ring(f, ring, flags) < 0)

            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
        ring++;

    } /* end while */

    /* Invariants, after destroying all entries in the hash table */
    if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {

        HDassert(cache_ptr->index_size == 0);
        HDassert(cache_ptr->clean_index_size == 0);
        HDassert(cache_ptr->pel_len == 0);
        HDassert(cache_ptr->pel_size == 0);

    } /* end if */
    else {

        H5C_cache_entry_t *entry_ptr; /* Cache entry */
        unsigned           u;         /* Local index variable */

        /* All rings except ring 4 should be empty now */
        /* (Ring 4 has the superblock) */
        for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {

            HDassert(cache_ptr->index_ring_len[u] == 0);
            HDassert(cache_ptr->index_ring_size[u] == 0);
            HDassert(cache_ptr->clean_index_ring_size[u] == 0);

        } /* end for */

        /* Check that any remaining pinned entries are in the superblock ring */

        entry_ptr = cache_ptr->pel_head_ptr;

        while (entry_ptr) {

            /* Check ring */
            HDassert(entry_ptr->ring == H5C_RING_SB);

            /* Advance to next entry in pinned entry list */
            entry_ptr = entry_ptr->next;

        } /* end while */
    }     /* end else */

    HDassert(cache_ptr->dirty_index_size == 0);
    HDassert(cache_ptr->slist_len == 0);
    HDassert(cache_ptr->slist_size == 0);
    HDassert(cache_ptr->pl_len == 0);
    HDassert(cache_ptr->pl_size == 0);
    HDassert(cache_ptr->LRU_list_len == 0);
    HDassert(cache_ptr->LRU_list_size == 0);

done:

    FUNC_LEAVE_NOAPI(ret_value)

} /* H5C__flush_invalidate_cache() */

/*-------------------------------------------------------------------------
 * Function:    H5C__flush_invalidate_ring
 *
 * Purpose:     Flush and destroy the entries contained in the target
 *              cache and ring.
 *
 *              If the ring contains protected entries, the function will
 *              fail, as protected entries cannot be either flushed or
 *              destroyed.  However all unprotected entries should be
 *              flushed and destroyed before the function returns failure.
 *
 *              While pinned entries can usually be flushed, they cannot
 *              be destroyed.  However, they should be unpinned when all
 *              the entries that reference them have been destroyed (thus
 *              reducing the pinned entry's reference count to 0, allowing
 *              it to be unpinned).
 *
 *              If pinned entries are present, the function makes repeated
 *              passes through the cache, flushing all dirty entries
 *              (including the pinned dirty entries where permitted) and
 *              destroying all unpinned entries.  This process is repeated
 *              until either the cache is empty, or the number of pinned
 *              entries stops decreasing on each pass.
 *
 *              If flush dependencies appear in the target ring, the
 *              function makes repeated passes through the cache flushing
 *              entries in flush dependency order.
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *              a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *              9/1/15
 *
 * Changes:     Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
 *              This flag is used to flush and evict all entries in
 *              the metadata cache that are not pinned -- typically,
 *              everything other than the superblock.
 *
 *                                           ??? -- ??/??/??
 *
 *              A recent optimization turns off the slist unless a flush
 *              is in progress.  This should not affect this function, as
 *              it is only called during a flush.  Added an assertion to
 *              verify this.
 *
 *                                           JRM -- 5/6/20
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
{
    H5C_t *            cache_ptr;
    hbool_t            restart_slist_scan;
    uint32_t           protected_entries = 0;
    int32_t            i;
    int32_t            cur_ring_pel_len;
    int32_t            old_ring_pel_len;
    unsigned           cooked_flags;
    unsigned           evict_flags;
    H5SL_node_t *      node_ptr       = NULL;
    H5C_cache_entry_t *entry_ptr      = NULL;
    H5C_cache_entry_t *next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
    uint32_t initial_slist_len  = 0;
    size_t   initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
    herr_t ret_value = SUCCEED;

    FUNC_ENTER_STATIC

    HDassert(f);
    HDassert(f->shared);

    cache_ptr = f->shared->cache;

    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->slist_enabled);
    HDassert(cache_ptr->slist_ptr);
    HDassert(ring > H5C_RING_UNDEFINED);
    HDassert(ring < H5C_RING_NTYPES);

    HDassert(cache_ptr->epoch_markers_active == 0);

    /* Filter out the flags that are not relevant to the flush/invalidate.
     */
    cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
    evict_flags  = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;

    /* The flush procedure here is a bit strange.
     *
     * In the outer while loop we make at least one pass through the
     * cache, and then repeat until either all the pinned entries in
     * the ring unpin themselves, or until the number of pinned entries
     * in the ring stops declining.  In this latter case, we scream and die.
     *
     * Since the fractal heap can dirty, resize, and/or move entries
     * in its flush callback, it is possible that the cache will still
     * contain dirty entries at this point.  If so, we must make more
     * passes through the skip list to allow it to empty.
     *
     * Further, since clean entries can be dirtied, resized, and/or moved
     * as the result of a flush call back (either the entry's own, or that
     * of some other cache entry), we can no longer promise to flush
     * the cache entries in increasing address order.
     *
     * Instead, we just do the best we can -- making a pass through
     * the skip list, and then a pass through the "clean" entries, and
     * then repeating as needed.  Thus it is quite possible that an
     * entry will be evicted from the cache only to be re-loaded later
     * in the flush process.  (From what Quincey tells me, the pin
     * mechanism makes this impossible, but even if it is true now,
     * we shouldn't count on it in the future.)
     *
     * The bottom line is that entries will probably be flushed in close
     * to increasing address order, but there are no guarantees.
     */

    /* compute the number of pinned entries in this ring */

    entry_ptr        = cache_ptr->pel_head_ptr;
    cur_ring_pel_len = 0;

    while (entry_ptr != NULL) {

        HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
        HDassert(entry_ptr->ring >= ring);
        if (entry_ptr->ring == ring)
            cur_ring_pel_len++;

        entry_ptr = entry_ptr->next;

    } /* end while */

    old_ring_pel_len = cur_ring_pel_len;

    while (cache_ptr->index_ring_len[ring] > 0) {

        /* first, try to flush-destroy any dirty entries.  Do this by
         * making a scan through the slist.  Note that new dirty entries
         * may be created by the flush call backs.  Thus it is possible
         * that the slist will not be empty after we finish the scan.
         */

#if H5C_DO_SANITY_CHECKS
        /* Depending on circumstances, H5C__flush_single_entry() will
         * remove dirty entries from the slist as it flushes them.
         * Thus for sanity checks we must make note of the initial
         * slist length and size before we do any flushes.
         */
        initial_slist_len  = cache_ptr->slist_len;
        initial_slist_size = cache_ptr->slist_size;

        /* There is also the possibility that entries will be
         * dirtied, resized, moved, and/or removed from the cache
         * as the result of calls to the flush callbacks.  We use
         * the slist_len_increase and slist_size_increase
         * fields in struct H5C_t to track these changes for the
         * purpose of sanity checking.
         *
         * To this end, we must zero these fields before we start
         * the pass through the slist.
         */
        cache_ptr->slist_len_increase  = 0;
        cache_ptr->slist_size_increase = 0;
#endif /* H5C_DO_SANITY_CHECKS */

        /* Set cache_ptr->slist_changed to FALSE.
         *
         * This flag is set to TRUE by H5C__flush_single_entry if the slist
         * is modified by a pre_serialize, serialize, or notify callback.
         *
         * H5C__flush_invalidate_ring() uses this flag to detect any
         * modifications to the slist that might corrupt the scan of
         * the slist -- and restart the scan in this event.
         */
        cache_ptr->slist_changed = FALSE;

        /* this done, start the scan of the slist */
        restart_slist_scan = TRUE;

        while (restart_slist_scan || (node_ptr != NULL)) {

            if (restart_slist_scan) {

                restart_slist_scan = FALSE;

                /* Start at beginning of skip list */
                node_ptr = H5SL_first(cache_ptr->slist_ptr);

                if (node_ptr == NULL)
                    /* the slist is empty -- break out of inner loop */
                    break;

                /* Get cache entry for this node */
                next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);

                if (NULL == next_entry_ptr)

                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")

                HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
                HDassert(next_entry_ptr->is_dirty);
                HDassert(next_entry_ptr->in_slist);
                HDassert(next_entry_ptr->ring >= ring);

            } /* end if */

            entry_ptr = next_entry_ptr;

            /* It is possible that entries will be dirtied, resized,
             * flushed, or removed from the cache via the take ownership
             * flag as the result of pre_serialize or serialize callbacks.
             *
             * This in turn can corrupt the scan through the slist.
             *
             * We test for slist modifications in the pre_serialize
             * and serialize callbacks, and restart the scan of the
             * slist if we find them.  However, it is best we do some
             * extra sanity checking just in case.
             */
            HDassert(entry_ptr != NULL);
            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            HDassert(entry_ptr->in_slist);
            HDassert(entry_ptr->is_dirty);
            HDassert(entry_ptr->ring >= ring);

            /* increment node pointer now, before we delete its target
             * from the slist.
             */
            node_ptr = H5SL_next(node_ptr);

            if (node_ptr != NULL) {

                next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);

                if (NULL == next_entry_ptr)

                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")

                HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
                HDassert(next_entry_ptr->is_dirty);
                HDassert(next_entry_ptr->in_slist);
                HDassert(next_entry_ptr->ring >= ring);
                HDassert(entry_ptr != next_entry_ptr);
            } /* end if */
            else {

                next_entry_ptr = NULL;
            }

            /* Note that we now remove nodes from the slist as we flush
             * the associated entries, instead of leaving them there
             * until we are done, and then destroying all nodes in
             * the slist.
             *
             * While this optimization used to be easy, with the possibility
             * of new entries being added to the slist in the midst of the
             * flush, we must now keep the slist in canonical form at all
             * times.
             */
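            /* An entry is eligible for flush/destroy on this pass only
             * if (a) it is not marked flush_me_last, or every remaining
             * entry in the slist is so marked, (b) it has no flush
             * dependency children, and (c) it belongs to the target
             * ring.  (Descriptive note only -- the test below is the
             * authoritative statement.)
             */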
            if (((!entry_ptr->flush_me_last) ||
                 ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
                (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {

                if (entry_ptr->is_protected) {

                    /* we have major problems -- but let's flush
                     * everything we can before we flag an error.
                     */
                    protected_entries++;

                } /* end if */
                else if (entry_ptr->is_pinned) {

                    if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)

                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")

                    if (cache_ptr->slist_changed) {

                        /* The slist has been modified by something
                         * other than the simple removal of the
                         * flushed entry after the flush.
                         *
                         * This has the potential to corrupt the
                         * scan through the slist, so restart it.
                         */
                        restart_slist_scan       = TRUE;
                        cache_ptr->slist_changed = FALSE;
                        H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);

                    } /* end if */
                }     /* end else-if */
                else {

                    if (H5C__flush_single_entry(f, entry_ptr,
                                                (cooked_flags | H5C__DURING_FLUSH_FLAG |
                                                 H5C__FLUSH_INVALIDATE_FLAG |
                                                 H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)

                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")

                    if (cache_ptr->slist_changed) {

                        /* The slist has been modified by something
                         * other than the simple removal of the
                         * flushed entry after the flush.
                         *
                         * This has the potential to corrupt the
                         * scan through the slist, so restart it.
                         */
                        restart_slist_scan       = TRUE;
                        cache_ptr->slist_changed = FALSE;
                        H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
                    } /* end if */
                }     /* end else */
            }         /* end if */
        }             /* end while loop scanning skip list */

#if H5C_DO_SANITY_CHECKS
        /* It is possible that entries were added to the slist during
         * the scan, either before or after the scan pointer.  The
         * following asserts take this into account.
         *
         * Don't bother with the sanity checks if node_ptr != NULL, as
         * in this case we broke out of the loop because it got changed
         * out from under us.
         */

        if (node_ptr == NULL) {

            HDassert(cache_ptr->slist_len ==
                     (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));

            HDassert(cache_ptr->slist_size ==
                     (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
        } /* end if */
#endif    /* H5C_DO_SANITY_CHECKS */

        /* Since we are doing a destroy, we must make a pass through
         * the hash table and try to flush-destroy all entries that
         * remain.
         *
         * It used to be that all entries remaining in the cache at
         * this point had to be clean, but with the fractal heap mods
         * this may not be the case.  If so, we will flush entries out
         * in increasing address order.
         *
         * Writes to disk are possible here.
         */

        /* reset the counters so that we can detect insertions, loads,
         * and moves caused by the pre_serialize and serialize calls.
         */
        cache_ptr->entries_loaded_counter    = 0;
        cache_ptr->entries_inserted_counter  = 0;
        cache_ptr->entries_relocated_counter = 0;

        next_entry_ptr = cache_ptr->il_head;

        while (next_entry_ptr != NULL) {

            entry_ptr = next_entry_ptr;
            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            HDassert(entry_ptr->ring >= ring);

            next_entry_ptr = entry_ptr->il_next;
            HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));

            if (((!entry_ptr->flush_me_last) ||
                 (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
                (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {

                if (entry_ptr->is_protected) {

                    /* we have major problems -- but let's flush and
                     * destroy everything we can before we flag an
                     * error.
                     */
                    protected_entries++;

                    if (!entry_ptr->in_slist) {

                        HDassert(!(entry_ptr->is_dirty));
                    }
                } /* end if */
                else if (!(entry_ptr->is_pinned)) {

                    /* if *entry_ptr is dirty, it is possible
                     * that one or more other entries may be
                     * either removed from the cache, loaded
                     * into the cache, or moved to a new location
                     * in the file as a side effect of the flush.
                     *
                     * It's also possible that removing a clean
                     * entry will remove the last child of a proxy
                     * entry, allowing it to be removed also and
                     * invalidating the next_entry_ptr.
                     *
                     * If either of these happens, and one of the target
                     * or proxy entries happens to be the next entry in
                     * the hash bucket, we could find ourselves
                     * either scanning a non-existent entry, scanning
                     * through a different bucket, or skipping an entry.
                     *
                     * None of these are good, so restart the
                     * scan at the head of the hash bucket
                     * after the flush if we detect that the next_entry_ptr
                     * becomes invalid.
                     *
                     * This is not as inefficient as it might seem,
                     * as hash buckets typically have at most two
                     * or three entries.
                     */
                    cache_ptr->entry_watched_for_removal = next_entry_ptr;

                    if (H5C__flush_single_entry(f, entry_ptr,
                                                (cooked_flags | H5C__DURING_FLUSH_FLAG |
                                                 H5C__FLUSH_INVALIDATE_FLAG |
                                                 H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)

                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")

                    /* Restart the index list scan if necessary.  Must
                     * do this if the next entry is evicted, and also if
                     * one or more entries are inserted, loaded, or moved
                     * as these operations can result in part of the scan
                     * being skipped -- which can cause a spurious failure
                     * if this results in the size of the pinned entry
                     * list failing to decline during the pass.
                     */
                    if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) ||
                        (cache_ptr->entries_loaded_counter > 0) ||
                        (cache_ptr->entries_inserted_counter > 0) ||
                        (cache_ptr->entries_relocated_counter > 0)) {

                        next_entry_ptr = cache_ptr->il_head;

                        cache_ptr->entries_loaded_counter    = 0;
                        cache_ptr->entries_inserted_counter  = 0;
                        cache_ptr->entries_relocated_counter = 0;

                        H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)

                    } /* end if */
                    else {

                        cache_ptr->entry_watched_for_removal = NULL;
                    }
                } /* end if */
            }     /* end if */
        }         /* end while loop scanning hash table */

        /* We can't do anything if entries are pinned.  The
         * hope is that the entries will be unpinned as the
         * result of destroys of entries that reference them.
         *
         * We detect this by noting the change in the number
         * of pinned entries from pass to pass.  If it stops
         * shrinking before it hits zero, we scream and die.
         */
        old_ring_pel_len = cur_ring_pel_len;
        entry_ptr        = cache_ptr->pel_head_ptr;
        cur_ring_pel_len = 0;

        while (entry_ptr != NULL) {

            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            HDassert(entry_ptr->ring >= ring);

            if (entry_ptr->ring == ring) {

                cur_ring_pel_len++;
            }

            entry_ptr = entry_ptr->next;

        } /* end while */

        /* Check if the number of pinned entries in the ring is positive, and
         * it is not declining.  Scream and die if so.
         */
        if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {

            /* Don't error if allowed to have pinned entries remaining */
            if (evict_flags) {

                HGOTO_DONE(TRUE)
            }

            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
                        "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = "
                        "%d, ring = %d",
                        (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
        } /* end if */

        HDassert(protected_entries == cache_ptr->pl_len);

        if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len))

            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
                        "Only protected entries left in cache, protected_entries = %d",
                        (int)protected_entries)

    } /* main while loop */

    /* Invariants, after destroying all entries in the ring */
    for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {

        HDassert(cache_ptr->index_ring_len[i] == 0);
        HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
        HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
        HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);

        HDassert(cache_ptr->slist_ring_len[i] == 0);
        HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);

    } /* end for */

    HDassert(protected_entries <= cache_ptr->pl_len);

    if (protected_entries > 0) {

        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
    }
    else if (cur_ring_pel_len > 0) {

        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
    }

done:

    FUNC_LEAVE_NOAPI(ret_value)

} /* H5C__flush_invalidate_ring() */

/*-------------------------------------------------------------------------
 *
 * Function:    H5C__flush_ring
 *
 * Purpose:     Flush the entries contained in the specified cache and
 *              ring.  All entries in rings outside the specified ring
 *              must have been flushed on entry.
 *
 *              If the cache contains protected entries in the specified
 *              ring, the function will fail, as protected entries cannot
 *              be flushed.  However all unprotected entries in the target
 *              ring should be flushed before the function returns failure.
 *
 *              If flush dependencies appear in the target ring, the
 *              function makes repeated passes through the slist flushing
 *              entries in flush dependency order.
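 *
 *              For example (illustrative only):  if entry A is a flush
 *              dependency parent of entries B and C, then B and C are
 *              flushed on an earlier pass than A -- A only becomes
 *              eligible once it has no dirty flush dependency children.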
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *              a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *              9/1/15
 *
 * Changes:     A recent optimization turns off the slist unless a flush
 *              is in progress.  This should not affect this function, as
 *              it is only called during a flush.  Added an assertion to
 *              verify this.
 *
 *                                             JRM -- 5/6/20
 *
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
{
    H5C_t *            cache_ptr = f->shared->cache;
    hbool_t            flushed_entries_last_pass;
    hbool_t            flush_marked_entries;
    hbool_t            ignore_protected;
    hbool_t            tried_to_flush_protected_entry = FALSE;
    hbool_t            restart_slist_scan;
    uint32_t           protected_entries = 0;
    H5SL_node_t *      node_ptr          = NULL;
    H5C_cache_entry_t *entry_ptr         = NULL;
    H5C_cache_entry_t *next_entry_ptr    = NULL;
#if H5C_DO_SANITY_CHECKS
    uint32_t initial_slist_len  = 0;
    size_t   initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
    int    i;
    herr_t ret_value = SUCCEED;

    FUNC_ENTER_STATIC

    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->slist_enabled);
    HDassert(cache_ptr->slist_ptr);
    HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
    HDassert(ring > H5C_RING_UNDEFINED);
    HDassert(ring < H5C_RING_NTYPES);

#if H5C_DO_EXTREME_SANITY_CHECKS
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    ignore_protected     = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
    flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);

    if (!flush_marked_entries) {

        for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) {

            HDassert(cache_ptr->slist_ring_len[i] == 0);
        }
    }

    HDassert(cache_ptr->flush_in_progress);

    /* When we are only flushing marked entries, the slist will usually
     * still contain entries when we have flushed everything we should.
     * Thus we track whether we have flushed any entries in the last
     * pass, and terminate if we haven't.
     */
    flushed_entries_last_pass = TRUE;

    /* Set cache_ptr->slist_changed to FALSE.
     *
     * This flag is set to TRUE by H5C__flush_single_entry if the
     * slist is modified by a pre_serialize, serialize, or notify callback.
     * H5C_flush_cache uses this flag to detect any modifications
     * to the slist that might corrupt the scan of the slist -- and
     * restart the scan in this event.
     */
    cache_ptr->slist_changed = FALSE;

    while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) {

        flushed_entries_last_pass = FALSE;

#if H5C_DO_SANITY_CHECKS
        /* For sanity checking, try to verify that the skip list has
         * the expected size and number of entries at the end of each
         * internal while loop (see below).
         *
         * Doing this gets a bit tricky, as depending on flags, we may
         * or may not flush all the entries in the slist.
         *
         * To make things more entertaining, with the advent of the
         * fractal heap, the entry serialize callback can cause entries
         * to be dirtied, resized, and/or moved.  Also, the
         * pre_serialize callback can result in an entry being
         * removed from the cache via the take ownership flag.
         *
         * To deal with this, we first make note of the initial
         * skip list length and size:
         */
        initial_slist_len  = cache_ptr->slist_len;
        initial_slist_size = cache_ptr->slist_size;

        /* As mentioned above, there is the possibility that
         * entries will be dirtied, resized, flushed, or removed
         * from the cache via the take ownership flag during
         * our pass through the skip list.  To capture the number
         * of entries added, and the skip list size delta,
         * zero the slist_len_increase and slist_size_increase of
         * the cache's instance of H5C_t.  These fields will be
         * updated elsewhere to account for slist insertions and/or
         * dirty entry size changes.
         */
        cache_ptr->slist_len_increase  = 0;
        cache_ptr->slist_size_increase = 0;

        /* at the end of the loop, use these values to compute the
         * expected slist length and size and compare this with the
         * value recorded in the cache's instance of H5C_t.
         */
#endif /* H5C_DO_SANITY_CHECKS */

        restart_slist_scan = TRUE;

        while ((restart_slist_scan) || (node_ptr != NULL)) {

            if (restart_slist_scan) {

                restart_slist_scan = FALSE;

                /* Start at beginning of skip list */
                node_ptr = H5SL_first(cache_ptr->slist_ptr);

                if (node_ptr == NULL) {

                    /* the slist is empty -- break out of inner loop */
                    break;
                }

                /* Get cache entry for this node */
                next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);

                if (NULL == next_entry_ptr)

                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")

                HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
                HDassert(next_entry_ptr->is_dirty);
                HDassert(next_entry_ptr->in_slist);

            } /* end if */

            entry_ptr = next_entry_ptr;

            /* With the advent of the fractal heap, the free space
             * manager, and the version 3 cache, it is possible
             * that the pre-serialize or serialize callback will
             * dirty, resize, or take ownership of other entries
             * in the cache.
             *
             * To deal with this, I have inserted code to detect any
             * change in the skip list not directly under the control
             * of this function.  If such modifications are detected,
             * we must re-start the scan of the skip list to avoid
             * the possibility that the target of the next_entry_ptr
             * may have been flushed or deleted from the cache.
             *
             * To verify that all such possibilities have been dealt
             * with, we do a bit of extra sanity checking on
             * entry_ptr.
             */
            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            HDassert(entry_ptr->in_slist);
            HDassert(entry_ptr->is_dirty);

            if ((!flush_marked_entries) || (entry_ptr->flush_marker)) {

                HDassert(entry_ptr->ring >= ring);
            }

            /* Advance node pointer now, before we delete its target
             * from the slist.
             */
            node_ptr = H5SL_next(node_ptr);

            if (node_ptr != NULL) {

                next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);

                if (NULL == next_entry_ptr)

                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")

                HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
                HDassert(next_entry_ptr->is_dirty);
                HDassert(next_entry_ptr->in_slist);

                if (!flush_marked_entries || next_entry_ptr->flush_marker) {

                    HDassert(next_entry_ptr->ring >= ring);
                }

                HDassert(entry_ptr != next_entry_ptr);

            } /* end if */
            else {

                next_entry_ptr = NULL;
            }

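            /* An entry is flushed on this pass only if:  (a) it is
             * marked, or we are not restricting the flush to marked
             * entries; (b) it is not marked flush_me_last, or all
             * remaining slist entries are flush_me_last (or it is both
             * marked and we are flushing marked entries); (c) it has no
             * flush dependency children, or none of them are dirty; and
             * (d) it belongs to the target ring.  (Descriptive note
             * only -- the test below is the authoritative statement.)
             */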
            if ((!flush_marked_entries || entry_ptr->flush_marker) &&
                ((!entry_ptr->flush_me_last) ||
                 ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
                                                 (flush_marked_entries && entry_ptr->flush_marker)))) &&
                ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) &&
                (entry_ptr->ring == ring)) {

                HDassert(entry_ptr->flush_dep_nunser_children == 0);

                if (entry_ptr->is_protected) {

                    /* we probably have major problems -- but let's
                     * flush everything we can before we decide
                     * whether to flag an error.
                     */
                    tried_to_flush_protected_entry = TRUE;
                    protected_entries++;

                } /* end if */
                else {

                    if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)

                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")

                    if (cache_ptr->slist_changed) {

                        /* The slist has been modified by something
                         * other than the simple removal of the
                         * flushed entry after the flush.
                         *
                         * This has the potential to corrupt the
                         * scan through the slist, so restart it.
                         */
                        restart_slist_scan       = TRUE;
                        cache_ptr->slist_changed = FALSE;
                        H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)

                    } /* end if */

                    flushed_entries_last_pass = TRUE;

                } /* end else */
            }     /* end if */
        }         /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */

#if H5C_DO_SANITY_CHECKS
        /* Verify that the slist size and length are as expected. */
        HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) ==
                 cache_ptr->slist_len);
        HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) ==
                 cache_ptr->slist_size);
#endif /* H5C_DO_SANITY_CHECKS */

    } /* while */

    HDassert(protected_entries <= cache_ptr->pl_len);

    if (((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))

        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")

#if H5C_DO_SANITY_CHECKS
    if (!flush_marked_entries) {

        HDassert(cache_ptr->slist_ring_len[ring] == 0);
        HDassert(cache_ptr->slist_ring_size[ring] == 0);

    }  /* end if */
#endif /* H5C_DO_SANITY_CHECKS */

done:

    FUNC_LEAVE_NOAPI(ret_value)

} /* H5C__flush_ring() */

/*-------------------------------------------------------------------------
 *
 * Function:    H5C__flush_single_entry
 *
 * Purpose:     Flush or clear (and evict if requested) the cache entry
 *              with the specified address and type.  If the type is NULL,
 *              any unprotected entry at the specified address will be
 *              flushed (and possibly evicted).
 *
 *              Attempts to flush a protected entry will result in an
 *              error.
 *
 *              If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
 *              be cleared and not flushed, and the call can't be part of a
 *              sequence of flushes.
 *
 *              The function does nothing silently if there is no entry
 *              at the supplied address, or if the entry found has the
 *              wrong type.
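 *
 *              Typical flag combinations, as seen at the call sites in
 *              this file (illustrative, not exhaustive):
 *
 *                  H5C__DURING_FLUSH_FLAG
 *                      -- flush the entry as part of a cache-wide
 *                         flush, leaving it in the cache.
 *                  H5C__FLUSH_CLEAR_ONLY_FLAG
 *                      -- mark the entry clean without writing it.
 *                  H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG |
 *                  H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG
 *                      -- flush (if dirty) and destroy the entry,
 *                         removing it from the slist as it is destroyed.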
6327  *
6328  * Return:      Non-negative on success/Negative on failure or if there was
6329  *              an attempt to flush a protected item.
6330  *
6331  * Programmer:  John Mainzer, 5/5/04
6332  *
6333  * Modifications:
6334  *
6335  *              JRM -- 7/21/04
6336  *              Updated function for the addition of the hash table.
6337  *
6338  *              QAK -- 11/26/04
6339  *              Updated function for the switch from TBBTs to skip lists.
6340  *
6341  *              JRM -- 1/6/05
6342  *              Updated function to reset the flush_marker field.
6343  *              Also replace references to H5F_FLUSH_INVALIDATE and
6344  *              H5F_FLUSH_CLEAR_ONLY with references to
6345  *              H5C__FLUSH_INVALIDATE_FLAG and H5C__FLUSH_CLEAR_ONLY_FLAG
6346  *              respectively.
6347  *
6348  *              JRM -- 6/24/05
6349  *              Added code to remove dirty entries from the slist after
6350  *              they have been flushed.  Also added a sanity check that
6351  *              will scream if we attempt a write when writes are
6352  *              completely disabled.
6353  *
6354  *              JRM -- 7/5/05
6355  *              Added code to call the new log_flush callback whenever
6356  *              a dirty entry is written to disk.  Note that the callback
6357  *              is not called if the H5C__FLUSH_CLEAR_ONLY_FLAG is set,
6358  *              as there is no write to file in this case.
6359  *
6360  *              JRM -- 8/21/06
6361  *              Added code maintaining the flush_in_progress and
6362  *              destroy_in_progress fields in H5C_cache_entry_t.
6363  *
6364  *              Also added flush_flags parameter to the call to
6365  *              type_ptr->flush() so that the flush routine can report
6366  *              whether the entry has been resized or renamed.  Added
6367  *              code using the flush_flags variable to detect the case
6368  *              in which the target entry is resized during flush, and
6369  *              update the caches data structures accordingly.
6370  *
6371  *              JRM -- 3/29/07
6372  *              Added sanity checks on the new is_read_only and
6373  *              ro_ref_count fields.
6374  *
6375  *              QAK -- 2/07/08
6376  *              Separated "destroy entry" concept from "remove entry from
6377  *              cache" concept, by adding the 'take_ownership' flag and
6378  *              the "destroy_entry" variable.
6379  *
6380  *              JRM -- 11/5/08
6381  *              Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to
6382  *              maintain the new clean_index_size and clean_index_size
6383  *              fields of H5C_t.
6384  *
6385  *
6386  *              Missing entries??
6387  *
6388  *
6389  *              JRM -- 5/8/20
6390  *              Updated sanity checks for the possibility that the slist
6391  *              is disabled.
6392  *
6393  *              Also updated main comment to conform more closely with
6394  *              the current state of the code.
6395  *
6396  *-------------------------------------------------------------------------
6397  */
6398 herr_t
H5C__flush_single_entry(H5F_t * f,H5C_cache_entry_t * entry_ptr,unsigned flags)6399 H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
6400 {
6401     H5C_t * cache_ptr;                 /* Cache for file */
6402     hbool_t destroy;                   /* external flag */
6403     hbool_t clear_only;                /* external flag */
6404     hbool_t free_file_space;           /* external flag */
6405     hbool_t take_ownership;            /* external flag */
6406     hbool_t del_from_slist_on_destroy; /* external flag */
6407     hbool_t during_flush;              /* external flag */
6408     hbool_t write_entry;               /* internal flag */
6409     hbool_t destroy_entry;             /* internal flag */
6410     hbool_t generate_image;            /* internal flag */
6411     hbool_t update_page_buffer;        /* internal flag */
6412     hbool_t was_dirty;
6413     hbool_t suppress_image_entry_writes = FALSE;
6414     hbool_t suppress_image_entry_frees  = FALSE;
6415     haddr_t entry_addr                  = HADDR_UNDEF;
6416     herr_t  ret_value                   = SUCCEED; /* Return value */
6417 
6418     FUNC_ENTER_PACKAGE
6419 
6420     HDassert(f);
6421     cache_ptr = f->shared->cache;
6422     HDassert(cache_ptr);
6423     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
6424     HDassert(entry_ptr);
6425     HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
6426     HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
6427     HDassert(entry_ptr->type);
6428 
6429     /* setup external flags from the flags parameter */
6430     destroy                   = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
6431     clear_only                = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
6432     free_file_space           = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
6433     take_ownership            = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
6434     del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
6435     during_flush              = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
6436     generate_image            = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
6437     update_page_buffer        = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
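    /* For illustration (not executed here): callers combine these flags to
     * select behavior.  E.g., H5C__make_space_in_cache() below flushes an
     * entry in place with:
     *
     *     H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET);
     *
     * and evicts one with:
     *
     *     H5C__flush_single_entry(f, entry_ptr,
     *                             H5C__FLUSH_INVALIDATE_FLAG |
     *                                 H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG);
     */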
6438 
6439     /* Set the flag for destroying the entry, based on the 'take ownership'
6440      * and 'destroy' flags
6441      */
6442     if (take_ownership) {
6443 
6444         destroy_entry = FALSE;
6445     }
6446     else {
6447 
6448         destroy_entry = destroy;
6449     }
6450 
6451     /* we will write the entry to disk if it is dirty, and if the
6452      * clear only flag is not set.
6453      */
6454     if (entry_ptr->is_dirty && !clear_only) {
6455 
6456         write_entry = TRUE;
6457     }
6458     else {
6459 
6460         write_entry = FALSE;
6461     }
6462 
6463     /* if we have received the close warning, and we have been instructed to
6464      * generate a metadata cache image, and we have actually constructed
6465      * the entry images, set suppress_image_entry_frees to TRUE.
6466      *
6467      * Set suppress_image_entry_writes to TRUE if indicated by the
6468      * image_ctl flags.
6469      */
6470     if ((cache_ptr->close_warning_received) && (cache_ptr->image_ctl.generate_image) &&
6471         (cache_ptr->num_entries_in_image > 0) && (cache_ptr->image_entries != NULL)) {
6472 
6473         /* Sanity checks */
6474         HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
6475         HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
6476         HDassert((!clear_only) || !(entry_ptr->include_in_image));
6477         HDassert((!take_ownership) || !(entry_ptr->include_in_image));
6478         HDassert((!free_file_space) || !(entry_ptr->include_in_image));
6479 
6480         suppress_image_entry_frees = TRUE;
6481 
6482         if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) {
6483 
6484             suppress_image_entry_writes = TRUE;
6485 
6486         } /* end if */
6487     }     /* end if */
6488 
6489     /* run initial sanity checks */
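    /* Note: the HGOTO_ERROR checks below intentionally duplicate the
     * HDasserts -- HDassert() compiles out under NDEBUG, while the
     * HGOTO_ERROR checks remain active whenever H5C_DO_SANITY_CHECKS
     * is enabled, so sanity checking still fails cleanly in such builds.
     */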
6490 #if H5C_DO_SANITY_CHECKS
6491     if (cache_ptr->slist_enabled) {
6492 
6493         if (entry_ptr->in_slist) {
6494 
6495             HDassert(entry_ptr->is_dirty);
6496 
6497             if ((entry_ptr->flush_marker) && (!entry_ptr->is_dirty))
6498 
6499                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
6500         } /* end if */
6501         else {
6502 
6503             HDassert(!entry_ptr->is_dirty);
6504             HDassert(!entry_ptr->flush_marker);
6505 
6506             if ((entry_ptr->is_dirty) || (entry_ptr->flush_marker))
6507 
6508                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
6509 
6510         } /* end else */
6511     }
6512     else { /* slist is disabled */
6513 
6514         HDassert(!entry_ptr->in_slist);
6515 
6516         if (!entry_ptr->is_dirty) {
6517 
6518             if (entry_ptr->flush_marker)
6519 
6520                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?")
6521         }
6522     }
6523 #endif /* H5C_DO_SANITY_CHECKS */
6524 
6525     if (entry_ptr->is_protected) {
6526 
6527         HDassert(!entry_ptr->is_protected);
6528 
6529         /* Attempt to flush a protected entry -- scream and die. */
6530         HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
6531 
6532     } /* end if */
6533 
6534     /* Set entry_ptr->flush_in_progress = TRUE and set
6535      * entry_ptr->flush_marker = FALSE
6536      *
6537      * We will set flush_in_progress back to FALSE at the end if the
6538      * entry still exists at that point.
6539      */
6540     entry_ptr->flush_in_progress = TRUE;
6541     entry_ptr->flush_marker      = FALSE;
6542 
6543     /* Preserve current dirty state for later */
6544     was_dirty = entry_ptr->is_dirty;
6545 
6546     /* If the entry is dirty and we are doing a flush or a flush destroy,
6547      * or if we have been requested to generate an image, serialize the
6548      * entry.
6549      */
6550     if (write_entry || generate_image) {
6551 
6552         HDassert(entry_ptr->is_dirty);
6553 
6554         if (NULL == entry_ptr->image_ptr) {
6555 
6556             if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
6557 
6558                 HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
6559                             "memory allocation failed for on disk image buffer")
6560 
6561 #if H5C_DO_MEMORY_SANITY_CHECKS
6562             H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
6563                         H5C_IMAGE_EXTRA_SPACE);
6564 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
6565 
6566         } /* end if */
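        /* Note: when H5C_DO_MEMORY_SANITY_CHECKS is enabled, the
         * H5C_IMAGE_EXTRA_SPACE guard bytes copied in above sit just past
         * entry_ptr->size, allowing overruns of the image buffer during
         * serialization to be detected.
         */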
6567 
6568         if (!(entry_ptr->image_up_to_date)) {
6569 
6570             /* Sanity check */
6571             HDassert(!entry_ptr->prefetched);
6572 
6573             /* Generate the entry's image */
6574             if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
6575 
6576                 HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
6577 
6578         } /* end if ( ! (entry_ptr->image_up_to_date) ) */
6579     }     /* end if */
6580 
6581     /* Finally, write the image to disk.
6582      *
6583      * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
6584      * entry's type, we silently skip the write.  This
6585      * flag should only be used in test code.
6586      */
6587     if (write_entry) {
6588 
6589         HDassert(entry_ptr->is_dirty);
6590 
6591 #if H5C_DO_SANITY_CHECKS
6592         if ((cache_ptr->check_write_permitted) && (!(cache_ptr->write_permitted)))
6593 
6594             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
6595 #endif /* H5C_DO_SANITY_CHECKS */
6596 
6597         /* Write the image to disk unless the write is suppressed.
6598          *
6599          * The write is suppressed if both suppress_image_entry_writes
6600          * and entry_ptr->include_in_image are TRUE, or if the
6601          * H5AC__CLASS_SKIP_WRITES flag is set in the entry's type.  The
6602          * latter flag should only be used in test code.
6603          */
6604         if (((!suppress_image_entry_writes) || (!entry_ptr->include_in_image)) &&
6605             (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
6606 
6607             H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
6608 
6609 #ifdef H5_HAVE_PARALLEL
6610             if (cache_ptr->coll_write_list) {
6611 
6612                 if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
6613 
6614                     HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
6615             } /* end if */
6616             else {
6617 #endif /* H5_HAVE_PARALLEL */
6618 
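                /* A prefetched entry was read from a cache image and has not
                 * yet been deserialized, so its effective memory type must be
                 * recovered from the class table via the type id recorded at
                 * prefetch time.
                 */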
6619                 if (entry_ptr->prefetched) {
6620 
6621                     HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
6622 
6623                     mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
6624                 } /* end if */
6625                 else {
6626 
6627                     mem_type = entry_ptr->type->mem_type;
6628                 }
6629 
6630                 if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0)
6631                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
6632 #ifdef H5_HAVE_PARALLEL
6633             }
6634 #endif /* H5_HAVE_PARALLEL */
6635 
6636         } /* end if */
6637 
6638         /* if the entry has a notify callback, notify it that we have
6639          * just flushed the entry.
6640          */
6641         if ((entry_ptr->type->notify) &&
6642             ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0))
6643 
6644             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
6645 
6646     } /* if ( write_entry ) */
6647 
6648     /* At this point, all pre-serialize and serialize calls have been
6649      * made if it was appropriate to make them.  Similarly, the entry
6650      * has been written to disk if desired.
6651      *
6652      * Thus it is now safe to update the cache data structures for the
6653      * flush.
6654      */
6655 
6656     /* start by updating the statistics */
6657     if (clear_only) {
6658 
6659         /* only log a clear if the entry was dirty */
6660         if (was_dirty) {
6661 
6662             H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
6663 
6664         } /* end if */
6665     }
6666     else if (write_entry) {
6667 
6668         HDassert(was_dirty);
6669 
6670         /* only log a flush if we actually wrote to disk */
6671         H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
6672 
6673     } /* end else if */
6674 
6675     /* Note that the algorithm below is (very) similar to the set of operations
6676      * in H5C_remove_entry() and should be kept in sync with changes
6677      * to that code. - QAK, 2016/11/30
6678      */
6679 
6680     /* Update the cache internal data structures. */
6681     if (destroy) {
6682 
6683         /* Sanity checks */
6684         if (take_ownership) {
6685 
6686             HDassert(!destroy_entry);
6687         }
6688         else {
6689 
6690             HDassert(destroy_entry);
6691         }
6692 
6693         HDassert(!entry_ptr->is_pinned);
6694 
6695         /* Update stats, while entry is still in the cache */
6696         H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
6697 
6698         /* If the entry's type has a 'notify' callback and the entry is about
6699          * to be removed from the cache, send a 'before eviction' notice while
6700          * the entry is still fully integrated in the cache.
6701          */
6702         if ((entry_ptr->type->notify) &&
6703             ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0))
6704 
6705             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
6706 
6707         /* Update the cache internal data structures as appropriate
6708          * for a destroy.  Specifically:
6709          *
6710          * 1) Delete it from the index
6711          *
6712          * 2) Delete it from the skip list if requested.
6713          *
6714          * 3) Delete it from the collective read access list.
6715          *
6716          * 4) Update the replacement policy for eviction
6717          *
6718          * 5) Remove it from the tag list for this object
6719          *
6720          * Finally, if the destroy_entry flag is set, discard the
6721          * entry.
6722          */
6723         H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
6724 
6725         if ((entry_ptr->in_slist) && (del_from_slist_on_destroy)) {
6726 
6727             H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
6728         }
6729 
6730 #ifdef H5_HAVE_PARALLEL
6731         /* Check for collective read access flag */
6732         if (entry_ptr->coll_access) {
6733 
6734             entry_ptr->coll_access = FALSE;
6735 
6736             H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
6737 
6738         } /* end if */
6739 #endif    /* H5_HAVE_PARALLEL */
6740 
6741         H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
6742 
6743         /* Remove entry from tag list */
6744         if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
6745 
6746             HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
6747 
6748         /* verify that the entry is no longer part of any flush dependencies */
6749         HDassert(entry_ptr->flush_dep_nparents == 0);
6750         HDassert(entry_ptr->flush_dep_nchildren == 0);
6751 
6752     } /* end if */
6753     else {
6754 
6755         HDassert(clear_only || write_entry);
6756         HDassert(entry_ptr->is_dirty);
6757         HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
6758 
6759         /* We are either doing a flush or a clear.
6760          *
6761          * A clear and a flush are the same from the point of
6762          * view of the replacement policy and the slist.
6763          * Hence no differentiation between them.
6764          *
6765          *                              JRM -- 7/7/07
6766          */
6767 
6768         H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
6769 
6770         H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
6771 
6772         /* mark the entry as clean and update the index for
6773          * entry clean.  Also, call the clear callback
6774          * if defined.
6775          */
6776         entry_ptr->is_dirty = FALSE;
6777 
6778         H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
6779 
6780         /* Check for entry changing status and do notifications, etc. */
6781         if (was_dirty) {
6782 
6783             /* If the entry's type has a 'notify' callback send a
6784              * 'entry cleaned' notice now that the entry is fully
6785              * integrated into the cache.
6786              */
6787             if ((entry_ptr->type->notify) &&
6788                 ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
6789 
6790                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
6791                             "can't notify client about entry dirty flag cleared")
6792 
6793             /* Propagate the clean flag up the flush dependency chain
6794              * if appropriate
6795              */
6796             if (entry_ptr->flush_dep_ndirty_children != 0) {
6797 
6798                 HDassert(entry_ptr->flush_dep_ndirty_children == 0);
6799             }
6800 
6801             if (entry_ptr->flush_dep_nparents > 0) {
6802 
6803                 if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
6804 
6805                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag")
6806             }
6807         } /* end if */
6808     }     /* end else */
6809 
6810     /* reset the flush_in progress flag */
6811     entry_ptr->flush_in_progress = FALSE;
6812 
6813     /* capture the cache entry address for the log_flush call at the
6814      * end before the entry_ptr gets freed
6815      */
6816     entry_addr = entry_ptr->addr;
6817 
6818     /* Internal cache data structures should now be up to date, and
6819      * consistent with the status of the entry.
6820      *
6821      * Now discard the entry if appropriate.
6822      */
6823     if (destroy) {
6824 
6825         /* Sanity check */
6826         HDassert(0 == entry_ptr->flush_dep_nparents);
6827 
6828         /* if both suppress_image_entry_frees and entry_ptr->include_in_image
6829          * are true, simply set entry_ptr->image_ptr to NULL, as we have
6830          * another pointer to the buffer in an instance of H5C_image_entry_t
6831          * in cache_ptr->image_entries.
6832          *
6833          * Otherwise, free the buffer if it exists.
6834          */
6835         if (suppress_image_entry_frees && entry_ptr->include_in_image) {
6836 
6837             entry_ptr->image_ptr = NULL;
6838         }
6839         else if (entry_ptr->image_ptr != NULL) {
6840 
6841             entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
6842         }
6843 
6844         /* If the entry is not a prefetched entry, verify that the flush
6845          * dependency parents addresses array has been transferred.
6846          *
6847          * If the entry is prefetched, the free_isr routine will dispose of
6848          * the flush dependency parents addresses array if necessary.
6849          */
6850         if (!entry_ptr->prefetched) {
6851 
6852             HDassert(0 == entry_ptr->fd_parent_count);
6853             HDassert(NULL == entry_ptr->fd_parent_addrs);
6854 
6855         } /* end if */
6856 
6857         /* Check whether we should free the space in the file that
6858          * the entry occupies
6859          */
6860         if (free_file_space) {
6861 
6862             hsize_t fsf_size;
6863 
6864             /* Sanity checks */
6865             HDassert(H5F_addr_defined(entry_ptr->addr));
6866             HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
6867 #ifndef NDEBUG
6868             {
6869                 size_t curr_len;
6870 
6871                 /* Get the actual image size for the thing again */
6872                 entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
6873                 HDassert(curr_len == entry_ptr->size);
6874             }
6875 #endif /* NDEBUG */
6876 
6877             /* If the file space free size callback is defined, use
6878              * it to get the size of the block of file space to free.
6879              * Otherwise use entry_ptr->size.
6880              */
6881             if (entry_ptr->type->fsf_size) {
6882 
6883                 if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
6884 
6885                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
6886 
6887             }      /* end if */
6888             else { /* no file space free size callback -- use entry size */
6889 
6890                 fsf_size = entry_ptr->size;
6891             }
6892 
6893             /* Release the space on disk */
6894             if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
6895 
6896                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
6897 
6898         } /* end if ( free_file_space ) */
6899 
6900         /* Reset the pointer to the cache the entry is within. -QAK */
6901         entry_ptr->cache_ptr = NULL;
6902 
6903         /* increment entries_removed_counter and set
6904          * last_entry_removed_ptr.  As we are likely about to
6905          * free the entry, recall that last_entry_removed_ptr
6906          * must NEVER be dereferenced.
6907          *
6908          * Recall that these fields are maintained to allow functions
6909          * that perform scans of lists of entries to detect the
6910          * unexpected removal of entries (via expunge, eviction,
6911          * or take ownership at present), so that they can re-start
6912          * their scans if necessary.
6913          *
6914          * Also check if the entry we are watching for removal is being
6915          * removed (usually the 'next' entry for an iteration) and reset
6916          * it to indicate that it was removed.
6917          */
6918         cache_ptr->entries_removed_counter++;
6919         cache_ptr->last_entry_removed_ptr = entry_ptr;
6920 
6921         if (entry_ptr == cache_ptr->entry_watched_for_removal) {
6922 
6923             cache_ptr->entry_watched_for_removal = NULL;
6924         }
6925 
6926         /* Check for actually destroying the entry in memory */
6927         /* (As opposed to taking ownership of it) */
6928         if (destroy_entry) {
6929 
6930             if (entry_ptr->is_dirty) {
6931 
6932                 /* Reset dirty flag */
6933                 entry_ptr->is_dirty = FALSE;
6934 
6935                 /* If the entry's type has a 'notify' callback send a
6936                  * 'entry cleaned' notice now that the entry is fully
6937                  * integrated into the cache.
6938                  */
6939                 if ((entry_ptr->type->notify) &&
6940                     ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
6941 
6942                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
6943                                 "can't notify client about entry dirty flag cleared")
6944 
6945             } /* end if */
6946 
6947             /* we are about to discard the in core representation --
6948              * set the magic field to bad magic so we can detect a
6949              * freed entry if we see one.
6950              */
6951             entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
6952 
6953             /* verify that the image has been freed */
6954             HDassert(entry_ptr->image_ptr == NULL);
6955 
6956             if (entry_ptr->type->free_icr((void *)entry_ptr) < 0)
6957 
6958                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
6959 
6960         } /* end if */
6961         else {
6962 
6963             HDassert(take_ownership);
6964 
6965             /* client is taking ownership of the entry.
6966              * set bad magic here too so the cache will choke
6967              * unless the entry is re-inserted properly
6968              */
6969             entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
6970 
6971         } /* end else */
6972     }     /* if (destroy) */
6973 
6974     /* Check if we have to update the page buffer with cleared entries
6975      * so it doesn't go out of date
6976      */
6977     if (update_page_buffer) {
6978 
6979         /* Sanity check */
6980         HDassert(!destroy);
6981         HDassert(entry_ptr->image_ptr);
6982 
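        /* Only entries that fit within a single page can be stored in the
         * page buffer -- larger entries bypass it, hence the page_size
         * check below.
         */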
6983         if ((f->shared->page_buf) && (f->shared->page_buf->page_size >= entry_ptr->size)) {
6984 
6985             if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size,
6986                                   entry_ptr->image_ptr) < 0)
6987 
6988                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update page buffer with metadata cache entry")
6989         } /* end if */
6990     }     /* end if */
6991 
6992     if (cache_ptr->log_flush) {
6993 
6994         if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
6995 
6996             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
6997 
6998     } /* end if */
6999 
7000 done:
7001 
7002     HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress));
7003 
7004     HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty));
7005 
7006     FUNC_LEAVE_NOAPI(ret_value)
7007 
7008 } /* H5C__flush_single_entry() */
7009 
7010 /*-------------------------------------------------------------------------
7011  *
7012  * Function:    H5C__verify_len_eoa
7013  *
7014  * Purpose:     Verify that 'len' does not exceed the EOA when 'actual'
7015  *              is false, i.e. 'len' is the initial speculative length
7016  *              from the get_load_size callback with a null image pointer.
7017  *              If it exceeds the EOA, adjust 'len' accordingly.
7018  *
7019  *              Verify that 'len' does not exceed the EOA when 'actual'
7020  *              is true, i.e. 'len' is the actual length from the
7021  *              get_load_size callback with a non-null image pointer.
7022  *              If it exceeds the EOA, return an error.
7023  *
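 *              For example (illustrative numbers): with eoa = 1000 and
 *              addr = 980, a speculative initial 'len' of 64 would be
 *              trimmed to 20, while an actual 'len' of 64 would be
 *              rejected as an error.
 *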
7024  * Return:      FAIL if error is detected, SUCCEED otherwise.
7025  *
7026  * Programmer:  Vailin Choi
7027  *              9/6/15
7028  *
7029  *-------------------------------------------------------------------------
7030  */
7031 static herr_t
7032 H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual)
7033 {
7034     H5FD_mem_t cooked_type;         /* Modified type, accounting for switching global heaps */
7035     haddr_t    eoa;                 /* End-of-allocation in the file */
7036     herr_t     ret_value = SUCCEED; /* Return value */
7037 
7038     FUNC_ENTER_STATIC
7039 
7040     /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
7041      * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
7042      * Thus we do the same for purposes of computing the EOA
7043      * for sanity checks.
7044      */
7045     cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
7046 
7047     /* Get the file's end-of-allocation value */
7048     eoa = H5F_get_eoa(f, cooked_type);
7049     if (!H5F_addr_defined(eoa))
7050         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
7051 
7052     /* Check for bad address in general */
7053     if (H5F_addr_gt(addr, eoa))
7054         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
7055 
7056     /* Check if the amount of data to read will be past the EOA */
7057     if (H5F_addr_gt((addr + *len), eoa)) {
7058         if (actual)
7059             HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
7060         else
7061             /* Trim down the length of the metadata */
7062             *len = (size_t)(eoa - addr);
7063     } /* end if */
7064 
7065     if (*len == 0)
7066         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len is zero after adjustment for EOA")
7067 
7068 done:
7069     FUNC_LEAVE_NOAPI(ret_value)
7070 } /* H5C__verify_len_eoa() */
7071 
7072 /*-------------------------------------------------------------------------
7073  *
7074  * Function:    H5C__load_entry
7075  *
7076  * Purpose:     Attempt to load the entry at the specified disk address
7077  *              and with the specified type into memory.  If successful,
7078  *              return the in-memory address of the entry.  Return NULL
7079  *              on failure.
7080  *
7081  *              Note that this function simply loads the entry into
7082  *              core.  It does not insert it into the cache.
7083  *
7084  * Return:      Non-NULL on success / NULL on failure.
7085  *
7086  * Programmer:  John Mainzer, 5/18/04
7087  *
7088  *-------------------------------------------------------------------------
7089  */
7090 static void *
7091 H5C__load_entry(H5F_t *f,
7092 #ifdef H5_HAVE_PARALLEL
7093                 hbool_t coll_access,
7094 #endif /* H5_HAVE_PARALLEL */
7095                 const H5C_class_t *type, haddr_t addr, void *udata)
7096 {
7097     hbool_t            dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
7098     uint8_t *          image = NULL;  /* Buffer for disk image                    */
7099     void *             thing = NULL;  /* Pointer to thing loaded                  */
7100     H5C_cache_entry_t *entry = NULL;  /* Alias for thing loaded, as cache entry   */
7101     size_t             len;           /* Size of image in file                    */
7102 #ifdef H5_HAVE_PARALLEL
7103     int      mpi_rank = 0;             /* MPI process rank                         */
7104     MPI_Comm comm     = MPI_COMM_NULL; /* File MPI Communicator                    */
7105     int      mpi_code;                 /* MPI error code                           */
7106 #endif                                 /* H5_HAVE_PARALLEL */
7107     void *ret_value = NULL;            /* Return value                             */
7108 
7109     FUNC_ENTER_STATIC
7110 
7111     /* Sanity checks */
7112     HDassert(f);
7113     HDassert(f->shared);
7114     HDassert(f->shared->cache);
7115     HDassert(type);
7116     HDassert(H5F_addr_defined(addr));
7117     HDassert(type->get_initial_load_size);
7118     if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
7119         HDassert(type->get_final_load_size);
7120     else
7121         HDassert(NULL == type->get_final_load_size);
7122     HDassert(type->deserialize);
7123 
7124     /* Can't see how skip reads could be usefully combined with
7125      * the speculative read flag.  Hence disallow.
7126      */
7127     HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
7128 
7129     /* Call the get_initial_load_size callback, to retrieve the initial size of image */
7130     if (type->get_initial_load_size(udata, &len) < 0)
7131         HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
7132     HDassert(len > 0);
7133 
7134     /* Check for possible speculative read off the end of the file */
7135     if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
7136         if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
7137             HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
7138 
7139     /* Allocate the buffer for reading the on-disk entry image */
7140     if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
7141         HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
7142 #if H5C_DO_MEMORY_SANITY_CHECKS
7143     H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
7144 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
7145 
7146 #ifdef H5_HAVE_PARALLEL
7147     if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
7148         if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
7149             HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
7150         if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
7151             HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
7152     }  /* end if */
7153 #endif /* H5_HAVE_PARALLEL */
7154 
7155     /* Get the on-disk entry image */
7156     if (0 == (type->flags & H5C__CLASS_SKIP_READS)) {
7157         unsigned tries, max_tries;   /* The # of read attempts               */
7158         unsigned retries;            /* The # of retries                     */
7159         htri_t   chk_ret;            /* return from verify_chksum callback   */
7160         size_t   actual_len = len;   /* The actual length, after speculative reads have been resolved */
7161         uint64_t nanosec    = 1;     /* # of nanoseconds to sleep between retries */
7162         void *   new_image;          /* Pointer to image                     */
7163         hbool_t  len_changed = TRUE; /* Whether to re-check speculative entries */
7164 
7165         /* Get the # of read attempts */
7166         max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
7167 
7168         /*
7169          * This do/while loop performs the following until the metadata checksum
7170          * is correct or the file's number of allowed read attempts is reached.
7171          *   --read the metadata
7172          *   --determine the actual size of the metadata
7173          *   --perform checksum verification
7174          */
7175         do {
7176             if (actual_len != len) {
7177                 if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
7178                     HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
7179                 image = (uint8_t *)new_image;
7180 #if H5C_DO_MEMORY_SANITY_CHECKS
7181                 H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
7182 #endif        /* H5C_DO_MEMORY_SANITY_CHECKS */
7183             } /* end if */
7184 
7185 #ifdef H5_HAVE_PARALLEL
7186             if (!coll_access || 0 == mpi_rank) {
7187 #endif /* H5_HAVE_PARALLEL */
7188                 if (H5F_block_read(f, type->mem_type, addr, len, image) < 0)
7189                     HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image")
7190 #ifdef H5_HAVE_PARALLEL
7191             } /* end if */
7192             /* if the collective metadata read optimization is turned on,
7193              * bcast the metadata read from process 0 to all ranks in the file
7194              * communicator
7195              */
7196             if (coll_access) {
7197                 int buf_size;
7198 
7199                 H5_CHECKED_ASSIGN(buf_size, int, len, size_t);
7200                 if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm)))
7201                     HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
7202             } /* end if */
7203 #endif        /* H5_HAVE_PARALLEL */
7204 
7205             /* If the entry could be read speculatively and the length is still
7206              *  changing, check for updating the actual size
7207              */
7208             if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) {
7209                 /* Retrieve the actual length */
7210                 actual_len = len;
7211                 if (type->get_final_load_size(image, len, udata, &actual_len) < 0)
7212                     continue; /* Transfer control to while() and count towards retries */
7213 
7214                 /* Check for the length changing */
7215                 if (actual_len != len) {
7216                     /* Verify that the length isn't past the EOA for the file */
7217                     if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
7218                         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
7219 
7220                     /* Expand buffer to new size */
7221                     if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
7222                         HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
7223                     image = (uint8_t *)new_image;
7224 #if H5C_DO_MEMORY_SANITY_CHECKS
7225                     H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
7226 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
7227 
7228                     if (actual_len > len) {
7229 #ifdef H5_HAVE_PARALLEL
7230                         if (!coll_access || 0 == mpi_rank) {
7231 #endif /* H5_HAVE_PARALLEL */
7232                             /* If the thing's image needs to be bigger for a speculatively
7233                              * loaded thing, go get the on-disk image again (the extra portion).
7234                              */
7235                             if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) <
7236                                 0)
7237                                 HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
7238 #ifdef H5_HAVE_PARALLEL
7239                         }
7240                         /* If the collective metadata read optimization is turned on,
7241                          * Bcast the metadata read from process 0 to all ranks in the file
7242                          * communicator */
7243                         if (coll_access) {
7244                             int buf_size;
7245 
7246                             H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t);
7247                             if (MPI_SUCCESS !=
7248                                 (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm)))
7249                                 HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
7250                         } /* end if */
7251 #endif                    /* H5_HAVE_PARALLEL */
7252                     }     /* end if */
7253                 }         /* end if (actual_len != len) */
7254                 else {
7255                     /* The length has stabilized */
7256                     len_changed = FALSE;
7257 
7258                     /* Set the final length */
7259                     len = actual_len;
7260                 } /* else */
7261             }     /* end if */
7262 
7263             /* If there's no way to verify the checksum for a piece of metadata
7264              * (usually because there's no checksum in the file), leave now
7265              */
7266             if (type->verify_chksum == NULL)
7267                 break;
7268 
7269             /* Verify the checksum for the metadata image */
7270             if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
7271                 HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback")
7272             if (chk_ret == TRUE)
7273                 break;
7274 
7275             /* Sleep for some time */
7276             H5_nanosleep(nanosec);
7277             nanosec *= 2; /* Double the sleep time next time */
7278         } while (--tries);
7279 
7280         /* Check for too many tries */
7281         if (tries == 0)
7282             HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts")
7283 
7284         /* Calculate and track the # of retries */
7285         retries = max_tries - tries;
7286         if (retries) /* Does not track 0 retry */
7287             if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0)
7288                 HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u", retries)
7289 
7290         /* Set the final length (in case it wasn't set earlier) */
7291         len = actual_len;
7292     } /* end if !H5C__CLASS_SKIP_READS */
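    /* At this point 'image' holds the complete on-disk image of 'len' bytes
     * (checksum-verified when the type supplies a verify_chksum callback).
     * For a speculative load, e.g. an initial guess of 512 bytes that
     * get_final_load_size resolves to 1024, the loop above reallocated the
     * buffer and read the remaining 512 bytes from addr + 512.
     */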
7293 
7294     /* Deserialize the on-disk image into the native memory form */
7295     if (NULL == (thing = type->deserialize(image, len, udata, &dirty)))
7296         HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
7297 
7298     entry = (H5C_cache_entry_t *)thing;
7299 
7300     /* In general, an entry should be clean just after it is loaded.
7301      *
7302      * However, when this code is used in the metadata cache, it is
7303      * possible that object headers will be dirty at this point, as
7304      * the deserialize function will alter object headers if necessary to
7305      * fix an old bug.
7306      *
7307      * In the following assert:
7308      *
7309      *     HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
7310      *
7311      * note that type ids 5 & 6 are associated with object headers in the
7312      * metadata cache.
7313      *
7314      * When we get to using H5C for other purposes, we may wish to
7315      * tighten up the assert so that the loophole only applies to the
7316      * metadata cache.
7317      */
7318 
7319     HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
7320 
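    /* Initialize the cache's bookkeeping fields in the newly loaded entry.
     * The deserialized 'thing' must begin with an H5C_cache_entry_t, so
     * these assignments are valid for any entry type.
     */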
7321     entry->magic     = H5C__H5C_CACHE_ENTRY_T_MAGIC;
7322     entry->cache_ptr = f->shared->cache;
7323     entry->addr      = addr;
7324     entry->size      = len;
7325     HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
7326     entry->image_ptr        = image;
7327     entry->image_up_to_date = !dirty;
7328     entry->type             = type;
7329     entry->is_dirty         = dirty;
7330     entry->dirtied          = FALSE;
7331     entry->is_protected     = FALSE;
7332     entry->is_read_only     = FALSE;
7333     entry->ro_ref_count     = 0;
7334     entry->is_pinned        = FALSE;
7335     entry->in_slist         = FALSE;
7336     entry->flush_marker     = FALSE;
7337 #ifdef H5_HAVE_PARALLEL
7338     entry->clear_on_unprotect = FALSE;
7339     entry->flush_immediately  = FALSE;
7340     entry->coll_access        = coll_access;
7341 #endif /* H5_HAVE_PARALLEL */
7342     entry->flush_in_progress   = FALSE;
7343     entry->destroy_in_progress = FALSE;
7344 
7345     entry->ring = H5C_RING_UNDEFINED;
7346 
7347     /* Initialize flush dependency fields */
7348     entry->flush_dep_parent          = NULL;
7349     entry->flush_dep_nparents        = 0;
7350     entry->flush_dep_parent_nalloc   = 0;
7351     entry->flush_dep_nchildren       = 0;
7352     entry->flush_dep_ndirty_children = 0;
7353     entry->flush_dep_nunser_children = 0;
7354     entry->ht_next                   = NULL;
7355     entry->ht_prev                   = NULL;
7356     entry->il_next                   = NULL;
7357     entry->il_prev                   = NULL;
7358 
7359     entry->next = NULL;
7360     entry->prev = NULL;
7361 
7362 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
7363     entry->aux_next = NULL;
7364     entry->aux_prev = NULL;
7365 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
7366 
7367 #ifdef H5_HAVE_PARALLEL
7368     entry->coll_next = NULL;
7369     entry->coll_prev = NULL;
7370 #endif /* H5_HAVE_PARALLEL */
7371 
7372     /* initialize cache image related fields */
7373     entry->include_in_image     = FALSE;
7374     entry->lru_rank             = 0;
7375     entry->image_dirty          = FALSE;
7376     entry->fd_parent_count      = 0;
7377     entry->fd_parent_addrs      = NULL;
7378     entry->fd_child_count       = 0;
7379     entry->fd_dirty_child_count = 0;
7380     entry->image_fd_height      = 0;
7381     entry->prefetched           = FALSE;
7382     entry->prefetch_type_id     = 0;
7383     entry->age                  = 0;
7384     entry->prefetched_dirty     = FALSE;
7385 #ifndef NDEBUG /* debugging field */
7386     entry->serialization_count = 0;
7387 #endif /* NDEBUG */
7388 
7389     entry->tl_next  = NULL;
7390     entry->tl_prev  = NULL;
7391     entry->tag_info = NULL;
7392 
7393     H5C__RESET_CACHE_ENTRY_STATS(entry);
7394 
7395     ret_value = thing;
7396 
7397 done:
7398     /* Cleanup on error */
7399     if (NULL == ret_value) {
7400         /* Release resources */
7401         if (thing && type->free_icr(thing) < 0)
7402             HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed")
7403         if (image)
7404             image = (uint8_t *)H5MM_xfree(image);
7405     } /* end if */
7406 
7407     FUNC_LEAVE_NOAPI(ret_value)
7408 } /* H5C__load_entry() */
7409 
7410 /*-------------------------------------------------------------------------
7411  *
7412  * Function:    H5C__make_space_in_cache
7413  *
7414  * Purpose:     Attempt to evict cache entries until the index_size
7415  *        is at least space_needed below max_cache_size.
7416  *
7417  *        In passing, also attempt to bring cLRU_list_size to a
7418  *        value greater than min_clean_size.
7419  *
7420  *        Depending on circumstances, both of these goals may
7421  *        be impossible, as in parallel mode, we must avoid generating
7422  *        a write as part of a read (to avoid deadlock in collective
7423  *        I/O), and in all cases, it is possible (though hopefully
7424  *        highly unlikely) that the protected list may exceed the
7425  *        maximum size of the cache.
7426  *
7427  *        Thus the function simply does its best, returning success
7428  *        unless an error is encountered.
7429  *
7430  *        Observe that this function cannot occasion a read.
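 *
 *        For example (hypothetical numbers): with max_cache_size =
 *        4 MiB, index_size = 4 MiB, and space_needed = 64 KiB, the
 *        main loop scans the LRU from the tail, flushing dirty entries
 *        and evicting clean ones until index_size + space_needed <=
 *        max_cache_size and clean_index_size + empty_space >=
 *        min_clean_size, or the scan limit is reached.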
7431  *
7432  * Return:      Non-negative on success/Negative on failure.
7433  *
7434  * Programmer:  John Mainzer, 5/14/04
7435  *
7436  *-------------------------------------------------------------------------
7437  */
7438 herr_t
7439 H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
7440 {
7441     H5C_t *cache_ptr = f->shared->cache;
7442 #if H5C_COLLECT_CACHE_STATS
7443     int32_t clean_entries_skipped    = 0;
7444     int32_t dirty_pf_entries_skipped = 0;
7445     int32_t total_entries_scanned    = 0;
7446 #endif /* H5C_COLLECT_CACHE_STATS */
7447     uint32_t           entries_examined = 0;
7448     uint32_t           initial_list_len;
7449     size_t             empty_space;
7450     hbool_t            reentrant_call    = FALSE;
7451     hbool_t            prev_is_dirty     = FALSE;
7452     hbool_t            didnt_flush_entry = FALSE;
7453     hbool_t            restart_scan;
7454     H5C_cache_entry_t *entry_ptr;
7455     H5C_cache_entry_t *prev_ptr;
7456     H5C_cache_entry_t *next_ptr;
7457     uint32_t           num_corked_entries = 0;
7458     herr_t             ret_value          = SUCCEED; /* Return value */
7459 
7460     FUNC_ENTER_PACKAGE
7461 
7462     /* Sanity checks */
7463     HDassert(f);
7464     HDassert(cache_ptr);
7465     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
7466     HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
7467 
7468     /* check to see if cache_ptr->msic_in_progress is TRUE.  If it is, this
7469      * is a re-entrant call via a client callback called in the make
7470      * space in cache process.  To avoid an infinite recursion, set
7471      * reentrant_call to TRUE, and goto done.
7472      */
7473     if (cache_ptr->msic_in_progress) {
7474         reentrant_call = TRUE;
7475         HGOTO_DONE(SUCCEED);
7476     } /* end if */
7477 
7478     cache_ptr->msic_in_progress = TRUE;
7479 
7480     if (write_permitted) {
7481         restart_scan     = FALSE;
7482         initial_list_len = cache_ptr->LRU_list_len;
7483         entry_ptr        = cache_ptr->LRU_tail_ptr;
7484 
7485         if (cache_ptr->index_size >= cache_ptr->max_cache_size)
7486             empty_space = 0;
7487         else
7488             empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
7489 
7490         while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) ||
7491                 ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) &&
7492                (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) {
7493             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
7494             HDassert(!(entry_ptr->is_protected));
7495             HDassert(!(entry_ptr->is_read_only));
7496             HDassert((entry_ptr->ro_ref_count) == 0);
7497 
7498             next_ptr = entry_ptr->next;
7499             prev_ptr = entry_ptr->prev;
7500 
7501             if (prev_ptr != NULL)
7502                 prev_is_dirty = prev_ptr->is_dirty;
7503 
7504             if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
7505 
7506                 /* Skip "dirty" corked entries.  */
7507                 ++num_corked_entries;
7508                 didnt_flush_entry = TRUE;
7509             }
7510             else if (((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && (!entry_ptr->flush_in_progress) &&
7511                      (!entry_ptr->prefetched_dirty)) {
7512 
7513                 didnt_flush_entry = FALSE;
7514 
7515                 if (entry_ptr->is_dirty) {
7516 
7517 #if H5C_COLLECT_CACHE_STATS
7518                     if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) {
7519 
7520                         cache_ptr->entries_scanned_to_make_space++;
7521                     }
7522 #endif /* H5C_COLLECT_CACHE_STATS */
7523 
7524                     /* reset entries_removed_counter and
7525                      * last_entry_removed_ptr prior to the call to
7526                      * H5C__flush_single_entry() so that we can spot
7527                      * unexpected removals of entries from the cache,
7528                      * and set the restart_scan flag if proceeding
7529                      * would be likely to cause us to scan an entry
7530                      * that is no longer in the cache.
7531                      */
7532                     cache_ptr->entries_removed_counter = 0;
7533                     cache_ptr->last_entry_removed_ptr  = NULL;
7534 
7535                     if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
7536                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
7537 
7538                     if ((cache_ptr->entries_removed_counter > 1) ||
7539                         (cache_ptr->last_entry_removed_ptr == prev_ptr))
7540 
7541                         restart_scan = TRUE;
7542                 }
7543                 else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
7544 #ifdef H5_HAVE_PARALLEL
7545                          && !(entry_ptr->coll_access)
7546 #endif /* H5_HAVE_PARALLEL */
7547                 ) {
7548 #if H5C_COLLECT_CACHE_STATS
7549                     cache_ptr->entries_scanned_to_make_space++;
7550 #endif /* H5C_COLLECT_CACHE_STATS */
7551 
7552                     if (H5C__flush_single_entry(f, entry_ptr,
7553                                                 H5C__FLUSH_INVALIDATE_FLAG |
7554                                                     H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
7555                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
7556                 }
7557                 else {
7558                     /* We have enough space so don't flush clean entry. */
7559 #if H5C_COLLECT_CACHE_STATS
7560                     clean_entries_skipped++;
7561 #endif /* H5C_COLLECT_CACHE_STATS */
7562                     didnt_flush_entry = TRUE;
7563                 }
7564 
7565 #if H5C_COLLECT_CACHE_STATS
7566                 total_entries_scanned++;
7567 #endif /* H5C_COLLECT_CACHE_STATS */
7568             }
7569             else {
7570 
7571                 /* Skip epoch markers, entries that are in the process
7572                  * of being flushed, and entries marked as prefetched_dirty
7573                  * (occurs in the R/O case only).
7574                  */
7575                 didnt_flush_entry = TRUE;
7576 
7577 #if H5C_COLLECT_CACHE_STATS
7578                 if (entry_ptr->prefetched_dirty)
7579                     dirty_pf_entries_skipped++;
7580 #endif /* H5C_COLLECT_CACHE_STATS */
7581             }
7582 
7583             if (prev_ptr != NULL) {
7584 
7585                 if (didnt_flush_entry) {
7586 
7587                     /* epoch markers don't get flushed, and we don't touch
7588                      * entries that are in the process of being flushed.
7589                      * Hence no need for sanity checks, as we haven't
7590                      * flushed anything.  Thus just set entry_ptr to prev_ptr
7591                      * and go on.
7592                      */
7593                     entry_ptr = prev_ptr;
7594                 }
7595                 else if ((restart_scan) || (prev_ptr->is_dirty != prev_is_dirty) ||
7596                          (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
7597 
7598                     /* something has happened to the LRU -- start over
7599                      * from the tail.
7600                      */
7601                     restart_scan = FALSE;
7602                     entry_ptr    = cache_ptr->LRU_tail_ptr;
7603                     H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
7604                 }
7605                 else {
7606 
7607                     entry_ptr = prev_ptr;
7608                 }
7609             }
7610             else {
7611 
7612                 entry_ptr = NULL;
7613             }
7614 
7615             entries_examined++;
7616 
7617             if (cache_ptr->index_size >= cache_ptr->max_cache_size) {
7618 
7619                 empty_space = 0;
7620             }
7621             else {
7622 
7623                 empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
7624             }
7625 
7626             HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
7627         }
7628 
7629 #if H5C_COLLECT_CACHE_STATS
7630         cache_ptr->calls_to_msic++;
7631 
7632         cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
7633         cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
7634         cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
7635 
7636         if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) {
7637 
7638             cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
7639         }
7640 
7641         if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
7642             cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
7643 
7644         if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) {
7645 
7646             cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
7647         }
7648 #endif /* H5C_COLLECT_CACHE_STATS */
7649 
7650         /* NEED: work on a better assert for corked entries */
7651         HDassert((entries_examined > (2 * initial_list_len)) ||
7652                  ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
7653                   cache_ptr->max_cache_size) ||
7654                  ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) ||
7655                  ((num_corked_entries)));
7656 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
7657 
7658         HDassert((entries_examined > (2 * initial_list_len)) ||
7659                  (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size));
7660         HDassert((entries_examined > (2 * initial_list_len)) ||
7661                  (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size));
7662 
7663 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
7664     }
7665     else {
7666 
7667         HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
7668 
7669 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
7670         initial_list_len = cache_ptr->cLRU_list_len;
7671         entry_ptr        = cache_ptr->cLRU_tail_ptr;
7672 
7673         while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
7674                (entries_examined <= initial_list_len) && (entry_ptr != NULL)) {
7675             HDassert(!(entry_ptr->is_protected));
7676             HDassert(!(entry_ptr->is_read_only));
7677             HDassert((entry_ptr->ro_ref_count) == 0);
7678             HDassert(!(entry_ptr->is_dirty));
7679 
7680             prev_ptr = entry_ptr->aux_prev;
7681 
7682             if ((!(entry_ptr->prefetched_dirty))
7683 #ifdef H5_HAVE_PARALLEL
7684                 && (!(entry_ptr->coll_access))
7685 #endif /* H5_HAVE_PARALLEL */
7686             ) {
7687                 if (H5C__flush_single_entry(
7688                         f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
7689                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
7690 
7691             } /* end if */
7692 
7693             /* we are scanning the clean LRU, so the serialize function
7694              * will not be called on any entry -- thus there is no
7695              * concern about the list being modified out from under
7696              * this function.
7697              */
7698 
7699             entry_ptr = prev_ptr;
7700             entries_examined++;
7701         }
7702 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
7703     }
7704 
7705 done:
7706     /* Sanity checks */
7707     HDassert(cache_ptr->msic_in_progress);
7708     if (!reentrant_call)
7709         cache_ptr->msic_in_progress = FALSE;
7710     HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
7711 
7712     FUNC_LEAVE_NOAPI(ret_value)
7713 } /* H5C__make_space_in_cache() */
7714 
7715 /*-------------------------------------------------------------------------
7716  *
7717  * Function:    H5C__validate_lru_list
7718  *
7719  * Purpose:     Debugging function that scans the LRU list for errors.
7720  *
7721  *        If an error is detected, the function generates a
7722  *        diagnostic and returns FAIL.  If no error is detected,
7723  *        the function returns SUCCEED.
7724  *
7725  * Return:      FAIL if error is detected, SUCCEED otherwise.
7726  *
7727  * Programmer:  John Mainzer, 7/14/05
7728  *
7729  *-------------------------------------------------------------------------
7730  */
7731 #if H5C_DO_EXTREME_SANITY_CHECKS
7732 static herr_t
7733 H5C__validate_lru_list(H5C_t *cache_ptr)
7734 {
7735     int32_t            len       = 0;
7736     size_t             size      = 0;
7737     H5C_cache_entry_t *entry_ptr = NULL;
7738     herr_t             ret_value = SUCCEED; /* Return value */
7739 
7740     FUNC_ENTER_STATIC
7741 
7742     HDassert(cache_ptr);
7743     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
7744 
7745     if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) &&
7746         (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr))
7747         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
7748 
7749     if (cache_ptr->LRU_list_len < 0)
7750         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
7751 
7752     if ((cache_ptr->LRU_list_len == 1) &&
7753         ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) ||
7754          (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size)))
7755         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
7756 
7757     if ((cache_ptr->LRU_list_len >= 1) &&
7758         ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) ||
7759          (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL)))
7760         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
7761 
7762     entry_ptr = cache_ptr->LRU_head_ptr;
7763     while (entry_ptr != NULL) {
7764         if ((entry_ptr != cache_ptr->LRU_head_ptr) &&
7765             ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
7766             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
7767 
7768         if ((entry_ptr != cache_ptr->LRU_tail_ptr) &&
7769             ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
7770             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
7771 
7772         if ((entry_ptr->is_pinned) || (entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache))
7773             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
7774 
7775         len++;
7776         size += entry_ptr->size;
7777         entry_ptr = entry_ptr->next;
7778     }
7779 
7780     if ((cache_ptr->LRU_list_len != len) || (cache_ptr->LRU_list_size != size))
7781         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
7782 
7783 done:
7784     if (ret_value != SUCCEED)
7785         HDassert(0);
7786 
7787     FUNC_LEAVE_NOAPI(ret_value)
7788 } /* H5C__validate_lru_list() */
7789 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
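
/* Usage note (illustrative only, not part of the build): the validation
 * routines in this file are intended to be called in bracketing pairs
 * around operations that modify the cache's list structures, e.g.:
 *
 *     #if H5C_DO_EXTREME_SANITY_CHECKS
 *         if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
 *             (H5C__validate_pinned_entry_list(cache_ptr) < 0) ||
 *             (H5C__validate_lru_list(cache_ptr) < 0))
 *             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
 *     #endif
 *
 * H5C__serialize_cache() below uses exactly this pattern on entry.
 */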

/*-------------------------------------------------------------------------
 *
 * Function:    H5C__validate_pinned_entry_list
 *
 * Purpose:     Debugging function that scans the pinned entry list for
 *              errors.
 *
 *        If an error is detected, the function generates a
 *        diagnostic and returns FAIL.  If no error is detected,
 *        the function returns SUCCEED.
 *
 * Return:      FAIL if error is detected, SUCCEED otherwise.
 *
 * Programmer:  John Mainzer, 4/25/14
 *
 *-------------------------------------------------------------------------
 */
#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t
H5C__validate_pinned_entry_list(H5C_t *cache_ptr)
{
    int32_t            len       = 0;
    size_t             size      = 0;
    H5C_cache_entry_t *entry_ptr = NULL;
    herr_t             ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);

    if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) &&
        (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")

    if (cache_ptr->pel_len < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")

    if ((cache_ptr->pel_len == 1) &&
        ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) ||
         (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size)))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")

    if ((cache_ptr->pel_len >= 1) &&
        ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) ||
         (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL)))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")

    entry_ptr = cache_ptr->pel_head_ptr;
    while (entry_ptr != NULL) {
        if ((entry_ptr != cache_ptr->pel_head_ptr) &&
            ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")

        if ((entry_ptr != cache_ptr->pel_tail_ptr) &&
            ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")

        if (!entry_ptr->is_pinned)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")

        if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache))
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")

        len++;
        size += entry_ptr->size;
        entry_ptr = entry_ptr->next;
    }

    if ((cache_ptr->pel_len != len) || (cache_ptr->pel_size != size))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")

done:
    if (ret_value != SUCCEED)
        HDassert(0);

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__validate_pinned_entry_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

/*-------------------------------------------------------------------------
 *
 * Function:    H5C__validate_protected_entry_list
 *
 * Purpose:     Debugging function that scans the protected entry list for
 *              errors.
 *
 *        If an error is detected, the function generates a
 *        diagnostic and returns FAIL.  If no error is detected,
 *        the function returns SUCCEED.
 *
 * Return:      FAIL if error is detected, SUCCEED otherwise.
 *
 * Programmer:  John Mainzer, 4/25/14
 *
 *-------------------------------------------------------------------------
 */
#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t
H5C__validate_protected_entry_list(H5C_t *cache_ptr)
{
    int32_t            len       = 0;
    size_t             size      = 0;
    H5C_cache_entry_t *entry_ptr = NULL;
    herr_t             ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);

    if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
        (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")

    if (cache_ptr->pl_len < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")

    if ((cache_ptr->pl_len == 1) &&
        ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
         (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size)))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")

    if ((cache_ptr->pl_len >= 1) &&
        ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
         (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL)))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")

    entry_ptr = cache_ptr->pl_head_ptr;
    while (entry_ptr != NULL) {
        if ((entry_ptr != cache_ptr->pl_head_ptr) &&
            ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")

        if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
            ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")

        if (!entry_ptr->is_protected)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")

        if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0))
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")

        len++;
        size += entry_ptr->size;
        entry_ptr = entry_ptr->next;
    }

    if ((cache_ptr->pl_len != len) || (cache_ptr->pl_size != size))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")

done:
    if (ret_value != SUCCEED)
        HDassert(0);

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__validate_protected_entry_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

/*-------------------------------------------------------------------------
 *
 * Function:    H5C__entry_in_skip_list
 *
 * Purpose:     Debugging function that scans the skip list to see if the
 *        target entry is present in it.  We need this, as it is
 *        possible for an entry to be in the skip list twice.
 *
 * Return:      FALSE if the entry is not in the skip list, and TRUE
 *        if it is.
 *
 * Programmer:  John Mainzer, 11/1/14
 *
 *-------------------------------------------------------------------------
 */
#if H5C_DO_SLIST_SANITY_CHECKS
static hbool_t
H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
{
    H5SL_node_t *node_ptr;
    hbool_t      in_slist;

    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->slist_ptr);

    node_ptr = H5SL_first(cache_ptr->slist_ptr);
    in_slist = FALSE;
    while ((node_ptr != NULL) && (!in_slist)) {
        H5C_cache_entry_t *entry_ptr;

        entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);

        HDassert(entry_ptr);
        HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
        HDassert(entry_ptr->is_dirty);
        HDassert(entry_ptr->in_slist);

        if (entry_ptr == target_ptr)
            in_slist = TRUE;
        else
            node_ptr = H5SL_next(node_ptr);
    }

    return (in_slist);
} /* H5C__entry_in_skip_list() */
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
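
/* Usage sketch (illustrative only, assuming the slist sanity checks are
 * enabled): a caller can guard against double insertion with something
 * like
 *
 *     #if H5C_DO_SLIST_SANITY_CHECKS
 *         HDassert(!H5C__entry_in_skip_list(cache_ptr, entry_ptr));
 *     #endif
 *
 * before adding entry_ptr to the skip list.  A full linear scan, rather
 * than a single keyed H5SL_search(), is used above precisely because a
 * duplicated entry may be filed under a stale key.
 */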

/*-------------------------------------------------------------------------
 *
 * Function:    H5C__flush_marked_entries
 *
 * Purpose:     Flushes all marked entries in the cache.
 *
 * Return:      FAIL if error is detected, SUCCEED otherwise.
 *
 * Programmer:  Mike McGreevy
 *              November 3, 2010
 *
 * Changes:     Modified function to set up the slist before calling
 *              H5C_flush_cache(), and take it down afterwards.  Note
 *              that the slist need not be empty after the call to
 *              H5C_flush_cache() since we are only flushing marked
 *              entries.  Thus we must set the clear_slist parameter
 *              of H5C_set_slist_enabled to TRUE.
 *
 *                                              JRM -- 5/6/20
 *
 *-------------------------------------------------------------------------
 */

herr_t
H5C__flush_marked_entries(H5F_t *f)
{
    herr_t ret_value = SUCCEED;

    FUNC_ENTER_PACKAGE

    /* Assertions */
    HDassert(f != NULL);

    /* Enable the slist, as it is needed in the flush */
    if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")

    /* Flush all marked entries */
    if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")

    /* Disable the slist.  Set the clear_slist parameter to TRUE
     * since we called H5C_flush_cache() with the
     * H5C__FLUSH_MARKED_ENTRIES_FLAG.
     */
    if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")

done:

    FUNC_LEAVE_NOAPI(ret_value)

} /* H5C__flush_marked_entries */

/*-------------------------------------------------------------------------
 *
 * Function:    H5C_cork
 *
 * Purpose:     To cork/uncork/get cork status of an object depending on "action":
 *        H5C__SET_CORK:
 *            To cork the object
 *            Return error if the object is already corked
 *        H5C__UNCORK:
 *            To uncork the object
 *            Return error if the object is not corked
 *        H5C__GET_CORKED:
 *            To retrieve the cork status of an object in
 *            the parameter "corked"
 *
 * Return:      Success:        Non-negative
 *              Failure:        Negative
 *
 * Programmer:  Vailin Choi
 *        January 2014
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
{
    H5C_tag_info_t *tag_info; /* Points to a tag info struct */
    herr_t          ret_value = SUCCEED;

    FUNC_ENTER_NOAPI_NOINIT

    /* Assertions */
    HDassert(cache_ptr != NULL);
    HDassert(H5F_addr_defined(obj_addr));
    HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED);

    /* Search the list of corked object addresses in the cache */
    tag_info = (H5C_tag_info_t *)H5SL_search(cache_ptr->tag_list, &obj_addr);

    if (H5C__GET_CORKED == action) {
        HDassert(corked);
        if (tag_info != NULL && tag_info->corked)
            *corked = TRUE;
        else
            *corked = FALSE;
    } /* end if */
    else {
        /* Sanity check */
        HDassert(H5C__SET_CORK == action || H5C__UNCORK == action);

        /* Perform appropriate action */
        if (H5C__SET_CORK == action) {
            /* Check if this is the first entry for this tagged object */
            if (NULL == tag_info) {
                /* Allocate new tag info struct */
                if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t)))
                    HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry")

                /* Set the tag for all entries */
                tag_info->tag = obj_addr;

                /* Insert tag info into skip list */
                if (H5SL_insert(cache_ptr->tag_list, tag_info, &(tag_info->tag)) < 0)
                    HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert tag info in skip list")
            } /* end if */
            else {
                /* Check for object already corked */
                if (tag_info->corked)
                    HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked")
                HDassert(tag_info->entry_cnt > 0 && tag_info->head);
            } /* end else */

            /* Set the corked status for the entire object */
            tag_info->corked = TRUE;
            cache_ptr->num_objs_corked++;

        } /* end if */
        else {
            /* Sanity check */
            HDassert(tag_info);

            /* Check for already uncorked */
            if (!tag_info->corked)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked")

            /* Set the corked status for the entire object */
            tag_info->corked = FALSE;
            cache_ptr->num_objs_corked--;

            /* Remove the tag info from the tag list, if there are no more entries with this tag */
            if (0 == tag_info->entry_cnt) {
                /* Sanity check */
                HDassert(NULL == tag_info->head);

                if (H5SL_remove(cache_ptr->tag_list, &(tag_info->tag)) != tag_info)
                    HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove tag info from list")

                /* Release the tag info */
                tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);
            } /* end if */
            else
                HDassert(NULL != tag_info->head);
        } /* end else */
    }     /* end else */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_cork() */
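
/* Usage sketch (illustrative only): a cork is set, queried, and released
 * through the single entry point above, with "corked" only consulted for
 * the query action (it may be NULL for set/uncork):
 *
 *     hbool_t corked = FALSE;
 *
 *     if (H5C_cork(cache_ptr, obj_addr, H5C__SET_CORK, NULL) < 0)
 *         // handle error -- the object may already be corked
 *     if (H5C_cork(cache_ptr, obj_addr, H5C__GET_CORKED, &corked) < 0)
 *         // handle error
 *     HDassert(corked);
 *     if (H5C_cork(cache_ptr, obj_addr, H5C__UNCORK, NULL) < 0)
 *         // handle error -- the object may not be corked
 */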

/*-------------------------------------------------------------------------
 * Function:    H5C__mark_flush_dep_dirty()
 *
 * Purpose:     Recursively propagate the flush_dep_ndirty_children flag
 *              up the dependency chain in response to an entry either
 *              becoming dirty or having its flush_dep_ndirty_children
 *              increased from 0.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Neil Fortner
 *              11/13/12
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry)
{
    unsigned u;                   /* Local index variable */
    herr_t   ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(entry);

    /* Iterate over the parent entries, if any */
    for (u = 0; u < entry->flush_dep_nparents; u++) {
        /* Sanity check */
        HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children <
                 entry->flush_dep_parent[u]->flush_dep_nchildren);

        /* Adjust the parent's number of dirty children */
        entry->flush_dep_parent[u]->flush_dep_ndirty_children++;

        /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
        if (entry->flush_dep_parent[u]->type->notify &&
            (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED,
                                                       entry->flush_dep_parent[u]) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
                        "can't notify parent about child entry dirty flag set")
    } /* end for */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__mark_flush_dep_dirty() */

/*-------------------------------------------------------------------------
 * Function:    H5C__mark_flush_dep_clean()
 *
 * Purpose:     Recursively propagate the flush_dep_ndirty_children flag
 *              up the dependency chain in response to an entry either
 *              becoming clean or having its flush_dep_ndirty_children
 *              reduced to 0.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Neil Fortner
 *              11/13/12
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry)
{
    int    i;                   /* Local index variable */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(entry);

    /* Iterate over the parent entries, if any */
    /* Note reverse iteration order, in case the callback removes the flush
     *  dependency - QAK, 2017/08/12
     */
    for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) {
        /* Sanity check */
        HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);

        /* Adjust the parent's number of dirty children */
        entry->flush_dep_parent[i]->flush_dep_ndirty_children--;

        /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
        if (entry->flush_dep_parent[i]->type->notify &&
            (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED,
                                                       entry->flush_dep_parent[i]) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
                        "can't notify parent about child entry dirty flag reset")
    } /* end for */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__mark_flush_dep_clean() */
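
/* Worked example (illustrative only): suppose entry C has a single flush
 * dependency parent P, and P currently has flush_dep_ndirty_children == 0.
 * When C is dirtied, H5C__mark_flush_dep_dirty(C) bumps P's count to 1 and
 * (if P's class defines a notify callback) delivers
 * H5C_NOTIFY_ACTION_CHILD_DIRTIED to P.  When C is later cleaned,
 * H5C__mark_flush_dep_clean(C) drops the count back to 0 and delivers
 * H5C_NOTIFY_ACTION_CHILD_CLEANED.  A parent thus has unflushed children
 * exactly when its flush_dep_ndirty_children is greater than zero.
 */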

/*-------------------------------------------------------------------------
 * Function:    H5C__mark_flush_dep_serialized()
 *
 * Purpose:     Decrement the flush_dep_nunser_children fields of all the
 *        target entry's flush dependency parents in response to
 *        the target entry becoming serialized.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              8/30/16
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
{
    int    i;                   /* Local index variable */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(entry_ptr);

    /* Iterate over the parent entries, if any */
    /* Note reverse iteration order, in case the callback removes the flush
     *  dependency - QAK, 2017/08/12
     */
    for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
        /* Sanity checks */
        HDassert(entry_ptr->flush_dep_parent);
        HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
        HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);

        /* Decrement the parent's number of unserialized children */
        entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;

        /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
        if (entry_ptr->flush_dep_parent[i]->type->notify &&
            (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED,
                                                           entry_ptr->flush_dep_parent[i]) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
                        "can't notify parent about child entry serialized flag set")
    } /* end for */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__mark_flush_dep_serialized() */

/*-------------------------------------------------------------------------
 * Function:    H5C__mark_flush_dep_unserialized()
 *
 * Purpose:     Increment the flush_dep_nunser_children fields of all the
 *              target entry's flush dependency parents in response to
 *              the target entry becoming unserialized.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              8/30/16
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
{
    unsigned u;                   /* Local index variable */
    herr_t   ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(entry_ptr);

    /* Iterate over the parent entries, if any */
    for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
        /* Sanity check */
        HDassert(entry_ptr->flush_dep_parent);
        HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
        HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
                 entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);

        /* Increment the parent's number of unserialized children */
        entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;

        /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
        if (entry_ptr->flush_dep_parent[u]->type->notify &&
            (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
                                                           entry_ptr->flush_dep_parent[u]) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
                        "can't notify parent about child entry serialized flag reset")
    } /* end for */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__mark_flush_dep_unserialized() */
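
/* Worked example (illustrative only): the serialized/unserialized pair
 * mirrors the dirty/clean pair above.  While a child C of parent P has an
 * out of date image, P's flush_dep_nunser_children is non-zero and P
 * cannot safely be serialized; once C's image is regenerated,
 * H5C__mark_flush_dep_serialized(C) decrements the count and notifies P
 * with H5C_NOTIFY_ACTION_CHILD_SERIALIZED.  H5C__serialize_ring() below
 * relies on this invariant when it selects only entries with
 * flush_dep_nunser_children == 0 for serialization.
 */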

#ifndef NDEBUG
/*-------------------------------------------------------------------------
 * Function:    H5C__assert_flush_dep_nocycle()
 *
 * Purpose:     Assert recursively that base_entry is not the same as
 *              entry, and perform the same assertion on all of entry's
 *              flush dependency parents.  This is used to detect cycles
 *              created by flush dependencies.
 *
 * Return:      void
 *
 * Programmer:  Neil Fortner
 *              12/10/12
 *
 *-------------------------------------------------------------------------
 */
static void
H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry)
{
    unsigned u; /* Local index variable */

    FUNC_ENTER_STATIC_NOERR

    /* Sanity checks */
    HDassert(entry);
    HDassert(base_entry);

    /* Make sure the entries are not the same */
    HDassert(base_entry != entry);

    /* Iterate over entry's parents (if any) */
    for (u = 0; u < entry->flush_dep_nparents; u++)
        H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);

    FUNC_LEAVE_NOAPI_VOID
} /* H5C__assert_flush_dep_nocycle() */
#endif /* NDEBUG */
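
/* Usage note (illustrative only, assuming a debug build): when a new
 * flush dependency is created between a parent and a child, a call along
 * the lines of
 *
 *     H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
 *
 * walks every ancestor of parent_entry and asserts that none of them is
 * child_entry, so a dependency that would close a cycle trips an
 * assertion failure at creation time rather than hanging a later flush.
 */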

/*-------------------------------------------------------------------------
 * Function:    H5C__serialize_cache
 *
 * Purpose:    Serialize (i.e. construct an on-disk image for) all entries
 *        in the metadata cache, including clean entries.
 *
 *        Note that flush dependencies and "flush me last" flags
 *        must be observed in the serialization process.
 *
 *        Note also that entries may be loaded, flushed, evicted,
 *        expunged, relocated, resized, or removed from the cache
 *        during this process, just as these actions may occur during
 *        a regular flush.
 *
 *        However, we are given that the cache will contain no protected
 *        entries on entry to this routine (although entries may be
 *        briefly protected and then unprotected during the serialize
 *        process).
 *
 *        The objective of this routine is to serialize all entries and
 *        to force them into their actual locations on disk.
 *
 *        The initial need for this routine is to settle all entries
 *        in the cache prior to construction of the metadata cache
 *        image so that the size of the cache image can be calculated.
 *        However, I gather that other uses for the routine are
 *        under consideration.
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *        a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *        7/22/15
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C__serialize_cache(H5F_t *f)
{
#if H5C_DO_SANITY_CHECKS
    int      i;
    uint32_t index_len        = 0;
    size_t   index_size       = (size_t)0;
    size_t   clean_index_size = (size_t)0;
    size_t   dirty_index_size = (size_t)0;
    size_t   slist_size       = (size_t)0;
    uint32_t slist_len        = 0;
#endif /* H5C_DO_SANITY_CHECKS */
    H5C_ring_t ring;
    H5C_t *    cache_ptr;
    herr_t     ret_value = SUCCEED;

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->slist_ptr);

#if H5C_DO_SANITY_CHECKS
    HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
    HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
    HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);

    for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
        index_len += cache_ptr->index_ring_len[i];
        index_size += cache_ptr->index_ring_size[i];
        clean_index_size += cache_ptr->clean_index_ring_size[i];
        dirty_index_size += cache_ptr->dirty_index_ring_size[i];

        slist_len += cache_ptr->slist_ring_len[i];
        slist_size += cache_ptr->slist_ring_size[i];
    } /* end for */

    HDassert(cache_ptr->index_len == index_len);
    HDassert(cache_ptr->index_size == index_size);
    HDassert(cache_ptr->clean_index_size == clean_index_size);
    HDassert(cache_ptr->dirty_index_size == dirty_index_size);
    HDassert(cache_ptr->slist_len == slist_len);
    HDassert(cache_ptr->slist_size == slist_size);
#endif /* H5C_DO_SANITY_CHECKS */

#if H5C_DO_EXTREME_SANITY_CHECKS
    if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
        (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

#ifndef NDEBUG
    /* If this is a debug build, set the serialization_count field of
     * each entry in the cache to zero before we start the serialization.
     * This allows us to detect the case in which any entry is serialized
     * more than once (a performance issue), and more importantly, the
     * case in which any flush dependency parent is serialized more than
     * once (a correctness issue).
     */
    {
        H5C_cache_entry_t *scan_ptr = NULL;

        scan_ptr = cache_ptr->il_head;
        while (scan_ptr != NULL) {
            HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            scan_ptr->serialization_count = 0;
            scan_ptr                      = scan_ptr->il_next;
        } /* end while */
    }     /* end block */
#endif    /* NDEBUG */

    /* Set cache_ptr->serialization_in_progress to TRUE, and back
     * to FALSE at the end of the function.  Must maintain this flag
     * to support H5C_get_serialization_in_progress(), which is in
     * turn required to support sanity checking in some cache
     * clients.
     */
    HDassert(!cache_ptr->serialization_in_progress);
    cache_ptr->serialization_in_progress = TRUE;

    /* Serialize each ring, starting from the outermost ring and
     * working inward.
     */
    ring = H5C_RING_USER;
    while (ring < H5C_RING_NTYPES) {
        HDassert(cache_ptr->close_warning_received);
        switch (ring) {
            case H5C_RING_USER:
                break;

            case H5C_RING_RDFSM:
                /* Settle raw data FSM */
                if (!cache_ptr->rdfsm_settled)
                    if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
                break;

            case H5C_RING_MDFSM:
                /* Settle metadata FSM */
                if (!cache_ptr->mdfsm_settled)
                    if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
                break;

            case H5C_RING_SBE:
            case H5C_RING_SB:
                break;

            default:
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
                break;
        } /* end switch */

        if (H5C__serialize_ring(f, ring) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")

        ring++;
    } /* end while */

#ifndef NDEBUG
    /* Verify that no entry has been serialized more than once.
     * FD parents with multiple serializations should have been caught
     * elsewhere, so no specific check for them here.
     */
    {
        H5C_cache_entry_t *scan_ptr = NULL;

        scan_ptr = cache_ptr->il_head;
        while (scan_ptr != NULL) {
            HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            HDassert(scan_ptr->serialization_count <= 1);

            scan_ptr = scan_ptr->il_next;
        } /* end while */
    }     /* end block */
#endif    /* NDEBUG */

done:
    cache_ptr->serialization_in_progress = FALSE;
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__serialize_cache() */

/*-------------------------------------------------------------------------
 * Function:    H5C__serialize_ring
 *
 * Purpose:     Serialize the entries contained in the specified cache and
 *              ring.  All entries in rings outside the specified ring
 *              must have been serialized on entry.
 *
 *              If the cache contains protected entries in the specified
 *              ring, the function will fail, as protected entries cannot
 *              be serialized.  However all unprotected entries in the
 *        target ring should be serialized before the function
 *        returns failure.
 *
 *              If flush dependencies appear in the target ring, the
 *              function makes repeated passes through the index list
 *        serializing entries in flush dependency order.
 *
 *        All entries outside the H5C_RING_SBE are marked for
 *        inclusion in the cache image.  Entries in H5C_RING_SBE
 *        and below are marked for exclusion from the image.
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *              a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *              9/11/15
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
{
    hbool_t            done = FALSE;
    H5C_t *            cache_ptr;
    H5C_cache_entry_t *entry_ptr;
    herr_t             ret_value = SUCCEED;

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(ring > H5C_RING_UNDEFINED);
    HDassert(ring < H5C_RING_NTYPES);

    HDassert(cache_ptr->serialization_in_progress);

    /* The objective here is to serialize all entries in the cache ring
     * in flush dependency order.
     *
     * The basic algorithm is to scan the cache index list looking for
     * unserialized entries that are either not in a flush dependency
     * relationship, or which have no unserialized children.  Any such
     * entry is serialized and its flush dependency parents (if any) are
     * informed -- allowing them to decrement their unserialized child counts.
     *
     * However, this algorithm is complicated by the ability
     * of client serialization callbacks to perform operations
     * on the cache which can result in the insertion, deletion,
     * relocation, resize, dirty, flush, eviction, or removal (via the
     * take ownership flag) of entries.  Changes in the flush dependency
     * structure are also possible.
     *
     * On the other hand, the algorithm is simplified by the fact that
     * we are serializing, not flushing.  Thus, as long as all entries
     * are serialized correctly, it doesn't matter if we have to go back
     * and serialize an entry a second time.
     *
     * These possible actions result in the following modifications to
     * the basic algorithm:
     *
     * 1) In the event of an entry expunge, eviction or removal, we must
     *    restart the scan as it is possible that the next entry in our
     *    scan is no longer in the cache.  Were we to examine this entry,
     *    we would be accessing deallocated memory.
     *
     * 2) A resize, dirty, or insertion of an entry may result in the
     *    increment of a flush dependency parent's dirty and/or
     *    unserialized child count.  In the context of serializing the
     *    cache, this is a non-issue, as even if we have already
     *    serialized the parent, it will be marked dirty and its image
     *    marked out of date if appropriate when the child is serialized.
     *
     *    However, this is a major issue for a flush, as were this to happen
     *    in a flush, it would violate the invariant that the flush dependency
     *    feature is intended to enforce.  As the metadata cache has no
     *    control over the behavior of cache clients, it has no way of
     *    preventing this behavior.  However, it should detect it if at all
     *    possible.
     *
     *    Do this by maintaining a count of the number of times each entry is
     *    serialized during a cache serialization.  If any flush dependency
     *    parent is serialized more than once, throw an assertion failure.
     *
     * 3) An entry relocation will typically change the location of the
     *    entry in the index list.  This shouldn't cause problems as we
     *    will scan the index list until we make a complete pass without
     *    finding anything to serialize -- making relocations of either
     *    the current or next entries irrelevant.
     *
     *    Note that since a relocation may result in our skipping part of
     *    the index list, we must always do at least one more pass through
     *    the index list after an entry relocation.
     *
     * 4) Changes in the flush dependency structure are possible on
     *    entry insertion, load, expunge, evict, or remove.  Destruction
     *    of a flush dependency has no effect, as it can only relax the
     *    flush dependencies.  Creation of a flush dependency can create
     *    an unserialized child of a flush dependency parent where all
     *    flush dependency children were previously serialized.  Should
     *    this child dirty the flush dependency parent when it is serialized,
     *    the parent will be re-serialized.
     *
     *    Per the discussion of 2) above, this is a non-issue for cache
     *    serialization, and a major problem for cache flush.  Using the
     *    same detection mechanism, throw an assertion failure if this
     *    condition appears.
     *
     * Observe that either eviction or removal of entries as a result of
     * a serialization is not a problem as long as the flush dependency
     * tree does not change beyond the removal of a leaf.
     */
    while (!done) {
        /* Reset the counters so that we can detect insertions, loads,
         * moves, and flush dependency height changes caused by the pre_serialize
         * and serialize callbacks.
         */
        cache_ptr->entries_loaded_counter    = 0;
        cache_ptr->entries_inserted_counter  = 0;
        cache_ptr->entries_relocated_counter = 0;

        done      = TRUE; /* set to FALSE if any activity in inner loop */
        entry_ptr = cache_ptr->il_head;
        while (entry_ptr != NULL) {
            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);

            /* Verify that either the entry is already serialized, or
             * that it is assigned to either the target or an inner
             * ring.
             */
            HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));

            /* Skip flush me last entries or inner ring entries */
            if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) {

                /* if we encounter an unserialized entry in the current
                 * ring that is not marked flush me last, we are not done.
                 */
                if (!entry_ptr->image_up_to_date)
                    done = FALSE;

                /* Serialize the entry if its image is not up to date
                 * and it has no unserialized flush dependency children.
                 */
                if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
                    HDassert(entry_ptr->serialization_count == 0);

                    /* Serialize the entry */
                    if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")

                    HDassert(entry_ptr->flush_dep_nunser_children == 0);
                    HDassert(entry_ptr->serialization_count == 0);

#ifndef NDEBUG
                    /* Increment serialization counter (to detect multiple serializations) */
                    entry_ptr->serialization_count++;
#endif            /* NDEBUG */
                } /* end if */
            }     /* end if */

            /* Check for the cache being perturbed during the entry serialize */
            if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) ||
                (cache_ptr->entries_relocated_counter > 0)) {

#if H5C_COLLECT_CACHE_STATS
                H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
#endif /* H5C_COLLECT_CACHE_STATS */

                /* Reset the counters */
                cache_ptr->entries_loaded_counter    = 0;
                cache_ptr->entries_inserted_counter  = 0;
                cache_ptr->entries_relocated_counter = 0;

                /* Restart scan */
                entry_ptr = cache_ptr->il_head;
            } /* end if */
            else
                /* Advance to next entry */
                entry_ptr = entry_ptr->il_next;
        } /* while ( entry_ptr != NULL ) */
    }     /* while ( ! done ) */

    /* Reset the counters so that we can detect insertions, loads,
     * moves, and flush dependency height changes caused by the pre_serialize
     * and serialize callbacks.
     */
    cache_ptr->entries_loaded_counter    = 0;
    cache_ptr->entries_inserted_counter  = 0;
    cache_ptr->entries_relocated_counter = 0;

    /* At this point, all entries not marked "flush me last" and in
     * the current ring or outside it should be serialized and have up
     * to date images.  Scan the index list again to serialize the
     * "flush me last" entries (if they are in the current ring) and to
     * verify that all other entries have up to date images.
     */
    entry_ptr = cache_ptr->il_head;
    while (entry_ptr != NULL) {
        HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
        HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
        HDassert(entry_ptr->ring < H5C_RING_NTYPES);
        HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));

        if (entry_ptr->ring == ring) {
            if (entry_ptr->flush_me_last) {
                if (!entry_ptr->image_up_to_date) {
                    HDassert(entry_ptr->serialization_count == 0);
                    HDassert(entry_ptr->flush_dep_nunser_children == 0);

                    /* Serialize the entry */
                    if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")

                    /* Check for the cache changing */
                    if ((cache_ptr->entries_loaded_counter > 0) ||
                        (cache_ptr->entries_inserted_counter > 0) ||
                        (cache_ptr->entries_relocated_counter > 0))
                        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
                                    "flush_me_last entry serialization triggered restart")

                    HDassert(entry_ptr->flush_dep_nunser_children == 0);
                    HDassert(entry_ptr->serialization_count == 0);
#ifndef NDEBUG
                    /* Increment serialization counter (to detect multiple serializations) */
                    entry_ptr->serialization_count++;
#endif            /* NDEBUG */
                } /* end if */
            }     /* end if */
            else {
                HDassert(entry_ptr->image_up_to_date);
                HDassert(entry_ptr->serialization_count <= 1);
                HDassert(entry_ptr->flush_dep_nunser_children == 0);
            } /* end else */
        }     /* if ( entry_ptr->ring == ring ) */

        entry_ptr = entry_ptr->il_next;
    } /* while ( entry_ptr != NULL ) */

done:
    HDassert(cache_ptr->serialization_in_progress);
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__serialize_ring() */

/*-------------------------------------------------------------------------
 * Function:    H5C__serialize_single_entry
 *
 * Purpose:     Serialize the cache entry pointed to by the entry_ptr
 *        parameter.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer, 7/24/15
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
{
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(f);
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(entry_ptr);
    HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
    HDassert(!entry_ptr->prefetched);
    HDassert(!entry_ptr->image_up_to_date);
    HDassert(entry_ptr->is_dirty);
    HDassert(!entry_ptr->is_protected);
    HDassert(!entry_ptr->flush_in_progress);
    HDassert(entry_ptr->type);

    /* Set entry_ptr->flush_in_progress to TRUE so that the target entry
     * will not be evicted out from under us.  Must set it back to FALSE
     * when we are done.
     */
    entry_ptr->flush_in_progress = TRUE;

    /* Allocate buffer for the entry image if required. */
    if (NULL == entry_ptr->image_ptr) {
        HDassert(entry_ptr->size > 0);
        if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
            HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
        H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
                    H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
    }  /* end if */

    /* Generate image for entry */
    if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")

    /* Reset the flush_in_progress flag */
    entry_ptr->flush_in_progress = FALSE;

done:
    HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
    HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__serialize_single_entry() */
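
/* Note (illustrative only): when H5C_DO_MEMORY_SANITY_CHECKS is enabled,
 * every image buffer is over-allocated by H5C_IMAGE_EXTRA_SPACE bytes and
 * the guard string H5C_IMAGE_SANITY_VALUE is copied just past the entry's
 * image:
 *
 *     |<---------- entry_ptr->size ---------->|<-- extra space -->|
 *     [        serialized entry image         ][   guard bytes    ]
 *
 * H5C__generate_image() below verifies after the serialize callback
 * returns that the guard bytes are intact, catching client callbacks
 * that overrun the image buffer.
 */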
8900 
8901 /*-------------------------------------------------------------------------
8902  * Function:    H5C__generate_image
8903  *
8904  * Purpose:     Serialize an entry and generate its image.
8905  *
8906  * Note:        This may cause the entry to be re-sized and/or moved in
8907  *              the cache.
8908  *
8909  *              As we will not update the metadata cache's data structures
8910  *              until we we finish the write, we must touch up these
8911  *              data structures for size and location changes even if we
8912  *              are about to delete the entry from the cache (i.e. on a
8913  *              flush destroy).
8914  *
8915  * Return:      Non-negative on success/Negative on failure
8916  *
8917  * Programmer:  Mohamad Chaarawi
8918  *              2/10/16
8919  *
8920  * Changes:     Updated sanity checks for the possibility that the skip
8921  *              list is disabled.
8922  *                                        JRM 5/16/20
8923  *
8924  *-------------------------------------------------------------------------
8925  */
8926 static herr_t
H5C__generate_image(H5F_t * f,H5C_t * cache_ptr,H5C_cache_entry_t * entry_ptr)8927 H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
8928 {
8929     haddr_t  new_addr        = HADDR_UNDEF;
8930     haddr_t  old_addr        = HADDR_UNDEF;
8931     size_t   new_len         = 0;
8932     unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
8933     herr_t   ret_value       = SUCCEED;
8934 
8935     FUNC_ENTER_STATIC
8936 
8937     /* Sanity check */
8938     HDassert(f);
8939     HDassert(cache_ptr);
8940     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
8941     HDassert(entry_ptr);
8942     HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
8943     HDassert(!entry_ptr->image_up_to_date);
8944     HDassert(entry_ptr->is_dirty);
8945     HDassert(!entry_ptr->is_protected);
8946     HDassert(entry_ptr->type);
8947 
8948     /* make note of the entry's current address */
8949     old_addr = entry_ptr->addr;
8950 
8951     /* Call client's pre-serialize callback, if there's one */
8952     if ((entry_ptr->type->pre_serialize) &&
8953         ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr,
8954                                           &new_len, &serialize_flags) < 0))
8955 
8956         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
8957 
8958     /* Check for any flags set in the pre-serialize callback */
8959     if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
8960 
8961         /* Check for unexpected flags from serialize callback */
8962         if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
8963 
8964             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
8965 
8966 #ifdef H5_HAVE_PARALLEL
8967         /* In the parallel case, resizes and moves in
8968          * the serialize operation can cause problems.
8969          * If they occur, scream and die.
8970          *
8971          * At present, in the parallel case, the aux_ptr
8972          * will only be set if there is more than one
8973          * process.  Thus we can use this to detect
8974          * the parallel case.
8975          *
8976          * This works for now, but if we start using the
8977          * aux_ptr for other purposes, we will have to
8978          * change this test accordingly.
8979          *
8980          * NB: While this test detects entryies that attempt
8981          *     to resize or move themselves during a flush
8982          *     in the parallel case, it will not detect an
8983          *     entry that dirties, resizes, and/or moves
8984          *     other entries during its flush.
8985          *
8986          *     From what Quincey tells me, this test is
8987          *     sufficient for now, as any flush routine that
8988          *     does the latter will also do the former.
8989          *
8990          *     If that ceases to be the case, further
8991          *     tests will be necessary.
8992          */
8993         if (cache_ptr->aux_ptr != NULL)
8994 
8995             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
8996 #endif
8997 
8998         /* If required, resize the buffer and update the entry and the cache
8999          * data structures
9000          */
9001         if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
9002 
9003             /* Sanity check */
9004             HDassert(new_len > 0);
9005 
9006             /* Allocate a new image buffer */
9007             if (NULL ==
9008                 (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
9009 
9010                 HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
9011                             "memory allocation failed for on disk image buffer")
9012 
9013 #if H5C_DO_MEMORY_SANITY_CHECKS
9014             H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
9015                         H5C_IMAGE_EXTRA_SPACE);
9016 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
9017 
9018             /* Update statistics for resizing the entry */
9019             H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
9020 
9021             /* Update the hash table for the size change */
9022             H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
9023                                               !(entry_ptr->is_dirty));
9024 
9025             /* The entry can't be protected since we are in the process of
9026              * flushing it.  Thus we must update the replacement policy data
9027              * structures for the size change.  The macro deals with the pinned
9028              * case.
9029              */
9030             H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
9031 
            /* As we haven't updated the cache data structures for
             * the flush or flush destroy yet, the entry should
             * be in the slist if the slist is enabled.  Since
             * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
             * slist is disabled, call it unconditionally.
             */
            HDassert(entry_ptr->is_dirty);
            HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled));

            H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);

            /* Finally, update the entry for its new size */
            entry_ptr->size = new_len;

        } /* end if */
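
        /* Illustrative note on the ordering above (a sketch, not additional
         * logic): the update macros consume both the old size, still held in
         * entry_ptr->size, and the new size, e.g.
         *
         *     H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr,
         *                                       entry_ptr->size,   (old size)
         *                                       new_len);          (new size)
         *
         * which is why the assignment entry_ptr->size = new_len must remain
         * the very last step of the resize handling.
         */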

        /* If required, update the entry and the cache data structures
         * for a move
         */
        if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {

            /* Update stats and entries relocated counter */
            H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)

            /* We must update cache data structures for the change in address */
            if (entry_ptr->addr == old_addr) {

                /* Delete the entry from the hash table and the slist */
                H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
                H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);

                /* Update the entry for its new address */
                entry_ptr->addr = new_addr;

                /* And then reinsert in the index and slist */
                H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
                H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);

            }      /* end if */
            else { /* move is already done for us -- just do sanity checks */

                HDassert(entry_ptr->addr == new_addr);
            }
        } /* end if */
    }     /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */

    /* Serialize object into buffer */
    if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)

        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")

#if H5C_DO_MEMORY_SANITY_CHECKS
    HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
                           H5C_IMAGE_EXTRA_SPACE));
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */

    entry_ptr->image_up_to_date = TRUE;

    /* Propagate the fact that the entry is serialized up the
     * flush dependency chain if appropriate.  Since the image must
     * have been out of date for this function to have been called
     * (see assertion on entry), no need to check that -- only check
     * for flush dependency parents.
     */
    HDassert(entry_ptr->flush_dep_nunser_children == 0);

    if (entry_ptr->flush_dep_nparents > 0) {

        if (H5C__mark_flush_dep_serialized(entry_ptr) < 0)

            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
    }
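
    /* Illustrative sketch of what the propagation amounts to (the real work
     * is done inside H5C__mark_flush_dep_serialized(); the loop below is a
     * simplified approximation, not the actual implementation):
     *
     *     for (u = 0; u < entry_ptr->flush_dep_nparents; u++)
     *         entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children--;
     *
     * i.e. each flush dependency parent is told that one more of its
     * children now has an up to date on disk image.
     */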

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__generate_image */
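
/* For reference, a minimal sketch of a client serialize callback with the
 * signature used in the call above (the entry type "my_entry_t" and its
 * encoding are hypothetical; real callbacks live with the client code,
 * e.g. in H5B2cache.c):
 *
 *     static herr_t
 *     my_entry_serialize(const H5F_t *f, void *image, size_t len, void *thing)
 *     {
 *         my_entry_t *entry = (my_entry_t *)thing;
 *         uint8_t *   p     = (uint8_t *)image;
 *
 *         *p++ = MY_ENTRY_VERSION;       (must encode at most len bytes)
 *         UINT32ENCODE(p, entry->value);
 *
 *         return SUCCEED;
 *     }
 *
 * Note that resizes and moves must be requested from the pre_serialize
 * callback (via the serialize_flags handled above), never from serialize
 * itself.
 */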

/*-------------------------------------------------------------------------
 *
 * Function:    H5C_remove_entry
 *
 * Purpose:     Remove an entry from the cache.  The entry must not be
 *        protected, pinned, dirty, involved in flush dependencies, etc.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              September 17, 2016
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_remove_entry(void *_entry)
{
    H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */
    H5C_t *            cache;                               /* Cache for file */
    herr_t             ret_value = SUCCEED;                 /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(entry);
    HDassert(entry->ring != H5C_RING_UNDEFINED);
    cache = entry->cache_ptr;
    HDassert(cache);
    HDassert(cache->magic == H5C__H5C_T_MAGIC);

    /* Check for error conditions */
    if (entry->is_dirty)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache")
    if (entry->is_protected)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache")
    if (entry->is_pinned)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache")
    /* NOTE: If these two errors are getting tripped because the entry is
     *          in a flush dependency with a freedspace entry, move the checks
     *          after the "before evict" message is sent, and add the
     *          "child being evicted" message to the "before evict" notify
     *          section below.  QAK - 2017/08/03
     */
    if (entry->flush_dep_nparents > 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
                    "can't remove entry with flush dependency parents from cache")
    if (entry->flush_dep_nchildren > 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
                    "can't remove entry with flush dependency children from cache")

    /* Additional internal cache consistency checks */
    HDassert(!entry->in_slist);
    HDassert(!entry->flush_marker);
    HDassert(!entry->flush_in_progress);

    /* Note that the algorithm below is (very) similar to the set of operations
     * in H5C__flush_single_entry() and should be kept in sync with changes
     * to that code. - QAK, 2016/11/30
     */

    /* Update stats, as if we are "destroying" and taking ownership of the entry */
    H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE)

    /* If the entry's type has a 'notify' callback, send a 'before eviction'
     * notice while the entry is still fully integrated in the cache.
     */
    if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")

    /* Update the cache internal data structures as appropriate for a destroy.
     * Specifically:
     *    1) Delete it from the index
     *    2) Delete it from the collective read access list
     *    3) Update the replacement policy for eviction
     *    4) Remove it from the tag list for this object
     */

    H5C__DELETE_FROM_INDEX(cache, entry, FAIL)

#ifdef H5_HAVE_PARALLEL
    /* Check for collective read access flag */
    if (entry->coll_access) {
        entry->coll_access = FALSE;
        H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
    }  /* end if */
#endif /* H5_HAVE_PARALLEL */

    H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)

    /* Remove entry from tag list */
    if (H5C__untag_entry(cache, entry) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")

    /* Increment entries_removed_counter and set last_entry_removed_ptr.
     * As we may be about to free the entry, recall that last_entry_removed_ptr
     * must NEVER be dereferenced.
     *
     * Recall that these fields are maintained to allow functions that perform
     * scans of lists of entries to detect the unexpected removal of entries
     * (via expunge, eviction, or take ownership at present), so that they can
     * re-start their scans if necessary.
     *
     * Also check if the entry we are watching for removal is being
     * removed (usually the 'next' entry for an iteration) and reset
     * it to indicate that it was removed.
     */
    cache->entries_removed_counter++;
    cache->last_entry_removed_ptr = entry;
    if (entry == cache->entry_watched_for_removal)
        cache->entry_watched_for_removal = NULL;
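
    /* Illustrative sketch of the scan-restart protocol these fields support
     * (pseudo-code; the scanning loop and its variable names are
     * hypothetical):
     *
     *     starting_count = cache->entries_removed_counter;
     *     cache->entry_watched_for_removal = next_entry;
     *     (do something that may remove entries from the cache)
     *     if (cache->entries_removed_counter != starting_count &&
     *         cache->entry_watched_for_removal == NULL)
     *         (next_entry was removed out from under us; restart the scan)
     */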

    /* Internal cache data structures should now be up to date, and
     * consistent with the status of the entry.
     *
     * Now clean up internal cache fields if appropriate.
     */

    /* Free the buffer for the on disk image */
    if (entry->image_ptr != NULL)
        entry->image_ptr = H5MM_xfree(entry->image_ptr);

    /* Reset the pointer to the cache the entry is within */
    entry->cache_ptr = NULL;

    /* Client is taking ownership of the entry.  Set bad magic here so the
     * cache will choke unless the entry is re-inserted properly
     */
    entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_remove_entry() */
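
/* For reference, a minimal usage sketch for H5C_remove_entry() (hypothetical
 * caller; within the library the usual route is a wrapper such as
 * H5AC_remove_entry()).  The entry must already be clean, unprotected,
 * unpinned, and free of flush dependencies, per the checks above:
 *
 *     if (H5C_remove_entry(entry) < 0)
 *         HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
 *                     "can't take ownership of cache entry")
 *
 * On success the caller owns the entry and is responsible for freeing it;
 * the bad magic set above ensures the cache rejects the stale pointer if it
 * is used without being properly re-inserted.
 */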