/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by The HDF Group.                                               *
 * Copyright by the Board of Trustees of the University of Illinois.         *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5.  The full HDF5 copyright notice, including     *
 * terms governing use, modification, and redistribution, is contained in    *
 * the COPYING file, which can be found at the root of the source code       *
 * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.  *
 * If you do not have access to either file, you may request a copy from     *
 * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*-------------------------------------------------------------------------
 *
 * Created:     H5C.c
 *              June 1 2004
 *              John Mainzer
 *
 * Purpose:     Functions in this file implement a generic cache for
 *              things which exist on disk, and which may be
 *              unambiguously referenced by their disk addresses.
 *
 *              The code in this module was initially written in
 *              support of a complete re-write of the metadata cache
 *              in H5AC.c.  However, other uses for the cache code
 *              suggested themselves, and thus this file was created
 *              in an attempt to support re-use.
 *
 *              For a detailed overview of the cache, please see the
 *              header comment for H5C_t in H5Cpkg.h.
 *
 *-------------------------------------------------------------------------
 */

/**************************************************************************
 *
 *                              To Do:
 *
 *      Code Changes:
 *
 *       - Remove extra functionality in H5C__flush_single_entry()?
 *
 *       - Change protect/unprotect to lock/unlock.
 *
 *       - Flush entries in increasing address order in
 *         H5C__make_space_in_cache().
 *
 *       - Also in H5C__make_space_in_cache(), use high and low water marks
 *         to reduce the number of I/O calls.
 *
 *       - When flushing, attempt to combine contiguous entries to reduce
 *         I/O overhead.  Can't do this just yet as some entries are not
 *         contiguous.  Do this in parallel only or in serial as well?
 *
 *       - Create MPI type for dirty objects when flushing in parallel.
 *
 *       - Now that TBBT routines aren't used, fix nodes in memory to
 *         point directly to the skip list node from the LRU list, eliminating
 *         skip list lookups when evicting objects from the cache.
 *
 *      Tests:
 *
 *       - Trim execution time.  (This is no longer a major issue with the
 *         shift from the TBBT to a hash table for indexing.)
 *
 *       - Add random tests.
 *
 **************************************************************************/

/****************/
/* Module Setup */
/****************/

#include "H5Cmodule.h"          /* This source code file is part of the H5C module */
#define H5F_FRIEND              /* suppress error about including H5Fpkg  */


/***********/
/* Headers */
/***********/
#include "H5private.h"          /* Generic Functions                    */
#include "H5Cpkg.h"             /* Cache                                */
#include "H5CXprivate.h"        /* API Contexts                         */
#include "H5Eprivate.h"         /* Error handling                       */
#include "H5Fpkg.h"             /* Files                                */
#include "H5FLprivate.h"        /* Free Lists                           */
#include "H5Iprivate.h"         /* IDs                                  */
#include "H5MFprivate.h"        /* File memory management               */
#include "H5MMprivate.h"        /* Memory management                    */
#include "H5Pprivate.h"         /* Property lists                       */


/****************/
/* Local Macros */
/****************/
#if H5C_DO_MEMORY_SANITY_CHECKS
#define H5C_IMAGE_EXTRA_SPACE 8
#define H5C_IMAGE_SANITY_VALUE "DeadBeef"
#else /* H5C_DO_MEMORY_SANITY_CHECKS */
#define H5C_IMAGE_EXTRA_SPACE 0
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
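
/* Note: these macros suggest that, when H5C_DO_MEMORY_SANITY_CHECKS is
 * enabled, entry image buffers are over-allocated by H5C_IMAGE_EXTRA_SPACE
 * bytes and the guard string H5C_IMAGE_SANITY_VALUE is written just past
 * the end of the image, so that buffer overruns can be detected later.
 * A minimal sketch of the idea (hypothetical helper, not part of this
 * file):
 *
 *     hbool_t
 *     image_guard_intact(const uint8_t *image, size_t len)
 *     {
 *         return(0 == HDmemcmp(image + len, H5C_IMAGE_SANITY_VALUE,
 *                              H5C_IMAGE_EXTRA_SPACE));
 *     }
 */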


/******************/
/* Local Typedefs */
/******************/


/********************/
/* Local Prototypes */
/********************/

static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr,
    H5C_cache_entry_t *entry_ptr);

static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr,
    H5C_cache_entry_t *entry_ptr, hbool_t update_rp);

static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr,
    H5C_cache_entry_t *entry_ptr, hbool_t update_rp);

static herr_t H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted);

static herr_t H5C__autoadjust__ageout(H5F_t * f,
                                      double hit_rate,
                                      enum H5C_resize_status * status_ptr,
                                      size_t * new_max_cache_size_ptr,
                                      hbool_t write_permitted);

static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr);

static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
                                                       hbool_t write_permitted);

static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr);

static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr);

static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr);

static herr_t H5C__flash_increase_cache_size(H5C_t * cache_ptr,
    size_t old_entry_size, size_t new_entry_size);

static herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags);

static herr_t H5C_flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);

static herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);

static void * H5C_load_entry(H5F_t *             f,
#ifdef H5_HAVE_PARALLEL
                             hbool_t             coll_access,
#endif /* H5_HAVE_PARALLEL */
                             const H5C_class_t * type,
                             haddr_t             addr,
                             void *              udata);

static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry);

static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry);

static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring);
static herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr,
    H5C_cache_entry_t *entry_ptr);

static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t * type,
    haddr_t addr, size_t *len, hbool_t actual);

#if H5C_DO_SLIST_SANITY_CHECKS
static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr,
                                      H5C_cache_entry_t *target_ptr);
#endif /* H5C_DO_SLIST_SANITY_CHECKS */

#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t H5C_validate_lru_list(H5C_t * cache_ptr);
static herr_t H5C_validate_pinned_entry_list(H5C_t * cache_ptr);
static herr_t H5C_validate_protected_entry_list(H5C_t * cache_ptr);
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

#ifndef NDEBUG
static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
                                          const H5C_cache_entry_t * base_entry);
#endif /* NDEBUG */


/*********************/
/* Package Variables */
/*********************/

/* Package initialization variable */
hbool_t H5_PKG_INIT_VAR = FALSE;

/* Declare a free list to manage the tag info struct */
H5FL_DEFINE(H5C_tag_info_t);


/*****************************/
/* Library Private Variables */
/*****************************/


/*******************/
/* Local Variables */
/*******************/

/* Declare a free list to manage the H5C_t struct */
H5FL_DEFINE_STATIC(H5C_t);

/* Declare a free list to manage flush dependency arrays */
H5FL_BLK_DEFINE_STATIC(parent);


/*-------------------------------------------------------------------------
 * Function:    H5C_create
 *
 * Purpose:     Allocate, initialize, and return the address of a new
 *              instance of H5C_t.
 *
 *              In general, the max_cache_size parameter must be positive,
 *              and the min_clean_size parameter must lie in the closed
 *              interval [0, max_cache_size].
 *
 *              The check_write_permitted parameter must either be NULL,
 *              or point to a function of type H5C_write_permitted_func_t.
 *              If it is NULL, the cache will use the write_permitted
 *              flag to determine whether writes are permitted.
 *
 * Return:      Success:        Pointer to the new instance.
 *
 *              Failure:        NULL
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 *-------------------------------------------------------------------------
 */
H5C_t *
H5C_create(size_t                     max_cache_size,
           size_t                     min_clean_size,
           int                        max_type_id,
           const H5C_class_t * const * class_table_ptr,
           H5C_write_permitted_func_t check_write_permitted,
           hbool_t                    write_permitted,
           H5C_log_flush_func_t       log_flush,
           void *                     aux_ptr)
{
    int i;
    H5C_t * cache_ptr = NULL;
    H5C_t * ret_value = NULL;      /* Return value */

    FUNC_ENTER_NOAPI(NULL)

    HDassert( max_cache_size >= H5C__MIN_MAX_CACHE_SIZE );
    HDassert( max_cache_size <= H5C__MAX_MAX_CACHE_SIZE );
    HDassert( min_clean_size <= max_cache_size );

    HDassert( max_type_id >= 0 );
    HDassert( max_type_id < H5C__MAX_NUM_TYPE_IDS );
    HDassert( class_table_ptr );

    for ( i = 0; i <= max_type_id; i++ ) {
        HDassert( (class_table_ptr)[i] );
        HDassert(HDstrlen((class_table_ptr)[i]->name) > 0);
    } /* end for */

    if(NULL == (cache_ptr = H5FL_CALLOC(H5C_t)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

    if(NULL == (cache_ptr->slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list")

    if(NULL == (cache_ptr->tag_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list for tagged entry addresses")

    /* If we get this far, we should succeed.  Go ahead and initialize all
     * the fields.
     */

    cache_ptr->magic                            = H5C__H5C_T_MAGIC;

    cache_ptr->flush_in_progress                = FALSE;

    if(NULL == (cache_ptr->log_info = (H5C_log_info_t *)H5MM_calloc(sizeof(H5C_log_info_t))))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed")

    cache_ptr->aux_ptr                          = aux_ptr;

    cache_ptr->max_type_id                      = max_type_id;

    cache_ptr->class_table_ptr                  = class_table_ptr;

    cache_ptr->max_cache_size                   = max_cache_size;
    cache_ptr->min_clean_size                   = min_clean_size;

    cache_ptr->check_write_permitted            = check_write_permitted;
    cache_ptr->write_permitted                  = write_permitted;

    cache_ptr->log_flush                        = log_flush;

    cache_ptr->evictions_enabled                = TRUE;
    cache_ptr->close_warning_received           = FALSE;

    cache_ptr->index_len                        = 0;
    cache_ptr->index_size                       = (size_t)0;
    cache_ptr->clean_index_size                 = (size_t)0;
    cache_ptr->dirty_index_size                 = (size_t)0;

    for(i = 0; i < H5C_RING_NTYPES; i++) {
        cache_ptr->index_ring_len[i]            = 0;
        cache_ptr->index_ring_size[i]           = (size_t)0;
        cache_ptr->clean_index_ring_size[i]     = (size_t)0;
        cache_ptr->dirty_index_ring_size[i]     = (size_t)0;

        cache_ptr->slist_ring_len[i]            = 0;
        cache_ptr->slist_ring_size[i]           = (size_t)0;
    } /* end for */

    for(i = 0; i < H5C__HASH_TABLE_LEN; i++)
        (cache_ptr->index)[i] = NULL;

    cache_ptr->il_len                           = 0;
    cache_ptr->il_size                          = (size_t)0;
    cache_ptr->il_head                          = NULL;
    cache_ptr->il_tail                          = NULL;

    /* Tagging Field Initializations */
    cache_ptr->ignore_tags                      = FALSE;
    cache_ptr->num_objs_corked                  = 0;

    cache_ptr->slist_changed                    = FALSE;
    cache_ptr->slist_len                        = 0;
    cache_ptr->slist_size                       = (size_t)0;

#if H5C_DO_SANITY_CHECKS
    cache_ptr->slist_len_increase               = 0;
    cache_ptr->slist_size_increase              = 0;
#endif /* H5C_DO_SANITY_CHECKS */

    cache_ptr->entries_removed_counter          = 0;
    cache_ptr->last_entry_removed_ptr           = NULL;
    cache_ptr->entry_watched_for_removal        = NULL;

    cache_ptr->pl_len                           = 0;
    cache_ptr->pl_size                          = (size_t)0;
    cache_ptr->pl_head_ptr                      = NULL;
    cache_ptr->pl_tail_ptr                      = NULL;

    cache_ptr->pel_len                          = 0;
    cache_ptr->pel_size                         = (size_t)0;
    cache_ptr->pel_head_ptr                     = NULL;
    cache_ptr->pel_tail_ptr                     = NULL;

    cache_ptr->LRU_list_len                     = 0;
    cache_ptr->LRU_list_size                    = (size_t)0;
    cache_ptr->LRU_head_ptr                     = NULL;
    cache_ptr->LRU_tail_ptr                     = NULL;

#ifdef H5_HAVE_PARALLEL
    cache_ptr->coll_list_len                    = 0;
    cache_ptr->coll_list_size                   = (size_t)0;
    cache_ptr->coll_head_ptr                    = NULL;
    cache_ptr->coll_tail_ptr                    = NULL;
    cache_ptr->coll_write_list                  = NULL;
#endif /* H5_HAVE_PARALLEL */

#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
    cache_ptr->cLRU_list_len                    = 0;
    cache_ptr->cLRU_list_size                   = (size_t)0;
    cache_ptr->cLRU_head_ptr                    = NULL;
    cache_ptr->cLRU_tail_ptr                    = NULL;

    cache_ptr->dLRU_list_len                    = 0;
    cache_ptr->dLRU_list_size                   = (size_t)0;
    cache_ptr->dLRU_head_ptr                    = NULL;
    cache_ptr->dLRU_tail_ptr                    = NULL;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

    cache_ptr->size_increase_possible           = FALSE;
    cache_ptr->flash_size_increase_possible     = FALSE;
    cache_ptr->flash_size_increase_threshold    = 0;
    cache_ptr->size_decrease_possible           = FALSE;
    cache_ptr->resize_enabled                   = FALSE;
    cache_ptr->cache_full                       = FALSE;
    cache_ptr->size_decreased                   = FALSE;
    cache_ptr->resize_in_progress               = FALSE;
    cache_ptr->msic_in_progress                 = FALSE;

    (cache_ptr->resize_ctl).version             = H5C__CURR_AUTO_SIZE_CTL_VER;
    (cache_ptr->resize_ctl).rpt_fcn             = NULL;
    (cache_ptr->resize_ctl).set_initial_size    = FALSE;
    (cache_ptr->resize_ctl).initial_size        = H5C__DEF_AR_INIT_SIZE;
    (cache_ptr->resize_ctl).min_clean_fraction  = H5C__DEF_AR_MIN_CLEAN_FRAC;
    (cache_ptr->resize_ctl).max_size            = H5C__DEF_AR_MAX_SIZE;
    (cache_ptr->resize_ctl).min_size            = H5C__DEF_AR_MIN_SIZE;
    (cache_ptr->resize_ctl).epoch_length        = H5C__DEF_AR_EPOCH_LENGTH;

    (cache_ptr->resize_ctl).incr_mode           = H5C_incr__off;
    (cache_ptr->resize_ctl).lower_hr_threshold  = H5C__DEF_AR_LOWER_THRESHHOLD;
    (cache_ptr->resize_ctl).increment           = H5C__DEF_AR_INCREMENT;
    (cache_ptr->resize_ctl).apply_max_increment = TRUE;
    (cache_ptr->resize_ctl).max_increment       = H5C__DEF_AR_MAX_INCREMENT;

    (cache_ptr->resize_ctl).flash_incr_mode     = H5C_flash_incr__off;
    (cache_ptr->resize_ctl).flash_multiple      = 1.0f;
    (cache_ptr->resize_ctl).flash_threshold     = 0.25f;

    (cache_ptr->resize_ctl).decr_mode           = H5C_decr__off;
    (cache_ptr->resize_ctl).upper_hr_threshold  = H5C__DEF_AR_UPPER_THRESHHOLD;
    (cache_ptr->resize_ctl).decrement           = H5C__DEF_AR_DECREMENT;
    (cache_ptr->resize_ctl).apply_max_decrement = TRUE;
    (cache_ptr->resize_ctl).max_decrement       = H5C__DEF_AR_MAX_DECREMENT;
    (cache_ptr->resize_ctl).epochs_before_eviction = H5C__DEF_AR_EPCHS_B4_EVICT;
    (cache_ptr->resize_ctl).apply_empty_reserve = TRUE;
    (cache_ptr->resize_ctl).empty_reserve       = H5C__DEF_AR_EMPTY_RESERVE;

    cache_ptr->epoch_markers_active             = 0;

    /* no need to initialize the ring buffer itself */
    cache_ptr->epoch_marker_ringbuf_first       = 1;
    cache_ptr->epoch_marker_ringbuf_last        = 0;
    cache_ptr->epoch_marker_ringbuf_size        = 0;
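
    /* With first == 1, last == 0, and size == 0, the ring buffer
     * presumably starts out empty (first leads last by one); the marker
     * entries themselves are initialized below.
     */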

    /* Initialize all epoch marker entries' fields to zero/FALSE/NULL */
    HDmemset(cache_ptr->epoch_markers, 0, sizeof(cache_ptr->epoch_markers));

    /* Set non-zero/FALSE/NULL fields for epoch markers */
    for ( i = 0; i < H5C__MAX_EPOCH_MARKERS; i++ )
    {
        ((cache_ptr->epoch_markers)[i]).magic            =
                                               H5C__H5C_CACHE_ENTRY_T_MAGIC;
        ((cache_ptr->epoch_markers)[i]).addr             = (haddr_t)i;
        ((cache_ptr->epoch_markers)[i]).type             = H5AC_EPOCH_MARKER;
    }

    /* Initialize cache image generation on file close related fields.
     * Initial value of image_ctl must match H5C__DEFAULT_CACHE_IMAGE_CTL
     * in H5Cprivate.h.
     */
    cache_ptr->image_ctl.version            = H5C__CURR_CACHE_IMAGE_CTL_VER;
    cache_ptr->image_ctl.generate_image     = FALSE;
    cache_ptr->image_ctl.save_resize_status = FALSE;
    cache_ptr->image_ctl.entry_ageout       = -1;
    cache_ptr->image_ctl.flags              = H5C_CI__ALL_FLAGS;

    cache_ptr->serialization_in_progress    = FALSE;
    cache_ptr->load_image                   = FALSE;
    cache_ptr->image_loaded                 = FALSE;
    cache_ptr->delete_image                 = FALSE;
    cache_ptr->image_addr                   = HADDR_UNDEF;
    cache_ptr->image_len                    = 0;
    cache_ptr->image_data_len               = 0;

    cache_ptr->entries_loaded_counter           = 0;
    cache_ptr->entries_inserted_counter         = 0;
    cache_ptr->entries_relocated_counter        = 0;
    cache_ptr->entry_fd_height_change_counter   = 0;

    cache_ptr->num_entries_in_image     = 0;
    cache_ptr->image_entries            = NULL;
    cache_ptr->image_buffer             = NULL;

    /* initialize free space manager related fields: */
    cache_ptr->rdfsm_settled            = FALSE;
    cache_ptr->mdfsm_settled            = FALSE;

    if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
        /* this should be impossible... */
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "H5C_reset_cache_hit_rate_stats failed")

    H5C_stats__reset(cache_ptr);

    cache_ptr->prefix[0]                        = '\0';  /* empty string */

#ifndef NDEBUG
    cache_ptr->get_entry_ptr_from_addr_counter  = 0;
#endif /* NDEBUG */

    /* Set return value */
    ret_value = cache_ptr;

done:
    if(NULL == ret_value) {
        if(cache_ptr != NULL) {
            if(cache_ptr->slist_ptr != NULL)
                H5SL_close(cache_ptr->slist_ptr);

            if(cache_ptr->tag_list != NULL)
                H5SL_close(cache_ptr->tag_list);

            if(cache_ptr->log_info != NULL)
                H5MM_xfree(cache_ptr->log_info);

            cache_ptr->magic = 0;
            cache_ptr = H5FL_FREE(H5C_t, cache_ptr);
        } /* end if */
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_create() */
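
/* A minimal usage sketch for H5C_create() -- the class table, type count,
 * and sizes below are hypothetical, not taken from this file.  Passing
 * NULL for check_write_permitted makes the cache fall back on the
 * write_permitted flag:
 *
 *     H5C_t *cache_ptr;
 *
 *     cache_ptr = H5C_create((size_t)(4 * 1024 * 1024),  // max_cache_size
 *                            (size_t)(2 * 1024 * 1024),  // min_clean_size
 *                            my_max_type_id,             // hypothetical
 *                            my_class_table,             // hypothetical
 *                            NULL,                       // use write_permitted flag
 *                            TRUE,                       // write_permitted
 *                            NULL,                       // no flush logging
 *                            NULL);                      // no aux data
 *     if(NULL == cache_ptr)
 *         // handle failure
 */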


/*-------------------------------------------------------------------------
 * Function:    H5C_def_auto_resize_rpt_fcn
 *
 * Purpose:     Print results of an automatic cache resize.
 *
 *              This function should only be used where HDprintf() behaves
 *              well -- i.e. not on Windows.
 *
 * Return:      void
 *
 * Programmer:  John Mainzer
 *              10/27/04
 *
 *-------------------------------------------------------------------------
 */
void
H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
#ifndef NDEBUG
                            int32_t version,
#else /* NDEBUG */
                            int32_t H5_ATTR_UNUSED version,
#endif /* NDEBUG */
                            double hit_rate,
                            enum H5C_resize_status status,
                            size_t old_max_cache_size,
                            size_t new_max_cache_size,
                            size_t old_min_clean_size,
                            size_t new_min_clean_size)
{
    HDassert( cache_ptr != NULL );
    HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
    HDassert( version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER );

    switch ( status )
    {
        case in_spec:
            HDfprintf(stdout,
                      "%sAuto cache resize -- no change. (hit rate = %lf)\n",
                      cache_ptr->prefix, hit_rate);
            break;

        case increase:
            HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold );
            HDassert( old_max_cache_size < new_max_cache_size );

            HDfprintf(stdout,
                      "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
                      cache_ptr->prefix, hit_rate,
                      (cache_ptr->resize_ctl).lower_hr_threshold);

            HDfprintf(stdout,
                    "%s\tcache size increased from (%Zu/%Zu) to (%Zu/%Zu).\n",
                    cache_ptr->prefix,
                    old_max_cache_size,
                    old_min_clean_size,
                    new_max_cache_size,
                    new_min_clean_size);
            break;

        case flash_increase:
            HDassert( old_max_cache_size < new_max_cache_size );

            HDfprintf(stdout,
                    "%sflash cache resize(%d) -- size threshold = %Zu.\n",
                    cache_ptr->prefix,
                    (int)((cache_ptr->resize_ctl).flash_incr_mode),
                    cache_ptr->flash_size_increase_threshold);

            HDfprintf(stdout,
                    "%s cache size increased from (%Zu/%Zu) to (%Zu/%Zu).\n",
                    cache_ptr->prefix,
                    old_max_cache_size,
                    old_min_clean_size,
                    new_max_cache_size,
                    new_min_clean_size);
            break;

        case decrease:
            HDassert( old_max_cache_size > new_max_cache_size );

            switch ( (cache_ptr->resize_ctl).decr_mode )
            {
                case H5C_decr__off:
                    HDfprintf(stdout,
                              "%sAuto cache resize -- decrease off.  HR = %lf\n",
                              cache_ptr->prefix, hit_rate);
                    break;

                case H5C_decr__threshold:
                    HDassert( hit_rate >
                              (cache_ptr->resize_ctl).upper_hr_threshold );

                    HDfprintf(stdout,
                              "%sAuto cache resize -- decrease by threshold.  HR = %lf > %6.5lf\n",
                              cache_ptr->prefix, hit_rate,
                              (cache_ptr->resize_ctl).upper_hr_threshold);

                    HDfprintf(stdout, "%sout of bounds high (%6.5lf).\n",
                              cache_ptr->prefix,
                              (cache_ptr->resize_ctl).upper_hr_threshold);
                    break;

                case H5C_decr__age_out:
                    HDfprintf(stdout,
                              "%sAuto cache resize -- decrease by ageout.  HR = %lf\n",
                              cache_ptr->prefix, hit_rate);
                    break;

                case H5C_decr__age_out_with_threshold:
                    HDassert( hit_rate >
                              (cache_ptr->resize_ctl).upper_hr_threshold );

                    HDfprintf(stdout,
                              "%sAuto cache resize -- decrease by ageout with threshold. HR = %lf > %6.5lf\n",
                              cache_ptr->prefix, hit_rate,
                              (cache_ptr->resize_ctl).upper_hr_threshold);
                    break;

                default:
                    HDfprintf(stdout,
                              "%sAuto cache resize -- decrease by unknown mode.  HR = %lf\n",
                              cache_ptr->prefix, hit_rate);
            }

            HDfprintf(stdout,
                      "%s\tcache size decreased from (%Zu/%Zu) to (%Zu/%Zu).\n",
                      cache_ptr->prefix,
                      old_max_cache_size,
                      old_min_clean_size,
                      new_max_cache_size,
                      new_min_clean_size);
            break;

        case at_max_size:
            HDfprintf(stdout,
                      "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
                      cache_ptr->prefix, hit_rate,
                      (cache_ptr->resize_ctl).lower_hr_threshold);
            HDfprintf(stdout,
                      "%s\tcache already at maximum size so no change.\n",
                      cache_ptr->prefix);
            break;

        case at_min_size:
            HDfprintf(stdout,
                      "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n",
                      cache_ptr->prefix, hit_rate);
            HDfprintf(stdout, "%s\tcache already at minimum size.\n",
                      cache_ptr->prefix);
            break;

        case increase_disabled:
            HDfprintf(stdout,
                      "%sAuto cache resize -- increase disabled -- HR = %lf.\n",
                      cache_ptr->prefix, hit_rate);
            break;

        case decrease_disabled:
            HDfprintf(stdout,
                      "%sAuto cache resize -- decrease disabled -- HR = %lf.\n",
                      cache_ptr->prefix, hit_rate);
            break;

        case not_full:
            HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold );

            HDfprintf(stdout,
                      "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
                      cache_ptr->prefix, hit_rate,
                      (cache_ptr->resize_ctl).lower_hr_threshold);
            HDfprintf(stdout,
                      "%s\tcache not full so no increase in size.\n",
                      cache_ptr->prefix);
            break;

        default:
            HDfprintf(stdout, "%sAuto cache resize -- unknown status code.\n",
                      cache_ptr->prefix);
            break;
    }

    return;

} /* H5C_def_auto_resize_rpt_fcn() */


/*-------------------------------------------------------------------------
 * Function:    H5C_free_tag_list_cb
 *
 * Purpose:     Callback function to free tag nodes from the skip list.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Vailin Choi
 *              January 2014
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C_free_tag_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
{
    H5C_tag_info_t *tag_info = (H5C_tag_info_t *)_item;

    FUNC_ENTER_NOAPI_NOINIT_NOERR

    HDassert(tag_info);

    /* Release the item */
    tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);

    FUNC_LEAVE_NOAPI(0)
}  /* H5C_free_tag_list_cb() */


/*-------------------------------------------------------------------------
 *
 * Function:    H5C_prep_for_file_close
 *
 * Purpose:     This function should be called just prior to the cache
 *              flushes at file close.  There should be no protected
 *              entries in the cache at this point.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              7/3/15
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_prep_for_file_close(H5F_t *f)
{
    H5C_t *     cache_ptr;
    hbool_t     image_generated = FALSE;        /* Whether a cache image was generated */
    herr_t      ret_value = SUCCEED;            /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(f);
    HDassert(f->shared);
    HDassert(f->shared->cache);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);

    /* For now at least, it is possible to receive the
     * close warning more than once -- the following
     * if statement handles this.
     */
    if(cache_ptr->close_warning_received)
        HGOTO_DONE(SUCCEED)
    cache_ptr->close_warning_received = TRUE;

    /* Make certain there aren't any protected entries */
    HDassert(cache_ptr->pl_len == 0);

    /* Prepare cache image */
    if(H5C__prep_image_for_file_close(f, &image_generated) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image")

#ifdef H5_HAVE_PARALLEL
    if ( ( H5F_INTENT(f) & H5F_ACC_RDWR ) &&
         ( ! image_generated ) &&
         ( cache_ptr->aux_ptr != NULL ) &&
         ( f->shared->fs_persist ) ) {
        /* If persistent free space managers are enabled, flushing the
         * metadata cache may result in the deletion, insertion, and/or
         * dirtying of entries.
         *
         * This is a problem in PHDF5, as it breaks two invariants of
         * our management of the metadata cache across all processes:
         *
         * 1) Entries will not be dirtied, deleted, inserted, or moved
         *    during flush in the parallel case.
         *
         * 2) All processes contain the same set of dirty metadata
         *    entries on entry to a sync point.
         *
         * To solve this problem for the persistent free space managers,
         * serialize the metadata cache on all processes prior to the
         * first sync point on file shutdown.  The shutdown warning is
         * a convenient location for this call.
         *
         * This is sufficient since:
         *
         * 1) FSM settle routines are only invoked on file close.  Since
         *    serialization makes the same settle calls as flush on file
         *    close, and since the close warning is issued after all
         *    non FSM related space allocations and just before the
         *    first sync point on close, this call will leave the caches
         *    in a consistent state across the processes if they were
         *    consistent before.
         *
         * 2) Since the FSM settle routines are only invoked once during
         *    file close, invoking them now will prevent their invocation
         *    during a flush, and thus avoid any resulting entry dirties,
         *    deletions, insertions, or moves during the flush.
         */
        if(H5C__serialize_cache(f) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialization of the cache failed")
    } /* end if */
#endif /* H5_HAVE_PARALLEL */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_prep_for_file_close() */
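
/* Sketch of the expected call sequence at file close, as implied by the
 * comments above and the sanity checks in H5C_dest() below (simplified;
 * error handling omitted):
 *
 *     H5C_prep_for_file_close(f);   // issue the close warning
 *     ...                           // final flushes at file close
 *     H5C_dest(f);                  // flush/invalidate and free the cache
 */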


/*-------------------------------------------------------------------------
 * Function:    H5C_dest
 *
 * Purpose:     Flush all data to disk and destroy the cache.
 *
 *              This function fails if any objects are protected since the
 *              resulting file might not be consistent.
 *
 *              Note that *cache_ptr has been freed upon successful return.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_dest(H5F_t * f)
{
    H5C_t * cache_ptr = f->shared->cache;
    herr_t ret_value = SUCCEED;      /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity check */
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->close_warning_received);

#if H5AC_DUMP_IMAGE_STATS_ON_CLOSE
    if(H5C_image_stats(cache_ptr, TRUE) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats")
#endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */

    /* Flush and invalidate all cache entries */
    if(H5C__flush_invalidate_cache(f, H5C__NO_FLAGS_SET) < 0 )
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")

    /* Generate & write cache image if requested */
    if(cache_ptr->image_ctl.generate_image)
        if(H5C__generate_cache_image(f, cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't generate metadata cache image")

    if(cache_ptr->slist_ptr != NULL) {
        H5SL_close(cache_ptr->slist_ptr);
        cache_ptr->slist_ptr = NULL;
    } /* end if */

    if(cache_ptr->tag_list != NULL) {
        H5SL_destroy(cache_ptr->tag_list, H5C_free_tag_list_cb, NULL);
        cache_ptr->tag_list = NULL;
    } /* end if */

    if(cache_ptr->log_info != NULL)
        H5MM_xfree(cache_ptr->log_info);

#ifndef NDEBUG
#if H5C_DO_SANITY_CHECKS
    if(cache_ptr->get_entry_ptr_from_addr_counter > 0)
        HDfprintf(stdout, "*** %ld calls to H5C_get_entry_ptr_from_addr(). ***\n",
                cache_ptr->get_entry_ptr_from_addr_counter);
#endif /* H5C_DO_SANITY_CHECKS */

    cache_ptr->magic = 0;
#endif /* NDEBUG */

    cache_ptr = H5FL_FREE(H5C_t, cache_ptr);

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_dest() */


/*-------------------------------------------------------------------------
 * Function:    H5C_evict
 *
 * Purpose:     Evict all except pinned entries in the cache
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Vailin Choi
 *              Dec 2013
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_evict(H5F_t * f)
{
    herr_t ret_value = SUCCEED;      /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity check */
    HDassert(f);

    /* Flush and invalidate all cache entries except the pinned entries */
    if(H5C__flush_invalidate_cache(f, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0 )
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_evict() */


/*-------------------------------------------------------------------------
 * Function:    H5C_expunge_entry
 *
 * Purpose:     Use this function to tell the cache to expunge an entry
 *              from the cache without writing it to disk even if it is
 *              dirty.  The entry must not be either pinned or protected.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              6/29/06
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags)
{
    H5C_t *             cache_ptr;
    H5C_cache_entry_t * entry_ptr = NULL;
    unsigned            flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG);
    herr_t              ret_value = SUCCEED;      /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(type);
    HDassert(H5F_addr_defined(addr));

#if H5C_DO_EXTREME_SANITY_CHECKS
    if(H5C_validate_lru_list(cache_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    /* Look for entry in cache */
    H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
    if((entry_ptr == NULL) || (entry_ptr->type != type))
        /* the target doesn't exist in the cache, so we are done. */
        HGOTO_DONE(SUCCEED)

    HDassert(entry_ptr->addr == addr);
    HDassert(entry_ptr->type == type);

    /* Check for entry being pinned or protected */
    if(entry_ptr->is_protected)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected")
    if(entry_ptr->is_pinned)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned")

    /* If we get this far, call H5C__flush_single_entry() with the
     * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG.
     * This will clear the entry, and then delete it from the cache.
     */

    /* Pass along 'free file space' flag */
    flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG);

    /* Delete the entry from the skip list on destroy */
    flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;

    if(H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry")

done:
#if H5C_DO_EXTREME_SANITY_CHECKS
    if(H5C_validate_lru_list(cache_ptr) < 0)
        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_expunge_entry() */
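
/* A minimal usage sketch for H5C_expunge_entry() -- my_type and my_addr
 * are hypothetical.  Adding H5C__FREE_FILE_SPACE_FLAG also releases the
 * entry's file space when the entry is expunged:
 *
 *     if(H5C_expunge_entry(f, my_type, my_addr,
 *                          H5C__FREE_FILE_SPACE_FLAG) < 0)
 *         // handle failure -- e.g. the entry was pinned or protected
 */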


/*-------------------------------------------------------------------------
 * Function:    H5C_flush_cache
 *
 * Purpose:     Flush (and possibly destroy) the entries contained in the
 *              specified cache.
 *
 *              If the cache contains protected entries, the function will
 *              fail, as protected entries cannot be flushed.  However,
 *              all unprotected entries should be flushed before the
 *              function returns failure.
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *              a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 * Changes:     Modified function to test for slist changes in
 *              pre_serialize and serialize callbacks, and re-start
 *              scans through the slist when such changes occur.
 *
 *              This has been a potential problem for some time,
 *              and there has been code in this function to deal
 *              with elements of this issue.  However the shift
 *              to the V3 cache in combination with the activities
 *              of some of the cache clients (in particular the
 *              free space manager and the fractal heap) have
 *              made this re-work necessary.
 *
 *                                              JRM -- 12/13/14
 *
 *              Modified function to support rings.  Basic idea is that
 *              every entry in the cache is assigned to a ring.  Entries
 *              in the outermost ring are flushed first, followed by
 *              those in the next outermost ring, and so on until the
 *              innermost ring is flushed.  See header comment on
 *              H5C_ring_t in H5Cprivate.h for a more detailed
 *              discussion.
 *
 *                                              JRM -- 8/30/15
 *
 *              Modified function to call the free space manager
 *              settling functions.
 *                                              JRM -- 6/9/16
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_flush_cache(H5F_t *f, unsigned flags)
{
#if H5C_DO_SANITY_CHECKS
    int                 i;
    uint32_t            index_len = 0;
    size_t              index_size = (size_t)0;
    size_t              clean_index_size = (size_t)0;
    size_t              dirty_index_size = (size_t)0;
    size_t              slist_size = (size_t)0;
    uint32_t            slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
    H5C_ring_t          ring;
    H5C_t             * cache_ptr;
    hbool_t             destroy;
    hbool_t             ignore_protected;
    herr_t              ret_value = SUCCEED;

    FUNC_ENTER_NOAPI(FAIL)

    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->slist_ptr);

#if H5C_DO_SANITY_CHECKS
    HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
    HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
    HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);

    for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
        index_len += cache_ptr->index_ring_len[i];
        index_size += cache_ptr->index_ring_size[i];
        clean_index_size += cache_ptr->clean_index_ring_size[i];
        dirty_index_size += cache_ptr->dirty_index_ring_size[i];

        slist_len += cache_ptr->slist_ring_len[i];
        slist_size += cache_ptr->slist_ring_size[i];
    } /* end for */

    HDassert(cache_ptr->index_len == index_len);
    HDassert(cache_ptr->index_size == index_size);
    HDassert(cache_ptr->clean_index_size == clean_index_size);
    HDassert(cache_ptr->dirty_index_size == dirty_index_size);
    HDassert(cache_ptr->slist_len == slist_len);
    HDassert(cache_ptr->slist_size == slist_size);
#endif /* H5C_DO_SANITY_CHECKS */

#if H5C_DO_EXTREME_SANITY_CHECKS
    if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
            (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
            (H5C_validate_lru_list(cache_ptr) < 0))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
    destroy = ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 );
    HDassert( ! ( destroy && ignore_protected ) );
    HDassert( ! ( cache_ptr->flush_in_progress ) );

    cache_ptr->flush_in_progress = TRUE;

    if(destroy) {
        if(H5C__flush_invalidate_cache(f, flags) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed")
    } /* end if */
    else {
        /* flush each ring, starting from the outermost ring and
         * working inward.
         */
        ring = H5C_RING_USER;
        while(ring < H5C_RING_NTYPES) {

            /* Only call the free space manager settle routines when close
             * warning has been received.
             */
            if(cache_ptr->close_warning_received) {
                switch(ring) {
                    case H5C_RING_USER:
                        break;

                    case H5C_RING_RDFSM:
                        /* Settle raw data FSM */
                        if(!cache_ptr->rdfsm_settled)
                            if(H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
                                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
                        break;

                    case H5C_RING_MDFSM:
                        /* Settle metadata FSM */
                        if(!cache_ptr->mdfsm_settled)
                            if(H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
                                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
                        break;

                    case H5C_RING_SBE:
                    case H5C_RING_SB:
                        break;

                    default:
                        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
                        break;
                } /* end switch */
            } /* end if */

            if(H5C__flush_ring(f, ring, flags) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed")
            ring++;
        } /* end while */
    } /* end else */

done:
    cache_ptr->flush_in_progress = FALSE;

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_flush_cache() */
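
/* A minimal usage sketch for H5C_flush_cache() (a sketch, not taken from
 * a caller in this file).  The flags select between a plain flush and a
 * flush-and-evict:
 *
 *     // flush all dirty entries, keeping them in the cache
 *     if(H5C_flush_cache(f, H5C__NO_FLAGS_SET) < 0)
 *         // handle failure
 *
 *     // flush and evict all entries, e.g. prior to destroying the cache
 *     if(H5C_flush_cache(f, H5C__FLUSH_INVALIDATE_FLAG) < 0)
 *         // handle failure
 */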


/*-------------------------------------------------------------------------
 * Function:    H5C_flush_to_min_clean
 *
 * Purpose:     Flush dirty entries until the cache's min clean size is
 *              attained.
 *
 *              This function is used in the implementation of the
 *              metadata cache in PHDF5.  To avoid "messages from the
 *              future", the cache on process 0 can't be allowed to
 *              flush entries until the other processes have reached
 *              the same point in the calculation.  If this constraint
 *              is not met, it is possible that the other processes will
 *              read metadata generated at a future point in the
 *              computation.
 *
 * Return:      Non-negative on success/Negative on failure or if
 *              write is not permitted.
 *
 * Programmer:  John Mainzer
 *              9/16/05
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_flush_to_min_clean(H5F_t * f)
{
    H5C_t *             cache_ptr;
    hbool_t             write_permitted;
    herr_t              ret_value = SUCCEED;

    FUNC_ENTER_NOAPI(FAIL)

    HDassert( f );
    HDassert( f->shared );

    cache_ptr = f->shared->cache;

    HDassert( cache_ptr );
    HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );

    if(cache_ptr->check_write_permitted != NULL) {
        if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't get write_permitted")
    } /* end if */
    else
        write_permitted = cache_ptr->write_permitted;

    if(!write_permitted)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cache write is not permitted!?!")

    if(H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__make_space_in_cache failed")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_flush_to_min_clean() */
1222 
1223 /*-------------------------------------------------------------------------
1224  * Function:    H5C_insert_entry
1225  *
1226  * Purpose:     Adds the specified thing to the cache.  The thing need not
1227  *              exist on disk yet, but it must have an address and disk
1228  *              space reserved.
1229  *
1230  *		Observe that this function cannot occasion a read.
1231  *
1232  * Return:      Non-negative on success/Negative on failure
1233  *
1234  * Programmer:  John Mainzer
1235  *		6/2/04
1236  *
1237  *-------------------------------------------------------------------------
1238  */
1239 herr_t
H5C_insert_entry(H5F_t * f,const H5C_class_t * type,haddr_t addr,void * thing,unsigned int flags)1240 H5C_insert_entry(H5F_t *             f,
1241                  const H5C_class_t * type,
1242                  haddr_t 	     addr,
1243                  void *		     thing,
1244                  unsigned int        flags)
1245 {
1246     H5C_t               *cache_ptr;
1247     H5AC_ring_t         ring = H5C_RING_UNDEFINED;
1248     hbool_t		insert_pinned;
1249     hbool_t             flush_last;
1250 #ifdef H5_HAVE_PARALLEL
1251     hbool_t             coll_access = FALSE; /* whether access to the cache entry is done collectively */
1252 #endif /* H5_HAVE_PARALLEL */
1253     hbool_t             set_flush_marker;
1254     hbool_t		write_permitted = TRUE;
1255     size_t		empty_space;
1256     H5C_cache_entry_t  *entry_ptr = NULL;
1257     H5C_cache_entry_t  *test_entry_ptr;
1258     hbool_t		entry_tagged = FALSE;
1259     herr_t		ret_value = SUCCEED;    /* Return value */
1260 
1261     FUNC_ENTER_NOAPI(FAIL)
1262 
1263     HDassert( f );
1264     HDassert( f->shared );
1265 
1266     cache_ptr = f->shared->cache;
1267 
1268     HDassert( cache_ptr );
1269     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
1270     HDassert( type );
1271     HDassert( type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type );
1272     HDassert( type->image_len );
1273     HDassert( H5F_addr_defined(addr) );
1274     HDassert( thing );
1275 
1276 #if H5C_DO_EXTREME_SANITY_CHECKS
1277     /* no need to verify that entry is not already in the index as */
1278     /* we already make that check below.                           */
1279     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
1280             (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
1281             (H5C_validate_lru_list(cache_ptr) < 0))
1282         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
1283 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1284 
1285     set_flush_marker   = ( (flags & H5C__SET_FLUSH_MARKER_FLAG) != 0 );
1286     insert_pinned      = ( (flags & H5C__PIN_ENTRY_FLAG) != 0 );
1287     flush_last         = ( (flags & H5C__FLUSH_LAST_FLAG) != 0 );
1288 
1289     /* Get the ring type from the DXPL */
1290     ring = H5CX_get_ring();
1291 
1292     entry_ptr = (H5C_cache_entry_t *)thing;
1293 
1294     /* verify that the new entry isn't already in the hash table -- scream
1295      * and die if it is.
1296      */
1297 
1298     H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
1299 
1300     if(test_entry_ptr != NULL) {
1301         if(test_entry_ptr == entry_ptr)
1302             HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache")
1303         else
1304             HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache")
1305     } /* end if */
1306 
1307     entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
1308     entry_ptr->cache_ptr = cache_ptr;
1309     entry_ptr->addr  = addr;
1310     entry_ptr->type  = type;
1311 
1312     entry_ptr->image_ptr = NULL;
1313     entry_ptr->image_up_to_date = FALSE;
1314 
1315     entry_ptr->is_protected = FALSE;
1316     entry_ptr->is_read_only = FALSE;
1317     entry_ptr->ro_ref_count = 0;
1318 
1319     entry_ptr->is_pinned = insert_pinned;
1320     entry_ptr->pinned_from_client = insert_pinned;
1321     entry_ptr->pinned_from_cache = FALSE;
1322     entry_ptr->flush_me_last = flush_last;
1323 
1324     /* newly inserted entries are assumed to be dirty */
1325     entry_ptr->is_dirty = TRUE;
1326 
1327     /* not protected, so can't be dirtied */
1328     entry_ptr->dirtied  = FALSE;
1329 
1330     /* Retrieve the size of the thing */
1331     if((type->image_len)(thing, &(entry_ptr->size)) < 0)
1332         HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get size of thing")
1333     HDassert(entry_ptr->size > 0 &&  entry_ptr->size < H5C_MAX_ENTRY_SIZE);
1334 
1335     entry_ptr->in_slist = FALSE;
1336 
1337 #ifdef H5_HAVE_PARALLEL
1338     entry_ptr->clear_on_unprotect = FALSE;
1339     entry_ptr->flush_immediately = FALSE;
1340 #endif /* H5_HAVE_PARALLEL */
1341 
1342     entry_ptr->flush_in_progress = FALSE;
1343     entry_ptr->destroy_in_progress = FALSE;
1344 
1345     entry_ptr->ring = ring;
1346 
1347     /* Initialize flush dependency fields */
1348     entry_ptr->flush_dep_parent             = NULL;
1349     entry_ptr->flush_dep_nparents           = 0;
1350     entry_ptr->flush_dep_parent_nalloc      = 0;
1351     entry_ptr->flush_dep_nchildren          = 0;
1352     entry_ptr->flush_dep_ndirty_children    = 0;
1353     entry_ptr->flush_dep_nunser_children    = 0;
1354 
1355     entry_ptr->ht_next = NULL;
1356     entry_ptr->ht_prev = NULL;
1357     entry_ptr->il_next = NULL;
1358     entry_ptr->il_prev = NULL;
1359 
1360     entry_ptr->next = NULL;
1361     entry_ptr->prev = NULL;
1362 
1363 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
1364     entry_ptr->aux_next = NULL;
1365     entry_ptr->aux_prev = NULL;
1366 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
1367 
1368 #ifdef H5_HAVE_PARALLEL
1369     entry_ptr->coll_next = NULL;
1370     entry_ptr->coll_prev = NULL;
1371 #endif /* H5_HAVE_PARALLEL */
1372 
1373     /* initialize cache image related fields */
1374     entry_ptr->include_in_image 		= FALSE;
1375     entry_ptr->lru_rank         		= 0;
1376     entry_ptr->image_dirty			= FALSE;
1377     entry_ptr->fd_parent_count			= 0;
1378     entry_ptr->fd_parent_addrs			= NULL;
1379     entry_ptr->fd_child_count			= 0;
1380     entry_ptr->fd_dirty_child_count		= 0;
1381     entry_ptr->image_fd_height			= 0;
1382     entry_ptr->prefetched			= FALSE;
1383     entry_ptr->prefetch_type_id			= 0;
1384     entry_ptr->age				= 0;
1385     entry_ptr->prefetched_dirty                 = FALSE;
1386 #ifndef NDEBUG  /* debugging field */
1387     entry_ptr->serialization_count		= 0;
1388 #endif /* NDEBUG */
1389 
1390     entry_ptr->tl_next  = NULL;
1391     entry_ptr->tl_prev  = NULL;
1392     entry_ptr->tag_info = NULL;
1393 
1394     /* Apply tag to newly inserted entry */
1395     if(H5C__tag_entry(cache_ptr, entry_ptr) < 0)
1396         HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
1397     entry_tagged = TRUE;
1398 
1399     H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
1400 
1401     if(cache_ptr->flash_size_increase_possible &&
1402             (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
1403         if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
1404             HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed")
1405 
1406     if(cache_ptr->index_size >= cache_ptr->max_cache_size)
1407         empty_space = 0;
1408     else
1409         empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
1410 
1411     if(cache_ptr->evictions_enabled &&
1412          (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size)
1413 	   ||
1414 	   (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) {
1415         size_t space_needed;
1416 
1417 	if(empty_space <= entry_ptr->size)
1418             cache_ptr->cache_full = TRUE;
1419 
1420         if(cache_ptr->check_write_permitted != NULL) {
1421             if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
1422                 HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
1423         } /* end if */
1424         else
1425             write_permitted = cache_ptr->write_permitted;
1426 
1427         HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
1428         space_needed = entry_ptr->size;
1429         if(space_needed > cache_ptr->max_cache_size)
1430             space_needed = cache_ptr->max_cache_size;
1431 
1432         /* Note that space_needed is just the amount of space
1433          * needed to insert the new entry without exceeding the cache
1434          * size limit.  The subsequent call to H5C__make_space_in_cache()
1435          * may evict the entries required to free more or less space
1436          * depending on conditions.  It MAY be less if the cache is
1437          * currently undersized, or more if the cache is oversized.
1438          *
1439          * The cache can exceed its maximum size limit via the following
1440          * mechanisms:
1441          *
1442          * First, it is possible for the cache to grow without
1443          * bound as long as entries are protected and not unprotected.
1444          *
1445          * Second, when writes are not permitted it is also possible
1446          * for the cache to grow without bound.
1447          *
1448          * Finally, we usually don't check to see if the cache is
1449          * oversized at the end of an unprotect.  As a result, it is
1450          * possible to have a vastly oversized cache with no protected
1451          * entries as long as all the protects precede the unprotects.
1452          *
1453          * Since items 1 and 2 are not changing any time soon, I see
1454          * no point in worrying about the third.
1455          */
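        /* A worked example of the clamp above (illustrative numbers only):
         * with max_cache_size = 4 MB and an entry of size 8 MB, the initial
         * space_needed of 8 MB exceeds the maximum cache size, so it is
         * clamped to 4 MB before H5C__make_space_in_cache() is called.
         */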
1456 
1457         if(H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
1458             HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
1459     } /* end if */
1460 
1461     H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
1462 
1463     /* New entries are presumed to be dirty */
1464     HDassert(entry_ptr->is_dirty);
1465     entry_ptr->flush_marker = set_flush_marker;
1466     H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
1467     H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
1468 
1469 #if H5C_DO_EXTREME_SANITY_CHECKS
1470     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
1471             (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
1472             (H5C_validate_lru_list(cache_ptr) < 0))
1473         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
1474 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1475 
1476     /* If the entry's type has a 'notify' callback, send an 'after insertion'
1477      * notice now that the entry is fully integrated into the cache.
1478      */
1479     if(entry_ptr->type->notify &&
1480             (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0)
1481         HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache")
1482 
1483     H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
1484 
1485 #ifdef H5_HAVE_PARALLEL
1486     if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
1487         coll_access = H5CX_get_coll_metadata_read();
1488 
1489     entry_ptr->coll_access = coll_access;
1490     if(coll_access) {
1491         H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL)
1492 
1493         /* Make sure the size of the collective entries in the cache remains in check */
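        /* (The integer comparison below is equivalent to testing whether
         * coll_list_size exceeds 80% of max_cache_size; it is written this
         * way to avoid floating-point arithmetic.)
         */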
1494         if(cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
1495             if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
1496                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
1497     } /* end if */
1498 #endif
1499 
1500 done:
1501 #if H5C_DO_EXTREME_SANITY_CHECKS
1502     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
1503             (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
1504             (H5C_validate_lru_list(cache_ptr) < 0))
1505         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
1506 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1507 
1508     if(ret_value < 0 && entry_tagged)
1509         if(H5C__untag_entry(cache_ptr, entry_ptr) < 0)
1510             HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
1511 
1512     FUNC_LEAVE_NOAPI(ret_value)
1513 } /* H5C_insert_entry() */
1514 
1515 
1516 /*-------------------------------------------------------------------------
1517  * Function:    H5C_mark_entry_dirty
1518  *
1519  * Purpose:	Mark a pinned or protected entry as dirty.  The target entry
1520  * 		MUST be either pinned or protected, and MAY be both.
1521  *
1522  * 		In the protected case, this call is the functional
1523  * 		equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
1524  * 		call.
1525  *
1526  * 		In the pinned but not protected case, if the entry is not
1527  * 		already dirty, the function marks the entry
1528  * 		dirty and places it on the skip list.
1529  *
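 * 		A minimal usage sketch (illustrative only; entry_ptr is
 * 		assumed to refer to an entry that is already pinned or
 * 		protected):
 *
 * 		    if(H5C_mark_entry_dirty((void *)entry_ptr) < 0)
 * 		        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
 * 		                    "can't mark entry dirty")
 *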
1530  * Return:      Non-negative on success/Negative on failure
1531  *
1532  * Programmer:  John Mainzer
1533  *              5/15/06
1534  *
1535  * 		JRM -- 11/5/08
1536  * 		Added call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY() to
1537  * 		update the new clean_index_size and dirty_index_size
1538  * 		fields of H5C_t in the case that the entry was clean
1539  * 		prior to this call, and is pinned and not protected.
1540  *
1541  *-------------------------------------------------------------------------
1542  */
1543 herr_t
1544 H5C_mark_entry_dirty(void *thing)
1545 {
1546     H5C_t *             cache_ptr;
1547     H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)thing;
1548     herr_t              ret_value = SUCCEED;    /* Return value */
1549 
1550     FUNC_ENTER_NOAPI(FAIL)
1551 
1552     /* Sanity checks */
1553     HDassert(entry_ptr);
1554     HDassert(H5F_addr_defined(entry_ptr->addr));
1555     cache_ptr = entry_ptr->cache_ptr;
1556     HDassert(cache_ptr);
1557     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
1558 
1559     if ( entry_ptr->is_protected ) {
1560 	HDassert( ! ((entry_ptr)->is_read_only) );
1561 
1562         /* set the dirtied flag */
1563         entry_ptr->dirtied = TRUE;
1564 
1565         /* reset image_up_to_date */
1566         if(entry_ptr->image_up_to_date) {
1567             entry_ptr->image_up_to_date = FALSE;
1568 
1569             if(entry_ptr->flush_dep_nparents > 0)
1570                 if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
1571                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
1572         }/* end if */
1573     } /* end if */
1574     else if ( entry_ptr->is_pinned ) {
1575         hbool_t		was_clean;      /* Whether the entry was previously clean */
1576         hbool_t		image_was_up_to_date;
1577 
1578         /* Remember previous dirty status */
1579 	was_clean = !entry_ptr->is_dirty;
1580 
1581         /* Check if image is up to date */
1582         image_was_up_to_date = entry_ptr->image_up_to_date;
1583 
1584         /* Mark the entry as dirty if it isn't already */
1585         entry_ptr->is_dirty = TRUE;
1586 	entry_ptr->image_up_to_date = FALSE;
1587 
1588         /* Modify cache data structures */
1589         if(was_clean)
1590             H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
1591         if(!entry_ptr->in_slist)
1592             H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
1593 
1594         /* Update stats for entry being marked dirty */
1595         H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
1596 
1597         /* Check for entry changing status and do notifications, etc. */
1598         if(was_clean) {
1599             /* If the entry's type has a 'notify' callback, send an 'entry dirtied'
1600              * notice now that the entry is fully integrated into the cache.
1601              */
1602             if(entry_ptr->type->notify &&
1603                     (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
1604                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
1605 
1606             /* Propagate the dirty flag up the flush dependency chain if appropriate */
1607             if(entry_ptr->flush_dep_nparents > 0)
1608                 if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
1609                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
1610         } /* end if */
1611         if(image_was_up_to_date)
1612             if(entry_ptr->flush_dep_nparents > 0)
1613                 if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
1614                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
1615     } /* end if */
1616     else
1617         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??")
1618 
1619 done:
1620     FUNC_LEAVE_NOAPI(ret_value)
1621 } /* H5C_mark_entry_dirty() */
1622 
1623 
1624 /*-------------------------------------------------------------------------
1625  * Function:    H5C_mark_entry_clean
1626  *
1627  * Purpose:	Mark a pinned entry as clean.  The target entry MUST be pinned.
1628  *
1629  * 		If the entry is not
1630  * 		already clean, the function marks the entry
1631  * 		clean and removes it from the skip list.
1632  *
1633  * Return:      Non-negative on success/Negative on failure
1634  *
1635  * Programmer:  Quincey Koziol
1636  *              7/23/16
1637  *
1638  *-------------------------------------------------------------------------
1639  */
1640 herr_t
1641 H5C_mark_entry_clean(void *_thing)
1642 {
1643     H5C_t *             cache_ptr;
1644     H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)_thing;
1645     herr_t              ret_value = SUCCEED;    /* Return value */
1646 
1647     FUNC_ENTER_NOAPI(FAIL)
1648 
1649     /* Sanity checks */
1650     HDassert(entry_ptr);
1651     HDassert(H5F_addr_defined(entry_ptr->addr));
1652     cache_ptr = entry_ptr->cache_ptr;
1653     HDassert(cache_ptr);
1654     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
1655 
1656     /* Operate on pinned entry */
1657     if(entry_ptr->is_protected)
1658         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected")
1659     else if(entry_ptr->is_pinned) {
1660         hbool_t		was_dirty;      /* Whether the entry was previously dirty */
1661 
1662         /* Remember previous dirty status */
1663         was_dirty = entry_ptr->is_dirty;
1664 
1665         /* Mark the entry as clean if it isn't already */
1666         entry_ptr->is_dirty = FALSE;
1667 
1668         /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */
1669         entry_ptr->flush_marker = FALSE;
1670 
1671         /* Modify cache data structures */
1672         if(was_dirty)
1673             H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr)
1674         if(entry_ptr->in_slist)
1675             H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE)
1676 
1677         /* Update stats for entry being marked clean */
1678         H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
1679 
1680         /* Check for entry changing status and do notifications, etc. */
1681         if(was_dirty) {
1682             /* If the entry's type has a 'notify' callback, send an 'entry cleaned'
1683              * notice now that the entry is fully integrated into the cache.
1684              */
1685             if(entry_ptr->type->notify &&
1686                     (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
1687                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
1688 
1689             /* Propagate the clean up the flush dependency chain, if appropriate */
1690             if(entry_ptr->flush_dep_nparents > 0)
1691                 if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
1692                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean")
1693         } /* end if */
1694     } /* end if */
1695     else
1696         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??")
1697 
1698 done:
1699     FUNC_LEAVE_NOAPI(ret_value)
1700 } /* H5C_mark_entry_clean() */
1701 
1702 
1703 /*-------------------------------------------------------------------------
1704  * Function:    H5C_mark_entry_unserialized
1705  *
1706  * Purpose:	Mark a pinned or protected entry as unserialized.  The target
1707  *		entry MUST be either pinned or protected, and MAY be both.
1708  *
1709  * Return:      Non-negative on success/Negative on failure
1710  *
1711  * Programmer:  Quincey Koziol
1712  *              12/23/16
1713  *
1714  *-------------------------------------------------------------------------
1715  */
1716 herr_t
1717 H5C_mark_entry_unserialized(void *thing)
1718 {
1719     H5C_cache_entry_t  *entry = (H5C_cache_entry_t *)thing;
1720     herr_t              ret_value = SUCCEED;    /* Return value */
1721 
1722     FUNC_ENTER_NOAPI(FAIL)
1723 
1724     /* Sanity checks */
1725     HDassert(entry);
1726     HDassert(H5F_addr_defined(entry->addr));
1727 
1728     if(entry->is_protected || entry->is_pinned) {
1729         HDassert(!entry->is_read_only);
1730 
1731         /* Reset image_up_to_date */
1732         if(entry->image_up_to_date) {
1733 	    entry->image_up_to_date = FALSE;
1734 
1735             if(entry->flush_dep_nparents > 0)
1736                 if(H5C__mark_flush_dep_unserialized(entry) < 0)
1737                     HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "Can't propagate serialization status to fd parents")
1738         }/* end if */
1739     } /* end if */
1740     else
1741         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, "Entry to unserialize is neither pinned nor protected??")
1742 
1743 done:
1744     FUNC_LEAVE_NOAPI(ret_value)
1745 } /* H5C_mark_entry_unserialized() */
1746 
1747 
1748 /*-------------------------------------------------------------------------
1749  * Function:    H5C_mark_entry_serialized
1750  *
1751  * Purpose:	Mark a pinned entry as serialized.  The target entry MUST be
1752  *		pinned.
1753  *
1754  * Return:      Non-negative on success/Negative on failure
1755  *
1756  * Programmer:  Quincey Koziol
1757  *              12/23/16
1758  *
1759  *-------------------------------------------------------------------------
1760  */
1761 herr_t
1762 H5C_mark_entry_serialized(void *_thing)
1763 {
1764     H5C_cache_entry_t  *entry = (H5C_cache_entry_t *)_thing;
1765     herr_t              ret_value = SUCCEED;    /* Return value */
1766 
1767     FUNC_ENTER_NOAPI(FAIL)
1768 
1769     /* Sanity checks */
1770     HDassert(entry);
1771     HDassert(H5F_addr_defined(entry->addr));
1772 
1773     /* Operate on pinned entry */
1774     if(entry->is_protected)
1775         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected")
1776     else if(entry->is_pinned) {
1777         /* Check for entry changing status and do notifications, etc. */
1778         if(!entry->image_up_to_date) {
1779 	    /* Set the image_up_to_date flag */
1780             entry->image_up_to_date = TRUE;
1781 
1782             /* Propagate the serialize up the flush dependency chain, if appropriate */
1783             if(entry->flush_dep_nparents > 0)
1784                 if(H5C__mark_flush_dep_serialized(entry) < 0)
1785                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Can't propagate flush dep serialize")
1786         } /* end if */
1787     } /* end if */
1788     else
1789         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??")
1790 
1791 done:
1792     FUNC_LEAVE_NOAPI(ret_value)
1793 } /* H5C_mark_entry_serialized() */
1794 
1795 
1796 /*-------------------------------------------------------------------------
1797  *
1798  * Function:    H5C_move_entry
1799  *
1800  * Purpose:     Use this function to notify the cache that an entry's
1801  *              file address changed.
1802  *
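 *              A minimal usage sketch (illustrative only; old_addr and
 *              new_addr are assumed to be valid, distinct file addresses):
 *
 *                  if(H5C_move_entry(cache_ptr, type, old_addr, new_addr) < 0)
 *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, \
 *                                  "can't move entry")
 *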
1803  * Return:      Non-negative on success/Negative on failure
1804  *
1805  * Programmer:  John Mainzer
1806  *              6/2/04
1807  *
1808  *-------------------------------------------------------------------------
1809  */
1810 herr_t
1811 H5C_move_entry(H5C_t *	     cache_ptr,
1812                  const H5C_class_t * type,
1813                  haddr_t 	     old_addr,
1814 	         haddr_t 	     new_addr)
1815 {
1816     H5C_cache_entry_t *	entry_ptr = NULL;
1817     H5C_cache_entry_t *	test_entry_ptr = NULL;
1818     herr_t			ret_value = SUCCEED;      /* Return value */
1819 
1820     FUNC_ENTER_NOAPI(FAIL)
1821 
1822     HDassert(cache_ptr);
1823     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
1824     HDassert(type);
1825     HDassert(H5F_addr_defined(old_addr));
1826     HDassert(H5F_addr_defined(new_addr));
1827     HDassert(H5F_addr_ne(old_addr, new_addr));
1828 
1829 #if H5C_DO_EXTREME_SANITY_CHECKS
1830     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
1831              (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
1832              (H5C_validate_lru_list(cache_ptr) < 0))
1833         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
1834 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1835 
1836     H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL)
1837 
1838     if(entry_ptr == NULL || entry_ptr->type != type)
1839         /* the old item doesn't exist in the cache, so we are done. */
1840         HGOTO_DONE(SUCCEED)
1841 
1842     HDassert(entry_ptr->addr == old_addr);
1843     HDassert(entry_ptr->type == type);
1844 
1845     /* Check for R/W status, otherwise error */
1846     /* (Moving an R/O entry would mark it dirty, which shouldn't
1847      *  happen. QAK - 2016/12/02)
1848      */
1849     if(entry_ptr->is_read_only)
1850         HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move R/O entry")
1851 
1852     H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL)
1853 
1854     if(test_entry_ptr != NULL) { /* we are hosed */
1855         if(test_entry_ptr->type == type)
1856             HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???")
1857         else
1858             HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?")
1859     } /* end if */
1860 
1861     /* If we get this far we have work to do.  Remove *entry_ptr from
1862      * the hash table (and skip list if necessary), change its address to the
1863      * new address, mark it as dirty (if it isn't already) and then re-insert.
1864      *
1865      * Update the replacement policy for a hit to avoid an eviction before
1866      * the moved entry is touched.  Update stats for a move.
1867      *
1868      * Note that we do not check the size of the cache, or evict anything.
1869      * Since this is a simple re-name, cache size should be unaffected.
1870      *
1871      * Check to see if the target entry is in the process of being destroyed
1872      * before we delete from the index, etc.  If it is, all we do is
1873      * change the addr.  If the entry is only in the process of being flushed,
1874      * don't mark it as dirty either, lest we confuse the flush call back.
1875      */
1876     if(!entry_ptr->destroy_in_progress) {
1877         H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
1878 
1879         if(entry_ptr->in_slist) {
1880             HDassert(cache_ptr->slist_ptr);
1881             H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE)
1882         } /* end if */
1883     } /* end if */
1884 
1885     entry_ptr->addr = new_addr;
1886 
1887     if(!entry_ptr->destroy_in_progress) {
1888         hbool_t		was_dirty;      /* Whether the entry was previously dirty */
1889 
1890         /* Remember previous dirty status */
1891         was_dirty = entry_ptr->is_dirty;
1892 
1893         /* Mark the entry as dirty if it isn't already */
1894 	entry_ptr->is_dirty = TRUE;
1895 
1896 	/* This shouldn't be needed, but it keeps the test code happy */
1897         if(entry_ptr->image_up_to_date) {
1898             entry_ptr->image_up_to_date = FALSE;
1899             if(entry_ptr->flush_dep_nparents > 0)
1900                 if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
1901                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
1902         } /* end if */
1903 
1904         /* Modify cache data structures */
1905         H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
1906         H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
1907 
1908         /* Skip some actions if we're in the middle of flushing the entry */
1909 	if(!entry_ptr->flush_in_progress) {
1910             /* Update the replacement policy for the entry */
1911             H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL)
1912 
1913             /* Check for entry changing status and do notifications, etc. */
1914             if(!was_dirty) {
1915                 /* If the entry's type has a 'notify' callback, send an 'entry dirtied'
1916                  * notice now that the entry is fully integrated into the cache.
1917                  */
1918                 if(entry_ptr->type->notify &&
1919                         (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
1920                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
1921 
1922                 /* Propagate the dirty flag up the flush dependency chain if appropriate */
1923                 if(entry_ptr->flush_dep_nparents > 0)
1924                     if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
1925                         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
1926             } /* end if */
1927         } /* end if */
1928     } /* end if */
1929 
1930     H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
1931 
1932 done:
1933 #if H5C_DO_EXTREME_SANITY_CHECKS
1934     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
1935              (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
1936              (H5C_validate_lru_list(cache_ptr) < 0))
1937         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
1938 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1939 
1940     FUNC_LEAVE_NOAPI(ret_value)
1941 } /* H5C_move_entry() */
1942 
1943 
1944 /*-------------------------------------------------------------------------
1945  * Function:    H5C_resize_entry
1946  *
1947  * Purpose:	Resize a pinned or protected entry.
1948  *
1949  * 		Resizing an entry dirties it, so if the entry is not
1950  * 		already dirty, the function places the entry on the
1951  * 		skip list.
1952  *
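 * 		A minimal usage sketch (illustrative only; entry_ptr is
 * 		assumed to be pinned or protected and new_size nonzero):
 *
 * 		    if(H5C_resize_entry((void *)entry_ptr, new_size) < 0)
 * 		        HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, \
 * 		                    "can't resize entry")
 *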
1953  * Return:      Non-negative on success/Negative on failure
1954  *
1955  * Programmer:  John Mainzer
1956  *              7/5/06
1957  *
1958  *-------------------------------------------------------------------------
1959  */
1960 herr_t
1961 H5C_resize_entry(void *thing, size_t new_size)
1962 {
1963     H5C_t             * cache_ptr;
1964     H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)thing;
1965     herr_t              ret_value = SUCCEED;    /* Return value */
1966 
1967     FUNC_ENTER_NOAPI(FAIL)
1968 
1969     /* Sanity checks */
1970     HDassert(entry_ptr);
1971     HDassert(H5F_addr_defined(entry_ptr->addr));
1972     cache_ptr = entry_ptr->cache_ptr;
1973     HDassert(cache_ptr);
1974     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
1975 
1976     /* Check for usage errors */
1977     if(new_size <= 0)
1978         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive")
1979     if(!(entry_ptr->is_pinned || entry_ptr->is_protected))
1980         HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
1981 
1982 #if H5C_DO_EXTREME_SANITY_CHECKS
1983     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
1984             (H5C_validate_pinned_entry_list(cache_ptr) < 0))
1985         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
1986 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
1987 
1988     /* update for change in entry size if necessary */
1989     if ( entry_ptr->size != new_size ) {
1990         hbool_t		was_clean;
1991 
1992         /* make note of whether the entry was clean to begin with */
1993         was_clean = !entry_ptr->is_dirty;
1994 
1995         /* mark the entry as dirty if it isn't already */
1996         entry_ptr->is_dirty = TRUE;
1997 
1998         /* Reset the image up-to-date status */
1999         if(entry_ptr->image_up_to_date) {
2000             entry_ptr->image_up_to_date = FALSE;
2001             if(entry_ptr->flush_dep_nparents > 0)
2002                 if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
2003                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
2004         } /* end if */
2005 
2006         /* Release the current image */
2007         if(entry_ptr->image_ptr)
2008             entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
2009 
2010         /* do a flash cache size increase if appropriate */
2011         if ( cache_ptr->flash_size_increase_possible ) {
2012 
2013             if ( new_size > entry_ptr->size ) {
2014                 size_t             	size_increase;
2015 
2016                 size_increase = new_size - entry_ptr->size;
2017 
2018                 if(size_increase >= cache_ptr->flash_size_increase_threshold) {
2019                     if(H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0)
2020                         HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "flash cache increase failed")
2021                 }
2022             }
2023         }
2024 
2025         /* update the pinned and/or protected entry list */
2026         if(entry_ptr->is_pinned) {
2027             H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), \
2028                                             (cache_ptr->pel_size), \
2029                                             (entry_ptr->size), (new_size))
2030         } /* end if */
2031         if(entry_ptr->is_protected) {
2032             H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), \
2033                                             (cache_ptr->pl_size), \
2034                                             (entry_ptr->size), (new_size))
2035         } /* end if */
2036 
2037 #ifdef H5_HAVE_PARALLEL
2038         if(entry_ptr->coll_access) {
2039             H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->coll_list_len), \
2040                                             (cache_ptr->coll_list_size), \
2041                                             (entry_ptr->size), (new_size))
2042         } /* end if */
2043 #endif /* H5_HAVE_PARALLEL */
2044 
2045         /* update statistics just before changing the entry size */
2046 	H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size);
2047 
2048         /* update the hash table */
2049 	H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
2050                                           new_size, entry_ptr, was_clean);
2051 
2052         /* if the entry is in the skip list, update that too */
2053         if(entry_ptr->in_slist)
2054 	    H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size);
2055 
2056 	/* finally, update the entry size proper */
2057 	entry_ptr->size = new_size;
2058 
2059         if(!entry_ptr->in_slist)
2060             H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
2061 
2062         if(entry_ptr->is_pinned)
2063             H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
2064 
2065         /* Check for entry changing status and do notifications, etc. */
2066         if(was_clean) {
2067             /* If the entry's type has a 'notify' callback, send an 'entry dirtied'
2068              * notice now that the entry is fully integrated into the cache.
2069              */
2070             if(entry_ptr->type->notify &&
2071                     (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
2072                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
2073 
2074             /* Propagate the dirty flag up the flush dependency chain if appropriate */
2075             if(entry_ptr->flush_dep_nparents > 0)
2076                 if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
2077                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
2078         } /* end if */
2079     } /* end if */
2080 
2081 done:
2082 #if H5C_DO_EXTREME_SANITY_CHECKS
2083     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
2084             (H5C_validate_pinned_entry_list(cache_ptr) < 0))
2085         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
2086 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2087 
2088     FUNC_LEAVE_NOAPI(ret_value)
2089 } /* H5C_resize_entry() */
2090 
2091 
2092 /*-------------------------------------------------------------------------
2093  * Function:    H5C_pin_protected_entry()
2094  *
2095  * Purpose:	Pin a protected cache entry.  The entry must be protected
2096  * 		at the time of call, and must be unpinned.
2097  *
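 * 		A minimal usage sketch (illustrative only; 'thing' is assumed
 * 		to have been returned by a prior H5C_protect() call):
 *
 * 		    if(H5C_pin_protected_entry(thing) < 0)
 * 		        HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \
 * 		                    "can't pin entry")
 *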
2098  * Return:      Non-negative on success/Negative on failure
2099  *
2100  * Programmer:  John Mainzer
2101  *              4/26/06
2102  *
2103  * Changes:	Added extreme sanity checks on entry and exit.
2104  *                                          JRM -- 4/26/14
2105  *
2106  *-------------------------------------------------------------------------
2107  */
2108 herr_t
2109 H5C_pin_protected_entry(void *thing)
2110 {
2111     H5C_t             * cache_ptr;
2112     H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)thing; /* Pointer to entry to pin */
2113     herr_t              ret_value = SUCCEED;    /* Return value */
2114 
2115     FUNC_ENTER_NOAPI(FAIL)
2116 
2117     /* Sanity checks */
2118     HDassert(entry_ptr);
2119     HDassert(H5F_addr_defined(entry_ptr->addr));
2120     cache_ptr = entry_ptr->cache_ptr;
2121     HDassert(cache_ptr);
2122     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
2123 
2124 #if H5C_DO_EXTREME_SANITY_CHECKS
2125     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
2126             (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
2127             (H5C_validate_lru_list(cache_ptr) < 0))
2128         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
2129 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2130 
2131 
2132     /* Only protected entries can be pinned */
2133     if(!entry_ptr->is_protected)
2134         HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected")
2135 
2136     /* Pin the entry from a client */
2137     if(H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
2138         HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
2139 
2140 done:
2141 #if H5C_DO_EXTREME_SANITY_CHECKS
2142     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
2143             (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
2144             (H5C_validate_lru_list(cache_ptr) < 0))
2145         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
2146 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2147 
2148     FUNC_LEAVE_NOAPI(ret_value)
2149 } /* H5C_pin_protected_entry() */
2150 
2151 
2152 /*-------------------------------------------------------------------------
2153  * Function:    H5C_protect
2154  *
2155  * Purpose:     If the target entry is not in the cache, load it.  If
2156  *		necessary, attempt to evict one or more entries to keep
2157  *		the cache within its maximum size.
2158  *
2159  *		Mark the target entry as protected, and return its address
2160  *		to the caller.  The caller must call H5C_unprotect() when
2161  *		finished with the entry.
2162  *
2163  *		While it is protected, the entry may not be either evicted
2164  *		or flushed -- nor may it be accessed by another call to
2165  *		H5C_protect.  Any attempt to do so will result in a failure.
2166  *
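 *		A minimal protect/unprotect round trip (illustrative only;
 *		the flags shown are assumed to be defined in H5Cprivate.h):
 *
 *		    if(NULL == (thing = H5C_protect(f, type, addr, udata,
 *		                                    H5C__READ_ONLY_FLAG)))
 *		        HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, \
 *		                    "can't protect entry")
 *		    ...read from the entry...
 *		    if(H5C_unprotect(f, addr, thing, H5C__NO_FLAGS_SET) < 0)
 *		        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
 *		                    "can't unprotect entry")
 *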
2167  * Return:      Success:        Ptr to the desired entry
2168  *              Failure:        NULL
2169  *
2170  * Programmer:  John Mainzer -  6/2/04
2171  *
2172  *-------------------------------------------------------------------------
2173  */
2174 void *
2175 H5C_protect(H5F_t *		f,
2176             const H5C_class_t * type,
2177             haddr_t 	        addr,
2178             void *              udata,
2179 	    unsigned		flags)
2180 {
2181     H5C_t *		cache_ptr;
2182     H5AC_ring_t         ring = H5C_RING_UNDEFINED;
2183     hbool_t		hit;
2184     hbool_t		have_write_permitted = FALSE;
2185     hbool_t		read_only = FALSE;
2186     hbool_t             flush_last;
2187 #ifdef H5_HAVE_PARALLEL
2188     hbool_t             coll_access = FALSE; /* whether access to the cache entry is done collectively */
2189 #endif /* H5_HAVE_PARALLEL */
2190     hbool_t		write_permitted;
2191     hbool_t             was_loaded = FALSE;     /* Whether the entry was loaded as a result of the protect */
2192     size_t		empty_space;
2193     void *		thing;
2194     H5C_cache_entry_t *	entry_ptr;
2195     void *		ret_value = NULL;       /* Return value */
2196 
2197     FUNC_ENTER_NOAPI(NULL)
2198 
2199     /* check args */
2200     HDassert( f );
2201     HDassert( f->shared );
2202 
2203     cache_ptr = f->shared->cache;
2204 
2205     HDassert( cache_ptr );
2206     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
2207     HDassert( type );
2208     HDassert( type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type );
2209     HDassert( H5F_addr_defined(addr) );
2210 
2211 #if H5C_DO_EXTREME_SANITY_CHECKS
2212     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
2213             (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
2214             (H5C_validate_lru_list(cache_ptr) < 0))
2215         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry")
2216 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2217 
2218     /* Load the cache image, if requested */
2219     if(cache_ptr->load_image) {
2220         cache_ptr->load_image = FALSE;
2221         if(H5C__load_cache_image(f) < 0)
2222             HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image")
2223     } /* end if */
2224 
2225     read_only          = ( (flags & H5C__READ_ONLY_FLAG) != 0 );
2226     flush_last         = ( (flags & H5C__FLUSH_LAST_FLAG) != 0 );
2227 
2228     /* Get the ring type from the API context */
2229     ring = H5CX_get_ring();
2230 
2231 #ifdef H5_HAVE_PARALLEL
2232     if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
2233         coll_access = H5CX_get_coll_metadata_read();
2234 #endif /* H5_HAVE_PARALLEL */
2235 
2236     /* first check to see if the target is in cache */
2237     H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
2238 
2239     if(entry_ptr != NULL) {
2240         if(entry_ptr->ring != ring)
2241             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry")
2242 
2243         HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
2244 
2245         if(entry_ptr->prefetched) {
2246             /* This call removes the prefetched entry from the cache,
2247              * and replaces it with an entry deserialized from the
2248              * image of the prefetched entry.
2249              */
2250             if(H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0)
2251                 HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry")
2252 
2253             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
2254             HDassert(!entry_ptr->prefetched);
2255             HDassert(entry_ptr->addr == addr);
2256         } /* end if */
2257 
2258         /* Check for trying to load the wrong type of entry from an address */
2259         if(entry_ptr->type != type)
2260             HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type")
2261 
2262         /* If this is a collective metadata read, and the entry is clean and
2263            not already marked as collective, it is possible that
2264            other processes will not have it in their caches and will
2265            expect a bcast of the entry from process 0. So process 0
2266            will bcast the entry to all other ranks. Ranks that _do_ have
2267            the entry in their cache still have to participate in the
2268            bcast. */
2269 #ifdef H5_HAVE_PARALLEL
2270         if(coll_access) {
2271             if(!(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) {
2272                 MPI_Comm  comm;           /* File MPI Communicator */
2273                 int       mpi_code;       /* MPI error code */
2274                 int       buf_size;
2275 
2276                 if(MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f)))
2277                     HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
2278 
2279                 if(entry_ptr->image_ptr == NULL) {
2280                     int mpi_rank;
2281 
2282                     if((mpi_rank = H5F_mpi_get_rank(f)) < 0)
2283                         HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
2284 
2285                     if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
2286                         HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
2287 #if H5C_DO_MEMORY_SANITY_CHECKS
2288                     HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
2289 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
2290                     if(0 == mpi_rank)
2291                         if(H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
2292                             HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image")
2293                 } /* end if */
2294                 HDassert(entry_ptr->image_ptr);
2295 
2296                 H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t);
2297                 if(MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm)))
2298                     HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
2299 
2300                 /* Mark the entry as collective and insert into the collective list */
2301                 entry_ptr->coll_access = TRUE;
2302                 H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
2303             } /* end if */
2304             else if(entry_ptr->coll_access) {
2305                 H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
2306             } /* end else-if */
2307         } /* end if */
2308 #endif /* H5_HAVE_PARALLEL */
2309 
2310 #if H5C_DO_TAGGING_SANITY_CHECKS
2311 {
2312         /* Verify tag value */
2313         if(cache_ptr->ignore_tags != TRUE) {
2314             haddr_t tag;              /* Tag value */
2315 
2316             /* The entry is already in the cache, but make sure that the tag value
2317                is still legal.  This ensures that, had
2318                the entry NOT been in the cache, tagging was still set up correctly
2319                and the entry would have received a legal tag value after being loaded
2320                from disk. */
2321 
2322             /* Get the tag */
2323             tag = H5CX_get_tag();
2324 
2325             if(H5C_verify_tag(entry_ptr->type->id, tag) < 0)
2326                 HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed")
2327         } /* end if */
2328 }
2329 #endif
2330 
2331         hit = TRUE;
2332         thing = (void *)entry_ptr;
2333 
2334     } else {
2335 
2336         /* must try to load the entry from disk. */
2337 
2338         hit = FALSE;
2339 
2340         if(NULL == (thing = H5C_load_entry(f,
2341 #ifdef H5_HAVE_PARALLEL
2342                                            coll_access,
2343 #endif /* H5_HAVE_PARALLEL */
2344                                            type, addr, udata)))
2345             HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
2346 
2347         entry_ptr = (H5C_cache_entry_t *)thing;
2348         cache_ptr->entries_loaded_counter++;
2349 
2350         entry_ptr->ring  = ring;
2351 #ifdef H5_HAVE_PARALLEL
2352         if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access)
2353             H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
2354 #endif /* H5_HAVE_PARALLEL */
2355 
2356         /* Apply tag to newly protected entry */
2357         if(H5C__tag_entry(cache_ptr, entry_ptr) < 0)
2358             HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry")
2359 
2360         /* If the entry is very large, and we are configured to allow it,
2361          * we may wish to perform a flash cache size increase.
2362          */
2363         if ( ( cache_ptr->flash_size_increase_possible ) &&
2364              ( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
2365 
2366             if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
2367                 HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
2368         }
2369 
2370         if(cache_ptr->index_size >= cache_ptr->max_cache_size)
2371            empty_space = 0;
2372         else
2373            empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
2374 
2375 	/* Try to free up space if necessary and if evictions are permitted.  Note
2376 	 * that if evictions are enabled, we will call H5C__make_space_in_cache()
2377 	 * whenever either the size limit or the min_clean_size requirement is not met.
2378 	 */
2379         if ( ( cache_ptr->evictions_enabled ) &&
2380              ( ( (cache_ptr->index_size + entry_ptr->size) >
2381 	         cache_ptr->max_cache_size)
2382 	       ||
2383 	       ( ( empty_space + cache_ptr->clean_index_size ) <
2384 	         cache_ptr->min_clean_size )
2385 	     )
2386            ) {
2387 
2388             size_t space_needed;
2389 
2390 	    if(empty_space <= entry_ptr->size)
2391                 cache_ptr->cache_full = TRUE;
2392 
2393             if(cache_ptr->check_write_permitted != NULL) {
2394                 if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
2395                     HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
2396                 else
2397                     have_write_permitted = TRUE;
2398             } /* end if */
2399             else {
2400                 write_permitted = cache_ptr->write_permitted;
2401                 have_write_permitted = TRUE;
2402             } /* end else */
2403 
2404             HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
2405             space_needed = entry_ptr->size;
2406             if(space_needed > cache_ptr->max_cache_size)
2407                 space_needed = cache_ptr->max_cache_size;
2408 
2409             /* Note that space_needed is just the amount of space
2410              * needed to insert the new entry without exceeding the cache
2411              * size limit.  The subsequent call to H5C__make_space_in_cache()
2412              * may evict the entries required to free more or less space
2413              * depending on conditions.  It MAY be less if the cache is
2414              * currently undersized, or more if the cache is oversized.
2415              *
2416              * The cache can exceed its maximum size limit via the following
2417              * mechanisms:
2418              *
2419              * First, it is possible for the cache to grow without
2420              * bound as long as entries are protected and not unprotected.
2421              *
2422              * Second, when writes are not permitted it is also possible
2423              * for the cache to grow without bound.
2424 	     *
2425 	     * Third, the user may choose to disable evictions -- causing
2426 	     * the cache to grow without bound until evictions are
2427 	     * re-enabled.
2428              *
2429              * Finally, we usually don't check to see if the cache is
2430              * oversized at the end of an unprotect.  As a result, it is
2431              * possible to have a vastly oversized cache with no protected
2432              * entries as long as all the protects precede the unprotects.
2433              *
2434              * Since items 1, 2, and 3 are not changing any time soon, I
2435              * see no point in worrying about the fourth.
2436              */
2437 
2438             if(H5C__make_space_in_cache(f, space_needed, write_permitted) < 0 )
2439                 HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
2440         } /* end if */
2441 
2442         /* Insert the entry in the hash table.  It can't be dirty yet, so
2443          * we don't even check to see if it should go in the skip list.
2444          *
2445          * This is no longer true -- due to a bug fix, we may modify
2446          * data on load to repair a file.
2447          *
2448          *   *******************************************
2449          *
2450          * Set the flush_last field
2451  	 * of the newly loaded entry before inserting it into the
2452          * index.  Must do this, as the index tracks the number of
2453          * entries with the flush_last field set, but assumes that
2454          * the field will not change after insertion into the index.
2455          *
2456          * Note that this means that the H5C__FLUSH_LAST_FLAG flag
2457          * is ignored if the entry is already in cache.
2458          */
2459         entry_ptr->flush_me_last = flush_last;
2460 
2461         H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL)
2462 
2463         if ( ( entry_ptr->is_dirty ) && ( ! (entry_ptr->in_slist) ) ) {
2464 
2465             H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL)
2466         }
2467 
2468         /* insert the entry in the data structures used by the replacement
2469          * policy.  We are just going to take it out again when we update
2470          * the replacement policy for a protect, but this simplifies the
2471          * code.  If we do this often enough, we may want to optimize this.
2472          */
2473         H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
2474 
2475         /* Record that the entry was loaded, to trigger a notify callback later */
2476         /* (After the entry is fully added to the cache) */
2477         was_loaded = TRUE;
2478     } /* end else */
2479 
2480     HDassert(entry_ptr->addr == addr);
2481     HDassert(entry_ptr->type == type);
2482 
2483     if(entry_ptr->is_protected) {
2484 	if(read_only && entry_ptr->is_read_only) {
2485 	    HDassert(entry_ptr->ro_ref_count > 0);
2486 	    (entry_ptr->ro_ref_count)++;
2487 	} /* end if */
2488         else
2489             HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?")
2490     } /* end if */
2491     else {
2492     	H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL)
2493 
2494     	entry_ptr->is_protected = TRUE;
2495 
2496 	if ( read_only ) {
2497 	    entry_ptr->is_read_only = TRUE;
2498 	    entry_ptr->ro_ref_count = 1;
2499 	} /* end if */
2500 
2501     	entry_ptr->dirtied = FALSE;
2502     } /* end else */
2503 
2504     H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit)
2505 
2506     H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
2507 
2508     ret_value = thing;
2509 
2510     if ( ( cache_ptr->evictions_enabled ) &&
2511          ( ( cache_ptr->size_decreased ) ||
2512            ( ( cache_ptr->resize_enabled ) &&
2513              ( cache_ptr->cache_accesses >=
2514                (cache_ptr->resize_ctl).epoch_length ) ) ) ) {
2515 
2516         if ( ! have_write_permitted ) {
2517 
2518             if ( cache_ptr->check_write_permitted != NULL ) {
2519                 if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
2520                     HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted")
2521                 else
2522                     have_write_permitted = TRUE;
2523             } else {
2524 
2525                 write_permitted = cache_ptr->write_permitted;
2526 
2527                 have_write_permitted = TRUE;
2528 
2529             }
2530         }
2531 
2532         if(cache_ptr->resize_enabled &&
2533              (cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length)) {
2534 
2535             if(H5C__auto_adjust_cache_size(f, write_permitted) < 0)
2536                 HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed")
2537         } /* end if */
2538 
2539         if(cache_ptr->size_decreased) {
2540             cache_ptr->size_decreased = FALSE;
2541 
2542             /* check to see if the cache is now oversized due to the cache
2543              * size reduction.  If it is, try to evict enough entries to
2544              * bring the cache size down to the current maximum cache size.
2545 	     *
2546 	     * Also, if the min_clean_size requirement is not met, we
2547 	     * should call H5C__make_space_in_cache() to bring us
2548 	     * into compliance.
2549              */
2550 
2551             if(cache_ptr->index_size >= cache_ptr->max_cache_size)
2552                empty_space = 0;
2553             else
2554                empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
2555 
2556             if ( ( cache_ptr->index_size > cache_ptr->max_cache_size )
2557 	         ||
2558 	         ( ( empty_space + cache_ptr->clean_index_size ) <
2559 	           cache_ptr->min_clean_size) ) {
2560 
2561 		if(cache_ptr->index_size > cache_ptr->max_cache_size)
2562                     cache_ptr->cache_full = TRUE;
2563 
2564                 if(H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0 )
2565                     HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
2566             }
2567         } /* end if */
2568     }
2569 
2570     /* If we loaded the entry and the entry's type has a 'notify' callback, send
2571      * an 'after load' notice now that the entry is fully integrated into
2572      * the cache and protected.  We must wait until it is protected so it is not
2573      * evicted during the notify callback.
2574      */
2575     if(was_loaded) {
2576         /* If the entry's type has a 'notify' callback, send an 'after load'
2577          * notice now that the entry is fully integrated into the cache.
2578          */
2579         if(entry_ptr->type->notify &&
2580                 (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0)
2581             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL, "can't notify client about entry inserted into cache")
2582     } /* end if */
2583 
2584 #ifdef H5_HAVE_PARALLEL
2585     /* Make sure the size of the collective entries in the cache remains in check */
2586     if(coll_access)
2587         if(cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
2588             if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
2589                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
2590 #endif /* H5_HAVE_PARALLEL */
2591 
2592 done:
2593 #if H5C_DO_EXTREME_SANITY_CHECKS
2594     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
2595             (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
2596             (H5C_validate_lru_list(cache_ptr) < 0))
2597         HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit")
2598 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
2599 
2600     FUNC_LEAVE_NOAPI(ret_value)
2601 } /* H5C_protect() */
2602 
2603 
2604 /*-------------------------------------------------------------------------
2605  *
2606  * Function:    H5C_reset_cache_hit_rate_stats()
2607  *
2608  * Purpose:     Reset the cache hit rate computation fields.
2609  *
2610  * Return:      SUCCEED on success, and FAIL on failure.
2611  *
2612  * Programmer:  John Mainzer, 10/5/04
2613  *
2614  *-------------------------------------------------------------------------
2615  */
2616 herr_t
2617 H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr)
2618 {
2619     herr_t	ret_value = SUCCEED;      /* Return value */
2620 
2621     FUNC_ENTER_NOAPI(FAIL)
2622 
2623     if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
2624         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")
2625 
2626     cache_ptr->cache_hits		= 0;
2627     cache_ptr->cache_accesses		= 0;
2628 
2629 done:
2630     FUNC_LEAVE_NOAPI(ret_value)
2631 } /* H5C_reset_cache_hit_rate_stats() */
2632 
2633 
2634 /*-------------------------------------------------------------------------
2635  * Function:    H5C_set_cache_auto_resize_config
2636  *
2637  * Purpose:	Set the cache automatic resize configuration to the
2638  *		provided values if they are in range, and fail if they
2639  *		are not.
2640  *
2641  *		If the new configuration enables automatic cache resizing,
2642  *		coerce the cache max size and min clean size into agreement
2643  *		with the new policy and re-set the full cache hit rate
2644  *		stats.
2645  *
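 *		A minimal configuration sketch (illustrative only; it starts
 *		from the current config and disables automatic resizing):
 *
 *		    H5C_auto_size_ctl_t cfg = cache_ptr->resize_ctl;
 *
 *		    cfg.version   = H5C__CURR_AUTO_SIZE_CTL_VER;
 *		    cfg.incr_mode = H5C_incr__off;
 *		    cfg.decr_mode = H5C_decr__off;
 *		    if(H5C_set_cache_auto_resize_config(cache_ptr, &cfg) < 0)
 *		        HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, \
 *		                    "can't set resize config")
 *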
2646  * Return:      SUCCEED on success, and FAIL on failure.
2647  *
2648  * Programmer:  John Mainzer
2649  *		10/8/04
2650  *
2651  *-------------------------------------------------------------------------
2652  */
2653 herr_t
2654 H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
2655                                  H5C_auto_size_ctl_t *config_ptr)
2656 {
2657     size_t      new_max_cache_size;
2658     size_t      new_min_clean_size;
2659     herr_t	ret_value = SUCCEED;      /* Return value */
2660 
2661     FUNC_ENTER_NOAPI(FAIL)
2662 
2663     if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
2664         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")
2665     if(config_ptr == NULL)
2666         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
2667     if(config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
2668         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version")
2669 
2670     /* check general configuration section of the config: */
2671     if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0)
2672         HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config")
2673 
2674     /* check size increase control fields of the config: */
2675     if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0)
2676         HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config")
2677 
2678     /* check size decrease control fields of the config: */
2679     if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0)
2680         HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config")
2681 
2682     /* check for conflicts between size increase and size decrease controls: */
2683     if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0)
2684         HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config")
2685 
2686     /* will set the increase possible fields to FALSE later if needed */
2687     cache_ptr->size_increase_possible       = TRUE;
2688     cache_ptr->flash_size_increase_possible = TRUE;
2689     cache_ptr->size_decrease_possible       = TRUE;
2690 
2691     switch(config_ptr->incr_mode) {
2692         case H5C_incr__off:
2693             cache_ptr->size_increase_possible = FALSE;
2694             break;
2695 
2696         case H5C_incr__threshold:
2697             if((config_ptr->lower_hr_threshold <= (double)0.0f) ||
2698                      (config_ptr->increment <= (double)1.0f) ||
2699                      ((config_ptr->apply_max_increment) && (config_ptr->max_increment <= 0)))
2700                  cache_ptr->size_increase_possible = FALSE;
2701             break;
2702 
2703         default: /* should be unreachable */
2704             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?")
2705     } /* end switch */
2706 
    /* logically, this is where configuration for flash cache size increases
     * should go.  However, this configuration depends on max_cache_size, so
     * we wait until the end of the function, when this field is set.
     */

    switch(config_ptr->decr_mode) {
        case H5C_decr__off:
            cache_ptr->size_decrease_possible = FALSE;
            break;

        case H5C_decr__threshold:
            if((config_ptr->upper_hr_threshold >= (double)1.0f) ||
                     (config_ptr->decrement >= (double)1.0f) ||
                     ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0)))
                cache_ptr->size_decrease_possible = FALSE;
            break;

        case H5C_decr__age_out:
            if(((config_ptr->apply_empty_reserve) && (config_ptr->empty_reserve >= (double)1.0f)) ||
                    ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0)))
                cache_ptr->size_decrease_possible = FALSE;
            break;

        case H5C_decr__age_out_with_threshold:
            if(((config_ptr->apply_empty_reserve) && (config_ptr->empty_reserve >= (double)1.0f)) ||
                    ((config_ptr->apply_max_decrement) && (config_ptr->max_decrement <= 0)) ||
                    (config_ptr->upper_hr_threshold >= (double)1.0f))
                cache_ptr->size_decrease_possible = FALSE;
            break;

        default: /* should be unreachable */
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?")
    } /* end switch */

    if(config_ptr->max_size == config_ptr->min_size) {
        cache_ptr->size_increase_possible = FALSE;
        cache_ptr->flash_size_increase_possible = FALSE;
        cache_ptr->size_decrease_possible = FALSE;
    } /* end if */

    /* flash_size_increase_possible is intentionally omitted from the
     * following:
     */
    cache_ptr->resize_enabled = cache_ptr->size_increase_possible ||
                                cache_ptr->size_decrease_possible;

    cache_ptr->resize_ctl = *config_ptr;

    /* Resize the cache to the supplied initial value if requested, or as
     * necessary to force it within the bounds of the current automatic
     * cache resizing configuration.
     *
     * Note that the min_clean_fraction may have changed, so we
     * go through the exercise even if the current size is within
     * range and an initial size has not been provided.
     */
    if(cache_ptr->resize_ctl.set_initial_size)
        new_max_cache_size = cache_ptr->resize_ctl.initial_size;
    else if(cache_ptr->max_cache_size > cache_ptr->resize_ctl.max_size)
        new_max_cache_size = cache_ptr->resize_ctl.max_size;
    else if(cache_ptr->max_cache_size < cache_ptr->resize_ctl.min_size)
        new_max_cache_size = cache_ptr->resize_ctl.min_size;
    else
        new_max_cache_size = cache_ptr->max_cache_size;

    new_min_clean_size = (size_t)((double)new_max_cache_size *
                          ((cache_ptr->resize_ctl).min_clean_fraction));
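
    /* For example (illustrative numbers): with a new_max_cache_size of
     * 4 MiB and a min_clean_fraction of 0.5, new_min_clean_size comes to
     * 2 MiB -- i.e. at least half of the cache must be kept clean.
     */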


    /* since new_min_clean_size is of type size_t, we have
     *
     * 	( 0 <= new_min_clean_size )
     *
     * by definition.
     */
    HDassert(new_min_clean_size <= new_max_cache_size);
    HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size);
    HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size);

    if(new_max_cache_size < cache_ptr->max_cache_size)
        cache_ptr->size_decreased = TRUE;

    cache_ptr->max_cache_size = new_max_cache_size;
    cache_ptr->min_clean_size = new_min_clean_size;

    if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
        /* this should be impossible... */
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")

    /* remove excess epoch markers if any */
    if((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) ||
            (config_ptr->decr_mode == H5C_decr__age_out)) {
        if(cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction)
            if(H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
    } /* end if */
    else if(cache_ptr->epoch_markers_active > 0) {
        if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
    }

    /* configure flash size increase facility.  We wait until the
     * end of the function, as we need the max_cache_size set before
     * we start to keep things simple.
     *
     * If we haven't already ruled out flash cache size increases above,
     * go ahead and configure it.
     */

    if(cache_ptr->flash_size_increase_possible) {
        switch(config_ptr->flash_incr_mode) {
            case H5C_flash_incr__off:
                cache_ptr->flash_size_increase_possible = FALSE;
                break;

            case H5C_flash_incr__add_space:
                cache_ptr->flash_size_increase_possible = TRUE;
                cache_ptr->flash_size_increase_threshold = (size_t)(((double)(cache_ptr->max_cache_size)) *
                     ((cache_ptr->resize_ctl).flash_threshold));
                break;

            default: /* should be unreachable */
                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
                 break;
        } /* end switch */
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_set_cache_auto_resize_config() */
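
/* Configuration sketch (illustrative only -- not library code): a caller
 * might enable hit-rate-driven size increases as follows.  The field
 * names are those validated above; the numeric values are arbitrary
 * examples, and all fields not shown are inherited from the current
 * resize_ctl.
 *
 *     H5C_auto_size_ctl_t cfg = cache_ptr->resize_ctl;
 *
 *     cfg.version            = H5C__CURR_AUTO_SIZE_CTL_VER;
 *     cfg.set_initial_size   = TRUE;
 *     cfg.initial_size       = 4 * 1024 * 1024;
 *     cfg.min_size           = 1 * 1024 * 1024;
 *     cfg.max_size           = 16 * 1024 * 1024;
 *     cfg.min_clean_fraction = 0.5;
 *     cfg.incr_mode          = H5C_incr__threshold;
 *     cfg.lower_hr_threshold = 0.9;
 *     cfg.increment          = 2.0;
 *     cfg.decr_mode          = H5C_decr__off;
 *
 *     if(H5C_set_cache_auto_resize_config(cache_ptr, &cfg) < 0)
 *         HGOTO_ERROR(...)
 */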


/*-------------------------------------------------------------------------
 * Function:    H5C_set_evictions_enabled()
 *
 * Purpose:     Set cache_ptr->evictions_enabled to the value of the
 *              evictions enabled parameter.
 *
 * Return:      SUCCEED on success, and FAIL on failure.
 *
 * Programmer:  John Mainzer
 *              7/27/07
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled)
{
    herr_t ret_value = SUCCEED;      /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")

    /* There is no fundamental reason why we should not permit
     * evictions to be disabled while automatic resize is enabled.
     * However, I can't think of any good reason why one would
     * want to, and allowing it would greatly complicate testing
     * the feature.  Hence the following:
     */
    if((evictions_enabled != TRUE) &&
         ((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
          (cache_ptr->resize_ctl.decr_mode != H5C_decr__off)))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled")

    cache_ptr->evictions_enabled = evictions_enabled;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_set_evictions_enabled() */
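
/* Note (illustrative): given the check above, a caller that wants to
 * disable evictions must first turn automatic resizing off -- i.e. set
 * both incr_mode and decr_mode to H5C_incr__off / H5C_decr__off via
 * H5C_set_cache_auto_resize_config() -- and only then call
 * H5C_set_evictions_enabled(cache_ptr, FALSE).
 */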


/*-------------------------------------------------------------------------
 * Function:    H5C_unpin_entry()
 *
 * Purpose:	Unpin a cache entry.  The entry can be either protected or
 * 		unprotected at the time of call, but must be pinned.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              3/22/06
 *
 * Changes:	Added extreme sanity checks on entry and exit.
 *				JRM -- 4/26/14
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_unpin_entry(void *_entry_ptr)
{
    H5C_t             * cache_ptr;
    H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)_entry_ptr; /* Pointer to entry to unpin */
    herr_t              ret_value = SUCCEED;    /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity check */
    HDassert(entry_ptr);
    cache_ptr = entry_ptr->cache_ptr;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);

#if H5C_DO_EXTREME_SANITY_CHECKS
    if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
            (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
            (H5C_validate_lru_list(cache_ptr) < 0))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */


    /* Unpin the entry */
    if(H5C__unpin_entry_from_client(cache_ptr, entry_ptr, TRUE) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client")

done:
#if H5C_DO_EXTREME_SANITY_CHECKS
    if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
            (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
            (H5C_validate_lru_list(cache_ptr) < 0))
        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_unpin_entry() */


/*-------------------------------------------------------------------------
 * Function:    H5C_unprotect
 *
 * Purpose:	Undo an H5C_protect() call -- specifically, mark the
 *		entry as unprotected, remove it from the protected list,
 *		and give it back to the replacement policy.
 *
 *		The ADDR argument must be the same as that in the
 *		corresponding call to H5C_protect() and the THING
 *		argument must be the value returned by that call to
 *		H5C_protect().
 *
 *		If the deleted flag is TRUE, simply remove the target entry
 *		from the cache, clear it, and free it without writing it to
 *		disk.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              6/2/04
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
{
    H5C_t *             cache_ptr;
    hbool_t             deleted;
    hbool_t             dirtied;
    hbool_t             set_flush_marker;
    hbool_t             pin_entry;
    hbool_t             unpin_entry;
    hbool_t             free_file_space;
    hbool_t             take_ownership;
    hbool_t             was_clean;
#ifdef H5_HAVE_PARALLEL
    hbool_t             clear_entry = FALSE;
#endif /* H5_HAVE_PARALLEL */
    H5C_cache_entry_t *	entry_ptr;
    H5C_cache_entry_t *	test_entry_ptr;
    herr_t              ret_value = SUCCEED;    /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    deleted                = ((flags & H5C__DELETED_FLAG) != 0);
    dirtied                = ((flags & H5C__DIRTIED_FLAG) != 0);
    set_flush_marker       = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
    pin_entry              = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
    unpin_entry            = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
    free_file_space        = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
    take_ownership         = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);

    HDassert( f );
    HDassert( f->shared );

    cache_ptr = f->shared->cache;

    HDassert( cache_ptr );
    HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
    HDassert( H5F_addr_defined(addr) );
    HDassert( thing );
    HDassert( ! ( pin_entry && unpin_entry ) );
    HDassert( ( ! free_file_space ) || ( deleted ) );   /* deleted flag must accompany free_file_space */
    HDassert( ( ! take_ownership ) || ( deleted ) );    /* deleted flag must accompany take_ownership */
    HDassert( ! ( free_file_space && take_ownership ) );    /* can't have both free_file_space & take_ownership */

    entry_ptr = (H5C_cache_entry_t *)thing;

    HDassert( entry_ptr->addr == addr );

    /* also set the dirtied variable if the dirtied field is set in
     * the entry.
     */
    dirtied |= entry_ptr->dirtied;
    was_clean = ! ( entry_ptr->is_dirty );

#if H5C_DO_EXTREME_SANITY_CHECKS
    if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
            (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
            (H5C_validate_lru_list(cache_ptr) < 0))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    /* if the entry has multiple read only protects, just decrement
     * the ro_ref_counter.  Don't actually unprotect until the ref count
     * drops to zero.
     */
    if(entry_ptr->ro_ref_count > 1) {
        /* Sanity check */
        HDassert(entry_ptr->is_protected);
        HDassert(entry_ptr->is_read_only);

        if(dirtied)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")

        /* Reduce the RO ref count */
        (entry_ptr->ro_ref_count)--;

        /* Pin or unpin the entry as requested. */
        if(pin_entry) {
            /* Pin the entry from a client */
            if(H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
        } else if(unpin_entry) {
            /* Unpin the entry from a client */
            if(H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
        } /* end if */
    } else {
        if(entry_ptr->is_read_only) {
            /* Sanity check */
            HDassert(entry_ptr->ro_ref_count == 1);

            if(dirtied)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")

            entry_ptr->is_read_only = FALSE;
            entry_ptr->ro_ref_count = 0;
        } /* end if */

#ifdef H5_HAVE_PARALLEL
        /* When the H5C code is used to implement the metadata cache in the
         * PHDF5 case, only the cache on process 0 is allowed to write to file.
         * All the other metadata caches must hold dirty entries until they
         * are told that the entries are clean.
         *
         * The clear_on_unprotect flag in the H5C_cache_entry_t structure
         * exists to deal with the case in which an entry is protected when
         * its cache receives word that the entry is now clean.  In this case,
         * the clear_on_unprotect flag is set, and the entry is flushed with
         * the H5C__FLUSH_CLEAR_ONLY_FLAG.
         *
         * All this is a bit awkward, but until the metadata cache entries
         * are contiguous, with only one dirty flag, we have to let the
         * supplied functions deal with resetting the is_dirty flag.
         */
        if(entry_ptr->clear_on_unprotect) {
            /* Sanity check */
            HDassert(entry_ptr->is_dirty);

            entry_ptr->clear_on_unprotect = FALSE;
            if(!dirtied)
                clear_entry = TRUE;
        } /* end if */
#endif /* H5_HAVE_PARALLEL */

        if(!entry_ptr->is_protected)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??")

        /* Mark the entry as dirty if appropriate */
        entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);

        if(dirtied)
            if(entry_ptr->image_up_to_date) {
                entry_ptr->image_up_to_date = FALSE;
                if(entry_ptr->flush_dep_nparents > 0)
                    if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
            } /* end if */

        /* Check for newly dirtied entry */
        if(was_clean && entry_ptr->is_dirty) {
            /* Update index for newly dirtied entry */
            H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)

            /* If the entry's type has a 'notify' callback send a 'entry dirtied'
             * notice now that the entry is fully integrated into the cache.
             */
            if(entry_ptr->type->notify &&
                    (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")

            /* Propagate the flush dep dirty flag up the flush dependency chain
             * if appropriate */
            if(entry_ptr->flush_dep_nparents > 0)
                if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
                    HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
        } /* end if */
        /* Check for newly clean entry */
        else if(!was_clean && !entry_ptr->is_dirty) {
            /* If the entry's type has a 'notify' callback send a 'entry cleaned'
             * notice now that the entry is fully integrated into the cache.
             */
            if(entry_ptr->type->notify &&
                    (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")

            /* Propagate the flush dep clean flag up the flush dependency chain
             * if appropriate */
            if(entry_ptr->flush_dep_nparents > 0)
                if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
                    HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
        } /* end else-if */

        /* Pin or unpin the entry as requested. */
        if(pin_entry) {
            /* Pin the entry from a client */
            if(H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
        } else if(unpin_entry) {
            /* Unpin the entry from a client */
            if(H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
        } /* end if */

        /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
         * the pinned entry list if entry_ptr->is_pinned is TRUE.
         */
        H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL)

        entry_ptr->is_protected = FALSE;

        /* if the entry is dirty, 'or' its flush_marker with the set flush flag,
         * and then add it to the skip list if it isn't there already.
         */
        if(entry_ptr->is_dirty) {
            entry_ptr->flush_marker |= set_flush_marker;
            if(!entry_ptr->in_slist)
                H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
        } /* end if */

        /* this implementation of the "deleted" option is a bit inefficient, as
         * we re-insert the entry to be deleted into the replacement policy
         * data structures, only to remove it again.  Depending on how often
         * we do this, we may want to optimize a bit.
         *
         * On the other hand, this implementation is reasonably clean, and
         * makes good use of existing code.
         *                                             JRM - 5/19/04
         */
        if(deleted) {
            unsigned    flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG |
                                         H5C__FLUSH_INVALIDATE_FLAG);

            /* verify that the target entry is in the cache. */
            H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
            if(test_entry_ptr == NULL)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
            else if(test_entry_ptr != entry_ptr)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?")

            /* Set the 'free file space' flag for the flush, if needed */
            if(free_file_space)
                flush_flags |= H5C__FREE_FILE_SPACE_FLAG;

            /* Set the "take ownership" flag for the flush, if needed */
            if(take_ownership)
                flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;

            /* Delete the entry from the skip list on destroy */
            flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;

            HDassert(((!was_clean) || dirtied) == entry_ptr->in_slist);
            if(H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
        } /* end if */
#ifdef H5_HAVE_PARALLEL
        else if(clear_entry) {

            /* verify that the target entry is in the cache. */
            H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
            if(test_entry_ptr == NULL)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
            else if(test_entry_ptr != entry_ptr)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?")

            if(H5C__flush_single_entry(f, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
        } /* end else if */
#endif /* H5_HAVE_PARALLEL */
    }

    H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)

done:
#if H5C_DO_EXTREME_SANITY_CHECKS
    if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
            (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
            (H5C_validate_lru_list(cache_ptr) < 0))
        HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_unprotect() */
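
/* Usage sketch (illustrative only -- not library code): the typical
 * protect/unprotect pairing as seen from a cache client.  The
 * H5C_protect() signature is assumed from its use elsewhere in the
 * library; error handling is elided.
 *
 *     void *thing;
 *
 *     if(NULL == (thing = H5C_protect(f, type, addr, udata, H5C__NO_FLAGS_SET)))
 *         HGOTO_ERROR(...)
 *
 *     ...modify the thing...
 *
 *     if(H5C_unprotect(f, addr, thing, H5C__DIRTIED_FLAG) < 0)
 *         HGOTO_ERROR(...)
 */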


/*-------------------------------------------------------------------------
 *
 * Function:    H5C_unsettle_entry_ring
 *
 * Purpose:     Advise the metadata cache that the specified entry's free space
 *              manager ring is no longer settled (if it was on entry).
 *
 *              If the target free space manager ring is already
 *              unsettled, do nothing, and return SUCCEED.
 *
 *              If the target free space manager ring is settled, and
 *              we are not in the process of a file shutdown, mark
 *              the ring as unsettled, and return SUCCEED.
 *
 *              If the target free space manager is settled, and we
 *              are in the process of a file shutdown, post an error
 *              message, and return FAIL.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              January 3, 2017
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_unsettle_entry_ring(void *_entry)
{
    H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry;     /* Entry whose ring to unsettle */
    H5C_t *cache;               /* Cache for file */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(entry);
    HDassert(entry->ring != H5C_RING_UNDEFINED);
    HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) || (H5C_RING_MDFSM == entry->ring));
    cache = entry->cache_ptr;
    HDassert(cache);
    HDassert(cache->magic == H5C__H5C_T_MAGIC);

    switch(entry->ring) {
        case H5C_RING_USER:
            /* Do nothing */
            break;

        case H5C_RING_RDFSM:
            if(cache->rdfsm_settled) {
                if(cache->flush_in_progress || cache->close_warning_received)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
                cache->rdfsm_settled = FALSE;
            } /* end if */
            break;

        case H5C_RING_MDFSM:
            if(cache->mdfsm_settled) {
                if(cache->flush_in_progress || cache->close_warning_received)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
                cache->mdfsm_settled = FALSE;
            } /* end if */
            break;

        default:
            HDassert(FALSE); /* this should be un-reachable */
            break;
    } /* end switch */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_unsettle_entry_ring() */


/*-------------------------------------------------------------------------
 * Function:    H5C_unsettle_ring()
 *
 * Purpose:     Advise the metadata cache that the specified free space
 *              manager ring is no longer settled (if it was on entry).
 *
 *              If the target free space manager ring is already
 *              unsettled, do nothing, and return SUCCEED.
 *
 *              If the target free space manager ring is settled, and
 *              we are not in the process of a file shutdown, mark
 *              the ring as unsettled, and return SUCCEED.
 *
 *              If the target free space manager is settled, and we
 *              are in the process of a file shutdown, post an error
 *              message, and return FAIL.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              10/15/16
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_unsettle_ring(H5F_t * f, H5C_ring_t ring)
{
    H5C_t *             cache_ptr;
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(f);
    HDassert(f->shared);
    HDassert(f->shared->cache);
    HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring));
    cache_ptr = f->shared->cache;
    HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic);

    switch(ring) {
        case H5C_RING_RDFSM:
            if(cache_ptr->rdfsm_settled) {
                if(cache_ptr->close_warning_received)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
                cache_ptr->rdfsm_settled = FALSE;
            } /* end if */
            break;

        case H5C_RING_MDFSM:
            if(cache_ptr->mdfsm_settled) {
                if(cache_ptr->close_warning_received)
                    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
                cache_ptr->mdfsm_settled = FALSE;
            } /* end if */
            break;

        default:
            HDassert(FALSE); /* this should be un-reachable */
            break;
    } /* end switch */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_unsettle_ring() */
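
/* Usage note (illustrative): a caller that has modified a settled free
 * space manager outside of entry-level operations might unsettle its
 * ring directly, e.g.:
 *
 *     if(H5C_unsettle_ring(f, H5C_RING_RDFSM) < 0)
 *         HGOTO_ERROR(...)
 */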


/*-------------------------------------------------------------------------
 * Function:    H5C_validate_resize_config()
 *
 * Purpose:	Run a sanity check on the specified sections of the
 *		provided instance of struct H5C_auto_size_ctl_t.
 *
 *		Do nothing and return SUCCEED if no errors are detected,
 *		and flag an error and return FAIL otherwise.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer
 *              3/23/05
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
                           unsigned int tests)
{
    herr_t              ret_value = SUCCEED;    /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    if(config_ptr == NULL)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")

    if(config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")

    if((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {

        if(config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")

        if(config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small")

        if(config_ptr->min_size > config_ptr->max_size)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")

        if(config_ptr->set_initial_size &&
                ((config_ptr->initial_size < config_ptr->min_size) ||
                    (config_ptr->initial_size > config_ptr->max_size)))
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "initial_size must be in the interval [min_size, max_size]")

        if((config_ptr->min_clean_fraction < (double)0.0f) ||
                (config_ptr->min_clean_fraction > (double)1.0f))
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]")

        if(config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")

        if(config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
    } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */


    if((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
        if((config_ptr->incr_mode != H5C_incr__off) &&
                (config_ptr->incr_mode != H5C_incr__threshold))
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")

        if(config_ptr->incr_mode == H5C_incr__threshold) {
            if((config_ptr->lower_hr_threshold < (double)0.0f) ||
                    (config_ptr->lower_hr_threshold > (double)1.0f))
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "lower_hr_threshold must be in the range [0.0, 1.0]")

            if(config_ptr->increment < (double)1.0f)
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0")

            /* no need to check max_increment, as it is a size_t,
             * and thus must be non-negative.
             */
        } /* H5C_incr__threshold */

        switch(config_ptr->flash_incr_mode) {
            case H5C_flash_incr__off:
                /* nothing to do here */
                break;

            case H5C_flash_incr__add_space:
                if((config_ptr->flash_multiple < (double)0.1f) ||
                        (config_ptr->flash_multiple > (double)10.0f))
                    HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "flash_multiple must be in the range [0.1, 10.0]")
                if((config_ptr->flash_threshold < (double)0.1f) ||
                        (config_ptr->flash_threshold > (double)1.0f))
                    HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "flash_threshold must be in the range [0.1, 1.0]")
                break;

            default:
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode")
                break;
        } /* end switch */
    } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */


    if ( (tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0 ) {

        if ( ( config_ptr->decr_mode != H5C_decr__off ) &&
             ( config_ptr->decr_mode != H5C_decr__threshold ) &&
             ( config_ptr->decr_mode != H5C_decr__age_out ) &&
             ( config_ptr->decr_mode != H5C_decr__age_out_with_threshold )
           ) {

            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode")
        }

        if ( config_ptr->decr_mode == H5C_decr__threshold ) {
            if(config_ptr->upper_hr_threshold > (double)1.0f)
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0")

            if((config_ptr->decrement > (double)1.0f) ||
                    (config_ptr->decrement < (double)0.0f))
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]")

            /* no need to check max_decrement as it is a size_t
             * and thus must be non-negative.
             */
        } /* H5C_decr__threshold */

        if((config_ptr->decr_mode == H5C_decr__age_out) ||
                (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) {

            if(config_ptr->epochs_before_eviction < 1)
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive")
            if(config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS)
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big")

            if((config_ptr->apply_empty_reserve) &&
                    ((config_ptr->empty_reserve > (double)1.0f) ||
                        (config_ptr->empty_reserve < (double)0.0f)))
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]")

            /* no need to check max_decrement as it is a size_t
             * and thus must be non-negative.
             */
        } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */

        if(config_ptr->decr_mode == H5C_decr__age_out_with_threshold) {
            if((config_ptr->upper_hr_threshold > (double)1.0f) ||
                    (config_ptr->upper_hr_threshold < (double)0.0f))
                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be in the interval [0.0, 1.0]")
        } /* H5C_decr__age_out_with_threshold */
    } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */


    if ( (tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0 ) {
        if((config_ptr->incr_mode == H5C_incr__threshold)
                && ((config_ptr->decr_mode == H5C_decr__threshold) ||
                       (config_ptr->decr_mode == H5C_decr__age_out_with_threshold))
                && (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold))
            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config")
    } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_validate_resize_config() */
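
/* Validation sketch (illustrative): callers that want to check an entire
 * configuration at once can OR the section flags together; an
 * H5C_RESIZE_CFG__VALIDATE_ALL convenience macro combining all four
 * sections is assumed to be available where the flags are defined.
 *
 *     if(H5C_validate_resize_config(config_ptr,
 *             H5C_RESIZE_CFG__VALIDATE_ALL) < 0)
 *         HGOTO_ERROR(...)
 */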


/*-------------------------------------------------------------------------
 * Function:    H5C_create_flush_dependency()
 *
 * Purpose:	Initiates a parent<->child entry flush dependency.  The parent
 *              entry must be pinned or protected at the time of call, and must
 *              have all dependencies removed before the cache can shut down.
 *
 * Note:	Flush dependencies in the cache indicate that a child entry
 *              must be flushed to the file before its parent.  (This is
 *              currently used to implement Single-Writer/Multiple-Reader (SWMR)
 *              I/O access for data structures in the file).
 *
 *              Creating a flush dependency between two entries will also pin
 *              the parent entry.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              3/05/09
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_create_flush_dependency(void * parent_thing, void * child_thing)
{
    H5C_t             * cache_ptr;
    H5C_cache_entry_t *	parent_entry = (H5C_cache_entry_t *)parent_thing;   /* Ptr to parent thing's entry */
    H5C_cache_entry_t * child_entry = (H5C_cache_entry_t *)child_thing;    /* Ptr to child thing's entry */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(parent_entry);
    HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
    HDassert(H5F_addr_defined(parent_entry->addr));
    HDassert(child_entry);
    HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
    HDassert(H5F_addr_defined(child_entry->addr));
    cache_ptr = parent_entry->cache_ptr;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr == child_entry->cache_ptr);
#ifndef NDEBUG
    /* Make sure the parent is not already a parent */
    {
        unsigned u;

        for(u = 0; u < child_entry->flush_dep_nparents; u++)
            HDassert(child_entry->flush_dep_parent[u] != parent_entry);
    } /* end block */
#endif /* NDEBUG */

    /* More sanity checks */
    if(child_entry == parent_entry)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
    if(!(parent_entry->is_protected || parent_entry->is_pinned))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")

    /* Check for parent not pinned */
    if(!parent_entry->is_pinned) {
        /* Sanity check */
        HDassert(parent_entry->flush_dep_nchildren == 0);
        HDassert(!parent_entry->pinned_from_client);
        HDassert(!parent_entry->pinned_from_cache);

        /* Pin the parent entry */
        parent_entry->is_pinned = TRUE;
        H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
    } /* end if */

    /* Mark the entry as pinned from the cache's action (possibly redundantly) */
    parent_entry->pinned_from_cache = TRUE;

    /* Check if we need to resize the child's parent array */
    if(child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
        if(child_entry->flush_dep_parent_nalloc == 0) {
            /* Array does not exist yet, allocate it */
            HDassert(!child_entry->flush_dep_parent);

            if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_MALLOC(parent, H5C_FLUSH_DEP_PARENT_INIT * sizeof(H5C_cache_entry_t *))))
                HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
            child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
        } /* end if */
        else {
            /* Resize existing array */
            HDassert(child_entry->flush_dep_parent);

            if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_REALLOC(parent, child_entry->flush_dep_parent, 2 * child_entry->flush_dep_parent_nalloc * sizeof(H5C_cache_entry_t *))))
                HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
            child_entry->flush_dep_parent_nalloc *= 2;
        } /* end else */
        cache_ptr->entry_fd_height_change_counter++;
    } /* end if */

    /* Add the dependency to the child's parent array */
    child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
    child_entry->flush_dep_nparents++;

    /* Increment parent's number of children */
    parent_entry->flush_dep_nchildren++;

    /* Adjust the number of dirty children */
    if(child_entry->is_dirty) {
        /* Sanity check */
        HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);

        parent_entry->flush_dep_ndirty_children++;

        /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
        if(parent_entry->type->notify &&
                (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry dirty flag set")
    } /* end if */

    /* adjust the parent's number of unserialized children.  Note
     * that it is possible for an entry to be clean and unserialized.
     */
    if(!child_entry->image_up_to_date) {
        HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);

        parent_entry->flush_dep_nunser_children++;

        /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
        if(parent_entry->type->notify &&
                (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry serialized flag reset")
    } /* end if */

    /* Post-conditions, for successful operation */
    HDassert(parent_entry->is_pinned);
    HDassert(parent_entry->flush_dep_nchildren > 0);
    HDassert(child_entry->flush_dep_parent);
    HDassert(child_entry->flush_dep_nparents > 0);
    HDassert(child_entry->flush_dep_parent_nalloc > 0);
#ifndef NDEBUG
    H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
#endif /* NDEBUG */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_create_flush_dependency() */


/*-------------------------------------------------------------------------
 * Function:    H5C_destroy_flush_dependency()
 *
 * Purpose:	Terminates a parent<->child entry flush dependency.  The
 *              parent entry must be pinned.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              3/05/09
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_destroy_flush_dependency(void *parent_thing, void * child_thing)
{
    H5C_t             * cache_ptr;
    H5C_cache_entry_t *	parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
    H5C_cache_entry_t *	child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
    unsigned            u;                      /* Local index variable */
    herr_t              ret_value = SUCCEED;    /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(parent_entry);
    HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
    HDassert(H5F_addr_defined(parent_entry->addr));
    HDassert(child_entry);
    HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
    HDassert(H5F_addr_defined(child_entry->addr));
    cache_ptr = parent_entry->cache_ptr;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr == child_entry->cache_ptr);

    /* Usage checks */
    if(!parent_entry->is_pinned)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
    if(NULL == child_entry->flush_dep_parent)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Child entry doesn't have a flush dependency parent array")
    if(0 == parent_entry->flush_dep_nchildren)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry flush dependency ref. count has no child dependencies")

    /* Search for parent in child's parent array.  This is a linear search
     * because we do not expect large numbers of parents.  If this changes, we
     * may wish to change the parent array to a skip list */
    for(u = 0; u < child_entry->flush_dep_nparents; u++)
        if(child_entry->flush_dep_parent[u] == parent_entry)
            break;
    if(u == child_entry->flush_dep_nparents)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't a flush dependency parent for child entry")

    /* Remove parent entry from child's parent array */
    if(u < (child_entry->flush_dep_nparents - 1))
        HDmemmove(&child_entry->flush_dep_parent[u],
                &child_entry->flush_dep_parent[u + 1],
                (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
    child_entry->flush_dep_nparents--;

    /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
    parent_entry->flush_dep_nchildren--;
    if(0 == parent_entry->flush_dep_nchildren) {
        /* Sanity check */
        HDassert(parent_entry->pinned_from_cache);

        /* Check if we should unpin parent entry now */
        if(!parent_entry->pinned_from_client)
            if(H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry")

        /* Mark the entry as unpinned from the cache's action */
        parent_entry->pinned_from_cache = FALSE;
    } /* end if */

    /* Adjust parent entry's ndirty_children */
    if(child_entry->is_dirty) {
        /* Sanity check */
        HDassert(parent_entry->flush_dep_ndirty_children > 0);

        parent_entry->flush_dep_ndirty_children--;

        /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
        if(parent_entry->type->notify &&
                (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry dirty flag reset")
    } /* end if */

    /* adjust parent entry's number of unserialized children */
    if(!child_entry->image_up_to_date) {
        HDassert(parent_entry->flush_dep_nunser_children > 0);

        parent_entry->flush_dep_nunser_children--;

        /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
        if(parent_entry->type->notify &&
                (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry serialized flag set")
    } /* end if */

    /* Shrink or free the parent array if appropriate */
    if(child_entry->flush_dep_nparents == 0) {
        child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_FREE(parent, child_entry->flush_dep_parent);
        child_entry->flush_dep_parent_nalloc = 0;
    } /* end if */
    else if(child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT
            && child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) {
        if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_REALLOC(parent, child_entry->flush_dep_parent, (child_entry->flush_dep_parent_nalloc / 4) * sizeof(H5C_cache_entry_t *))))
            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
        child_entry->flush_dep_parent_nalloc /= 4;
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_destroy_flush_dependency() */
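
/* Usage sketch (illustrative only -- not library code): a client building
 * an on-disk data structure pairs these calls so that the child entry is
 * always flushed before its parent (e.g. for SWMR access):
 *
 *     if(H5C_create_flush_dependency(parent_thing, child_thing) < 0)
 *         HGOTO_ERROR(...)
 *
 *     ...update the child; the parent stays pinned until the last
 *        dependency on it is destroyed...
 *
 *     if(H5C_destroy_flush_dependency(parent_thing, child_thing) < 0)
 *         HGOTO_ERROR(...)
 */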


/*************************************************************************/
/**************************** Private Functions: *************************/
/*************************************************************************/


/*-------------------------------------------------------------------------
 * Function:    H5C__pin_entry_from_client()
 *
 * Purpose:	Internal routine to pin a cache entry from a client action.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              3/26/09
 *
 *-------------------------------------------------------------------------
 */
#if H5C_COLLECT_CACHE_STATS
static herr_t
H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
#else
static herr_t
H5C__pin_entry_from_client(H5C_t H5_ATTR_UNUSED *cache_ptr, H5C_cache_entry_t *entry_ptr)
#endif
{
    herr_t ret_value = SUCCEED;    /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(cache_ptr);
    HDassert(entry_ptr);
    HDassert(entry_ptr->is_protected);

    /* Check if the entry is already pinned */
    if(entry_ptr->is_pinned) {
        /* Check if the entry was pinned through an explicit pin from a client */
        if(entry_ptr->pinned_from_client)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned")
    } /* end if */
    else {
        entry_ptr->is_pinned = TRUE;

        H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
    } /* end else */

    /* Mark that the entry was pinned through an explicit pin from a client */
    entry_ptr->pinned_from_client = TRUE;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__pin_entry_from_client() */


/*-------------------------------------------------------------------------
 * Function:    H5C__unpin_entry_real()
 *
 * Purpose:	Internal routine to unpin a cache entry.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              1/6/18
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
    hbool_t update_rp)
{
    herr_t ret_value = SUCCEED;    /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checking */
    HDassert(cache_ptr);
    HDassert(entry_ptr);
    HDassert(entry_ptr->is_pinned);

    /* If requested, update the replacement policy if the entry is not protected */
    if(update_rp && !entry_ptr->is_protected)
        H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)

    /* Unpin the entry now */
    entry_ptr->is_pinned = FALSE;

    /* Update the stats for an unpin operation */
    H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__unpin_entry_real() */


/*-------------------------------------------------------------------------
 * Function:    H5C__unpin_entry_from_client()
 *
 * Purpose:	Internal routine to unpin a cache entry from a client action.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              3/24/09
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
    hbool_t update_rp)
{
    herr_t ret_value = SUCCEED;    /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checking */
    HDassert(cache_ptr);
    HDassert(entry_ptr);

    /* Error checking (should be sanity checks?) */
    if(!entry_ptr->is_pinned)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
    if(!entry_ptr->pinned_from_client)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")

    /* Check if the entry is not pinned from a flush dependency */
    if(!entry_ptr->pinned_from_cache)
        if(H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")

    /* Mark the entry as explicitly unpinned by the client */
    entry_ptr->pinned_from_client = FALSE;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__unpin_entry_from_client() */
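
/* Note (summary of the scheme above): an entry's pinned status is
 * effectively the OR of two independent sources -- pinned_from_client
 * (an explicit client pin, or the H5C__PIN_ENTRY_FLAG on unprotect) and
 * pinned_from_cache (the entry is a flush dependency parent).  The
 * is_pinned flag is only cleared, via H5C__unpin_entry_real(), once both
 * sources have been withdrawn.
 */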

/*-------------------------------------------------------------------------
 *
 * Function:	H5C__auto_adjust_cache_size
 *
 * Purpose:    	Obtain the current full cache hit rate, and compare it
 *		with the hit rate thresholds for modifying cache size.
 *		If one of the thresholds has been crossed, adjusts the
 *		size of the cache accordingly.
 *
 *		The function then resets the full cache hit rate
 *		statistics, and exits.
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *		an attempt to flush a protected item.
 *
 *
 * Programmer:  John Mainzer, 10/7/04
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
{
    H5C_t *			cache_ptr = f->shared->cache;
    hbool_t			reentrant_call = FALSE;
    hbool_t			inserted_epoch_marker = FALSE;
    size_t			new_max_cache_size = 0;
    size_t			old_max_cache_size = 0;
    size_t			new_min_clean_size = 0;
    size_t			old_min_clean_size = 0;
    double			hit_rate;
    enum H5C_resize_status	status = in_spec; /* will change if needed */
    herr_t			ret_value = SUCCEED;      /* Return value */

    FUNC_ENTER_NOAPI_NOINIT

    HDassert( f );
    HDassert( cache_ptr );
    HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
    HDassert( cache_ptr->cache_accesses >=
              (cache_ptr->resize_ctl).epoch_length );
    HDassert( (double)0.0f <= (cache_ptr->resize_ctl).min_clean_fraction );
    HDassert( (cache_ptr->resize_ctl).min_clean_fraction <= (double)100.0f );

    /* check to see if cache_ptr->resize_in_progress is TRUE.  If it is,
     * this is a re-entrant call via a client callback called in the resize
     * process.  To avoid an infinite recursion, set reentrant_call to
     * TRUE, and goto done.
     */
3969     if(cache_ptr->resize_in_progress) {
3970         reentrant_call = TRUE;
3971         HGOTO_DONE(SUCCEED)
3972     } /* end if */
3973 
3974     cache_ptr->resize_in_progress = TRUE;
3975 
3976     if(!cache_ptr->resize_enabled)
3977         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
3978 
3979     HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) || \
3980               ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off));
3981 
3982     if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
3983         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
3984 
3985     HDassert( ( (double)0.0f <= hit_rate ) && ( hit_rate <= (double)1.0f ) );
3986 
3987     switch((cache_ptr->resize_ctl).incr_mode) {
3988         case H5C_incr__off:
3989             if(cache_ptr->size_increase_possible)
3990                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
3991             break;
3992 
3993         case H5C_incr__threshold:
3994             if ( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold ) {
3995 
3996                 if ( ! cache_ptr->size_increase_possible ) {
3997 
3998                     status = increase_disabled;
3999 
4000                 } else if ( cache_ptr->max_cache_size >=
4001                             (cache_ptr->resize_ctl).max_size ) {
4002 
4003                     HDassert( cache_ptr->max_cache_size == \
4004                               (cache_ptr->resize_ctl).max_size );
4005                     status = at_max_size;
4006 
4007                 } else if ( ! cache_ptr->cache_full ) {
4008 
4009                     status = not_full;
4010 
4011                 } else {
4012 
4013                     new_max_cache_size = (size_t)
4014                                      (((double)(cache_ptr->max_cache_size)) *
4015                                       (cache_ptr->resize_ctl).increment);
4016 
4017                     /* clip to max size if necessary */
4018                     if ( new_max_cache_size >
4019                          (cache_ptr->resize_ctl).max_size ) {
4020 
4021                         new_max_cache_size = (cache_ptr->resize_ctl).max_size;
4022                     }
4023 
4024                     /* clip to max increment if necessary */
4025                     if ( ( (cache_ptr->resize_ctl).apply_max_increment ) &&
4026                          ( (cache_ptr->max_cache_size +
4027                             (cache_ptr->resize_ctl).max_increment) <
4028                            new_max_cache_size ) ) {
4029 
4030                         new_max_cache_size = cache_ptr->max_cache_size +
4031                                          (cache_ptr->resize_ctl).max_increment;
4032                     }
4033 
4034                     status = increase;
4035                 }
4036             }
4037             break;
4038 
4039         default:
4040             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
4041     }
4042 
    /* If the decr_mode is either age out or age out with threshold, we
     * must run the marker maintenance code, whether we run the size
     * reduction code or not.  We do this in two places -- here we
     * insert a new marker if the number of active epoch markers is
     * less than the current value of epochs_before_eviction, and after
     * the ageout call, we cycle the markers.
     *
     * However, we can't call the ageout code or cycle the markers
     * unless there was a full complement of markers in place on
     * entry.  The inserted_epoch_marker flag is used to track this.
     */
4054 
    if(((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out ||
            (cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) &&
            cache_ptr->epoch_markers_active < (cache_ptr->resize_ctl).epochs_before_eviction) {
4066 
4067         if(H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
4068             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
4069 
4070         inserted_epoch_marker = TRUE;
4071     }
4072 
4073     /* don't run the cache size decrease code unless the cache size
4074      * increase code is disabled, or the size increase code sees no need
4075      * for action.  In either case, status == in_spec at this point.
4076      */
4077 
4078     if ( status == in_spec ) {
4079 
4080         switch ( (cache_ptr->resize_ctl).decr_mode )
4081         {
4082             case H5C_decr__off:
4083                 break;
4084 
4085             case H5C_decr__threshold:
4086                 if ( hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold ) {
4087 
4088                     if ( ! cache_ptr->size_decrease_possible ) {
4089 
4090                         status = decrease_disabled;
4091 
4092                     } else if ( cache_ptr->max_cache_size <=
4093                                 (cache_ptr->resize_ctl).min_size ) {
4094 
4095                         HDassert( cache_ptr->max_cache_size ==
4096                                   (cache_ptr->resize_ctl).min_size );
4097                         status = at_min_size;
4098 
4099                     } else {
4100 
4101                         new_max_cache_size = (size_t)
4102                                  (((double)(cache_ptr->max_cache_size)) *
4103                                   (cache_ptr->resize_ctl).decrement);
4104 
4105                         /* clip to min size if necessary */
4106                         if ( new_max_cache_size <
4107                              (cache_ptr->resize_ctl).min_size ) {
4108 
4109                             new_max_cache_size =
4110                                 (cache_ptr->resize_ctl).min_size;
4111                         }
4112 
4113                         /* clip to max decrement if necessary */
4114                         if ( ( (cache_ptr->resize_ctl).apply_max_decrement ) &&
4115                              ( ((cache_ptr->resize_ctl).max_decrement +
4116                                 new_max_cache_size) <
4117                                cache_ptr->max_cache_size ) ) {
4118 
4119                             new_max_cache_size = cache_ptr->max_cache_size -
4120                                          (cache_ptr->resize_ctl).max_decrement;
4121                         }
4122 
4123                         status = decrease;
4124                     }
4125                 }
4126                 break;
4127 
4128             case H5C_decr__age_out_with_threshold:
4129             case H5C_decr__age_out:
4130                 if(!inserted_epoch_marker) {
4131                     if(!cache_ptr->size_decrease_possible)
4132                         status = decrease_disabled;
4133                     else {
4134                         if(H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size, write_permitted) < 0)
4135                             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
4136                     } /* end else */
4137                 } /* end if */
4138                 break;
4139 
4140             default:
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown decr_mode")
4142         }
4143     }
4144 
4145     /* cycle the epoch markers here if appropriate */
    if(((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out ||
            (cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) &&
            !inserted_epoch_marker) {
4155 
4156         /* move last epoch marker to the head of the LRU list */
4157         if(H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
4158             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
4159     }
4160 
4161     if ( ( status == increase ) || ( status == decrease ) ) {
4162 
4163         old_max_cache_size = cache_ptr->max_cache_size;
4164         old_min_clean_size = cache_ptr->min_clean_size;
4165 
4166         new_min_clean_size = (size_t)
4167                              ((double)new_max_cache_size *
4168                               ((cache_ptr->resize_ctl).min_clean_fraction));
4169 
        /* new_min_clean_size is of size_t, and thus must be non-negative.
         * Hence we have ( 0 <= new_min_clean_size ) by definition.
         */
4177         HDassert( new_min_clean_size <= new_max_cache_size );
4178         HDassert( (cache_ptr->resize_ctl).min_size <= new_max_cache_size );
4179         HDassert( new_max_cache_size <= (cache_ptr->resize_ctl).max_size );
4180 
4181         cache_ptr->max_cache_size = new_max_cache_size;
4182         cache_ptr->min_clean_size = new_min_clean_size;
4183 
4184         if ( status == increase ) {
4185 
4186             cache_ptr->cache_full = FALSE;
4187 
4188         } else if ( status == decrease ) {
4189 
4190             cache_ptr->size_decreased = TRUE;
4191         }
4192 
4193 	/* update flash cache size increase fields as appropriate */
4194 	if ( cache_ptr->flash_size_increase_possible ) {
4195 
4196             switch ( (cache_ptr->resize_ctl).flash_incr_mode )
4197             {
4198                 case H5C_flash_incr__off:
4199                     HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
4200                     break;
4201 
4202                 case H5C_flash_incr__add_space:
4203                     cache_ptr->flash_size_increase_threshold =
4204                         (size_t)
4205                         (((double)(cache_ptr->max_cache_size)) *
4206                          ((cache_ptr->resize_ctl).flash_threshold));
4207                      break;
4208 
4209                 default: /* should be unreachable */
4210                     HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
4211                     break;
4212             }
4213         }
4214     }
4215 
4216     if ( (cache_ptr->resize_ctl).rpt_fcn != NULL ) {
4217         (*((cache_ptr->resize_ctl).rpt_fcn))
4218             (cache_ptr,
4219              H5C__CURR_AUTO_RESIZE_RPT_FCN_VER,
4220              hit_rate,
4221              status,
4222              old_max_cache_size,
4223              new_max_cache_size,
4224              old_min_clean_size,
4225              new_min_clean_size);
4226     }
4227 
4228     if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
4229         /* this should be impossible... */
4230         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
4231 
4232 done:
4233     /* Sanity checks */
4234     HDassert(cache_ptr->resize_in_progress);
4235     if(!reentrant_call)
4236         cache_ptr->resize_in_progress = FALSE;
4237     HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
4238 
4239     FUNC_LEAVE_NOAPI(ret_value)
4240 } /* H5C__auto_adjust_cache_size() */
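
/* Worked example (hypothetical configuration, for illustration only):
 * consider H5C_incr__threshold mode with max_cache_size = 4 MB,
 * increment = 2.0, apply_max_increment = TRUE, max_increment = 2 MB,
 * and min_clean_fraction = 0.5.  If the hit rate drops below
 * lower_hr_threshold while the cache is full:
 *
 *    new_max_cache_size = (size_t)(4 MB * 2.0)              = 8 MB
 *    clipped to max_cache_size + max_increment (4 + 2 MB)   = 6 MB
 *    new_min_clean_size = (size_t)(6 MB * 0.5)              = 3 MB
 *
 * assuming 6 MB does not exceed (cache_ptr->resize_ctl).max_size.
 */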
4241 
4242 
4243 /*-------------------------------------------------------------------------
4244  *
4245  * Function:    H5C__autoadjust__ageout
4246  *
4247  * Purpose:     Implement the ageout automatic cache size decrement
4248  *		algorithm.  Note that while this code evicts aged out
4249  *		entries, the code does not change the maximum cache size.
4250  *		Instead, the function simply computes the new value (if
4251  *		any change is indicated) and reports this value in
4252  *		*new_max_cache_size_ptr.
4253  *
4254  * Return:      Non-negative on success/Negative on failure or if there was
4255  *              an attempt to flush a protected item.
4256  *
4257  *
4258  * Programmer:  John Mainzer, 11/18/04
4259  *
4260  *-------------------------------------------------------------------------
4261  */
4262 static herr_t
4263 H5C__autoadjust__ageout(H5F_t * f,
4264                         double hit_rate,
4265                         enum H5C_resize_status * status_ptr,
4266                         size_t * new_max_cache_size_ptr,
4267                         hbool_t write_permitted)
4268 {
4269     H5C_t *     cache_ptr = f->shared->cache;
4270     size_t	test_size;
4271     herr_t	ret_value = SUCCEED;      /* Return value */
4272 
4273     FUNC_ENTER_NOAPI_NOINIT
4274 
4275     HDassert( f );
4276     HDassert( cache_ptr );
4277     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
4278     HDassert( ( status_ptr ) && ( *status_ptr == in_spec ) );
4279     HDassert( ( new_max_cache_size_ptr ) && ( *new_max_cache_size_ptr == 0 ) );
4280 
4281     /* remove excess epoch markers if any */
4282     if(cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction)
4283         if(H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
4284             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
4285 
    if(((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
            (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) &&
             (hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold))) {
4295 
4296         if ( cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size ){
4297 
4298             /* evict aged out cache entries if appropriate... */
4299             if(H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0)
4300                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
4301 
4302             /* ... and then reduce cache size if appropriate */
4303             if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
4304 
4305                 if ( (cache_ptr->resize_ctl).apply_empty_reserve ) {
4306 
4307                     test_size = (size_t)(((double)cache_ptr->index_size) /
4308                                 (1 - (cache_ptr->resize_ctl).empty_reserve));
4309 
4310                     if ( test_size < cache_ptr->max_cache_size ) {
4311 
4312                         *status_ptr = decrease;
4313                         *new_max_cache_size_ptr = test_size;
4314                     }
4315                 } else {
4316 
4317                     *status_ptr = decrease;
4318                     *new_max_cache_size_ptr = cache_ptr->index_size;
4319                 }
4320 
4321                 if ( *status_ptr == decrease ) {
4322 
4323                     /* clip to min size if necessary */
4324                     if ( *new_max_cache_size_ptr <
4325                          (cache_ptr->resize_ctl).min_size ) {
4326 
4327                         *new_max_cache_size_ptr =
4328                                 (cache_ptr->resize_ctl).min_size;
4329                     }
4330 
4331                     /* clip to max decrement if necessary */
4332                     if ( ( (cache_ptr->resize_ctl).apply_max_decrement ) &&
4333                          ( ((cache_ptr->resize_ctl).max_decrement +
4334                             *new_max_cache_size_ptr) <
4335                            cache_ptr->max_cache_size ) ) {
4336 
4337                         *new_max_cache_size_ptr = cache_ptr->max_cache_size -
4338                                          (cache_ptr->resize_ctl).max_decrement;
4339                     }
4340                 }
4341             }
4342         } else {
4343 
4344             *status_ptr = at_min_size;
4345         }
4346     }
4347 
4348 done:
4349 
4350     FUNC_LEAVE_NOAPI(ret_value)
4351 
4352 } /* H5C__autoadjust__ageout() */
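
/* Worked example (hypothetical numbers, for illustration only): with
 * apply_empty_reserve = TRUE, empty_reserve = 0.1, index_size = 9 MB
 * after the evictions, and max_cache_size = 16 MB:
 *
 *    test_size = (size_t)(9 MB / (1 - 0.1)) = 10 MB
 *
 * Since 10 MB < 16 MB, *status_ptr is set to decrease and
 * *new_max_cache_size_ptr to 10 MB -- i.e. the cache is sized so that
 * roughly 10% of it remains empty, subject to the min size and max
 * decrement clipping above.
 */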
4353 
4354 
4355 /*-------------------------------------------------------------------------
4356  *
4357  * Function:    H5C__autoadjust__ageout__cycle_epoch_marker
4358  *
4359  * Purpose:     Remove the oldest epoch marker from the LRU list,
4360  *		and reinsert it at the head of the LRU list.  Also
4361  *		remove the epoch marker's index from the head of the
4362  *		ring buffer, and re-insert it at the tail of the ring
4363  *		buffer.
4364  *
4365  * Return:      SUCCEED on success/FAIL on failure.
4366  *
4367  * Programmer:  John Mainzer, 11/22/04
4368  *
4369  *-------------------------------------------------------------------------
4370  */
4371 static herr_t
4372 H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
4373 {
4374     herr_t                      ret_value = SUCCEED;      /* Return value */
4375     int i;
4376 
4377     FUNC_ENTER_NOAPI_NOINIT
4378 
4379     HDassert( cache_ptr );
4380     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
4381 
4382     if(cache_ptr->epoch_markers_active <= 0)
4383         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
4384 
4385     /* remove the last marker from both the ring buffer and the LRU list */
4386 
4387     i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
4388 
4389     cache_ptr->epoch_marker_ringbuf_first =
4390             (cache_ptr->epoch_marker_ringbuf_first + 1) %
4391             (H5C__MAX_EPOCH_MARKERS + 1);
4392 
4393     cache_ptr->epoch_marker_ringbuf_size -= 1;
4394 
4395     if(cache_ptr->epoch_marker_ringbuf_size < 0)
4396         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
4397     if((cache_ptr->epoch_marker_active)[i] != TRUE)
4398         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
4399 
4400     H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
4401                     (cache_ptr)->LRU_head_ptr, \
4402                     (cache_ptr)->LRU_tail_ptr, \
4403                     (cache_ptr)->LRU_list_len, \
4404                     (cache_ptr)->LRU_list_size, \
4405                     (FAIL))
4406 
4407     /* now, re-insert it at the head of the LRU list, and at the tail of
4408      * the ring buffer.
4409      */
4410 
4411     HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
4412     HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
4413     HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
4414 
4415     cache_ptr->epoch_marker_ringbuf_last =
4416         (cache_ptr->epoch_marker_ringbuf_last + 1) %
4417         (H5C__MAX_EPOCH_MARKERS + 1);
4418 
4419     (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
4420 
4421     cache_ptr->epoch_marker_ringbuf_size += 1;
4422 
4423     if(cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS)
4424         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
4425 
4426     H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \
4427                      (cache_ptr)->LRU_head_ptr, \
4428                      (cache_ptr)->LRU_tail_ptr, \
4429                      (cache_ptr)->LRU_list_len, \
4430                      (cache_ptr)->LRU_list_size, \
4431                      (FAIL))
4432 done:
4433 
4434     FUNC_LEAVE_NOAPI(ret_value)
4435 
4436 } /* H5C__autoadjust__ageout__cycle_epoch_marker() */
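
/* Illustration (hypothetical state, not from the library): the epoch
 * marker ring buffer is a circular array with H5C__MAX_EPOCH_MARKERS + 1
 * slots.  Assuming H5C__MAX_EPOCH_MARKERS is 10, so arithmetic is mod 11,
 * a cycle with first == 10 and last == 3 proceeds as:
 *
 *    first = (10 + 1) % 11 = 0    -- oldest marker popped from the head
 *    last  = ( 3 + 1) % 11 = 4    -- same marker pushed at the tail
 *
 * The ring buffer size is unchanged, while the marker itself is
 * re-inserted at the head of the LRU list.
 */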
4437 
4438 
4439 /*-------------------------------------------------------------------------
4440  *
4441  * Function:    H5C__autoadjust__ageout__evict_aged_out_entries
4442  *
4443  * Purpose:     Evict clean entries in the cache that haven't
4444  *		been accessed for at least
4445  *              (cache_ptr->resize_ctl).epochs_before_eviction epochs,
4446  *      	and flush dirty entries that haven't been accessed for
4447  *		that amount of time.
4448  *
4449  *		Depending on configuration, the function will either
4450  *		flush or evict all such entries, or all such entries it
4451  *		encounters until it has freed the maximum amount of space
4452  *		allowed under the maximum decrement.
4453  *
4454  *		If we are running in parallel mode, writes may not be
4455  *		permitted.  If so, the function simply skips any dirty
4456  *		entries it may encounter.
4457  *
4458  *		The function makes no attempt to maintain the minimum
4459  *		clean size, as there is no guarantee that the cache size
4460  *		will be changed.
4461  *
4462  *		If there is no cache size change, the minimum clean size
4463  *		constraint will be met through a combination of clean
4464  *		entries and free space in the cache.
4465  *
4466  *		If there is a cache size reduction, the minimum clean size
4467  *		will be re-calculated, and will be enforced the next time
4468  *		we have to make space in the cache.
4469  *
4470  *              Observe that this function cannot occasion a read.
4471  *
4472  * Return:      Non-negative on success/Negative on failure.
4473  *
4474  * Programmer:  John Mainzer, 11/22/04
4475  *
4476  *-------------------------------------------------------------------------
4477  */
4478 static herr_t
4479 H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted)
4480 {
4481     H5C_t *		cache_ptr = f->shared->cache;
4482     size_t		eviction_size_limit;
4483     size_t		bytes_evicted = 0;
4484     hbool_t		prev_is_dirty = FALSE;
4485     hbool_t             restart_scan;
4486     H5C_cache_entry_t * entry_ptr;
4487     H5C_cache_entry_t * next_ptr;
4488     H5C_cache_entry_t * prev_ptr;
4489     herr_t              ret_value = SUCCEED;      /* Return value */
4490 
4491     FUNC_ENTER_NOAPI_NOINIT
4492 
4493     HDassert( f );
4494     HDassert( cache_ptr );
4495     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
4496 
    /* if there is a limit on the amount that the cache size can be decreased
     * in any one round of the cache size reduction algorithm, load that
     * limit into eviction_size_limit.  Otherwise, set eviction_size_limit
     * to the equivalent of infinity.  The current size of the index will
     * do nicely.
     */
4503     if ( (cache_ptr->resize_ctl).apply_max_decrement ) {
4504 
4505         eviction_size_limit = (cache_ptr->resize_ctl).max_decrement;
4506 
4507     } else {
4508 
4509         eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
4510     }
4511 
4512     if ( write_permitted ) {
4513 
4514         restart_scan = FALSE;
4515         entry_ptr = cache_ptr->LRU_tail_ptr;
4516 
4517         while ( ( entry_ptr != NULL ) &&
4518                 ( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
4519                 ( bytes_evicted < eviction_size_limit ) )
4520         {
4521             hbool_t skipping_entry = FALSE;
4522 
4523             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
4524             HDassert( ! (entry_ptr->is_protected) );
4525             HDassert( ! (entry_ptr->is_read_only) );
4526             HDassert( (entry_ptr->ro_ref_count) == 0 );
4527 
4528 	    next_ptr = entry_ptr->next;
4529             prev_ptr = entry_ptr->prev;
4530 
4531 	    if(prev_ptr != NULL)
4532                 prev_is_dirty = prev_ptr->is_dirty;
4533 
4534             if(entry_ptr->is_dirty ) {
4535                 HDassert(!entry_ptr->prefetched_dirty);
4536 
4537                 /* dirty corked entry is skipped */
4538                 if(entry_ptr->tag_info && entry_ptr->tag_info->corked)
4539                     skipping_entry = TRUE;
4540                 else {
4541                     /* reset entries_removed_counter and
4542                      * last_entry_removed_ptr prior to the call to
4543                      * H5C__flush_single_entry() so that we can spot
4544                      * unexpected removals of entries from the cache,
4545                      * and set the restart_scan flag if proceeding
4546                      * would be likely to cause us to scan an entry
4547                      * that is no longer in the cache.
4548                      */
4549                     cache_ptr->entries_removed_counter = 0;
4550                     cache_ptr->last_entry_removed_ptr  = NULL;
4551 
4552                     if(H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
4553                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
4554 
4555                     if(cache_ptr->entries_removed_counter > 1 || cache_ptr->last_entry_removed_ptr == prev_ptr)
4556                         restart_scan = TRUE;
4557                 } /* end else */
4558             } /* end if */
4559             else if(!entry_ptr->prefetched_dirty) {
4560 
4561                 bytes_evicted += entry_ptr->size;
4562 
4563                 if(H5C__flush_single_entry(f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
4564                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
4565             } /* end else-if */
4566             else {
4567                 HDassert(!entry_ptr->is_dirty);
4568                 HDassert(entry_ptr->prefetched_dirty);
4569 
4570                 skipping_entry = TRUE;
4571             } /* end else */
4572 
4573             if(prev_ptr != NULL) {
4574                 if(skipping_entry)
4575                     entry_ptr = prev_ptr;
4576 		else if(restart_scan || (prev_ptr->is_dirty != prev_is_dirty)
4577                           || (prev_ptr->next != next_ptr)
4578                           || (prev_ptr->is_protected)
4579                           || (prev_ptr->is_pinned)) {
4580                     /* Something has happened to the LRU -- start over
4581 		     * from the tail.
4582                      */
4583                     restart_scan = FALSE;
4584                     entry_ptr = cache_ptr->LRU_tail_ptr;
4585 
4586 		    H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
4587                 } /* end else-if */
4588                 else
4589                     entry_ptr = prev_ptr;
4590 	    } /* end if */
4591             else
4592 		entry_ptr = NULL;
4593         } /* end while */
4594 
        /* for now at least, don't bother to maintain the minimum clean size,
         * as the cache should now be less than its maximum size.  Due to
         * the vagaries of the cache size reduction algorithm, we may not
         * reduce the size of the cache.
         *
         * If we do, we will calculate a new minimum clean size, which will
         * be enforced the next time we try to make space in the cache.
         *
         * If we don't, no action is necessary, as we have just evicted
         * and/or flushed a bunch of entries and therefore the sum of the
         * clean and free space in the cache must be greater than or equal
         * to the min clean space requirement (assuming that requirement
         * was met on entry).
         */
4609 
4610     } /* end if */
4611     else /* ! write_permitted */  {
4612         /* Since we are not allowed to write, all we can do is evict
4613          * any clean entries that we may encounter before we either
4614          * hit the eviction size limit, or encounter the epoch marker.
4615          *
4616          * If we are operating read only, this isn't an issue, as there
4617          * will not be any dirty entries.
4618          *
4619          * If we are operating in R/W mode, all the dirty entries we
4620          * skip will be flushed the next time we attempt to make space
4621          * when writes are permitted.  This may have some local
4622          * performance implications, but it shouldn't cause any net
4623          * slowdown.
4624          */
4625         HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
4626         entry_ptr = cache_ptr->LRU_tail_ptr;
4627         while(entry_ptr != NULL &&
4628                 ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
4629                 (bytes_evicted < eviction_size_limit)) {
4630             HDassert(!(entry_ptr->is_protected));
4631 
4632             prev_ptr = entry_ptr->prev;
4633 
4634             if(!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
4635                 if(H5C__flush_single_entry(f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
4636                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
4637 
4638             /* just skip the entry if it is dirty, as we can't do
4639              * anything with it now since we can't write.
4640 	     *
4641 	     * Since all entries are clean, serialize() will not be called,
4642 	     * and thus we needn't test to see if the LRU has been changed
4643 	     * out from under us.
4644              */
4645             entry_ptr = prev_ptr;
4646         } /* end while */
4647     } /* end else */
4648 
4649     if(cache_ptr->index_size < cache_ptr->max_cache_size)
4650         cache_ptr->cache_full = FALSE;
4651 
4652 done:
4653     FUNC_LEAVE_NOAPI(ret_value)
4654 } /* H5C__autoadjust__ageout__evict_aged_out_entries() */
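
/* Illustration (hypothetical numbers): with apply_max_decrement = TRUE
 * and max_decrement = 1 MB, the scans above stop once 1 MB worth of
 * entries has been evicted in the current epoch.  With
 * apply_max_decrement = FALSE, eviction_size_limit is set to index_size,
 * a bound the scans can never exceed -- effectively no limit at all.
 */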
4655 
4656 
4657 /*-------------------------------------------------------------------------
4658  *
4659  * Function:    H5C__autoadjust__ageout__insert_new_marker
4660  *
4661  * Purpose:     Find an unused marker cache entry, mark it as used, and
4662  *		insert it at the head of the LRU list.  Also add the
4663  *		marker's index in the epoch_markers array.
4664  *
4665  * Return:      SUCCEED on success/FAIL on failure.
4666  *
4667  * Programmer:  John Mainzer, 11/19/04
4668  *
4669  *-------------------------------------------------------------------------
4670  */
4671 static herr_t
4672 H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr)
4673 {
4674     herr_t                      ret_value = SUCCEED;      /* Return value */
4675     int i;
4676 
4677     FUNC_ENTER_NOAPI_NOINIT
4678 
4679     HDassert( cache_ptr );
4680     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
4681 
4682     if(cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction)
4683         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
4684 
    /* find an unused marker -- note that the bounds check must come
     * first, to avoid reading past the end of the array.
     */
    i = 0;
    while(i < H5C__MAX_EPOCH_MARKERS && (cache_ptr->epoch_marker_active)[i])
        i++;
4689 
4690     if(i >= H5C__MAX_EPOCH_MARKERS)
4691         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
4692 
4693     HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
4694     HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
4695     HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL );
4696 
4697     (cache_ptr->epoch_marker_active)[i] = TRUE;
4698 
4699     cache_ptr->epoch_marker_ringbuf_last =
4700         (cache_ptr->epoch_marker_ringbuf_last + 1) %
4701         (H5C__MAX_EPOCH_MARKERS + 1);
4702 
4703     (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
4704 
4705     cache_ptr->epoch_marker_ringbuf_size += 1;
4706 
4707     if ( cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS ) {
4708 
4709         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
4710     }
4711 
4712     H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \
4713                      (cache_ptr)->LRU_head_ptr, \
4714                      (cache_ptr)->LRU_tail_ptr, \
4715                      (cache_ptr)->LRU_list_len, \
4716                      (cache_ptr)->LRU_list_size, \
4717                      (FAIL))
4718 
4719     cache_ptr->epoch_markers_active += 1;
4720 
4721 done:
4722 
4723     FUNC_LEAVE_NOAPI(ret_value)
4724 
4725 } /* H5C__autoadjust__ageout__insert_new_marker() */
4726 
4727 
4728 /*-------------------------------------------------------------------------
4729  *
4730  * Function:    H5C__autoadjust__ageout__remove_all_markers
4731  *
4732  * Purpose:     Remove all epoch markers from the LRU list and mark them
4733  *		as inactive.
4734  *
4735  * Return:      SUCCEED on success/FAIL on failure.
4736  *
4737  * Programmer:  John Mainzer, 11/22/04
4738  *
4739  *-------------------------------------------------------------------------
4740  */
4741 static herr_t
4742 H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr)
4743 {
4744     herr_t                      ret_value = SUCCEED;      /* Return value */
4745     int i;
4746     int ring_buf_index;
4747 
4748     FUNC_ENTER_NOAPI_NOINIT
4749 
4750     HDassert( cache_ptr );
4751     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
4752 
4753     while ( cache_ptr->epoch_markers_active > 0 )
4754     {
4755         /* get the index of the last epoch marker in the LRU list
4756          * and remove it from the ring buffer.
4757          */
4758 
4759         ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
4760         i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
4761 
4762         cache_ptr->epoch_marker_ringbuf_first =
4763             (cache_ptr->epoch_marker_ringbuf_first + 1) %
4764             (H5C__MAX_EPOCH_MARKERS + 1);
4765 
4766         cache_ptr->epoch_marker_ringbuf_size -= 1;
4767 
4768         if(cache_ptr->epoch_marker_ringbuf_size < 0)
4769             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
4770 
4771         if((cache_ptr->epoch_marker_active)[i] != TRUE)
4772             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
4773 
4774         /* remove the epoch marker from the LRU list */
4775         H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
4776                         (cache_ptr)->LRU_head_ptr, \
4777                         (cache_ptr)->LRU_tail_ptr, \
4778                         (cache_ptr)->LRU_list_len, \
4779                         (cache_ptr)->LRU_list_size, \
4780                         (FAIL))
4781 
4782         /* mark the epoch marker as unused. */
4783         (cache_ptr->epoch_marker_active)[i] = FALSE;
4784 
4785         HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
4786         HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
4787         HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL );
4788 
4789         /* decrement the number of active epoch markers */
4790         cache_ptr->epoch_markers_active -= 1;
4791 
4792         HDassert( cache_ptr->epoch_markers_active == \
4793                   cache_ptr->epoch_marker_ringbuf_size );
4794     }
4795 
4796 done:
4797 
4798     FUNC_LEAVE_NOAPI(ret_value)
4799 
4800 } /* H5C__autoadjust__ageout__remove_all_markers() */
4801 
4802 
4803 /*-------------------------------------------------------------------------
4804  *
4805  * Function:    H5C__autoadjust__ageout__remove_excess_markers
4806  *
 * Purpose:     Remove epoch markers from the end of the LRU list and
 *		mark them as inactive until the number of active markers
 *		equals the current value of
 *		(cache_ptr->resize_ctl).epochs_before_eviction.
4811  *
4812  * Return:      SUCCEED on success/FAIL on failure.
4813  *
4814  * Programmer:  John Mainzer, 11/19/04
4815  *
4816  *-------------------------------------------------------------------------
4817  */
4818 static herr_t
4819 H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr)
4820 {
4821     herr_t	ret_value = SUCCEED;      /* Return value */
4822     int		i;
4823     int		ring_buf_index;
4824 
4825     FUNC_ENTER_NOAPI_NOINIT
4826 
4827     HDassert( cache_ptr );
4828     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
4829 
4830     if(cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction)
4831         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
4832 
4833     while(cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) {
4834         /* get the index of the last epoch marker in the LRU list
4835          * and remove it from the ring buffer.
4836          */
4837 
4838         ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
4839         i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
4840 
4841         cache_ptr->epoch_marker_ringbuf_first =
4842             (cache_ptr->epoch_marker_ringbuf_first + 1) %
4843             (H5C__MAX_EPOCH_MARKERS + 1);
4844 
4845         cache_ptr->epoch_marker_ringbuf_size -= 1;
4846 
4847         if(cache_ptr->epoch_marker_ringbuf_size < 0)
4848             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
4849         if((cache_ptr->epoch_marker_active)[i] != TRUE)
4850             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
4851 
4852         /* remove the epoch marker from the LRU list */
4853         H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
4854                         (cache_ptr)->LRU_head_ptr, \
4855                         (cache_ptr)->LRU_tail_ptr, \
4856                         (cache_ptr)->LRU_list_len, \
4857                         (cache_ptr)->LRU_list_size, \
4858                         (FAIL))
4859 
4860         /* mark the epoch marker as unused. */
4861         (cache_ptr->epoch_marker_active)[i] = FALSE;
4862 
4863         HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
4864         HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
4865         HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL );
4866 
4867         /* decrement the number of active epoch markers */
4868         cache_ptr->epoch_markers_active -= 1;
4869 
4870         HDassert( cache_ptr->epoch_markers_active == \
4871                   cache_ptr->epoch_marker_ringbuf_size );
4872     }
4873 
4874 done:
4875 
4876     FUNC_LEAVE_NOAPI(ret_value)
4877 
4878 } /* H5C__autoadjust__ageout__remove_excess_markers() */
4879 
4880 
4881 /*-------------------------------------------------------------------------
4882  *
4883  * Function:    H5C__flash_increase_cache_size
4884  *
 * Purpose:     If there are not at least new_entry_size - old_entry_size
4886  *              bytes of free space in the cache and the current
4887  *              max_cache_size is less than (cache_ptr->resize_ctl).max_size,
4888  *              perform a flash increase in the cache size and then reset
4889  *              the full cache hit rate statistics, and exit.
4890  *
4891  * Return:      Non-negative on success/Negative on failure.
4892  *
4893  * Programmer:  John Mainzer, 12/31/07
4894  *
4895  *-------------------------------------------------------------------------
4896  */
4897 static herr_t
4898 H5C__flash_increase_cache_size(H5C_t * cache_ptr,
4899                                size_t old_entry_size,
4900                                size_t new_entry_size)
4901 {
4902     size_t                     new_max_cache_size = 0;
4903     size_t                     old_max_cache_size = 0;
4904     size_t                     new_min_clean_size = 0;
4905     size_t                     old_min_clean_size = 0;
4906     size_t                     space_needed;
4907     enum H5C_resize_status     status = flash_increase;  /* may change */
4908     double                     hit_rate;
4909     herr_t                     ret_value = SUCCEED;      /* Return value */
4910 
4911     FUNC_ENTER_NOAPI_NOINIT
4912 
4913     HDassert( cache_ptr );
4914     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
4915     HDassert( cache_ptr->flash_size_increase_possible );
4916     HDassert( new_entry_size > cache_ptr->flash_size_increase_threshold );
4917     HDassert( old_entry_size < new_entry_size );
4918 
4919     if(old_entry_size >= new_entry_size)
4920         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
4921 
4922     space_needed = new_entry_size - old_entry_size;
4923 
4924     if ( ( (cache_ptr->index_size + space_needed) >
4925                             cache_ptr->max_cache_size ) &&
4926          ( cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size ) ) {
4927 
4928         /* we have work to do */
4929 
4930         switch ( (cache_ptr->resize_ctl).flash_incr_mode )
4931         {
4932             case H5C_flash_incr__off:
4933                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
4934                 break;
4935 
4936             case H5C_flash_incr__add_space:
4937                 if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
4938 
4939                     HDassert( (cache_ptr->max_cache_size - cache_ptr->index_size)
4940                                < space_needed );
4941                     space_needed -= cache_ptr->max_cache_size -
4942 			            cache_ptr->index_size;
4943                 }
4944                 space_needed =
4945                     (size_t)(((double)space_needed) *
4946                              (cache_ptr->resize_ctl).flash_multiple);
4947 
4948                 new_max_cache_size = cache_ptr->max_cache_size + space_needed;
4949 
4950                 break;
4951 
4952             default: /* should be unreachable */
4953                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
4954                 break;
4955         }
4956 
4957         if ( new_max_cache_size > (cache_ptr->resize_ctl).max_size ) {
4958 
4959             new_max_cache_size = (cache_ptr->resize_ctl).max_size;
4960         }
4961 
4962         HDassert( new_max_cache_size > cache_ptr->max_cache_size );
4963 
4964         new_min_clean_size = (size_t)
4965                              ((double)new_max_cache_size *
4966                               ((cache_ptr->resize_ctl).min_clean_fraction));
4967 
4968         HDassert( new_min_clean_size <= new_max_cache_size );
4969 
4970         old_max_cache_size = cache_ptr->max_cache_size;
4971         old_min_clean_size = cache_ptr->min_clean_size;
4972 
4973         cache_ptr->max_cache_size = new_max_cache_size;
4974         cache_ptr->min_clean_size = new_min_clean_size;
4975 
4976         /* update flash cache size increase fields as appropriate */
4977         HDassert ( cache_ptr->flash_size_increase_possible );
4978 
4979         switch ( (cache_ptr->resize_ctl).flash_incr_mode )
4980         {
4981             case H5C_flash_incr__off:
4982                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
4983                 break;
4984 
4985             case H5C_flash_incr__add_space:
4986                 cache_ptr->flash_size_increase_threshold =
4987                     (size_t)
4988                     (((double)(cache_ptr->max_cache_size)) *
4989                      ((cache_ptr->resize_ctl).flash_threshold));
4990                 break;
4991 
4992             default: /* should be unreachable */
4993                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
4994                 break;
4995         }
4996 
4997         /* note that we don't cycle the epoch markers.  We can
4998 	 * argue either way as to whether we should, but for now
4999 	 * we don't.
5000 	 */
5001 
5002         if ( (cache_ptr->resize_ctl).rpt_fcn != NULL ) {
5003 
5004             /* get the hit rate for the reporting function.  Should still
5005              * be good as we haven't reset the hit rate statistics.
5006              */
5007             if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
5008                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
5009 
5010             (*((cache_ptr->resize_ctl).rpt_fcn))
5011                 (cache_ptr,
5012                  H5C__CURR_AUTO_RESIZE_RPT_FCN_VER,
5013                  hit_rate,
5014                  status,
5015                  old_max_cache_size,
5016                  new_max_cache_size,
5017                  old_min_clean_size,
5018                  new_min_clean_size);
5019         }
5020 
5021         if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
5022             /* this should be impossible... */
5023             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
5024     }
5025 
5026 done:
5027 
5028     FUNC_LEAVE_NOAPI(ret_value)
5029 
5030 } /* H5C__flash_increase_cache_size() */
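
/* Worked example (hypothetical configuration, for illustration only):
 * with max_cache_size = 4 MB, index_size = 3.5 MB, flash_multiple = 1.0,
 * and an entry growing from 0.25 MB to 1.25 MB:
 *
 *    space_needed       = 1.25 MB - 0.25 MB          = 1.0 MB
 *    free space         = 4 MB - 3.5 MB              = 0.5 MB
 *    space_needed       = (1.0 MB - 0.5 MB) * 1.0    = 0.5 MB
 *    new_max_cache_size = 4 MB + 0.5 MB              = 4.5 MB
 *
 * after which min_clean_size and flash_size_increase_threshold are
 * recomputed from the new maximum, subject to clipping at
 * (cache_ptr->resize_ctl).max_size.
 */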
5031 
5032 
5033 /*-------------------------------------------------------------------------
5034  * Function:    H5C__flush_invalidate_cache
5035  *
5036  * Purpose:	Flush and destroy the entries contained in the target
5037  *		cache.
5038  *
5039  *		If the cache contains protected entries, the function will
5040  *		fail, as protected entries cannot be either flushed or
5041  *		destroyed.  However all unprotected entries should be
5042  *		flushed and destroyed before the function returns failure.
5043  *
5044  *		While pinned entries can usually be flushed, they cannot
5045  *		be destroyed.  However, they should be unpinned when all
5046  *		the entries that reference them have been destroyed (thus
 *		reducing the pinned entry's reference count to 0, allowing
5048  *		it to be unpinned).
5049  *
5050  *		If pinned entries are present, the function makes repeated
5051  *		passes through the cache, flushing all dirty entries
5052  *		(including the pinned dirty entries where permitted) and
5053  *		destroying all unpinned entries.  This process is repeated
5054  *		until either the cache is empty, or the number of pinned
5055  *		entries stops decreasing on each pass.
5056  *
5057  * Return:      Non-negative on success/Negative on failure or if there was
5058  *		a request to flush all items and something was protected.
5059  *
5060  * Programmer:  John Mainzer
 *		3/24/05
5062  *
5063  *-------------------------------------------------------------------------
5064  */
5065 static herr_t
5066 H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
5067 {
5068     H5C_t *		cache_ptr;
5069     H5C_ring_t		ring;
5070     herr_t		ret_value = SUCCEED;
5071 
5072     FUNC_ENTER_STATIC
5073 
5074     HDassert(f);
5075     HDassert(f->shared);
5076     cache_ptr = f->shared->cache;
5077     HDassert(cache_ptr);
5078     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
5079     HDassert(cache_ptr->slist_ptr);
5080 
5081 #if H5C_DO_SANITY_CHECKS
5082 {
5083     int32_t		i;
5084     uint32_t		index_len = 0;
5085     uint32_t		slist_len = 0;
5086     size_t		index_size = (size_t)0;
5087     size_t		clean_index_size = (size_t)0;
5088     size_t		dirty_index_size = (size_t)0;
5089     size_t		slist_size = (size_t)0;
5090 
5091     HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
5092     HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
5093     HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
5094     HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
5095     HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
5096     HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
5097 
5098     for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
5099         index_len += cache_ptr->index_ring_len[i];
5100         index_size += cache_ptr->index_ring_size[i];
5101         clean_index_size += cache_ptr->clean_index_ring_size[i];
5102         dirty_index_size += cache_ptr->dirty_index_ring_size[i];
5103 
5104         slist_len += cache_ptr->slist_ring_len[i];
5105         slist_size += cache_ptr->slist_ring_size[i];
5106     } /* end for */
5107 
5108     HDassert(cache_ptr->index_len == index_len);
5109     HDassert(cache_ptr->index_size == index_size);
5110     HDassert(cache_ptr->clean_index_size == clean_index_size);
5111     HDassert(cache_ptr->dirty_index_size == dirty_index_size);
5112     HDassert(cache_ptr->slist_len == slist_len);
5113     HDassert(cache_ptr->slist_size == slist_size);
5114 }
5115 #endif /* H5C_DO_SANITY_CHECKS */
5116 
5117     /* remove ageout markers if present */
5118     if(cache_ptr->epoch_markers_active > 0)
5119         if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
5120             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
5121 
5122     /* flush invalidate each ring, starting from the outermost ring and
5123      * working inward.
5124      */
5125     ring = H5C_RING_USER;
5126     while(ring < H5C_RING_NTYPES) {
5127         if(H5C_flush_invalidate_ring(f, ring, flags) < 0)
5128             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
5129         ring++;
5130     } /* end while */
5131 
5132     /* Invariants, after destroying all entries in the hash table */
5133     if(!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
5134         HDassert(cache_ptr->index_size == 0);
5135         HDassert(cache_ptr->clean_index_size == 0);
5136         HDassert(cache_ptr->pel_len == 0);
5137         HDassert(cache_ptr->pel_size == 0);
5138     } /* end if */
5139     else {
5140         H5C_cache_entry_t *entry_ptr;   /* Cache entry */
5141         unsigned u;                     /* Local index variable */
5142 
        /* All rings below the superblock ring should be empty now */
        /* (the H5C_RING_SB ring holds the superblock) */
5145         for(u = H5C_RING_USER; u < H5C_RING_SB; u++) {
5146             HDassert(cache_ptr->index_ring_len[u] == 0);
5147             HDassert(cache_ptr->index_ring_size[u] == 0);
5148             HDassert(cache_ptr->clean_index_ring_size[u] == 0);
5149         } /* end for */
5150 
5151         /* Check that any remaining pinned entries are in the superblock ring */
5152         entry_ptr = cache_ptr->pel_head_ptr;
5153         while(entry_ptr) {
5154             /* Check ring */
5155             HDassert(entry_ptr->ring == H5C_RING_SB);
5156 
5157             /* Advance to next entry in pinned entry list */
5158             entry_ptr = entry_ptr->next;
5159         } /* end while */
5160     } /* end else */
5161     HDassert(cache_ptr->dirty_index_size == 0);
5162     HDassert(cache_ptr->slist_len == 0);
5163     HDassert(cache_ptr->slist_size == 0);
5164     HDassert(cache_ptr->pl_len == 0);
5165     HDassert(cache_ptr->pl_size == 0);
5166     HDassert(cache_ptr->LRU_list_len == 0);
5167     HDassert(cache_ptr->LRU_list_size == 0);
5168 
5169 done:
5170     FUNC_LEAVE_NOAPI(ret_value)
5171 } /* H5C__flush_invalidate_cache() */
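
/* Note on traversal order (descriptive, based on the loop above): rings
 * are flushed starting from H5C_RING_USER and working inward, so client
 * entries are destroyed before the library's internal entries, with the
 * superblock ring (H5C_RING_SB) handled last.  This is why the assertions
 * above only tolerate leftover pinned entries in the superblock ring.
 */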
5172 
5173 
5174 /*-------------------------------------------------------------------------
5175  * Function:    H5C_flush_invalidate_ring
5176  *
5177  * Purpose:	Flush and destroy the entries contained in the target
5178  *		cache and ring.
5179  *
5180  *		If the ring contains protected entries, the function will
5181  *		fail, as protected entries cannot be either flushed or
5182  *		destroyed.  However all unprotected entries should be
5183  *		flushed and destroyed before the function returns failure.
5184  *
5185  *		While pinned entries can usually be flushed, they cannot
5186  *		be destroyed.  However, they should be unpinned when all
5187  *		the entries that reference them have been destroyed (thus
 *		reducing the pinned entry's reference count to 0, allowing
5189  *		it to be unpinned).
5190  *
5191  *		If pinned entries are present, the function makes repeated
5192  *		passes through the cache, flushing all dirty entries
5193  *		(including the pinned dirty entries where permitted) and
5194  *		destroying all unpinned entries.  This process is repeated
5195  *		until either the cache is empty, or the number of pinned
5196  *		entries stops decreasing on each pass.
5197  *
5198  *		If flush dependencies appear in the target ring, the
5199  *		function makes repeated passes through the cache flushing
5200  *		entries in flush dependency order.
5201  *
5202  * Return:      Non-negative on success/Negative on failure or if there was
5203  *		a request to flush all items and something was protected.
5204  *
5205  * Programmer:  John Mainzer
5206  *		9/1/15
5207  *
5208  *-------------------------------------------------------------------------
5209  */
5210 static herr_t
5211 H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
5212 {
5213     H5C_t              *cache_ptr;
5214     hbool_t             restart_slist_scan;
5215     uint32_t            protected_entries = 0;
5216     int32_t             i;
5217     int32_t             cur_ring_pel_len;
5218     int32_t             old_ring_pel_len;
5219     unsigned            cooked_flags;
5220     unsigned            evict_flags;
5221     H5SL_node_t        *node_ptr = NULL;
5222     H5C_cache_entry_t  *entry_ptr = NULL;
5223     H5C_cache_entry_t  *next_entry_ptr = NULL;
5224 #if H5C_DO_SANITY_CHECKS
5225     uint32_t            initial_slist_len = 0;
5226     size_t              initial_slist_size = 0;
5227 #endif /* H5C_DO_SANITY_CHECKS */
5228     herr_t              ret_value = SUCCEED;
5229 
5230     FUNC_ENTER_NOAPI(FAIL)
5231 
5232     HDassert(f);
5233     HDassert(f->shared);
5234     cache_ptr = f->shared->cache;
5235     HDassert(cache_ptr);
5236     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
5237     HDassert(cache_ptr->slist_ptr);
5238     HDassert(ring > H5C_RING_UNDEFINED);
5239     HDassert(ring < H5C_RING_NTYPES);
5240 
5241     HDassert(cache_ptr->epoch_markers_active == 0);
5242 
5243     /* Filter out the flags that are not relevant to the flush/invalidate.
5244      */
5245     cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
5246     evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
5247 
    /* The flush procedure here is a bit strange.
     *
     * In the outer while loop we make at least one pass through the
     * cache, and then repeat until either all the pinned entries in
     * the ring unpin themselves, or until the number of pinned entries
     * in the ring stops declining.  In this latter case, we scream and die.
     *
     * Since the fractal heap can dirty, resize, and/or move entries
     * in its flush callback, it is possible that the cache will still
     * contain dirty entries at this point.  If so, we must make more
     * passes through the skip list to allow it to empty.
     *
     * Further, since clean entries can be dirtied, resized, and/or moved
     * as the result of a flush callback (either the entry's own, or that
     * of some other cache entry), we can no longer promise to flush
     * the cache entries in increasing address order.
     *
     * Instead, we just do the best we can -- making a pass through
     * the skip list, and then a pass through the "clean" entries, and
     * then repeating as needed.  Thus it is quite possible that an
     * entry will be evicted from the cache only to be re-loaded later
     * in the flush process (From what Quincey tells me, the pin
     * mechanism makes this impossible, but even if it is true now,
     * we shouldn't count on it in the future.)
     *
     * The bottom line is that entries will probably be flushed in close
     * to increasing address order, but there are no guarantees.
     */
5276 
5277     /* compute the number of pinned entries in this ring */
5278     entry_ptr = cache_ptr->pel_head_ptr;
5279     cur_ring_pel_len = 0;
5280     while(entry_ptr != NULL) {
5281         HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5282         HDassert(entry_ptr->ring >= ring);
5283         if(entry_ptr->ring == ring)
5284             cur_ring_pel_len++;
5285 
5286         entry_ptr = entry_ptr->next;
5287     } /* end while */
5288 
5289     old_ring_pel_len = cur_ring_pel_len;
5290     while(cache_ptr->index_ring_len[ring] > 0) {
5291         /* first, try to flush-destroy any dirty entries.   Do this by
5292          * making a scan through the slist.  Note that new dirty entries
         * may be created by the flush callbacks.  Thus it is possible
5294          * that the slist will not be empty after we finish the scan.
5295          */
5296 
5297 #if H5C_DO_SANITY_CHECKS
5298         /* Depending on circumstances, H5C__flush_single_entry() will
5299          * remove dirty entries from the slist as it flushes them.
5300          * Thus for sanity checks we must make note of the initial
5301          * slist length and size before we do any flushes.
5302          */
5303         initial_slist_len = cache_ptr->slist_len;
5304         initial_slist_size = cache_ptr->slist_size;
5305 
5306         /* There is also the possibility that entries will be
5307          * dirtied, resized, moved, and/or removed from the cache
5308          * as the result of calls to the flush callbacks.  We use
         * the slist_len_increase and slist_size_increase
         * fields in struct H5C_t to track these changes for the
         * purpose of sanity checking.
5312          *
5313          * To this end, we must zero these fields before we start
5314          * the pass through the slist.
5315          */
5316         cache_ptr->slist_len_increase = 0;
5317         cache_ptr->slist_size_increase = 0;
5318 #endif /* H5C_DO_SANITY_CHECKS */
5319 
        /* Set cache_ptr->slist_changed to FALSE.
5321          *
5322          * This flag is set to TRUE by H5C__flush_single_entry if the slist
5323          * is modified by a pre_serialize, serialize, or notify callback.
5324          *
5325          * H5C_flush_invalidate_ring() uses this flag to detect any
5326          * modifications to the slist that might corrupt the scan of
5327          * the slist -- and restart the scan in this event.
5328          */
5329         cache_ptr->slist_changed = FALSE;
5330 
5331         /* this done, start the scan of the slist */
5332         restart_slist_scan = TRUE;
5333         while(restart_slist_scan || (node_ptr != NULL)) {
5334             if(restart_slist_scan) {
5335                 restart_slist_scan = FALSE;
5336 
5337                 /* Start at beginning of skip list */
5338                 node_ptr = H5SL_first(cache_ptr->slist_ptr);
5339                 if(node_ptr == NULL)
5340                     /* the slist is empty -- break out of inner loop */
5341                     break;
5342 
5343                 /* Get cache entry for this node */
5344                 next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
5345                 if(NULL == next_entry_ptr)
5346                     HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
5347 
5348                 HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5349                 HDassert(next_entry_ptr->is_dirty);
5350                 HDassert(next_entry_ptr->in_slist);
5351                 HDassert(next_entry_ptr->ring >= ring);
5352             } /* end if */
5353 
5354             entry_ptr = next_entry_ptr;
5355 
5356             /* It is possible that entries will be dirtied, resized,
5357              * flushed, or removed from the cache via the take ownership
             * flag as the result of pre_serialize or serialize callbacks.
5359              *
5360              * This in turn can corrupt the scan through the slist.
5361              *
5362              * We test for slist modifications in the pre_serialize
5363              * and serialize callbacks, and restart the scan of the
             * slist if we find them.  However, it is best we do some
             * extra sanity checking just in case.
5366              */
5367             HDassert(entry_ptr != NULL);
5368             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5369             HDassert(entry_ptr->in_slist);
5370             HDassert(entry_ptr->is_dirty);
5371             HDassert(entry_ptr->ring >= ring);
5372 
5373             /* increment node pointer now, before we delete its target
5374              * from the slist.
5375              */
5376             node_ptr = H5SL_next(node_ptr);
5377             if(node_ptr != NULL) {
5378                 next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
5379                 if(NULL == next_entry_ptr)
5380                     HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
5381                 HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5382                 HDassert(next_entry_ptr->is_dirty);
5383                 HDassert(next_entry_ptr->in_slist);
5384                 HDassert(next_entry_ptr->ring >= ring);
5385                 HDassert(entry_ptr != next_entry_ptr);
5386             } /* end if */
5387             else
5388                 next_entry_ptr = NULL;
5389 
5390             /* Note that we now remove nodes from the slist as we flush
5391              * the associated entries, instead of leaving them there
5392              * until we are done, and then destroying all nodes in
5393              * the slist.
5394              *
5395              * While this optimization used to be easy, with the possibility
5396              * of new entries being added to the slist in the midst of the
5397              * flush, we must keep the slist in canonical form at all
5398              * times.
5399              */
5400             if(((!entry_ptr->flush_me_last) ||
5401                     ((entry_ptr->flush_me_last) &&
5402                         (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
5403                     (entry_ptr->flush_dep_nchildren == 0) &&
5404                     (entry_ptr->ring == ring)) {
5405                 if(entry_ptr->is_protected) {
                    /* we have major problems -- but let's flush
5407                      * everything we can before we flag an error.
5408                      */
5409                     protected_entries++;
5410                 } /* end if */
5411                 else if(entry_ptr->is_pinned) {
5412                     if(H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
5413                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
5414 
5415                     if(cache_ptr->slist_changed) {
5416                         /* The slist has been modified by something
5417                          * other than the simple removal of the
                         * flushed entry after the flush.
5419                          *
5420                          * This has the potential to corrupt the
5421                          * scan through the slist, so restart it.
5422                          */
5423                         restart_slist_scan = TRUE;
5424                         cache_ptr->slist_changed = FALSE;
5425                         H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
5426                     } /* end if */
5427                 } /* end else-if */
5428                 else {
5429                     if(H5C__flush_single_entry(f, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
5430                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
5431 
5432                     if(cache_ptr->slist_changed) {
5433                         /* The slist has been modified by something
5434                          * other than the simple removal of the
                         * flushed entry after the flush.
5436                          *
5437                          * This has the potential to corrupt the
5438                          * scan through the slist, so restart it.
5439                          */
5440                         restart_slist_scan = TRUE;
5441                         cache_ptr->slist_changed = FALSE;
5442                         H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
5443                     } /* end if */
5444                 } /* end else */
5445             } /* end if */
5446         } /* end while loop scanning skip list */
5447 
5448 #if H5C_DO_SANITY_CHECKS
5449         /* It is possible that entries were added to the slist during
         * the scan, either before or after the scan pointer.  The following
5451          * asserts take this into account.
5452          *
5453          * Don't bother with the sanity checks if node_ptr != NULL, as
5454          * in this case we broke out of the loop because it got changed
5455          * out from under us.
5456          */
5457 
5458         if(node_ptr == NULL) {
5459             HDassert(cache_ptr->slist_len == (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
5460             HDassert(cache_ptr->slist_size == (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
5461         } /* end if */
5462 #endif /* H5C_DO_SANITY_CHECKS */
5463 
5464         /* Since we are doing a destroy, we must make a pass through
         * the index list and try to flush-destroy all entries that
5466          * remain.
5467          *
5468          * It used to be that all entries remaining in the cache at
5469          * this point had to be clean, but with the fractal heap mods
5470          * this may not be the case.  If so, we will flush entries out
5471          * in increasing address order.
5472          *
5473          * Writes to disk are possible here.
5474          */
5475 
5476         /* reset the counters so that we can detect insertions, loads,
5477          * and moves caused by the pre_serialize and serialize calls.
5478          */
5479         cache_ptr->entries_loaded_counter         = 0;
5480         cache_ptr->entries_inserted_counter       = 0;
5481         cache_ptr->entries_relocated_counter      = 0;
5482 
5483         next_entry_ptr = cache_ptr->il_head;
5484         while(next_entry_ptr != NULL) {
5485             entry_ptr = next_entry_ptr;
5486             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5487             HDassert(entry_ptr->ring >= ring);
5488 
5489             next_entry_ptr = entry_ptr->il_next;
5490             HDassert((next_entry_ptr == NULL) ||
5491                      (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
5492 
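            /* As above, the entry qualifies for a flush-destroy only if
             * it is not marked flush_me_last (or all remaining slist
             * entries are so marked), it has no flush dependency
             * children, and it belongs to the target ring.
             */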
5493             if((!entry_ptr->flush_me_last || (entry_ptr->flush_me_last && cache_ptr->num_last_entries >= cache_ptr->slist_len))
5494                     && entry_ptr->flush_dep_nchildren == 0 && entry_ptr->ring == ring) {
5495                 if(entry_ptr->is_protected) {
                    /* we have major problems -- but let's flush and
5497                      * destroy everything we can before we flag an
5498                      * error.
5499                      */
5500                     protected_entries++;
5501                     if(!entry_ptr->in_slist)
5502                         HDassert(!(entry_ptr->is_dirty));
5503                 } /* end if */
5504                 else if(!(entry_ptr->is_pinned)) {
5505                     /* if *entry_ptr is dirty, it is possible
5506                      * that one or more other entries may be
5507                      * either removed from the cache, loaded
5508                      * into the cache, or moved to a new location
5509                      * in the file as a side effect of the flush.
5510                      *
5511                      * It's also possible that removing a clean
5512                      * entry will remove the last child of a proxy
5513                      * entry, allowing it to be removed also and
5514                      * invalidating the next_entry_ptr.
5515                      *
                     * If either of these happens, and the target or
                     * proxy entry happens to be the next entry in
                     * the index list, we could find ourselves
                     * scanning a non-existent entry or skipping
                     * an entry.
                     *
                     * Neither of these is good, so restart the
                     * scan at the head of the index list after
                     * the flush if we detect that the next_entry_ptr
                     * becomes invalid.
5530                      */
5531                     cache_ptr->entry_watched_for_removal = next_entry_ptr;
5532 
5533                     if(H5C__flush_single_entry(f, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
5534                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
5535 
5536                     /* Restart the index list scan if necessary.  Must
5537                      * do this if the next entry is evicted, and also if
5538                      * one or more entries are inserted, loaded, or moved
5539                      * as these operations can result in part of the scan
5540                      * being skipped -- which can cause a spurious failure
                     * if this results in the number of pinned entries
                     * in the ring failing to decline during the pass.
5543                      */
5544                     if((NULL != next_entry_ptr && NULL == cache_ptr->entry_watched_for_removal)
5545                             || (cache_ptr->entries_loaded_counter > 0)
5546                             || (cache_ptr->entries_inserted_counter > 0)
5547                             || (cache_ptr->entries_relocated_counter > 0)) {
5548 
5549                         next_entry_ptr = cache_ptr->il_head;
5550 
5551                         cache_ptr->entries_loaded_counter         = 0;
5552                         cache_ptr->entries_inserted_counter       = 0;
5553                         cache_ptr->entries_relocated_counter      = 0;
5554 
5555                         H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
5556                     } /* end if */
5557                     else
5558                        cache_ptr->entry_watched_for_removal = NULL;
5559                 } /* end if */
5560             } /* end if */
        } /* end while loop scanning index list */
5562 
5563         /* We can't do anything if entries are pinned.  The
5564          * hope is that the entries will be unpinned as the
5565          * result of destroys of entries that reference them.
5566          *
5567          * We detect this by noting the change in the number
5568          * of pinned entries from pass to pass.  If it stops
5569          * shrinking before it hits zero, we scream and die.
5570          */
5571         old_ring_pel_len = cur_ring_pel_len;
5572         entry_ptr = cache_ptr->pel_head_ptr;
5573         cur_ring_pel_len = 0;
5574         while(entry_ptr != NULL) {
5575             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5576             HDassert(entry_ptr->ring >= ring);
5577 
5578             if(entry_ptr->ring == ring)
5579                 cur_ring_pel_len++;
5580 
5581             entry_ptr = entry_ptr->next;
5582         } /* end while */
5583 
        /* Check if the number of pinned entries in the ring is positive
         * and not declining.  Scream and die if so.
         */
5587         if(cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len) {
5588             /* Don't error if allowed to have pinned entries remaining */
5589             if(evict_flags)
5590                 HGOTO_DONE(TRUE)
5591 
5592             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
5593         } /* end if */
5594 
5595         HDassert(protected_entries == cache_ptr->pl_len);
5596 
5597         if(protected_entries > 0 && protected_entries == cache_ptr->index_len)
5598             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Only protected entries left in cache, protected_entries = %d", (int)protected_entries)
5599     } /* main while loop */
5600 
5601     /* Invariants, after destroying all entries in the ring */
5602     for(i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
5603         HDassert(cache_ptr->index_ring_len[i] == 0);
5604         HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
5605         HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
5606         HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
5607 
5608         HDassert(cache_ptr->slist_ring_len[i] == 0);
5609         HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
5610     } /* end for */
5611 
5612     HDassert(protected_entries <= cache_ptr->pl_len);
5613 
5614     if(protected_entries > 0)
5615         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
5616     else if(cur_ring_pel_len > 0)
5617         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
5618 
5619 done:
5620     FUNC_LEAVE_NOAPI(ret_value)
5621 } /* H5C_flush_invalidate_ring() */
5622 
5623 
5624 /*-------------------------------------------------------------------------
5625  * Function:    H5C__flush_ring
5626  *
5627  * Purpose:	Flush the entries contained in the specified cache and
5628  *		ring.  All entries in rings outside the specified ring
5629  *		must have been flushed on entry.
5630  *
5631  *		If the cache contains protected entries in the specified
5632  *		ring, the function will fail, as protected entries cannot
 *		be flushed.  However, all unprotected entries in the target
5634  *		ring should be flushed before the function returns failure.
5635  *
5636  *		If flush dependencies appear in the target ring, the
5637  *		function makes repeated passes through the slist flushing
5638  *		entries in flush dependency order.
5639  *
5640  * Return:      Non-negative on success/Negative on failure or if there was
5641  *		a request to flush all items and something was protected.
5642  *
5643  * Programmer:  John Mainzer
5644  *		9/1/15
5645  *
5646  *-------------------------------------------------------------------------
5647  */
5648 static herr_t
5649 H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
5650 {
5651     H5C_t * cache_ptr = f->shared->cache;
5652     hbool_t		flushed_entries_last_pass;
5653     hbool_t		flush_marked_entries;
5654     hbool_t		ignore_protected;
5655     hbool_t		tried_to_flush_protected_entry = FALSE;
5656     hbool_t		restart_slist_scan;
5657     uint32_t		protected_entries = 0;
5658     H5SL_node_t * 	node_ptr = NULL;
5659     H5C_cache_entry_t *	entry_ptr = NULL;
5660     H5C_cache_entry_t *	next_entry_ptr = NULL;
5661 #if H5C_DO_SANITY_CHECKS
5662     uint32_t		initial_slist_len = 0;
5663     size_t              initial_slist_size = 0;
5664 #endif /* H5C_DO_SANITY_CHECKS */
5665     int                 i;
5666     herr_t		ret_value = SUCCEED;
5667 
5668     FUNC_ENTER_STATIC
5669 
5670     HDassert(cache_ptr);
5671     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
5672     HDassert(cache_ptr->slist_ptr);
5673     HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
5674     HDassert(ring > H5C_RING_UNDEFINED);
5675     HDassert(ring < H5C_RING_NTYPES);
5676 
5677 #if H5C_DO_EXTREME_SANITY_CHECKS
5678     if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
5679             (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
5680             (H5C_validate_lru_list(cache_ptr) < 0))
5681         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
5682 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
5683 
5684     ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
5685     flush_marked_entries = ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 );
5686 
5687     if(!flush_marked_entries)
5688         for(i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
5689 	    HDassert(cache_ptr->slist_ring_len[i] == 0);
5690 
5691     HDassert(cache_ptr->flush_in_progress);
5692 
5693     /* When we are only flushing marked entries, the slist will usually
5694      * still contain entries when we have flushed everything we should.
5695      * Thus we track whether we have flushed any entries in the last
5696      * pass, and terminate if we haven't.
5697      */
5698     flushed_entries_last_pass = TRUE;
5699 
    /* Set cache_ptr->slist_changed to FALSE.
5701      *
5702      * This flag is set to TRUE by H5C__flush_single_entry if the
5703      * slist is modified by a pre_serialize, serialize, or notify callback.
5704      * H5C_flush_cache uses this flag to detect any modifications
5705      * to the slist that might corrupt the scan of the slist -- and
5706      * restart the scan in this event.
5707      */
5708     cache_ptr->slist_changed = FALSE;
5709 
5710     while((cache_ptr->slist_ring_len[ring] > 0) &&
5711 	    (protected_entries == 0)  &&
5712 	    (flushed_entries_last_pass)) {
5713         flushed_entries_last_pass = FALSE;
5714 
5715 #if H5C_DO_SANITY_CHECKS
5716         /* For sanity checking, try to verify that the skip list has
5717          * the expected size and number of entries at the end of each
5718          * internal while loop (see below).
5719          *
         * Doing this gets a bit tricky, as depending on flags, we may
5721          * or may not flush all the entries in the slist.
5722          *
5723          * To make things more entertaining, with the advent of the
5724          * fractal heap, the entry serialize callback can cause entries
5725          * to be dirtied, resized, and/or moved.  Also, the
5726          * pre_serialize callback can result in an entry being
5727          * removed from the cache via the take ownership flag.
5728          *
5729          * To deal with this, we first make note of the initial
5730          * skip list length and size:
5731          */
5732         initial_slist_len = cache_ptr->slist_len;
5733         initial_slist_size = cache_ptr->slist_size;
5734 
5735         /* As mentioned above, there is the possibility that
5736          * entries will be dirtied, resized, flushed, or removed
5737          * from the cache via the take ownership flag  during
5738          * our pass through the skip list.  To capture the number
5739          * of entries added, and the skip list size delta,
5740          * zero the slist_len_increase and slist_size_increase of
5741          * the cache's instance of H5C_t.  These fields will be
5742          * updated elsewhere to account for slist insertions and/or
5743          * dirty entry size changes.
5744          */
5745         cache_ptr->slist_len_increase = 0;
5746         cache_ptr->slist_size_increase = 0;
5747 
5748         /* at the end of the loop, use these values to compute the
5749          * expected slist length and size and compare this with the
5750          * value recorded in the cache's instance of H5C_t.
5751          */
5752 #endif /* H5C_DO_SANITY_CHECKS */
5753 
5754         restart_slist_scan = TRUE;
5755 
5756         while((restart_slist_scan ) || (node_ptr != NULL)) {
5757             if(restart_slist_scan) {
5758                 restart_slist_scan = FALSE;
5759 
5760                 /* Start at beginning of skip list */
5761                 node_ptr = H5SL_first(cache_ptr->slist_ptr);
5762 
5763                 if(node_ptr == NULL)
5764                     /* the slist is empty -- break out of inner loop */
5765                     break;
5766 
5767                 /* Get cache entry for this node */
5768                 next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
5769 
5770                 if(NULL == next_entry_ptr)
5771                     HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
5772 
5773                 HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5774                 HDassert(next_entry_ptr->is_dirty);
5775                 HDassert(next_entry_ptr->in_slist);
5776             } /* end if */
5777 
5778             entry_ptr = next_entry_ptr;
5779 
5780             /* With the advent of the fractal heap, the free space
5781              * manager, and the version 3 cache, it is possible
5782              * that the pre-serialize or serialize callback will
5783              * dirty, resize, or take ownership of other entries
5784              * in the cache.
5785              *
5786              * To deal with this, I have inserted code to detect any
5787              * change in the skip list not directly under the control
5788              * of this function.  If such modifications are detected,
5789              * we must re-start the scan of the skip list to avoid
5790              * the possibility that the target of the next_entry_ptr
5791              * may have been flushed or deleted from the cache.
5792              *
5793              * To verify that all such possibilities have been dealt
5794              * with, we do a bit of extra sanity checking on
5795              * entry_ptr.
5796              */
5797             HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5798             HDassert(entry_ptr->in_slist);
5799             HDassert(entry_ptr->is_dirty);
5800             if(!flush_marked_entries || entry_ptr->flush_marker)
5801                 HDassert(entry_ptr->ring >= ring);
5802 
5803             /* Advance node pointer now, before we delete its target
5804              * from the slist.
5805              */
5806             node_ptr = H5SL_next(node_ptr);
5807             if(node_ptr != NULL) {
5808                 next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
5809                 if(NULL == next_entry_ptr)
5810                     HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
5811 
5812                 HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5813                 HDassert(next_entry_ptr->is_dirty);
5814                 HDassert(next_entry_ptr->in_slist);
5815 
5816                 if(!flush_marked_entries || next_entry_ptr->flush_marker)
5817                     HDassert(next_entry_ptr->ring >= ring);
5818 
5819                 HDassert(entry_ptr != next_entry_ptr);
5820             } /* end if */
5821             else
5822                 next_entry_ptr = NULL;
5823 
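            /* Flush the entry only if: it is marked (when flushing
             * marked entries only), it is not being deferred by the
             * flush_me_last flag, it has no dirty flush dependency
             * children, and it belongs to the target ring.
             */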
5824             if((!flush_marked_entries || entry_ptr->flush_marker)
5825                     && (!entry_ptr->flush_me_last ||
5826                         (entry_ptr->flush_me_last
5827                             && (cache_ptr->num_last_entries >= cache_ptr->slist_len
5828                                || (flush_marked_entries && entry_ptr->flush_marker))))
5829                     && (entry_ptr->flush_dep_nchildren == 0
5830                         || entry_ptr->flush_dep_ndirty_children == 0)
5831                     && entry_ptr->ring == ring) {
5832 
5833                 HDassert(entry_ptr->flush_dep_nunser_children == 0);
5834 
5835                 if(entry_ptr->is_protected) {
                    /* we probably have major problems -- but let's
5837                      * flush everything we can before we decide
5838                      * whether to flag an error.
5839                      */
5840                     tried_to_flush_protected_entry = TRUE;
5841                     protected_entries++;
5842                 } /* end if */
5843                 else {
5844                     if(H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
5845                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
5846 
5847                     if(cache_ptr->slist_changed) {
5848                         /* The slist has been modified by something
5849                          * other than the simple removal of the
                         * flushed entry after the flush.
5851                          *
5852                          * This has the potential to corrupt the
5853                          * scan through the slist, so restart it.
5854                          */
5855                         restart_slist_scan = TRUE;
5856                         cache_ptr->slist_changed = FALSE;
5857                         H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
5858                     } /* end if */
5859 
5860                     flushed_entries_last_pass = TRUE;
5861                 } /* end else */
5862             } /* end if */
5863         } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
5864 
5865 #if H5C_DO_SANITY_CHECKS
5866         /* Verify that the slist size and length are as expected. */
5867         HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
5868         HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
5869 #endif /* H5C_DO_SANITY_CHECKS */
5870     } /* while */
5871 
5872     HDassert(protected_entries <= cache_ptr->pl_len);
5873 
5874     if(((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
5875         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
5876 
5877 #if H5C_DO_SANITY_CHECKS
5878     if(!flush_marked_entries) {
5879         HDassert(cache_ptr->slist_ring_len[ring] == 0);
5880         HDassert(cache_ptr->slist_ring_size[ring] == 0);
5881     } /* end if */
5882 #endif /* H5C_DO_SANITY_CHECKS */
5883 
5884 done:
5885     FUNC_LEAVE_NOAPI(ret_value)
5886 } /* H5C__flush_ring() */
5887 
5888 
5889 /*-------------------------------------------------------------------------
5890  *
5891  * Function:    H5C__flush_single_entry
5892  *
 * Purpose:     Flush or clear (and evict if requested) the specified
 *		cache entry.
 *
 *		Attempts to flush a protected entry will result in an
 *		error.
 *
 *		If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry
 *		will also be evicted from the cache (or handed off to the
 *		caller if H5C__TAKE_OWNERSHIP_FLAG is also set), and the
 *		call can't be part of a sequence of flushes.
5914  *
5915  * Return:      Non-negative on success/Negative on failure or if there was
5916  *		an attempt to flush a protected item.
5917  *
5918  * Programmer:  John Mainzer, 5/5/04
5919  *
5920  *-------------------------------------------------------------------------
5921  */
5922 herr_t
5923 H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
5924 {
5925     H5C_t *	     	cache_ptr;              /* Cache for file */
5926     hbool_t		destroy;		/* external flag */
5927     hbool_t		clear_only;		/* external flag */
5928     hbool_t		free_file_space;	/* external flag */
5929     hbool_t		take_ownership;		/* external flag */
5930     hbool_t             del_from_slist_on_destroy;    /* external flag */
5931     hbool_t		during_flush;		/* external flag */
5932     hbool_t		write_entry;		/* internal flag */
5933     hbool_t		destroy_entry;		/* internal flag */
5934     hbool_t		generate_image;		/* internal flag */
5935     hbool_t		update_page_buffer;	/* internal flag */
5936     hbool_t		was_dirty;
5937     hbool_t		suppress_image_entry_writes = FALSE;
5938     hbool_t		suppress_image_entry_frees = FALSE;
5939     haddr_t             entry_addr = HADDR_UNDEF;
5940     herr_t		ret_value = SUCCEED;      /* Return value */
5941 
5942     FUNC_ENTER_PACKAGE
5943 
5944     HDassert(f);
5945     cache_ptr = f->shared->cache;
5946     HDassert(cache_ptr);
5947     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
5948     HDassert(entry_ptr);
5949     HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
5950     HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
5951     HDassert(entry_ptr->type);
5952 
5953     /* setup external flags from the flags parameter */
5954     destroy                = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
5955     clear_only             = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
5956     free_file_space        = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
5957     take_ownership         = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
5958     del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
5959     during_flush           = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
5960     generate_image         = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
5961     update_page_buffer     = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
5962 
5963     /* Set the flag for destroying the entry, based on the 'take ownership'
5964      * and 'destroy' flags
5965      */
5966     if(take_ownership)
5967         destroy_entry = FALSE;
5968     else
5969         destroy_entry = destroy;
5970 
5971     /* we will write the entry to disk if it exists, is dirty, and if the
5972      * clear only flag is not set.
5973      */
5974     if(entry_ptr->is_dirty && !clear_only)
5975         write_entry = TRUE;
5976     else
5977         write_entry = FALSE;
5978 
5979     /* if we have received close warning, and we have been instructed to
5980      * generate a metadata cache image, and we have actually constructed
5981      * the entry images, set suppress_image_entry_frees to TRUE.
5982      *
5983      * Set suppress_image_entry_writes to TRUE if indicated by the
5984      * image_ctl flags.
5985      */
5986     if(cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image
5987             && cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries) {
5988         /* Sanity checks */
5989         HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
5990         HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
5991         HDassert((!clear_only) || !(entry_ptr->include_in_image));
5992         HDassert((!take_ownership) || !(entry_ptr->include_in_image));
5993         HDassert((!free_file_space) || !(entry_ptr->include_in_image));
5994 
5995         suppress_image_entry_frees = TRUE;
5996 
5997         if(cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES)
5998             suppress_image_entry_writes = TRUE;
5999     } /* end if */
6000 
6001     /* run initial sanity checks */
6002 #if H5C_DO_SANITY_CHECKS
6003     if(entry_ptr->in_slist) {
6004         HDassert(entry_ptr->is_dirty);
6005 
6006         if((entry_ptr->flush_marker) && (!entry_ptr->is_dirty))
6007             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
6008     } /* end if */
6009     else {
6010         HDassert(!entry_ptr->is_dirty);
6011         HDassert(!entry_ptr->flush_marker);
6012 
6013         if((entry_ptr->is_dirty) || (entry_ptr->flush_marker))
6014             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
6015     } /* end else */
6016 #endif /* H5C_DO_SANITY_CHECKS */
6017 
6018     if(entry_ptr->is_protected) {
6019         HDassert(!entry_ptr->is_protected);
6020 
6021         /* Attempt to flush a protected entry -- scream and die. */
6022         HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
6023     } /* end if */
6024 
6025     /* Set entry_ptr->flush_in_progress = TRUE and set
6026      * entry_ptr->flush_marker = FALSE
6027      *
6028      * We will set flush_in_progress back to FALSE at the end if the
6029      * entry still exists at that point.
6030      */
6031     entry_ptr->flush_in_progress = TRUE;
6032     entry_ptr->flush_marker = FALSE;
6033 
6034     /* Preserve current dirty state for later */
6035     was_dirty = entry_ptr->is_dirty;
6036 
    /* If the entry is dirty and we are doing a flush or a flush destroy,
     * or if we have been requested to generate an image, serialize the
     * entry.
     */
6041     if(write_entry || generate_image) {
6042         HDassert(entry_ptr->is_dirty);
6043 
6044         if(NULL == entry_ptr->image_ptr) {
6045             if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
6046                 HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
6047 #if H5C_DO_MEMORY_SANITY_CHECKS
6048             HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
6049 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
6050         } /* end if */
6051 
6052         if(!(entry_ptr->image_up_to_date)) {
6053             /* Sanity check */
6054             HDassert(!entry_ptr->prefetched);
6055 
6056             /* Generate the entry's image */
6057             if(H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
6058                 HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
6059         } /* end if ( ! (entry_ptr->image_up_to_date) ) */
6060     } /* end if */
6061 
6062     /* Finally, write the image to disk.
6063      *
     * Note that if the H5C__CLASS_SKIP_WRITES flag is set in the
     * entry's type, we silently skip the write.  This
6066      * flag should only be used in test code.
6067      */
6068     if(write_entry) {
6069         HDassert(entry_ptr->is_dirty);
6070 
6071 #if H5C_DO_SANITY_CHECKS
6072         if(cache_ptr->check_write_permitted && !(cache_ptr->write_permitted))
6073             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
6074 #endif /* H5C_DO_SANITY_CHECKS */
6075 
6076         /* Write the image to disk unless the write is suppressed.
6077          *
6078          * This happens if both suppress_image_entry_writes and
6079          * entry_ptr->include_in_image are TRUE, or if the
         * H5C__CLASS_SKIP_WRITES flag is set in the entry's type.  The
         * latter flag should only be used in test code.
6082          */
6083         if((!suppress_image_entry_writes || !entry_ptr->include_in_image)
6084                 && (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
6085             H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
6086 
6087 #ifdef H5_HAVE_PARALLEL
6088             if(cache_ptr->coll_write_list) {
6089                 if(H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
6090                     HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
6091             } /* end if */
            else {
6094 #endif /* H5_HAVE_PARALLEL */
6095 
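                /* A prefetched entry carries the generic prefetched-entry
                 * type, so recover the file memory type of the underlying
                 * entry from its recorded prefetch_type_id.
                 */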
6096                 if(entry_ptr->prefetched) {
6097                     HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
6098                     mem_type = cache_ptr->
6099                                class_table_ptr[entry_ptr->prefetch_type_id]->
6100                                mem_type;
6101                 } /* end if */
6102                 else
6103                     mem_type = entry_ptr->type->mem_type;
6104 
6105                 if(H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0)
6106                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
6107 #ifdef H5_HAVE_PARALLEL
6108             }
6109 #endif /* H5_HAVE_PARALLEL */
6110         } /* end if */
6111 
6112         /* if the entry has a notify callback, notify it that we have
6113          * just flushed the entry.
6114          */
6115         if(entry_ptr->type->notify &&
6116                 (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0 )
6117             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
6118     } /* if ( write_entry ) */
6119 
6120     /* At this point, all pre-serialize and serialize calls have been
6121      * made if it was appropriate to make them.  Similarly, the entry
6122      * has been written to disk if desired.
6123      *
6124      * Thus it is now safe to update the cache data structures for the
6125      * flush.
6126      */
6127 
6128     /* start by updating the statistics */
6129     if(clear_only) {
6130         /* only log a clear if the entry was dirty */
6131         if(was_dirty) {
6132             H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
6133         } /* end if */
6134     } else if(write_entry) {
6135         HDassert(was_dirty);
6136 
6137         /* only log a flush if we actually wrote to disk */
6138         H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
6139     } /* end else if */
6140 
6141     /* Note that the algorithm below is (very) similar to the set of operations
6142      * in H5C_remove_entry() and should be kept in sync with changes
6143      * to that code. - QAK, 2016/11/30
6144      */
6145 
6146     /* Update the cache internal data structures. */
6147     if(destroy) {
6148         /* Sanity checks */
6149         if(take_ownership)
6150             HDassert(!destroy_entry);
6151         else
6152             HDassert(destroy_entry);
6153         HDassert(!entry_ptr->is_pinned);
6154 
6155         /* Update stats, while entry is still in the cache */
6156         H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
6157 
6158         /* If the entry's type has a 'notify' callback and the entry is about
6159          * to be removed from the cache, send a 'before eviction' notice while
6160          * the entry is still fully integrated in the cache.
6161          */
6162         if(entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0)
6163             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
6164 
6165         /* Update the cache internal data structures as appropriate
6166          * for a destroy.  Specifically:
6167          *
6168          * 1) Delete it from the index
6169          *
6170          * 2) Delete it from the skip list if requested.
6171          *
6172          * 3) Delete it from the collective read access list.
6173          *
6174          * 4) Update the replacement policy for eviction
6175          *
6176          * 5) Remove it from the tag list for this object
6177          *
6178          * Finally, if the destroy_entry flag is set, discard the
6179          * entry.
6180          */
6181         H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
6182 
6183         if(entry_ptr->in_slist && del_from_slist_on_destroy)
6184             H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
6185 
6186 #ifdef H5_HAVE_PARALLEL
6187         /* Check for collective read access flag */
6188         if(entry_ptr->coll_access) {
6189             entry_ptr->coll_access = FALSE;
6190             H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
6191         } /* end if */
6192 #endif /* H5_HAVE_PARALLEL */
6193 
6194         H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
6195 
6196         /* Remove entry from tag list */
6197         if(H5C__untag_entry(cache_ptr, entry_ptr) < 0)
6198             HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
6199 
6200 	/* verify that the entry is no longer part of any flush dependencies */
6201         HDassert(entry_ptr->flush_dep_nparents == 0);
6202 	HDassert(entry_ptr->flush_dep_nchildren == 0);
6203     } /* end if */
6204     else {
6205         HDassert(clear_only || write_entry);
6206         HDassert(entry_ptr->is_dirty);
6207         HDassert(entry_ptr->in_slist);
6208 
6209         /* We are either doing a flush or a clear.
6210          *
6211          * A clear and a flush are the same from the point of
6212          * view of the replacement policy and the slist.
6213          * Hence no differentiation between them.
6214          *
6215          * 					JRM -- 7/7/07
6216          */
6217 
6218         H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
6219 
6220         H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
6221 
        /* Mark the entry as clean and update the index to reflect
         * the entry's new clean status.
         */
6226         entry_ptr->is_dirty = FALSE;
6227 
6228         H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
6229 
6230         /* Check for entry changing status and do notifications, etc. */
6231         if(was_dirty) {
            /* If the entry's type has a 'notify' callback, send an 'entry cleaned'
6233              * notice now that the entry is fully integrated into the cache.
6234              */
6235             if(entry_ptr->type->notify &&
6236                     (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
6237                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
6238 
6239             /* Propagate the clean flag up the flush dependency chain if appropriate */
6240             if(entry_ptr->flush_dep_ndirty_children != 0)
6241                 HDassert(entry_ptr->flush_dep_ndirty_children == 0);
6242             if(entry_ptr->flush_dep_nparents > 0)
6243                 if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
6244                     HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag")
6245         } /* end if */
6246     } /* end else */
6247 
6248     /* reset the flush_in progress flag */
6249     entry_ptr->flush_in_progress = FALSE;
6250 
6251     /* capture the cache entry address for the log_flush call at the
6252        end before the entry_ptr gets freed */
6253     entry_addr = entry_ptr->addr;
6254 
6255     /* Internal cache data structures should now be up to date, and
6256      * consistent with the status of the entry.
6257      *
6258      * Now discard the entry if appropriate.
6259      */
6260     if(destroy) {
6261         /* Sanity check */
6262         HDassert(0 == entry_ptr->flush_dep_nparents);
6263 
6264         /* if both suppress_image_entry_frees and entry_ptr->include_in_image
6265          * are true, simply set entry_ptr->image_ptr to NULL, as we have
6266          * another pointer to the buffer in an instance of H5C_image_entry_t
6267          * in cache_ptr->image_entries.
6268          *
6269          * Otherwise, free the buffer if it exists.
6270          */
6271         if(suppress_image_entry_frees && entry_ptr->include_in_image)
6272             entry_ptr->image_ptr = NULL;
6273         else if(entry_ptr->image_ptr != NULL)
6274             entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
6275 
6276         /* If the entry is not a prefetched entry, verify that the flush
6277          * dependency parents addresses array has been transferred.
6278          *
         * If the entry is prefetched, the free_icr routine will dispose of
6280          * the flush dependency parents addresses array if necessary.
6281          */
6282         if(!entry_ptr->prefetched) {
6283             HDassert(0 == entry_ptr->fd_parent_count);
6284             HDassert(NULL == entry_ptr->fd_parent_addrs);
6285         } /* end if */
6286 
6287         /* Check whether we should free the space in the file that
6288          * the entry occupies
6289          */
6290         if(free_file_space) {
6291             hsize_t fsf_size;
6292 
6293             /* Sanity checks */
6294             HDassert(H5F_addr_defined(entry_ptr->addr));
6295             HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
6296 #ifndef NDEBUG
6297             {
6298                 size_t curr_len;
6299 
6300                 /* Get the actual image size for the thing again */
6301                 entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
6302                 HDassert(curr_len == entry_ptr->size);
6303             }
6304 #endif /* NDEBUG */
6305 
6306             /* If the file space free size callback is defined, use
6307              * it to get the size of the block of file space to free.
6308              * Otherwise use entry_ptr->size.
6309              */
6310             if(entry_ptr->type->fsf_size) {
6311                 if((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
6312                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
6313             } /* end if */
6314             else    /* no file space free size callback -- use entry size */
6315                 fsf_size = entry_ptr->size;
6316 
6317             /* Release the space on disk */
6318             if(H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
6319                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
6320         } /* end if ( free_file_space ) */
6321 
6322         /* Reset the pointer to the cache the entry is within. -QAK */
6323         entry_ptr->cache_ptr = NULL;
6324 
6325         /* increment entries_removed_counter and set
         * last_entry_removed_ptr.  As we are likely about to
6327          * free the entry, recall that last_entry_removed_ptr
6328          * must NEVER be dereferenced.
6329          *
6330          * Recall that these fields are maintained to allow functions
6331          * that perform scans of lists of entries to detect the
6332          * unexpected removal of entries (via expunge, eviction,
6333          * or take ownership at present), so that they can re-start
6334          * their scans if necessary.
6335          *
6336          * Also check if the entry we are watching for removal is being
6337          * removed (usually the 'next' entry for an iteration) and reset
6338          * it to indicate that it was removed.
6339          */
6340         cache_ptr->entries_removed_counter++;
6341         cache_ptr->last_entry_removed_ptr = entry_ptr;
6342         if(entry_ptr == cache_ptr->entry_watched_for_removal)
6343             cache_ptr->entry_watched_for_removal = NULL;
6344 
6345         /* Check for actually destroying the entry in memory */
6346         /* (As opposed to taking ownership of it) */
6347         if(destroy_entry) {
6348             if(entry_ptr->is_dirty) {
6349                 /* Reset dirty flag */
6350                 entry_ptr->is_dirty = FALSE;
6351 
                /* If the entry's type has a 'notify' callback, send an 'entry cleaned'
6353                  * notice now that the entry is fully integrated into the cache.
6354                  */
6355                 if(entry_ptr->type->notify &&
6356                         (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
6357                     HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
6358             } /* end if */
6359 
6360             /* we are about to discard the in core representation --
6361              * set the magic field to bad magic so we can detect a
6362              * freed entry if we see one.
6363              */
6364             entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
6365 
6366             /* verify that the image has been freed */
6367             HDassert(entry_ptr->image_ptr == NULL);
6368 
6369             if(entry_ptr->type->free_icr((void *)entry_ptr) < 0)
6370                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
6371         }  /* end if */
6372         else {
6373             HDassert(take_ownership);
6374 
6375             /* client is taking ownership of the entry.
6376              * set bad magic here too so the cache will choke
6377              * unless the entry is re-inserted properly
6378              */
6379             entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
6380         } /* end else */
6381     } /* if (destroy) */
6382 
6383     /* Check if we have to update the page buffer with cleared entries
6384      * so it doesn't go out of date
6385      */
6386     if(update_page_buffer) {
6387         /* Sanity check */
6388         HDassert(!destroy);
6389         HDassert(entry_ptr->image_ptr);
6390 
6391         if(f->shared->page_buf && f->shared->page_buf->page_size >= entry_ptr->size)
            if(H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0)
6393                 HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
6394     } /* end if */
6395 
6396     if(cache_ptr->log_flush)
6397         if((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
6398             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
6399 
6400 done:
6401     HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
6402               ( ! entry_ptr->flush_in_progress ) );
6403     HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
6404               ( take_ownership ) || ( ! entry_ptr->is_dirty ) );
6405 
6406     FUNC_LEAVE_NOAPI(ret_value)
6407 } /* H5C__flush_single_entry() */
6408 
6409 
6410 /*-------------------------------------------------------------------------
6411  *
6412  * Function:    H5C__verify_len_eoa
6413  *
 * Purpose:     Verify that 'len' does not exceed the eoa when 'actual' is
 *              false, i.e. 'len' is the initial speculative length from the
 *              get_load_size callback with a null image pointer.
 *              If it does, adjust 'len' accordingly.
 *
 *              Verify that 'len' does not exceed the eoa when 'actual' is
 *              true, i.e. 'len' is the actual length from the get_load_size
 *              callback with a non-null image pointer.
 *              If it does, return an error.
6423  *
6424  * Return:      FAIL if error is detected, SUCCEED otherwise.
6425  *
6426  * Programmer:  Vailin Choi
6427  *              9/6/15
6428  *
6429  *-------------------------------------------------------------------------
6430  */
6431 static herr_t
6432 H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr,
6433     size_t *len, hbool_t actual)
6434 {
6435     H5FD_mem_t cooked_type;             /* Modified type, accounting for switching global heaps */
6436     haddr_t eoa;                	/* End-of-allocation in the file */
6437     herr_t ret_value = SUCCEED;      	/* Return value */
6438 
6439     FUNC_ENTER_STATIC
6440 
6441     /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
6442      * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
6443      * Thus we do the same for purposes of computing the EOA
6444      * for sanity checks.
6445      */
6446     cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
6447 
6448     /* Get the file's end-of-allocation value */
6449     eoa = H5F_get_eoa(f, cooked_type);
6450     if(!H5F_addr_defined(eoa))
6451 	HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
6452 
6453     /* Check for bad address in general */
6454     if(H5F_addr_gt(addr, eoa))
6455 	HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
6456 
6457     /* Check if the amount of data to read will be past the EOA */
6458     if(H5F_addr_gt((addr + *len), eoa)) {
6459 	if(actual)
6460 	    HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
6461 	else
6462 	    /* Trim down the length of the metadata */
6463 	    *len = (size_t)(eoa - addr);
6464     } /* end if */
6465 
6466     if(*len <= 0)
6467 	HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
6468 
6469 done:
6470     FUNC_LEAVE_NOAPI(ret_value)
6471 } /* H5C__verify_len_eoa() */
6472 
6473 
6474 /*-------------------------------------------------------------------------
6475  *
6476  * Function:    H5C_load_entry
6477  *
6478  * Purpose:     Attempt to load the entry at the specified disk address
 *              and with the specified type into memory.  If successful,
 *              return the in-memory address of the entry.  Return NULL
6481  *              on failure.
6482  *
6483  *              Note that this function simply loads the entry into
6484  *              core.  It does not insert it into the cache.
6485  *
6486  * Return:      Non-NULL on success / NULL on failure.
6487  *
6488  * Programmer:  John Mainzer, 5/18/04
6489  *
6490  *-------------------------------------------------------------------------
6491  */
6492 static void *
6493 H5C_load_entry(H5F_t *              f,
6494 #ifdef H5_HAVE_PARALLEL
6495                 hbool_t             coll_access,
6496 #endif /* H5_HAVE_PARALLEL */
6497                 const H5C_class_t * type,
6498                 haddr_t             addr,
6499                 void *              udata)
6500 {
6501     hbool_t     dirty = FALSE;          /* Flag indicating whether thing was dirtied during deserialize */
6502     uint8_t *   image = NULL;           /* Buffer for disk image                    */
6503     void *      thing = NULL;           /* Pointer to thing loaded                  */
6504     H5C_cache_entry_t *entry = NULL;    /* Alias for thing loaded, as cache entry   */
6505     size_t      len;                    /* Size of image in file                    */
6506 #ifdef H5_HAVE_PARALLEL
6507     int         mpi_rank = 0;           /* MPI process rank                         */
6508     MPI_Comm    comm = MPI_COMM_NULL;   /* File MPI Communicator                    */
6509     int         mpi_code;               /* MPI error code                           */
6510 #endif /* H5_HAVE_PARALLEL */
6511     void *      ret_value = NULL;       /* Return value                             */
6512 
6513     FUNC_ENTER_NOAPI_NOINIT
6514 
6515     /* Sanity checks */
6516     HDassert(f);
6517     HDassert(f->shared);
6518     HDassert(f->shared->cache);
6519     HDassert(type);
6520     HDassert(H5F_addr_defined(addr));
6521     HDassert(type->get_initial_load_size);
6522     if(type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
6523         HDassert(type->get_final_load_size);
6524     else
6525         HDassert(NULL == type->get_final_load_size);
6526     HDassert(type->deserialize);
6527 
6528     /* Can't see how skip reads could be usefully combined with
6529      * the speculative read flag.  Hence disallow.
6530      */
6531     HDassert(!((type->flags & H5C__CLASS_SKIP_READS) &&
6532                (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
6533 
6534     /* Call the get_initial_load_size callback, to retrieve the initial size of image */
6535     if(type->get_initial_load_size(udata, &len) < 0)
6536         HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
6537     HDassert(len > 0);
6538 
6539     /* Check for possible speculative read off the end of the file */
6540     if(type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
6541         if(H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
6542             HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
6543 
6544     /* Allocate the buffer for reading the on-disk entry image */
6545     if(NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
6546         HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
6547 #if H5C_DO_MEMORY_SANITY_CHECKS
6548     HDmemcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
6549 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
6550 
6551 #ifdef H5_HAVE_PARALLEL
6552     if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
6553         if((mpi_rank = H5F_mpi_get_rank(f)) < 0)
6554             HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
6555         if((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
6556             HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
6557     } /* end if */
6558 #endif /* H5_HAVE_PARALLEL */
6559 
6560     /* Get the on-disk entry image */
6561     if(0 == (type->flags & H5C__CLASS_SKIP_READS)) {
6562         unsigned tries, max_tries;      /* The # of read attempts               */
6563         unsigned retries;               /* The # of retries                     */
6564         htri_t chk_ret;                 /* return from verify_chksum callback   */
6565         size_t actual_len = len;        /* The actual length, after speculative reads have been resolved */
6566         uint64_t nanosec = 1;           /* # of nanoseconds to sleep between retries */
6567         void *new_image;                /* Pointer to image                     */
6568         hbool_t len_changed = TRUE;     /* Whether to re-check speculative entries */
6569 
6570         /* Get the # of read attempts */
6571         max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
6572 
6573         /*
         * This do/while loop performs the following until the metadata
         * checksum verifies or the file's allowed number of read attempts
         * is exhausted:
6576          *   --read the metadata
6577          *   --determine the actual size of the metadata
6578          *   --perform checksum verification
6579          */
6580         do {
6581             if(actual_len != len) {
6582                 if(NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
6583                     HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
6584                 image = (uint8_t *)new_image;
6585 #if H5C_DO_MEMORY_SANITY_CHECKS
6586                 HDmemcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
6587 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
6588             } /* end if */
6589 
6590 #ifdef H5_HAVE_PARALLEL
6591             if(!coll_access || 0 == mpi_rank) {
6592 #endif /* H5_HAVE_PARALLEL */
6593                 if(H5F_block_read(f, type->mem_type, addr, len, image) < 0)
                    HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image")
6595 #ifdef H5_HAVE_PARALLEL
6596             } /* end if */
6597             /* if the collective metadata read optimization is turned on,
6598              * bcast the metadata read from process 0 to all ranks in the file
6599              * communicator
6600              */
6601             if(coll_access) {
6602                 int buf_size;
6603 
6604                 H5_CHECKED_ASSIGN(buf_size, int, len, size_t);
6605                 if(MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm)))
6606                     HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
6607             } /* end if */
6608 #endif /* H5_HAVE_PARALLEL */
6609 
6610             /* If the entry could be read speculatively and the length is still
6611              *  changing, check for updating the actual size
6612              */
6613             if((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) {
6614                 /* Retrieve the actual length */
6615                 actual_len = len;
6616                 if(type->get_final_load_size(image, len, udata, &actual_len) < 0)
6617                     continue;   /* Transfer control to while() and count towards retries */
6618 
6619                 /* Check for the length changing */
6620                 if(actual_len != len) {
6621                     /* Verify that the length isn't past the EOA for the file */
6622                     if(H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
6623                         HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
6624 
6625                     /* Expand buffer to new size */
6626                     if(NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
6627                         HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
6628                     image = (uint8_t *)new_image;
6629 #if H5C_DO_MEMORY_SANITY_CHECKS
6630                     HDmemcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
6631 #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
6632 
6633                     if(actual_len > len) {
6634 #ifdef H5_HAVE_PARALLEL
6635                         if(!coll_access || 0 == mpi_rank) {
6636 #endif /* H5_HAVE_PARALLEL */
6637                             /* If the thing's image needs to be bigger for a speculatively
6638                              * loaded thing, go get the on-disk image again (the extra portion).
6639                              */
6640                             if(H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) < 0)
6641                                 HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
6642 #ifdef H5_HAVE_PARALLEL
6643                         }
6644                         /* If the collective metadata read optimization is turned on,
6645                          * Bcast the metadata read from process 0 to all ranks in the file
6646                          * communicator */
6647                         if(coll_access) {
6648                             int buf_size;
6649 
6650                             H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t);
6651                             if(MPI_SUCCESS != (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm)))
6652                                 HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
6653                         } /* end if */
6654 #endif /* H5_HAVE_PARALLEL */
6655                     } /* end if */
6656                 } /* end if (actual_len != len) */
6657                 else {
6658                     /* The length has stabilized */
6659                     len_changed = FALSE;
6660 
6661                     /* Set the final length */
6662                     len = actual_len;
6663                 } /* else */
6664             } /* end if */
6665 
6666             /* If there's no way to verify the checksum for a piece of metadata
6667              * (usually because there's no checksum in the file), leave now
6668              */
6669             if(type->verify_chksum == NULL)
6670                 break;
6671 
6672             /* Verify the checksum for the metadata image */
6673             if((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
6674                 HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback")
6675             if(chk_ret == TRUE)
6676                 break;
6677 
6678             /* Sleep for some time */
6679             H5_nanosleep(nanosec);
6680             nanosec *= 2;               /* Double the sleep time next time */
6681         } while(--tries);
6682 
6683         /* Check for too many tries */
6684         if(tries == 0)
            HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts")
6686 
6687         /* Calculate and track the # of retries */
6688         retries = max_tries - tries;
        if(retries)     /* Don't track the case of zero retries */
            if(H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read retries = %u", retries)
6692 
6693         /* Set the final length (in case it wasn't set earlier) */
6694         len = actual_len;
6695     } /* end if !H5C__CLASS_SKIP_READS */
6696 
6697     /* Deserialize the on-disk image into the native memory form */
6698     if(NULL == (thing = type->deserialize(image, len, udata, &dirty)))
6699         HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
6700 
6701     entry = (H5C_cache_entry_t *)thing;
6702 
6703     /* In general, an entry should be clean just after it is loaded.
6704      *
6705      * However, when this code is used in the metadata cache, it is
6706      * possible that object headers will be dirty at this point, as
6707      * the deserialize function will alter object headers if necessary to
6708      * fix an old bug.
6709      *
6710      * In the following assert:
6711      *
6712      * 	HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
6713      *
6714      * note that type ids 5 & 6 are associated with object headers in the
6715      * metadata cache.
6716      *
6717      * When we get to using H5C for other purposes, we may wish to
6718      * tighten up the assert so that the loophole only applies to the
6719      * metadata cache.
6720      */
6721 
6722     HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6) );
6723 
6724     entry->magic                        = H5C__H5C_CACHE_ENTRY_T_MAGIC;
6725     entry->cache_ptr                    = f->shared->cache;
6726     entry->addr                         = addr;
6727     entry->size                         = len;
6728     HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
6729     entry->image_ptr                    = image;
6730     entry->image_up_to_date             = !dirty;
6731     entry->type                         = type;
    entry->is_dirty                     = dirty;
6733     entry->dirtied                      = FALSE;
6734     entry->is_protected                 = FALSE;
6735     entry->is_read_only                 = FALSE;
6736     entry->ro_ref_count                 = 0;
6737     entry->is_pinned                    = FALSE;
6738     entry->in_slist                     = FALSE;
6739     entry->flush_marker                 = FALSE;
6740 #ifdef H5_HAVE_PARALLEL
6741     entry->clear_on_unprotect           = FALSE;
6742     entry->flush_immediately            = FALSE;
6743     entry->coll_access                  = coll_access;
6744 #endif /* H5_HAVE_PARALLEL */
6745     entry->flush_in_progress            = FALSE;
6746     entry->destroy_in_progress          = FALSE;
6747 
6748     entry->ring                         = H5C_RING_UNDEFINED;
6749 
6750     /* Initialize flush dependency fields */
6751     entry->flush_dep_parent             = NULL;
6752     entry->flush_dep_nparents           = 0;
6753     entry->flush_dep_parent_nalloc      = 0;
6754     entry->flush_dep_nchildren          = 0;
6755     entry->flush_dep_ndirty_children    = 0;
6756     entry->flush_dep_nunser_children    = 0;
6757     entry->ht_next                      = NULL;
6758     entry->ht_prev                      = NULL;
6759     entry->il_next                      = NULL;
    entry->il_prev                      = NULL;
6761 
6762     entry->next                         = NULL;
6763     entry->prev                         = NULL;
6764 
6765 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
6766     entry->aux_next                     = NULL;
6767     entry->aux_prev                     = NULL;
6768 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
6769 
6770 #ifdef H5_HAVE_PARALLEL
6771     entry->coll_next                    = NULL;
6772     entry->coll_prev                    = NULL;
6773 #endif /* H5_HAVE_PARALLEL */
6774 
6775     /* initialize cache image related fields */
6776     entry->include_in_image             = FALSE;
6777     entry->lru_rank                     = 0;
6778     entry->image_dirty                  = FALSE;
6779     entry->fd_parent_count              = 0;
6780     entry->fd_parent_addrs              = NULL;
6781     entry->fd_child_count               = 0;
6782     entry->fd_dirty_child_count         = 0;
6783     entry->image_fd_height              = 0;
6784     entry->prefetched                   = FALSE;
6785     entry->prefetch_type_id             = 0;
6786     entry->age                          = 0;
6787     entry->prefetched_dirty             = FALSE;
6788 #ifndef NDEBUG  /* debugging field */
6789     entry->serialization_count          = 0;
6790 #endif /* NDEBUG */
6791 
6792     entry->tl_next  = NULL;
6793     entry->tl_prev  = NULL;
6794     entry->tag_info = NULL;
6795 
6796     H5C__RESET_CACHE_ENTRY_STATS(entry);
6797 
6798     ret_value = thing;
6799 
6800 done:
6801     /* Cleanup on error */
6802     if(NULL == ret_value) {
6803         /* Release resources */
6804         if(thing && type->free_icr(thing) < 0)
6805             HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed")
6806         if(image)
6807             image = (uint8_t *)H5MM_xfree(image);
6808     } /* end if */
6809 
6810     FUNC_LEAVE_NOAPI(ret_value)
6811 } /* H5C_load_entry() */
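
/* The read-retry logic in H5C_load_entry() above, distilled to a minimal
 * sketch (not library code).  read_block() and checksum_ok() are
 * hypothetical stand-ins for H5F_block_read() and the client's
 * verify_chksum callback; max_tries comes from H5F_GET_READ_ATTEMPTS():
 *
 *      unsigned tries = max_tries;
 *      uint64_t nanosec = 1;
 *
 *      do {
 *          if(read_block(addr, len, buf) < 0)
 *              return FAIL;
 *          if(checksum_ok(buf, len))
 *              break;                  // image is good -- deserialize it
 *          H5_nanosleep(nanosec);      // back off before re-reading
 *          nanosec *= 2;               // double the sleep each attempt
 *      } while(--tries);
 *
 *      if(tries == 0)
 *          return FAIL;                // checksum never verified
 */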
6812 
6813 
6814 /*-------------------------------------------------------------------------
6815  *
6816  * Function:    H5C__make_space_in_cache
6817  *
6818  * Purpose:     Attempt to evict cache entries until the index_size
6819  *		is at least needed_space below max_cache_size.
6820  *
6821  *		In passing, also attempt to bring cLRU_list_size to a
6822  *		value greater than min_clean_size.
6823  *
6824  *		Depending on circumstances, both of these goals may
6825  *		be impossible, as in parallel mode, we must avoid generating
6826  *		a write as part of a read (to avoid deadlock in collective
6827  *		I/O), and in all cases, it is possible (though hopefully
6828  *		highly unlikely) that the protected list may exceed the
6829  *		maximum size of the cache.
6830  *
6831  *		Thus the function simply does its best, returning success
6832  *		unless an error is encountered.
6833  *
6834  *		Observe that this function cannot occasion a read.
6835  *
6836  * Return:      Non-negative on success/Negative on failure.
6837  *
6838  * Programmer:  John Mainzer, 5/14/04
6839  *
6840  *-------------------------------------------------------------------------
6841  */
6842 herr_t
6843 H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t	write_permitted)
6844 {
6845     H5C_t *		cache_ptr = f->shared->cache;
6846 #if H5C_COLLECT_CACHE_STATS
6847     int32_t             clean_entries_skipped = 0;
6848     int32_t             dirty_pf_entries_skipped = 0;
6849     int32_t             total_entries_scanned = 0;
6850 #endif /* H5C_COLLECT_CACHE_STATS */
6851     uint32_t		entries_examined = 0;
6852     uint32_t		initial_list_len;
6853     size_t		empty_space;
6854     hbool_t             reentrant_call = FALSE;
6855     hbool_t		prev_is_dirty = FALSE;
6856     hbool_t             didnt_flush_entry = FALSE;
6857     hbool_t		restart_scan;
6858     H5C_cache_entry_t *	entry_ptr;
6859     H5C_cache_entry_t *	prev_ptr;
6860     H5C_cache_entry_t *	next_ptr;
6861     uint32_t 		num_corked_entries = 0;
6862     herr_t		ret_value = SUCCEED;      /* Return value */
6863 
6864     FUNC_ENTER_PACKAGE
6865 
6866     /* Sanity checks */
6867     HDassert(f);
6868     HDassert(cache_ptr);
6869     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
6870     HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
6871 
    /* Check to see if cache_ptr->msic_in_progress is TRUE.  If it is,
     * this is a re-entrant call via a client callback invoked during the
     * make space in cache process.  To avoid infinite recursion, set
     * reentrant_call to TRUE, and goto done.
6876      */
6877     if(cache_ptr->msic_in_progress) {
6878         reentrant_call = TRUE;
6879         HGOTO_DONE(SUCCEED);
6880     } /* end if */
6881 
6882     cache_ptr->msic_in_progress = TRUE;
6883 
6884     if ( write_permitted ) {
6885         restart_scan = FALSE;
6886         initial_list_len = cache_ptr->LRU_list_len;
6887         entry_ptr = cache_ptr->LRU_tail_ptr;
6888 
6889 	if(cache_ptr->index_size >= cache_ptr->max_cache_size)
6890 	   empty_space = 0;
6891 	else
6892 	   empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
6893 
6894         while ( ( ( (cache_ptr->index_size + space_needed)
6895                     >
6896                     cache_ptr->max_cache_size
6897                   )
6898 		  ||
6899 		  (
6900 		    ( empty_space + cache_ptr->clean_index_size )
6901 		    <
6902 		    ( cache_ptr->min_clean_size )
6903                   )
6904 		)
6905                 &&
6906                 ( entries_examined <= (2 * initial_list_len) )
6907                 &&
6908                 ( entry_ptr != NULL )
6909               )
6910         {
6911 	    HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
6912             HDassert( !(entry_ptr->is_protected) );
6913             HDassert( ! (entry_ptr->is_read_only) );
6914             HDassert( (entry_ptr->ro_ref_count) == 0 );
6915 
6916 	    next_ptr = entry_ptr->next;
6917             prev_ptr = entry_ptr->prev;
6918 
6919 	    if(prev_ptr != NULL)
6920 		prev_is_dirty = prev_ptr->is_dirty;
6921 
6922 	    if(entry_ptr->is_dirty &&
6923                     (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
6924 
6925                 /* Skip "dirty" corked entries.  */
6926 		++num_corked_entries;
6927                 didnt_flush_entry = TRUE;
6928 
6929 	    } else if ( ( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
6930                         ( ! entry_ptr->flush_in_progress ) &&
6931                         ( ! entry_ptr->prefetched_dirty ) ) {
6932 
6933                 didnt_flush_entry = FALSE;
6934 
6935                 if ( entry_ptr->is_dirty ) {
6936 
6937 #if H5C_COLLECT_CACHE_STATS
6938                     if ( (cache_ptr->index_size + space_needed)
6939                            >
6940                           cache_ptr->max_cache_size ) {
6941 
6942                         cache_ptr->entries_scanned_to_make_space++;
6943                     }
6944 #endif /* H5C_COLLECT_CACHE_STATS */
6945 
6946 		    /* reset entries_removed_counter and
6947                      * last_entry_removed_ptr prior to the call to
6948                      * H5C__flush_single_entry() so that we can spot
6949                      * unexpected removals of entries from the cache,
6950                      * and set the restart_scan flag if proceeding
6951                      * would be likely to cause us to scan an entry
6952                      * that is no longer in the cache.
6953                      */
6954                     cache_ptr->entries_removed_counter = 0;
6955                     cache_ptr->last_entry_removed_ptr  = NULL;
6956 
6957                     if(H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
6958                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
6959 
6960 		    if ( ( cache_ptr->entries_removed_counter > 1 ) ||
6961                          ( cache_ptr->last_entry_removed_ptr == prev_ptr ) )
6962 
6963                         restart_scan = TRUE;
6964 
6965                 } else if ( (cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
6966 #ifdef H5_HAVE_PARALLEL
6967                             && !(entry_ptr->coll_access)
6968 #endif /* H5_HAVE_PARALLEL */
6969                             ) {
6970 #if H5C_COLLECT_CACHE_STATS
6971                     cache_ptr->entries_scanned_to_make_space++;
6972 #endif /* H5C_COLLECT_CACHE_STATS */
6973 
6974                     if(H5C__flush_single_entry(f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
6975                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
6976                 } else {
6977                     /* We have enough space so don't flush clean entry. */
6978 #if H5C_COLLECT_CACHE_STATS
6979                     clean_entries_skipped++;
6980 #endif /* H5C_COLLECT_CACHE_STATS */
6981                     didnt_flush_entry = TRUE;
6982                 }
6983 
6984 #if H5C_COLLECT_CACHE_STATS
6985                 total_entries_scanned++;
6986 #endif /* H5C_COLLECT_CACHE_STATS */
6987 
6988             } else {
6989 
6990                 /* Skip epoch markers, entries that are in the process
6991                  * of being flushed, and entries marked as prefetched_dirty
6992                  * (occurs in the R/O case only).
6993                  */
6994                 didnt_flush_entry = TRUE;
6995 
6996 #if H5C_COLLECT_CACHE_STATS
6997                 if(entry_ptr->prefetched_dirty)
6998                     dirty_pf_entries_skipped++;
6999 #endif /* H5C_COLLECT_CACHE_STATS */
7000             }
7001 
7002 	    if ( prev_ptr != NULL ) {
7003 
7004 		if ( didnt_flush_entry ) {
7005 
7006 		    /* epoch markers don't get flushed, and we don't touch
7007                      * entries that are in the process of being flushed.
7008                      * Hence no need for sanity checks, as we haven't
7009                      * flushed anything.  Thus just set entry_ptr to prev_ptr
7010                      * and go on.
7011 		     */
7012                     entry_ptr = prev_ptr;
7013 
7014 		} else if ( ( restart_scan )
7015                             ||
7016                             ( prev_ptr->is_dirty != prev_is_dirty )
7017 		            ||
7018 		            ( prev_ptr->next != next_ptr )
7019 		            ||
7020 		            ( prev_ptr->is_protected )
7021 		            ||
7022 		            ( prev_ptr->is_pinned ) ) {
7023 
7024 		    /* something has happened to the LRU -- start over
7025 		     * from the tail.
7026 		     */
7027                     restart_scan = FALSE;
7028 	            entry_ptr = cache_ptr->LRU_tail_ptr;
7029 		    H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
7030 
7031 		} else {
7032 
7033 		    entry_ptr = prev_ptr;
7034 
7035 		}
7036 	    } else {
7037 
7038 		entry_ptr = NULL;
7039 
7040 	    }
7041 
7042 	    entries_examined++;
7043 
7044 	    if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
7045 
7046 	       empty_space = 0;
7047 
7048 	    } else {
7049 
7050 	       empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
7051 
7052 	    }
7053 
7054 	    HDassert( cache_ptr->index_size ==
7055 	              (cache_ptr->clean_index_size +
7056 		       cache_ptr->dirty_index_size) );
7057 
7058 	}
7059 
7060 #if H5C_COLLECT_CACHE_STATS
7061         cache_ptr->calls_to_msic++;
7062 
7063         cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
7064         cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
7065         cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
7066 
7067         if ( clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic ) {
7068 
7069             cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
7070         }
7071 
7072         if(dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
7073             cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
7074 
7075         if ( total_entries_scanned > cache_ptr->max_entries_scanned_in_msic ) {
7076 
7077             cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
7078         }
7079 #endif /* H5C_COLLECT_CACHE_STATS */
7080 
7081 
7082 	/* NEED: work on a better assert for corked entries */
7083 	HDassert( ( entries_examined > (2 * initial_list_len) ) ||
7084 		  ( (cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
7085 		    cache_ptr->max_cache_size ) ||
7086 		  ( ( cache_ptr->clean_index_size + empty_space )
7087 		    >= cache_ptr->min_clean_size ) ||
7088 		  ( ( num_corked_entries )));
7089 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
7090 
7091         HDassert( ( entries_examined > (2 * initial_list_len) ) ||
7092 		  ( cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size ) );
7093         HDassert( ( entries_examined > (2 * initial_list_len) ) ||
7094 		  ( cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size ) );
7095 
7096 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
7097 
7098     } else {
7099 
7100         HDassert( H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS );
7101 
7102 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
7103         initial_list_len = cache_ptr->cLRU_list_len;
7104         entry_ptr = cache_ptr->cLRU_tail_ptr;
7105 
7106         while ( ( (cache_ptr->index_size + space_needed)
7107                   >
7108                   cache_ptr->max_cache_size
7109                 )
7110                 &&
7111                 ( entries_examined <= initial_list_len )
7112                 &&
7113                 ( entry_ptr != NULL )
7114               )
7115         {
7116             HDassert( ! (entry_ptr->is_protected) );
7117             HDassert( ! (entry_ptr->is_read_only) );
7118             HDassert( (entry_ptr->ro_ref_count) == 0 );
7119             HDassert( ! (entry_ptr->is_dirty) );
7120 
7121             prev_ptr = entry_ptr->aux_prev;
7122 
7123             if ( ( !(entry_ptr->prefetched_dirty) )
7124 #ifdef H5_HAVE_PARALLEL
7125                  && ( ! (entry_ptr->coll_access) )
7126 #endif /* H5_HAVE_PARALLEL */
7127                ) {
7128                 if(H5C__flush_single_entry(f, entry_ptr,
7129                         H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
7130                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
7131 
7132             } /* end if */
7133 
7134 	    /* we are scanning the clean LRU, so the serialize function
7135 	     * will not be called on any entry -- thus there is no
7136 	     * concern about the list being modified out from under
7137 	     * this function.
7138 	     */
7139 
7140             entry_ptr = prev_ptr;
7141 	    entries_examined++;
7142         }
7143 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
7144     }
7145 
7146 done:
7147     /* Sanity checks */
7148     HDassert(cache_ptr->msic_in_progress);
7149     if(!reentrant_call)
7150         cache_ptr->msic_in_progress = FALSE;
7151     HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
7152 
7153     FUNC_LEAVE_NOAPI(ret_value)
7154 } /* H5C__make_space_in_cache() */
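
/* A distilled sketch (not library code) of the scan-restart logic in
 * H5C__make_space_in_cache() above.  Flushing an entry may invoke client
 * callbacks that remove arbitrary entries from the cache, so the scan
 * records its neighbors before each flush and restarts from the LRU tail
 * if they have changed.  flush() stands in for H5C__flush_single_entry(),
 * and restart_scan is set by the removal-tracking code shown above:
 *
 *      prev_ptr = entry_ptr->prev;
 *      next_ptr = entry_ptr->next;
 *      prev_is_dirty = (prev_ptr != NULL) && prev_ptr->is_dirty;
 *
 *      flush(entry_ptr);               // may evict or dirty prev_ptr
 *
 *      if(restart_scan || prev_ptr->is_dirty != prev_is_dirty ||
 *              prev_ptr->next != next_ptr || prev_ptr->is_protected ||
 *              prev_ptr->is_pinned)
 *          entry_ptr = cache_ptr->LRU_tail_ptr;    // start over
 *      else
 *          entry_ptr = prev_ptr;                   // safe to continue
 */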
7155 
7156 
7157 /*-------------------------------------------------------------------------
7158  *
7159  * Function:    H5C_validate_lru_list
7160  *
7161  * Purpose:     Debugging function that scans the LRU list for errors.
7162  *
7163  *		If an error is detected, the function generates a
7164  *		diagnostic and returns FAIL.  If no error is detected,
7165  *		the function returns SUCCEED.
7166  *
7167  * Return:      FAIL if error is detected, SUCCEED otherwise.
7168  *
7169  * Programmer:  John Mainzer, 7/14/05
7170  *
7171  * Changes:
7172  *
7173  *		Added code to verify that the LRU contains no pinned
7174  *		entries.                        JRM -- 4/25/14
7175  *
7176  *-------------------------------------------------------------------------
7177  */
7178 #if H5C_DO_EXTREME_SANITY_CHECKS
7179 
7180 static herr_t
7181 H5C_validate_lru_list(H5C_t * cache_ptr)
7182 {
7183     herr_t		ret_value = SUCCEED;      /* Return value */
7184     int32_t             len = 0;
7185     size_t              size = 0;
7186     H5C_cache_entry_t *	entry_ptr = NULL;
7187 
7188     FUNC_ENTER_NOAPI_NOINIT
7189 
7190     HDassert( cache_ptr );
7191     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
7192 
7193     if ( ( ( cache_ptr->LRU_head_ptr == NULL )
7194            ||
7195            ( cache_ptr->LRU_tail_ptr == NULL )
7196          )
7197          &&
7198          ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
7199        ) {
7200 
7201         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
7202     }
7203 
7204     if(cache_ptr->LRU_list_len < 0)
7205         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
7206 
7207     if ( ( cache_ptr->LRU_list_len == 1 )
7208          &&
7209          ( ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
7210            ||
7211            ( cache_ptr->LRU_head_ptr == NULL )
7212            ||
7213            ( cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size )
7214          )
7215        ) {
7216 
7217         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
7218     }
7219 
7220     if ( ( cache_ptr->LRU_list_len >= 1 )
7221          &&
7222          ( ( cache_ptr->LRU_head_ptr == NULL )
7223            ||
7224            ( cache_ptr->LRU_head_ptr->prev != NULL )
7225            ||
7226            ( cache_ptr->LRU_tail_ptr == NULL )
7227            ||
7228            ( cache_ptr->LRU_tail_ptr->next != NULL )
7229          )
7230        ) {
7231 
7232         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
7233     }
7234 
7235     entry_ptr = cache_ptr->LRU_head_ptr;
7236     while ( entry_ptr != NULL )
7237     {
7238 
7239         if ( ( entry_ptr != cache_ptr->LRU_head_ptr ) &&
7240              ( ( entry_ptr->prev == NULL ) ||
7241                ( entry_ptr->prev->next != entry_ptr ) ) ) {
7242 
7243             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
7244         }
7245 
7246         if ( ( entry_ptr != cache_ptr->LRU_tail_ptr ) &&
7247              ( ( entry_ptr->next == NULL ) ||
7248                ( entry_ptr->next->prev != entry_ptr ) ) ) {
7249 
7250             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
7251         }
7252 
7253         if ( ( entry_ptr->is_pinned ) ||
7254              ( entry_ptr->pinned_from_client ) ||
7255              ( entry_ptr->pinned_from_cache ) ) {
7256 
7257             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
7258         }
7259 
7260         len++;
7261         size += entry_ptr->size;
7262         entry_ptr = entry_ptr->next;
7263     }
7264 
7265     if ( ( cache_ptr->LRU_list_len != len ) ||
7266          ( cache_ptr->LRU_list_size != size ) ) {
7267 
7268         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
7269     }
7270 
7271 done:
7272 
7273     if ( ret_value != SUCCEED ) {
7274 
7275         HDassert(0);
7276     }
7277 
7278     FUNC_LEAVE_NOAPI(ret_value)
7279 
7280 } /* H5C_validate_lru_list() */
7281 
7282 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
7283 
7284 
7285 /*-------------------------------------------------------------------------
7286  *
7287  * Function:    H5C_validate_pinned_entry_list
7288  *
7289  * Purpose:     Debugging function that scans the pinned entry list for
7290  *              errors.
7291  *
7292  *		If an error is detected, the function generates a
7293  *		diagnostic and returns FAIL.  If no error is detected,
7294  *		the function returns SUCCEED.
7295  *
7296  * Return:      FAIL if error is detected, SUCCEED otherwise.
7297  *
7298  * Programmer:  John Mainzer, 4/25/14
7299  *
7300  * Changes:
7301  *
7302  *		None.
7303  *
7304  *-------------------------------------------------------------------------
7305  */
7306 #if H5C_DO_EXTREME_SANITY_CHECKS
7307 
7308 static herr_t
7309 H5C_validate_pinned_entry_list(H5C_t * cache_ptr)
7310 {
7311     herr_t		ret_value = SUCCEED;      /* Return value */
7312     int32_t             len = 0;
7313     size_t              size = 0;
7314     H5C_cache_entry_t *	entry_ptr = NULL;
7315 
7316     FUNC_ENTER_NOAPI_NOINIT
7317 
7318     HDassert( cache_ptr );
7319     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
7320 
7321     if ( ( ( cache_ptr->pel_head_ptr == NULL )
7322            ||
7323            ( cache_ptr->pel_tail_ptr == NULL )
7324          )
7325          &&
7326          ( cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr )
7327        ) {
7328 
7329         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
7330     }
7331 
7332     if(cache_ptr->pel_len < 0)
7333         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
7334 
7335     if ( ( cache_ptr->pel_len == 1 )
7336          &&
7337          ( ( cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr )
7338            ||
7339            ( cache_ptr->pel_head_ptr == NULL )
7340            ||
7341            ( cache_ptr->pel_head_ptr->size != cache_ptr->pel_size )
7342          )
7343        ) {
7344 
7345         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
7346     }
7347 
7348     if ( ( cache_ptr->pel_len >= 1 )
7349          &&
7350          ( ( cache_ptr->pel_head_ptr == NULL )
7351            ||
7352            ( cache_ptr->pel_head_ptr->prev != NULL )
7353            ||
7354            ( cache_ptr->pel_tail_ptr == NULL )
7355            ||
7356            ( cache_ptr->pel_tail_ptr->next != NULL )
7357          )
7358        ) {
7359 
7360         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
7361     }
7362 
7363     entry_ptr = cache_ptr->pel_head_ptr;
7364     while ( entry_ptr != NULL )
7365     {
7366 
7367         if ( ( entry_ptr != cache_ptr->pel_head_ptr ) &&
7368              ( ( entry_ptr->prev == NULL ) ||
7369                ( entry_ptr->prev->next != entry_ptr ) ) ) {
7370 
7371             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
7372         }
7373 
7374         if ( ( entry_ptr != cache_ptr->pel_tail_ptr ) &&
7375              ( ( entry_ptr->next == NULL ) ||
7376                ( entry_ptr->next->prev != entry_ptr ) ) ) {
7377 
7378             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
7379         }
7380 
7381         if ( ! entry_ptr->is_pinned ) {
7382 
7383             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
7384         }
7385 
7386         if ( ! ( ( entry_ptr->pinned_from_client ) ||
7387                  ( entry_ptr->pinned_from_cache ) ) ) {
7388 
7389             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
7390         }
7391 
7392         len++;
7393         size += entry_ptr->size;
7394         entry_ptr = entry_ptr->next;
7395     }
7396 
7397     if ( ( cache_ptr->pel_len != len ) ||
7398          ( cache_ptr->pel_size != size ) ) {
7399 
7400         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
7401     }
7402 
7403 done:
7404 
7405     if ( ret_value != SUCCEED ) {
7406 
7407         HDassert(0);
7408     }
7409 
7410     FUNC_LEAVE_NOAPI(ret_value)
7411 
7412 } /* H5C_validate_pinned_entry_list() */
7413 
7414 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
7415 
7416 
7417 /*-------------------------------------------------------------------------
7418  *
7419  * Function:    H5C_validate_protected_entry_list
7420  *
7421  * Purpose:     Debugging function that scans the protected entry list for
7422  *              errors.
7423  *
7424  *		If an error is detected, the function generates a
7425  *		diagnostic and returns FAIL.  If no error is detected,
7426  *		the function returns SUCCEED.
7427  *
7428  * Return:      FAIL if error is detected, SUCCEED otherwise.
7429  *
7430  * Programmer:  John Mainzer, 4/25/14
7431  *
7432  * Changes:
7433  *
7434  *		None.
7435  *
7436  *-------------------------------------------------------------------------
7437  */
7438 #if H5C_DO_EXTREME_SANITY_CHECKS
7439 
7440 static herr_t
7441 H5C_validate_protected_entry_list(H5C_t * cache_ptr)
7442 {
7443     herr_t		ret_value = SUCCEED;      /* Return value */
7444     int32_t             len = 0;
7445     size_t              size = 0;
7446     H5C_cache_entry_t *	entry_ptr = NULL;
7447 
7448     FUNC_ENTER_NOAPI_NOINIT
7449 
7450     HDassert( cache_ptr );
7451     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
7452 
7453     if(((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL))
7454              && (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
7455         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
7456 
7457     if(cache_ptr->pl_len < 0)
7458         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
7459 
7460     if ( ( cache_ptr->pl_len == 1 )
7461          &&
7462          ( ( cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr )
7463            ||
7464            ( cache_ptr->pl_head_ptr == NULL )
7465            ||
7466            ( cache_ptr->pl_head_ptr->size != cache_ptr->pl_size )
7467          )
7468        ) {
7469 
7470         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
7471     }
7472 
7473     if ( ( cache_ptr->pl_len >= 1 )
7474          &&
7475          ( ( cache_ptr->pl_head_ptr == NULL )
7476            ||
7477            ( cache_ptr->pl_head_ptr->prev != NULL )
7478            ||
7479            ( cache_ptr->pl_tail_ptr == NULL )
7480            ||
7481            ( cache_ptr->pl_tail_ptr->next != NULL )
7482          )
7483        ) {
7484 
7485         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
7486     }
7487 
7488     entry_ptr = cache_ptr->pl_head_ptr;
7489     while ( entry_ptr != NULL )
7490     {
7491 
7492         if ( ( entry_ptr != cache_ptr->pl_head_ptr ) &&
7493              ( ( entry_ptr->prev == NULL ) ||
7494                ( entry_ptr->prev->next != entry_ptr ) ) ) {
7495 
7496             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
7497         }
7498 
7499         if ( ( entry_ptr != cache_ptr->pl_tail_ptr ) &&
7500              ( ( entry_ptr->next == NULL ) ||
7501                ( entry_ptr->next->prev != entry_ptr ) ) ) {
7502 
7503             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
7504         }
7505 
7506         if ( ! entry_ptr->is_protected ) {
7507 
7508             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
7509         }
7510 
7511         if ( ( entry_ptr->is_read_only ) &&
7512              ( entry_ptr->ro_ref_count <= 0 ) ) {
7513 
7514             HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
7515         }
7516 
7517         len++;
7518         size += entry_ptr->size;
7519         entry_ptr = entry_ptr->next;
7520     }
7521 
7522     if ( ( cache_ptr->pl_len != len ) ||
7523          ( cache_ptr->pl_size != size ) ) {
7524 
7525         HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
7526     }
7527 
7528 done:
7529 
7530     if ( ret_value != SUCCEED ) {
7531 
7532         HDassert(0);
7533     }
7534 
7535     FUNC_LEAVE_NOAPI(ret_value)
7536 
7537 } /* H5C_validate_protected_entry_list() */
7538 
7539 #endif /* H5C_DO_EXTREME_SANITY_CHECKS */
7540 
7541 
7542 /*-------------------------------------------------------------------------
7543  *
7544  * Function:    H5C_entry_in_skip_list
7545  *
 * Purpose:     Debugging function that scans the skip list to see if
 *		the target entry is present.  We need this, as it is
 *		possible for an entry to be in the skip list twice.
7549  *
7550  * Return:      FALSE if the entry is not in the skip list, and TRUE
7551  *		if it is.
7552  *
7553  * Programmer:  John Mainzer, 11/1/14
7554  *
7555  * Changes:
7556  *
7557  *		None.
7558  *
7559  *-------------------------------------------------------------------------
7560  */
7561 #if H5C_DO_SLIST_SANITY_CHECKS
7562 
7563 static hbool_t
7564 H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr)
7565 {
7566     hbool_t in_slist              = FALSE;
7567     H5SL_node_t *       node_ptr  = NULL;
7568     H5C_cache_entry_t *	entry_ptr = NULL;
7569 
7570     HDassert( cache_ptr );
7571     HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
7572     HDassert( cache_ptr->slist_ptr );
7573 
7574     node_ptr = H5SL_first(cache_ptr->slist_ptr);
7575 
7576     while ( ( node_ptr != NULL ) && ( ! in_slist ) )
7577     {
7578         entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
7579 
7580 	HDassert( entry_ptr );
7581 	HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
7582         HDassert( entry_ptr->is_dirty );
7583         HDassert( entry_ptr->in_slist );
7584 
7585         if ( entry_ptr == target_ptr ) {
7586 
7587 	    in_slist = TRUE;
7588 
7589 	} else {
7590 
7591 	    node_ptr = H5SL_next(node_ptr);
7592 	}
7593     }
7594 
7595     return(in_slist);
7596 
7597 } /* H5C_entry_in_skip_list() */
7598 #endif /* H5C_DO_SLIST_SANITY_CHECKS */
7599 
7600 
7601 /*-------------------------------------------------------------------------
7602  *
7603  * Function:    H5C__flush_marked_entries
7604  *
7605  * Purpose:     Flushes all marked entries in the cache.
7606  *
7607  * Return:      FAIL if error is detected, SUCCEED otherwise.
7608  *
7609  * Programmer:  Mike McGreevy
7610  *              November 3, 2010
7611  *
7612  *-------------------------------------------------------------------------
7613  */
7614 herr_t
7615 H5C__flush_marked_entries(H5F_t * f)
7616 {
7617     herr_t ret_value = SUCCEED;
7618 
7619     FUNC_ENTER_PACKAGE
7620 
7621     /* Assertions */
7622     HDassert(f != NULL);
7623 
7624     /* Flush all marked entries */
7625     if(H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
7626         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
7627 
7628 done:
7629     FUNC_LEAVE_NOAPI(ret_value)
7630 } /* H5C__flush_marked_entries */
7631 
7632 
7633 /*-------------------------------------------------------------------------
7634  *
7635  * Function:    H5C_cork
7636  *
7637  * Purpose:     To cork/uncork/get cork status of an object depending on "action":
7638  *		H5C__SET_CORK:
7639  *			To cork the object
7640  *			Return error if the object is already corked
7641  *		H5C__UNCORK:
 *			To uncork the object
7643  *			Return error if the object is not corked
7644  * 		H5C__GET_CORKED:
7645  *			To retrieve the cork status of an object in
7646  *			the parameter "corked"
7647  *
7648  * Return:      Success:        Non-negative
7649  *              Failure:        Negative
7650  *
7651  * Programmer:  Vailin Choi
7652  *		January 2014
7653  *
7654  *-------------------------------------------------------------------------
7655  */
7656 herr_t
7657 H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
7658 {
7659     H5C_tag_info_t *tag_info;	/* Points to a tag info struct */
7660     herr_t              ret_value = SUCCEED;
7661 
7662     FUNC_ENTER_NOAPI_NOINIT
7663 
7664     /* Assertions */
7665     HDassert(cache_ptr != NULL);
7666     HDassert(H5F_addr_defined(obj_addr));
7667     HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED);
7668 
7669     /* Search the list of corked object addresses in the cache */
7670     tag_info = (H5C_tag_info_t *)H5SL_search(cache_ptr->tag_list, &obj_addr);
7671 
7672     if(H5C__GET_CORKED == action) {
7673         HDassert(corked);
7674         if(tag_info != NULL && tag_info->corked)
7675             *corked = TRUE;
7676         else
7677             *corked = FALSE;
7678     } /* end if */
7679     else {
7680         /* Sanity check */
7681         HDassert(H5C__SET_CORK == action || H5C__UNCORK == action);
7682 
7683         /* Perform appropriate action */
7684         if(H5C__SET_CORK == action) {
7685             /* Check if this is the first entry for this tagged object */
7686             if(NULL == tag_info) {
7687                 /* Allocate new tag info struct */
7688                 if(NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t)))
7689                     HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry")
7690 
7691                 /* Set the tag for all entries */
7692                 tag_info->tag = obj_addr;
7693 
7694                 /* Insert tag info into skip list */
7695                 if(H5SL_insert(cache_ptr->tag_list, tag_info, &(tag_info->tag)) < 0 )
7696                     HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert tag info in skip list")
7697             } /* end if */
7698             else {
7699                 /* Check for object already corked */
7700                 if(tag_info->corked)
7701                     HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked")
7702                 HDassert(tag_info->entry_cnt > 0 && tag_info->head);
7703             } /* end else */
7704 
7705             /* Set the corked status for the entire object */
7706             tag_info->corked = TRUE;
7707             cache_ptr->num_objs_corked++;
7708 
7709         } /* end if */
7710         else {
7711             /* Sanity check */
7712             HDassert(tag_info);
7713 
7714             /* Check for already uncorked */
7715             if(!tag_info->corked)
7716                 HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked")
7717 
7718             /* Set the corked status for the entire object */
7719             tag_info->corked = FALSE;
7720             cache_ptr->num_objs_corked--;
7721 
            /* Remove the tag info from the tag list, if there are no more entries with this tag */
7723             if(0 == tag_info->entry_cnt) {
7724                 /* Sanity check */
7725                 HDassert(NULL == tag_info->head);
7726 
7727                 if(H5SL_remove(cache_ptr->tag_list, &(tag_info->tag)) != tag_info)
7728                     HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove tag info from list")
7729 
7730                 /* Release the tag info */
7731                 tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);
7732             } /* end if */
7733             else
7734                 HDassert(NULL != tag_info->head);
7735         } /* end else */
7736     } /* end else */
7737 
7738 done:
7739     FUNC_LEAVE_NOAPI(ret_value)
7740 } /* H5C_cork() */
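
/* Hypothetical usage sketch (not library code) for H5C_cork(); error
 * checking is omitted for brevity.  The 'corked' parameter is only used
 * by H5C__GET_CORKED, so NULL may be passed for the other actions:
 *
 *      hbool_t corked;
 *
 *      H5C_cork(cache_ptr, obj_addr, H5C__SET_CORK, NULL);
 *      ...         // operate on the object; its metadata can't be evicted
 *      H5C_cork(cache_ptr, obj_addr, H5C__UNCORK, NULL);
 *      H5C_cork(cache_ptr, obj_addr, H5C__GET_CORKED, &corked);
 *      HDassert(!corked);
 */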
7741 
7742 
7743 /*-------------------------------------------------------------------------
7744  * Function:    H5C__mark_flush_dep_dirty()
7745  *
 * Purpose:     Propagate a dirty-state transition up the flush
 *              dependency chain: increment the flush_dep_ndirty_children
 *              count of each of the target entry's flush dependency
 *              parents in response to the entry becoming dirty, and send
 *              each parent with a 'notify' callback a 'child entry
 *              dirtied' notice.
7750  *
7751  * Return:      Non-negative on success/Negative on failure
7752  *
7753  * Programmer:  Neil Fortner
7754  *              11/13/12
7755  *
7756  *-------------------------------------------------------------------------
7757  */
7758 static herr_t
7759 H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry)
7760 {
7761     unsigned u;                         /* Local index variable */
7762     herr_t ret_value = SUCCEED;         /* Return value */
7763 
7764     FUNC_ENTER_STATIC
7765 
7766     /* Sanity checks */
7767     HDassert(entry);
7768 
7769     /* Iterate over the parent entries, if any */
7770     for(u = 0; u < entry->flush_dep_nparents; u++) {
7771 	/* Sanity check */
7772 	HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children < entry->flush_dep_parent[u]->flush_dep_nchildren);
7773 
7774 	/* Adjust the parent's number of dirty children */
7775 	entry->flush_dep_parent[u]->flush_dep_ndirty_children++;
7776 
7777         /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
7778         if(entry->flush_dep_parent[u]->type->notify &&
7779                 (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, entry->flush_dep_parent[u]) < 0)
7780             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry dirty flag set")
7781     } /* end for */
7782 
7783 done:
7784     FUNC_LEAVE_NOAPI(ret_value)
7785 } /* H5C__mark_flush_dep_dirty() */
7786 
7787 
7788 /*-------------------------------------------------------------------------
7789  * Function:    H5C__mark_flush_dep_clean()
7790  *
 * Purpose:     Propagate a dirty-state transition up the flush
 *              dependency chain: decrement the flush_dep_ndirty_children
 *              count of each of the target entry's flush dependency
 *              parents in response to the entry becoming clean, and send
 *              each parent with a 'notify' callback a 'child entry
 *              cleaned' notice.
7795  *
7796  * Return:      Non-negative on success/Negative on failure
7797  *
7798  * Programmer:  Neil Fortner
7799  *              11/13/12
7800  *
7801  *-------------------------------------------------------------------------
7802  */
7803 static herr_t
7804 H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry)
7805 {
7806     int i;                              /* Local index variable */
7807     herr_t ret_value = SUCCEED;         /* Return value */
7808 
7809     FUNC_ENTER_STATIC
7810 
7811     /* Sanity checks */
7812     HDassert(entry);
7813 
7814     /* Iterate over the parent entries, if any */
7815     /* Note reverse iteration order, in case the callback removes the flush
7816      *  dependency - QAK, 2017/08/12
7817      */
7818     for(i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) {
7819 	/* Sanity check */
7820 	HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);
7821 
7822 	/* Adjust the parent's number of dirty children */
7823 	entry->flush_dep_parent[i]->flush_dep_ndirty_children--;
7824 
7825         /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
7826         if(entry->flush_dep_parent[i]->type->notify &&
7827                 (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, entry->flush_dep_parent[i]) < 0)
7828             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry dirty flag reset")
7829     } /* end for */
7830 
7831 done:
7832     FUNC_LEAVE_NOAPI(ret_value)
7833 } /* H5C__mark_flush_dep_clean() */
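
/* Worked example (hypothetical) of the bookkeeping performed by
 * H5C__mark_flush_dep_dirty() and H5C__mark_flush_dep_clean() above:
 *
 *      Parent P has flush_dep_nchildren == 3 and
 *      flush_dep_ndirty_children == 1.
 *
 *      Child C (currently clean) becomes dirty:
 *          H5C__mark_flush_dep_dirty(C) increments P's
 *          flush_dep_ndirty_children to 2 and, if P's class has a
 *          notify callback, delivers H5C_NOTIFY_ACTION_CHILD_DIRTIED.
 *
 *      C is later flushed clean:
 *          H5C__mark_flush_dep_clean(C) decrements the count back to 1
 *          and delivers H5C_NOTIFY_ACTION_CHILD_CLEANED.
 */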
7834 
7835 
7836 /*-------------------------------------------------------------------------
7837  * Function:    H5C__mark_flush_dep_serialized()
7838  *
7839  * Purpose:     Decrement the flush_dep_nunser_children fields of all the
7840  *		target entry's flush dependency parents in response to
7841  *		the target entry becoming serialized.
7842  *
7843  * Return:      Non-negative on success/Negative on failure
7844  *
7845  * Programmer:  John Mainzer
7846  *              8/30/16
7847  *
7848  *-------------------------------------------------------------------------
7849  */
7850 herr_t
7851 H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr)
7852 {
7853     int i;                              /* Local index variable */
7854     herr_t ret_value = SUCCEED;         /* Return value */
7855 
7856     FUNC_ENTER_STATIC
7857 
7858     /* Sanity checks */
7859     HDassert(entry_ptr);
7860 
7861     /* Iterate over the parent entries, if any */
7862     /* Note reverse iteration order, in case the callback removes the flush
7863      *  dependency - QAK, 2017/08/12
7864      */
7865     for(i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
7866 	/* Sanity checks */
7867         HDassert(entry_ptr->flush_dep_parent);
7868         HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
7869         HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);
7870 
        /* Decrement the parent's number of unserialized children */
7872         entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;
7873 
7874         /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
7875         if(entry_ptr->flush_dep_parent[i]->type->notify &&
7876                 (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, entry_ptr->flush_dep_parent[i]) < 0)
7877             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry serialized flag set")
7878     } /* end for */
7879 
7880 done:
7881     FUNC_LEAVE_NOAPI(ret_value)
7882 } /* H5C__mark_flush_dep_serialized() */
7883 
7884 
7885 /*-------------------------------------------------------------------------
7886  * Function:    H5C__mark_flush_dep_unserialized()
7887  *
7888  * Purpose:     Increment the flush_dep_nunser_children fields of all the
7889  *              target entry's flush dependency parents in response to
7890  *              the target entry becoming unserialized.
7891  *
7892  * Return:      Non-negative on success/Negative on failure
7893  *
7894  * Programmer:  John Mainzer
7895  *              8/30/16
7896  *
7897  *-------------------------------------------------------------------------
7898  */
7899 herr_t
7900 H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr)
7901 {
7902     unsigned u;                         /* Local index variable */
7903     herr_t ret_value = SUCCEED;         /* Return value */
7904 
7905     FUNC_ENTER_STATIC
7906 
7907     /* Sanity checks */
7908     HDassert(entry_ptr);
7909 
7910     /* Iterate over the parent entries, if any */
7911     for(u = 0; u < entry_ptr->flush_dep_nparents; u++) {
7912         /* Sanity check */
7913         HDassert(entry_ptr->flush_dep_parent);
7914         HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
7915         HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
7916                  entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
7917 
        /* Increment the parent's number of unserialized children */
7919         entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
7920 
7921         /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
7922         if(entry_ptr->flush_dep_parent[u]->type->notify &&
7923                 (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, entry_ptr->flush_dep_parent[u]) < 0)
7924             HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry serialized flag reset")
7925     } /* end for */
7926 
7927 done:
7928     FUNC_LEAVE_NOAPI(ret_value)
7929 } /* H5C__mark_flush_dep_unserialized() */
7930 
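/* Usage sketch:  the pair of routines above are the bookkeeping half of
 * the child (un)serialized protocol -- the other half is the client's
 * 'notify' callback, which receives the notices sent above.  The callback
 * below is a hedged illustration only:  the client type H5X_example_t and
 * its unser_children field are assumptions, not part of H5C.
 */
#if 0 /* illustrative sketch only -- not compiled */
static herr_t
H5X__example_notify(H5C_notify_action_t action, void *_thing)
{
    H5X_example_t *thing = (H5X_example_t *)_thing; /* hypothetical client type */

    switch(action) {
        case H5C_NOTIFY_ACTION_CHILD_SERIALIZED:
            /* A child's image is now up to date; mirror the cache's count */
            thing->unser_children--;
            break;

        case H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED:
            /* A child's image has been invalidated */
            thing->unser_children++;
            break;

        default:
            /* Other notify actions are ignored in this sketch */
            break;
    } /* end switch */

    return(SUCCEED);
}
#endif /* illustrative sketch only */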

#ifndef NDEBUG
/*-------------------------------------------------------------------------
 * Function:    H5C__assert_flush_dep_nocycle()
 *
 * Purpose:     Assert recursively that base_entry is not the same as
 *              entry, and perform the same assertion on all of entry's
 *              flush dependency parents.  This is used to detect cycles
 *              created by flush dependencies.
 *
 * Return:      void
 *
 * Programmer:  Neil Fortner
 *              12/10/12
 *
 *-------------------------------------------------------------------------
 */
static void
H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
    const H5C_cache_entry_t * base_entry)
{
    unsigned u;                         /* Local index variable */

    FUNC_ENTER_STATIC_NOERR

    /* Sanity checks */
    HDassert(entry);
    HDassert(base_entry);

    /* Make sure the entries are not the same */
    HDassert(base_entry != entry);

    /* Iterate over entry's parents (if any) */
    for(u = 0; u < entry->flush_dep_nparents; u++)
        H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);

    FUNC_LEAVE_NOAPI_VOID
} /* H5C__assert_flush_dep_nocycle() */
#endif /* NDEBUG */

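/* Usage sketch:  code that is about to register parent_entry as a flush
 * dependency parent of child_entry can use the assertion above to verify
 * that the new edge cannot close a cycle.  The fragment below is
 * illustrative only; the variable names are hypothetical.
 */
#if 0 /* illustrative sketch only -- not compiled */
#ifndef NDEBUG
    /* Walking up the parents of the prospective parent must never reach
     * the prospective child, or the new dependency would form a cycle.
     */
    H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
#endif /* NDEBUG */
#endif /* illustrative sketch only */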

/*-------------------------------------------------------------------------
 * Function:    H5C__serialize_cache
 *
 * Purpose:     Serialize (i.e. construct an on-disk image for) all entries
 *              in the metadata cache, including clean entries.
 *
 *              Note that flush dependencies and "flush me last" flags
 *              must be observed in the serialization process.
 *
 *              Note also that entries may be loaded, flushed, evicted,
 *              expunged, relocated, resized, or removed from the cache
 *              during this process, just as these actions may occur during
 *              a regular flush.
 *
 *              However, we are given that the cache will contain no protected
 *              entries on entry to this routine (although entries may be
 *              briefly protected and then unprotected during the serialize
 *              process).
 *
 *              The objective of this routine is to serialize all entries
 *              and to force all entries into their actual locations on disk.
 *
 *              The initial need for this routine is to settle all entries
 *              in the cache prior to construction of the metadata cache
 *              image so that the size of the cache image can be calculated.
 *              However, I gather that other uses for the routine are
 *              under consideration.
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *              a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *              7/22/15
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C__serialize_cache(H5F_t *f)
{
#if H5C_DO_SANITY_CHECKS
    int                 i;
    uint32_t            index_len = 0;
    size_t              index_size = (size_t)0;
    size_t              clean_index_size = (size_t)0;
    size_t              dirty_index_size = (size_t)0;
    size_t              slist_size = (size_t)0;
    uint32_t            slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
    H5C_ring_t          ring;
    H5C_t             * cache_ptr;
    herr_t              ret_value = SUCCEED;

    FUNC_ENTER_PACKAGE

    /* Sanity checks */
    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(cache_ptr->slist_ptr);

#if H5C_DO_SANITY_CHECKS
    HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
    HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
    HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
    HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);

    for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
        index_len += cache_ptr->index_ring_len[i];
        index_size += cache_ptr->index_ring_size[i];
        clean_index_size += cache_ptr->clean_index_ring_size[i];
        dirty_index_size += cache_ptr->dirty_index_ring_size[i];

        slist_len += cache_ptr->slist_ring_len[i];
        slist_size += cache_ptr->slist_ring_size[i];
    } /* end for */

    HDassert(cache_ptr->index_len == index_len);
    HDassert(cache_ptr->index_size == index_size);
    HDassert(cache_ptr->clean_index_size == clean_index_size);
    HDassert(cache_ptr->dirty_index_size == dirty_index_size);
    HDassert(cache_ptr->slist_len == slist_len);
    HDassert(cache_ptr->slist_size == slist_size);
#endif /* H5C_DO_SANITY_CHECKS */

#if H5C_DO_EXTREME_SANITY_CHECKS
    if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
            (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
            (H5C_validate_lru_list(cache_ptr) < 0))
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */

#ifndef NDEBUG
    /* If this is a debug build, set the serialization_count field of
     * each entry in the cache to zero before we start the serialization.
     * This allows us to detect the case in which any entry is serialized
     * more than once (a performance issue), and more importantly, the
     * case in which any flush dependency parent is serialized more than
     * once (a correctness issue).
     */
     {
        H5C_cache_entry_t * scan_ptr = NULL;

        scan_ptr = cache_ptr->il_head;
        while(scan_ptr != NULL) {
            HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            scan_ptr->serialization_count = 0;
            scan_ptr = scan_ptr->il_next;
        } /* end while */
     } /* end block */
#endif /* NDEBUG */

    /* Set cache_ptr->serialization_in_progress to TRUE, and back
     * to FALSE at the end of the function.  Must maintain this flag
     * to support H5C_get_serialization_in_progress(), which is in
     * turn required to support sanity checking in some cache
     * clients.
     */
    HDassert(!cache_ptr->serialization_in_progress);
    cache_ptr->serialization_in_progress = TRUE;

    /* Serialize each ring, starting from the outermost ring and
     * working inward.
     */
    ring = H5C_RING_USER;
    while(ring < H5C_RING_NTYPES) {
        HDassert(cache_ptr->close_warning_received);
        switch(ring) {
            case H5C_RING_USER:
                break;

            case H5C_RING_RDFSM:
                /* Settle raw data FSM */
                if(!cache_ptr->rdfsm_settled)
                    if(H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
                break;

            case H5C_RING_MDFSM:
                /* Settle metadata FSM */
                if(!cache_ptr->mdfsm_settled)
                    if(H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
                break;

            case H5C_RING_SBE:
            case H5C_RING_SB:
                break;

            default:
                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
                break;
        } /* end switch */

        if(H5C__serialize_ring(f, ring) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")

        ring++;
    } /* end while */

#ifndef NDEBUG
    /* Verify that no entry has been serialized more than once.
     * FD parents with multiple serializations should have been caught
     * elsewhere, so no specific check for them here.
     */
     {
        H5C_cache_entry_t * scan_ptr = NULL;

        scan_ptr = cache_ptr->il_head;
        while(scan_ptr != NULL) {
            HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            HDassert(scan_ptr->serialization_count <= 1);

            scan_ptr = scan_ptr->il_next;
        } /* end while */
     } /* end block */
#endif /* NDEBUG */

done:
    cache_ptr->serialization_in_progress = FALSE;
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__serialize_cache() */

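/* Usage sketch:  per the header comment, the initial client of the routine
 * above is the cache image code, which must settle all entries before the
 * image size can be computed.  A caller might look roughly like the hedged
 * fragment below (the surrounding function and its error macro context are
 * assumptions for illustration).
 */
#if 0 /* illustrative sketch only -- not compiled */
    /* Serialize the cache, forcing all entries to their actual locations
     * on disk, before computing the size of the metadata cache image.
     */
    if(H5C__serialize_cache(f) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialization of the cache failed")
#endif /* illustrative sketch only */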

/*-------------------------------------------------------------------------
 * Function:    H5C__serialize_ring
 *
 * Purpose:     Serialize the entries contained in the specified cache and
 *              ring.  All entries in rings outside the specified ring
 *              must have been serialized on entry.
 *
 *              If the cache contains protected entries in the specified
 *              ring, the function will fail, as protected entries cannot
 *              be serialized.  However, all unprotected entries in the
 *              target ring should be serialized before the function
 *              returns failure.
 *
 *              If flush dependencies appear in the target ring, the
 *              function makes repeated passes through the index list,
 *              serializing entries in flush dependency order.
 *
 *              All entries outside the H5C_RING_SBE are marked for
 *              inclusion in the cache image.  Entries in H5C_RING_SBE
 *              and below are marked for exclusion from the image.
 *
 * Return:      Non-negative on success/Negative on failure or if there was
 *              a request to flush all items and something was protected.
 *
 * Programmer:  John Mainzer
 *              9/11/15
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
{
    hbool_t             done = FALSE;
    H5C_t             * cache_ptr;
    H5C_cache_entry_t * entry_ptr;
    herr_t              ret_value = SUCCEED;

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(f);
    HDassert(f->shared);
    cache_ptr = f->shared->cache;
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(ring > H5C_RING_UNDEFINED);
    HDassert(ring < H5C_RING_NTYPES);

    HDassert(cache_ptr->serialization_in_progress);

    /* The objective here is to serialize all entries in the cache ring
     * in flush dependency order.
     *
     * The basic algorithm is to scan the cache index list looking for
     * unserialized entries that are either not in a flush dependency
     * relationship, or which have no unserialized children.  Any such
     * entry is serialized and its flush dependency parents (if any) are
     * informed -- allowing them to decrement their unserialized child
     * counts.
     *
     * However, this algorithm is complicated by the ability of client
     * serialization callbacks to perform operations on the cache which
     * can result in the insertion, deletion, relocation, resize, dirty,
     * flush, eviction, or removal (via the take ownership flag) of
     * entries.  Changes in the flush dependency structure are also
     * possible.
     *
     * On the other hand, the algorithm is simplified by the fact that
     * we are serializing, not flushing.  Thus, as long as all entries
     * are serialized correctly, it doesn't matter if we have to go back
     * and serialize an entry a second time.
     *
     * These possible actions result in the following modifications to
     * the basic algorithm:
     *
     * 1) In the event of an entry expunge, eviction or removal, we must
     *    restart the scan as it is possible that the next entry in our
     *    scan is no longer in the cache.  Were we to examine this entry,
     *    we would be accessing deallocated memory.
     *
     * 2) A resize, dirty, or insertion of an entry may result in the
     *    increment of a flush dependency parent's dirty and/or
     *    unserialized child count.  In the context of serializing the
     *    cache, this is a non-issue, as even if we have already
     *    serialized the parent, it will be marked dirty and its image
     *    marked out of date if appropriate when the child is serialized.
     *
     *    However, this is a major issue for a flush, as were this to happen
     *    in a flush, it would violate the invariant that the flush dependency
     *    feature is intended to enforce.  As the metadata cache has no
     *    control over the behavior of cache clients, it has no way of
     *    preventing this behavior.  However, it should detect it if at all
     *    possible.
     *
     *    Do this by maintaining a count of the number of times each entry is
     *    serialized during a cache serialization.  If any flush dependency
     *    parent is serialized more than once, throw an assertion failure.
     *
     * 3) An entry relocation will typically change the location of the
     *    entry in the index list.  This shouldn't cause problems as we
     *    will scan the index list until we make a complete pass without
     *    finding anything to serialize -- making relocations of either
     *    the current or next entries irrelevant.
     *
     *    Note that since a relocation may result in our skipping part of
     *    the index list, we must always do at least one more pass through
     *    the index list after an entry relocation.
     *
     * 4) Changes in the flush dependency structure are possible on
     *    entry insertion, load, expunge, evict, or remove.  Destruction
     *    of a flush dependency has no effect, as it can only relax the
     *    flush dependencies.  Creation of a flush dependency can create
     *    an unserialized child of a flush dependency parent where all
     *    flush dependency children were previously serialized.  Should
     *    this child dirty the flush dependency parent when it is serialized,
     *    the parent will be re-serialized.
     *
     *    Per the discussion of 2) above, this is a non issue for cache
     *    serialization, and a major problem for cache flush.  Using the
     *    same detection mechanism, throw an assertion failure if this
     *    condition appears.
     *
     * Observe that either eviction or removal of entries as a result of
     * a serialization is not a problem as long as the flush dependency
     * tree does not change beyond the removal of a leaf.
     *
     * (The scan / restart pattern used to cope with 1) and 3) is sketched
     * in isolation after this function.)
     */
    while(!done) {
        /* Reset the counters so that we can detect insertions, loads,
         * moves, and flush dependency height changes caused by the pre_serialize
         * and serialize callbacks.
         */
        cache_ptr->entries_loaded_counter         = 0;
        cache_ptr->entries_inserted_counter       = 0;
        cache_ptr->entries_relocated_counter      = 0;

        done = TRUE; /* set to FALSE if any activity in inner loop */
        entry_ptr = cache_ptr->il_head;
        while(entry_ptr != NULL) {
            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);

            /* Verify that either the entry is already serialized, or
             * that it is assigned to either the target or an inner
             * ring.
             */
            HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));

            /* Skip flush me last entries or inner ring entries */
            if(!entry_ptr->flush_me_last && entry_ptr->ring == ring) {

                /* If we encounter an unserialized entry in the current
                 * ring that is not marked flush me last, we are not done.
                 */
                if(!entry_ptr->image_up_to_date)
                    done = FALSE;

                /* Serialize the entry if its image is not up to date
                 * and it has no unserialized flush dependency children.
                 */
                if(!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
                    HDassert(entry_ptr->serialization_count == 0);

                    /* Serialize the entry */
                    if(H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")

                    HDassert(entry_ptr->flush_dep_nunser_children == 0);
                    HDassert(entry_ptr->serialization_count == 0);

#ifndef NDEBUG
                    /* Increment serialization counter (to detect multiple serializations) */
                    entry_ptr->serialization_count++;
#endif /* NDEBUG */
                } /* end if */
            } /* end if */

            /* Check for the cache being perturbed during the entry serialize */
            if((cache_ptr->entries_loaded_counter > 0) ||
                    (cache_ptr->entries_inserted_counter > 0) ||
                    (cache_ptr->entries_relocated_counter > 0)) {

#if H5C_COLLECT_CACHE_STATS
                H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
#endif /* H5C_COLLECT_CACHE_STATS */

                /* Reset the counters */
                cache_ptr->entries_loaded_counter         = 0;
                cache_ptr->entries_inserted_counter       = 0;
                cache_ptr->entries_relocated_counter      = 0;

                /* Restart scan */
                entry_ptr = cache_ptr->il_head;
            } /* end if */
            else
                /* Advance to next entry */
                entry_ptr = entry_ptr->il_next;
        } /* while ( entry_ptr != NULL ) */
    } /* while ( ! done ) */

    /* Reset the counters so that we can detect insertions, loads,
     * moves, and flush dependency height changes caused by the pre_serialize
     * and serialize callbacks.
     */
    cache_ptr->entries_loaded_counter     = 0;
    cache_ptr->entries_inserted_counter   = 0;
    cache_ptr->entries_relocated_counter  = 0;

    /* At this point, all entries not marked "flush me last" and in
     * the current ring or outside it should be serialized and have up
     * to date images.  Scan the index list again to serialize the
     * "flush me last" entries (if they are in the current ring) and to
     * verify that all other entries have up to date images.
     */
    entry_ptr = cache_ptr->il_head;
    while(entry_ptr != NULL) {
        HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
        HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
        HDassert(entry_ptr->ring < H5C_RING_NTYPES);
        HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));

        if(entry_ptr->ring == ring) {
            if(entry_ptr->flush_me_last) {
                if(!entry_ptr->image_up_to_date) {
                    HDassert(entry_ptr->serialization_count == 0);
                    HDassert(entry_ptr->flush_dep_nunser_children == 0);

                    /* Serialize the entry */
                    if(H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
                        HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")

                    /* Check for the cache changing */
                    if((cache_ptr->entries_loaded_counter > 0) ||
                            (cache_ptr->entries_inserted_counter > 0) ||
                            (cache_ptr->entries_relocated_counter > 0))
                        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush_me_last entry serialization triggered restart")

                    HDassert(entry_ptr->flush_dep_nunser_children == 0);
                    HDassert(entry_ptr->serialization_count == 0);
#ifndef NDEBUG
                    /* Increment serialization counter (to detect multiple serializations) */
                    entry_ptr->serialization_count++;
#endif /* NDEBUG */
                } /* end if */
            } /* end if */
            else {
                HDassert(entry_ptr->image_up_to_date);
                HDassert(entry_ptr->serialization_count <= 1);
                HDassert(entry_ptr->flush_dep_nunser_children == 0);
            }  /* end else */
        } /* if ( entry_ptr->ring == ring ) */

        entry_ptr = entry_ptr->il_next;
    } /* while ( entry_ptr != NULL ) */

done:
    HDassert(cache_ptr->serialization_in_progress);
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__serialize_ring() */

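/* The scan / restart pattern referenced in the algorithm comment above,
 * shown in isolation:  zero the loaded / inserted / relocated counters
 * before running client callbacks; if any counter has moved afterwards,
 * assume the index list may have changed underneath the scan and restart
 * from il_head.  A minimal sketch (illustrative only):
 */
#if 0 /* illustrative sketch only -- not compiled */
    entry_ptr = cache_ptr->il_head;
    while(entry_ptr != NULL) {
        /* ... invoke a callback that may perturb the cache ... */

        if((cache_ptr->entries_loaded_counter > 0) ||
                (cache_ptr->entries_inserted_counter > 0) ||
                (cache_ptr->entries_relocated_counter > 0)) {
            /* The list may have changed -- reset the counters and restart */
            cache_ptr->entries_loaded_counter    = 0;
            cache_ptr->entries_inserted_counter  = 0;
            cache_ptr->entries_relocated_counter = 0;
            entry_ptr = cache_ptr->il_head;
        } /* end if */
        else
            entry_ptr = entry_ptr->il_next;
    } /* end while */
#endif /* illustrative sketch only */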

/*-------------------------------------------------------------------------
 * Function:    H5C__serialize_single_entry
 *
 * Purpose:     Serialize the cache entry pointed to by the entry_ptr
 *              parameter.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  John Mainzer, 7/24/15
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
{
    herr_t              ret_value = SUCCEED;      /* Return value */

    FUNC_ENTER_STATIC

    /* Sanity checks */
    HDassert(f);
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(entry_ptr);
    HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
    HDassert(!entry_ptr->prefetched);
    HDassert(!entry_ptr->image_up_to_date);
    HDassert(entry_ptr->is_dirty);
    HDassert(!entry_ptr->is_protected);
    HDassert(!entry_ptr->flush_in_progress);
    HDassert(entry_ptr->type);

    /* Set entry_ptr->flush_in_progress to TRUE so the target entry
     * will not be evicted out from under us.  Must set it back to FALSE
     * when we are done.
     */
    entry_ptr->flush_in_progress = TRUE;

    /* Allocate buffer for the entry image if required. */
    if(NULL == entry_ptr->image_ptr) {
        HDassert(entry_ptr->size > 0);
        if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
            HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
        /* Write the guard bytes that trail the image proper */
        HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
    } /* end if */

    /* Generate image for entry */
    if(H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")

    /* Reset the flush_in_progress flag */
    entry_ptr->flush_in_progress = FALSE;

done:
    HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
    HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__serialize_single_entry() */

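/* The image buffer allocated above holds entry_ptr->size bytes of on-disk
 * image, followed by H5C_IMAGE_EXTRA_SPACE guard bytes when memory sanity
 * checking is enabled.  The hypothetical helper below (not part of H5C)
 * sketches a check over that layout; a serialize callback that overruns
 * its buffer will clobber the guard bytes.
 */
#if 0 /* illustrative sketch only -- not compiled */
#if H5C_DO_MEMORY_SANITY_CHECKS
static void
H5X__example_check_image_guard(const H5C_cache_entry_t *entry_ptr)
{
    HDassert(entry_ptr->image_ptr);
    HDassert(0 == HDmemcmp(((const uint8_t *)entry_ptr->image_ptr) + entry_ptr->size,
            H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE));
}
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
#endif /* illustrative sketch only */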

/*-------------------------------------------------------------------------
 * Function:    H5C__generate_image
 *
 * Purpose:     Serialize an entry and generate its image.
 *
 * Note:        This may cause the entry to be re-sized and/or moved in
 *              the cache.
 *
 *              As we will not update the metadata cache's data structures
 *              until we finish the write, we must touch up these
 *              data structures for size and location changes even if we
 *              are about to delete the entry from the cache (i.e. on a
 *              flush destroy).
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Mohamad Chaarawi
 *              2/10/16
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
{
    haddr_t             new_addr = HADDR_UNDEF;
    haddr_t             old_addr = HADDR_UNDEF;
    size_t              new_len = 0;
    unsigned            serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
    herr_t              ret_value = SUCCEED;

    FUNC_ENTER_PACKAGE

    /* Sanity check */
    HDassert(f);
    HDassert(cache_ptr);
    HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
    HDassert(entry_ptr);
    HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
    HDassert(!entry_ptr->image_up_to_date);
    HDassert(entry_ptr->is_dirty);
    HDassert(!entry_ptr->is_protected);
    HDassert(entry_ptr->type);

    /* Make note of the entry's current address */
    old_addr = entry_ptr->addr;

    /* Call client's pre-serialize callback, if there's one */
    if(entry_ptr->type->pre_serialize &&
            (entry_ptr->type->pre_serialize)(f, (void *)entry_ptr,
                entry_ptr->addr, entry_ptr->size, &new_addr, &new_len, &serialize_flags) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")

    /* Check for any flags set in the pre-serialize callback */
    if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
        /* Check for unexpected flags from serialize callback */
        if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")

#ifdef H5_HAVE_PARALLEL
        /* In the parallel case, resizes and moves in
         * the serialize operation can cause problems.
         * If they occur, scream and die.
         *
         * At present, in the parallel case, the aux_ptr
         * will only be set if there is more than one
         * process.  Thus we can use this to detect
         * the parallel case.
         *
         * This works for now, but if we start using the
         * aux_ptr for other purposes, we will have to
         * change this test accordingly.
         *
         * NB: While this test detects entries that attempt
         *     to resize or move themselves during a flush
         *     in the parallel case, it will not detect an
         *     entry that dirties, resizes, and/or moves
         *     other entries during its flush.
         *
         *     From what Quincey tells me, this test is
         *     sufficient for now, as any flush routine that
         *     does the latter will also do the former.
         *
         *     If that ceases to be the case, further
         *     tests will be necessary.
         */
        if(cache_ptr->aux_ptr != NULL)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
#endif

        /* If required, resize the buffer and update the entry and the cache
         * data structures */
        if(serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
            /* Sanity check */
            HDassert(new_len > 0);

            /* Resize the image buffer */
            if(NULL == (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
                HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
            HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */

            /* Update statistics for resizing the entry */
            H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);

            /* Update the hash table for the size change */
            H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr, !(entry_ptr->is_dirty));

            /* The entry can't be protected since we are in the process of
             * flushing it.  Thus we must update the replacement policy data
             * structures for the size change.  The macro deals with the pinned
             * case.
             */
            H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);

            /* As we haven't updated the cache data structures for
             * the flush or flush destroy yet, the entry should
             * be in the slist.  Thus update it for the size change.
             */
            HDassert(entry_ptr->is_dirty);
            HDassert(entry_ptr->in_slist);
            H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);

            /* Finally, update the entry for its new size */
            entry_ptr->size = new_len;
        } /* end if */

        /* If required, update the entry and the cache data structures
         * for a move
         */
        if(serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
            /* Update stats and entries relocated counter */
            H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)

            /* We must update cache data structures for the change in address */
            if(entry_ptr->addr == old_addr) {
                /* Delete the entry from the hash table and the slist */
                H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
                H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);

                /* Update the entry for its new address */
                entry_ptr->addr = new_addr;

                /* And then reinsert in the index and slist */
                H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
                H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
            } /* end if */
            else /* move is already done for us -- just do sanity checks */
                HDassert(entry_ptr->addr == new_addr);
        } /* end if */
    } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */

    /* Serialize object into buffer */
    if(entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
#if H5C_DO_MEMORY_SANITY_CHECKS
    HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE));
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
    entry_ptr->image_up_to_date = TRUE;

    /* Propagate the fact that the entry is serialized up the
     * flush dependency chain if appropriate.  Since the image must
     * have been out of date for this function to have been called
     * (see assertion on entry), no need to check that -- only check
     * for flush dependency parents.
     */
    HDassert(entry_ptr->flush_dep_nunser_children == 0);
    if(entry_ptr->flush_dep_nparents > 0)
        if(H5C__mark_flush_dep_serialized(entry_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__generate_image() */

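/* The serialize_flags protocol handled above is driven by the client's
 * 'pre_serialize' callback.  The hypothetical callback below (the type
 * H5X_example_t and its needed_size field are assumptions) sketches how
 * a client whose on-disk image has grown would report the new size back
 * to the cache via H5C__SERIALIZE_RESIZED_FLAG.
 */
#if 0 /* illustrative sketch only -- not compiled */
static herr_t
H5X__example_pre_serialize(H5F_t H5_ATTR_UNUSED *f, void *_thing,
    haddr_t H5_ATTR_UNUSED addr, size_t len, haddr_t H5_ATTR_UNUSED *new_addr_ptr,
    size_t *new_len_ptr, unsigned *flags_ptr)
{
    H5X_example_t *thing = (H5X_example_t *)_thing; /* hypothetical client type */

    /* If the entry has grown since its image buffer was sized, ask the
     * cache to resize the buffer before the serialize callback runs.
     */
    if(thing->needed_size > len) {
        *new_len_ptr = thing->needed_size;
        *flags_ptr |= H5C__SERIALIZE_RESIZED_FLAG;
    } /* end if */

    return(SUCCEED);
}
#endif /* illustrative sketch only */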

/*-------------------------------------------------------------------------
 *
 * Function:    H5C_remove_entry
 *
 * Purpose:     Remove an entry from the cache.  The entry must not be
 *              protected, pinned, dirty, or involved in flush dependencies.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              September 17, 2016
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5C_remove_entry(void *_entry)
{
    H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry;     /* Entry to remove */
    H5C_t *cache;               /* Cache for file */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(entry);
    HDassert(entry->ring != H5C_RING_UNDEFINED);
    cache = entry->cache_ptr;
    HDassert(cache);
    HDassert(cache->magic == H5C__H5C_T_MAGIC);

    /* Check for error conditions */
    if(entry->is_dirty)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache")
    if(entry->is_protected)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache")
    if(entry->is_pinned)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache")
    /* NOTE: If these two errors are getting tripped because the entry is
     *          in a flush dependency with a freedspace entry, move the checks
     *          after the "before evict" message is sent, and add the
     *          "child being evicted" message to the "before evict" notify
     *          section below.  QAK - 2017/08/03
     */
    if(entry->flush_dep_nparents > 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry with flush dependency parents from cache")
    if(entry->flush_dep_nchildren > 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry with flush dependency children from cache")

    /* Additional internal cache consistency checks */
    HDassert(!entry->in_slist);
    HDassert(!entry->flush_marker);
    HDassert(!entry->flush_in_progress);

    /* Note that the algorithm below is (very) similar to the set of operations
     * in H5C__flush_single_entry() and should be kept in sync with changes
     * to that code. - QAK, 2016/11/30
     */

    /* Update stats, as if we are "destroying" and taking ownership of the entry */
    H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE)

    /* If the entry's type has a 'notify' callback, send a 'before eviction'
     * notice while the entry is still fully integrated in the cache.
     */
    if(entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")

    /* Update the cache internal data structures as appropriate for a destroy.
     * Specifically:
     *	1) Delete it from the index
     *	2) Delete it from the collective read access list
     *	3) Update the replacement policy for eviction
     *	4) Remove it from the tag list for this object
     */

    H5C__DELETE_FROM_INDEX(cache, entry, FAIL)

#ifdef H5_HAVE_PARALLEL
    /* Check for collective read access flag */
    if(entry->coll_access) {
        entry->coll_access = FALSE;
        H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
    } /* end if */
#endif /* H5_HAVE_PARALLEL */

    H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)

    /* Remove entry from tag list */
    if(H5C__untag_entry(cache, entry) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")

    /* Increment entries_removed_counter and set last_entry_removed_ptr.
     * As we may be about to free the entry, recall that last_entry_removed_ptr
     * must NEVER be dereferenced.
     *
     * Recall that these fields are maintained to allow functions that perform
     * scans of lists of entries to detect the unexpected removal of entries
     * (via expunge, eviction, or take ownership at present), so that they can
     * re-start their scans if necessary.
     *
     * Also check if the entry we are watching for removal is being
     * removed (usually the 'next' entry for an iteration) and reset
     * it to indicate that it was removed.
     */
    cache->entries_removed_counter++;
    cache->last_entry_removed_ptr = entry;
    if(entry == cache->entry_watched_for_removal)
        cache->entry_watched_for_removal = NULL;

    /* Internal cache data structures should now be up to date, and
     * consistent with the status of the entry.
     *
     * Now clean up internal cache fields if appropriate.
     */

    /* Free the buffer for the on disk image */
    if(entry->image_ptr != NULL)
        entry->image_ptr = H5MM_xfree(entry->image_ptr);

    /* Reset the pointer to the cache the entry is within */
    entry->cache_ptr = NULL;

    /* Client is taking ownership of the entry.  Set bad magic here so the
     * cache will choke unless the entry is re-inserted properly
     */
    entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_remove_entry() */
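
/* Usage sketch:  a client taking ownership of an entry must have brought
 * it to the state the checks above require -- clean, unprotected, unpinned,
 * and free of flush dependencies -- before calling H5C_remove_entry().
 * Hedged fragment (error macro context assumed for illustration):
 */
#if 0 /* illustrative sketch only -- not compiled */
    /* 'thing' is the client's pointer to the cached object; after this
     * call the client owns the object and the cache has forgotten it.
     */
    if(H5C_remove_entry(thing) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from cache")
#endif /* illustrative sketch only */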