1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  * Copyright by The HDF Group.                                               *
3  * Copyright by the Board of Trustees of the University of Illinois.         *
4  * All rights reserved.                                                      *
5  *                                                                           *
6  * This file is part of HDF5.  The full HDF5 copyright notice, including     *
7  * terms governing use, modification, and redistribution, is contained in    *
8  * the COPYING file, which can be found at the root of the source code       *
9  * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.  *
10  * If you do not have access to either file, you may request a copy from     *
11  * help@hdfgroup.org.                                                        *
12  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
13 
14 /*
15  * Programmer: Robb Matzke <matzke@llnl.gov>
16  *	       Friday, October 10, 1997
17  */
18 
19 
20 #include "H5private.h"
21 #include "H5Eprivate.h"
22 #include "H5Oprivate.h"
23 #include "H5VMprivate.h"
24 
25 /* Local typedefs */
26 typedef struct H5VM_memcpy_ud_t {
27     unsigned char *dst;         /* Pointer to destination buffer */
28     const unsigned char *src;   /* Pointer to source buffer */
29 } H5VM_memcpy_ud_t;
30 
31 /* Local macros */
32 #define H5VM_HYPER_NDIMS H5O_LAYOUT_NDIMS
33 
34 /* Local prototypes */
35 static void
36 H5VM_stride_optimize1(unsigned *np/*in,out*/, hsize_t *elmt_size/*in,out*/,
37 		     const hsize_t *size, hsize_t *stride1);
38 static void
39 H5VM_stride_optimize2(unsigned *np/*in,out*/, hsize_t *elmt_size/*in,out*/,
40 		     const hsize_t *size, hsize_t *stride1, hsize_t *stride2);
41 #ifdef LATER
42 static void
43 H5VM_stride_copy2(hsize_t nelmts, hsize_t elmt_size,
44      unsigned dst_n, const hsize_t *dst_size, const ssize_t *dst_stride, void *_dst,
45      unsigned src_n, const hsize_t *src_size, const ssize_t *src_stride, const void *_src);
46 #endif /* LATER */
47 
48 
49 /*-------------------------------------------------------------------------
50  * Function:	H5VM_stride_optimize1
51  *
52  * Purpose:	Given a stride vector which references elements of the
53  *		specified size, optimize the dimensionality, the stride
54  *		vector, and the element size to minimize the dimensionality
55  *		and the number of memory accesses.
56  *
57  *		All arguments are passed by reference and their values may be
58  *		modified by this function.
59  *
60  * Return:	None
61  *
62  * Programmer:	Robb Matzke
63  *		Saturday, October 11, 1997
64  *
65  * Modifications:
66  *
67  *-------------------------------------------------------------------------
68  */
69 static void
H5VM_stride_optimize1(unsigned * np,hsize_t * elmt_size,const hsize_t * size,hsize_t * stride1)70 H5VM_stride_optimize1(unsigned *np/*in,out*/, hsize_t *elmt_size/*in,out*/,
71 		     const hsize_t *size, hsize_t *stride1)
72 {
73     FUNC_ENTER_NOAPI_NOINIT_NOERR
74 
75     /*
76      * This has to be true because if we optimize the dimensionality down to
77      * zero we still must make one reference.
78      */
79     HDassert(1 == H5VM_vector_reduce_product(0, NULL));
80 
81     /*
82      * Combine adjacent memory accesses
83      */
84     while (*np && stride1[*np-1]>0 &&
85            (hsize_t)(stride1[*np-1])==*elmt_size) {
86         *elmt_size *= size[*np-1];
87         if (--*np)
88             stride1[*np-1] += size[*np] * stride1[*np];
89     }
90 
91     FUNC_LEAVE_NOAPI_VOID
92 }
93 
94 
95 /*-------------------------------------------------------------------------
96  * Function:	H5VM_stride_optimize2
97  *
98  * Purpose:	Given two stride vectors which reference elements of the
99  *		specified size, optimize the dimensionality, the stride
100  *		vectors, and the element size to minimize the dimensionality
101  *		and the number of memory accesses.
102  *
103  *		All arguments are passed by reference and their values may be
104  *		modified by this function.
105  *
106  * Return:	Non-negative on success/Negative on failure
107  *
108  * Programmer:	Robb Matzke
109  *		Saturday, October 11, 1997
110  *
111  * Modifications:
112  *              Unrolled loops for common cases
113  *              Quincey Koziol
114  *		?, ? ?, 2001?
115  *
116  *-------------------------------------------------------------------------
117  */
118 static void
H5VM_stride_optimize2(unsigned * np,hsize_t * elmt_size,const hsize_t * size,hsize_t * stride1,hsize_t * stride2)119 H5VM_stride_optimize2(unsigned *np/*in,out*/, hsize_t *elmt_size/*in,out*/,
120 		     const hsize_t *size, hsize_t *stride1, hsize_t *stride2)
121 {
122     FUNC_ENTER_NOAPI_NOINIT_NOERR
123 
124     /*
125      * This has to be true because if we optimize the dimensionality down to
126      * zero we still must make one reference.
127      */
128     HDassert(1 == H5VM_vector_reduce_product(0, NULL));
129     HDassert(*elmt_size>0);
130 
131     /*
132      * Combine adjacent memory accesses
133      */
134 
135     /* Unroll loop for common cases */
136     switch(*np) {
137         case 1: /* For 0-D datasets (dunno if this ever gets used...) */
138             if(stride1[0] == *elmt_size && stride2[0] == *elmt_size) {
139                 *elmt_size *= size[0];
140                 --*np;  /* *np decrements to a value of 0 now */
141             } /* end if */
142             break;
143 
144         case 2: /* For 1-D datasets */
145             if(stride1[1] == *elmt_size && stride2[1] == *elmt_size) {
146                 *elmt_size *= size[1];
147                 --*np;  /* *np decrements to a value of 1 now */
148                 stride1[0] += size[1] * stride1[1];
149                 stride2[0] += size[1] * stride2[1];
150 
151                 if(stride1[0] == *elmt_size && stride2[0] == *elmt_size) {
152                     *elmt_size *= size[0];
153                     --*np;  /* *np decrements to a value of 0 now */
154                 } /* end if */
155             } /* end if */
156             break;
157 
158         case 3: /* For 2-D datasets */
159             if(stride1[2] == *elmt_size && stride2[2] == *elmt_size) {
160                 *elmt_size *= size[2];
161                 --*np;  /* *np decrements to a value of 2 now */
162                 stride1[1] += size[2] * stride1[2];
163                 stride2[1] += size[2] * stride2[2];
164 
165                 if(stride1[1] == *elmt_size && stride2[1] == *elmt_size) {
166                     *elmt_size *= size[1];
167                     --*np;  /* *np decrements to a value of 1 now */
168                     stride1[0] += size[1] * stride1[1];
169                     stride2[0] += size[1] * stride2[1];
170 
171                     if(stride1[0] == *elmt_size && stride2[0] == *elmt_size) {
172                         *elmt_size *= size[0];
173                         --*np;  /* *np decrements to a value of 0 now */
174                     } /* end if */
175                 } /* end if */
176             } /* end if */
177             break;
178 
179         case 4: /* For 3-D datasets */
180             if(stride1[3] == *elmt_size && stride2[3] == *elmt_size) {
181                 *elmt_size *= size[3];
182                 --*np;  /* *np decrements to a value of 3 now */
183                 stride1[2] += size[3] * stride1[3];
184                 stride2[2] += size[3] * stride2[3];
185 
186                 if(stride1[2] == *elmt_size && stride2[2] == *elmt_size) {
187                     *elmt_size *= size[2];
188                     --*np;  /* *np decrements to a value of 2 now */
189                     stride1[1] += size[2] * stride1[2];
190                     stride2[1] += size[2] * stride2[2];
191 
192                     if(stride1[1] == *elmt_size && stride2[1] == *elmt_size) {
193                         *elmt_size *= size[1];
194                         --*np;  /* *np decrements to a value of 1 now */
195                         stride1[0] += size[1] * stride1[1];
196                         stride2[0] += size[1] * stride2[1];
197 
198                         if(stride1[0] == *elmt_size && stride2[0] == *elmt_size) {
199                             *elmt_size *= size[0];
200                             --*np;  /* *np decrements to a value of 0 now */
201                         } /* end if */
202                     } /* end if */
203                 } /* end if */
204             } /* end if */
205             break;
206 
207         default:
208             while (*np &&
209                     stride1[*np-1] == *elmt_size &&
210                     stride2[*np-1] == *elmt_size) {
211                 *elmt_size *= size[*np-1];
212                 if (--*np) {
213                     stride1[*np-1] += size[*np] * stride1[*np];
214                     stride2[*np-1] += size[*np] * stride2[*np];
215                 }
216             }
217             break;
218     } /* end switch */
219 
220     FUNC_LEAVE_NOAPI_VOID
221 }
222 
223 
224 /*-------------------------------------------------------------------------
225  * Function:	H5VM_hyper_stride
226  *
227  * Purpose:	Given a description of a hyperslab, this function returns
228  *		(through STRIDE[]) the byte strides appropriate for accessing
229  *		all bytes of the hyperslab and the byte offset where the
230  *		striding will begin.  The SIZE can be passed to the various
231  *		stride functions.
232  *
233  *		The dimensionality of the whole array, the hyperslab, and the
234  *		returned stride array is N.  The whole array dimensions are
235  *		TOTAL_SIZE and the hyperslab is at offset OFFSET and has
236  *		dimensions SIZE.
237  *
238  *		The stride and starting point returned will cause the
239  *		hyperslab elements to be referenced in C order.
240  *
241  * Return:	Success:	Byte offset from beginning of array to start
242  *				of striding.
243  *
244  *		Failure:	abort() -- should never fail
245  *
246  * Programmer:	Robb Matzke
247  *		Saturday, October 11, 1997
248  *
249  * Modifications:
250  *              Unrolled loops for common cases
251  *              Quincey Koziol
252  *		?, ? ?, 2001?
253  *
254  *-------------------------------------------------------------------------
255  */
256 hsize_t
H5VM_hyper_stride(unsigned n,const hsize_t * size,const hsize_t * total_size,const hsize_t * offset,hsize_t * stride)257 H5VM_hyper_stride(unsigned n, const hsize_t *size,
258 		 const hsize_t *total_size, const hsize_t *offset,
259 		 hsize_t *stride/*out*/)
260 {
261     hsize_t	    skip;	/*starting point byte offset		*/
262     hsize_t	    acc;	/*accumulator				*/
263     int		i;		/*counter				*/
264     hsize_t	    ret_value;  /* Return value */
265 
266     FUNC_ENTER_NOAPI_NOINIT_NOERR
267 
268     HDassert(n <= H5VM_HYPER_NDIMS);
269     HDassert(size);
270     HDassert(total_size);
271     HDassert(stride);
272 
273     /* init */
274     HDassert(n>0);
275     stride[n-1] = 1;
276     skip = offset ? offset[n-1] : 0;
277 
278     switch(n) {
279         case 2: /* 1-D dataset */
280             HDassert(total_size[1]>=size[1]);
281             stride[0] = total_size[1]-size[1]; /*overflow checked*/
282             acc = total_size[1];
283             skip += acc * (offset ? offset[0] : 0);
284             break;
285 
286         case 3: /* 2-D dataset */
287             HDassert(total_size[2]>=size[2]);
288             stride[1] = total_size[2]-size[2]; /*overflow checked*/
289             acc = total_size[2];
290             skip += acc * (offset ? (hsize_t)offset[1] : 0);
291 
292             HDassert(total_size[1]>=size[1]);
293             stride[0] = acc * (total_size[1] - size[1]); /*overflow checked*/
294             acc *= total_size[1];
295             skip += acc * (offset ? (hsize_t)offset[0] : 0);
296             break;
297 
298         case 4: /* 3-D dataset */
299             HDassert(total_size[3]>=size[3]);
300             stride[2] = total_size[3]-size[3]; /*overflow checked*/
301             acc = total_size[3];
302             skip += acc * (offset ? (hsize_t)offset[2] : 0);
303 
304             HDassert(total_size[2]>=size[2]);
305             stride[1] = acc * (total_size[2] - size[2]); /*overflow checked*/
306             acc *= total_size[2];
307             skip += acc * (offset ? (hsize_t)offset[1] : 0);
308 
309             HDassert(total_size[1]>=size[1]);
310             stride[0] = acc * (total_size[1] - size[1]); /*overflow checked*/
311             acc *= total_size[1];
312             skip += acc * (offset ? (hsize_t)offset[0] : 0);
313             break;
314 
315         default:
316             /* others */
317             for (i=(int)(n-2), acc=1; i>=0; --i) {
318                 HDassert(total_size[i+1]>=size[i+1]);
319                 stride[i] = acc * (total_size[i+1] - size[i+1]); /*overflow checked*/
320                 acc *= total_size[i+1];
321                 skip += acc * (offset ? (hsize_t)offset[i] : 0);
322             }
323             break;
324     } /* end switch */
325 
326     /* Set return value */
327     ret_value=skip;
328 
329     FUNC_LEAVE_NOAPI(ret_value)
330 }
331 
332 
333 /*-------------------------------------------------------------------------
334  * Function:	H5VM_hyper_eq
335  *
336  * Purpose:	Determines whether two hyperslabs are equal.  This function
337  *		assumes that both hyperslabs are relative to the same array,
338  *		for if not, they could not possibly be equal.
339  *
340  * Return:	Success:	TRUE if the hyperslabs are equal (that is,
341  *				both refer to exactly the same elements of an
342  *				array)
343  *
344  *				FALSE otherwise.
345  *
346  *		Failure:	TRUE the rank is zero or if both hyperslabs
347  *				are of zero size.
348  *
349  * Programmer:	Robb Matzke
350  *		Friday, October 17, 1997
351  *
352  * Modifications:
353  *
354  *-------------------------------------------------------------------------
355  */
356 htri_t
H5VM_hyper_eq(unsigned n,const hsize_t * offset1,const hsize_t * size1,const hsize_t * offset2,const hsize_t * size2)357 H5VM_hyper_eq(unsigned n,
358 	     const hsize_t *offset1, const hsize_t *size1,
359 	     const hsize_t *offset2, const hsize_t *size2)
360 {
361     hsize_t	nelmts1 = 1, nelmts2 = 1;
362     unsigned	i;
363     htri_t      ret_value=TRUE;         /* Return value */
364 
365     /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
366     FUNC_ENTER_NOAPI_NOINIT_NOERR
367 
368     if (n == 0) HGOTO_DONE(TRUE)
369 
370     for (i=0; i<n; i++) {
371 	if ((offset1 ? offset1[i] : 0) != (offset2 ? offset2[i] : 0))
372 	    HGOTO_DONE(FALSE)
373 	if ((size1 ? size1[i] : 0) != (size2 ? size2[i] : 0))
374 	    HGOTO_DONE(FALSE)
375 	if (0 == (nelmts1 *= (size1 ? size1[i] : 0)))
376             HGOTO_DONE(FALSE)
377 	if (0 == (nelmts2 *= (size2 ? size2[i] : 0)))
378             HGOTO_DONE(FALSE)
379     }
380 
381 done:
382     FUNC_LEAVE_NOAPI(ret_value)
383 }
384 
385 
386 /*-------------------------------------------------------------------------
387  * Function:	H5VM_hyper_fill
388  *
389  * Purpose:	Similar to memset() except it operates on hyperslabs...
390  *
391  *		Fills a hyperslab of array BUF with some value VAL.  BUF
392  *		is treated like a C-order array with N dimensions where the
393  *		size of each dimension is TOTAL_SIZE[].	 The hyperslab which
394  *		will be filled with VAL begins at byte offset OFFSET[] from
395  *		the minimum corner of BUF and continues for SIZE[] bytes in
396  *		each dimension.
397  *
398  * Return:	Non-negative on success/Negative on failure
399  *
400  * Programmer:	Robb Matzke
401  *		Friday, October 10, 1997
402  *
403  * Modifications:
404  *
405  *-------------------------------------------------------------------------
406  */
407 herr_t
H5VM_hyper_fill(unsigned n,const hsize_t * _size,const hsize_t * total_size,const hsize_t * offset,void * _dst,unsigned fill_value)408 H5VM_hyper_fill(unsigned n, const hsize_t *_size,
409 	       const hsize_t *total_size, const hsize_t *offset, void *_dst,
410 	       unsigned fill_value)
411 {
412     uint8_t	*dst = (uint8_t*)_dst;	/*cast for ptr arithmetic	*/
413     hsize_t	size[H5VM_HYPER_NDIMS];	/*a modifiable copy of _size	*/
414     hsize_t	dst_stride[H5VM_HYPER_NDIMS]; /*destination stride info  */
415     hsize_t	dst_start;		/*byte offset to start of stride*/
416     hsize_t	elmt_size = 1;		/*bytes per element		*/
417     herr_t	ret_value;		/*function return status	*/
418 #ifndef NDEBUG
419     unsigned	u;
420 #endif
421 
422     FUNC_ENTER_NOAPI_NOINIT_NOERR
423 
424     /* check args */
425     HDassert(n > 0 && n <= H5VM_HYPER_NDIMS);
426     HDassert(_size);
427     HDassert(total_size);
428     HDassert(dst);
429 #ifndef NDEBUG
430     for (u = 0; u < n; u++) {
431         HDassert(_size[u] > 0);
432         HDassert(total_size[u] > 0);
433     }
434 #endif
435 
436     /* Copy the size vector so we can modify it */
437     H5VM_vector_cpy(n, size, _size);
438 
439     /* Compute an optimal destination stride vector */
440     dst_start = H5VM_hyper_stride(n, size, total_size, offset, dst_stride);
441     H5VM_stride_optimize1(&n, &elmt_size, size, dst_stride);
442 
443     /* Copy */
444     ret_value = H5VM_stride_fill(n, elmt_size, size, dst_stride, dst+dst_start,
445 			     fill_value);
446 
447     FUNC_LEAVE_NOAPI(ret_value)
448 }
449 
450 
451 /*-------------------------------------------------------------------------
452  * Function:	H5VM_hyper_copy
453  *
454  * Purpose:	Copies a hyperslab from the source to the destination.
455  *
456  *		A hyperslab is a logically contiguous region of
457  *		multi-dimensional size SIZE of an array whose dimensionality
458  *		is N and whose total size is DST_TOTAL_SIZE or SRC_TOTAL_SIZE.
459  *		The minimum corner of the hyperslab begins at a
460  *		multi-dimensional offset from the minimum corner of the DST
461  *		(destination) or SRC (source) array.  The sizes and offsets
462  *		are assumed to be in C order, that is, the first size/offset
463  *		varies the slowest while the last varies the fastest in the
464  *		mapping from N-dimensional space to linear space.  This
465  *		function assumes that the array elements are single bytes (if
466  *		your array has multi-byte elements then add an additional
467  *		dimension whose size is that of your element).
468  *
469  *		The SRC and DST array may be the same array, but the results
470  *		are undefined if the source hyperslab overlaps the
471  *		destination hyperslab.
472  *
473  * Return:	Non-negative on success/Negative on failure
474  *
475  * Programmer:	Robb Matzke
476  *		Friday, October 10, 1997
477  *
478  * Modifications:
479  *              Unrolled loops for common cases
480  *              Quincey Koziol
481  *		?, ? ?, 2001?
482  *
483  *-------------------------------------------------------------------------
484  */
485 herr_t
H5VM_hyper_copy(unsigned n,const hsize_t * _size,const hsize_t * dst_size,const hsize_t * dst_offset,void * _dst,const hsize_t * src_size,const hsize_t * src_offset,const void * _src)486 H5VM_hyper_copy(unsigned n, const hsize_t *_size,
487 
488 	       /*destination*/
489 	       const hsize_t *dst_size, const hsize_t *dst_offset,
490 	       void *_dst,
491 
492 	       /*source*/
493 	       const hsize_t *src_size, const hsize_t *src_offset,
494 	       const void *_src)
495 {
496     const uint8_t *src = (const uint8_t*)_src;	/*cast for ptr arithmtc */
497     uint8_t	*dst = (uint8_t*) _dst;		/*cast for ptr arithmtc */
498     hsize_t	size[H5VM_HYPER_NDIMS];		/*a modifiable _size	*/
499     hsize_t	src_stride[H5VM_HYPER_NDIMS];	/*source stride info	*/
500     hsize_t	dst_stride[H5VM_HYPER_NDIMS];	/*dest stride info	*/
501     hsize_t	dst_start, src_start;		/*offset to start at	*/
502     hsize_t	elmt_size = 1;			/*element size in bytes */
503     herr_t	ret_value;			/*return status		*/
504 #ifndef NDEBUG
505     unsigned	u;
506 #endif
507 
508     FUNC_ENTER_NOAPI_NOINIT_NOERR
509 
510     /* check args */
511     HDassert(n > 0 && n <= H5VM_HYPER_NDIMS);
512     HDassert(_size);
513     HDassert(dst_size);
514     HDassert(src_size);
515     HDassert(dst);
516     HDassert(src);
517 #ifndef NDEBUG
518     for (u = 0; u < n; u++) {
519         HDassert(_size[u] > 0);
520         HDassert(dst_size[u] > 0);
521         HDassert(src_size[u] > 0);
522     }
523 #endif
524 
525     /* Copy the size vector so we can modify it */
526     H5VM_vector_cpy(n, size, _size);
527 
528     /* Compute stride vectors for source and destination */
529 #ifdef NO_INLINED_CODE
530     dst_start = H5VM_hyper_stride(n, size, dst_size, dst_offset, dst_stride);
531     src_start = H5VM_hyper_stride(n, size, src_size, src_offset, src_stride);
532 #else /* NO_INLINED_CODE */
533     /* in-line version of two calls to H5VM_hyper_stride() */
534     {
535         hsize_t	    dst_acc;	/*accumulator				*/
536         hsize_t	    src_acc;	/*accumulator				*/
537         int        ii;		    /*counter				*/
538 
539         /* init */
540         HDassert(n>0);
541         dst_stride[n-1] = 1;
542         src_stride[n-1] = 1;
543         dst_start = dst_offset ? dst_offset[n-1] : 0;
544         src_start = src_offset ? src_offset[n-1] : 0;
545 
546         /* Unroll loop for common cases */
547         switch(n) {
548             case 2:
549                 HDassert(dst_size[1]>=size[1]);
550                 HDassert(src_size[1]>=size[1]);
551                 dst_stride[0] = dst_size[1] - size[1]; /*overflow checked*/
552                 src_stride[0] = src_size[1] - size[1]; /*overflow checked*/
553                 dst_acc = dst_size[1];
554                 src_acc = src_size[1];
555                 dst_start += dst_acc * (dst_offset ? dst_offset[0] : 0);
556                 src_start += src_acc * (src_offset ? src_offset[0] : 0);
557                 break;
558 
559             case 3:
560                 HDassert(dst_size[2]>=size[2]);
561                 HDassert(src_size[2]>=size[2]);
562                 dst_stride[1] = dst_size[2] - size[2]; /*overflow checked*/
563                 src_stride[1] = src_size[2] - size[2]; /*overflow checked*/
564                 dst_acc = dst_size[2];
565                 src_acc = src_size[2];
566                 dst_start += dst_acc * (dst_offset ? dst_offset[1] : 0);
567                 src_start += src_acc * (src_offset ? src_offset[1] : 0);
568 
569                 HDassert(dst_size[1]>=size[1]);
570                 HDassert(src_size[1]>=size[1]);
571                 dst_stride[0] = dst_acc * (dst_size[1] - size[1]); /*overflow checked*/
572                 src_stride[0] = src_acc * (src_size[1] - size[1]); /*overflow checked*/
573                 dst_acc *= dst_size[1];
574                 src_acc *= src_size[1];
575                 dst_start += dst_acc * (dst_offset ? dst_offset[0] : 0);
576                 src_start += src_acc * (src_offset ? src_offset[0] : 0);
577                 break;
578 
579             case 4:
580                 HDassert(dst_size[3]>=size[3]);
581                 HDassert(src_size[3]>=size[3]);
582                 dst_stride[2] = dst_size[3] - size[3]; /*overflow checked*/
583                 src_stride[2] = src_size[3] - size[3]; /*overflow checked*/
584                 dst_acc = dst_size[3];
585                 src_acc = src_size[3];
586                 dst_start += dst_acc * (dst_offset ? dst_offset[2] : 0);
587                 src_start += src_acc * (src_offset ? src_offset[2] : 0);
588 
589                 HDassert(dst_size[2]>=size[2]);
590                 HDassert(src_size[2]>=size[2]);
591                 dst_stride[1] = dst_acc * (dst_size[2] - size[2]); /*overflow checked*/
592                 src_stride[1] = src_acc * (src_size[2] - size[2]); /*overflow checked*/
593                 dst_acc *= dst_size[2];
594                 src_acc *= src_size[2];
595                 dst_start += dst_acc * (dst_offset ? dst_offset[1] : 0);
596                 src_start += src_acc * (src_offset ? src_offset[1] : 0);
597 
598                 HDassert(dst_size[1]>=size[1]);
599                 HDassert(src_size[1]>=size[1]);
600                 dst_stride[0] = dst_acc * (dst_size[1] - size[1]); /*overflow checked*/
601                 src_stride[0] = src_acc * (src_size[1] - size[1]); /*overflow checked*/
602                 dst_acc *= dst_size[1];
603                 src_acc *= src_size[1];
604                 dst_start += dst_acc * (dst_offset ? dst_offset[0] : 0);
605                 src_start += src_acc * (src_offset ? src_offset[0] : 0);
606                 break;
607 
608             default:
609                 /* others */
610                 for (ii=(int)(n-2), dst_acc=1, src_acc=1; ii>=0; --ii) {
611                     HDassert(dst_size[ii+1]>=size[ii+1]);
612                     HDassert(src_size[ii+1]>=size[ii+1]);
613                     dst_stride[ii] = dst_acc * (dst_size[ii+1] - size[ii+1]); /*overflow checked*/
614                     src_stride[ii] = src_acc * (src_size[ii+1] - size[ii+1]); /*overflow checked*/
615                     dst_acc *= dst_size[ii+1];
616                     src_acc *= src_size[ii+1];
617                     dst_start += dst_acc * (dst_offset ? dst_offset[ii] : 0);
618                     src_start += src_acc * (src_offset ? src_offset[ii] : 0);
619                 }
620                 break;
621         } /* end switch */
622     }
623 #endif /* NO_INLINED_CODE */
624 
625     /* Optimize the strides as a pair */
626     H5VM_stride_optimize2(&n, &elmt_size, size, dst_stride, src_stride);
627 
628     /* Perform the copy in terms of stride */
629     ret_value = H5VM_stride_copy(n, elmt_size, size,
630              dst_stride, dst+dst_start, src_stride, src+src_start);
631 
632     FUNC_LEAVE_NOAPI(ret_value)
633 }
634 
635 
636 /*-------------------------------------------------------------------------
637  * Function:	H5VM_stride_fill
638  *
639  * Purpose:	Fills all bytes of a hyperslab with the same value using
640  *		memset().
641  *
642  * Return:	Non-negative on success/Negative on failure
643  *
644  * Programmer:	Robb Matzke
645  *		Saturday, October 11, 1997
646  *
647  * Modifications:
648  *
649  *-------------------------------------------------------------------------
650  */
651 herr_t
H5VM_stride_fill(unsigned n,hsize_t elmt_size,const hsize_t * size,const hsize_t * stride,void * _dst,unsigned fill_value)652 H5VM_stride_fill(unsigned n, hsize_t elmt_size, const hsize_t *size,
653 		const hsize_t *stride, void *_dst, unsigned fill_value)
654 {
655     uint8_t	*dst = (uint8_t*)_dst; 	/*cast for ptr arithmetic	*/
656     hsize_t	idx[H5VM_HYPER_NDIMS]; 	/*1-origin indices		*/
657     hsize_t	nelmts;			/*number of elements to fill	*/
658     hsize_t	i;			/*counter			*/
659     int	j;			/*counter			*/
660     hbool_t	carry;			/*subtraction carray value	*/
661 
662     FUNC_ENTER_NOAPI_NOINIT_NOERR
663 
664     HDassert(elmt_size < SIZET_MAX);
665 
666     H5VM_vector_cpy(n, idx, size);
667     nelmts = H5VM_vector_reduce_product(n, size);
668     for (i=0; i<nelmts; i++) {
669         /* Copy an element */
670         H5_CHECK_OVERFLOW(elmt_size,hsize_t,size_t);
671         HDmemset(dst, (int)fill_value, (size_t)elmt_size); /*lint !e671 The elmt_size will be OK */
672 
673         /* Decrement indices and advance pointer */
674         for (j=(int)(n-1), carry=TRUE; j>=0 && carry; --j) {
675             dst += stride[j];
676 
677             if (--idx[j])
678                 carry = FALSE;
679             else {
680                 HDassert(size);
681                 idx[j] = size[j];
682             } /* end else */
683         }
684     }
685 
686     FUNC_LEAVE_NOAPI(SUCCEED)
687 }
688 
689 
690 /*-------------------------------------------------------------------------
691  * Function:	H5VM_stride_copy
692  *
693  * Purpose:	Uses DST_STRIDE and SRC_STRIDE to advance through the arrays
694  *		DST and SRC while copying bytes from SRC to DST.  This
695  *		function minimizes the number of calls to memcpy() by
696  *		combining various strides, but it will never touch memory
697  *		outside the hyperslab defined by the strides.
698  *
699  * Note:	If the src_stride is all zero and elmt_size is one, then it's
700  *		probably more efficient to use H5VM_stride_fill() instead.
701  *
702  * Return:	Non-negative on success/Negative on failure
703  *
704  * Programmer:	Robb Matzke
705  *		Saturday, October 11, 1997
706  *
707  * Modifications:
708  *
709  *-------------------------------------------------------------------------
710  */
711 herr_t
H5VM_stride_copy(unsigned n,hsize_t elmt_size,const hsize_t * size,const hsize_t * dst_stride,void * _dst,const hsize_t * src_stride,const void * _src)712 H5VM_stride_copy(unsigned n, hsize_t elmt_size, const hsize_t *size,
713 		const hsize_t *dst_stride, void *_dst,
714 		const hsize_t *src_stride, const void *_src)
715 {
716     uint8_t	*dst = (uint8_t*)_dst;		/*cast for ptr arithmetic*/
717     const uint8_t *src = (const uint8_t*) _src;	/*cast for ptr arithmetic*/
718     hsize_t	idx[H5VM_HYPER_NDIMS];		/*1-origin indices	*/
719     hsize_t	nelmts;				/*num elements to copy	*/
720     hsize_t	i;				/*counter		*/
721     int	j;				/*counters		*/
722     hbool_t	carry;				/*carray for subtraction*/
723 
724     FUNC_ENTER_NOAPI_NOINIT_NOERR
725 
726     HDassert(elmt_size<SIZET_MAX);
727 
728     if (n) {
729         H5VM_vector_cpy(n, idx, size);
730         nelmts = H5VM_vector_reduce_product(n, size);
731         for (i=0; i<nelmts; i++) {
732 
733             /* Copy an element */
734             H5_CHECK_OVERFLOW(elmt_size,hsize_t,size_t);
735             HDmemcpy(dst, src, (size_t)elmt_size); /*lint !e671 The elmt_size will be OK */
736 
737             /* Decrement indices and advance pointers */
738             for (j=(int)(n-1), carry=TRUE; j>=0 && carry; --j) {
739                 src += src_stride[j];
740                 dst += dst_stride[j];
741 
742                 if (--idx[j])
743                     carry = FALSE;
744                 else {
745                     HDassert(size);
746                     idx[j] = size[j];
747                 }
748             }
749         }
750     } else {
751         H5_CHECK_OVERFLOW(elmt_size,hsize_t,size_t);
752         HDmemcpy (dst, src, (size_t)elmt_size); /*lint !e671 The elmt_size will be OK */
753     }
754 
755     FUNC_LEAVE_NOAPI(SUCCEED)
756 }
757 
758 
759 /*-------------------------------------------------------------------------
760  * Function:	H5VM_stride_copy_s
761  *
762  * Purpose:	Uses DST_STRIDE and SRC_STRIDE to advance through the arrays
763  *		DST and SRC while copying bytes from SRC to DST.  This
764  *		function minimizes the number of calls to memcpy() by
765  *		combining various strides, but it will never touch memory
766  *		outside the hyperslab defined by the strides.
767  *
768  * Note:	If the src_stride is all zero and elmt_size is one, then it's
769  *		probably more efficient to use H5VM_stride_fill() instead.
770  *
771  * Return:	Non-negative on success/Negative on failure
772  *
773  * Programmer:	Robb Matzke
774  *		Saturday, October 11, 1997
775  *
776  * Modifications:
777  *
778  *-------------------------------------------------------------------------
779  */
780 herr_t
H5VM_stride_copy_s(unsigned n,hsize_t elmt_size,const hsize_t * size,const hssize_t * dst_stride,void * _dst,const hssize_t * src_stride,const void * _src)781 H5VM_stride_copy_s(unsigned n, hsize_t elmt_size, const hsize_t *size,
782 		const hssize_t *dst_stride, void *_dst,
783 		const hssize_t *src_stride, const void *_src)
784 {
785     uint8_t	*dst = (uint8_t*)_dst;		/*cast for ptr arithmetic*/
786     const uint8_t *src = (const uint8_t*) _src;	/*cast for ptr arithmetic*/
787     hsize_t	idx[H5VM_HYPER_NDIMS];		/*1-origin indices	*/
788     hsize_t	nelmts;				/*num elements to copy	*/
789     hsize_t	i;				/*counter		*/
790     int	j;				/*counters		*/
791     hbool_t	carry;				/*carray for subtraction*/
792 
793     FUNC_ENTER_NOAPI_NOINIT_NOERR
794 
795     HDassert(elmt_size<SIZET_MAX);
796 
797     if (n) {
798         H5VM_vector_cpy(n, idx, size);
799         nelmts = H5VM_vector_reduce_product(n, size);
800         for (i=0; i<nelmts; i++) {
801 
802             /* Copy an element */
803             H5_CHECK_OVERFLOW(elmt_size,hsize_t,size_t);
804             HDmemcpy(dst, src, (size_t)elmt_size); /*lint !e671 The elmt_size will be OK */
805 
806             /* Decrement indices and advance pointers */
807             for (j=(int)(n-1), carry=TRUE; j>=0 && carry; --j) {
808                 src += src_stride[j];
809                 dst += dst_stride[j];
810 
811                 if (--idx[j])
812                     carry = FALSE;
813                 else {
814                     HDassert(size);
815                     idx[j] = size[j];
816                 }
817             }
818         }
819     } else {
820         H5_CHECK_OVERFLOW(elmt_size,hsize_t,size_t);
821         HDmemcpy (dst, src, (size_t)elmt_size); /*lint !e671 The elmt_size will be OK */
822     }
823 
824     FUNC_LEAVE_NOAPI(SUCCEED)
825 }
826 
827 #ifdef LATER
828 
829 /*-------------------------------------------------------------------------
830  * Function:	H5VM_stride_copy2
831  *
832  * Purpose:	Similar to H5VM_stride_copy() except the source and
833  *		destination each have their own dimensionality and size and
834  *		we copy exactly NELMTS elements each of size ELMT_SIZE.	 The
835  *		size counters wrap if NELMTS is more than a size counter.
836  *
837  * Return:	None
838  *
839  * Programmer:	Robb Matzke
840  *		Saturday, October 11, 1997
841  *
842  * Modifications:
843  *
844  *-------------------------------------------------------------------------
845  */
846 static void
H5VM_stride_copy2(hsize_t nelmts,hsize_t elmt_size,unsigned dst_n,const hsize_t * dst_size,const hsize_t * dst_stride,void * _dst,unsigned src_n,const hsize_t * src_size,const hsize_t * src_stride,const void * _src)847 H5VM_stride_copy2(hsize_t nelmts, hsize_t elmt_size,
848 
849 		 /* destination */
850 		 unsigned dst_n, const hsize_t *dst_size,
851 		 const hsize_t *dst_stride,
852 		 void *_dst,
853 
854 		 /* source */
855 		 unsigned src_n, const hsize_t *src_size,
856 		 const hsize_t *src_stride,
857 		 const void *_src)
858 {
859     uint8_t	*dst = (uint8_t *) _dst;
860     const uint8_t *src = (const uint8_t *) _src;
861     hsize_t	dst_idx[H5VM_HYPER_NDIMS];
862     hsize_t	src_idx[H5VM_HYPER_NDIMS];
863     hsize_t	i;              /* Local index variable */
864     int		j;              /* Local index variable */
865     hbool_t	carry;
866 
867     FUNC_ENTER_NOAPI_NOINIT_NOERR
868 
869     HDassert(elmt_size < SIZET_MAX);
870     HDassert(dst_n>0);
871     HDassert(src_n>0);
872 
873     H5VM_vector_cpy(dst_n, dst_idx, dst_size);
874     H5VM_vector_cpy(src_n, src_idx, src_size);
875 
876     for (i=0; i<nelmts; i++) {
877 
878 	/* Copy an element */
879         H5_CHECK_OVERFLOW(elmt_size,hsize_t,size_t);
880 	HDmemcpy(dst, src, (size_t)elmt_size); /*lint !e671 The elmt_size will be OK */
881 
882 	/* Decrement indices and advance pointers */
883 	for (j=(int)(dst_n-1), carry=TRUE; j>=0 && carry; --j) {
884 	    dst += dst_stride[j];
885 	    if (--dst_idx[j])
886                 carry = FALSE;
887 	    else {
888                 HDassert(dst_size);
889                 dst_idx[j] = dst_size[j];
890             } /* end else */
891 	}
892 	for (j=(int)(src_n-1), carry=TRUE; j>=0 && carry; --j) {
893 	    src += src_stride[j];
894 	    if (--src_idx[j])
895                 carry = FALSE;
896 	    else {
897                 HDassert(src_size);
898                 src_idx[j] = src_size[j];
899             } /* end else */
900 	}
901     }
902 
903     FUNC_LEAVE_NOAPI_VOID
904 }
905 #endif /* LATER */
906 
907 
908 /*-------------------------------------------------------------------------
909  * Function:	H5VM_array_fill
910  *
911  * Purpose:	Fills all bytes of an array with the same value using
912  *		memset(). Increases amount copied by power of two until the
913  *		halfway point is crossed, then copies the rest in one swoop.
914  *
915  * Return:	Non-negative on success/Negative on failure
916  *
917  * Programmer:	Quincey Koziol
918  *		Thursday, June 18, 1998
919  *
920  * Modifications:
921  *
922  *-------------------------------------------------------------------------
923  */
924 herr_t
H5VM_array_fill(void * _dst,const void * src,size_t size,size_t count)925 H5VM_array_fill(void *_dst, const void *src, size_t size, size_t count)
926 {
927     size_t      copy_size;          /* size of the buffer to copy	*/
928     size_t      copy_items;         /* number of items currently copying*/
929     size_t      items_left;         /* number of items left to copy 	*/
930     uint8_t     *dst=(uint8_t*)_dst;/* alias for pointer arithmetic	*/
931 
932     FUNC_ENTER_NOAPI_NOINIT_NOERR
933 
934     HDassert(dst);
935     HDassert(src);
936     HDassert(size < SIZET_MAX && size > 0);
937     HDassert(count < SIZET_MAX && count > 0);
938 
939     HDmemcpy(dst, src, size);   /* copy first item */
940 
941     /* Initialize counters, etc. while compensating for first element copied */
942     copy_size = size;
943     copy_items = 1;
944     items_left = count - 1;
945     dst += size;
946 
947     /* copy until we've copied at least half of the items */
948     while (items_left >= copy_items)
949     {
950         HDmemcpy(dst, _dst, copy_size);   /* copy the current chunk */
951         dst += copy_size;     /* move the offset for the next chunk */
952         items_left -= copy_items;   /* decrement the number of items left */
953 
954         copy_size *= 2;     /* increase the size of the chunk to copy */
955         copy_items *= 2;    /* increase the count of items we are copying */
956     }   /* end while */
957     if (items_left > 0)   /* if there are any items left to copy */
958         HDmemcpy(dst, _dst, items_left * size);
959 
960     FUNC_LEAVE_NOAPI(SUCCEED)
961 }   /* H5VM_array_fill() */
962 
963 
964 /*-------------------------------------------------------------------------
965  * Function:	H5VM_array_down
966  *
967  * Purpose:	Given a set of dimension sizes, calculate the size of each
968  *              "down" slice.  This is the size of the dimensions for all the
969  *              dimensions below the current one, which is used for indexing
970  *              offsets in this dimension.
971  *
972  * Return:	Non-negative on success/Negative on failure
973  *
974  * Programmer:	Quincey Koziol
975  *		Monday, April 28, 2003
976  *
977  * Modifications:
978  *
979  *-------------------------------------------------------------------------
980  */
981 herr_t
H5VM_array_down(unsigned n,const hsize_t * total_size,hsize_t * down)982 H5VM_array_down(unsigned n, const hsize_t *total_size, hsize_t *down)
983 {
984     hsize_t	acc;	                /*accumulator			*/
985     int	        i;		        /*counter			*/
986 
987     FUNC_ENTER_NOAPI_NOINIT_NOERR
988 
989     HDassert(n <= H5VM_HYPER_NDIMS);
990     HDassert(total_size);
991     HDassert(down);
992 
993     /* Build the sizes of each dimension in the array */
994     /* (From fastest to slowest) */
995     for(i=(int)(n-1),acc=1; i>=0; i--) {
996         down[i]=acc;
997         acc *= total_size[i];
998     } /* end for */
999 
1000     FUNC_LEAVE_NOAPI(SUCCEED)
1001 } /* end H5VM_array_down() */
1002 
1003 
1004 /*-------------------------------------------------------------------------
1005  * Function:	H5VM_array_offset_pre
1006  *
1007  * Purpose:	Given a coordinate description of a location in an array, this
1008  *      function returns the byte offset of the coordinate.
1009  *
1010  *		The dimensionality of the whole array, and the offset is N.
1011  *              The whole array dimensions are TOTAL_SIZE and the coordinate
1012  *              is at offset OFFSET.
1013  *
1014  * Return:	Success: Byte offset from beginning of array to element offset
1015  *		Failure: abort() -- should never fail
1016  *
1017  * Programmer:	Quincey Koziol
1018  *		Tuesday, June 22, 1999
1019  *
1020  *-------------------------------------------------------------------------
1021  */
1022 hsize_t
H5VM_array_offset_pre(unsigned n,const hsize_t * acc,const hsize_t * offset)1023 H5VM_array_offset_pre(unsigned n, const hsize_t *acc, const hsize_t *offset)
1024 {
1025     unsigned        u;		/* Local index variable */
1026     hsize_t	    ret_value;  /* Return value */
1027 
1028     FUNC_ENTER_NOAPI_NOINIT_NOERR
1029 
1030     HDassert(n <= H5VM_HYPER_NDIMS);
1031     HDassert(acc);
1032     HDassert(offset);
1033 
1034     /* Compute offset in array */
1035     for(u = 0, ret_value = 0; u < n; u++)
1036         ret_value += acc[u] * offset[u];
1037 
1038     FUNC_LEAVE_NOAPI(ret_value)
1039 } /* end H5VM_array_offset_pre() */
1040 
1041 
1042 /*-------------------------------------------------------------------------
1043  * Function:	H5VM_array_offset
1044  *
1045  * Purpose:	Given a coordinate description of a location in an array, this
1046  *      function returns the byte offset of the coordinate.
1047  *
1048  *		The dimensionality of the whole array, and the offset is N.
1049  *              The whole array dimensions are TOTAL_SIZE and the coordinate
1050  *              is at offset OFFSET.
1051  *
1052  * Return:	Success: Byte offset from beginning of array to element offset
1053  *		Failure: abort() -- should never fail
1054  *
1055  * Programmer:	Quincey Koziol
1056  *		Tuesday, June 22, 1999
1057  *
1058  * Modifications:
1059  *
1060  *-------------------------------------------------------------------------
1061  */
1062 hsize_t
H5VM_array_offset(unsigned n,const hsize_t * total_size,const hsize_t * offset)1063 H5VM_array_offset(unsigned n, const hsize_t *total_size, const hsize_t *offset)
1064 {
1065     hsize_t	acc_arr[H5VM_HYPER_NDIMS];	/* Accumulated size of down dimensions */
1066     hsize_t	ret_value;  /* Return value */
1067 
1068     FUNC_ENTER_NOAPI((HDabort(), 0)) /*lint !e527 Don't worry about unreachable statement */
1069 
1070     HDassert(n <= H5VM_HYPER_NDIMS);
1071     HDassert(total_size);
1072     HDassert(offset);
1073 
1074     /* Build the sizes of each dimension in the array */
1075     if(H5VM_array_down(n,total_size,acc_arr)<0)
1076         HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, UFAIL, "can't compute down sizes")
1077 
1078     /* Set return value */
1079     ret_value=H5VM_array_offset_pre(n,acc_arr,offset);
1080 
1081 done:
1082     FUNC_LEAVE_NOAPI(ret_value)
1083 } /* end H5VM_array_offset() */
1084 
1085 
1086 /*-------------------------------------------------------------------------
1087  * Function:	H5VM_array_calc_pre
1088  *
1089  * Purpose:	Given a linear offset in an array, the dimensions of that
1090  *              array and the pre-computed 'down' (accumulator) sizes, this
1091  *              function computes the coordinates of that offset in the array.
1092  *
1093  *		The dimensionality of the whole array, and the coordinates is N.
1094  *              The array dimensions are TOTAL_SIZE and the coordinates
1095  *              are returned in COORD.  The linear offset is in OFFSET.
1096  *
1097  * Return:	Non-negative on success/Negative on failure
1098  *
1099  * Programmer:	Quincey Koziol
1100  *		Thursday, July 16, 2009
1101  *
1102  *-------------------------------------------------------------------------
1103  */
1104 herr_t
H5VM_array_calc_pre(hsize_t offset,unsigned n,const hsize_t * down,hsize_t * coords)1105 H5VM_array_calc_pre(hsize_t offset, unsigned n, const hsize_t *down,
1106     hsize_t *coords)
1107 {
1108     unsigned    u;                      /* Local index variable */
1109 
1110     FUNC_ENTER_NOAPI_NOINIT_NOERR
1111 
1112     /* Sanity check */
1113     HDassert(n <= H5VM_HYPER_NDIMS);
1114     HDassert(coords);
1115 
1116     /* Compute the coordinates from the offset */
1117     for(u = 0; u < n; u++) {
1118         coords[u] = offset / down[u];
1119         offset %= down[u];
1120     } /* end for */
1121 
1122     FUNC_LEAVE_NOAPI(SUCCEED)
1123 } /* end H5VM_array_calc_pre() */
1124 
1125 
1126 /*-------------------------------------------------------------------------
1127  * Function:	H5VM_array_calc
1128  *
1129  * Purpose:	Given a linear offset in an array and the dimensions of that
1130  *              array, this function computes the coordinates of that offset
1131  *              in the array.
1132  *
1133  *		The dimensionality of the whole array, and the coordinates is N.
1134  *              The array dimensions are TOTAL_SIZE and the coordinates
1135  *              are returned in COORD.  The linear offset is in OFFSET.
1136  *
1137  * Return:	Non-negative on success/Negative on failure
1138  *
1139  * Programmer:	Quincey Koziol
1140  *		Wednesday, April 16, 2003
1141  *
1142  * Modifications:
1143  *
1144  *-------------------------------------------------------------------------
1145  */
1146 herr_t
H5VM_array_calc(hsize_t offset,unsigned n,const hsize_t * total_size,hsize_t * coords)1147 H5VM_array_calc(hsize_t offset, unsigned n, const hsize_t *total_size, hsize_t *coords)
1148 {
1149     hsize_t	idx[H5VM_HYPER_NDIMS];	/* Size of each dimension in bytes */
1150     herr_t      ret_value = SUCCEED;    /* Return value */
1151 
1152     FUNC_ENTER_NOAPI(FAIL)
1153 
1154     /* Sanity check */
1155     HDassert(n <= H5VM_HYPER_NDIMS);
1156     HDassert(total_size);
1157     HDassert(coords);
1158 
1159     /* Build the sizes of each dimension in the array */
1160     if(H5VM_array_down(n, total_size, idx) < 0)
1161         HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute down sizes")
1162 
1163     /* Compute the coordinates from the offset */
1164     if(H5VM_array_calc_pre(offset, n, idx, coords) < 0)
1165         HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute coordinates")
1166 
1167 done:
1168     FUNC_LEAVE_NOAPI(ret_value)
1169 } /* end H5VM_array_calc() */
1170 
1171 
1172 /*-------------------------------------------------------------------------
1173  * Function:	H5VM_chunk_index
1174  *
1175  * Purpose:	Given a coordinate offset (COORD), the size of each chunk
1176  *              (CHUNK), the number of chunks in each dimension (NCHUNKS)
1177  *              and the number of dimensions of all of these (NDIMS), calculate
1178  *              a "chunk index" for the chunk that the coordinate offset is
1179  *              located in.
1180  *
1181  *              The chunk index starts at 0 and increases according to the
1182  *              fastest changing dimension, then the next fastest, etc.
1183  *
1184  *              For example, with a 3x5 chunk size and 6 chunks in the fastest
1185  *              changing dimension and 3 chunks in the slowest changing
1186  *              dimension, the chunk indices are as follows:
1187  *
1188  *              +-----+-----+-----+-----+-----+-----+
1189  *              |     |     |     |     |     |     |
1190  *              |  0  |  1  |  2  |  3  |  4  |  5  |
1191  *              |     |     |     |     |     |     |
1192  *              +-----+-----+-----+-----+-----+-----+
1193  *              |     |     |     |     |     |     |
1194  *              |  6  |  7  |  8  |  9  | 10  | 11  |
1195  *              |     |     |     |     |     |     |
1196  *              +-----+-----+-----+-----+-----+-----+
1197  *              |     |     |     |     |     |     |
1198  *              | 12  | 13  | 14  | 15  | 16  | 17  |
1199  *              |     |     |     |     |     |     |
1200  *              +-----+-----+-----+-----+-----+-----+
1201  *
1202  *              The chunk index is placed in the CHUNK_IDX location for return
1203  *              from this function
1204  *
1205  * Return:	Chunk index on success (can't fail)
1206  *
1207  * Programmer:	Quincey Koziol
1208  *		Monday, April 21, 2003
1209  *
1210  *-------------------------------------------------------------------------
1211  */
1212 hsize_t
H5VM_chunk_index(unsigned ndims,const hsize_t * coord,const uint32_t * chunk,const hsize_t * down_nchunks)1213 H5VM_chunk_index(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
1214     const hsize_t *down_nchunks)
1215 {
1216     hsize_t scaled_coord[H5VM_HYPER_NDIMS];	/* Scaled, coordinates, in terms of chunks */
1217     hsize_t chunk_idx;          /* Chunk index computed */
1218 
1219     FUNC_ENTER_NOAPI_NOINIT_NOERR
1220 
1221     /* Sanity check */
1222     HDassert(ndims <= H5VM_HYPER_NDIMS);
1223     HDassert(coord);
1224     HDassert(chunk);
1225     HDassert(down_nchunks);
1226 
1227     /* Defer to H5VM_chunk_index_scaled */
1228     chunk_idx = H5VM_chunk_index_scaled(ndims, coord, chunk, down_nchunks, scaled_coord);
1229 
1230     FUNC_LEAVE_NOAPI(chunk_idx)
1231 } /* end H5VM_chunk_index() */
1232 
1233 
1234 /*-------------------------------------------------------------------------
1235  * Function:	H5VM_chunk_scaled
1236  *
1237  * Purpose:	Compute the scaled coordinates for a chunk offset
1238  *
1239  * Return:	<none>
1240  *
1241  * Programmer:	Quincey Koziol
1242  *		Wednesday, November 19, 2014
1243  *
1244  *-------------------------------------------------------------------------
1245  */
1246 void
H5VM_chunk_scaled(unsigned ndims,const hsize_t * coord,const uint32_t * chunk,hsize_t * scaled)1247 H5VM_chunk_scaled(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
1248     hsize_t *scaled)
1249 {
1250     unsigned u;                 /* Local index variable */
1251 
1252     FUNC_ENTER_NOAPI_NOINIT_NOERR
1253 
1254     /* Sanity check */
1255     HDassert(ndims <= H5VM_HYPER_NDIMS);
1256     HDassert(coord);
1257     HDassert(chunk);
1258     HDassert(scaled);
1259 
1260     /* Compute the scaled coordinates for actual coordinates */
1261     /* (Note that the 'scaled' array is an 'OUT' parameter) */
1262     for(u = 0; u < ndims; u++)
1263         scaled[u] = coord[u] / chunk[u];
1264 
1265     FUNC_LEAVE_NOAPI_VOID
1266 } /* end H5VM_chunk_scaled() */
1267 
1268 
1269 /*-------------------------------------------------------------------------
1270  * Function:	H5VM_chunk_index_scaled
1271  *
1272  * Purpose:	Given a coordinate offset (COORD), the size of each chunk
1273  *              (CHUNK), the number of chunks in each dimension (NCHUNKS)
1274  *              and the number of dimensions of all of these (NDIMS), calculate
1275  *              a "chunk index" for the chunk that the coordinate offset is
1276  *              located in.
1277  *
1278  *              The chunk index starts at 0 and increases according to the
1279  *              fastest changing dimension, then the next fastest, etc.
1280  *
1281  *              For example, with a 3x5 chunk size and 6 chunks in the fastest
1282  *              changing dimension and 3 chunks in the slowest changing
1283  *              dimension, the chunk indices are as follows:
1284  *
1285  *              +-----+-----+-----+-----+-----+-----+
1286  *              |     |     |     |     |     |     |
1287  *              |  0  |  1  |  2  |  3  |  4  |  5  |
1288  *              |     |     |     |     |     |     |
1289  *              +-----+-----+-----+-----+-----+-----+
1290  *              |     |     |     |     |     |     |
1291  *              |  6  |  7  |  8  |  9  | 10  | 11  |
1292  *              |     |     |     |     |     |     |
1293  *              +-----+-----+-----+-----+-----+-----+
1294  *              |     |     |     |     |     |     |
1295  *              | 12  | 13  | 14  | 15  | 16  | 17  |
1296  *              |     |     |     |     |     |     |
1297  *              +-----+-----+-----+-----+-----+-----+
1298  *
1299  *              The chunk index is placed in the CHUNK_IDX location for return
1300  *              from this function
1301  *
1302  * Note:	This routine is identical to H5VM_chunk_index(), except for
1303  *		caching the scaled information.  Make changes in both places.
1304  *
1305  * Return:	Chunk index on success (can't fail)
1306  *
1307  * Programmer:	Vailin Choi
1308  *		Monday, February 9, 2015
1309  *
1310  *-------------------------------------------------------------------------
1311  */
1312 hsize_t
H5VM_chunk_index_scaled(unsigned ndims,const hsize_t * coord,const uint32_t * chunk,const hsize_t * down_nchunks,hsize_t * scaled)1313 H5VM_chunk_index_scaled(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
1314     const hsize_t *down_nchunks, hsize_t *scaled)
1315 {
1316     hsize_t chunk_idx;          /* Computed chunk index */
1317     unsigned u;                 /* Local index variable */
1318 
1319     FUNC_ENTER_NOAPI_NOINIT_NOERR
1320 
1321     /* Sanity check */
1322     HDassert(ndims <= H5VM_HYPER_NDIMS);
1323     HDassert(coord);
1324     HDassert(chunk);
1325     HDassert(down_nchunks);
1326     HDassert(scaled);
1327 
1328     /* Compute the scaled coordinates for actual coordinates */
1329     /* (Note that the 'scaled' array is an 'OUT' parameter) */
1330     for(u = 0; u < ndims; u++)
1331         scaled[u] = coord[u] / chunk[u];
1332 
1333     /* Compute the chunk index */
1334     chunk_idx = H5VM_array_offset_pre(ndims, down_nchunks, scaled); /*lint !e772 scaled_coord will always be initialized */
1335 
1336     FUNC_LEAVE_NOAPI(chunk_idx)
1337 } /* end H5VM_chunk_index_scaled() */
1338 
1339 
1340 /*-------------------------------------------------------------------------
1341  * Function:	H5VM_opvv
1342  *
1343  * Purpose:	Perform an operation on a source & destination sequences
1344  *		of offset/length pairs.  Each set of sequnces has an array
1345  *		of lengths, an array of offsets, the maximum number of
1346  *		sequences and the current sequence to start at in the sequence.
1347  *
1348  *              There may be different numbers of bytes in the source and
1349  *              destination sequences, the operation stops when either the
1350  *              source or destination sequence runs out of information.
1351  *
1352  * Note:	The algorithm in this routine is [basically] the same as for
1353  *		H5VM_memcpyvv().  Changes should be made to both!
1354  *
1355  * Return:	Non-negative # of bytes operated on, on success/Negative on failure
1356  *
1357  * Programmer:	Quincey Koziol
1358  *		Thursday, September 30, 2010
1359  *
1360  *-------------------------------------------------------------------------
1361  */
1362 ssize_t
H5VM_opvv(size_t dst_max_nseq,size_t * dst_curr_seq,size_t dst_len_arr[],hsize_t dst_off_arr[],size_t src_max_nseq,size_t * src_curr_seq,size_t src_len_arr[],hsize_t src_off_arr[],H5VM_opvv_func_t op,void * op_data)1363 H5VM_opvv(size_t dst_max_nseq, size_t *dst_curr_seq, size_t dst_len_arr[],
1364     hsize_t dst_off_arr[],
1365     size_t src_max_nseq, size_t *src_curr_seq, size_t src_len_arr[],
1366     hsize_t src_off_arr[],
1367     H5VM_opvv_func_t op, void *op_data)
1368 {
1369     hsize_t *max_dst_off_ptr, *max_src_off_ptr;  /* Pointers to max. source and destination offset locations */
1370     hsize_t *dst_off_ptr, *src_off_ptr; /* Pointers to source and destination offset arrays */
1371     size_t *dst_len_ptr, *src_len_ptr;  /* Pointers to source and destination length arrays */
1372     hsize_t tmp_dst_off, tmp_src_off;   /* Temporary source and destination offset values */
1373     size_t tmp_dst_len, tmp_src_len;    /* Temporary source and destination length values */
1374     size_t acc_len;             /* Accumulated length of sequences */
1375     ssize_t ret_value = 0;      /* Return value (Total size of sequence in bytes) */
1376 
1377     FUNC_ENTER_NOAPI(FAIL)
1378 
1379     /* Sanity check */
1380     HDassert(dst_curr_seq);
1381     HDassert(*dst_curr_seq < dst_max_nseq);
1382     HDassert(dst_len_arr);
1383     HDassert(dst_off_arr);
1384     HDassert(src_curr_seq);
1385     HDassert(*src_curr_seq < src_max_nseq);
1386     HDassert(src_len_arr);
1387     HDassert(src_off_arr);
1388     HDassert(op);
1389 
1390     /* Set initial offset & length pointers */
1391     dst_len_ptr = dst_len_arr + *dst_curr_seq;
1392     dst_off_ptr = dst_off_arr + *dst_curr_seq;
1393     src_len_ptr = src_len_arr + *src_curr_seq;
1394     src_off_ptr = src_off_arr + *src_curr_seq;
1395 
1396     /* Get temporary source & destination sequence offsets & lengths */
1397     tmp_dst_len = *dst_len_ptr;
1398     tmp_dst_off = *dst_off_ptr;
1399     tmp_src_len = *src_len_ptr;
1400     tmp_src_off = *src_off_ptr;
1401 
1402     /* Compute maximum offset pointer values */
1403     max_dst_off_ptr = dst_off_arr + dst_max_nseq;
1404     max_src_off_ptr = src_off_arr + src_max_nseq;
1405 
1406 /* Work through the sequences */
1407 /* (Choose smallest sequence available initially) */
1408 
1409     /* Source sequence is less than destination sequence */
1410     if(tmp_src_len < tmp_dst_len) {
1411 src_smaller:
1412         acc_len = 0;
1413         do {
1414             /* Make operator callback */
1415             if((*op)(tmp_dst_off, tmp_src_off, tmp_src_len, op_data) < 0)
1416                 HGOTO_ERROR(H5E_INTERNAL, H5E_CANTOPERATE, FAIL, "can't perform operation")
1417 
1418             /* Accumulate number of bytes copied */
1419             acc_len += tmp_src_len;
1420 
1421             /* Update destination length */
1422             tmp_dst_off += tmp_src_len;
1423             tmp_dst_len -= tmp_src_len;
1424 
1425             /* Advance source offset & check for being finished */
1426             src_off_ptr++;
1427             if(src_off_ptr >= max_src_off_ptr) {
1428                 /* Roll accumulated changes into appropriate counters */
1429                 *dst_off_ptr = tmp_dst_off;
1430                 *dst_len_ptr = tmp_dst_len;
1431 
1432                 /* Done with sequences */
1433                 goto finished;
1434             } /* end if */
1435             tmp_src_off = *src_off_ptr;
1436 
1437             /* Update source information */
1438             src_len_ptr++;
1439             tmp_src_len = *src_len_ptr;
1440         } while(tmp_src_len < tmp_dst_len);
1441 
1442         /* Roll accumulated sequence lengths into return value */
1443         ret_value += (ssize_t)acc_len;
1444 
1445         /* Transition to next state */
1446         if(tmp_dst_len < tmp_src_len)
1447             goto dst_smaller;
1448         else
1449             goto equal;
1450     } /* end if */
1451     /* Destination sequence is less than source sequence */
1452     else if(tmp_dst_len < tmp_src_len) {
1453 dst_smaller:
1454         acc_len = 0;
1455         do {
1456             /* Make operator callback */
1457             if((*op)(tmp_dst_off, tmp_src_off, tmp_dst_len, op_data) < 0)
1458                 HGOTO_ERROR(H5E_INTERNAL, H5E_CANTOPERATE, FAIL, "can't perform operation")
1459 
1460             /* Accumulate number of bytes copied */
1461             acc_len += tmp_dst_len;
1462 
1463             /* Update source length */
1464             tmp_src_off += tmp_dst_len;
1465             tmp_src_len -= tmp_dst_len;
1466 
1467             /* Advance destination offset & check for being finished */
1468             dst_off_ptr++;
1469             if(dst_off_ptr >= max_dst_off_ptr) {
1470                 /* Roll accumulated changes into appropriate counters */
1471                 *src_off_ptr = tmp_src_off;
1472                 *src_len_ptr = tmp_src_len;
1473 
1474                 /* Done with sequences */
1475                 goto finished;
1476             } /* end if */
1477             tmp_dst_off = *dst_off_ptr;
1478 
1479             /* Update destination information */
1480             dst_len_ptr++;
1481             tmp_dst_len = *dst_len_ptr;
1482         } while(tmp_dst_len < tmp_src_len);
1483 
1484         /* Roll accumulated sequence lengths into return value */
1485         ret_value += (ssize_t)acc_len;
1486 
1487         /* Transition to next state */
1488         if(tmp_src_len < tmp_dst_len)
1489             goto src_smaller;
1490         else
1491             goto equal;
1492     } /* end else-if */
1493     /* Destination sequence and source sequence are same length */
1494     else {
1495 equal:
1496         acc_len = 0;
1497         do {
1498             /* Make operator callback */
1499             if((*op)(tmp_dst_off, tmp_src_off, tmp_dst_len, op_data) < 0)
1500                 HGOTO_ERROR(H5E_INTERNAL, H5E_CANTOPERATE, FAIL, "can't perform operation")
1501 
1502             /* Accumulate number of bytes copied */
1503             acc_len += tmp_dst_len;
1504 
1505             /* Advance source & destination offset & check for being finished */
1506             src_off_ptr++;
1507             dst_off_ptr++;
1508             if(src_off_ptr >= max_src_off_ptr || dst_off_ptr >= max_dst_off_ptr)
1509                 /* Done with sequences */
1510                 goto finished;
1511             tmp_src_off = *src_off_ptr;
1512             tmp_dst_off = *dst_off_ptr;
1513 
1514             /* Update source information */
1515             src_len_ptr++;
1516             tmp_src_len = *src_len_ptr;
1517 
1518             /* Update destination information */
1519             dst_len_ptr++;
1520             tmp_dst_len = *dst_len_ptr;
1521         } while(tmp_dst_len == tmp_src_len);
1522 
1523         /* Roll accumulated sequence lengths into return value */
1524         ret_value += (ssize_t)acc_len;
1525 
1526         /* Transition to next state */
1527         if(tmp_dst_len < tmp_src_len)
1528             goto dst_smaller;
1529         else
1530             goto src_smaller;
1531     } /* end else */
1532 
1533 finished:
1534     /* Roll accumulated sequence lengths into return value */
1535     ret_value += (ssize_t)acc_len;
1536 
1537     /* Update current sequence vectors */
1538     *dst_curr_seq = (size_t)(dst_off_ptr - dst_off_arr);
1539     *src_curr_seq = (size_t)(src_off_ptr - src_off_arr);
1540 
1541 done:
1542     FUNC_LEAVE_NOAPI(ret_value)
1543 } /* end H5VM_opvv() */
1544 
1545 
1546 /*-------------------------------------------------------------------------
1547  * Function:	H5VM_memcpyvv
1548  *
1549  * Purpose:	Given source and destination buffers in memory (SRC & DST)
1550  *              copy sequences of from the source buffer into the destination
1551  *              buffer.  Each set of sequences has an array of lengths, an
1552  *              array of offsets, the maximum number of sequences and the
1553  *              current sequence to start at in the sequence.
1554  *
1555  *              There may be different numbers of bytes in the source and
1556  *              destination sequences, data copying stops when either the
1557  *              source or destination buffer runs out of sequence information.
1558  *
1559  * Note:	The algorithm in this routine is [basically] the same as for
1560  *		H5VM_opvv().  Changes should be made to both!
1561  *
1562  * Return:	Non-negative # of bytes copied on success/Negative on failure
1563  *
1564  * Programmer:	Quincey Koziol
1565  *		Friday, May 2, 2003
1566  *
1567  *-------------------------------------------------------------------------
1568  */
1569 ssize_t
H5VM_memcpyvv(void * _dst,size_t dst_max_nseq,size_t * dst_curr_seq,size_t dst_len_arr[],hsize_t dst_off_arr[],const void * _src,size_t src_max_nseq,size_t * src_curr_seq,size_t src_len_arr[],hsize_t src_off_arr[])1570 H5VM_memcpyvv(void *_dst,
1571     size_t dst_max_nseq, size_t *dst_curr_seq, size_t dst_len_arr[], hsize_t dst_off_arr[],
1572     const void *_src,
1573     size_t src_max_nseq, size_t *src_curr_seq, size_t src_len_arr[], hsize_t src_off_arr[])
1574 {
1575     unsigned char *dst;         /* Destination buffer pointer */
1576     const unsigned char *src;   /* Source buffer pointer */
1577     hsize_t *max_dst_off_ptr, *max_src_off_ptr;  /* Pointers to max. source and destination offset locations */
1578     hsize_t *dst_off_ptr, *src_off_ptr;  /* Pointers to source and destination offset arrays */
1579     size_t *dst_len_ptr, *src_len_ptr;  /* Pointers to source and destination length arrays */
1580     size_t tmp_dst_len;         /* Temporary dest. length value */
1581     size_t tmp_src_len;         /* Temporary source length value */
1582     size_t acc_len;             /* Accumulated length of sequences */
1583     ssize_t ret_value = 0;      /* Return value (Total size of sequence in bytes) */
1584 
1585     FUNC_ENTER_NOAPI_NOINIT_NOERR
1586 
1587     /* Sanity check */
1588     HDassert(_dst);
1589     HDassert(dst_curr_seq);
1590     HDassert(*dst_curr_seq < dst_max_nseq);
1591     HDassert(dst_len_arr);
1592     HDassert(dst_off_arr);
1593     HDassert(_src);
1594     HDassert(src_curr_seq);
1595     HDassert(*src_curr_seq < src_max_nseq);
1596     HDassert(src_len_arr);
1597     HDassert(src_off_arr);
1598 
1599     /* Set initial offset & length pointers */
1600     dst_len_ptr = dst_len_arr + *dst_curr_seq;
1601     dst_off_ptr = dst_off_arr + *dst_curr_seq;
1602     src_len_ptr = src_len_arr + *src_curr_seq;
1603     src_off_ptr = src_off_arr + *src_curr_seq;
1604 
1605     /* Get temporary source & destination sequence lengths */
1606     tmp_dst_len = *dst_len_ptr;
1607     tmp_src_len = *src_len_ptr;
1608 
1609     /* Compute maximum offset pointer values */
1610     max_dst_off_ptr = dst_off_arr + dst_max_nseq;
1611     max_src_off_ptr = src_off_arr + src_max_nseq;
1612 
1613     /* Compute buffer offsets */
1614     dst = (unsigned char *)_dst + *dst_off_ptr;
1615     src = (const unsigned char *)_src + *src_off_ptr;
1616 
1617 /* Work through the sequences */
1618 /* (Choose smallest sequence available initially) */
1619 
1620     /* Source sequence is less than destination sequence */
1621     if(tmp_src_len < tmp_dst_len) {
1622 src_smaller:
1623         acc_len = 0;
1624         do {
1625             /* Copy data */
1626             HDmemcpy(dst, src, tmp_src_len);
1627 
1628             /* Accumulate number of bytes copied */
1629             acc_len += tmp_src_len;
1630 
1631             /* Update destination length */
1632             tmp_dst_len -= tmp_src_len;
1633 
1634             /* Advance source offset & check for being finished */
1635             src_off_ptr++;
1636             if(src_off_ptr >= max_src_off_ptr) {
1637                 /* Roll accumulated changes into appropriate counters */
1638                 *dst_off_ptr += acc_len;
1639                 *dst_len_ptr = tmp_dst_len;
1640 
1641                 /* Done with sequences */
1642                 goto finished;
1643             } /* end if */
1644 
1645             /* Update destination pointer */
1646             dst += tmp_src_len;
1647 
1648             /* Update source information */
1649             src_len_ptr++;
1650             tmp_src_len = *src_len_ptr;
1651             src = (const unsigned char *)_src + *src_off_ptr;
1652         } while(tmp_src_len < tmp_dst_len);
1653 
1654         /* Roll accumulated sequence lengths into return value */
1655         ret_value += (ssize_t)acc_len;
1656 
1657         /* Transition to next state */
1658         if(tmp_dst_len < tmp_src_len)
1659             goto dst_smaller;
1660         else
1661             goto equal;
1662     } /* end if */
1663     /* Destination sequence is less than source sequence */
1664     else if(tmp_dst_len < tmp_src_len) {
1665 dst_smaller:
1666         acc_len = 0;
1667         do {
1668             /* Copy data */
1669             HDmemcpy(dst, src, tmp_dst_len);
1670 
1671             /* Accumulate number of bytes copied */
1672             acc_len += tmp_dst_len;
1673 
1674             /* Update source length */
1675             tmp_src_len -= tmp_dst_len;
1676 
1677             /* Advance destination offset & check for being finished */
1678             dst_off_ptr++;
1679             if(dst_off_ptr >= max_dst_off_ptr) {
1680                 /* Roll accumulated changes into appropriate counters */
1681                 *src_off_ptr += acc_len;
1682                 *src_len_ptr = tmp_src_len;
1683 
1684                 /* Done with sequences */
1685                 goto finished;
1686             } /* end if */
1687 
1688             /* Update source pointer */
1689             src += tmp_dst_len;
1690 
1691             /* Update destination information */
1692             dst_len_ptr++;
1693             tmp_dst_len = *dst_len_ptr;
1694             dst = (unsigned char *)_dst + *dst_off_ptr;
1695         } while(tmp_dst_len < tmp_src_len);
1696 
1697         /* Roll accumulated sequence lengths into return value */
1698         ret_value += (ssize_t)acc_len;
1699 
1700         /* Transition to next state */
1701         if(tmp_src_len < tmp_dst_len)
1702             goto src_smaller;
1703         else
1704             goto equal;
1705     } /* end else-if */
1706     /* Destination sequence and source sequence are same length */
1707     else {
1708 equal:
1709         acc_len = 0;
1710         do {
1711             /* Copy data */
1712             HDmemcpy(dst, src, tmp_dst_len);
1713 
1714             /* Accumulate number of bytes copied */
1715             acc_len += tmp_dst_len;
1716 
1717             /* Advance source & destination offset & check for being finished */
1718             src_off_ptr++;
1719             dst_off_ptr++;
1720             if(src_off_ptr >= max_src_off_ptr || dst_off_ptr >= max_dst_off_ptr)
1721                 /* Done with sequences */
1722                 goto finished;
1723 
1724             /* Update source information */
1725             src_len_ptr++;
1726             tmp_src_len = *src_len_ptr;
1727             src = (const unsigned char *)_src + *src_off_ptr;
1728 
1729             /* Update destination information */
1730             dst_len_ptr++;
1731             tmp_dst_len = *dst_len_ptr;
1732             dst = (unsigned char *)_dst + *dst_off_ptr;
1733         } while(tmp_dst_len == tmp_src_len);
1734 
1735         /* Roll accumulated sequence lengths into return value */
1736         ret_value += (ssize_t)acc_len;
1737 
1738         /* Transition to next state */
1739         if(tmp_dst_len < tmp_src_len)
1740             goto dst_smaller;
1741         else
1742             goto src_smaller;
1743     } /* end else */
1744 
1745 finished:
1746     /* Roll accumulated sequence lengths into return value */
1747     ret_value += (ssize_t)acc_len;
1748 
1749     /* Update current sequence vectors */
1750     *dst_curr_seq = (size_t)(dst_off_ptr - dst_off_arr);
1751     *src_curr_seq = (size_t)(src_off_ptr - src_off_arr);
1752 
1753     FUNC_LEAVE_NOAPI(ret_value)
1754 } /* end H5VM_memcpyvv() */
1755 
1756