/dports/science/hdf5/hdf5-1.10.6/testpar/

t_filters_parallel.h
    91: #define WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size)
    92: #define WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size)
    124: #define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (mpi_size)
    150: #define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
    154: #define WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
    211: #define READ_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size)
    212: #define READ_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size)
    270: #define READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
    274: #define READ_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
    317: #define WRITE_SERIAL_READ_PARALLEL_DEPTH (mpi_size)
    [all …]

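These macros size test datasets and chunks by the runtime process count, so every rank owns a proportional slice of each filtered chunk. Below is a minimal sketch of that idea, not the test's own code: it creates a chunked dataset whose chunk edges equal mpi_size, with the file name and the factor of 2 as illustrative assumptions (requires a parallel HDF5 build).

    /* Sketch: chunk dimensions derived from the runtime process count,
     * echoing the WRITE_SHARED_FILTERED_CHUNKS_* macros above. */
    #include <mpi.h>
    #include <hdf5.h>

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        /* Dataset is 2x2 chunks; every chunk edge scales with mpi_size. */
        hsize_t dset_dims[2]  = {(hsize_t)(2 * mpi_size), (hsize_t)(2 * mpi_size)};
        hsize_t chunk_dims[2] = {(hsize_t)mpi_size, (hsize_t)mpi_size};

        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
        hid_t file = H5Fcreate("shared_chunks.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 2, chunk_dims);

        hid_t space = H5Screate_simple(2, dset_dims, NULL);
        hid_t dset  = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
                                 H5P_DEFAULT, dcpl, H5P_DEFAULT);

        H5Dclose(dset); H5Sclose(space); H5Pclose(dcpl);
        H5Fclose(file); H5Pclose(fapl);
        MPI_Finalize();
        return 0;
    }
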
testphdf5.c
    72: int mpi_size, mpi_rank;  [in pause_proc(), local]
    77: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);  [in pause_proc()]
    220: if ((dim0 % mpi_size) || (dim1 % mpi_size)){  [in parse_options()]
    223: dim0, dim1, mpi_size);  [in parse_options()]
    322: dim0 = ROW_FACTOR*mpi_size;  [in main()]
    323: dim1 = COL_FACTOR*mpi_size;  [in main()]
    434: if((mpi_size < 3)&& MAINPROCESS ) {  [in main()]
    490: if((mpi_size < 3) && MAINPROCESS) {  [in main()]
    494: if(mpi_size > 2) {  [in main()]
    528: if((mpi_size < 2) && MAINPROCESS) {  [in main()]
    [all …]

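parse_options() (line 220) rejects grid dimensions that do not divide evenly across the processes, and main() (lines 322-323) builds dimensions that do by construction. A standalone sketch of that guard, with illustrative ROW_FACTOR/COL_FACTOR values:

    /* Sketch: validate that global dims divide evenly across ranks,
     * mirroring the parse_options() check above. Factors are illustrative. */
    #include <stdio.h>
    #include <mpi.h>

    #define ROW_FACTOR 8
    #define COL_FACTOR 16

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        int dim0 = ROW_FACTOR * mpi_size;   /* evenly divisible by construction */
        int dim1 = COL_FACTOR * mpi_size;

        if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
            if (mpi_rank == 0)              /* report once, like MAINPROCESS */
                fprintf(stderr, "dims %dx%d not divisible by %d procs\n",
                        dim0, dim1, mpi_size);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Finalize();
        return 0;
    }
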
t_file_image.c  (all matches in file_image_daisy_chain_test())
    65: int mpi_size, mpi_rank;  [local]
    86: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    107: dims[0] = (hsize_t)mpi_size;
    120: vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
    124: for(i = 1; i < mpi_size; i++)
    229: vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
    237: for(i = 0; i < mpi_size; i++)
    316: vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
    324: for(i = 0; i < mpi_size; i++){
    363: MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
    [all …]

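The match at line 363 is the daisy-chain step: each rank forwards a byte buffer to rank (mpi_rank + 1) % mpi_size, so a file image travels once around the ring. A minimal sketch of that ring pass, with an illustrative payload:

    /* Sketch: daisy-chain a buffer around the ranks, rank r sending to
     * (r + 1) % mpi_size as on line 363 above. Payload is illustrative. */
    #include <string.h>
    #include <mpi.h>

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;
        char buf[64];

        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        if (mpi_size > 1) {
            if (mpi_rank == 0) {
                /* Rank 0 starts the chain, then waits for it to return. */
                strcpy(buf, "file image");
                MPI_Send(buf, sizeof(buf), MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
                         MPI_COMM_WORLD);
                MPI_Recv(buf, sizeof(buf), MPI_BYTE, mpi_size - 1, 0,
                         MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            } else {
                /* Everyone else receives from the left neighbor, forwards right. */
                MPI_Recv(buf, sizeof(buf), MPI_BYTE, mpi_rank - 1, 0,
                         MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                MPI_Send(buf, sizeof(buf), MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
                         MPI_COMM_WORLD);
            }
        }

        MPI_Finalize();
        return 0;
    }
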
t_mpi.c
    42: int mpi_size, mpi_rank;  [in test_mpio_overlap_writes(), local]
    61: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);  [in test_mpio_overlap_writes()]
    65: if (mpi_size < 2) {  [in test_mpio_overlap_writes()]
    184: int mpi_size, mpi_rank;  [in test_mpio_gb_file(), local]
    203: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);  [in test_mpio_gb_file()]
    345: mpi_off = (i * mpi_size + (mpi_size - mpi_rank - 1))  [in test_mpio_gb_file()]
    354: expected = i * mpi_size + (mpi_size - mpi_rank - 1);  [in test_mpio_gb_file()]
    438: int mpi_size, mpi_rank;  [in test_mpio_1wMr(), local]
    692: int mpi_rank, mpi_size;  [in test_mpio_derived_dtype(), local]
    862: int mpi_size, mpi_rank;  [in test_mpio_special_collective(), local]
    [all …]

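In test_mpio_gb_file() (lines 345 and 354), pass i of rank r writes at offset (i*mpi_size + (mpi_size - r - 1)) MB, striping blocks across ranks in reverse rank order, and the block index doubles as the value verified on read-back. A sketch of just that offset arithmetic, with an illustrative pass count:

    /* Sketch: reverse-rank striped offsets as in test_mpio_gb_file()
     * (lines 345/354 above). Loop bound and MB size are illustrative. */
    #include <stdio.h>
    #include <mpi.h>

    #define MB (1024 * 1024)

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        for (int i = 0; i < 4; i++) {
            /* Block index doubles as the verification value. */
            long long expected = (long long)i * mpi_size + (mpi_size - mpi_rank - 1);
            MPI_Offset mpi_off = (MPI_Offset)expected * MB;
            printf("rank %d, pass %d: offset %lld, expected %lld\n",
                   mpi_rank, i, (long long)mpi_off, expected);
        }

        MPI_Finalize();
        return 0;
    }
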
t_dset.c
    297: int mpi_size, mpi_rank;  [in dataset_writeInd(), local]
    442: int mpi_size, mpi_rank;  [in dataset_readInd(), local]
    573: int mpi_size, mpi_rank;  [in dataset_writeAll(), local]
    1105: int mpi_size, mpi_rank;  [in dataset_readAll(), local]
    1518: int mpi_size, mpi_rank;  [in extend_writeInd(), local]
    1906: int mpi_size, mpi_rank;  [in extend_readInd(), local]
    2089: int mpi_size, mpi_rank;  [in extend_writeAll(), local]
    3393: int mpi_size = -1;  [in actual_io_mode_tests(), local]
    3407: if (mpi_size > 2)  [in actual_io_mode_tests()]
    4284: count[0] = mpi_size;  [in dataset_atomicity()]
    [all …]

t_chunk_alloc.c
    23: static int mpi_size, mpi_rank;  [file-scope variable]
    92: MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);  [in create_chunked_dataset()]
    97: nchunks=chunk_factor*mpi_size;  [in create_chunked_dataset()]
    206: MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);  [in parallel_access_dataset()]
    209: nchunks=chunk_factor*mpi_size;  [in parallel_access_dataset()]
    247: for (i=0; i<nchunks/mpi_size; i++) {  [in parallel_access_dataset()]
    248: offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0];  [in parallel_access_dataset()]
    339: MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);  [in verify_data()]
    342: nchunks=chunk_factor*mpi_size;  [in verify_data()]
    389: value = i%mpi_size + 1;  [in verify_data()]
    [all …]

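Lines 247-248 distribute chunks round-robin: with nchunks = chunk_factor*mpi_size, rank r handles chunks r, r + mpi_size, r + 2*mpi_size, ..., each starting at offset (i*mpi_size + r)*chunk_dims[0]. A sketch of that assignment with illustrative constants:

    /* Sketch: round-robin assignment of chunks to ranks, mirroring
     * parallel_access_dataset() lines 247-248. Constants are illustrative. */
    #include <stdio.h>
    #include <mpi.h>

    #define CHUNK_FACTOR 4               /* chunks per rank (assumed) */

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;
        unsigned long chunk_dims0 = 256; /* elements per chunk along dim 0 */

        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        int nchunks = CHUNK_FACTOR * mpi_size;

        /* Rank r owns chunks r, r+mpi_size, r+2*mpi_size, ... */
        for (int i = 0; i < nchunks / mpi_size; i++) {
            unsigned long offset0 =
                (unsigned long)(i * mpi_size + mpi_rank) * chunk_dims0;
            printf("rank %d owns chunk %d at offset %lu\n",
                   mpi_rank, i * mpi_size + mpi_rank, offset0);
        }

        MPI_Finalize();
        return 0;
    }
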
/dports/science/hdf5-18/hdf5-1.8.21/testpar/

testphdf5.c
    72: int mpi_size, mpi_rank;  [in pause_proc(), local]
    182: dim0 = atoi(*(++argv))*mpi_size;  [in parse_options()]
    184: dim1 = atoi(*(++argv))*mpi_size;  [in parse_options()]
    220: if ((dim0 % mpi_size) || (dim1 % mpi_size)){  [in parse_options()]
    223: dim0, dim1, mpi_size);  [in parse_options()]
    318: dim0 = ROW_FACTOR*mpi_size;  [in main()]
    319: dim1 = COL_FACTOR*mpi_size;  [in main()]
    422: if((mpi_size < 3)&& MAINPROCESS ) {  [in main()]
    478: if((mpi_size < 3) && MAINPROCESS) {  [in main()]
    482: if(mpi_size > 2) {  [in main()]
    [all …]

t_file_image.c  (all matches in file_image_daisy_chain_test())
    65: int mpi_size, mpi_rank;  [local]
    86: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    107: dims[0] = (hsize_t)mpi_size;
    120: vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
    124: for(i = 1; i < mpi_size; i++)
    229: vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
    237: for(i = 0; i < mpi_size; i++)
    316: vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
    324: for(i = 0; i < mpi_size; i++){
    363: MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
    [all …]

t_mpi.c
    46: int mpi_size, mpi_rank;  [in test_mpio_overlap_writes(), local]
    70: if (mpi_size < 2) {  [in test_mpio_overlap_writes()]
    192: int mpi_size, mpi_rank;  [in test_mpio_gb_file(), local]
    303: ntimes = GB/MB*n/mpi_size + 1;  [in test_mpio_gb_file()]
    345: ntimes = GB/MB*n/mpi_size + 1;  [in test_mpio_gb_file()]
    347: mpi_off = (i*mpi_size + (mpi_size - mpi_rank - 1))*(MPI_Offset)MB;  [in test_mpio_gb_file()]
    353: expected = i*mpi_size + (mpi_size - mpi_rank - 1);  [in test_mpio_gb_file()]
    438: int mpi_size, mpi_rank;  [in test_mpio_1wMr(), local]
    691: int mpi_rank,mpi_size;  [in test_mpio_derived_dtype(), local]
    865: int mpi_size, mpi_rank;  [in test_mpio_special_collective(), local]
    [all …]

t_dset.c
    51: block[0] = dim0/mpi_size;  [in slab_set()]
    64: block[1] = dim1/mpi_size;  [in slab_set()]
    289: int mpi_size, mpi_rank;  [in dataset_writeInd(), local]
    434: int mpi_size, mpi_rank;  [in dataset_readInd(), local]
    565: int mpi_size, mpi_rank;  [in dataset_writeAll(), local]
    1097: int mpi_size, mpi_rank;  [in dataset_readAll(), local]
    1510: int mpi_size, mpi_rank;  [in extend_writeInd(), local]
    3368: int mpi_size = -1;  [in actual_io_mode_tests(), local]
    3382: if (mpi_size > 2)  [in actual_io_mode_tests()]
    4257: count[0] = mpi_size;  [in dataset_atomicity()]
    [all …]

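slab_set() (lines 51 and 64) splits the dataset into per-rank blocks of dim0/mpi_size rows (or dim1/mpi_size columns). A minimal hyperslab-selection sketch of the row-block case, with illustrative dimensions that divide evenly by construction:

    /* Sketch: per-rank row-block hyperslab, block[0] = dim0/mpi_size as
     * in slab_set() above. Dataset dims are illustrative. */
    #include <mpi.h>
    #include <hdf5.h>

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        hsize_t dim0 = (hsize_t)(8 * mpi_size), dim1 = 16;
        hsize_t dims[2]  = {dim0, dim1};
        hsize_t start[2] = {(dim0 / (hsize_t)mpi_size) * (hsize_t)mpi_rank, 0};
        hsize_t count[2] = {1, 1};
        hsize_t block[2] = {dim0 / (hsize_t)mpi_size, dim1}; /* one row block per rank */

        hid_t space = H5Screate_simple(2, dims, NULL);
        H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, block);

        /* 'space' can now serve as the file dataspace for H5Dwrite/H5Dread. */
        H5Sclose(space);
        MPI_Finalize();
        return 0;
    }
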
t_chunk_alloc.c
    23: static int mpi_size, mpi_rank;  [file-scope variable]
    92: MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);  [in create_chunked_dataset()]
    97: nchunks=chunk_factor*mpi_size;  [in create_chunked_dataset()]
    207: MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);  [in parallel_access_dataset()]
    210: nchunks=chunk_factor*mpi_size;  [in parallel_access_dataset()]
    248: for (i=0; i<nchunks/mpi_size; i++){  [in parallel_access_dataset()]
    249: offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0];  [in parallel_access_dataset()]
    338: MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);  [in verify_data()]
    341: nchunks=chunk_factor*mpi_size;  [in verify_data()]
    388: value = i%mpi_size + 1;  [in verify_data()]
    [all …]

/dports/math/libmesh/libmesh-1.6.2/contrib/netcdf/netcdf-c-4.6.2/nc_test4/

tst_nc4perf.c
    32: MPI_Info info, int mpi_size, int mpi_rank,  [argument of test_pio_2d()]
    52: for (i = 0; i < DIMSIZE1 / mpi_size; i++)  [in test_pio_2d()]
    85: start[1] = mpi_rank * DIMSIZE1/mpi_size;  [in test_pio_2d()]
    87: count[1] = DIMSIZE1 / mpi_size;  [in test_pio_2d()]
    158: for(i = 0; i < DIMSIZE1 / mpi_size; i++)  [in test_pio_4d()]
    194: ustart[3] = DIMSIZE1 * mpi_rank / mpi_size;  [in test_pio_4d()]
    198: ucount[3] = DIMSIZE1 / mpi_size;  [in test_pio_4d()]
    258: int mpi_size, mpi_rank;  [in main(), local]
    272: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);  [in main()]
    276: if ((float)DIMSIZE1 / mpi_size != (int)(DIMSIZE1 / mpi_size))  [in main()]
    [all …]

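Line 276 checks divisibility by comparing a float quotient against the truncated integer quotient; the integer form DIMSIZE1 % mpi_size != 0 states the same condition directly. A small sketch contrasting the two, with an illustrative DIMSIZE1:

    /* Sketch: the two divisibility tests side by side. The float form
     * from line 276 works for small values; integer modulo is the direct
     * expression. DIMSIZE1 here is illustrative. */
    #include <stdio.h>

    #define DIMSIZE1 24

    int main(void)
    {
        for (int mpi_size = 1; mpi_size <= 8; mpi_size++) {
            int by_float = ((float)DIMSIZE1 / mpi_size != (int)(DIMSIZE1 / mpi_size));
            int by_mod   = (DIMSIZE1 % mpi_size != 0);
            printf("np=%d: float test says %s, modulo says %s\n", mpi_size,
                   by_float ? "uneven" : "even", by_mod ? "uneven" : "even");
        }
        return 0;
    }
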
tst_parallel3.c
    69: if(mpi_size == NUMP[i])  [in main()]
    159: int mpi_size, mpi_rank;  [in test_pio(), local]
    212: count[1] = DIMSIZE/mpi_size;  [in test_pio()]
    253: ucount[3] = DIMSIZE/mpi_size;  [in test_pio()]
    291: count[1] = DIMSIZE/mpi_size;  [in test_pio()]
    364: int mpi_size, mpi_rank;  [in test_pio_attr(), local]
    428: count[1] = DIMSIZE/mpi_size;  [in test_pio_attr()]
    456: count[1] = DIMSIZE/mpi_size;  [in test_pio_attr()]
    548: int mpi_size, mpi_rank;  [in test_pio_hyper(), local]
    573: if(mpi_size == 1) return 0;  [in test_pio_hyper()]
    [all …]

tst_parallel4.c  (all matches in main())
    37: int mpi_size, mpi_rank;  [local]
    58: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    64: if (NUM_SLABS % mpi_size != 0)
    67: NUM_SLABS, mpi_size);
    114: if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR;
    140: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    142: start[0] = NUM_SLABS / mpi_size * mpi_rank + i;
    160: printf("%d\t%g\t%g\n", mpi_size, total_time, DIMSIZE * DIMSIZE * NUM_SLABS * sizeof(int) / total_t…
    185: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    187: start[0] = NUM_SLABS / mpi_size * mpi_rank + i;

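Lines 140-142 hand each rank a contiguous run of NUM_SLABS/mpi_size slabs beginning at slab NUM_SLABS/mpi_size * mpi_rank. A parallel-write sketch of that loop; the file name and sizes are illustrative, most error checks are trimmed, and NUM_SLABS must divide evenly by mpi_size (requires netCDF built with parallel I/O):

    /* Sketch: contiguous per-rank slab writes as in tst_parallel4.c
     * lines 140-142 above. Names and sizes are illustrative. */
    #include <mpi.h>
    #include <netcdf.h>
    #include <netcdf_par.h>

    #define NUM_SLABS 16
    #define DIMSIZE   8

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        int ncid, dimids[3], varid;
        if (nc_create_par("slabs.nc", NC_NETCDF4 | NC_MPIIO, MPI_COMM_WORLD,
                          MPI_INFO_NULL, &ncid)) MPI_Abort(MPI_COMM_WORLD, 1);
        nc_def_dim(ncid, "slab", NUM_SLABS, &dimids[0]);
        nc_def_dim(ncid, "x", DIMSIZE, &dimids[1]);
        nc_def_dim(ncid, "y", DIMSIZE, &dimids[2]);
        nc_def_var(ncid, "data", NC_INT, 3, dimids, &varid);
        nc_enddef(ncid);

        int slab_data[DIMSIZE * DIMSIZE] = {0};
        size_t start[3] = {0, 0, 0}, count[3] = {1, DIMSIZE, DIMSIZE};

        /* Each rank writes its contiguous block of slabs. */
        for (int i = 0; i < NUM_SLABS / mpi_size; i++) {
            start[0] = (size_t)(NUM_SLABS / mpi_size * mpi_rank + i);
            if (nc_put_vara_int(ncid, varid, start, count, slab_data))
                MPI_Abort(MPI_COMM_WORLD, 1);
        }

        nc_close(ncid);
        MPI_Finalize();
        return 0;
    }
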
tst_simplerw_coll_r.c  (all matches in main())
    31: int mpi_size, mpi_rank;  [local]
    44: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    49: if (NUM_SLABS % mpi_size)
    53: NUM_SLABS, mpi_size);
    132: NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size, fv, test_type[tt]);
    284: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    286: write_start[0] = NUM_SLABS / mpi_size * mpi_rank + i;
    312: if (mpi_size_in != mpi_size) ERR;
    314: if (mpi_size_in != mpi_size) ERR;
    323: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    [all …]

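Lines 312 and 314 check on read-back that the process count stored in the file matches the current run. A sketch of that round trip using the num_processors global attribute also seen in tst_parallel2.c and tst_parallel4.c; the file name is illustrative and error checks are trimmed:

    /* Sketch: record mpi_size as a global attribute at write time and
     * verify it on read-back, as on lines 312/314 above. */
    #include <mpi.h>
    #include <netcdf.h>
    #include <netcdf_par.h>

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank, ncid, mpi_size_in;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        /* Write phase: stamp the file with the process count. */
        nc_create_par("coll.nc", NC_NETCDF4 | NC_MPIIO, MPI_COMM_WORLD,
                      MPI_INFO_NULL, &ncid);
        nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size);
        nc_enddef(ncid);
        nc_close(ncid);

        /* Read phase: the stored count must match the current run. */
        nc_open_par("coll.nc", NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid);
        nc_get_att_int(ncid, NC_GLOBAL, "num_processors", &mpi_size_in);
        if (mpi_size_in != mpi_size)
            MPI_Abort(MPI_COMM_WORLD, 1);  /* file written by a different np */
        nc_close(ncid);

        MPI_Finalize();
        return 0;
    }
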
/dports/science/netcdf/netcdf-c-4.7.4/nc_test4/

tst_nc4perf.c
    32: MPI_Info info, int mpi_size, int mpi_rank,  [argument of test_pio_2d()]
    52: for (i = 0; i < DIMSIZE1 / mpi_size; i++)  [in test_pio_2d()]
    85: start[1] = mpi_rank * DIMSIZE1/mpi_size;  [in test_pio_2d()]
    87: count[1] = DIMSIZE1 / mpi_size;  [in test_pio_2d()]
    158: for(i = 0; i < DIMSIZE1 / mpi_size; i++)  [in test_pio_4d()]
    194: ustart[3] = DIMSIZE1 * mpi_rank / mpi_size;  [in test_pio_4d()]
    198: ucount[3] = DIMSIZE1 / mpi_size;  [in test_pio_4d()]
    258: int mpi_size, mpi_rank;  [in main(), local]
    272: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);  [in main()]
    276: if ((float)DIMSIZE1 / mpi_size != (int)(DIMSIZE1 / mpi_size))  [in main()]
    [all …]

tst_parallel3.c
    69: if(mpi_size == NUMP[i])  [in main()]
    159: int mpi_size, mpi_rank;  [in test_pio(), local]
    212: count[1] = DIMSIZE/mpi_size;  [in test_pio()]
    253: ucount[3] = DIMSIZE/mpi_size;  [in test_pio()]
    291: count[1] = DIMSIZE/mpi_size;  [in test_pio()]
    364: int mpi_size, mpi_rank;  [in test_pio_attr(), local]
    428: count[1] = DIMSIZE/mpi_size;  [in test_pio_attr()]
    456: count[1] = DIMSIZE/mpi_size;  [in test_pio_attr()]
    548: int mpi_size, mpi_rank;  [in test_pio_hyper(), local]
    573: if(mpi_size == 1) return 0;  [in test_pio_hyper()]
    [all …]

tst_parallel4.c  (all matches in main())
    37: int mpi_size, mpi_rank;  [local]
    58: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    64: if (NUM_SLABS % mpi_size != 0)
    67: NUM_SLABS, mpi_size);
    114: if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR;
    140: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    142: start[0] = NUM_SLABS / mpi_size * mpi_rank + i;
    160: printf("%d\t%g\t%g\n", mpi_size, total_time, DIMSIZE * DIMSIZE * NUM_SLABS * sizeof(int) / total_t…
    185: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    187: start[0] = NUM_SLABS / mpi_size * mpi_rank + i;

tst_parallel_compress.c  (all matches in main())
    38: int mpi_size, mpi_rank;  [local]
    51: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    55: if (!(slab_data = malloc(sizeof(int) * DIMSIZE * DIMSIZE / mpi_size))) ERR;
    59: for (i = 0; i < DIMSIZE * DIMSIZE / mpi_size; i++)
    117: start[0] = mpi_rank * DIMSIZE/mpi_size;
    119: count[0] = DIMSIZE/mpi_size;
    143: if (!(slab_data_in = malloc(sizeof(int) * DIMSIZE * DIMSIZE / mpi_size))) ERR;
    166: for (i = 0; i < DIMSIZE * DIMSIZE / mpi_size; i++)

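Each rank allocates DIMSIZE*DIMSIZE/mpi_size elements (line 55) and writes the row block starting at mpi_rank*DIMSIZE/mpi_size per lines 117 and 119. A sketch of that pattern against a deflate-compressed variable; parallel writes through a filter must use collective access, the names and sizes here are illustrative, and np is assumed to divide DIMSIZE:

    /* Sketch: per-rank row-block write to a deflate-compressed variable,
     * following the start/count math on lines 117/119 above. */
    #include <stdlib.h>
    #include <mpi.h>
    #include <netcdf.h>
    #include <netcdf_par.h>

    #define DIMSIZE 16

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank, ncid, dimids[2], varid;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        int *slab_data = malloc(sizeof(int) * DIMSIZE * DIMSIZE / mpi_size);
        for (int i = 0; i < DIMSIZE * DIMSIZE / mpi_size; i++)
            slab_data[i] = mpi_rank;

        nc_create_par("compressed.nc", NC_NETCDF4 | NC_MPIIO, MPI_COMM_WORLD,
                      MPI_INFO_NULL, &ncid);
        nc_def_dim(ncid, "x", DIMSIZE, &dimids[0]);
        nc_def_dim(ncid, "y", DIMSIZE, &dimids[1]);
        nc_def_var(ncid, "data", NC_INT, 2, dimids, &varid);
        nc_def_var_deflate(ncid, varid, 0, 1, 1);      /* deflate level 1 */
        nc_var_par_access(ncid, varid, NC_COLLECTIVE); /* required for filters */
        nc_enddef(ncid);

        size_t start[2] = {(size_t)(mpi_rank * DIMSIZE / mpi_size), 0};
        size_t count[2] = {(size_t)(DIMSIZE / mpi_size), DIMSIZE};
        nc_put_vara_int(ncid, varid, start, count, slab_data);

        nc_close(ncid);
        free(slab_data);
        MPI_Finalize();
        return 0;
    }
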
tst_simplerw_coll_r.c  (all matches in main())
    31: int mpi_size, mpi_rank;  [local]
    44: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    49: if (NUM_SLABS % mpi_size)
    53: NUM_SLABS, mpi_size);
    132: NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size, fv, test_type[tt]);
    284: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    286: write_start[0] = NUM_SLABS / mpi_size * mpi_rank + i;
    312: if (mpi_size_in != mpi_size) ERR;
    314: if (mpi_size_in != mpi_size) ERR;
    323: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    [all …]

/dports/math/libmesh/libmesh-1.6.2/contrib/netcdf/netcdf-c-4.6.2/nc_test/

tst_parallel2.c  (all matches in main())
    35: int mpi_size, mpi_rank;  [local]
    56: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    60: printf("mpi_name: %s size: %d rank: %d\n", mpi_name, mpi_size, mpi_rank);
    64: if (NUM_SLABS % mpi_size != 0)
    67: NUM_SLABS, mpi_size);
    97: NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size);
    119: if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR;
    150: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    152: start[0] = NUM_SLABS / mpi_size * mpi_rank + i;
    194: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    [all …]

/dports/science/netcdf/netcdf-c-4.7.4/nc_test/

tst_parallel2.c  (all matches in main())
    46: int mpi_size, mpi_rank;  [local]
    67: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    71: printf("mpi_name: %s size: %d rank: %d\n", mpi_name, mpi_size, mpi_rank);
    75: if (NUM_SLABS % mpi_size != 0)
    78: NUM_SLABS, mpi_size);
    108: NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size);
    130: if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR;
    161: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    163: start[0] = NUM_SLABS / mpi_size * mpi_rank + i;
    205: for (i = 0; i < NUM_SLABS / mpi_size; i++)
    [all …]

/dports/science/hdf5-18/hdf5-1.8.21/fortran/testpar/

ptest.f90
    29: INTEGER :: mpi_size ! number of processes in the group of communicator  [variable]
    45: CALL mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror )
    63: CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
    73: CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
    83: CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
    93: CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
    103: CALL multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)

/dports/science/ascent/ascent-0.7.1-66-gbcf2742a/src/examples/proxies/kripke/Kripke/

ParallelComm.cpp
    20: int mpi_size;  [in computeTag(), local]
    21: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);  [in computeTag()]
    23: int tag = mpi_rank + mpi_size*sdom_id;  [in computeTag()]
    29: int mpi_size;  [in computeRankSdom(), local]
    30: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);  [in computeRankSdom()]
    32: mpi_rank = tag % mpi_size;  [in computeRankSdom()]
    33: sdom_id = tag / mpi_size;  [in computeRankSdom()]
    77: int mpi_rank, mpi_size;  [in postRecvs(), local]
    79: MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);  [in postRecvs()]
    119: int mpi_rank, mpi_size;  [in postSends(), local]
    [all …]

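computeTag() packs a (rank, subdomain) pair into a single MPI tag as mpi_rank + mpi_size*sdom_id, and computeRankSdom() inverts it with modulo and division. A round-trip sketch of that encoding; note that real MPI implementations bound tags by MPI_TAG_UB, so sdom_id must stay small enough:

    /* Sketch: pack a (rank, subdomain) pair into a single MPI tag and
     * unpack it again, as in computeTag()/computeRankSdom() above. */
    #include <assert.h>
    #include <mpi.h>

    static int compute_tag(int mpi_size, int mpi_rank, int sdom_id)
    {
        return mpi_rank + mpi_size * sdom_id;   /* rank is the low "digit" */
    }

    static void compute_rank_sdom(int mpi_size, int tag, int *mpi_rank, int *sdom_id)
    {
        *mpi_rank = tag % mpi_size;             /* recover the low digit */
        *sdom_id  = tag / mpi_size;             /* and the high digit */
    }

    int main(int argc, char **argv)
    {
        int mpi_size, mpi_rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        for (int sdom_id = 0; sdom_id < 4; sdom_id++) {
            int tag = compute_tag(mpi_size, mpi_rank, sdom_id);
            int r, s;
            compute_rank_sdom(mpi_size, tag, &r, &s);
            assert(r == mpi_rank && s == sdom_id);  /* round trip is exact */
        }

        MPI_Finalize();
        return 0;
    }
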
/dports/science/chemps2/CheMPS2-1.8.10/CheMPS2/include/chemps2/

MPIchemps2.h
    119: static int mpi_size(){  [function definition]
    168: return ( 1 + index1 + (index2*(index2+1))/2 ) % mpi_size();  [in owner_absigma()]
    180: return ( 1 + (L*(L+1))/2 + index1 + (index2*(index2+1))/2 ) % mpi_size();  [in owner_cdf()]
    190: return ( 1 + L*(L+1) + index ) % mpi_size();  [in owner_q()]
    204: …n ( 1 + L*(L+1) + index1 + (index2*(index2+1))/2 + (index3*(index3+1)*(index3+2))/6 ) % mpi_size();  [in owner_3rdm_diagram()]
    220: return (macro + L*(L+2)) % mpi_size();  [in owner_specific_diagram()]
    230: return (MPI_CHEMPS2_OFFSET + L*(L+2) + excitation) % mpi_size();  [in owner_specific_excitation()]
    272: …return ( my_value * MPIchemps2::mpi_size() == tot_value ); // Only true if mybool is the same for …  [in all_booleans_equal()]

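The owner_* helpers map structured index tuples to ranks: a triangular pair index such as 1 + index1 + index2*(index2+1)/2, taken modulo mpi_size(), spreads the index1 <= index2 pairs round-robin over the processes. A sketch of that mapping in C; the driver loop and the value of L are illustrative:

    /* Sketch: round-robin owner of a triangular (index1 <= index2) pair,
     * following owner_absigma() above. */
    #include <stdio.h>
    #include <mpi.h>

    static int g_mpi_size = 1;           /* stands in for the mpi_size() accessor */

    /* Owner of pair (index1, index2): linearize the triangle, wrap onto ranks. */
    static int owner_absigma(int index1, int index2)
    {
        return (1 + index1 + (index2 * (index2 + 1)) / 2) % g_mpi_size;
    }

    int main(int argc, char **argv)
    {
        int mpi_rank;
        const int L = 6;                 /* number of orbitals (assumed) */

        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &g_mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        /* Each rank works only on the pairs it owns. */
        for (int index2 = 0; index2 < L; index2++)
            for (int index1 = 0; index1 <= index2; index1++)
                if (owner_absigma(index1, index2) == mpi_rank)
                    printf("rank %d owns pair (%d,%d)\n", mpi_rank, index1, index2);

        MPI_Finalize();
        return 0;
    }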