/dports/misc/adios2/ADIOS2-2.7.1/bindings/Matlab/test/
  test1_write.py
    25: npcols = numpy.array([NCols])   [variable]
    39: varCols = io.DefineVariable("ncols", npcols)
    53: fw.Put(varCols, npcols)

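For orientation, those three matched lines are the core of a small ADIOS2 write test. A minimal sketch of the same pattern, assuming the ADIOS2 2.7 full Python API; everything outside the matched lines (the ADIOS/IO setup, the output file name, and the value of NCols) is filled in here purely for illustration:

    import numpy
    import adios2

    NCols = 4                                      # illustrative value
    npcols = numpy.array([NCols])                  # matched line 25

    adios = adios2.ADIOS()                         # assumed setup
    io = adios.DeclareIO("writer")                 # assumed IO name
    varCols = io.DefineVariable("ncols", npcols)   # matched line 39

    fw = io.Open("test.bp", adios2.Mode.Write)     # assumed output file
    fw.Put(varCols, npcols)                        # matched line 53
    fw.Close()
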
/dports/science/cp2k/cp2k-2e995eec7fd208c8a72d9544807bd8b8ba8cd1cc/src/
  cp_dbcsr_operations.F
    727: CALL dbcsr_distribution_get(sparse_dist, npcols=npcols, row_dist=row_dist)
    900: CALL dbcsr_distribution_get(tmpl_dist, nprows=nprows, npcols=npcols)
    902: CALL create_bl_distribution(col_dist, col_blk_size, n, npcols)
    948: npcols=npcols, &
    952: CALL create_bl_distribution(col_dist, col_blk_size, n, npcols)
    1093: npcols=npcols)
    1099: nimages = lcm(nprows, npcols)/nprows
    1100: multiplicity = nprows/gcd(nprows, npcols)
    1250: npcols = SIZE(pgrid, 2)
    1251: DO pdim = 0, MIN(npcols - 1, nblkcols - 1)
    [all …]

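Lines 1099 and 1100 are the arithmetic that sizes the communication images from the process grid. A worked Python sketch of that arithmetic; the 4 x 6 grid is a made-up example, not taken from the code:

    from math import gcd

    def images_and_multiplicity(nprows, npcols):
        # nimages      = lcm(nprows, npcols) / nprows     (line 1099)
        # multiplicity = nprows / gcd(nprows, npcols)      (line 1100)
        lcm = nprows * npcols // gcd(nprows, npcols)
        return lcm // nprows, nprows // gcd(nprows, npcols)

    print(images_and_multiplicity(4, 6))   # (3, 2): lcm is 12, so 3 images and multiplicity 2
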
/dports/science/cp2k/cp2k-2e995eec7fd208c8a72d9544807bd8b8ba8cd1cc/src/
  distribution_methods.F
    848: INTEGER, INTENT(IN) :: npcols   [local]
    864: nbins = lcm(nprows, npcols)
    865: pgrid_gcd = gcd(nprows, npcols)
    879: prow = INT((bin - 1)*pgrid_gcd/npcols)
    883: IF (pcol .GE. npcols) &
    914: INTEGER, INTENT(IN) :: npcols   [local]
    926: nbins = lcm(nprows, npcols)
    927: pgrid_gcd = gcd(nprows, npcols)
    937: row_distribution(iatom) = (distribution(iatom) - 1)*pgrid_gcd/npcols + 1
    1025: INTEGER, INTENT(IN) :: npcols   [local]
    [all …]

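These routines hash items into lcm(nprows, npcols) bins and then fold the bins back onto process rows; the expression on line 937, (bin - 1)*pgrid_gcd/npcols + 1, is that fold. A small sketch of the row mapping, assuming 1-based bins as in the Fortran (the column mapping is not reproduced here, and the grid size is illustrative):

    from math import gcd

    def row_of_bin(bin_index, nprows, npcols):
        # Map a 1-based bin in 1..lcm(nprows, npcols) to a 1-based process row,
        # following the expression on the matched line 937.
        pgrid_gcd = gcd(nprows, npcols)
        return (bin_index - 1) * pgrid_gcd // npcols + 1

    nprows, npcols = 4, 6                             # illustrative grid
    nbins = nprows * npcols // gcd(nprows, npcols)    # lcm, as on lines 864/865
    print([row_of_bin(b, nprows, npcols) for b in range(1, nbins + 1)])
    # [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4] -> every row receives the same number of bins
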
/dports/science/cp2k-data/cp2k-7.1.0/src/
  cp_dbcsr_operations.F
    733: CALL dbcsr_distribution_get(sparse_dist, npcols=npcols, row_dist=row_dist)
    906: CALL dbcsr_distribution_get(tmpl_dist, nprows=nprows, npcols=npcols)
    908: CALL create_bl_distribution(col_dist, col_blk_size, n, npcols)
    954: npcols=npcols, &
    958: CALL create_bl_distribution(col_dist, col_blk_size, n, npcols)
    1099: npcols=npcols)
    1105: nimages = lcm (nprows, npcols) / nprows
    1106: multiplicity = nprows / gcd (nprows, npcols)
    1256: npcols = SIZE(pgrid, 2)
    1257: DO pdim = 0 , MIN(npcols-1, nblkcols-1)
    [all …]

  distribution_methods.F
    851: INTEGER, INTENT(IN) :: npcols   [local]
    867: nbins = lcm(nprows, npcols)
    868: pgrid_gcd = gcd(nprows, npcols)
    882: prow = INT((bin - 1)*pgrid_gcd/npcols)
    886: IF (pcol .GE. npcols) &
    917: INTEGER, INTENT(IN) :: npcols   [local]
    929: nbins = lcm(nprows, npcols)
    930: pgrid_gcd = gcd(nprows, npcols)
    940: row_distribution(iatom) = (distribution(iatom) - 1)*pgrid_gcd/npcols + 1
    1028: INTEGER, INTENT(IN) :: npcols   [local]
    [all …]

/dports/math/dbcsr/dbcsr-2.1.0/src/dist/
  dbcsr_dist_methods.F
    155: INTEGER :: handle, i, lcmv, mypcoor, npcols, &   [local]
    164: npcols = dbcsr_mp_npcols(mp_env)
    165: lcmv = lcm(nprows, npcols)
    182: IF (dist%d%max_col_dist .GE. npcols) &
    237: … group, mynode, numnodes, nprows, npcols, myprow, mypcol, pgrid, &   [argument]
    243: … INTEGER, INTENT(OUT), OPTIONAL :: group, mynode, numnodes, nprows, npcols, &   [local]
    259: IF (PRESENT(npcols)) npcols = SIZE(dist%d%mp_env%mp%pgrid, 2)
    422: nprows, npcols) RESULT(num_images_1d)
    426: npcols   [local]
    435: lcmv = lcm(nprows, npcols)

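Two of these matches are worth noting: the getter reads the column count straight from the second extent of the process-grid array (line 259), and line 182 guards against a block distribution that points past the last process column. A rough NumPy analogue of those two ideas; all array contents here are illustrative, not taken from DBCSR:

    import numpy as np

    pgrid = np.arange(12).reshape(3, 4)      # illustrative 3 x 4 process grid
    nprows, npcols = pgrid.shape             # SIZE(pgrid, 1) and SIZE(pgrid, 2)

    col_dist = np.array([0, 2, 1, 3, 3, 0])  # block column -> process column (0-based)
    if col_dist.max() >= npcols:             # mirrors the guard on line 182
        raise ValueError("col_dist references a process column outside the grid")
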
/dports/science/cp2k/cp2k-2e995eec7fd208c8a72d9544807bd8b8ba8cd1cc/exts/dbcsr/src/dist/
  dbcsr_dist_methods.F
    156: INTEGER :: handle, i, lcmv, mypcoor, npcols, &   [local]
    165: npcols = dbcsr_mp_npcols(mp_env)
    166: lcmv = lcm(nprows, npcols)
    183: IF (dist%d%max_col_dist .GE. npcols) &
    238: … group, mynode, numnodes, nprows, npcols, myprow, mypcol, pgrid, &   [argument]
    244: … INTEGER, INTENT(OUT), OPTIONAL :: group, mynode, numnodes, nprows, npcols, &   [local]
    260: IF (PRESENT(npcols)) npcols = SIZE(dist%d%mp_env%mp%pgrid, 2)
    423: nprows, npcols) RESULT(num_images_1d)
    427: npcols   [local]
    436: lcmv = lcm(nprows, npcols)

/dports/math/eclib/eclib-20210318/libsrc/eclib/
  mmatrix.h
    87: friend mat_m echelon0(const mat_m& m, vec_i& pcols, vec_i& npcols,
    90: friend mat_m echmodp(const mat_m& m, vec_i& pcols, vec_i& npcols,
    119: mat_m echelon(const mat_m& m, vec_i& pcols, vec_i& npcols,
    121: mat_m echelon(const mat_m& m, vec_l& pcols, vec_l& npcols,

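Note that npcols means something different in eclib: these echelon routines return the pivotal columns in pcols and the non-pivotal columns in npcols, rather than a process-grid dimension. A small illustrative sketch of that bookkeeping, done with plain Gaussian elimination mod p in Python rather than eclib's mat_m types:

    def echelon_mod_p(rows, p):
        # Row-reduce an integer matrix mod p and return
        # (reduced rows, pivotal columns, non-pivotal columns).
        m = [[x % p for x in row] for row in rows]
        nrows, ncols = len(m), len(m[0])
        pcols, r = [], 0
        for c in range(ncols):
            piv = next((i for i in range(r, nrows) if m[i][c]), None)
            if piv is None:
                continue                       # no pivot in this column
            m[r], m[piv] = m[piv], m[r]
            inv = pow(m[r][c], -1, p)          # modular inverse (Python 3.8+)
            m[r] = [(x * inv) % p for x in m[r]]
            for i in range(nrows):
                if i != r and m[i][c]:
                    m[i] = [(a - m[i][c] * b) % p for a, b in zip(m[i], m[r])]
            pcols.append(c)
            r += 1
        npcols = [c for c in range(ncols) if c not in pcols]
        return m, pcols, npcols

    # Over F_5 the second column is a multiple of the first, so it is non-pivotal.
    print(echelon_mod_p([[1, 2, 0], [2, 4, 1]], 5)[1:])   # ([0, 2], [1])
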
/dports/math/dbcsr/dbcsr-2.1.0/src/mm/
  dbcsr_mm_dist_operations.F
    100: npcols = dbcsr_mp_npcols(mp_env)
    102: nprows, npcols
    145: imgdist%i%col_decimation = nimages_cols/npcols
    166: npcols, &
    171: npcols, &
    178: npcols, imgdist%i%col_decimation)
    583: npcols = SIZE(mp%mp%pgrid, 2)
    589: nvpcols = npcols*ncol_images
    775: INTEGER :: error_handle, npcols   [local]
    783: npcols = dbcsr_mp_npcols(dbcsr_distribution_mp(dist))
    [all …]

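In the multiplication code the real grid is widened into a virtual one: the column decimation is nimages_cols/npcols (line 145) and the virtual column count is npcols*ncol_images (line 589). A tiny worked example of those two expressions; the numbers, and the assumption that nimages_cols is a multiple of npcols, are illustrative:

    npcols = 6               # real process columns (illustrative)
    nimages_cols = 12        # total column images (illustrative)
    ncol_images = 2          # images per process column (illustrative)

    col_decimation = nimages_cols // npcols   # line 145 -> 2
    nvpcols = npcols * ncol_images            # line 589 -> 12
    print(col_decimation, nvpcols)
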
/dports/science/cp2k/cp2k-2e995eec7fd208c8a72d9544807bd8b8ba8cd1cc/exts/dbcsr/src/mm/
  dbcsr_mm_dist_operations.F
    101: npcols = dbcsr_mp_npcols(mp_env)
    103: nprows, npcols
    146: imgdist%i%col_decimation = nimages_cols/npcols
    167: npcols, &
    172: npcols, &
    179: npcols, imgdist%i%col_decimation)
    593: npcols = SIZE(mp%mp%pgrid, 2)
    599: nvpcols = npcols*ncol_images
    793: INTEGER :: error_handle, npcols   [local]
    801: npcols = dbcsr_mp_npcols(dbcsr_distribution_mp(dist))
    [all …]

/dports/math/dbcsr/dbcsr-2.1.0/tests/inputs/
  test_H2O.perf, test_rect1_dense.perf, test_rect1_sparse.perf, test_square_sparse_bigblocks.perf,
  test_square_sparse.perf, test_square_sparse_rma.perf, test_rect2_sparse.perf, test_square_dense.perf,
  test_rect2_dense.perf, test_singleblock.perf
  (each matches on the same two header lines)
    1: # npcols MPI grid, 0 leaves MPI to find the best grid.
    2: # Note that the total number of processors must be divisible per npcols

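That repeated header comment is the whole contract of these .perf inputs: npcols can be pinned explicitly, a value of 0 leaves the choice to MPI, and the total rank count has to be divisible by npcols. A small validation sketch of that rule in Python; the function name and the way nprows is derived are illustrative, not part of the .perf reader:

    def resolve_grid(numnodes, npcols):
        # npcols == 0: leave the split to MPI (e.g. via MPI_Dims_create).
        if npcols == 0:
            return None
        # Otherwise the total number of processors must be divisible by npcols.
        if numnodes % npcols != 0:
            raise ValueError(f"{numnodes} ranks not divisible by npcols={npcols}")
        return numnodes // npcols, npcols     # (nprows, npcols)

    print(resolve_grid(24, 6))   # (4, 6)
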
/dports/science/cp2k/cp2k-2e995eec7fd208c8a72d9544807bd8b8ba8cd1cc/exts/dbcsr/tests/
  input.perf
    1: # npcols MPI grid, 0 leaves MPI to find the best grid.
    2: # Note that the total number of processors must be divisible per npcols

/dports/science/cp2k/cp2k-2e995eec7fd208c8a72d9544807bd8b8ba8cd1cc/exts/dbcsr/tests/inputs/
  test_rect2_sparse.perf, test_square_sparse_rma.perf, test_rect1_sparse.perf, test_rect2_dense.perf
  (each matches on the same two header lines)
    1: # npcols MPI grid, 0 leaves MPI to find the best grid.
    2: # Note that the total number of processors must be divisible per npcols