
Searched refs:rank (Results 8601 – 8625 of 52263) sorted by relevance


/dports/net/mpich2/mpich2-1.5/test/mpi/pt2pt/
pscancel.c
18 int rank, size, source, dest; in main() local
33 MPI_Comm_rank( comm, &rank ); in main()
40 if (rank == 0) { in main()
69 else if (rank == dest) in main()
90 if (rank == 0) { in main()
131 else if (rank == dest) in main()
159 if (rank == 0 && dest != rank) { in main()
197 else if (rank == dest) in main()
219 if (rank == 0) { in main()
248 else if (rank == dest) in main()
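The hits above show the standard rank/size bookkeeping MPI tests start from: query rank and size, then branch into sender and receiver roles by rank. A minimal compilable sketch of that pattern (the roles and payload are illustrative, not the test's):

    // Query this process's rank and the communicator size, then pick roles.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char **argv) {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);   // this process's rank
        MPI_Comm_size(MPI_COMM_WORLD, &size);   // total number of processes
        if (size < 2) { MPI_Finalize(); return 0; }
        int source = 0, dest = size - 1;        // illustrative role assignment
        if (rank == source) {
            int payload = 42;
            MPI_Send(&payload, 1, MPI_INT, dest, 0, MPI_COMM_WORLD);
        } else if (rank == dest) {
            int payload;
            MPI_Recv(&payload, 1, MPI_INT, source, 0, MPI_COMM_WORLD,
                     MPI_STATUS_IGNORE);
            std::printf("rank %d received %d\n", rank, payload);
        }
        MPI_Finalize();
        return 0;
    }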
/dports/devel/blitz/blitz-1.0.2/blitz/
indexexpr.h
213 ((N>0) ? ArraySectionInfo<T1>::rank : 0)
214 + ((N>1) ? ArraySectionInfo<T2>::rank : 0)
215 + ((N>2) ? ArraySectionInfo<T3>::rank : 0)
216 + ((N>3) ? ArraySectionInfo<T4>::rank : 0)
217 + ((N>4) ? ArraySectionInfo<T5>::rank : 0)
218 + ((N>5) ? ArraySectionInfo<T6>::rank : 0)
219 + ((N>6) ? ArraySectionInfo<T7>::rank : 0)
220 + ((N>7) ? ArraySectionInfo<T8>::rank : 0)
221 + ((N>8) ? ArraySectionInfo<T9>::rank : 0)
222 + ((N>9) ? ArraySectionInfo<T10>::rank : 0)
[all …]
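Blitz++ computes the rank of an array-section expression at compile time by summing the static `rank` members of the first N slot types, via the ternary chain above. A minimal sketch of the same idiom with illustrative stand-in types (not Blitz++'s real definitions):

    // Each slot type advertises how many dimensions it keeps: a full range
    // keeps one, a scalar index keeps none. The ternary chain adds up the
    // contributions of the first N slots, exactly as indexexpr.h does.
    struct FullDim {};  // stand-in for a kept dimension

    template <typename T> struct SectionInfo { static constexpr int rank = 1; };
    template <> struct SectionInfo<int>      { static constexpr int rank = 0; };

    template <int N, typename T1, typename T2, typename T3>
    struct TotalRank {
        static constexpr int value =
              ((N > 0) ? SectionInfo<T1>::rank : 0)
            + ((N > 1) ? SectionInfo<T2>::rank : 0)
            + ((N > 2) ? SectionInfo<T3>::rank : 0);
    };

    // Slicing (range, index) from a 2-D array leaves a rank-1 result.
    static_assert(TotalRank<2, FullDim, int, FullDim>::value == 1,
                  "one dimension survives");

    int main() { return 0; }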
/dports/net/mpich2/mpich2-1.5/src/mpi/rma/
win_unlock.c
51 int MPI_Win_unlock(int rank, MPI_Win win) in MPI_Win_unlock() argument
90 MPIR_ERRTEST_SEND_RANK(comm_ptr, rank, mpi_errno); in MPI_Win_unlock()
92 if (win_ptr->lockRank != rank) { in MPI_Win_unlock()
99 "**mismatchedlockrank %d %d", rank, win_ptr->lockRank ); in MPI_Win_unlock()
110 mpi_errno = MPIU_RMA_CALL(win_ptr,Win_unlock(rank, win_ptr)); in MPI_Win_unlock()
129 "**mpi_win_unlock %d %W", rank, win); in MPI_Win_unlock()
/dports/net/mpich/mpich-3.4.3/test/mpi/io/
i_rdwrord.c
18 int size, rank, i, *buf, rc; in main() local
31 MPI_Comm_rank(comm, &rank); in main()
33 buf[0] = rank; in main()
54 fprintf(stderr, "%d: buf[%d] = %d\n", rank, i, buf[i]); in main()
62 if (buf[0] != rank) { in main()
64 fprintf(stderr, "%d: buf[0] = %d\n", rank, buf[0]); in main()
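i_rdwrord.c drives the nonblocking ordered-mode file I/O routines: each rank contributes buf[0] = rank, and rank order fixes the order of blocks in the file. A minimal sketch of the blocking shape of that pattern (the test itself uses the nonblocking variants, which add a request/wait; the file name is illustrative):

    #include <mpi.h>

    int main(int argc, char **argv) {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_File fh;
        MPI_File_open(MPI_COMM_WORLD, "testfile",
                      MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
        int buf = rank;  // rank order determines position in the file
        MPI_File_write_ordered(fh, &buf, 1, MPI_INT, MPI_STATUS_IGNORE);
        MPI_File_close(&fh);
        MPI_Finalize();
        return 0;
    }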
i_darray_read.c
31 int rank, size; in main() local
59 MPI_Comm_rank(MPI_COMM_WORLD, &rank); in main()
62 CHECK(MPI_Type_create_darray(size, rank, 2, gsize, distrib, in main()
71 if (rank == 0) { in main()
99 if (rank == i) { in main()
100 printf("=== Rank %i === (%i elements) \nPacked: ", rank, nelem); in main()
114 if (rank == i) { in main()
118 rank, j, pdata[j], ldata[j]); in main()
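This hit shows MPI_Type_create_darray, keyed by (size, rank), building each rank's view of a block-distributed 2-D global array. A minimal sketch with illustrative extents:

    #include <mpi.h>

    int main(int argc, char **argv) {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        int gsize[2]   = {16, 16};               // global extents (illustrative)
        int distrib[2] = {MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_BLOCK};
        int dargs[2]   = {MPI_DISTRIBUTE_DFLT_DARG, MPI_DISTRIBUTE_DFLT_DARG};
        int psizes[2]  = {0, 0};
        MPI_Dims_create(size, 2, psizes);        // factor the ranks into a grid
        MPI_Datatype darray;
        MPI_Type_create_darray(size, rank, 2, gsize, distrib, dargs, psizes,
                               MPI_ORDER_C, MPI_INT, &darray);
        MPI_Type_commit(&darray);
        // ...typically installed as an MPI-IO filetype via MPI_File_set_view...
        MPI_Type_free(&darray);
        MPI_Finalize();
        return 0;
    }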
/dports/net/mpich/mpich-3.4.3/test/mpi/pt2pt/
recv_any.c
25 int rank = 0, nprocs = 0; in main() local
34 MPI_Comm_rank(MPI_COMM_WORLD, &rank); in main()
47 if (rank == dst) { in main()
58 rank, x, stat.MPI_SOURCE, src); in main()
64 rank, x, i, rbuf[i], i + 1); in main()
67 } else if (rank == src) { in main()
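recv_any.c checks that a wildcard receive reports the true sender. The core pattern, as a minimal sketch (roles are illustrative):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char **argv) {
        MPI_Init(&argc, &argv);
        int rank, nprocs;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
        const int src = 0, dst = nprocs - 1;
        if (nprocs >= 2 && rank == src) {
            int x = 7;
            MPI_Send(&x, 1, MPI_INT, dst, 0, MPI_COMM_WORLD);
        } else if (nprocs >= 2 && rank == dst) {
            int x;
            MPI_Status stat;
            MPI_Recv(&x, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &stat);
            // The actual sender comes back in the status, which the test
            // compares against the expected src.
            std::printf("rank %d got %d from %d\n", rank, x, stat.MPI_SOURCE);
        }
        MPI_Finalize();
        return 0;
    }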
/dports/misc/adios2/ADIOS2-2.7.1/examples/heatTransfer/read/
ReadSettings.cpp
36 ReadSettings::ReadSettings(int argc, char *argv[], int rank, int nproc) in ReadSettings() argument
37 : rank{rank} in ReadSettings()
55 posx = rank % npx;
56 posy = rank / npx;
82 std::cout << "rank " << rank << " reads 2D slice " << ndx << " x " << ndy in DecomposeArray()
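The ReadSettings hit places ranks on an npx-wide process grid in row-major order: posx = rank % npx, posy = rank / npx. The arithmetic in isolation, with an assumed grid width:

    #include <cassert>

    int main() {
        const int npx = 4;            // processes per row (assumed)
        for (int rank = 0; rank < 8; ++rank) {
            int posx = rank % npx;    // column in the process grid
            int posy = rank / npx;    // row in the process grid
            assert(posx + posy * npx == rank);  // round-trip identity
        }
        return 0;
    }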
/dports/math/gap/gap-4.11.0/pkg/SCO-2019.09.02/examples/orbifolds/
pmg.g
22 # 1: 15 x 134 matrix with rank 14 and kernel dimension 1. Time: 0.000 sec.
23 # 2: 134 x 583 matrix with rank 117 and kernel dimension 17. Time: 0.004 sec.
24 # 3: 583 x 2934 matrix with rank 462 and kernel dimension 121. Time: 0.152 sec.
25 # 4: 2934 x 17126 matrix with rank 2468 and kernel dimension 466. Time: 3.145 sec.
26 # 5: 17126 x 104729 matrix with rank 14654 and kernel dimension 2472. Time: 114.119 sec.
27 # 6: 104729 x 658093 matrix with rank 90071 and kernel dimension 14658. Time: 4647.566 sec.
S1C2V4.g
10 # 1: 3 x 23 matrix with rank 2 and kernel dimension 1. Time: 0.000 sec.
11 # 2: 23 x 131 matrix with rank 18 and kernel dimension 5. Time: 0.000 sec.
12 # 3: 131 x 791 matrix with rank 109 and kernel dimension 22. Time: 0.008 sec.
13 # 4: 791 x 5123 matrix with rank 677 and kernel dimension 114. Time: 0.248 sec.
14 # 5: 5123 x 34583 matrix with rank 4440 and kernel dimension 683. Time: 10.929 sec.
15 # 6: 34583 x 238211 matrix with rank 30136 and kernel dimension 4447. Time: 483.906 sec.
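Every line of these two SCO logs is consistent with rank-nullity applied to the domain of each boundary matrix (GAP multiplies row vectors on the left, so an m x n matrix maps an m-dimensional space):

    % Rank-nullity for each boundary map d_i given by an m_i x m_{i+1} matrix:
    \[
      \operatorname{rank}(d_i) + \dim\ker(d_i) = m_i
    \]
    % e.g. line 2 of pmg.g: the 134 x 583 matrix has 117 + 17 = 134.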
/dports/science/pnetcdf/parallel-netcdf-1.8.1/test/testcases/
noclobber.c
26 int err, nerrs=0, ncid, cmode, rank, nprocs; in main() local
30 MPI_Comm_rank(MPI_COMM_WORLD, &rank); in main()
33 if (!rank) printf("Usage: %s [filename]\n",argv[0]); in main()
41 if (rank == 0) { in main()
65 if (rank == 0 && sum_size > 0) in main()
71 if (rank == 0) { in main()
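noclobber.c checks create semantics: with NC_NOCLOBBER, creating a file that already exists must fail rather than truncate it. A hedged sketch of that check through PnetCDF's C API (file name illustrative):

    #include <mpi.h>
    #include <pnetcdf.h>
    #include <cstdio>

    int main(int argc, char **argv) {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        int ncid;
        int err = ncmpi_create(MPI_COMM_WORLD, "testfile.nc", NC_NOCLOBBER,
                               MPI_INFO_NULL, &ncid);
        if (err == NC_EEXIST) {          // expected when the file already exists
            if (rank == 0) std::printf("refused to clobber, as expected\n");
        } else if (err == NC_NOERR) {
            ncmpi_close(ncid);
        }
        MPI_Finalize();
        return 0;
    }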
/dports/audio/lsp-plugins-lv2/lsp-plugins-1.1.31/include/dsp/arch/x86/avx2/fft/
normalize.h
31 void normalize_fft3(float *dre, float *dim, const float *re, const float *im, size_t rank) in normalize_fft3() argument
34 float k = 1.0f/(1 << rank); in normalize_fft3()
35 size_t count = 1 << rank, off = 0; in normalize_fft3()
77 void normalize_fft2(float *re, float *im, size_t rank) in normalize_fft2() argument
80 float k = 1.0f/(1 << rank); in normalize_fft2()
81 size_t count = 1 << rank, off = 0; in normalize_fft2()
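These kernels scale an FFT of 2^rank points by k = 1/2^rank; the AVX2 bodies just vectorize that loop (the identical AVX variant appears further down the results). The scalar form:

    #include <cstddef>

    // Scale a complex spectrum of 2^rank points by 1/2^rank.
    void normalize_fft(float *re, float *im, std::size_t rank) {
        const float k = 1.0f / float(1u << rank);
        const std::size_t count = std::size_t(1) << rank;
        for (std::size_t i = 0; i < count; ++i) {
            re[i] *= k;
            im[i] *= k;
        }
    }

    int main() {
        float re[8] = {8, 8, 8, 8, 8, 8, 8, 8}, im[8] = {};
        normalize_fft(re, im, 3);  // every re[i] becomes 1.0f
        return 0;
    }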
/dports/net/openmpi3/openmpi-3.1.6/ompi/mpi/fortran/mpif-h/
win_lock_f.c
41 … (MPI_Fint *lock_type, MPI_Fint *rank, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr),
42 (lock_type, rank, assert, win, ierr) )
61 … (MPI_Fint *lock_type, MPI_Fint *rank, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr),
62 (lock_type, rank, assert, win, ierr) )
69 void ompi_win_lock_f(MPI_Fint *lock_type, MPI_Fint *rank, in ompi_win_lock_f() argument
76 OMPI_FINT_2_INT(*rank), in ompi_win_lock_f()
/dports/net/openmpi/openmpi-4.1.1/ompi/mpi/fortran/mpif-h/
win_lock_f.c
41 … (MPI_Fint *lock_type, MPI_Fint *rank, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr),
42 (lock_type, rank, assert, win, ierr) )
61 … (MPI_Fint *lock_type, MPI_Fint *rank, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr),
62 (lock_type, rank, assert, win, ierr) )
69 void ompi_win_lock_f(MPI_Fint *lock_type, MPI_Fint *rank, in ompi_win_lock_f() argument
76 OMPI_FINT_2_INT(*rank), in ompi_win_lock_f()
/dports/science/chrono/chrono-7.0.1/src/tests/unit_tests/synchrono/
utest_SYN_MPI.cpp
35 int rank; variable
45 rank = communicator->GetRank(); in main()
47 SynChronoManager syn_manager(rank, num_ranks, communicator); in main()
50 if (rank != 0) { in main()
63 int msg_length = 10 + num_ranks - rank; in TEST()
76 my_data.push_back(rank); in TEST()
/dports/www/chromium-legacy/chromium-88.0.4324.182/third_party/llvm/mlir/lib/ExecutionEngine/
RunnerUtils.cpp
43 extern "C" void print_memref_i32(int64_t rank, void *ptr) { in print_memref_i32() argument
44 UnrankedMemRefType<int32_t> descriptor = {rank, ptr}; in print_memref_i32()
48 extern "C" void print_memref_f32(int64_t rank, void *ptr) { in print_memref_f32() argument
49 UnrankedMemRefType<float> descriptor = {rank, ptr}; in print_memref_f32()
53 extern "C" void print_memref_f64(int64_t rank, void *ptr) { in print_memref_f64() argument
54 UnrankedMemRefType<double> descriptor = {rank, ptr}; in print_memref_f64()
/dports/devel/tinygo/tinygo-0.14.1/llvm-project/mlir/test/mlir-cpu-runner/
mlir_runner_utils.cpp
30 int rank = M->rank; in print_memref_i8() local
33 switch (rank) { in print_memref_i8()
46 int rank = M->rank; in print_memref_f32() local
49 switch (rank) { in print_memref_f32()
/dports/science/smoldyn/smoldyn-2.67/source/libSteve/
Zn.h
27 int indx2addZV(int *indx,int *dim,int rank);
28 int *add2indxZV(int add,int *indx,int *dim,int rank);
29 int nextaddZV(int add,int *indx1,int *indx2,int *dim,int rank);
30 int indx2add3ZV(int *indx,int rank);
31 int *add2indx3ZV(int add,int *indx,int rank);
32 int neighborZV(int *indx,int *c,int *dim,int rank,int type,int *wrap,int *mid);
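Zn.h declares conversions between a rank-dimensional index vector and a flat address. A hedged reconstruction of what indx2addZV likely computes, in the standard row-major form (Smoldyn's actual axis ordering may differ):

    #include <cassert>

    // Flatten indx[0..rank-1] within extents dim[0..rank-1], last axis fastest.
    int indx2add(const int *indx, const int *dim, int rank) {
        int add = 0;
        for (int d = 0; d < rank; ++d)
            add = add * dim[d] + indx[d];
        return add;
    }

    int main() {
        int dim[3] = {4, 3, 2}, indx[3] = {2, 1, 1};
        assert(indx2add(indx, dim, 3) == (2 * 3 + 1) * 2 + 1);  // = 15
        return 0;
    }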
/dports/audio/lsp-plugins-lv2/lsp-plugins-1.1.31/include/dsp/arch/x86/avx/fft/
normalize.h
31 void normalize_fft3(float *dre, float *dim, const float *re, const float *im, size_t rank) in normalize_fft3() argument
34 float k = 1.0f/(1 << rank); in normalize_fft3()
35 size_t count = 1 << rank, off = 0; in normalize_fft3()
77 void normalize_fft2(float *re, float *im, size_t rank) in normalize_fft2() argument
80 float k = 1.0f/(1 << rank); in normalize_fft2()
81 size_t count = 1 << rank, off = 0; in normalize_fft2()
/dports/science/dakota/dakota-6.13.0-release-public.src-UI/packages/external/trilinos/packages/rol/adapters/tpetra/src/mpi/
ROL_PinTVectorCommunication_Tpetra.hpp
66 void send(MPI_Comm comm,int rank,Vector<Real> & source,int tag=0) const override in send() argument
76 …MPI_Send(const_cast<Real*>(&view(0,0)),int(view.extent(0)*view.extent(1)),MPI_DOUBLE,rank,tag,comm… in send()
82 void recv(MPI_Comm comm,int rank,Vector<Real> & dest,int tag=0) const override in recv() argument
90 …MPI_Recv(&view(0,0),int(view.extent(0)*view.extent(1)),MPI_DOUBLE,rank,tag,comm,MPI_STATUS_IGNORE); in recv()
98 void recvSumInto(MPI_Comm comm,int rank,Vector<Real> & dest,int tag=0) const override in recvSumInto() argument
109 MPI_Recv(&buffer[0],int(buffer.size()),MPI_DOUBLE,rank,tag,comm,MPI_STATUS_IGNORE); in recvSumInto()
/dports/math/py-numpy/numpy-1.20.3/doc/neps/
nep-0027-zero-rank-arrarys.rst
15 NumPy has both zero rank arrays and scalars. This design document, adapted
16 from a `2006 wiki entry`_, describes what zero rank arrays are and why they
19 for zero rank arrays and scalars in NumPy.
27 Zero-rank arrays are arrays with shape=(). For example:
37 Array scalars are similar to zero-rank arrays in many aspects::
69 * `rank-0 arrays`_ in a 2002 mailing list thread.
72 It has been suggested several times that NumPy just use rank-0 arrays to
153 As of NumPy release 0.9.3, zero-rank arrays do not support any indexing::
176 Francesc Altet supported the idea of ``[...]`` on zero-rank arrays and
217 Increasing rank with newaxis
[all …]
/dports/science/siconos/siconos-4.4.0/externals/numeric_bindings/boost/numeric/bindings/lapack/driver/
gelsd.hpp
94 fortran_int_t& rank, std::complex<float>* work, in gelsd() argument
111 fortran_int_t& rank, std::complex<double>* work, in gelsd() argument
145 const real_type rcond, fortran_int_t& rank, in invoke()
206 const real_type rcond, fortran_int_t& rank, in invoke()
233 const real_type rcond, fortran_int_t& rank, in invoke()
297 const real_type rcond, fortran_int_t& rank, in invoke()
358 const real_type rcond, fortran_int_t& rank, in invoke()
387 const real_type rcond, fortran_int_t& rank, in invoke()
467 MatrixA >::type >::type rcond, fortran_int_t& rank, in gelsd() argument
482 MatrixA >::type >::type rcond, fortran_int_t& rank ) { in gelsd() argument
[all …]
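gelsd.hpp wraps LAPACK's xGELSD, which solves a possibly rank-deficient least-squares problem by SVD and reports the effective rank through the fortran_int_t& rank out-parameter seen in every overload. A hedged sketch of the same computation via LAPACKE's C interface (an assumption for illustration; these bindings call the Fortran routines directly):

    #include <lapacke.h>
    #include <cstdio>

    int main() {
        // Row-major 3x2 system whose second column repeats the first,
        // so the effective rank should come back as 1.
        double a[6] = {1, 1,
                       2, 2,
                       3, 3};
        double b[3] = {1, 2, 3};   // rhs; needs max(m, n) rows
        double s[2];               // singular values, min(m, n) of them
        lapack_int rank = 0;
        lapack_int info = LAPACKE_dgelsd(LAPACK_ROW_MAJOR, 3, 2, 1,
                                         a, 2, b, 1, s,
                                         -1.0 /*rcond: machine eps*/, &rank);
        std::printf("info=%d, effective rank=%d\n", (int)info, (int)rank);
        return 0;
    }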
/dports/math/vtk9/VTK-9.1.0/ThirdParty/vtkm/vtkvtkm/vtk-m/examples/streamline_mpi/
StreamlineMPI.cxx
32 void LoadData(std::string& fname, std::vector<vtkm::cont::DataSet>& dataSets, int rank, int nRanks) in LoadData() argument
53 if (rank == 0) in LoadData()
57 int b0 = rank * nPer, b1 = (rank + 1) * nPer; in LoadData()
58 if (rank == (nRanks - 1)) in LoadData()
95 int rank = comm.rank(); in main() local
100 LoadData(dataFile, dataSets, rank, size); in main()
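LoadData splits the blocks across ranks with a plain block partition, giving the remainder to the last rank. The arithmetic in isolation (counts illustrative):

    #include <cstdio>

    int main() {
        const int nBlocks = 10, nRanks = 4;
        const int nPer = nBlocks / nRanks;
        for (int rank = 0; rank < nRanks; ++rank) {
            int b0 = rank * nPer, b1 = (rank + 1) * nPer;
            if (rank == nRanks - 1) b1 = nBlocks;  // last rank absorbs leftovers
            std::printf("rank %d: blocks [%d, %d)\n", rank, b0, b1);
        }
        return 0;
    }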
/dports/net/mpich/mpich-3.4.3/src/mpi/coll/reduce/
reduce_intra_binomial.c
21 int comm_size, rank, is_commutative, type_size ATTRIBUTE((unused)); in MPIR_Reduce_intra_binomial() local
31 rank = comm_ptr->rank; in MPIR_Reduce_intra_binomial()
47 if (rank != root) { in MPIR_Reduce_intra_binomial()
54 if ((rank != root) || (sendbuf != MPI_IN_PLACE)) { in MPIR_Reduce_intra_binomial()
97 relrank = (rank - lroot + comm_size) % comm_size; in MPIR_Reduce_intra_binomial()
149 if (rank == 0) { in MPIR_Reduce_intra_binomial()
152 } else if (rank == root) { in MPIR_Reduce_intra_binomial()
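The binomial reduce rotates ranks so the root sits at relative rank 0 (the relrank line above); the tree walk can then use simple bit tests on relrank. The relabeling in isolation, taking lroot as the root, which holds for commutative operations:

    #include <cstdio>

    int main() {
        const int comm_size = 8, lroot = 5;   // illustrative values
        for (int rank = 0; rank < comm_size; ++rank) {
            int relrank = (rank - lroot + comm_size) % comm_size;
            std::printf("rank %d -> relrank %d\n", rank, relrank);  // lroot -> 0
        }
        return 0;
    }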
/dports/science/qmcpack/qmcpack-3.11.0/src/Message/
Communicate.cpp
60 d_mycontext = comm.rank(); in Communicate()
68 int p = FairDivideLow(in_comm.rank(), in_comm.size(), nparts, nplist); //group in Communicate()
70 comm = const_cast<mpi3::communicator&>(in_comm.comm).split(p, in_comm.rank()); in Communicate()
73 d_mycontext = comm.rank(); in Communicate()
92 d_mycontext = comm.rank(); in initialize()
99 if (OHMMS::Controller->rank() == proc) in initialize()
118 d_mycontext = comm.rank(); in initializeAsNodeComm()
160 if (!rank()) in barrier_and_abort()
/dports/math/R-cran-influenceR/influenceR/src/
keyplayer.c
82 int np, rank, new_rank = 0, stop; in keyplayer_driver_omp()
90 #pragma omp parallel shared(fits, allsets, new_rank, g, np, stop) private(rank, start, fullstart) in keyplayer_driver_omp()
97 rank = omp_get_thread_num(); in keyplayer_driver_omp()
99 if (rank == 0) { in keyplayer_driver_omp()
111 int *s = &allsets[rank * n]; in keyplayer_driver_omp()
115 double *fit = &fits[rank]; in keyplayer_driver_omp()
151 if (rank == 0) { in keyplayer_driver_omp()
169 if (rank != new_rank) { in keyplayer_driver_omp()
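keyplayer.c uses the OpenMP analogue of an MPI rank: each thread gets its id from omp_get_thread_num() and indexes its own slice of shared arrays with it. A minimal sketch of that pattern:

    #include <omp.h>
    #include <cstdio>

    int main() {
        #pragma omp parallel
        {
            int rank = omp_get_thread_num();      // this thread's "rank"
            int np = omp_get_num_threads();
            if (rank == 0)
                std::printf("%d threads\n", np);  // thread 0 does setup work
            #pragma omp barrier
            std::printf("thread %d works on its own slice\n", rank);
        }
        return 0;
    }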
