1 /*
2  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
3  *                         University Research and Technology
4  *                         Corporation.  All rights reserved.
5  * Copyright (c) 2004-2005 The University of Tennessee and The University
6  *                         of Tennessee Research Foundation.  All rights
7  *                         reserved.
8  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
9  *                         University of Stuttgart.  All rights reserved.
10  * Copyright (c) 2004-2005 The Regents of the University of California.
11  *                         All rights reserved.
12  * Copyright (c) 2015      Research Organization for Information Science
13  *                         and Technology (RIST). All rights reserved.
14  * Copyright (c) 2015      Los Alamos National Security, LLC. All rights
15  *                         reserved.
16  * Copyright (c) 2017      FUJITSU LIMITED.  All rights reserved.
17  * $COPYRIGHT$
18  *
19  * Additional copyrights may follow
20  *
21  * $HEADER$
22  *
23  *
24  * This file is almost a complete re-write for Open MPI compared to the
25  * original mpiJava package. Its license and copyright are listed below.
26  * See <path to ompi/mpi/java/README> for more information.
27  *
28  *
29  *  Licensed under the Apache License, Version 2.0 (the "License");
30  *  you may not use this file except in compliance with the License.
31  *  You may obtain a copy of the License at
32  *
33  *     http://www.apache.org/licenses/LICENSE-2.0
34  *
35  *  Unless required by applicable law or agreed to in writing, software
36  *  distributed under the License is distributed on an "AS IS" BASIS,
37  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
38  *  See the License for the specific language governing permissions and
39  *  limitations under the License.
40  *
41  *
42  * File         : Comm.java
43  * Author       : Sang Lim, Sung-Hoon Ko, Xinying Li, Bryan Carpenter
44  * Created      : Thu Apr  9 12:22:15 1998
45  * Revision     : $Revision: 1.20 $
46  * Updated      : $Date: 2001/08/07 16:36:25 $
47  * Copyright: Northeast Parallel Architectures Center
48  *            at Syracuse University 1998
49  *
50  *
51  *
52  * IMPLEMENTATION DETAILS
53  *
54  * All methods whose buffers can be either direct or non-direct take
55  * a companion boolean argument 'db' which is true if the buffer is direct.
56  * For example, if the buffer argument is recvBuf, the companion
57  * argument is 'rdb', indicating whether the receive buffer is direct.
58  *
59  * Checking whether a buffer is direct is faster in Java than in C.
60  */
61 package mpi;
62 
63 import java.nio.*;
64 import static mpi.MPI.assertDirectBuffer;
65 
66 /**
67  * The {@code Comm} class represents communicators.
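 * <p>A minimal usage sketch (illustrative only; it assumes the application
 * has already called {@code MPI.Init} and will later call {@code MPI.Finalize}):
 * <pre>{@code
 * int rank = MPI.COMM_WORLD.getRank();
 * int size = MPI.COMM_WORLD.getSize();
 * System.out.println("Hello from rank " + rank + " of " + size);
 * }</pre>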
68  */
69 public class Comm implements Freeable, Cloneable
70 {
71 	public final static int TYPE_SHARED = 0;
72 	protected final static int SELF  = 1;
73 	protected final static int WORLD = 2;
74 	protected long handle;
75 	private Request request;
76 
77 	private static long nullHandle;
78 
79 	static
80 	{
81 		init();
82 	}
83 
84 	private static native void init();
85 
86 	protected Comm()
87 	{
88 	}
89 
90 	protected Comm(long handle)
91 	{
92 		this.handle = handle;
93 	}
94 
95 	protected Comm(long[] commRequest)
96 	{
97 		handle  = commRequest[0];
98 		request = new Request(commRequest[1]);
99 	}
100 
101 	protected final void setType(int type)
102 	{
103 		getComm(type);
104 	}
105 
106 	private native void getComm(int type);
107 
108 	/**
109 	 * Duplicates this communicator.
110 	 * <p>Java binding of {@code MPI_COMM_DUP}.
111 	 * <p>It is recommended to use {@link #dup} instead of {@link #clone},
112 	 * because the latter can't throw an {@link mpi.MPIException}.
113 	 * @return copy of this communicator
114 	 */
115 	@Override public Comm clone()
116 	{
117 		try
118 		{
119 			return dup();
120 		}
121 		catch(MPIException e)
122 		{
123 			throw new RuntimeException(e.getMessage());
124 		}
125 	}
126 
127 	/**
128 	 * Duplicates this communicator.
129 	 * <p>Java binding of {@code MPI_COMM_DUP}.
130 	 * @return copy of this communicator
131 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
132 	 */
133 	public Comm dup() throws MPIException
134 	{
135 		MPI.check();
136 		return new Comm(dup(handle));
137 	}
138 
139 	protected final native long dup(long comm) throws MPIException;
140 
141 	/**
142 	 * Duplicates this communicator.
143 	 * <p>Java binding of {@code MPI_COMM_IDUP}.
144 	 * <p>The new communicator can't be used before the operation completes.
145 	 * The request object must be obtained by calling {@link #getRequest}.
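	 * <p>Illustrative sketch (assumes {@code comm} is an existing communicator;
	 * the request is then completed with the {@code Request} wait binding,
	 * {@code waitFor} here):
	 * <pre>{@code
	 * Comm copy = comm.iDup();
	 * copy.getRequest().waitFor(); // completes MPI_COMM_IDUP
	 * // 'copy' may only be used from this point on
	 * }</pre>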
146 	 * @return copy of this communicator
147 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
148 	 */
149 	public Comm iDup() throws MPIException
150 	{
151 		MPI.check();
152 		return new Comm(iDup(handle));
153 	}
154 
155 	protected final native long[] iDup(long comm) throws MPIException;
156 
157 	/**
158 	 * Duplicates this communicator with the info object used in the call.
159 	 * <p>Java binding of {@code MPI_COMM_DUP_WITH_INFO}.
160 	 * @param info	info object to associate with the new communicator
161 	 * @return copy of this communicator
162 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
163 	 */
164 	public Comm dupWithInfo(Info info) throws MPIException
165 	{
166 		MPI.check();
167 		return new Comm(dupWithInfo(handle, info.handle));
168 	}
169 
170 	protected final native long dupWithInfo(long comm, long info) throws MPIException;
171 
172 	/**
173 	 * Returns the request associated with this communicator if it was
174 	 * created using {@link #iDup}.
175 	 * @return associated request if this communicator was created
176 	 *         using {@link #iDup}, or null otherwise.
177 	 */
178 	public final Request getRequest()
179 	{
180 		return request;
181 	}
182 
183 	/**
184 	 * Size of group of this communicator.
185 	 * <p>Java binding of the MPI operation {@code MPI_COMM_SIZE}.
186 	 * @return number of processes in the group of this communicator
187 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
188 	 */
189 	public final int getSize() throws MPIException
190 	{
191 		MPI.check();
192 		return getSize(handle);
193 	}
194 
195 	private native int getSize(long comm) throws MPIException;
196 
197 	/**
198 	 * Rank of this process in group of this communicator.
199 	 * <p>Java binding of the MPI operation {@code MPI_COMM_RANK}.
200 	 * @return rank of the calling process in the group of this communicator
201 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
202 	 */
203 	public final int getRank() throws MPIException
204 	{
205 		MPI.check();
206 		return getRank(handle);
207 	}
208 
209 	private native int getRank(long comm) throws MPIException;
210 
211 	/**
212 	 * Compare two communicators.
213 	 * <p>Java binding of the MPI operation {@code MPI_COMM_COMPARE}.
214 	 * @param comm1 first communicator
215 	 * @param comm2 second communicator
216 	 * @return
217 	 * {@code MPI.IDENT} results if {@code comm1} and {@code comm2}
218 	 * are references to the same object (i.e., if {@code comm1 == comm2}).<br>
219 	 * {@code MPI.CONGRUENT} results if the underlying groups are identical
220 	 * but the communicators differ by context.<br>
221 	 * {@code MPI.SIMILAR} results if the underlying groups have the same
222 	 * members in a different rank order and the communicators differ by context.<br>
223 	 * {@code MPI.UNEQUAL} results otherwise.
224 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
225 	 */
226 	public static int compare(Comm comm1, Comm comm2) throws MPIException
227 	{
228 		MPI.check();
229 		return compare(comm1.handle, comm2.handle);
230 	}
231 
232 	private static native int compare(long comm1, long comm2) throws MPIException;
233 
234 	/**
235 	 * Java binding of the MPI operation {@code MPI_COMM_FREE}.
236 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
237 	 */
238 	@Override final public void free() throws MPIException
239 	{
240 		MPI.check();
241 		handle = free(handle);
242 	}
243 
244 	private native long free(long comm) throws MPIException;
245 
246 	/**
247 	 * Test if communicator object is null (has been freed).
248 	 * Java binding of {@code MPI_COMM_NULL}.
249 	 * @return true if the comm object is null, false otherwise
250 	 */
251 	public final boolean isNull()
252 	{
253 		return handle == nullHandle;
254 	}
255 
256 	/**
257 	 * Java binding of {@code MPI_COMM_SET_INFO}.
258 	 * @param info info object
259 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
260 	 */
261 	public final void setInfo(Info info) throws MPIException
262 	{
263 		MPI.check();
264 		setInfo(handle, info.handle);
265 	}
266 
267 	private native void setInfo(long comm, long info) throws MPIException;
268 
269 	/**
270 	 * Java binding of {@code MPI_COMM_GET_INFO}.
271 	 * @return new info object
272 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
273 	 */
274 	public final Info getInfo() throws MPIException
275 	{
276 		MPI.check();
277 		return new Info(getInfo(handle));
278 	}
279 
280 	private native long getInfo(long comm) throws MPIException;
281 
282 	/**
283 	 * Java binding of the MPI operation {@code MPI_COMM_DISCONNECT}.
284 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
285 	 */
286 	public final void disconnect() throws MPIException
287 	{
288 		MPI.check();
289 		handle = disconnect(handle);
290 	}
291 
292 	private native long disconnect(long comm) throws MPIException;
293 
294 	/**
295 	 * Return group associated with a communicator.
296 	 * <p>Java binding of the MPI operation {@code MPI_COMM_GROUP}.
297 	 * @return group corresponding to this communicator
298 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
299 	 */
300 	public final Group getGroup() throws MPIException
301 	{
302 		MPI.check();
303 		return new Group(getGroup(handle));
304 	}
305 
306 	private native long getGroup(long comm);
307 
308 	// Inter-communication
309 
310 	/**
311 	 * Test if this communicator is an inter-communicator.
312 	 * <p>Java binding of the MPI operation {@code MPI_COMM_TEST_INTER}.
313 	 * @return {@code true} if this is an inter-communicator,
314 	 *         {@code false} otherwise
315 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
316 	 */
317 	public final boolean isInter() throws MPIException
318 	{
319 		MPI.check();
320 		return isInter(handle);
321 	}
322 
323 	private native boolean isInter(long comm) throws MPIException;
324 
325 	/**
326 	 * Create an inter-communicator.
327 	 * <p>
328 	 * Java binding of the MPI operation {@code MPI_INTERCOMM_CREATE}.
329 	 * <p>
330 	 * This operation is defined as a method on the "peer communicator",
331 	 * making it analogous to a {@code send} or {@code recv} communication
332 	 * with the remote group leader.
333 	 * @param localComm    local intra-communicator
334 	 * @param localLeader  rank of local group leader in {@code localComm}
335 	 * @param remoteLeader rank of remote group leader in this communicator
336 	 * @param tag          "safe" tag
337 	 * @return new inter-communicator
338 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
339 	 */
340 	public final Intercomm createIntercomm(Comm localComm, int localLeader,
341 			int remoteLeader, int tag)
342 					throws MPIException
343 	{
344 		MPI.check();
345 
346 		return new Intercomm(createIntercomm(handle, localComm.handle,
347 				localLeader, remoteLeader, tag));
348 	}
349 
350 	private native long createIntercomm(
351 			long comm, long localComm, int localLeader,
352 			int remoteLeader, int tag) throws MPIException;
353 
354 	// Blocking Send and Recv
355 
356 	/**
357 	 * Blocking send operation.
358 	 * <p>Java binding of the MPI operation {@code MPI_SEND}.
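	 * <p>For illustration only, a two-process exchange using arrays as buffers
	 * might look like the following (ranks 0 and 1 and tag 99 are arbitrary
	 * choices, not part of this API):
	 * <pre>{@code
	 * int[] data = new int[10];
	 * if (MPI.COMM_WORLD.getRank() == 0) {
	 *     MPI.COMM_WORLD.send(data, 10, MPI.INT, 1, 99);
	 * } else if (MPI.COMM_WORLD.getRank() == 1) {
	 *     Status st = MPI.COMM_WORLD.recv(data, 10, MPI.INT, 0, 99);
	 * }
	 * }</pre>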
359 	 * @param buf   send buffer
360 	 * @param count number of items to send
361 	 * @param type  datatype of each item in send buffer
362 	 * @param dest  rank of destination
363 	 * @param tag   message tag
364 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
365 	 */
366 	public final void send(Object buf, int count, Datatype type, int dest, int tag)
367 			throws MPIException
368 	{
369 		MPI.check();
370 		int off = 0;
371 		boolean db = false;
372 
373 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
374 		{
375 			off = type.getOffset(buf);
376 			buf = ((Buffer)buf).array();
377 		}
378 
379 		send(handle, buf, db, off, count, type.handle, type.baseType, dest, tag);
380 	}
381 
382 	private native void send(
383 			long comm, Object buf, boolean db, int offset, int count,
384 			long type, int baseType, int dest, int tag) throws MPIException;
385 
386 	/**
387 	 * Blocking receive operation.
388 	 * <p>Java binding of the MPI operation {@code MPI_RECV}.
389 	 * @param buf    receive buffer
390 	 * @param count  number of items in receive buffer
391 	 * @param type   datatype of each item in receive buffer
392 	 * @param source rank of source
393 	 * @param tag    message tag
394 	 * @return status object
395 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
396 	 */
397 	public final Status recv(Object buf, int count,
398 			Datatype type, int source, int tag)
399 					throws MPIException
400 	{
401 		MPI.check();
402 		int off = 0;
403 		boolean db = false;
404 
405 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
406 		{
407 			off = type.getOffset(buf);
408 			buf = ((Buffer)buf).array();
409 		}
410 
411 		Status status = new Status();
412 
413 		recv(handle, buf, db, off, count,
414 				type.handle, type.baseType, source, tag, status.data);
415 
416 		return status;
417 	}
418 
419 	private native void recv(
420 			long comm, Object buf, boolean db, int offset, int count,
421 			long type, int basetype, int source, int tag, long[] stat)
422 					throws MPIException;
423 
424 	// Send-Recv
425 
426 	/**
427 	 * Execute a blocking send and receive operation.
428 	 * <p>Java binding of the MPI operation {@code MPI_SENDRECV}.
429 	 * @param sendbuf   send buffer
430 	 * @param sendcount number of items to send
431 	 * @param sendtype  datatype of each item in send buffer
432 	 * @param dest      rank of destination
433 	 * @param sendtag   send tag
434 	 * @param recvbuf   receive buffer
435 	 * @param recvcount number of items in receive buffer
436 	 * @param recvtype  datatype of each item in receive buffer
437 	 * @param source    rank of source
438 	 * @param recvtag   receive tag
439 	 * @return status object
440 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
441 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
442 	 * @see mpi.Comm#recv(Object, int, Datatype, int, int)
443 	 */
444 	public final Status sendRecv(
445 			Object sendbuf, int sendcount, Datatype sendtype, int dest,   int sendtag,
446 			Object recvbuf, int recvcount, Datatype recvtype, int source, int recvtag)
447 					throws MPIException
448 	{
449 		MPI.check();
450 
451 		int sendoff = 0,
452 				recvoff = 0;
453 
454 		boolean sdb = false,
455 				rdb = false;
456 
457 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
458 		{
459 			sendoff = sendtype.getOffset(sendbuf);
460 			sendbuf = ((Buffer)sendbuf).array();
461 		}
462 
463 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
464 		{
465 			recvoff = recvtype.getOffset(recvbuf);
466 			recvbuf = ((Buffer)recvbuf).array();
467 		}
468 
469 		Status status = new Status();
470 
471 		sendRecv(handle, sendbuf, sdb, sendoff, sendcount,
472 				sendtype.handle, sendtype.baseType, dest, sendtag,
473 				recvbuf, rdb, recvoff, recvcount,
474 				recvtype.handle, recvtype.baseType, source, recvtag, status.data);
475 
476 		return status;
477 	}
478 
479 	private native void sendRecv(
480 			long comm, Object sbuf, boolean sdb, int soffset, int scount,
481 			long sType, int sBaseType, int dest, int stag,
482 			Object rbuf, boolean rdb, int roffset, int rcount,
483 			long rType, int rBaseType, int source, int rtag,
484 			long[] stat) throws MPIException;
485 
486 	/**
487 	 * Execute a blocking send and receive operation,
488 	 * receiving message into send buffer.
489 	 * <p>Java binding of the MPI operation {@code MPI_SENDRECV_REPLACE}.
490 	 * @param buf     buffer
491 	 * @param count   number of items to send
492 	 * @param type    datatype of each item in buffer
493 	 * @param dest    rank of destination
494 	 * @param sendtag send tag
495 	 * @param source  rank of source
496 	 * @param recvtag receive tag
497 	 * @return status object
498 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
499 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
500 	 * @see mpi.Comm#recv(Object, int, Datatype, int, int)
501 	 */
502 	public final Status sendRecvReplace(
503 			Object buf, int count, Datatype type,
504 			int dest, int sendtag, int source, int recvtag)
505 					throws MPIException
506 	{
507 		MPI.check();
508 		int off = 0;
509 		boolean db = false;
510 
511 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
512 		{
513 			off = type.getOffset(buf);
514 			buf = ((Buffer)buf).array();
515 		}
516 
517 		Status status = new Status();
518 
519 		sendRecvReplace(handle, buf, db, off, count, type.handle, type.baseType,
520 				dest, sendtag, source, recvtag, status.data);
521 
522 		return status;
523 	}
524 
525 	private native void sendRecvReplace(
526 			long comm, Object buf, boolean db, int offset, int count,
527 			long type, int baseType, int dest, int stag,
528 			int source, int rtag, long[] stat) throws MPIException;
529 
530 	// Communication Modes
531 
532 	/**
533 	 * Send in buffered mode.
534 	 * <p>Java binding of the MPI operation {@code MPI_BSEND}.
535 	 * @param buf   send buffer
536 	 * @param count number of items to send
537 	 * @param type  datatype of each item in send buffer
538 	 * @param dest  rank of destination
539 	 * @param tag   message tag
540 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
541 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
542 	 */
543 	public final void bSend(Object buf, int count, Datatype type, int dest, int tag)
544 			throws MPIException
545 	{
546 		MPI.check();
547 		int off = 0;
548 		boolean db = false;
549 
550 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
551 		{
552 			off = type.getOffset(buf);
553 			buf = ((Buffer)buf).array();
554 		}
555 
556 		bSend(handle, buf, db, off, count, type.handle, type.baseType, dest, tag);
557 	}
558 
559 	private native void bSend(
560 			long comm, Object buf, boolean db, int offset, int count,
561 			long type, int baseType, int dest, int tag) throws MPIException;
562 
563 	/**
564 	 * Send in synchronous mode.
565 	 * <p>Java binding of the MPI operation {@code MPI_SSEND}.
566 	 * @param buf   send buffer
567 	 * @param count number of items to send
568 	 * @param type  datatype of each item in send buffer
569 	 * @param dest  rank of destination
570 	 * @param tag   message tag
571 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
572 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
573 	 */
574 	public final void sSend(Object buf, int count, Datatype type, int dest, int tag)
575 			throws MPIException
576 	{
577 		MPI.check();
578 		int off = 0;
579 		boolean db = false;
580 
581 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
582 		{
583 			off = type.getOffset(buf);
584 			buf = ((Buffer)buf).array();
585 		}
586 
587 		sSend(handle, buf, db, off, count, type.handle, type.baseType, dest, tag);
588 	}
589 
590 	private native void sSend(
591 			long comm, Object buf, boolean db, int offset, int count,
592 			long type, int baseType, int dest, int tag) throws MPIException;
593 
594 	/**
595 	 * Send in ready mode.
596 	 * <p>Java binding of the MPI operation {@code MPI_RSEND}.
597 	 * @param buf   send buffer
598 	 * @param count number of items to send
599 	 * @param type  datatype of each item in send buffer
600 	 * @param dest  rank of destination
601 	 * @param tag   message tag
602 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
603 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
604 	 */
605 	public final void rSend(Object buf, int count, Datatype type, int dest, int tag)
606 			throws MPIException
607 	{
608 		MPI.check();
609 		int off = 0;
610 		boolean db = false;
611 
612 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
613 		{
614 			off = type.getOffset(buf);
615 			buf = ((Buffer)buf).array();
616 		}
617 
618 		rSend(handle, buf, db, off, count, type.handle, type.baseType, dest, tag);
619 	}
620 
621 	private native void rSend(
622 			long comm, Object buf, boolean db, int offset, int count,
623 			long type, int baseType, int dest, int tag) throws MPIException;
624 
625 	// Nonblocking communication
626 
627 	/**
628 	 * Start a standard mode, nonblocking send.
629 	 * <p>Java binding of the MPI operation {@code MPI_ISEND}.
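	 * <p>Nonblocking sketch (direct buffers are required; {@code MPI.newIntBuffer}
	 * is assumed to be the direct-buffer factory of the {@code MPI} class, and
	 * {@code peer} is a hypothetical partner rank):
	 * <pre>{@code
	 * IntBuffer sbuf = MPI.newIntBuffer(4);
	 * IntBuffer rbuf = MPI.newIntBuffer(4);
	 * Request rreq = MPI.COMM_WORLD.iRecv(rbuf, 4, MPI.INT, peer, 0);
	 * Request sreq = MPI.COMM_WORLD.iSend(sbuf, 4, MPI.INT, peer, 0);
	 * sreq.waitFor();
	 * rreq.waitFor();
	 * }</pre>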
630 	 * @param buf   send buffer
631 	 * @param count number of items to send
632 	 * @param type  datatype of each item in send buffer
633 	 * @param dest  rank of destination
634 	 * @param tag   message tag
635 	 * @return communication request
636 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
637 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
638 	 */
639 	public final Request iSend(Buffer buf, int count,
640 			Datatype type, int dest, int tag)
641 					throws MPIException
642 	{
643 		MPI.check();
644 		assertDirectBuffer(buf);
645 		Request req = new Request(iSend(handle, buf, count, type.handle, dest, tag));
646 		req.addSendBufRef(buf);
647 		return req;
648 	}
649 
650 	private native long iSend(
651 			long comm, Buffer buf, int count, long type, int dest, int tag)
652 					throws MPIException;
653 
654 	/**
655 	 * Start a buffered mode, nonblocking send.
656 	 * <p>Java binding of the MPI operation {@code MPI_IBSEND}.
657 	 * @param buf   send buffer
658 	 * @param count number of items to send
659 	 * @param type  datatype of each item in send buffer
660 	 * @param dest  rank of destination
661 	 * @param tag   message tag
662 	 * @return communication request
663 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
664 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
665 	 */
666 	public final Request ibSend(Buffer buf, int count,
667 			Datatype type, int dest, int tag)
668 					throws MPIException
669 	{
670 		MPI.check();
671 		assertDirectBuffer(buf);
672 		Request req = new Request(ibSend(handle, buf, count, type.handle, dest, tag));
673 		req.addSendBufRef(buf);
674 		return req;
675 	}
676 
677 	private native long ibSend(
678 			long comm, Buffer buf, int count, long type, int dest, int tag)
679 					throws MPIException;
680 
681 	/**
682 	 * Start a synchronous mode, nonblocking send.
683 	 * <p>Java binding of the MPI operation {@code MPI_ISSEND}.
684 	 * @param buf   send buffer
685 	 * @param count number of items to send
686 	 * @param type  datatype of each item in send buffer
687 	 * @param dest  rank of destination
688 	 * @param tag   message tag
689 	 * @return communication request
690 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
691 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
692 	 */
693 	public final Request isSend(Buffer buf, int count,
694 			Datatype type, int dest, int tag)
695 					throws MPIException
696 	{
697 		MPI.check();
698 		assertDirectBuffer(buf);
699 		Request req = new Request(isSend(handle, buf, count, type.handle, dest, tag));
700 		req.addSendBufRef(buf);
701 		return req;
702 	}
703 
704 	private native long isSend(
705 			long comm, Buffer buf, int count, long type, int dest, int tag)
706 					throws MPIException;
707 
708 	/**
709 	 * Start a ready mode, nonblocking send.
710 	 * <p>Java binding of the MPI operation {@code MPI_IRSEND}.
711 	 * @param buf   send buffer
712 	 * @param count number of items to send
713 	 * @param type  datatype of each item in send buffer
714 	 * @param dest  rank of destination
715 	 * @param tag   message tag
716 	 * @return communication request
717 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
718 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
719 	 */
720 	public final Request irSend(Buffer buf, int count,
721 			Datatype type, int dest, int tag)
722 					throws MPIException
723 	{
724 		MPI.check();
725 		assertDirectBuffer(buf);
726 		Request req = new Request(irSend(handle, buf, count, type.handle, dest, tag));
727 		req.addSendBufRef(buf);
728 		return req;
729 	}
730 
731 	private native long irSend(
732 			long comm, Buffer buf, int count, long type, int dest, int tag)
733 					throws MPIException;
734 
735 	/**
736 	 * Start a nonblocking receive.
737 	 * <p>Java binding of the MPI operation {@code MPI_IRECV}.
738 	 * @param buf    receive buffer
739 	 * @param count  number of items in receive buffer
740 	 * @param type   datatype of each item in receive buffer
741 	 * @param source rank of source
742 	 * @param tag    message tag
743 	 * @return communication request
744 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
745 	 * @see mpi.Comm#recv(Object, int, Datatype, int, int)
746 	 */
747 	public final Request iRecv(Buffer buf, int count,
748 			Datatype type, int source, int tag)
749 					throws MPIException
750 	{
751 		MPI.check();
752 		assertDirectBuffer(buf);
753 		Request req = new Request(iRecv(handle, buf, count, type.handle, source, tag));
754 		req.addRecvBufRef(buf);
755 		return req;
756 	}
757 
758 	private native long iRecv(
759 			long comm, Buffer buf, int count, long type, int source, int tag)
760 					throws MPIException;
761 
762 
763 	// Persistent communication requests
764 
765 	/**
766 	 * Creates a persistent communication request for a standard mode send.
767 	 * <p>Java binding of the MPI operation {@code MPI_SEND_INIT}.
768 	 * @param buf   send buffer
769 	 * @param count number of items to send
770 	 * @param type  datatype of each item in send buffer
771 	 * @param dest  rank of destination
772 	 * @param tag   message tag
773 	 * @return persistent communication request
774 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
775 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
776 	 */
777 	public final Prequest sendInit(Buffer buf, int count,
778 			Datatype type, int dest, int tag)
779 					throws MPIException
780 	{
781 		MPI.check();
782 		assertDirectBuffer(buf);
783 		Prequest preq = new Prequest(sendInit(handle, buf, count, type.handle, dest, tag));
784 		preq.addSendBufRef(buf);
785 		return preq;
786 	}
787 
788 	private native long sendInit(
789 			long comm, Buffer buf, int count, long type, int dest, int tag)
790 					throws MPIException;
791 
792 	/**
793 	 * Creates a persistent communication request for a buffered mode send.
794 	 * <p>Java binding of the MPI operation {@code MPI_BSEND_INIT}.
795 	 * @param buf   send buffer
796 	 * @param count number of items to send
797 	 * @param type  datatype of each item in send buffer
798 	 * @param dest  rank of destination
799 	 * @param tag   message tag
800 	 * @return persistent communication request
801 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
802 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
803 	 */
804 	public final Prequest bSendInit(Buffer buf, int count,
805 			Datatype type, int dest, int tag)
806 					throws MPIException
807 	{
808 		MPI.check();
809 		assertDirectBuffer(buf);
810 		Prequest preq = new Prequest(bSendInit(handle, buf, count, type.handle, dest, tag));
811 		preq.addSendBufRef(buf);
812 		return preq;
813 	}
814 
815 	private native long bSendInit(
816 			long comm, Buffer buf, int count, long type, int dest, int tag)
817 					throws MPIException;
818 
819 	/**
820 	 * Creates a persistent communication request for a synchronous mode send.
821 	 * <p>Java binding of the MPI operation {@code MPI_SSEND_INIT}.
822 	 * @param buf   send buffer
823 	 * @param count number of items to send
824 	 * @param type  datatype of each item in send buffer
825 	 * @param dest  rank of destination
826 	 * @param tag   message tag
827 	 * @return persistent communication request
828 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
829 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
830 	 */
831 	public final Prequest sSendInit(Buffer buf, int count,
832 			Datatype type, int dest, int tag)
833 					throws MPIException
834 	{
835 		MPI.check();
836 		assertDirectBuffer(buf);
837 		Prequest preq = new Prequest(sSendInit(handle, buf, count, type.handle, dest, tag));
838 		preq.addSendBufRef(buf);
839 		return preq;
840 	}
841 
842 	private native long sSendInit(
843 			long comm, Buffer buf, int count, long type, int dest, int tag)
844 					throws MPIException;
845 
846 	/**
847 	 * Creates a persistent communication request for a ready mode send.
848 	 * <p>Java binding of the MPI operation {@code MPI_RSEND_INIT}.
849 	 * @param buf   send buffer
850 	 * @param count number of items to send
851 	 * @param type  datatype of each item in send buffer
852 	 * @param dest  rank of destination
853 	 * @param tag   message tag
854 	 * @return persistent communication request
855 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
856 	 * @see mpi.Comm#send(Object, int, Datatype, int, int)
857 	 */
858 	public final Prequest rSendInit(Buffer buf, int count,
859 			Datatype type, int dest, int tag)
860 					throws MPIException
861 	{
862 		MPI.check();
863 		assertDirectBuffer(buf);
864 		Prequest preq = new Prequest(rSendInit(handle, buf, count, type.handle, dest, tag));
865 		preq.addSendBufRef(buf);
866 		return preq;
867 	}
868 
869 	private native long rSendInit(
870 			long comm, Buffer buf, int count, long type, int dest, int tag)
871 					throws MPIException;
872 
873 	/**
874 	 * Creates a persistent communication request for a receive operation.
875 	 * <p>Java binding of the MPI operation {@code MPI_RECV_INIT}.
876 	 * @param buf    receive buffer
877 	 * @param count  number of items in receive buffer
878 	 * @param type   datatype of each item in receive buffer
879 	 * @param source rank of source
880 	 * @param tag    message tag
881 	 * @return communication request
882 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
883 	 * @see mpi.Comm#recv(Object, int, Datatype, int, int)
884 	 */
885 	public final Prequest recvInit(Buffer buf, int count,
886 			Datatype type, int source, int tag)
887 					throws MPIException
888 	{
889 		MPI.check();
890 		assertDirectBuffer(buf);
891 		Prequest preq = new Prequest(recvInit(handle, buf, count, type.handle, source, tag));
892 		preq.addRecvBufRef(buf);
893 		return preq;
894 	}
895 
896 	private native long recvInit(
897 			long comm, Buffer buf, int count, long type, int source, int tag)
898 					throws MPIException;
899 
900 	// Pack and Unpack
901 
902 	/**
903 	 * Packs the message in the send buffer {@code inbuf} into the space
904 	 * specified in {@code outbuf}.
905 	 * <p>
906 	 * Java binding of the MPI operation {@code MPI_PACK}.
907 	 * <p>
908 	 * The return value is the output value of {@code position}: the
909 	 * initial value incremented by the number of bytes written.
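	 * <p>Sketch of a pack/unpack round trip (assumes {@code comm} is an existing
	 * communicator; {@code packSize} is used to size the byte buffer):
	 * <pre>{@code
	 * int[] values = {1, 2, 3};
	 * byte[] packed = new byte[comm.packSize(3, MPI.INT)];
	 * int pos = comm.pack(values, 3, MPI.INT, packed, 0);
	 * int[] restored = new int[3];
	 * comm.unpack(packed, 0, restored, 3, MPI.INT);
	 * }</pre>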
910 	 * @param inbuf    input buffer
911 	 * @param incount  number of items in input buffer
912 	 * @param type     datatype of each item in input buffer
913 	 * @param outbuf   output buffer
914 	 * @param position initial position in output buffer
915 	 * @return final position in output buffer
916 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
917 	 */
918 	public final int pack(Object inbuf, int incount, Datatype type,
919 			byte[] outbuf, int position)
920 					throws MPIException
921 	{
922 		MPI.check();
923 		int offset = 0;
924 		boolean indb = false;
925 
926 		if(inbuf instanceof Buffer && !(indb = ((Buffer)inbuf).isDirect()))
927 		{
928 			offset = type.getOffset(inbuf);
929 			inbuf  = ((Buffer)inbuf).array();
930 		}
931 
932 		return pack(handle, inbuf, indb, offset, incount,
933 				type.handle, outbuf, position);
934 	}
935 
936 	private native int pack(
937 			long comm, Object inbuf, boolean indb, int offset, int incount,
938 			long type, byte[] outbuf, int position) throws MPIException;
939 
940 	/**
941 	 * Unpacks the message in the receive buffer {@code inbuf} into the space
942 	 * specified in {@code outbuf}.
943 	 * <p>
944 	 * Java binding of the MPI operation {@code MPI_UNPACK}.
945 	 * <p>
946 	 * The return value is the output value of {@code position}: the
947 	 * initial value incremented by the number of bytes read.
948 	 * @param inbuf    input buffer
949 	 * @param position initial position in input buffer
950 	 * @param outbuf   output buffer
951 	 * @param outcount number of items in output buffer
952 	 * @param type     datatype of each item in output buffer
953 	 * @return final position in input buffer
954 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
955 	 */
956 	public final int unpack(byte[] inbuf, int position,
957 			Object outbuf, int outcount, Datatype type)
958 					throws MPIException
959 	{
960 		MPI.check();
961 		int offset = 0;
962 		boolean outdb = false;
963 
964 		if(outbuf instanceof Buffer && !(outdb = ((Buffer)outbuf).isDirect()))
965 		{
966 			offset = type.getOffset(outbuf);
967 			outbuf = ((Buffer)outbuf).array();
968 		}
969 
970 		return unpack(handle, inbuf, position, outbuf, outdb,
971 				offset, outcount, type.handle);
972 	}
973 
974 	private native int unpack(
975 			long comm, byte[] inbuf, int position, Object outbuf, boolean outdb,
976 			int offset, int outcount, long type) throws MPIException;
977 
978 	/**
979 	 * Returns an upper bound on the increment of {@code position} effected
980 	 * by {@code pack}.
981 	 * <p>Java binding of the MPI operation {@code MPI_PACK_SIZE}.
982 	 * @param incount number of items in input buffer
983 	 * @param type    datatype of each item in input buffer
984 	 * @return upper bound on size of packed message
985 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
986 	 */
987 	public final int packSize(int incount, Datatype type) throws MPIException
988 	{
989 		MPI.check();
990 		return packSize(handle, incount, type.handle);
991 	}
992 
993 	private native int packSize(long comm, int incount, long type)
994 			throws MPIException;
995 
996 	// Probe and Cancel
997 
998 	/**
999 	 * Check if there is an incoming message matching the pattern specified.
1000 	 * <p>Java binding of the MPI operation {@code MPI_IPROBE}.
1001 	 * <p>If such a message is currently available, a status object similar
1002 	 * to the return value of a matching {@code recv} operation is returned.
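	 * <p>Illustrative polling sketch (assumes {@code comm} is an existing
	 * communicator and that the probed message carries {@code int} data):
	 * <pre>{@code
	 * Status s = comm.iProbe(MPI.ANY_SOURCE, MPI.ANY_TAG);
	 * if (s != null) {
	 *     int count = s.getCount(MPI.INT);
	 *     int[] msg = new int[count];
	 *     comm.recv(msg, count, MPI.INT, s.getSource(), s.getTag());
	 * }
	 * }</pre>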
1003 	 * @param source rank of source
1004 	 * @param tag    message tag
1005 	 * @return status object if such a message is currently available,
1006 	 *         {@code null} otherwise.
1007 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1008 	 */
1009 	public final Status iProbe(int source, int tag) throws MPIException
1010 	{
1011 		MPI.check();
1012 		return iProbe(handle, source, tag);
1013 	}
1014 
1015 	private native Status iProbe(long comm, int source, int tag)
1016 			throws MPIException;
1017 
1018 	/**
1019 	 * Wait until there is an incoming message matching the pattern specified.
1020 	 * <p>Java binding of the MPI operation {@code MPI_PROBE}.
1021 	 * <p>Returns a status object similar to the return value of a matching
1022 	 * {@code recv} operation.
1023 	 * @param source rank of source
1024 	 * @param tag    message tag
1025 	 * @return status object
1026 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1027 	 */
1028 	public final Status probe(int source, int tag) throws MPIException
1029 	{
1030 		MPI.check();
1031 		Status status = new Status();
1032 		probe(handle, source, tag, status.data);
1033 		return status;
1034 	}
1035 
1036 	private native void probe(long comm, int source, int tag, long[] stat)
1037 			throws MPIException;
1038 
1039 	// Caching
1040 
1041 	/**
1042 	 * Create a new attribute key.
1043 	 * <p>Java binding of the MPI operation {@code MPI_COMM_CREATE_KEYVAL}.
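	 * <p>Sketch of the attribute-caching round trip (the cached value here is an
	 * {@code Integer}, chosen only for illustration; {@code comm} is any communicator):
	 * <pre>{@code
	 * int key = Comm.createKeyval();
	 * comm.setAttr(key, Integer.valueOf(42));
	 * Object cached = comm.getAttr(key); // the stored value, or null if none
	 * comm.deleteAttr(key);
	 * Comm.freeKeyval(key);
	 * }</pre>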
1044 	 * @return attribute key for future access
1045 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1046 	 */
1047 	public static int createKeyval() throws MPIException
1048 	{
1049 		MPI.check();
1050 		return createKeyval_jni();
1051 	}
1052 
1053 	private static native int createKeyval_jni() throws MPIException;
1054 
1055 	/**
1056 	 * Frees an attribute key for communicators.
1057 	 * <p>Java binding of the MPI operation {@code MPI_COMM_FREE_KEYVAL}.
1058 	 * @param keyval attribute key
1059 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1060 	 */
1061 	public static void freeKeyval(int keyval) throws MPIException
1062 	{
1063 		MPI.check();
1064 		freeKeyval_jni(keyval);
1065 	}
1066 
1067 	private static native void freeKeyval_jni(int keyval) throws MPIException;
1068 
1069 	/**
1070 	 * Stores attribute value associated with a key.
1071 	 * <p>Java binding of the MPI operation {@code MPI_COMM_SET_ATTR}.
1072 	 * @param keyval attribute key
1073 	 * @param value  attribute value
1074 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1075 	 */
1076 	public final void setAttr(int keyval, Object value) throws MPIException
1077 	{
1078 		MPI.check();
1079 		setAttr(handle, keyval, MPI.attrSet(value));
1080 	}
1081 
1082 	private native void setAttr(long comm, int keyval, byte[] value)
1083 			throws MPIException;
1084 
1085 	/**
1086 	 * Retrieves attribute value by key.
1087 	 * <p>Java binding of the MPI operation {@code MPI_COMM_GET_ATTR}.
1088 	 * @param keyval attribute key
1089 	 * @return attribute value or null if no attribute is associated with the key.
1090 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1091 	 */
1092 	public final Object getAttr(int keyval) throws MPIException
1093 	{
1094 		MPI.check();
1095 
1096 		if( keyval == MPI.TAG_UB       ||
1097 				keyval == MPI.HOST         ||
1098 				keyval == MPI.IO           ||
1099 				keyval == MPI.APPNUM       ||
1100 				keyval == MPI.LASTUSEDCODE ||
1101 				keyval == MPI.UNIVERSE_SIZE)
1102 		{
1103 			return getAttr_predefined(handle, keyval);
1104 		}
1105 		else if(keyval == MPI.WTIME_IS_GLOBAL)
1106 		{
1107 			Integer value = (Integer)getAttr_predefined(handle, keyval);
1108 			return value==null ? null : value.intValue() != 0;
1109 		}
1110 		else
1111 		{
1112 			return MPI.attrGet(getAttr(handle, keyval));
1113 		}
1114 	}
1115 
1116 	private native Object getAttr_predefined(long comm, int keyval)
1117 			throws MPIException;
1118 
1119 	private native byte[] getAttr(long comm, int keyval) throws MPIException;
1120 
1121 	/**
1122 	 * Deletes an attribute value associated with a key on a communicator.
1123 	 * <p>Java binding of the MPI operation {@code MPI_COMM_DELETE_ATTR}.
1124 	 * @param keyval attribute key
1125 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1126 	 */
1127 	public final void deleteAttr(int keyval) throws MPIException
1128 	{
1129 		MPI.check();
1130 		deleteAttr(handle, keyval);
1131 	}
1132 
1133 	private native void deleteAttr(long comm, int keyval) throws MPIException;
1134 
1135 	// Process Topologies
1136 
1137 	/**
1138 	 * Returns the type of topology associated with the communicator.
1139 	 * <p>Java binding of the MPI operation {@code MPI_TOPO_TEST}.
1140 	 * <p>The return value will be one of {@code MPI.GRAPH}, {@code MPI.CART}
1141 	 * or {@code MPI.UNDEFINED}.
1142 	 * @return topology type of communicator
1143 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1144 	 */
1145 	public final int getTopology() throws MPIException
1146 	{
1147 		MPI.check();
1148 		return getTopology(handle);
1149 	}
1150 
1151 	private native int getTopology(long comm) throws MPIException;
1152 
1153 	// Environmental Management
1154 
1155 	/**
1156 	 * Abort MPI.
1157 	 * <p>Java binding of the MPI operation {@code MPI_ABORT}.
1158 	 * @param errorcode error code for Unix or POSIX environments
1159 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1160 	 */
1161 	public final void abort(int errorcode) throws MPIException
1162 	{
1163 		MPI.check();
1164 		abort(handle, errorcode);
1165 	}
1166 
1167 	private native void abort(long comm, int errorcode) throws MPIException;
1168 
1169 	// Error handler
1170 
1171 	/**
1172 	 * Associates a new error handler with communicator at the calling process.
1173 	 * <p>Java binding of the MPI operation {@code MPI_ERRHANDLER_SET}.
1174 	 * @param errhandler new MPI error handler for communicator
1175 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1176 	 */
1177 	public final void setErrhandler(Errhandler errhandler) throws MPIException
1178 	{
1179 		MPI.check();
1180 		setErrhandler(handle, errhandler.handle);
1181 	}
1182 
1183 	private native void setErrhandler(long comm, long errhandler)
1184 			throws MPIException;
1185 
1186 	/**
1187 	 * Returns the error handler currently associated with the communicator.
1188 	 * <p>Java binding of the MPI operation {@code MPI_ERRHANDLER_GET}.
1189 	 * @return MPI error handler currently associated with communicator
1190 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1191 	 */
1192 	public final Errhandler getErrhandler() throws MPIException
1193 	{
1194 		MPI.check();
1195 		return new Errhandler(getErrhandler(handle));
1196 	}
1197 
1198 	private native long getErrhandler(long comm);
1199 
1200 	/**
1201 	 * Calls the error handler currently associated with the communicator.
1202 	 * <p>Java binding of the MPI operation {@code MPI_COMM_CALL_ERRHANDLER}.
1203 	 * @param errorCode error code
1204 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1205 	 */
1206 	public void callErrhandler(int errorCode) throws MPIException
1207 	{
1208 		callErrhandler(handle, errorCode);
1209 	}
1210 
1211 	private native void callErrhandler(long handle, int errorCode)
1212 			throws MPIException;
1213 
1214 	// Collective Communication
1215 
1216 	/**
1217 	 * A call to {@code barrier} blocks the caller until all processes
1218 	 * in the group have called it.
1219 	 * <p>Java binding of the MPI operation {@code MPI_BARRIER}.
1220 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1221 	 */
1222 	public final void barrier() throws MPIException
1223 	{
1224 		MPI.check();
1225 		barrier(handle);
1226 	}
1227 
1228 	private native void barrier(long comm) throws MPIException;
1229 
1230 	/**
1231 	 * Nonblocking barrier synchronization.
1232 	 * <p>Java binding of the MPI operation {@code MPI_IBARRIER}.
1233 	 * @return communication request
1234 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1235 	 */
1236 	public final Request iBarrier() throws MPIException
1237 	{
1238 		MPI.check();
1239 		return new Request(iBarrier(handle));
1240 	}
1241 
1242 	private native long iBarrier(long comm) throws MPIException;
1243 
1244 	/**
1245 	 * Broadcast a message from the process with rank {@code root}
1246 	 * to all processes of the group.
1247 	 * <p>Java binding of the MPI operation {@code MPI_BCAST}.
1248 	 * @param buf   buffer
1249 	 * @param count number of items in buffer
1250 	 * @param type  datatype of each item in buffer
1251 	 * @param root  rank of broadcast root
1252 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1253 	 */
1254 	public final void bcast(Object buf, int count, Datatype type, int root)
1255 			throws MPIException
1256 	{
1257 		MPI.check();
1258 		int off = 0;
1259 		boolean db = false;
1260 
1261 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
1262 		{
1263 			off = type.getOffset(buf);
1264 			buf = ((Buffer)buf).array();
1265 		}
1266 
1267 		bcast(handle, buf, db, off, count, type.handle, type.baseType, root);
1268 	}
1269 
1270 	private native void bcast(
1271 			long comm, Object buf, boolean db, int offset, int count,
1272 			long type, int basetype, int root) throws MPIException;
1273 
1274 	/**
1275 	 * Broadcast a message from the process with rank {@code root}
1276 	 * to all processes of the group.
1277 	 * <p>Java binding of the MPI operation {@code MPI_IBCAST}.
1278 	 * @param buf   buffer
1279 	 * @param count number of items in buffer
1280 	 * @param type  datatype of each item in buffer
1281 	 * @param root  rank of broadcast root
1282 	 * @return communication request
1283 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1284 	 */
1285 	public final Request iBcast(Buffer buf, int count, Datatype type, int root)
1286 			throws MPIException
1287 	{
1288 		MPI.check();
1289 		assertDirectBuffer(buf);
1290 		Request req = new Request(iBcast(handle, buf, count, type.handle, root));
1291 		req.addSendBufRef(buf);
1292 		return req;
1293 	}
1294 
1295 	private native long iBcast(
1296 			long comm, Buffer buf, int count, long type, int root)
1297 					throws MPIException;
1298 
1299 	/**
1300 	 * Each process sends the contents of its send buffer to the root process.
1301 	 * <p>Java binding of the MPI operation {@code MPI_GATHER}.
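	 * <p>Illustrative sketch: every rank contributes one {@code int} and rank 0
	 * collects them (array sizes and the root rank are example choices, and
	 * {@code comm} is any existing communicator):
	 * <pre>{@code
	 * int[] mine = { comm.getRank() };
	 * int[] all  = new int[comm.getSize()];
	 * comm.gather(mine, 1, MPI.INT, all, 1, MPI.INT, 0);
	 * }</pre>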
1302 	 * @param sendbuf   send buffer
1303 	 * @param sendcount number of items to send
1304 	 * @param sendtype  datatype of each item in send buffer
1305 	 * @param recvbuf   receive buffer
1306 	 * @param recvcount number of items to receive
1307 	 * @param recvtype  datatype of each item in receive buffer
1308 	 * @param root      rank of receiving process
1309 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1310 	 */
1311 	public final void gather(
1312 			Object sendbuf, int sendcount, Datatype sendtype,
1313 			Object recvbuf, int recvcount, Datatype recvtype, int root)
1314 					throws MPIException
1315 	{
1316 		MPI.check();
1317 
1318 		int sendoff = 0,
1319 				recvoff = 0;
1320 
1321 		boolean sdb = false,
1322 				rdb = false;
1323 
1324 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
1325 		{
1326 			sendoff = sendtype.getOffset(sendbuf);
1327 			sendbuf = ((Buffer)sendbuf).array();
1328 		}
1329 
1330 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
1331 		{
1332 			recvoff = recvtype.getOffset(recvbuf);
1333 			recvbuf = ((Buffer)recvbuf).array();
1334 		}
1335 
1336 		gather(handle, sendbuf, sdb, sendoff, sendcount,
1337 				sendtype.handle, sendtype.baseType,
1338 				recvbuf, rdb, recvoff, recvcount,
1339 				recvtype.handle, recvtype.baseType, root);
1340 	}
1341 
1342 	/**
1343 	 * Each process sends the contents of its send buffer to the root process.
1344 	 * <p>Java binding of the MPI operation {@code MPI_GATHER}
1345 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
1346 	 * The buffer is used by the root process to receive data,
1347 	 * and it is used by the non-root processes to send data.
1348 	 * @param buf   buffer
1349 	 * @param count number of items to send/receive
1350 	 * @param type  datatype of each item in buffer
1351 	 * @param root  rank of receiving process
1352 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1353 	 */
1354 	public final void gather(Object buf, int count, Datatype type, int root)
1355 			throws MPIException
1356 	{
1357 		MPI.check();
1358 		int off = 0;
1359 		boolean db = false;
1360 
1361 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
1362 		{
1363 			off = type.getOffset(buf);
1364 			buf = ((Buffer)buf).array();
1365 		}
1366 
1367 		gather(handle, null, false, 0, 0, 0, 0,
1368 				buf, db, off, count, type.handle, type.baseType, root);
1369 	}
1370 
1371 	private native void gather(
1372 			long comm, Object sendBuf, boolean sdb, int sendOff, int sendCount,
1373 			long sendType, int sendBaseType,
1374 			Object recvBuf, boolean rdb, int recvOff, int recvCount,
1375 			long recvType, int recvBaseType, int root)
1376 					throws MPIException;
1377 
1378 	/**
1379 	 * Each process sends the contents of its send buffer to the root process.
1380 	 * <p>Java binding of the MPI operation {@code MPI_IGATHER}.
1381 	 * @param sendbuf   send buffer
1382 	 * @param sendcount number of items to send
1383 	 * @param sendtype  datatype of each item in send buffer
1384 	 * @param recvbuf   receive buffer
1385 	 * @param recvcount number of items to receive
1386 	 * @param recvtype  datatype of each item in receive buffer
1387 	 * @param root      rank of receiving process
1388 	 * @return communication request
1389 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1390 	 */
1391 	public final Request iGather(
1392 			Buffer sendbuf, int sendcount, Datatype sendtype,
1393 			Buffer recvbuf, int recvcount, Datatype recvtype, int root)
1394 					throws MPIException
1395 	{
1396 		MPI.check();
1397 		assertDirectBuffer(sendbuf, recvbuf);
1398 		Request req = new Request(iGather(handle, sendbuf, sendcount, sendtype.handle,
1399 				recvbuf, recvcount, recvtype.handle, root));
1400 		req.addSendBufRef(sendbuf);
1401 		req.addRecvBufRef(recvbuf);
1402 		return req;
1403 	}
1404 
1405 	/**
1406 	 * Each process sends the contents of its send buffer to the root process.
1407 	 * <p>Java binding of the MPI operation {@code MPI_IGATHER}
1408 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
1409 	 * The buffer is used by the root process to receive data,
1410 	 * and it is used by the non-root processes to send data.
1411 	 * @param buf   buffer
1412 	 * @param count number of items to send/receive
1413 	 * @param type  datatype of each item in buffer
1414 	 * @param root  rank of receiving process
1415 	 * @return communication request
1416 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1417 	 */
1418 	public final Request iGather(Buffer buf, int count, Datatype type, int root)
1419 			throws MPIException
1420 	{
1421 		MPI.check();
1422 		assertDirectBuffer(buf);
1423 		Request req = new Request(iGather(handle, null, 0, 0,
1424 				buf, count, type.handle, root));
1425 		req.addRecvBufRef(buf);
1426 		return req;
1427 	}
1428 
1429 	private native long iGather(
1430 			long comm, Buffer sendbuf, int sendcount, long sendtype,
1431 			Buffer recvbuf, int recvcount, long recvtype,
1432 			int root) throws MPIException;
1433 
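	/*
	 * Usage sketch (illustrative): the nonblocking variants take direct NIO
	 * buffers only, as enforced by assertDirectBuffer above. The buffer names
	 * and the use of Request.waitFor() are placeholders/assumptions.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   IntBuffer sendBuf = MPI.newIntBuffer(1).put(0, comm.getRank());
	 *   IntBuffer recvBuf = MPI.newIntBuffer(comm.getSize());
	 *   Request req = comm.iGather(sendBuf, 1, MPI.INT, recvBuf, 1, MPI.INT, 0);
	 *   // ... overlap computation here ...
	 *   req.waitFor();          // on rank 0, recvBuf now holds one int per rank
	 */
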
1434 	/**
1435 	 * Extends functionality of {@code gather} by allowing varying
1436 	 * counts of data from each process.
1437 	 * <p>Java binding of the MPI operation {@code MPI_GATHERV}.
1438 	 * @param sendbuf   send buffer
1439 	 * @param sendcount number of items to send
1440 	 * @param sendtype  datatype of each item in send buffer
1441 	 * @param recvbuf   receive buffer
1442 	 * @param recvcount number of elements received from each process
1443 	 * @param displs    displacements at which to place incoming data
1444 	 * @param recvtype  datatype of each item in receive buffer
1445 	 * @param root      rank of receiving process
1446 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1447 	 */
1448 	public final void gatherv(Object sendbuf, int sendcount, Datatype sendtype,
1449 			Object recvbuf, int[] recvcount, int[] displs,
1450 			Datatype recvtype, int root)
1451 					throws MPIException
1452 	{
1453 		MPI.check();
1454 
1455 		int sendoff = 0,
1456 				recvoff = 0;
1457 
1458 		boolean sdb = false,
1459 				rdb = false;
1460 
1461 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
1462 		{
1463 			sendoff = sendtype.getOffset(sendbuf);
1464 			sendbuf = ((Buffer)sendbuf).array();
1465 		}
1466 
1467 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
1468 		{
1469 			recvoff = recvtype.getOffset(recvbuf);
1470 			recvbuf = ((Buffer)recvbuf).array();
1471 		}
1472 
1473 		gatherv(handle, sendbuf, sdb, sendoff, sendcount,
1474 				sendtype.handle, sendtype.baseType,
1475 				recvbuf, rdb, recvoff, recvcount, displs,
1476 				recvtype.handle, recvtype.baseType, root);
1477 	}
1478 
1479 	/**
1480 	 * Extends functionality of {@code gather} by allowing varying
1481 	 * counts of data from each process.
1482 	 * <p>Java binding of the MPI operation {@code MPI_GATHERV} using
1483 	 * {@code MPI_IN_PLACE} instead of the send buffer in the root process.
1484 	 * This method must be used in the root process.
1485 	 * @param recvbuf   receive buffer
1486 	 * @param recvcount number of elements received from each process
1487 	 * @param displs    displacements at which to place incoming data
1488 	 * @param recvtype  datatype of each item in receive buffer
1489 	 * @param root      rank of receiving process
1490 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1491 	 */
1492 	public final void gatherv(Object recvbuf, int[] recvcount, int[] displs,
1493 			Datatype recvtype, int root)
1494 					throws MPIException
1495 	{
1496 		MPI.check();
1497 		int recvoff = 0;
1498 		boolean rdb = false;
1499 
1500 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
1501 		{
1502 			recvoff = recvtype.getOffset(recvbuf);
1503 			recvbuf = ((Buffer)recvbuf).array();
1504 		}
1505 
1506 		gatherv(handle, null, false, 0, 0, 0, 0, recvbuf, rdb, recvoff, recvcount,
1507 				displs, recvtype.handle, recvtype.baseType, root);
1508 	}
1509 
1510 	/**
1511 	 * Extends functionality of {@code gather} by allowing varying
1512 	 * counts of data from each process.
1513 	 * <p>Java binding of the MPI operation {@code MPI_GATHERV} using
1514 	 * {@code MPI_IN_PLACE} instead of the send buffer in the root process.
1515 	 * This method must be used in the non-root processes.
1516 	 * @param sendbuf   send buffer
1517 	 * @param sendcount number of items to send
1518 	 * @param sendtype  datatype of each item in send buffer
1519 	 * @param root      rank of receiving process
1520 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1521 	 */
1522 	public final void gatherv(Object sendbuf, int sendcount,
1523 			Datatype sendtype, int root)
1524 					throws MPIException
1525 	{
1526 		MPI.check();
1527 		int sendoff = 0;
1528 		boolean sdb = false;
1529 
1530 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
1531 		{
1532 			sendoff = sendtype.getOffset(sendbuf);
1533 			sendbuf = ((Buffer)sendbuf).array();
1534 		}
1535 
1536 		gatherv(handle, sendbuf, sdb, sendoff, sendcount,
1537 				sendtype.handle, sendtype.baseType,
1538 				null, false, 0, null, null, 0, 0, root);
1539 	}
1540 
1541 	private native void gatherv(
1542 			long comm, Object sendBuf, boolean sdb, int sendOffset,
1543 			int sendCount, long sendType, int sendBaseType,
1544 			Object recvBuf, boolean rdb, int recvOffset,
1545 			int[] recvCount, int[] displs, long recvType, int recvBaseType,
1546 			int root) throws MPIException;
1547 
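	/*
	 * Usage sketch (illustrative): rank i contributes i+1 ints and the root
	 * packs the contributions back to back via recvcount/displs. Variable
	 * names are placeholders.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   int rank = comm.getRank(), size = comm.getSize();
	 *   int[] mine   = new int[rank + 1];
	 *   int[] counts = new int[size], displs = new int[size];
	 *   int total = 0;
	 *   for (int i = 0; i < size; i++) { counts[i] = i + 1; displs[i] = total; total += counts[i]; }
	 *   int[] all = new int[total];                 // needed on the root only
	 *   comm.gatherv(mine, mine.length, MPI.INT, all, counts, displs, MPI.INT, 0);
	 */
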
1548 	/**
1549 	 * Extends functionality of {@code gather} by allowing varying
1550 	 * counts of data from each process.
1551 	 * <p>Java binding of the MPI operation {@code MPI_IGATHERV}.
1552 	 * @param sendbuf   send buffer
1553 	 * @param sendcount number of items to send
1554 	 * @param sendtype  datatype of each item in send buffer
1555 	 * @param recvbuf   receive buffer
1556 	 * @param recvcount number of elements received from each process
1557 	 * @param displs    displacements at which to place incoming data
1558 	 * @param recvtype  datatype of each item in receive buffer
1559 	 * @param root      rank of receiving process
1560 	 * @return communication request
1561 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1562 	 */
1563 	public final Request iGatherv(
1564 			Buffer sendbuf, int sendcount, Datatype sendtype, Buffer recvbuf,
1565 			int[] recvcount, int[] displs, Datatype recvtype, int root)
1566 					throws MPIException
1567 	{
1568 		MPI.check();
1569 		assertDirectBuffer(sendbuf, recvbuf);
1570 		Request req = new Request(iGatherv(
1571 				handle, sendbuf, sendcount, sendtype.handle,
1572 				recvbuf, recvcount, displs, recvtype.handle, root));
1573 		req.addSendBufRef(sendbuf);
		req.addRecvBufRef(recvbuf);
1574 		return req;
1575 	}
1576 
1577 	/**
1578 	 * Extends functionality of {@code gather} by allowing varying
1579 	 * counts of data from each process.
1580 	 * <p>Java binding of the MPI operation {@code MPI_IGATHERV} using
1581 	 * {@code MPI_IN_PLACE} instead of the send buffer in the root process.
1582 	 * This method must be used in the root process.
1583 	 * @param recvbuf   receive buffer
1584 	 * @param recvcount number of elements received from each process
1585 	 * @param displs    displacements at which to place incoming data
1586 	 * @param recvtype  datatype of each item in receive buffer
1587 	 * @param root      rank of receiving process
1588 	 * @return communication request
1589 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1590 	 */
1591 	public final Request iGatherv(Buffer recvbuf, int[] recvcount, int[] displs,
1592 			Datatype recvtype, int root)
1593 					throws MPIException
1594 	{
1595 		MPI.check();
1596 		assertDirectBuffer(recvbuf);
1597 		Request req = new Request(iGatherv(handle, null, 0, 0,
1598 				recvbuf, recvcount, displs, recvtype.handle, root));
1599 		req.addRecvBufRef(recvbuf);
1600 		return req;
1601 	}
1602 
1603 	/**
1604 	 * Extends functionality of {@code gather} by allowing varying
1605 	 * counts of data from each process.
1606 	 * <p>Java binding of the MPI operation {@code MPI_IGATHERV} using
1607 	 * {@code MPI_IN_PLACE} instead of the send buffer in the root process.
1608 	 * This method must be used in the non-root processes.
1609 	 * @param sendbuf   send buffer
1610 	 * @param sendcount number of items to send
1611 	 * @param sendtype  datatype of each item in send buffer
1612 	 * @param root      rank of receiving process
1613 	 * @return communication request
1614 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1615 	 */
1616 	public final Request iGatherv(Buffer sendbuf, int sendcount,
1617 			Datatype sendtype, int root)
1618 					throws MPIException
1619 	{
1620 		MPI.check();
1621 		assertDirectBuffer(sendbuf);
1622 		Request req = new Request(iGatherv(handle, sendbuf, sendcount, sendtype.handle,
1623 				null, null, null, 0, root));
1624 		req.addSendBufRef(sendbuf);
1625 		return req;
1626 	}
1627 
1628 	private native long iGatherv(
1629 			long handle, Buffer sendbuf, int sendcount, long sendtype,
1630 			Buffer recvbuf, int[] recvcount, int[] displs,
1631 			long recvtype, int root)
1632 					throws MPIException;
1633 
1634 	/**
1635 	 * Inverse of the operation {@code gather}.
1636 	 * <p>Java binding of the MPI operation {@code MPI_SCATTER}.
1637 	 * @param sendbuf   send buffer
1638 	 * @param sendcount number of items to send
1639 	 * @param sendtype  datatype of each item in send buffer
1640 	 * @param recvbuf   receive buffer
1641 	 * @param recvcount number of items to receive
1642 	 * @param recvtype  datatype of each item in receive buffer
1643 	 * @param root      rank of sending process
1644 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1645 	 */
1646 	public final void scatter(
1647 			Object sendbuf, int sendcount, Datatype sendtype,
1648 			Object recvbuf, int recvcount, Datatype recvtype, int root)
1649 					throws MPIException
1650 	{
1651 		MPI.check();
1652 
1653 		int sendoff = 0,
1654 				recvoff = 0;
1655 
1656 		boolean sdb = false,
1657 				rdb = false;
1658 
1659 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
1660 		{
1661 			sendoff = sendtype.getOffset(sendbuf);
1662 			sendbuf = ((Buffer)sendbuf).array();
1663 		}
1664 
1665 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
1666 		{
1667 			recvoff = recvtype.getOffset(recvbuf);
1668 			recvbuf = ((Buffer)recvbuf).array();
1669 		}
1670 
1671 		scatter(handle, sendbuf, sdb, sendoff, sendcount,
1672 				sendtype.handle, sendtype.baseType,
1673 				recvbuf, rdb, recvoff, recvcount,
1674 				recvtype.handle, recvtype.baseType, root);
1675 	}
1676 
1677 	/**
1678 	 * Inverse of the operation {@code gather}.
1679 	 * <p>Java binding of the MPI operation {@code MPI_SCATTER}
1680 	 * using {@code MPI_IN_PLACE} instead of the receive buffer.
1681 	 * The buffer is used by the root process to send data,
1682 	 * and it is used by the non-root processes to receive data.
1683 	 * @param buf   send/receive buffer
1684 	 * @param count number of items to send/receive
1685 	 * @param type  datatype of each item in buffer
1686 	 * @param root  rank of sending process
1687 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1688 	 */
1689 	public final void scatter(Object buf, int count, Datatype type, int root)
1690 			throws MPIException
1691 	{
1692 		MPI.check();
1693 		int off = 0;
1694 		boolean db = false;
1695 
1696 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
1697 		{
1698 			off = type.getOffset(buf);
1699 			buf = ((Buffer)buf).array();
1700 		}
1701 
1702 		scatter(handle, buf, db, off, count, type.handle, type.baseType,
1703 				null, false, 0, 0, 0, 0, root);
1704 	}
1705 
1706 	private native void scatter(
1707 			long comm, Object sendBuf, boolean sdb, int sendOffset, int sendCount,
1708 			long sendType, int sendBaseType,
1709 			Object recvBuf, boolean rdb, int recvOffset, int recvCount,
1710 			long recvType, int recvBaseType, int root) throws MPIException;
1711 
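	/*
	 * Usage sketch (illustrative): rank 0 hands one int to every rank.
	 * Variable names are placeholders.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   int rank = comm.getRank(), size = comm.getSize();
	 *   int[] all = new int[size];                  // filled on the root only
	 *   if (rank == 0)
	 *       for (int i = 0; i < size; i++) all[i] = 10 * i;
	 *   int[] mine = new int[1];
	 *   comm.scatter(all, 1, MPI.INT, mine, 1, MPI.INT, 0);
	 */
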
1712 	/**
1713 	 * Inverse of the operation {@code gather}.
1714 	 * <p>Java binding of the MPI operation {@code MPI_ISCATTER}.
1715 	 * @param sendbuf   send buffer
1716 	 * @param sendcount number of items to send
1717 	 * @param sendtype  datatype of each item in send buffer
1718 	 * @param recvbuf   receive buffer
1719 	 * @param recvcount number of items to receive
1720 	 * @param recvtype  datatype of each item in receive buffer
1721 	 * @param root      rank of sending process
1722 	 * @return communication request
1723 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1724 	 */
1725 	public final Request iScatter(
1726 			Buffer sendbuf, int sendcount, Datatype sendtype,
1727 			Buffer recvbuf, int recvcount, Datatype recvtype, int root)
1728 					throws MPIException
1729 	{
1730 		MPI.check();
1731 		assertDirectBuffer(sendbuf, recvbuf);
1732 		Request req = new Request(iScatter(handle, sendbuf, sendcount, sendtype.handle,
1733 				recvbuf, recvcount, recvtype.handle, root));
1734 		req.addSendBufRef(sendbuf);
1735 		req.addRecvBufRef(recvbuf);
1736 		return req;
1737 	}
1738 
1739 	/**
1740 	 * Inverse of the operation {@code gather}.
1741 	 * <p>Java binding of the MPI operation {@code MPI_ISCATTER}
1742 	 * using {@code MPI_IN_PLACE} instead of the receive buffer.
1743 	 * The buffer is used by the root process to send data,
1744 	 * and it is used by the non-root processes to receive data.
1745 	 * @param buf   send/receive buffer
1746 	 * @param count number of items to send/receive
1747 	 * @param type  datatype of each item in buffer
1748 	 * @param root  rank of sending process
1749 	 * @return communication request
1750 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1751 	 */
1752 	public final Request iScatter(Buffer buf, int count, Datatype type, int root)
1753 			throws MPIException
1754 	{
1755 		MPI.check();
1756 		assertDirectBuffer(buf);
1757 		Request req = new Request(iScatter(handle, buf, count, type.handle,
1758 				null, 0, 0, root));
1759 		req.addSendBufRef(buf);
1760 		return req;
1761 	}
1762 
1763 	private native long iScatter(
1764 			long comm, Buffer sendbuf, int sendcount, long sendtype,
1765 			Buffer recvbuf, int recvcount, long recvtype, int root)
1766 					throws MPIException;
1767 
1768 	/**
1769 	 * Inverse of the operation {@code gatherv}.
1770 	 * <p>Java binding of the MPI operation {@code MPI_SCATTERV}.
1771 	 * @param sendbuf   send buffer
1772 	 * @param sendcount number of items sent to each process
1773 	 * @param displs    displacements from which to take outgoing data
1774 	 * @param sendtype  datatype of each item in send buffer
1775 	 * @param recvbuf   receive buffer
1776 	 * @param recvcount number of items to receive
1777 	 * @param recvtype  datatype of each item in receive buffer
1778 	 * @param root      rank of sending process
1779 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1780 	 */
1781 	public final void scatterv(
1782 			Object sendbuf, int[] sendcount, int[] displs, Datatype sendtype,
1783 			Object recvbuf, int recvcount, Datatype recvtype, int root)
1784 					throws MPIException
1785 	{
1786 		MPI.check();
1787 
1788 		int sendoff = 0,
1789 				recvoff = 0;
1790 
1791 		boolean sdb = false,
1792 				rdb = false;
1793 
1794 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
1795 		{
1796 			sendoff = sendtype.getOffset(sendbuf);
1797 			sendbuf = ((Buffer)sendbuf).array();
1798 		}
1799 
1800 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
1801 		{
1802 			recvoff = recvtype.getOffset(recvbuf);
1803 			recvbuf = ((Buffer)recvbuf).array();
1804 		}
1805 
1806 		scatterv(handle, sendbuf, sdb, sendoff, sendcount, displs,
1807 				sendtype.handle, sendtype.baseType,
1808 				recvbuf, rdb, recvoff, recvcount,
1809 				recvtype.handle, recvtype.baseType, root);
1810 	}
1811 
1812 	/**
1813 	 * Inverse of the operation {@code gatherv}.
1814 	 * <p>Java binding of the MPI operation {@code MPI_SCATTERV} using
1815 	 * {@code MPI_IN_PLACE} instead of the receive buffer in the root process.
1816 	 * This method must be used in the root process.
1817 	 * @param sendbuf   send buffer
1818 	 * @param sendcount number of items sent to each process
1819 	 * @param displs    displacements from which to take outgoing data
1820 	 * @param sendtype  datatype of each item in send buffer
1821 	 * @param root      rank of sending process
1822 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1823 	 */
1824 	public final void scatterv(Object sendbuf, int[] sendcount, int[] displs,
1825 			Datatype sendtype, int root)
1826 					throws MPIException
1827 	{
1828 		MPI.check();
1829 		int sendoff = 0;
1830 		boolean sdb = false;
1831 
1832 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
1833 		{
1834 			sendoff = sendtype.getOffset(sendbuf);
1835 			sendbuf = ((Buffer)sendbuf).array();
1836 		}
1837 
1838 		scatterv(handle, sendbuf, sdb, sendoff, sendcount, displs,
1839 				sendtype.handle, sendtype.baseType,
1840 				null, false, 0, 0, 0, 0, root);
1841 	}
1842 
1843 	/**
1844 	 * Inverse of the operation {@code gatherv}.
1845 	 * <p>Java binding of the MPI operation {@code MPI_SCATTERV} using
1846 	 * {@code MPI_IN_PLACE} instead of the receive buffer in the root process.
1847 	 * This method must be used in the non-root processes.
1848 	 * @param recvbuf   receive buffer
1849 	 * @param recvcount number of items to receive
1850 	 * @param recvtype  datatype of each item in receive buffer
1851 	 * @param root      rank of sending process
1852 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1853 	 */
1854 	public final void scatterv(Object recvbuf, int recvcount,
1855 			Datatype recvtype, int root)
1856 					throws MPIException
1857 	{
1858 		MPI.check();
1859 		int recvoff = 0;
1860 		boolean rdb = false;
1861 
1862 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
1863 		{
1864 			recvoff = recvtype.getOffset(recvbuf);
1865 			recvbuf = ((Buffer)recvbuf).array();
1866 		}
1867 
1868 		scatterv(handle, null, false, 0, null, null, 0, 0,
1869 				recvbuf, rdb, recvoff, recvcount,
1870 				recvtype.handle, recvtype.baseType, root);
1871 	}
1872 
1873 	private native void scatterv(
1874 			long comm, Object sendBuf, boolean sdb, int sendOffset,
1875 			int[] sendCount, int[] displs, long sendType, int sendBaseType,
1876 			Object recvBuf, boolean rdb, int recvOffset, int recvCount,
1877 			long recvType, int recvBaseType, int root)
1878 					throws MPIException;
1879 
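	/*
	 * Usage sketch (illustrative): the root sends rank i a block of i+1 ints
	 * taken from 'all' at the element displacements in 'displs'. Variable
	 * names are placeholders.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   int rank = comm.getRank(), size = comm.getSize();
	 *   int[] counts = new int[size], displs = new int[size];
	 *   int total = 0;
	 *   for (int i = 0; i < size; i++) { counts[i] = i + 1; displs[i] = total; total += counts[i]; }
	 *   int[] all  = new int[total];                // filled on the root only
	 *   int[] mine = new int[rank + 1];
	 *   comm.scatterv(all, counts, displs, MPI.INT, mine, mine.length, MPI.INT, 0);
	 */
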
1880 	/**
1881 	 * Inverse of the operation {@code gatherv}.
1882 	 * <p>Java binding of the MPI operation {@code MPI_ISCATTERV}.
1883 	 * @param sendbuf   send buffer
1884 	 * @param sendcount number of items sent to each process
1885 	 * @param displs    displacements from which to take outgoing data
1886 	 * @param sendtype  datatype of each item in send buffer
1887 	 * @param recvbuf   receive buffer
1888 	 * @param recvcount number of items to receive
1889 	 * @param recvtype  datatype of each item in receive buffer
1890 	 * @param root      rank of sending process
1891 	 * @return communication request
1892 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1893 	 */
1894 	public final Request iScatterv(
1895 			Buffer sendbuf, int[] sendcount, int[] displs,  Datatype sendtype,
1896 			Buffer recvbuf, int recvcount, Datatype recvtype, int root)
1897 					throws MPIException
1898 	{
1899 		MPI.check();
1900 		assertDirectBuffer(sendbuf, recvbuf);
1901 		Request req = new Request(iScatterv(
1902 				handle, sendbuf, sendcount, displs, sendtype.handle,
1903 				recvbuf, recvcount, recvtype.handle, root));
1904 		req.addSendBufRef(sendbuf);
1905 		req.addRecvBufRef(recvbuf);
1906 		return req;
1907 	}
1908 
1909 	/**
1910 	 * Inverse of the operation {@code gatherv}.
1911 	 * <p>Java binding of the MPI operation {@code MPI_ISCATTERV} using
1912 	 * {@code MPI_IN_PLACE} instead of the receive buffer in the root process.
1913 	 * This method must be used in the root process.
1914 	 * @param sendbuf   send buffer
1915 	 * @param sendcount number of items sent to each process
1916 	 * @param displs    displacements from which to take outgoing data
1917 	 * @param sendtype  datatype of each item in send buffer
1918 	 * @param root      rank of sending process
1919 	 * @return communication request
1920 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1921 	 */
1922 	public final Request iScatterv(Buffer sendbuf, int[] sendcount, int[] displs,
1923 			Datatype sendtype, int root)
1924 					throws MPIException
1925 	{
1926 		MPI.check();
1927 		assertDirectBuffer(sendbuf);
1928 		Request req = new Request(iScatterv(handle, sendbuf, sendcount, displs,
1929 				sendtype.handle, null, 0, 0, root));
1930 		req.addSendBufRef(sendbuf);
1931 		return req;
1932 	}
1933 
1934 	/**
1935 	 * Inverse of the operation {@code gatherv}.
1936 	 * <p>Java binding of the MPI operation {@code MPI_ISCATTERV} using
1937 	 * {@code MPI_IN_PLACE} instead of the receive buffer in the root process.
1938 	 * This method must be used in the non-root processes.
1939 	 * @param recvbuf   receive buffer
1940 	 * @param recvcount number of items to receive
1941 	 * @param recvtype  datatype of each item in receive buffer
1942 	 * @param root      rank of sending process
1943 	 * @return communication request
1944 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1945 	 */
1946 	public final Request iScatterv(Buffer recvbuf, int recvcount,
1947 			Datatype recvtype, int root)
1948 					throws MPIException
1949 	{
1950 		MPI.check();
1951 		assertDirectBuffer(recvbuf);
1952 		Request req = new Request(iScatterv(handle, null, null, null, 0,
1953 				recvbuf, recvcount, recvtype.handle, root));
1954 		req.addRecvBufRef(recvbuf);
1955 		return req;
1956 	}
1957 
1958 	private native long iScatterv(
1959 			long comm, Buffer sendbuf, int[] sendcount, int[] displs, long sendtype,
1960 			Buffer recvbuf, int recvcount, long recvtype, int root)
1961 					throws MPIException;
1962 
1963 	/**
1964 	 * Similar to {@code gather}, but all processes receive the result.
1965 	 * <p>Java binding of the MPI operation {@code MPI_ALLGATHER}.
1966 	 * @param sendbuf   send buffer
1967 	 * @param sendcount number of items to send
1968 	 * @param sendtype  datatype of each item in send buffer
1969 	 * @param recvbuf   receive buffer
1970 	 * @param recvcount number of items to receive
1971 	 * @param recvtype  datatype of each item in receive buffer
1972 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
1973 	 */
1974 	public final void allGather(Object sendbuf, int sendcount, Datatype sendtype,
1975 			Object recvbuf, int recvcount, Datatype recvtype)
1976 					throws MPIException
1977 	{
1978 		MPI.check();
1979 
1980 		int sendoff = 0,
1981 				recvoff = 0;
1982 
1983 		boolean sdb = false,
1984 				rdb = false;
1985 
1986 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
1987 		{
1988 			sendoff = sendtype.getOffset(sendbuf);
1989 			sendbuf = ((Buffer)sendbuf).array();
1990 		}
1991 
1992 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
1993 		{
1994 			recvoff = recvtype.getOffset(recvbuf);
1995 			recvbuf = ((Buffer)recvbuf).array();
1996 		}
1997 
1998 		allGather(handle, sendbuf, sdb, sendoff, sendcount,
1999 				sendtype.handle, sendtype.baseType,
2000 				recvbuf, rdb, recvoff, recvcount,
2001 				recvtype.handle, recvtype.baseType);
2002 	}
2003 
2004 	/**
2005 	 * Similar to {@code gather}, but all processes receive the result.
2006 	 * <p>Java binding of the MPI operation {@code MPI_ALLGATHER}
2007 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
2008 	 * @param buf   receive buffer
2009 	 * @param count number of items to receive
2010 	 * @param type  datatype of each item in receive buffer
2011 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2012 	 */
2013 	public final void allGather(Object buf, int count, Datatype type)
2014 			throws MPIException
2015 	{
2016 		MPI.check();
2017 		int off = 0;
2018 		boolean db = false;
2019 
2020 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
2021 		{
2022 			off = type.getOffset(buf);
2023 			buf = ((Buffer)buf).array();
2024 		}
2025 
2026 		allGather(handle, null, false, 0, 0, 0, 0,
2027 				buf, db, off, count, type.handle, type.baseType);
2028 	}
2029 
2030 	private native void allGather(
2031 			long comm, Object sendBuf, boolean sdb, int sendOffset, int sendCount,
2032 			long sendType, int sendBaseType,
2033 			Object recvBuf, boolean rdb, int recvOffset, int recvCount,
2034 			long recvType, int recvBaseType) throws MPIException;
2035 
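	/*
	 * Usage sketch (illustrative): every rank ends up with one int from each
	 * process, so no root argument is involved. Variable names are placeholders.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   int[] mine = { comm.getRank() };
	 *   int[] all  = new int[comm.getSize()];
	 *   comm.allGather(mine, 1, MPI.INT, all, 1, MPI.INT);
	 */
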
2036 	/**
2037 	 * Similar to {@code gather}, but all processes receive the result.
2038 	 * <p>Java binding of the MPI operation {@code MPI_IALLGATHER}.
2039 	 * @param sendbuf   send buffer
2040 	 * @param sendcount number of items to send
2041 	 * @param sendtype  datatype of each item in send buffer
2042 	 * @param recvbuf   receive buffer
2043 	 * @param recvcount number of items to receive
2044 	 * @param recvtype  datatype of each item in receive buffer
2045 	 * @return communication request
2046 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2047 	 */
2048 	public final Request iAllGather(
2049 			Buffer sendbuf, int sendcount, Datatype sendtype,
2050 			Buffer recvbuf, int recvcount, Datatype recvtype)
2051 					throws MPIException
2052 	{
2053 		MPI.check();
2054 		assertDirectBuffer(sendbuf, recvbuf);
2055 		Request req = new Request(iAllGather(handle, sendbuf, sendcount, sendtype.handle,
2056 				recvbuf, recvcount, recvtype.handle));
2057 		req.addSendBufRef(sendbuf);
2058 		req.addRecvBufRef(recvbuf);
2059 		return req;
2060 	}
2061 
2062 	/**
2063 	 * Similar to {@code gather}, but all processes receive the result.
2064 	 * <p>Java binding of the MPI operation {@code MPI_IALLGATHER}
2065 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
2066 	 * @param buf   receive buffer
2067 	 * @param count number of items to receive
2068 	 * @param type  datatype of each item in receive buffer
2069 	 * @return communication request
2070 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2071 	 */
2072 	public final Request iAllGather(Buffer buf, int count, Datatype type)
2073 			throws MPIException
2074 	{
2075 		MPI.check();
2076 		assertDirectBuffer(buf);
2077 		Request req = new Request(iAllGather(handle, null, 0, 0, buf, count, type.handle));
2078 		req.addRecvBufRef(buf);
2079 		return req;
2080 	}
2081 
2082 	private native long iAllGather(
2083 			long comm, Buffer sendbuf, int sendcount, long sendtype,
2084 			Buffer recvbuf, int recvcount, long recvtype) throws MPIException;
2085 
2086 	/**
2087 	 * Similar to {@code gatherv}, but all processes receive the result.
2088 	 * <p>Java binding of the MPI operation {@code MPI_ALLGATHERV}.
2089 	 * @param sendbuf   send buffer
2090 	 * @param sendcount number of items to send
2091 	 * @param sendtype  datatype of each item in send buffer
2092 	 * @param recvbuf   receive buffer
2093 	 * @param recvcount number of elements received from each process
2094 	 * @param displs    displacements at which to place incoming data
2095 	 * @param recvtype  datatype of each item in receive buffer
2096 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2097 	 */
2098 	public final void allGatherv(
2099 			Object sendbuf, int sendcount, Datatype sendtype,
2100 			Object recvbuf, int[] recvcount, int[] displs, Datatype recvtype)
2101 					throws MPIException
2102 	{
2103 		MPI.check();
2104 
2105 		int sendoff = 0,
2106 				recvoff = 0;
2107 
2108 		boolean sdb = false,
2109 				rdb = false;
2110 
2111 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2112 		{
2113 			sendoff = sendtype.getOffset(sendbuf);
2114 			sendbuf = ((Buffer)sendbuf).array();
2115 		}
2116 
2117 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2118 		{
2119 			recvoff = recvtype.getOffset(recvbuf);
2120 			recvbuf = ((Buffer)recvbuf).array();
2121 		}
2122 
2123 		allGatherv(handle, sendbuf, sdb, sendoff, sendcount,
2124 				sendtype.handle, sendtype.baseType,
2125 				recvbuf, rdb, recvoff, recvcount, displs,
2126 				recvtype.handle, recvtype.baseType);
2127 	}
2128 
2129 	/**
2130 	 * Similar to {@code gatherv}, but all processes receive the result.
2131 	 * <p>Java binding of the MPI operation {@code MPI_ALLGATHERV}
2132 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
2133 	 * @param recvbuf   receive buffer
2134 	 * @param recvcount number of elements received from each process
2135 	 * @param displs    displacements at which to place incoming data
2136 	 * @param recvtype  datatype of each item in receive buffer
2137 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2138 	 */
2139 	public final void allGatherv(Object recvbuf, int[] recvcount,
2140 			int[] displs, Datatype recvtype)
2141 					throws MPIException
2142 	{
2143 		MPI.check();
2144 		int recvoff = 0;
2145 		boolean rdb = false;
2146 
2147 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2148 		{
2149 			recvoff = recvtype.getOffset(recvbuf);
2150 			recvbuf = ((Buffer)recvbuf).array();
2151 		}
2152 
2153 		allGatherv(handle, null, false, 0, 0, 0, 0,
2154 				recvbuf, rdb, recvoff, recvcount,
2155 				displs, recvtype.handle, recvtype.baseType);
2156 	}
2157 
2158 	private native void allGatherv(
2159 			long comm, Object sendBuf, boolean sdb, int sendOffset, int sendCount,
2160 			long sendType, int sendBaseType,
2161 			Object recvBuf, boolean rdb, int recvOffset, int[] recvCount,
2162 			int[] displs, long recvType, int recvBaseType) throws MPIException;
2163 
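	/*
	 * Usage sketch (illustrative): same varying-count layout as the gatherv
	 * sketch above, except that every rank receives the full packed array.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   int rank = comm.getRank(), size = comm.getSize();
	 *   int[] counts = new int[size], displs = new int[size];
	 *   int total = 0;
	 *   for (int i = 0; i < size; i++) { counts[i] = i + 1; displs[i] = total; total += counts[i]; }
	 *   int[] mine = new int[rank + 1];
	 *   int[] all  = new int[total];
	 *   comm.allGatherv(mine, mine.length, MPI.INT, all, counts, displs, MPI.INT);
	 */
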
2164 	/**
2165 	 * Similar to {@code gatherv}, but all processes receive the result.
2166 	 * <p>Java binding of the MPI operation {@code MPI_IALLGATHERV}.
2167 	 * @param sendbuf   send buffer
2168 	 * @param sendcount number of items to send
2169 	 * @param sendtype  datatype of each item in send buffer
2170 	 * @param recvbuf   receive buffer
2171 	 * @param recvcount number of elements received from each process
2172 	 * @param displs    displacements at which to place incoming data
2173 	 * @param recvtype  datatype of each item in receive buffer
2174 	 * @return communication request
2175 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2176 	 */
2177 	public final Request iAllGatherv(
2178 			Buffer sendbuf, int sendcount, Datatype sendtype,
2179 			Buffer recvbuf, int[] recvcount, int[] displs, Datatype recvtype)
2180 					throws MPIException
2181 	{
2182 		MPI.check();
2183 		assertDirectBuffer(sendbuf, recvbuf);
2184 		Request req = new Request(iAllGatherv(
2185 				handle, sendbuf, sendcount, sendtype.handle,
2186 				recvbuf, recvcount, displs, recvtype.handle));
2187 		req.addSendBufRef(sendbuf);
2188 		req.addRecvBufRef(recvbuf);
2189 		return req;
2190 	}
2191 
2192 	/**
2193 	 * Similar to {@code gatherv}, but all processes receive the result.
2194 	 * <p>Java binding of the MPI operation {@code MPI_IALLGATHERV}
2195 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
2196 	 * @param buf    receive buffer
2197 	 * @param count  number of elements received from each process
2198 	 * @param displs displacements at which to place incoming data
2199 	 * @param type   datatype of each item in receive buffer
2200 	 * @return communication request
2201 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2202 	 */
2203 	public final Request iAllGatherv(
2204 			Buffer buf, int[] count, int[] displs, Datatype type)
2205 					throws MPIException
2206 	{
2207 		MPI.check();
2208 		assertDirectBuffer(buf);
2209 		Request req = new Request(iAllGatherv(
2210 				handle, null, 0, 0, buf, count, displs, type.handle));
2211 		req.addRecvBufRef(buf);
2212 		return req;
2213 	}
2214 
2215 	private native long iAllGatherv(
2216 			long handle, Buffer sendbuf, int sendcount, long sendtype,
2217 			Buffer recvbuf, int[] recvcount, int[] displs, long recvtype)
2218 					throws MPIException;
2219 
2220 	/**
2221 	 * Extension of {@code allGather} to the case where each process sends
2222 	 * distinct data to each of the receivers.
2223 	 * <p>Java binding of the MPI operation {@code MPI_ALLTOALL}.
2224 	 * @param sendbuf   send buffer
2225 	 * @param sendcount number of items sent to each process
2226 	 * @param sendtype  datatype of send buffer items
2227 	 * @param recvbuf   receive buffer
2228 	 * @param recvcount number of items received from any process
2229 	 * @param recvtype  datatype of receive buffer items
2230 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2231 	 */
2232 	public final void allToAll(Object sendbuf, int sendcount, Datatype sendtype,
2233 			Object recvbuf, int recvcount, Datatype recvtype)
2234 					throws MPIException
2235 	{
2236 		MPI.check();
2237 
2238 		int sendoff = 0,
2239 				recvoff = 0;
2240 
2241 		boolean sdb = false,
2242 				rdb = false;
2243 
2244 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2245 		{
2246 			sendoff = sendtype.getOffset(sendbuf);
2247 			sendbuf = ((Buffer)sendbuf).array();
2248 		}
2249 
2250 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2251 		{
2252 			recvoff = recvtype.getOffset(recvbuf);
2253 			recvbuf = ((Buffer)recvbuf).array();
2254 		}
2255 
2256 		allToAll(handle, sendbuf, sdb, sendoff, sendcount,
2257 				sendtype.handle, sendtype.baseType,
2258 				recvbuf, rdb, recvoff, recvcount,
2259 				recvtype.handle, recvtype.baseType);
2260 	}
2261 
2262 	private native void allToAll(
2263 			long comm, Object sendBuf, boolean sdb, int sendOffset, int sendCount,
2264 			long sendType, int sendBaseType,
2265 			Object recvBuf, boolean rdb, int recvOffset, int recvCount,
2266 			long recvType, int recvBaseType) throws MPIException;
2267 
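	/*
	 * Usage sketch (illustrative): each rank sends one distinct int to every
	 * rank; after the call, slot j of 'recv' holds the value that rank j
	 * addressed to this rank. Variable names are placeholders.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   int rank = comm.getRank(), size = comm.getSize();
	 *   int[] send = new int[size], recv = new int[size];
	 *   for (int i = 0; i < size; i++) send[i] = 100 * rank + i;   // block destined for rank i
	 *   comm.allToAll(send, 1, MPI.INT, recv, 1, MPI.INT);
	 */
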
2268 	/**
2269 	 * Extension of {@code allGather} to the case where each process sends
2270 	 * distinct data to each of the receivers.
2271 	 * <p>Java binding of the MPI operation {@code MPI_IALLTOALL}.
2272 	 * @param sendbuf   send buffer
2273 	 * @param sendcount number of items sent to each process
2274 	 * @param sendtype  datatype of send buffer items
2275 	 * @param recvbuf   receive buffer
2276 	 * @param recvcount number of items received from any process
2277 	 * @param recvtype  datatype of receive buffer items
2278 	 * @return communication request
2279 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2280 	 */
2281 	public final Request iAllToAll(Buffer sendbuf, int sendcount, Datatype sendtype,
2282 			Buffer recvbuf, int recvcount, Datatype recvtype)
2283 					throws MPIException
2284 	{
2285 		MPI.check();
2286 		assertDirectBuffer(sendbuf, recvbuf);
2287 		Request req = new Request(iAllToAll(handle, sendbuf, sendcount, sendtype.handle,
2288 				recvbuf, recvcount, recvtype.handle));
2289 		req.addSendBufRef(sendbuf);
2290 		req.addRecvBufRef(recvbuf);
2291 		return req;
2292 	}
2293 
2294 	private native long iAllToAll(
2295 			long comm, Buffer sendbuf, int sendcount, long sendtype,
2296 			Buffer recvbuf, int recvcount, long recvtype) throws MPIException;
2297 
2298 	/**
2299 	 * Adds flexibility to {@code allToAll}: location of data for send is
2300 	 * specified by {@code sdispls} and location to place data on receive
2301 	 * side is specified by {@code rdispls}.
2302 	 * <p>Java binding of the MPI operation {@code MPI_ALLTOALLV}.
2303 	 * @param sendbuf   send buffer
2304 	 * @param sendcount number of items sent to each process
2305 	 * @param sdispls   displacements from which to take outgoing data
2306 	 * @param sendtype  datatype of send buffer items
2307 	 * @param recvbuf   receive buffer
2308 	 * @param recvcount number of elements received from each process
2309 	 * @param rdispls   displacements at which to place incoming data
2310 	 * @param recvtype  datatype of each item in receive buffer
2311 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2312 	 */
2313 	public final void allToAllv(
2314 			Object sendbuf, int[] sendcount, int[] sdispls, Datatype sendtype,
2315 			Object recvbuf, int[] recvcount, int[] rdispls, Datatype recvtype)
2316 					throws MPIException
2317 	{
2318 		MPI.check();
2319 
2320 		int sendoff = 0,
2321 				recvoff = 0;
2322 
2323 		boolean sdb = false,
2324 				rdb = false;
2325 
2326 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2327 		{
2328 			sendoff = sendtype.getOffset(sendbuf);
2329 			sendbuf = ((Buffer)sendbuf).array();
2330 		}
2331 
2332 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2333 		{
2334 			recvoff = recvtype.getOffset(recvbuf);
2335 			recvbuf = ((Buffer)recvbuf).array();
2336 		}
2337 
2338 		allToAllv(handle, sendbuf, sdb, sendoff, sendcount, sdispls,
2339 				sendtype.handle, sendtype.baseType,
2340 				recvbuf, rdb, recvoff, recvcount, rdispls,
2341 				recvtype.handle, recvtype.baseType);
2342 	}
2343 
2344 	private native void allToAllv(
2345 			long comm, Object sendBuf, boolean sdb, int sendOffset,
2346 			int[] sendCount, int[] sdispls, long sendType, int sendBaseType,
2347 			Object recvBuf, boolean rdb, int recvOffset,
2348 			int[] recvCount, int[] rdispls, long recvType, int recvBaseType)
2349 					throws MPIException;
2350 
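	/*
	 * Usage sketch (illustrative): a uniform exchange of one int per peer
	 * expressed through the v-variant, i.e. with explicit per-process counts
	 * and element displacements. Variable names are placeholders.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   int size = comm.getSize();
	 *   int[] counts = new int[size], displs = new int[size];
	 *   for (int i = 0; i < size; i++) { counts[i] = 1; displs[i] = i; }
	 *   int[] send = new int[size], recv = new int[size];
	 *   comm.allToAllv(send, counts, displs, MPI.INT, recv, counts, displs, MPI.INT);
	 */
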
2351 	/**
2352 	 * Adds flexibility to {@code allToAll}: location of data for send is
2353 	 * specified by {@code sdispls} and location to place data on receive
2354 	 * side is specified by {@code rdispls}.
2355 	 * <p>Java binding of the MPI operation {@code MPI_IALLTOALLV}.
2356 	 * @param sendbuf   send buffer
2357 	 * @param sendcount number of items sent to each process
2358 	 * @param sdispls   displacements from which to take outgoing data
2359 	 * @param sendtype  datatype of send buffer items
2360 	 * @param recvbuf   receive buffer
2361 	 * @param recvcount number of elements received from each process
2362 	 * @param rdispls   displacements at which to place incoming data
2363 	 * @param recvtype  datatype of each item in receive buffer
2364 	 * @return communication request
2365 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2366 	 */
2367 	public final Request iAllToAllv(
2368 			Buffer sendbuf, int[] sendcount, int[] sdispls, Datatype sendtype,
2369 			Buffer recvbuf, int[] recvcount, int[] rdispls, Datatype recvtype)
2370 					throws MPIException
2371 	{
2372 		MPI.check();
2373 		assertDirectBuffer(sendbuf, recvbuf);
2374 		Request req = new Request(iAllToAllv(
2375 				handle, sendbuf, sendcount, sdispls, sendtype.handle,
2376 				recvbuf, recvcount, rdispls, recvtype.handle));
2377 		req.addSendBufRef(sendbuf);
2378 		req.addRecvBufRef(recvbuf);
2379 		return req;
2380 	}
2381 
2382 	private native long iAllToAllv(long comm,
2383 			Buffer sendbuf, int[] sendcount, int[] sdispls, long sendtype,
2384 			Buffer recvbuf, int[] recvcount, int[] rdispls, long recvtype)
2385 					throws MPIException;
2386 
2387 	/**
2388 	 * Adds flexibility to {@code allToAll}: location of data for send is
2389 	 * specified by {@code sDispls} and location to place data on receive
2390 	 * side is specified by {@code rDispls}.
2391 	 * <p>Java binding of the MPI operation {@code MPI_ALLTOALLW}.
2392 	 * @param sendBuf   send buffer
2393 	 * @param sendCount number of items sent to each process
2394 	 * @param sDispls   displacements (in bytes) from which to take outgoing data
2395 	 * @param sendTypes datatypes of send buffer items
2396 	 * @param recvBuf   receive buffer
2397 	 * @param recvCount number of elements received from each process
2398 	 * @param rDispls   displacements (in bytes) at which to place incoming data
2399 	 * @param recvTypes datatypes of receive buffer items
2400 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2401 	 */
2402 	public final void allToAllw(
2403 			Buffer sendBuf, int[] sendCount, int[] sDispls, Datatype[] sendTypes,
2404 			Buffer recvBuf, int[] recvCount, int[] rDispls, Datatype[] recvTypes)
2405 					throws MPIException
2406 	{
2407 		MPI.check();
2408 		assertDirectBuffer(sendBuf, recvBuf);
2409 
2410 		long[] sendHandles = convertTypeArray(sendTypes);
2411 		long[] recvHandles = convertTypeArray(recvTypes);
2412 
2413 		allToAllw(handle, sendBuf, sendCount, sDispls,
2414 				sendHandles, recvBuf, recvCount, rDispls,
2415 				recvHandles);
2416 	}
2417 
2418 	private native void allToAllw(long comm,
2419 			Buffer sendBuf, int[] sendCount, int[] sDispls, long[] sendTypes,
2420 			Buffer recvBuf, int[] recvCount, int[] rDispls, long[] recvTypes)
2421 					throws MPIException;
2422 
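	/*
	 * Usage sketch (illustrative): allToAllw takes direct buffers, a datatype
	 * per peer, and displacements expressed in bytes (here 4 bytes per int).
	 * Variable names are placeholders.
	 *
	 *   Comm comm = MPI.COMM_WORLD;
	 *   int size = comm.getSize();
	 *   IntBuffer send = MPI.newIntBuffer(size), recv = MPI.newIntBuffer(size);
	 *   int[] counts = new int[size], displs = new int[size];
	 *   Datatype[] types = new Datatype[size];
	 *   for (int i = 0; i < size; i++) { counts[i] = 1; displs[i] = 4 * i; types[i] = MPI.INT; }
	 *   comm.allToAllw(send, counts, displs, types, recv, counts, displs, types);
	 */
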
2423 	/**
2424 	 * Adds flexibility to {@code iAllToAll}: location of data for send is
2425 	 * specified by {@code sDispls} and location to place data on receive
2426 	 * side is specified by {@code rDispls}.
2427 	 * <p>Java binding of the MPI operation {@code MPI_IALLTOALLW}.
2428 	 * @param sendBuf   send buffer
2429 	 * @param sendCount number of items sent to each process
2430 	 * @param sDispls   displacements (in bytes) from which to take outgoing data
2431 	 * @param sendTypes datatypes of send buffer items
2432 	 * @param recvBuf   receive buffer
2433 	 * @param recvCount number of elements received from each process
2434 	 * @param rDispls   displacements (in bytes) at which to place incoming data
2435 	 * @param recvTypes datatypes of receive buffer items
2436 	 * @return communication request
2437 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2438 	 */
2439 	public final Request iAllToAllw(
2440 			Buffer sendBuf, int[] sendCount, int[] sDispls, Datatype[] sendTypes,
2441 			Buffer recvBuf, int[] recvCount, int[] rDispls, Datatype[] recvTypes)
2442 					throws MPIException
2443 	{
2444 		MPI.check();
2445 		assertDirectBuffer(sendBuf, recvBuf);
2446 
2447 		long[] sendHandles = convertTypeArray(sendTypes);
2448 		long[] recvHandles = convertTypeArray(recvTypes);
2449 		Request req = new Request(iAllToAllw(
2450 				handle, sendBuf, sendCount, sDispls, sendHandles,
2451 				recvBuf, recvCount, rDispls, recvHandles));
2452 		req.addSendBufRef(sendBuf);
2453 		req.addRecvBufRef(recvBuf);
2454 		return req;
2455 	}
2456 
2457 	private native long iAllToAllw(long comm,
2458 			Buffer sendBuf, int[] sendCount, int[] sDispls, long[] sendTypes,
2459 			Buffer recvBuf, int[] recvCount, int[] rDispls, long[] recvTypes)
2460 					throws MPIException;
2461 
2462 	/**
2463 	 * Java binding of {@code MPI_NEIGHBOR_ALLGATHER}.
2464 	 * @param sendbuf   send buffer
2465 	 * @param sendcount number of items to send
2466 	 * @param sendtype  datatype of each item in send buffer
2467 	 * @param recvbuf   receive buffer
2468 	 * @param recvcount number of items to receive
2469 	 * @param recvtype  datatype of each item in receive buffer
2470 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2471 	 */
2472 	public final void neighborAllGather(
2473 			Object sendbuf, int sendcount, Datatype sendtype,
2474 			Object recvbuf, int recvcount, Datatype recvtype)
2475 					throws MPIException
2476 	{
2477 		MPI.check();
2478 
2479 		int sendoff = 0,
2480 				recvoff = 0;
2481 
2482 		boolean sdb = false,
2483 				rdb = false;
2484 
2485 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2486 		{
2487 			sendoff = sendtype.getOffset(sendbuf);
2488 			sendbuf = ((Buffer)sendbuf).array();
2489 		}
2490 
2491 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2492 		{
2493 			recvoff = recvtype.getOffset(recvbuf);
2494 			recvbuf = ((Buffer)recvbuf).array();
2495 		}
2496 
2497 		neighborAllGather(handle, sendbuf, sdb, sendoff, sendcount,
2498 				sendtype.handle, sendtype.baseType,
2499 				recvbuf, rdb, recvoff, recvcount,
2500 				recvtype.handle, recvtype.baseType);
2501 	}
2502 
2503 	private native void neighborAllGather(
2504 			long comm, Object sendBuf, boolean sdb, int sendOffset,
2505 			int sendCount, long sendType, int sendBaseType,
2506 			Object recvBuf, boolean rdb, int recvOffset,
2507 			int recvCount, long recvType, int recvBaseType)
2508 					throws MPIException;
2509 
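	/*
	 * Usage sketch (illustrative): neighbor collectives are defined on
	 * topology communicators. On a periodic 1-D Cartesian communicator each
	 * rank has two neighbors, so the receive buffer holds two entries.
	 * Assumes Intracomm.createCart as provided by these bindings; names are
	 * placeholders.
	 *
	 *   int size = MPI.COMM_WORLD.getSize();
	 *   CartComm cart = MPI.COMM_WORLD.createCart(
	 *           new int[] { size }, new boolean[] { true }, false);
	 *   int[] mine = { cart.getRank() };
	 *   int[] fromNeighbors = new int[2];           // left and right neighbor
	 *   cart.neighborAllGather(mine, 1, MPI.INT, fromNeighbors, 1, MPI.INT);
	 */
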
2510 	/**
2511 	 * Java binding of {@code MPI_INEIGHBOR_ALLGATHER}.
2512 	 * @param sendbuf   send buffer
2513 	 * @param sendcount number of items to send
2514 	 * @param sendtype  datatype of each item in send buffer
2515 	 * @param recvbuf   receive buffer
2516 	 * @param recvcount number of items to receive
2517 	 * @param recvtype  datatype of each item in receive buffer
2518 	 * @return communication request
2519 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2520 	 */
2521 	public final Request iNeighborAllGather(
2522 			Buffer sendbuf, int sendcount, Datatype sendtype,
2523 			Buffer recvbuf, int recvcount, Datatype recvtype)
2524 					throws MPIException
2525 	{
2526 		MPI.check();
2527 		assertDirectBuffer(sendbuf, recvbuf);
2528 		Request req = new Request(iNeighborAllGather(
2529 				handle, sendbuf, sendcount, sendtype.handle,
2530 				recvbuf, recvcount, recvtype.handle));
2531 		req.addSendBufRef(sendbuf);
2532 		req.addRecvBufRef(recvbuf);
2533 		return req;
2534 	}
2535 
2536 	private native long iNeighborAllGather(
2537 			long comm, Buffer sendBuf, int sendCount, long sendType,
2538 			Buffer recvBuf, int recvCount, long recvType)
2539 					throws MPIException;
2540 
2541 	/**
2542 	 * Java binding of {@code MPI_NEIGHBOR_ALLGATHERV}.
2543 	 * @param sendbuf   send buffer
2544 	 * @param sendcount number of items to send
2545 	 * @param sendtype  datatype of each item in send buffer
2546 	 * @param recvbuf   receive buffer
2547 	 * @param recvcount number of elements that are received from each neighbor
2548 	 * @param displs    displacements at which to place incoming data
2549 	 * @param recvtype  datatype of receive buffer elements
2550 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2551 	 */
2552 	public final void neighborAllGatherv(
2553 			Object sendbuf, int sendcount, Datatype sendtype,
2554 			Object recvbuf, int[] recvcount, int[] displs, Datatype recvtype)
2555 					throws MPIException
2556 	{
2557 		MPI.check();
2558 
2559 		int sendoff = 0,
2560 				recvoff = 0;
2561 
2562 		boolean sdb = false,
2563 				rdb = false;
2564 
2565 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2566 		{
2567 			sendoff = sendtype.getOffset(sendbuf);
2568 			sendbuf = ((Buffer)sendbuf).array();
2569 		}
2570 
2571 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2572 		{
2573 			recvoff = recvtype.getOffset(recvbuf);
2574 			recvbuf = ((Buffer)recvbuf).array();
2575 		}
2576 
2577 		neighborAllGatherv(handle, sendbuf, sdb, sendoff, sendcount,
2578 				sendtype.handle, sendtype.baseType,
2579 				recvbuf, rdb, recvoff, recvcount, displs,
2580 				recvtype.handle, recvtype.baseType);
2581 	}
2582 
2583 	private native void neighborAllGatherv(
2584 			long comm, Object sendBuf, boolean sdb, int sendOff,
2585 			int sendCount, long sendType, int sendBaseType,
2586 			Object recvBuf, boolean rdb, int recvOff,
2587 			int[] recvCount, int[] displs, long recvType, int recvBaseType) throws MPIException;
2588 
2589 	/**
2590 	 * Java binding of {@code MPI_INEIGHBOR_ALLGATHERV}.
2591 	 * @param sendbuf   send buffer
2592 	 * @param sendcount number of items to send
2593 	 * @param sendtype  datatype of each item in send buffer
2594 	 * @param recvbuf   receive buffer
2595 	 * @param recvcount number of elements that are received from each neighbor
2596 	 * @param displs    displacements at which to place incoming data
2597 	 * @param recvtype  datatype of receive buffer elements
2598 	 * @return communication request
2599 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2600 	 */
2601 	public final Request iNeighborAllGatherv(
2602 			Buffer sendbuf, int sendcount, Datatype sendtype,
2603 			Buffer recvbuf, int[] recvcount, int[] displs, Datatype recvtype)
2604 					throws MPIException
2605 	{
2606 		MPI.check();
2607 		assertDirectBuffer(sendbuf, recvbuf);
2608 		Request req = new Request(iNeighborAllGatherv(
2609 				handle, sendbuf, sendcount, sendtype.handle,
2610 				recvbuf, recvcount, displs, recvtype.handle));
2611 		req.addSendBufRef(sendbuf);
2612 		req.addRecvBufRef(recvbuf);
2613 		return req;
2614 	}
2615 
2616 	private native long iNeighborAllGatherv(
2617 			long comm, Buffer sendBuf, int sendCount, long sendType,
2618 			Buffer recvBuf, int[] recvCount, int[] displs, long recvType)
2619 					throws MPIException;
2620 
2621 	/**
2622 	 * Java binding of {@code MPI_NEIGHBOR_ALLTOALL}.
2623 	 * @param sendbuf   send buffer
2624 	 * @param sendcount number of items to send
2625 	 * @param sendtype  datatype of each item in send buffer
2626 	 * @param recvbuf   receive buffer
2627 	 * @param recvcount number of items to receive
2628 	 * @param recvtype  datatype of each item in receive buffer
2629 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2630 	 */
2631 	public final void neighborAllToAll(
2632 			Object sendbuf, int sendcount, Datatype sendtype,
2633 			Object recvbuf, int recvcount, Datatype recvtype)
2634 					throws MPIException
2635 	{
2636 		MPI.check();
2637 
2638 		int sendoff = 0,
2639 				recvoff = 0;
2640 
2641 		boolean sdb = false,
2642 				rdb = false;
2643 
2644 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2645 		{
2646 			sendoff = sendtype.getOffset(sendbuf);
2647 			sendbuf = ((Buffer)sendbuf).array();
2648 		}
2649 
2650 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2651 		{
2652 			recvoff = recvtype.getOffset(recvbuf);
2653 			recvbuf = ((Buffer)recvbuf).array();
2654 		}
2655 
2656 		neighborAllToAll(handle, sendbuf, sdb, sendoff, sendcount,
2657 				sendtype.handle, sendtype.baseType,
2658 				recvbuf, rdb, recvoff, recvcount,
2659 				recvtype.handle, recvtype.baseType);
2660 	}
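
	/*
	 * Illustrative usage sketch (not part of the binding): exchange one int
	 * with each neighbor of a 1-D periodic Cartesian topology. Blocking
	 * neighbor collectives also accept Java arrays, so no direct buffers are
	 * needed here; createCart is assumed from these bindings.
	 *
	 *   CartComm cart = MPI.COMM_WORLD.createCart(
	 *       new int[] {MPI.COMM_WORLD.getSize()}, new boolean[] {true}, false);
	 *   int[] sendBuf = {cart.getRank(), cart.getRank()};  // one item per neighbor
	 *   int[] recvBuf = new int[2];
	 *   cart.neighborAllToAll(sendBuf, 1, MPI.INT, recvBuf, 1, MPI.INT);
	 */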
2661 
2662 	private native void neighborAllToAll(
2663 			long comm, Object sendBuf, boolean sdb, int sendOff,
2664 			int sendCount, long sendType, int sendBaseType,
2665 			Object recvBuf, boolean rdb, int recvOff,
2666 			int recvCount, long recvType, int recvBaseType)
2667 					throws MPIException;
2668 
2669 	/**
2670 	 * Java binding of {@code MPI_INEIGHBOR_ALLTOALL}.
2671 	 * @param sendbuf   send buffer
2672 	 * @param sendcount number of items to send
2673 	 * @param sendtype  datatype of each item in send buffer
2674 	 * @param recvbuf   receive buffer
2675 	 * @param recvcount number of items to receive
2676 	 * @param recvtype  datatype of each item in receive buffer
2677 	 * @return communication request
2678 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2679 	 */
2680 	public final Request iNeighborAllToAll(
2681 			Buffer sendbuf, int sendcount, Datatype sendtype,
2682 			Buffer recvbuf, int recvcount, Datatype recvtype)
2683 					throws MPIException
2684 	{
2685 		MPI.check();
2686 		assertDirectBuffer(sendbuf, recvbuf);
2687 		Request req = new Request(iNeighborAllToAll(
2688 				handle, sendbuf, sendcount, sendtype.handle,
2689 				recvbuf, recvcount, recvtype.handle));
2690 		req.addSendBufRef(sendbuf);
2691 		req.addRecvBufRef(recvbuf);
2692 		return req;
2693 	}
2694 
2695 	private native long iNeighborAllToAll(
2696 			long comm, Buffer sendBuf, int sendCount, long sendType,
2697 			Buffer recvBuf, int recvCount, long recvType);
2698 
2699 	/**
2700 	 * Java binding of {@code MPI_NEIGHBOR_ALLTOALLV}.
2701 	 * @param sendbuf   send buffer
2702 	 * @param sendcount number of items sent to each neighbor
2703 	 * @param sdispls   displacements from which to take outgoing data
2704 	 * @param sendtype  datatype of send buffer items
2705 	 * @param recvbuf   receive buffer
2706 	 * @param recvcount number of elements received from each neighbor
2707 	 * @param rdispls   displacements at which to place incoming data
2708 	 * @param recvtype  datatype of each item in receive buffer
2709 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2710 	 */
2711 	public final void neighborAllToAllv(
2712 			Object sendbuf, int[] sendcount, int[] sdispls, Datatype sendtype,
2713 			Object recvbuf, int[] recvcount, int[] rdispls, Datatype recvtype)
2714 					throws MPIException
2715 	{
2716 		MPI.check();
2717 
2718 		int sendoff = 0,
2719 				recvoff = 0;
2720 
2721 		boolean sdb = false,
2722 				rdb = false;
2723 
2724 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2725 		{
2726 			sendoff = sendtype.getOffset(sendbuf);
2727 			sendbuf = ((Buffer)sendbuf).array();
2728 		}
2729 
2730 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2731 		{
2732 			recvoff = recvtype.getOffset(recvbuf);
2733 			recvbuf = ((Buffer)recvbuf).array();
2734 		}
2735 
2736 		neighborAllToAllv(handle,
2737 				sendbuf, sdb, sendoff, sendcount, sdispls,
2738 				sendtype.handle, sendtype.baseType,
2739 				recvbuf, rdb, recvoff, recvcount, rdispls,
2740 				recvtype.handle, recvtype.baseType);
2741 	}
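
	/*
	 * Illustrative usage sketch (not part of the binding): the vector variant
	 * lets each neighbor receive a different number of items at a chosen
	 * displacement; counts and displacements are given per neighbor, in the
	 * order defined by the topology. With cart as in the sketches above
	 * (neighbor order left, right), sending 1 item left and 2 right means
	 * receiving 2 from the left and 1 from the right:
	 *
	 *   int[] sendBuf = new int[3], recvBuf = new int[3];
	 *   cart.neighborAllToAllv(sendBuf, new int[] {1, 2}, new int[] {0, 1}, MPI.INT,
	 *                          recvBuf, new int[] {2, 1}, new int[] {0, 2}, MPI.INT);
	 */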
2742 
2743 	private native void neighborAllToAllv(
2744 			long comm, Object sendBuf, boolean sdb, int sendOff,
2745 			int[] sendCount, int[] sdispls, long sendType, int sendBaseType,
2746 			Object recvBuf, boolean rdb, int recvOff,
2747 			int[] recvCount, int[] rdispls, long recvType, int recvBaseType)
2748 					throws MPIException;
2749 
2750 	/**
2751 	 * Java binding of {@code MPI_INEIGHBOR_ALLTOALLV}.
2752 	 * @param sendbuf   send buffer
2753 	 * @param sendcount number of items sent to each neighbor
2754 	 * @param sdispls   displacements from which to take outgoing data
2755 	 * @param sendtype  datatype of send buffer items
2756 	 * @param recvbuf   receive buffer
2757 	 * @param recvcount number of elements received from each neighbor
2758 	 * @param rdispls   displacements at which to place incoming data
2759 	 * @param recvtype  datatype of each item in receive buffer
2760 	 * @return communication request
2761 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2762 	 */
2763 	public final Request iNeighborAllToAllv(
2764 			Buffer sendbuf, int[] sendcount, int[] sdispls, Datatype sendtype,
2765 			Buffer recvbuf, int[] recvcount, int[] rdispls, Datatype recvtype)
2766 					throws MPIException
2767 	{
2768 		MPI.check();
2769 		assertDirectBuffer(sendbuf, recvbuf);
2770 		Request req = new Request(iNeighborAllToAllv(
2771 				handle, sendbuf, sendcount, sdispls, sendtype.handle,
2772 				recvbuf, recvcount, rdispls, recvtype.handle));
2773 		req.addSendBufRef(sendbuf);
2774 		req.addRecvBufRef(recvbuf);
2775 		return req;
2776 	}
2777 
2778 	private native long iNeighborAllToAllv(
2779 			long comm, Buffer sendBuf, int[] sendCount, int[] sdispls, long sType,
2780 			Buffer recvBuf, int[] recvCount, int[] rdispls, long rType)
2781 					throws MPIException;
2782 
2783 	/**
2784 	 * Combine elements in input buffer of each process using the reduce
2785 	 * operation, and return the combined value in the output buffer of the
2786 	 * root process.
2787 	 * <p>
2788 	 * Java binding of the MPI operation {@code MPI_REDUCE}.
2789 	 * <p>
2790 	 * The predefined operations are available in Java as {@code MPI.MAX},
2791 	 * {@code MPI.MIN}, {@code MPI.SUM}, {@code MPI.PROD}, {@code MPI.LAND},
2792 	 * {@code MPI.BAND}, {@code MPI.LOR}, {@code MPI.BOR}, {@code MPI.LXOR},
2793 	 * {@code MPI.BXOR}, {@code MPI.MINLOC} and {@code MPI.MAXLOC}.
2794 	 * @param sendbuf send buffer
2795 	 * @param recvbuf receive buffer
2796 	 * @param count   number of items in send buffer
2797 	 * @param type    data type of each item in send buffer
2798 	 * @param op      reduce operation
2799 	 * @param root    rank of root process
2800 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2801 	 */
2802 	public final void reduce(Object sendbuf, Object recvbuf, int count,
2803 			Datatype type, Op op, int root)
2804 					throws MPIException
2805 	{
2806 		MPI.check();
2807 		op.setDatatype(type);
2808 
2809 		int sendoff = 0,
2810 				recvoff = 0;
2811 
2812 		boolean sdb = false,
2813 				rdb = false;
2814 
2815 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2816 		{
2817 			sendoff = type.getOffset(sendbuf);
2818 			sendbuf = ((Buffer)sendbuf).array();
2819 		}
2820 
2821 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2822 		{
2823 			recvoff = type.getOffset(recvbuf);
2824 			recvbuf = ((Buffer)recvbuf).array();
2825 		}
2826 
2827 		reduce(handle, sendbuf, sdb, sendoff, recvbuf, rdb, recvoff,
2828 				count, type.handle, type.baseType, op, op.handle, root);
2829 	}
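
	/*
	 * Illustrative usage sketch (not part of the binding): sum one double per
	 * process at rank 0. Blocking reductions accept Java arrays as well as
	 * direct buffers; localValue is a hypothetical per-process value.
	 *
	 *   double[] partial = {localValue};
	 *   double[] total   = new double[1];   // only significant at the root
	 *   MPI.COMM_WORLD.reduce(partial, total, 1, MPI.DOUBLE, MPI.SUM, 0);
	 */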
2830 
2831 	/**
2832 	 * Combine elements in input buffer of each process using the reduce
2833 	 * operation, and return the combined value in the output buffer of the
2834 	 * root process.
2835 	 * <p>Java binding of the MPI operation {@code MPI_REDUCE}
2836 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
2837 	 * @param buf   send/receive buffer
2838 	 * @param count number of items in buffer
2839 	 * @param type  data type of each item in buffer
2840 	 * @param op    reduce operation
2841 	 * @param root  rank of root process
2842 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2843 	 */
2844 	public final void reduce(Object buf, int count, Datatype type, Op op, int root)
2845 			throws MPIException
2846 	{
2847 		MPI.check();
2848 		op.setDatatype(type);
2849 		int off = 0;
2850 		boolean db = false;
2851 
2852 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
2853 		{
2854 			off = type.getOffset(buf);
2855 			buf = ((Buffer)buf).array();
2856 		}
2857 
2858 		reduce(handle, null, false, 0, buf, db, off, count,
2859 				type.handle, type.baseType, op, op.handle, root);
2860 	}
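
	/*
	 * Illustrative usage sketch (not part of the binding): the in-place
	 * overload maps to MPI_IN_PLACE, so the root contributes and receives
	 * through the same buffer while non-root ranks only contribute.
	 *
	 *   int[] buf = {MPI.COMM_WORLD.getRank()};
	 *   MPI.COMM_WORLD.reduce(buf, 1, MPI.INT, MPI.SUM, 0);
	 *   // at rank 0, buf[0] now holds 0 + 1 + ... + (size - 1)
	 */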
2861 
2862 	private native void reduce(
2863 			long comm, Object sendbuf, boolean sdb, int sendoff,
2864 			Object recvbuf, boolean rdb, int recvoff, int count,
2865 			long type, int baseType, Op jOp, long hOp, int root)
2866 					throws MPIException;
2867 
2868 	/**
2869 	 * Combine elements in input buffer of each process using the reduce
2870 	 * operation, and return the combined value in the output buffer of the
2871 	 * root process.
2872 	 * <p>Java binding of the MPI operation {@code MPI_IREDUCE}.
2873 	 * @param sendbuf send buffer
2874 	 * @param recvbuf receive buffer
2875 	 * @param count   number of items in send buffer
2876 	 * @param type    data type of each item in send buffer
2877 	 * @param op      reduce operation
2878 	 * @param root    rank of root process
2879 	 * @return communication request
2880 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2881 	 */
2882 	public final Request iReduce(Buffer sendbuf, Buffer recvbuf,
2883 			int count, Datatype type, Op op, int root)
2884 					throws MPIException
2885 	{
2886 		MPI.check();
2887 		assertDirectBuffer(sendbuf, recvbuf);
2888 		op.setDatatype(type);
2889 		Request req = new Request(iReduce(
2890 				handle, sendbuf, recvbuf, count,
2891 				type.handle, type.baseType, op, op.handle, root));
2892 		req.addSendBufRef(sendbuf);
2893 		req.addRecvBufRef(recvbuf);
2894 		return req;
2895 	}
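
	/*
	 * Illustrative usage sketch (not part of the binding): nonblocking reduce
	 * with direct buffers (required by the i-variants), assuming the
	 * MPI.newIntBuffer helper of these bindings.
	 *
	 *   IntBuffer send = MPI.newIntBuffer(1).put(0, MPI.COMM_WORLD.getRank());
	 *   IntBuffer recv = MPI.newIntBuffer(1);
	 *   Request r = MPI.COMM_WORLD.iReduce(send, recv, 1, MPI.INT, MPI.MAX, 0);
	 *   r.waitFor();   // recv holds size - 1 at the root afterwards
	 */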
2896 
2897 	/**
2898 	 * Combine elements in input buffer of each process using the reduce
2899 	 * operation, and return the combined value in the output buffer of the
2900 	 * root process.
2901 	 * <p>Java binding of the MPI operation {@code MPI_IREDUCE}
2902 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
2903 	 * @param buf   send/receive buffer
2904 	 * @param count number of items in buffer
2905 	 * @param type  data type of each item in buffer
2906 	 * @param op    reduce operation
2907 	 * @param root  rank of root process
2908 	 * @return communication request
2909 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2910 	 */
2911 	public final Request iReduce(Buffer buf, int count,
2912 			Datatype type, Op op, int root)
2913 					throws MPIException
2914 	{
2915 		MPI.check();
2916 		assertDirectBuffer(buf);
2917 		op.setDatatype(type);
2918 		Request req = new Request(iReduce(
2919 				handle, null, buf, count,
2920 				type.handle, type.baseType, op, op.handle, root));
2921 		req.addSendBufRef(buf);
2922 		return req;
2923 	}
2924 
2925 	private native long iReduce(
2926 			long comm, Buffer sendbuf, Buffer recvbuf, int count,
2927 			long type, int baseType, Op jOp, long hOp, int root)
2928 					throws MPIException;
2929 
2930 	/**
2931 	 * Same as {@code reduce} except that the result appears in the receive
2932 	 * buffer of all processes in the group.
2933 	 * <p>Java binding of the MPI operation {@code MPI_ALLREDUCE}.
2934 	 * @param sendbuf send buffer
2935 	 * @param recvbuf receive buffer
2936 	 * @param count   number of items in send buffer
2937 	 * @param type    data type of each item in send buffer
2938 	 * @param op      reduce operation
2939 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2940 	 */
2941 	public final void allReduce(Object sendbuf, Object recvbuf,
2942 			int count, Datatype type, Op op)
2943 					throws MPIException
2944 	{
2945 		MPI.check();
2946 		op.setDatatype(type);
2947 
2948 		int sendoff = 0,
2949 				recvoff = 0;
2950 
2951 		boolean sdb = false,
2952 				rdb = false;
2953 
2954 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
2955 		{
2956 			sendoff = type.getOffset(sendbuf);
2957 			sendbuf = ((Buffer)sendbuf).array();
2958 		}
2959 
2960 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
2961 		{
2962 			recvoff = type.getOffset(recvbuf);
2963 			recvbuf = ((Buffer)recvbuf).array();
2964 		}
2965 
2966 		allReduce(handle, sendbuf, sdb, sendoff, recvbuf, rdb, recvoff,
2967 				count, type.handle, type.baseType, op, op.handle);
2968 	}
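
	/*
	 * Illustrative usage sketch (not part of the binding): a global sum that
	 * becomes visible on every rank, e.g. to finish a distributed dot
	 * product; localDot is a hypothetical per-process value.
	 *
	 *   double[] local  = {localDot};
	 *   double[] global = new double[1];
	 *   MPI.COMM_WORLD.allReduce(local, global, 1, MPI.DOUBLE, MPI.SUM);
	 */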
2969 
2970 	/**
2971 	 * Same as {@code reduce} except that the result appears in the receive
2972 	 * buffer of all processes in the group.
2973 	 * <p>Java binding of the MPI operation {@code MPI_ALLREDUCE}
2974 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
2975 	 * @param buf   send/receive buffer
2976 	 * @param count number of items in buffer
2977 	 * @param type  data type of each item in buffer
2978 	 * @param op    reduce operation
2979 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
2980 	 */
2981 	public final void allReduce(Object buf, int count, Datatype type, Op op)
2982 			throws MPIException
2983 	{
2984 		MPI.check();
2985 		op.setDatatype(type);
2986 		int off = 0;
2987 		boolean db = false;
2988 
2989 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
2990 		{
2991 			off = type.getOffset(buf);
2992 			buf = ((Buffer)buf).array();
2993 		}
2994 
2995 		allReduce(handle, null, false, 0, buf, db, off, count,
2996 				type.handle, type.baseType, op, op.handle);
2997 	}
2998 
2999 	private native void allReduce(
3000 			long comm, Object sendbuf, boolean sdb, int sendoff,
3001 			Object recvbuf, boolean rdb, int recvoff, int count,
3002 			long type, int baseType, Op jOp, long hOp) throws MPIException;
3003 
3004 	/**
3005 	 * Same as {@code reduce} except that the result appears in the receive
3006 	 * buffer of all processes in the group.
3007 	 * <p>Java binding of the MPI operation {@code MPI_IALLREDUCE}.
3008 	 * @param sendbuf send buffer
3009 	 * @param recvbuf receive buffer
3010 	 * @param count   number of items in send buffer
3011 	 * @param type    data type of each item in send buffer
3012 	 * @param op      reduce operation
3013 	 * @return communication request
3014 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3015 	 */
3016 	public final Request iAllReduce(Buffer sendbuf, Buffer recvbuf,
3017 			int count, Datatype type, Op op)
3018 					throws MPIException
3019 	{
3020 		MPI.check();
3021 		assertDirectBuffer(sendbuf, recvbuf);
3022 		op.setDatatype(type);
3023 		Request req = new Request(iAllReduce(handle, sendbuf, recvbuf, count,
3024 				type.handle, type.baseType, op, op.handle));
3025 		req.addSendBufRef(sendbuf);
3026 		req.addRecvBufRef(recvbuf);
3027 		return req;
3028 	}
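
	/*
	 * Illustrative usage sketch (not part of the binding): start the
	 * reduction, overlap it with unrelated local work, then complete it.
	 * send and recv are direct IntBuffers as in the iReduce sketch above.
	 *
	 *   Request r = MPI.COMM_WORLD.iAllReduce(send, recv, 1, MPI.INT, MPI.SUM);
	 *   // ... unrelated local work ...
	 *   r.waitFor();
	 */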
3029 
3030 	/**
3031 	 * Same as {@code reduce} except that the result appears in the receive
3032 	 * buffer of all processes in the group.
3033 	 * <p>Java binding of the MPI operation {@code MPI_IALLREDUCE}
3034 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
3035 	 * @param buf   send/receive buffer
3036 	 * @param count number of items in buffer
3037 	 * @param type  data type of each item in buffer
3038 	 * @param op    reduce operation
3039 	 * @return communication request
3040 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3041 	 */
3042 	public final Request iAllReduce(Buffer buf, int count, Datatype type, Op op)
3043 			throws MPIException
3044 	{
3045 		MPI.check();
3046 		op.setDatatype(type);
3047 		assertDirectBuffer(buf);
3048 		Request req = new Request(iAllReduce(
3049 				handle, null, buf, count,
3050 				type.handle, type.baseType, op, op.handle));
3051 		req.addRecvBufRef(buf);
3052 		return req;
3053 	}
3054 
3055 	private native long iAllReduce(
3056 			long comm, Buffer sendbuf, Buffer recvbuf, int count,
3057 			long type, int baseType, Op jOp, long hOp) throws MPIException;
3058 
3059 	/**
3060 	 * Combine elements in input buffer of each process using the reduce
3061 	 * operation, and scatter the combined values over the output buffers
3062 	 * of the processes.
3063 	 * <p>Java binding of the MPI operation {@code MPI_REDUCE_SCATTER}.
3064 	 * @param sendbuf    send buffer
3065 	 * @param recvbuf    receive buffer
3066 	 * @param recvcounts numbers of result elements distributed to each process
3067 	 * @param type       data type of each item in send buffer
3068 	 * @param op         reduce operation
3069 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3070 	 */
3071 	public final void reduceScatter(Object sendbuf, Object recvbuf,
3072 			int[] recvcounts, Datatype type, Op op)
3073 					throws MPIException
3074 	{
3075 		MPI.check();
3076 		op.setDatatype(type);
3077 
3078 		int sendoff = 0,
3079 				recvoff = 0;
3080 
3081 		boolean sdb = false,
3082 				rdb = false;
3083 
3084 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
3085 		{
3086 			sendoff = type.getOffset(sendbuf);
3087 			sendbuf = ((Buffer)sendbuf).array();
3088 		}
3089 
3090 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
3091 		{
3092 			recvoff = type.getOffset(recvbuf);
3093 			recvbuf = ((Buffer)recvbuf).array();
3094 		}
3095 
3096 		reduceScatter(handle, sendbuf, sdb, sendoff, recvbuf, rdb, recvoff,
3097 				recvcounts, type.handle, type.baseType, op, op.handle);
3098 	}
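
	/*
	 * Illustrative usage sketch (not part of the binding): element-wise sum
	 * of a size-element vector with element i delivered to rank i, i.e.
	 * recvcounts[i] == 1 for every rank.
	 *
	 *   int size   = MPI.COMM_WORLD.getSize();
	 *   int[] send = new int[size];          // send[i] is destined for rank i
	 *   int[] recv = new int[1];
	 *   int[] recvcounts = new int[size];
	 *   java.util.Arrays.fill(recvcounts, 1);
	 *   MPI.COMM_WORLD.reduceScatter(send, recv, recvcounts, MPI.INT, MPI.SUM);
	 */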
3099 
3100 	/**
3101 	 * Combine elements in input buffer of each process using the reduce
3102 	 * operation, and scatter the combined values over the output buffers
3103 	 * of the processes.
3104 	 * <p>Java binding of the MPI operation {@code MPI_REDUCE_SCATTER}
3105 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
3106 	 * @param buf    receive buffer
3107 	 * @param counts numbers of result elements distributed to each process
3108 	 * @param type   data type of each item in send buffer
3109 	 * @param op     reduce operation
3110 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3111 	 */
3112 	public final void reduceScatter(Object buf, int[] counts, Datatype type, Op op)
3113 			throws MPIException
3114 	{
3115 		MPI.check();
3116 		op.setDatatype(type);
3117 		int off = 0;
3118 		boolean db = false;
3119 
3120 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
3121 		{
3122 			off = type.getOffset(buf);
3123 			buf = ((Buffer)buf).array();
3124 		}
3125 
3126 		reduceScatter(handle, null, false, 0, buf, db, off, counts,
3127 				type.handle, type.baseType, op, op.handle);
3128 	}
3129 
3130 	private native void reduceScatter(
3131 			long comm, Object sendbuf, boolean sdb, int sendoff,
3132 			Object recvbuf, boolean rdb, int recvoff, int[] recvcounts,
3133 			long type, int baseType, Op jOp, long hOp) throws MPIException;
3134 
3135 	/**
3136 	 * Combine elements in input buffer of each process using the reduce
3137 	 * operation, and scatter the combined values over the output buffers
3138 	 * of the processes.
3139 	 * <p>Java binding of the MPI operation {@code MPI_IREDUCE_SCATTER}.
3140 	 * @param sendbuf    send buffer
3141 	 * @param recvbuf    receive buffer
3142 	 * @param recvcounts numbers of result elements distributed to each process
3143 	 * @param type       data type of each item in send buffer
3144 	 * @param op         reduce operation
3145 	 * @return communication request
3146 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3147 	 */
3148 	public final Request iReduceScatter(Buffer sendbuf, Buffer recvbuf,
3149 			int[] recvcounts, Datatype type, Op op)
3150 					throws MPIException
3151 	{
3152 		MPI.check();
3153 		op.setDatatype(type);
3154 		assertDirectBuffer(sendbuf, recvbuf);
3155 		Request req = new Request(iReduceScatter(
3156 				handle, sendbuf, recvbuf, recvcounts,
3157 				type.handle, type.baseType, op, op.handle));
3158 		req.addSendBufRef(sendbuf);
3159 		req.addRecvBufRef(recvbuf);
3160 		return req;
3161 	}
3162 
3163 	/**
3164 	 * Combine elements in input buffer of each process using the reduce
3165 	 * operation, and scatter the combined values over the output buffers
3166 	 * of the processes.
3167 	 * <p>Java binding of the MPI operation {@code MPI_IREDUCE_SCATTER}
3168 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
3169 	 * @param buf    receive buffer
3170 	 * @param counts numbers of result elements distributed to each process
3171 	 * @param type   data type of each item in send buffer
3172 	 * @param op     reduce operation
3173 	 * @return communication request
3174 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3175 	 */
3176 	public final Request iReduceScatter(
3177 			Buffer buf, int[] counts, Datatype type, Op op)
3178 					throws MPIException
3179 	{
3180 		MPI.check();
3181 		op.setDatatype(type);
3182 		assertDirectBuffer(buf);
3183 		Request req = new Request(iReduceScatter(
3184 				handle, null, buf, counts,
3185 				type.handle, type.baseType, op, op.handle));
3186 		req.addRecvBufRef(buf);
3187 		return req;
3188 	}
3189 
3190 	private native long iReduceScatter(
3191 			long handle, Buffer sendbuf, Object recvbuf, int[] recvcounts,
3192 			long type, int baseType, Op jOp, long hOp) throws MPIException;
3193 
3194 	/**
3195 	 * Combine values and scatter the results.
3196 	 * <p>Java binding of the MPI operation {@code MPI_REDUCE_SCATTER_BLOCK}.
3197 	 * @param sendbuf   send buffer
3198 	 * @param recvbuf   receive buffer
3199 	 * @param recvcount element count per block
3200 	 * @param type      data type of each item in send buffer
3201 	 * @param op        reduce operation
3202 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3203 	 */
3204 	public final void reduceScatterBlock(Object sendbuf, Object recvbuf,
3205 			int recvcount, Datatype type, Op op)
3206 					throws MPIException
3207 	{
3208 		MPI.check();
3209 		op.setDatatype(type);
3210 
3211 		int sendoff = 0,
3212 				recvoff = 0;
3213 
3214 		boolean sdb = false,
3215 				rdb = false;
3216 
3217 		if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
3218 		{
3219 			sendoff = type.getOffset(sendbuf);
3220 			sendbuf = ((Buffer)sendbuf).array();
3221 		}
3222 
3223 		if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
3224 		{
3225 			recvoff = type.getOffset(recvbuf);
3226 			recvbuf = ((Buffer)recvbuf).array();
3227 		}
3228 
3229 		reduceScatterBlock(handle, sendbuf, sdb, sendoff, recvbuf, rdb, recvoff,
3230 				recvcount, type.handle, type.baseType, op, op.handle);
3231 	}
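
	/*
	 * Illustrative usage sketch (not part of the binding): the block variant
	 * is the regular case of reduceScatter in which every rank receives the
	 * same number of elements (here one).
	 *
	 *   int[] send = new int[MPI.COMM_WORLD.getSize()];
	 *   int[] recv = new int[1];
	 *   MPI.COMM_WORLD.reduceScatterBlock(send, recv, 1, MPI.INT, MPI.SUM);
	 */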
3232 
3233 	/**
3234 	 * Combine values and scatter the results.
3235 	 * <p>Java binding of the MPI operation {@code MPI_REDUCE_SCATTER_BLOCK}
3236 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
3237 	 * @param buf   receive buffer
3238 	 * @param count element count per block
3239 	 * @param type  data type of each item in send buffer
3240 	 * @param op    reduce operation
3241 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3242 	 */
3243 	public final void reduceScatterBlock(
3244 			Object buf, int count, Datatype type, Op op)
3245 					throws MPIException
3246 	{
3247 		MPI.check();
3248 		op.setDatatype(type);
3249 		int off = 0;
3250 		boolean db = false;
3251 
3252 		if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
3253 		{
3254 			off = type.getOffset(buf);
3255 			buf = ((Buffer)buf).array();
3256 		}
3257 
3258 		reduceScatterBlock(handle, null, false, 0, buf, db, off, count,
3259 				type.handle, type.baseType, op, op.handle);
3260 	}
3261 
3262 	private native void reduceScatterBlock(
3263 			long comm, Object sendBuf, boolean sdb, int sOffset,
3264 			Object recvBuf, boolean rdb, int rOffset, int rCount,
3265 			long type, int baseType, Op jOp, long hOp) throws MPIException;
3266 
3267 	/**
3268 	 * Combine values and scatter the results.
3269 	 * <p>Java binding of the MPI operation {@code MPI_IREDUCE_SCATTER_BLOCK}.
3270 	 * @param sendbuf   send buffer
3271 	 * @param recvbuf   receive buffer
3272 	 * @param recvcount element count per block
3273 	 * @param type      data type of each item in send buffer
3274 	 * @param op        reduce operation
3275 	 * @return communication request
3276 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3277 	 */
3278 	public final Request iReduceScatterBlock(
3279 			Buffer sendbuf, Buffer recvbuf, int recvcount, Datatype type, Op op)
3280 					throws MPIException
3281 	{
3282 		MPI.check();
3283 		op.setDatatype(type);
3284 		assertDirectBuffer(sendbuf, recvbuf);
3285 		Request req = new Request(iReduceScatterBlock(
3286 				handle, sendbuf, recvbuf, recvcount,
3287 				type.handle, type.baseType, op, op.handle));
3288 		req.addSendBufRef(sendbuf);
3289 		req.addRecvBufRef(recvbuf);
3290 		return req;
3291 	}
3292 
3293 	/**
3294 	 * Combine values and scatter the results.
3295 	 * <p>Java binding of the MPI operation {@code MPI_IREDUCE_SCATTER_BLOCK}
3296 	 * using {@code MPI_IN_PLACE} instead of the send buffer.
3297 	 * @param buf   receive buffer
3298 	 * @param count element count per block
3299 	 * @param type  data type of each item in send buffer
3300 	 * @param op    reduce operation
3301 	 * @return communication request
3302 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3303 	 */
3304 	public final Request iReduceScatterBlock(
3305 			Buffer buf, int count, Datatype type, Op op)
3306 					throws MPIException
3307 	{
3308 		MPI.check();
3309 		op.setDatatype(type);
3310 		assertDirectBuffer(buf);
3311 		Request req = new Request(iReduceScatterBlock(
3312 				handle, null, buf, count, type.handle,
3313 				type.baseType, op, op.handle));
3314 		req.addRecvBufRef(buf);
3315 		return req;
3316 	}
3317 
3318 	private native long iReduceScatterBlock(
3319 			long handle, Buffer sendbuf, Buffer recvbuf, int recvcount,
3320 			long type, int baseType, Op jOp, long hOp) throws MPIException;
3321 
3322 	/**
3323 	 * Apply the operation given by {@code op} element-wise to the
3324 	 * elements of {@code inBuf} and {@code inOutBuf} with the result
3325 	 * stored element-wise in {@code inOutBuf}.
3326 	 * <p>Java binding of the MPI operation {@code MPI_REDUCE_LOCAL}.
3327 	 * @param inBuf    input buffer
3328 	 * @param inOutBuf input buffer, will contain combined output
3329 	 * @param count    number of elements
3330 	 * @param type     data type of each item
3331 	 * @param op       reduce operation
3332 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3333 	 */
3334 	public static void reduceLocal(
3335 			Object inBuf, Object inOutBuf, int count, Datatype type, Op op)
3336 					throws MPIException
3337 	{
3338 		MPI.check();
3339 		op.setDatatype(type);
3340 
3341 		int inOff    = 0,
3342 				inOutOff = 0;
3343 
3344 		boolean idb  = false,
3345 				iodb = false;
3346 
3347 		if(inBuf instanceof Buffer && !(idb = ((Buffer)inBuf).isDirect()))
3348 		{
3349 			inOff = type.getOffset(inBuf);
3350 			inBuf = ((Buffer)inBuf).array();
3351 		}
3352 
3353 		if(inOutBuf instanceof Buffer && !(iodb = ((Buffer)inOutBuf).isDirect()))
3354 		{
3355 			inOutOff = type.getOffset(inOutBuf);
3356 			inOutBuf = ((Buffer)inOutBuf).array();
3357 		}
3358 
3359 		if(op.uf == null)
3360 		{
3361 			reduceLocal(inBuf, idb, inOff, inOutBuf, iodb, inOutOff,
3362 					count, type.handle, op.handle);
3363 		}
3364 		else
3365 		{
3366 			reduceLocalUf(inBuf, idb, inOff, inOutBuf, iodb, inOutOff,
3367 					count, type.handle, type.baseType, op, op.handle);
3368 		}
3369 	}
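
	/*
	 * Illustrative usage sketch (not part of the binding): reduceLocal
	 * performs no communication; it applies the operation element-wise on the
	 * calling process, which is handy for post-processing received data.
	 *
	 *   int[] in    = {1, 2, 3};
	 *   int[] inOut = {10, 20, 30};
	 *   Comm.reduceLocal(in, inOut, 3, MPI.INT, MPI.SUM);
	 *   // inOut is now {11, 22, 33}
	 */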
3370 
3371 	private static native void reduceLocal(
3372 			Object inBuf, boolean idb, int inOff,
3373 			Object inOutBuf, boolean iodb, int inOutOff, int count,
3374 			long type, long op) throws MPIException;
3375 
3376 	private static native void reduceLocalUf(
3377 			Object inBuf, boolean idb, int inOff,
3378 			Object inOutBuf, boolean iodb, int inOutOff, int count,
3379 			long type, int baseType, Op jOp, long hOp) throws MPIException;
3380 
3381 	/**
3382 	 * Sets the print name for the communicator.
3383 	 * @param name name for the communicator
3384 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3385 	 */
3386 	public final void setName(String name) throws MPIException
3387 	{
3388 		MPI.check();
3389 		setName(handle, name);
3390 	}
3391 
3392 	private native void setName(long handle, String name) throws MPIException;
3393 
3394 	/**
3395 	 * Return the print name from the communicator.
3396 	 * @return name of the communicator
3397 	 * @throws MPIException Signals that an MPI exception of some sort has occurred.
3398 	 */
3399 	public final String getName() throws MPIException
3400 	{
3401 		MPI.check();
3402 		return getName(handle);
3403 	}
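
	/*
	 * Illustrative usage sketch (not part of the binding): naming a
	 * communicator (MPI_COMM_SET_NAME / MPI_COMM_GET_NAME) helps debuggers
	 * and error messages tell communicators apart; dup() is assumed from
	 * these bindings.
	 *
	 *   Comm solverComm = MPI.COMM_WORLD.dup();
	 *   solverComm.setName("solver-grid");
	 *   System.out.println(solverComm.getName());   // prints "solver-grid"
	 */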
3404 
3405 	private native String getName(long handle) throws MPIException;
3406 
3407 	/**
3408 	 * A helper method to convert an array of Datatypes to
3409 	 * an array of longs (handles).
3410 	 * @param dArray	Array of Datatypes
3411 	 * @return array of datatype handles
3412 	 */
3413 	private long[] convertTypeArray(Datatype[] dArray) {
3414 		long[] lArray = new long[dArray.length];
3415 
3416 		for(int i = 0; i < lArray.length; i++) {
3417 			if(dArray[i] != null) {
3418 				lArray[i] = dArray[i].handle;
3419 			}
3420 		}
3421 		return lArray;
3422 	}
3423 
3424 } // Comm
3425