/*
 * mpi_manager.h
 *
 * This file is part of NEST.
 *
 * Copyright (C) 2004 The NEST Initiative
 *
 * NEST is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * NEST is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with NEST. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef MPI_MANAGER_H
#define MPI_MANAGER_H

// Generated includes:
#include "config.h"

// C includes:
#include <unistd.h>
#ifdef HAVE_MPI
#include <mpi.h>
#endif

// C++ includes:
#include <cassert>
#include <cmath>
#include <iostream>
#include <limits>
#include <numeric>
#include <vector>

// Includes from libnestutil:
#include "manager_interface.h"

// Includes from nestkernel:
#include "nest_types.h"
#include "spike_data.h"
#include "target_data.h"

// Includes from sli:
#include "dictdatum.h"

namespace nest
{

class MPIManager : public ManagerInterface
{
public:
  // forward declaration of internal classes
  class OffGridSpike;

  MPIManager();
  ~MPIManager()
  {
  }

  virtual void initialize();
  virtual void finalize();

  virtual void set_status( const DictionaryDatum& );
  virtual void get_status( DictionaryDatum& );

  void init_mpi( int* argc, char** argv[] );
#ifdef HAVE_MPI
  void set_communicator( MPI_Comm );

  MPI_Comm
  get_communicator()
  {
    return comm;
  }
#endif

  /**
   * Return the number of MPI processes used during the simulation.
   *
   * Since each process runs the same number of threads, the total number
   * of threads is get_num_threads() * get_num_processes().
   */
  thread get_num_processes() const;

  /**
   * Set the number-of-processes state variable.
   * This is used by dryrun_mode.
   */
  void set_num_processes( thread n_procs );

  /**
   * Get the rank of this MPI process.
   */
  thread get_rank() const;

  /**
   * Return the process id for a given virtual process.
   *
   * The process id of a virtual process vp is p = vp mod P, where P is the
   * total number of processes; e.g., with P = 4, virtual process 6 is
   * handled by process 2.
   */
  thread get_process_id_of_vp( const thread vp ) const;

  /**
   * Return the process id of the node with the specified node ID.
   */
  thread get_process_id_of_node_id( const index node_id ) const;

  /**
   * Finalize MPI communication.
   *
   * This needs to be separate from MPIManager::finalize when compiled with
   * MUSIC, since spikes can still arrive and handlers be called here.
   */
  void mpi_finalize( int exitcode );

  /**
   * If MPI is available, this method calls MPI_Abort with the exitcode.
   */
  void mpi_abort( int exitcode );

  /**
   * Gather the send_buffer contents from all MPI processes into the
   * recv_buffer vector.
   */
  void communicate( std::vector< long >& send_buffer, std::vector< long >& recv_buffer );

  void communicate( std::vector< unsigned int >& send_buffer,
    std::vector< unsigned int >& recv_buffer,
    std::vector< int >& displacements );

  void communicate( std::vector< OffGridSpike >& send_buffer,
    std::vector< OffGridSpike >& recv_buffer,
    std::vector< int >& displacements );

  void communicate( std::vector< double >& send_buffer,
    std::vector< double >& recv_buffer,
    std::vector< int >& displacements );

  void communicate( std::vector< unsigned long >& send_buffer,
    std::vector< unsigned long >& recv_buffer,
    std::vector< int >& displacements );

  void
  communicate( std::vector< int >& send_buffer, std::vector< int >& recv_buffer, std::vector< int >& displacements );

  void communicate( double, std::vector< double >& );
  void communicate( std::vector< int >& );
  void communicate( std::vector< long >& );

  /**
   * Sum across all ranks.
   */
  void communicate_Allreduce_sum_in_place( double buffer );
  void communicate_Allreduce_sum_in_place( std::vector< double >& buffer );
  void communicate_Allreduce_sum_in_place( std::vector< int >& buffer );
  void communicate_Allreduce_sum( std::vector< double >& send_buffer, std::vector< double >& recv_buffer );

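  /*
   * A minimal usage sketch for the in-place reductions (hypothetical
   * values; assumes init_mpi() has been called):
   *
   *   std::vector< double > rates( 10, 1.0 ); // local contribution
   *   communicate_Allreduce_sum_in_place( rates );
   *   // rates[ i ] now holds the element-wise sum across all ranks
   */
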
  /**
   * Minimum across all ranks.
   *
   * @param value value on calling rank
   * @return minimum value across all ranks
   */
  double min_cross_ranks( double value );

  /**
   * Maximum across all ranks.
   *
   * @param value value on calling rank
   * @return maximum value across all ranks
   */
  double max_cross_ranks( double value );


  std::string get_processor_name();

  bool is_mpi_used();

  /**
   * Returns total size of MPI buffer for communication of connections.
   */
  size_t get_buffer_size_target_data() const;

  /**
   * Returns size of MPI buffer for connections divided by number of processes.
   */
  unsigned int get_send_recv_count_target_data_per_rank() const;

  /**
   * Returns total size of MPI buffer for communication of spikes.
   */
  size_t get_buffer_size_spike_data() const;

  /**
   * Returns size of MPI buffer for spikes divided by number of processes.
   */
  unsigned int get_send_recv_count_spike_data_per_rank() const;

  /**
   * Returns total size of MPI send buffer for communication of secondary events.
   */
  size_t get_send_buffer_size_secondary_events_in_int() const;

  /**
   * Returns total size of MPI recv buffer for communication of secondary events.
   */
  size_t get_recv_buffer_size_secondary_events_in_int() const;

#ifdef HAVE_MPI

  void communicate_Alltoall_( void* send_buffer, void* recv_buffer, const unsigned int send_recv_count );

  void communicate_Alltoallv_( void* send_buffer,
    const int* send_counts,
    const int* send_displacements,
    void* recv_buffer,
    const int* recv_counts,
    const int* recv_displacements );

#endif // HAVE_MPI

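  /**
   * Typed wrappers around the Alltoall communication above.
   *
   * These exchange elements of type D with every rank by forwarding to
   * communicate_Alltoall_() / communicate_Alltoallv_(); in builds without
   * MPI they reduce to swapping the send and receive buffers.
   */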
  template < class D >
  void communicate_Alltoall( std::vector< D >& send_buffer,
    std::vector< D >& recv_buffer,
    const unsigned int send_recv_count );
  template < class D >
  void communicate_target_data_Alltoall( std::vector< D >& send_buffer, std::vector< D >& recv_buffer );
  template < class D >
  void communicate_spike_data_Alltoall( std::vector< D >& send_buffer, std::vector< D >& recv_buffer );
  template < class D >
  void communicate_off_grid_spike_data_Alltoall( std::vector< D >& send_buffer, std::vector< D >& recv_buffer );
  template < class D >
  void communicate_secondary_events_Alltoallv( std::vector< D >& send_buffer, std::vector< D >& recv_buffer );

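  /**
   * Block until all ranks have reached this call (barrier; no-op in
   * builds without MPI).
   */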
  void synchronize();

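  /**
   * Return whether the argument is true on any rank (logical OR across
   * ranks; without MPI this simply returns the local value).
   */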
  bool any_true( const bool );

  /**
   * Benchmark the communication time of different MPI methods.
   *
   * Each time_communicate* method times the corresponding MPI
   * communication routine over the given number of samples.
   */
  double time_communicate( int num_bytes, int samples = 1000 );
  double time_communicatev( int num_bytes, int samples = 1000 );
  double time_communicate_offgrid( int num_bytes, int samples = 1000 );
  double time_communicate_alltoall( int num_bytes, int samples = 1000 );
  double time_communicate_alltoallv( int num_bytes, int samples = 1000 );

  void set_buffer_size_target_data( size_t buffer_size );
  void set_buffer_size_spike_data( size_t buffer_size );

  /**
   * Increases the size of the MPI buffer for communication of connections if it
   * needs to be increased. Returns whether the size was changed.
   */
  bool increase_buffer_size_target_data();

  /**
   * Increases the size of the MPI buffer for communication of spikes if it
   * needs to be increased. Returns whether the size was changed.
   */
  bool increase_buffer_size_spike_data();

  /**
   * Decreases the size of the MPI buffer for communication of spikes if it
   * can be decreased.
   */
  void decrease_buffer_size_spike_data();

  /**
   * Returns whether MPI buffers for communication of connections are adaptive.
   */
  bool adaptive_target_buffers() const;

  /**
   * Returns whether MPI buffers for communication of spikes are adaptive.
   */
  bool adaptive_spike_buffers() const;

  /**
   * Sets the recvcounts parameter of Alltoallv for communication of
   * secondary events, i.e., the number of elements (in ints) to receive
   * from the corresponding rank.
   */
  void set_recv_counts_secondary_events_in_int_per_rank( const std::vector< int >& recv_counts_in_int_per_rank );

  /**
   * Returns the recvcounts parameter of Alltoallv for communication of
   * secondary events, i.e., the number of elements (in ints) to receive
   * from `source_rank`.
   */
  size_t get_recv_count_secondary_events_in_int( const size_t source_rank ) const;

  /**
   * Returns the rdispls parameter of Alltoallv for communication of
   * secondary events, i.e., the offset in the MPI buffer at which
   * elements from `source_rank` are written.
   */
  size_t get_recv_displacement_secondary_events_in_int( const size_t source_rank ) const;

  /**
   * Returns the number of elements (in ints) to be sent to `target_rank`.
   */
  size_t get_send_count_secondary_events_in_int( const size_t target_rank ) const;

  /**
   * Returns the send displacement of elements (in ints) to be sent to rank `target_rank`.
   */
  size_t get_send_displacement_secondary_events_in_int( const size_t target_rank ) const;

  /**
   * Returns where the done marker is located in the MPI send buffer for `target_rank`.
   */
  size_t get_done_marker_position_in_secondary_events_send_buffer( const size_t target_rank ) const;

  /**
   * Returns where the done marker is located in the MPI recv buffer for `source_rank`.
   */
  size_t get_done_marker_position_in_secondary_events_recv_buffer( const size_t source_rank ) const;

  void communicate_recv_counts_secondary_events();

private:
  int num_processes_;    //!< number of MPI processes
  int rank_;             //!< rank of the MPI process
  int send_buffer_size_; //!< expected size of send buffer
  int recv_buffer_size_; //!< size of receive buffer
  bool use_mpi_;         //!< whether MPI is used

  size_t buffer_size_target_data_; //!< total size of MPI buffer for communication of connections

  size_t buffer_size_spike_data_; //!< total size of MPI buffer for communication of spikes

  size_t max_buffer_size_target_data_; //!< maximal size of MPI buffer for communication of connections

  size_t max_buffer_size_spike_data_; //!< maximal size of MPI buffer for communication of spikes

  bool adaptive_target_buffers_; //!< whether MPI buffers for communication of connections resize on the fly

  bool adaptive_spike_buffers_; //!< whether MPI buffers for communication of spikes resize on the fly

  double growth_factor_buffer_spike_data_;  //!< factor by which the spike buffer grows when it needs to be increased
  double growth_factor_buffer_target_data_; //!< factor by which the target-data buffer grows when it needs to be
                                            //!< increased

  double shrink_factor_buffer_spike_data_; //!< factor by which the spike buffer shrinks when it can be decreased

  unsigned int send_recv_count_spike_data_per_rank_;  //!< size of spike buffer per rank (Alltoall send/recv count)
  unsigned int send_recv_count_target_data_per_rank_; //!< size of target-data buffer per rank (Alltoall send/recv
                                                      //!< count)

  //! how many secondary elements (in ints) will be received from each rank
  std::vector< int > recv_counts_secondary_events_in_int_per_rank_;

  //! how many secondary elements (in ints) will be sent to each rank
  std::vector< int > send_counts_secondary_events_in_int_per_rank_;

  //! offset in the MPI receive buffer (in ints) at which elements received from each rank will be written
  std::vector< int > recv_displacements_secondary_events_in_int_per_rank_;

  //! offset in the MPI send buffer (in ints) from which elements sent to each rank will be read
  std::vector< int > send_displacements_secondary_events_in_int_per_rank_;

#ifdef HAVE_MPI
  //! array containing communication partner for each step.
  std::vector< int > comm_step_;
  unsigned int COMM_OVERFLOW_ERROR;

  //! Variable to hold the MPI communicator to use (the datatype matters).
  MPI_Comm comm;
  MPI_Datatype MPI_OFFGRID_SPIKE;

  void communicate_Allgather( std::vector< unsigned int >& send_buffer,
    std::vector< unsigned int >& recv_buffer,
    std::vector< int >& displacements );

  void communicate_Allgather( std::vector< OffGridSpike >& send_buffer,
    std::vector< OffGridSpike >& recv_buffer,
    std::vector< int >& displacements );

  void communicate_Allgather( std::vector< int >& );
  void communicate_Allgather( std::vector< long >& );

  template < typename T >
  void communicate_Allgatherv( std::vector< T >& send_buffer,
    std::vector< T >& recv_buffer,
    std::vector< int >& displacements,
    std::vector< int >& recv_counts );

  template < typename T >
  void communicate_Allgather( std::vector< T >& send_buffer,
    std::vector< T >& recv_buffer,
    std::vector< int >& displacements );

#endif /* #ifdef HAVE_MPI */

public:
  /**
   * Combined storage of node ID and offset information for off-grid spikes.
   *
   * @note This class actually stores the node ID as @c double internally.
   * This is done so that the user-defined MPI type MPI_OFFGRID_SPIKE,
   * which we use to communicate off-grid spikes, is homogeneous.
   * Otherwise, OpenMPI spends extreme amounts of time on packing
   * and unpacking the data, see #458.
   */
  class OffGridSpike
  {
    friend void MPIManager::init_mpi( int*, char*** );

  public:
    //! We define this type explicitly, so that the assert function below
    //! always tests the correct type.
    typedef unsigned int node_id_external_type;

    OffGridSpike()
      : node_id_( 0 )
      , offset_( 0.0 )
    {
    }
    OffGridSpike( node_id_external_type node_idv, double offsetv )
      : node_id_( node_idv )
      , offset_( offsetv )
    {
    }

    unsigned int
    get_node_id() const
    {
      return static_cast< node_id_external_type >( node_id_ );
    }
    void
    set_node_id( node_id_external_type node_id )
    {
      node_id_ = static_cast< double >( node_id );
    }
    double
    get_offset() const
    {
      return offset_;
    }

  private:
    double node_id_; //!< node ID of neuron that spiked
    double offset_;  //!< offset of spike from grid

    //! This function asserts that doubles can hold node IDs without loss
    static void
    assert_datatype_compatibility_()
    {
      assert( std::numeric_limits< double >::digits > std::numeric_limits< node_id_external_type >::digits );

      // the next check doubles up on the one above; better safe than sorry
      const node_id_external_type maxnode_id = std::numeric_limits< node_id_external_type >::max();
      OffGridSpike ogs( maxnode_id, 0.0 );
      assert( maxnode_id == ogs.get_node_id() );
    }
  };
};

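/*
 * A minimal usage sketch: inside the NEST kernel this manager is reached
 * through the KernelManager singleton (see "kernel_manager.h"), e.g.
 *
 *   const thread rank = kernel().mpi_manager.get_rank();
 *   const thread n_procs = kernel().mpi_manager.get_num_processes();
 */
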
inline void
MPIManager::set_recv_counts_secondary_events_in_int_per_rank( const std::vector< int >& recv_counts_in_int_per_rank )
{
  recv_counts_secondary_events_in_int_per_rank_ = recv_counts_in_int_per_rank;

  // compute the receive displacements as the exclusive prefix sum of the
  // receive counts; the first displacement is left unchanged (zero)
  std::partial_sum( recv_counts_secondary_events_in_int_per_rank_.begin(),
    recv_counts_secondary_events_in_int_per_rank_.end() - 1,
    recv_displacements_secondary_events_in_int_per_rank_.begin() + 1 );
}
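
/*
 * Example with hypothetical counts: recv counts { 3, 5, 2 } yield recv
 * displacements { 0, 3, 8 }, i.e., data from rank 2 starts at offset 8.
 */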

inline size_t
MPIManager::get_recv_count_secondary_events_in_int( const size_t source_rank ) const
{
  return recv_counts_secondary_events_in_int_per_rank_[ source_rank ];
}

inline size_t
MPIManager::get_recv_displacement_secondary_events_in_int( const size_t source_rank ) const
{
  return recv_displacements_secondary_events_in_int_per_rank_[ source_rank ];
}

inline size_t
MPIManager::get_send_count_secondary_events_in_int( const size_t target_rank ) const
{
  return send_counts_secondary_events_in_int_per_rank_[ target_rank ];
}

inline size_t
MPIManager::get_send_displacement_secondary_events_in_int( const size_t target_rank ) const
{
  return send_displacements_secondary_events_in_int_per_rank_[ target_rank ];
}

inline size_t
MPIManager::get_done_marker_position_in_secondary_events_send_buffer( const size_t target_rank ) const
{
  // the done marker occupies the last slot of each rank's section
  return get_send_displacement_secondary_events_in_int( target_rank )
    + get_send_count_secondary_events_in_int( target_rank ) - 1;
}

inline size_t
MPIManager::get_done_marker_position_in_secondary_events_recv_buffer( const size_t source_rank ) const
{
  return get_recv_displacement_secondary_events_in_int( source_rank )
    + get_recv_count_secondary_events_in_int( source_rank ) - 1;
}

inline thread
MPIManager::get_num_processes() const
{
  return num_processes_;
}

inline void
MPIManager::set_num_processes( thread n_procs )
{
  num_processes_ = n_procs;
}

inline thread
MPIManager::get_rank() const
{
  return rank_;
}

inline bool
MPIManager::is_mpi_used()
{
  return use_mpi_;
}

inline size_t
MPIManager::get_buffer_size_target_data() const
{
  return buffer_size_target_data_;
}

inline unsigned int
MPIManager::get_send_recv_count_target_data_per_rank() const
{
  return send_recv_count_target_data_per_rank_;
}

inline size_t
MPIManager::get_buffer_size_spike_data() const
{
  return buffer_size_spike_data_;
}

inline unsigned int
MPIManager::get_send_recv_count_spike_data_per_rank() const
{
  return send_recv_count_spike_data_per_rank_;
}

inline size_t
MPIManager::get_send_buffer_size_secondary_events_in_int() const
{
  // total buffer size = displacement of the last rank's section plus its count
  return send_displacements_secondary_events_in_int_per_rank_.back()
    + send_counts_secondary_events_in_int_per_rank_.back();
}

inline size_t
MPIManager::get_recv_buffer_size_secondary_events_in_int() const
{
  return recv_displacements_secondary_events_in_int_per_rank_.back()
    + recv_counts_secondary_events_in_int_per_rank_.back();
}

inline void
MPIManager::set_buffer_size_target_data( const size_t buffer_size )
{
  assert( buffer_size >= static_cast< size_t >( 2 * get_num_processes() ) );
  // clamp the requested size to the configured maximum
  if ( buffer_size <= max_buffer_size_target_data_ )
  {
    buffer_size_target_data_ = buffer_size;
  }
  else
  {
    buffer_size_target_data_ = max_buffer_size_target_data_;
  }
  // each rank gets an equal share of the buffer
  send_recv_count_target_data_per_rank_ = static_cast< size_t >(
    floor( static_cast< double >( get_buffer_size_target_data() ) / static_cast< double >( get_num_processes() ) ) );

  assert( send_recv_count_target_data_per_rank_ * get_num_processes() <= get_buffer_size_target_data() );
}

inline void
MPIManager::set_buffer_size_spike_data( const size_t buffer_size )
{
  assert( buffer_size >= static_cast< size_t >( 2 * get_num_processes() ) );
  if ( buffer_size <= max_buffer_size_spike_data_ )
  {
    buffer_size_spike_data_ = buffer_size;
  }
  else
  {
    buffer_size_spike_data_ = max_buffer_size_spike_data_;
  }

  send_recv_count_spike_data_per_rank_ = floor( get_buffer_size_spike_data() / get_num_processes() );

  assert( send_recv_count_spike_data_per_rank_ * get_num_processes() <= get_buffer_size_spike_data() );
}

inline bool
MPIManager::increase_buffer_size_target_data()
{
  assert( adaptive_target_buffers_ );
  if ( buffer_size_target_data_ >= max_buffer_size_target_data_ )
  {
    return false;
  }
  else
  {
    if ( buffer_size_target_data_ * growth_factor_buffer_target_data_ < max_buffer_size_target_data_ )
    {
      // this also adjusts send_recv_count_target_data_per_rank_
      set_buffer_size_target_data(
        static_cast< size_t >( floor( buffer_size_target_data_ * growth_factor_buffer_target_data_ ) ) );
    }
    else
    {
      // this also adjusts send_recv_count_target_data_per_rank_
      set_buffer_size_target_data( max_buffer_size_target_data_ );
    }
    return true;
  }
}

inline bool
MPIManager::increase_buffer_size_spike_data()
{
  assert( adaptive_spike_buffers_ );
  if ( buffer_size_spike_data_ >= max_buffer_size_spike_data_ )
  {
    return false;
  }
  else
  {
    if ( buffer_size_spike_data_ * growth_factor_buffer_spike_data_ < max_buffer_size_spike_data_ )
    {
      set_buffer_size_spike_data( floor( buffer_size_spike_data_ * growth_factor_buffer_spike_data_ ) );
    }
    else
    {
      set_buffer_size_spike_data( max_buffer_size_spike_data_ );
    }
    return true;
  }
}

inline void
MPIManager::decrease_buffer_size_spike_data()
{
  assert( adaptive_spike_buffers_ );
  // the minimum of 4.0 * get_num_processes() keeps the shrunken buffer
  // distinguishable from the initial size
  if ( buffer_size_spike_data_ / shrink_factor_buffer_spike_data_ > 4.0 * get_num_processes() )
  {
    set_buffer_size_spike_data( floor( buffer_size_spike_data_ / shrink_factor_buffer_spike_data_ ) );
  }
}
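
/*
 * Example with hypothetical values: with growth factor 1.5, maximum size
 * 1200 and current size 1000, the spike buffer grows to the clamped
 * maximum of 1200, since 1000 * 1.5 = 1500 exceeds the maximum.
 */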

inline bool
MPIManager::adaptive_target_buffers() const
{
  return adaptive_target_buffers_;
}

inline bool
MPIManager::adaptive_spike_buffers() const
{
  return adaptive_spike_buffers_;
}

#ifndef HAVE_MPI
// Stub implementations used when NEST is built without MPI:
inline std::string
MPIManager::get_processor_name()
{
  char name[ 1024 ];
  name[ 1023 ] = '\0';
  gethostname( name, 1023 );
  return name;
}

inline void
MPIManager::mpi_abort( int )
{
}

inline void
MPIManager::communicate( std::vector< int >& )
{
}

inline void
MPIManager::communicate( std::vector< long >& )
{
}

inline void
MPIManager::synchronize()
{
}

inline void
test_link( int, int )
{
}

inline void
test_links()
{
}

inline bool
MPIManager::any_true( const bool my_bool )
{
  return my_bool;
}

inline double
MPIManager::time_communicate( int, int )
{
  return 0.0;
}

inline double
MPIManager::time_communicatev( int, int )
{
  return 0.0;
}

inline double
MPIManager::time_communicate_offgrid( int, int )
{
  return 0.0;
}

inline double
MPIManager::time_communicate_alltoall( int, int )
{
  return 0.0;
}

inline double
MPIManager::time_communicate_alltoallv( int, int )
{
  return 0.0;
}

#endif /* HAVE_MPI */

#ifdef HAVE_MPI
template < class D >
void
MPIManager::communicate_Alltoall( std::vector< D >& send_buffer,
  std::vector< D >& recv_buffer,
  const unsigned int send_recv_count )
{
  void* send_buffer_int = static_cast< void* >( &send_buffer[ 0 ] );
  void* recv_buffer_int = static_cast< void* >( &recv_buffer[ 0 ] );

  communicate_Alltoall_( send_buffer_int, recv_buffer_int, send_recv_count );
}

template < class D >
void
MPIManager::communicate_secondary_events_Alltoallv( std::vector< D >& send_buffer, std::vector< D >& recv_buffer )
{
  void* send_buffer_int = static_cast< void* >( &send_buffer[ 0 ] );
  void* recv_buffer_int = static_cast< void* >( &recv_buffer[ 0 ] );

  communicate_Alltoallv_( send_buffer_int,
    &send_counts_secondary_events_in_int_per_rank_[ 0 ],
    &send_displacements_secondary_events_in_int_per_rank_[ 0 ],
    recv_buffer_int,
    &recv_counts_secondary_events_in_int_per_rank_[ 0 ],
    &recv_displacements_secondary_events_in_int_per_rank_[ 0 ] );
}

#else // HAVE_MPI
template < class D >
void
MPIManager::communicate_Alltoall( std::vector< D >& send_buffer,
  std::vector< D >& recv_buffer,
  const unsigned int )
{
  // without MPI, "communication" amounts to exchanging the buffers locally
  recv_buffer.swap( send_buffer );
}

template < class D >
void
MPIManager::communicate_secondary_events_Alltoallv( std::vector< D >& send_buffer, std::vector< D >& recv_buffer )
{
  recv_buffer.swap( send_buffer );
}

#endif // HAVE_MPI

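// The wrappers below convert the per-rank count from data-structure
// elements to unsigned ints, the unit in which the underlying Alltoall
// communicates.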
template < class D >
void
MPIManager::communicate_target_data_Alltoall( std::vector< D >& send_buffer, std::vector< D >& recv_buffer )
{
  const size_t send_recv_count_target_data_in_int_per_rank =
    sizeof( TargetData ) / sizeof( unsigned int ) * send_recv_count_target_data_per_rank_;

  communicate_Alltoall( send_buffer, recv_buffer, send_recv_count_target_data_in_int_per_rank );
}

template < class D >
void
MPIManager::communicate_spike_data_Alltoall( std::vector< D >& send_buffer, std::vector< D >& recv_buffer )
{
  const size_t send_recv_count_spike_data_in_int_per_rank =
    sizeof( SpikeData ) / sizeof( unsigned int ) * send_recv_count_spike_data_per_rank_;

  communicate_Alltoall( send_buffer, recv_buffer, send_recv_count_spike_data_in_int_per_rank );
}

template < class D >
void
MPIManager::communicate_off_grid_spike_data_Alltoall( std::vector< D >& send_buffer, std::vector< D >& recv_buffer )
{
  const size_t send_recv_count_off_grid_spike_data_in_int_per_rank =
    sizeof( OffGridSpikeData ) / sizeof( unsigned int ) * send_recv_count_spike_data_per_rank_;

  communicate_Alltoall( send_buffer, recv_buffer, send_recv_count_off_grid_spike_data_in_int_per_rank );
}

} // namespace nest

#endif /* MPI_MANAGER_H */