/*
 *  connection_creator_impl.h
 *
 *  This file is part of NEST.
 *
 *  Copyright (C) 2004 The NEST Initiative
 *
 *  NEST is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation, either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  NEST is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with NEST.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef CONNECTION_CREATOR_IMPL_H
#define CONNECTION_CREATOR_IMPL_H

#include "connection_creator.h"

// C++ includes:
#include <vector>

// Includes from nestkernel:
#include "kernel_manager.h"
#include "nest.h"

namespace nest
{
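// Entry point for spatial connections: dispatch to the algorithm selected
// by type_ (pairwise Bernoulli on source/target, fixed in- or outdegree).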
template < int D >
void
ConnectionCreator::connect( Layer< D >& source,
  NodeCollectionPTR source_nc,
  Layer< D >& target,
  NodeCollectionPTR target_nc )
{
  switch ( type_ )
  {
  case Pairwise_bernoulli_on_source:

    pairwise_bernoulli_on_source_( source, source_nc, target, target_nc );
    break;

  case Fixed_indegree:

    fixed_indegree_( source, source_nc, target, target_nc );
    break;

  case Fixed_outdegree:

    fixed_outdegree_( source, source_nc, target, target_nc );
    break;

  case Pairwise_bernoulli_on_target:

    pairwise_bernoulli_on_target_( source, source_nc, target, target_nc );
    break;

  default:
    throw BadProperty( "Unknown connection type." );
  }
}

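// Connect one target node to the candidate sources in [from, to).
// Autapses are skipped if disallowed; if a kernel is set, each candidate is
// accepted with the kernel value as Bernoulli probability, and one
// connection is created per configured synapse model.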
template < typename Iterator, int D >
void
ConnectionCreator::connect_to_target_( Iterator from,
  Iterator to,
  Node* tgt_ptr,
  const Position< D >& tgt_pos,
  thread tgt_thread,
  const Layer< D >& source )
{
  RngPtr rng = get_vp_specific_rng( tgt_thread );

  // We create a source pos vector here that can be updated with the
  // source position. This is done to avoid creating and destroying
  // unnecessarily many vectors.
  std::vector< double > source_pos( D );
  const std::vector< double > target_pos = tgt_pos.get_vector();

  const bool without_kernel = not kernel_.get();
  for ( Iterator iter = from; iter != to; ++iter )
  {
    if ( ( not allow_autapses_ ) and ( iter->second == tgt_ptr->get_node_id() ) )
    {
      continue;
    }
    iter->first.get_vector( source_pos );

    if ( without_kernel or rng->drand() < kernel_->value( rng, source_pos, target_pos, source, tgt_ptr ) )
    {
      for ( size_t indx = 0; indx < synapse_model_.size(); ++indx )
      {
        kernel().connection_manager.connect( iter->second,
          tgt_ptr,
          tgt_thread,
          synapse_model_[ indx ],
          param_dicts_[ indx ][ tgt_thread ],
          delay_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ),
          weight_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ) );
      }
    }
  }
}

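// PoolWrapper_ holds the pool of candidate sources for a connection call:
// either a MaskedLayer (when a mask is given), which it owns and deletes in
// its destructor, or a plain vector of (position, node ID) pairs, which it
// only references.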
template < int D >
ConnectionCreator::PoolWrapper_< D >::PoolWrapper_()
  : masked_layer_( 0 )
  , positions_( 0 )
{
}

template < int D >
ConnectionCreator::PoolWrapper_< D >::~PoolWrapper_()
{
  if ( masked_layer_ )
  {
    delete masked_layer_;
  }
}

template < int D >
void
ConnectionCreator::PoolWrapper_< D >::define( MaskedLayer< D >* ml )
{
  assert( masked_layer_ == 0 );
  assert( positions_ == 0 );
  assert( ml != 0 );
  masked_layer_ = ml;
}

template < int D >
void
ConnectionCreator::PoolWrapper_< D >::define( std::vector< std::pair< Position< D >, index > >* pos )
{
  assert( masked_layer_ == 0 );
  assert( positions_ == 0 );
  assert( pos != 0 );
  positions_ = pos;
}

template < int D >
typename Ntree< D, index >::masked_iterator
ConnectionCreator::PoolWrapper_< D >::masked_begin( const Position< D >& pos ) const
{
  return masked_layer_->begin( pos );
}

template < int D >
typename Ntree< D, index >::masked_iterator
ConnectionCreator::PoolWrapper_< D >::masked_end() const
{
  return masked_layer_->end();
}

template < int D >
typename std::vector< std::pair< Position< D >, index > >::iterator
ConnectionCreator::PoolWrapper_< D >::begin() const
{
  return positions_->begin();
}

template < int D >
typename std::vector< std::pair< Position< D >, index > >::iterator
ConnectionCreator::PoolWrapper_< D >::end() const
{
  return positions_->end();
}


template < int D >
void
ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source,
  NodeCollectionPTR source_nc,
  Layer< D >& target,
  NodeCollectionPTR target_nc )
{
  // Connect using pairwise Bernoulli drawing source nodes (target driven)
  // For each local target node:
  //  1. Apply Mask to source layer
  //  2. For each source node: Compute probability, draw random number, make
  //     connection conditionally

  // retrieve global positions, either for masked or unmasked pool
  PoolWrapper_< D > pool;
  if ( mask_.get() ) // MaskedLayer will be freed by PoolWrapper d'tor
  {
    pool.define( new MaskedLayer< D >( source, mask_, allow_oversized_, source_nc ) );
  }
  else
  {
    pool.define( source.get_global_positions_vector( source_nc ) );
  }

  std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() );

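  // Each thread walks the full target NodeCollection; get_node_or_proxy()
  // returns a proxy for targets not assigned to the calling thread, and the
  // is_proxy() check below ensures every target is connected exactly once.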
// sharing specs on next line commented out because gcc 4.2 cannot handle them
#pragma omp parallel // default(none) shared(source, target, masked_layer,
                     // target_begin, target_end)
  {
    const int thread_id = kernel().vp_manager.get_thread_id();
    try
    {
      NodeCollection::const_iterator target_begin = target_nc->begin();
      NodeCollection::const_iterator target_end = target_nc->end();

      for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it )
      {
        Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id );

        if ( not tgt->is_proxy() )
        {
          const Position< D > target_pos = target.get_position( ( *tgt_it ).lid );

          if ( mask_.get() )
          {
            connect_to_target_(
              pool.masked_begin( target_pos ), pool.masked_end(), tgt, target_pos, thread_id, source );
          }
          else
          {
            connect_to_target_( pool.begin(), pool.end(), tgt, target_pos, thread_id, source );
          }
        }
      } // for target_begin
    }
    catch ( std::exception& err )
    {
      // We must create a new exception here, err's lifetime ends at
      // the end of the catch block.
      exceptions_raised_.at( thread_id ) =
        std::shared_ptr< WrappedThreadException >( new WrappedThreadException( err ) );
    }
  } // omp parallel
  // check if any exceptions have been raised
  for ( thread thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr )
  {
    if ( exceptions_raised_.at( thr ).get() )
    {
      throw WrappedThreadException( *( exceptions_raised_.at( thr ) ) );
    }
  }
}


template < int D >
void
ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source,
  NodeCollectionPTR source_nc,
  Layer< D >& target,
  NodeCollectionPTR target_nc )
{
  // Connect using pairwise Bernoulli drawing target nodes (source driven)
  // It is actually implemented as pairwise Bernoulli on source nodes,
  // but with displacements computed in the target layer. The Mask has been
  // reversed so that it can be applied to the source instead of the target.
  // For each local target node:
  //  1. Apply (Converse)Mask to source layer
  //  2. For each source node: Compute probability, draw random number, make
  //     connection conditionally

  PoolWrapper_< D > pool;
  if ( mask_.get() ) // MaskedLayer will be freed by PoolWrapper d'tor
  {
    // By supplying the target layer to the MaskedLayer constructor, the
    // mask is mirrored so it may be applied to the source layer instead
    pool.define( new MaskedLayer< D >( source, mask_, allow_oversized_, target, source_nc ) );
  }
  else
  {
    pool.define( source.get_global_positions_vector( source_nc ) );
  }

  std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() );

  // We only need to check the first in the NodeCollection
  Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) );
  if ( not first_in_tgt->has_proxies() )
  {
    throw IllegalConnection( "Spatial Connect with pairwise_bernoulli to devices is not possible." );
  }

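  // In contrast to pairwise_bernoulli_on_source_, each thread iterates only
  // the targets assigned to it (local_begin()), so none of them is a proxy;
  // hence the assert below instead of a skip.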
// sharing specs on next line commented out because gcc 4.2 cannot handle them
#pragma omp parallel // default(none) shared(source, target, masked_layer,
                     // target_begin, target_end)
  {
    const int thread_id = kernel().vp_manager.get_thread_id();
    try
    {
      NodeCollection::const_iterator target_begin = target_nc->local_begin();
      NodeCollection::const_iterator target_end = target_nc->end();

      for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it )
      {
        Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id );

        assert( not tgt->is_proxy() );

        const Position< D > target_pos = target.get_position( ( *tgt_it ).lid );

        if ( mask_.get() )
        {
          // We do the same as in the target driven case, except that we calculate displacements in the target layer.
          // We therefore send in target as last parameter.
          connect_to_target_( pool.masked_begin( target_pos ), pool.masked_end(), tgt, target_pos, thread_id, target );
        }
        else
        {
          // We do the same as in the target driven case, except that we calculate displacements in the target layer.
          // We therefore send in target as last parameter.
          connect_to_target_( pool.begin(), pool.end(), tgt, target_pos, thread_id, target );
        }

      } // end for
    }
    catch ( std::exception& err )
    {
      // We must create a new exception here, err's lifetime ends at the end of the catch block.
      exceptions_raised_.at( thread_id ) =
        std::shared_ptr< WrappedThreadException >( new WrappedThreadException( err ) );
    }
  } // omp parallel
  // check if any exceptions have been raised
  for ( thread thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr )
  {
    if ( exceptions_raised_.at( thr ).get() )
    {
      throw WrappedThreadException( *( exceptions_raised_.at( thr ) ) );
    }
  }
}

template < int D >
void
ConnectionCreator::fixed_indegree_( Layer< D >& source,
  NodeCollectionPTR source_nc,
  Layer< D >& target,
  NodeCollectionPTR target_nc )
{
  if ( number_of_connections_ < 1 )
  {
    return;
  }

  // fixed_indegree connections (fixed fan in)
  //
  // For each local target node:
  // 1. Apply Mask to source layer
  // 2. Compute connection probability for each source position
  // 3. Draw source nodes and make connections

  // We only need to check the first in the NodeCollection
  Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) );
  if ( not first_in_tgt->has_proxies() )
  {
    throw IllegalConnection( "Spatial Connect with fixed_indegree to devices is not possible." );
  }

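  // MPI_local_begin() restricts the loop to targets stored on this MPI
  // process; draws for each target use the RNG of the VP that owns it.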
  NodeCollection::const_iterator target_begin = target_nc->MPI_local_begin();
  NodeCollection::const_iterator target_end = target_nc->end();

  // protect against connecting to devices without proxies
  // we need to do this before creating the first connection to leave
  // the network untouched if any target does not have proxies
  for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it )
  {
    Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id );

    assert( not tgt->is_proxy() );
  }

  if ( mask_.get() )
  {
    MaskedLayer< D > masked_source( source, mask_, allow_oversized_, source_nc );
    const auto masked_source_end = masked_source.end();

    std::vector< std::pair< Position< D >, index > > positions;

    for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it )
    {
      index target_id = ( *tgt_it ).node_id;
      Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id );

      thread target_thread = tgt->get_thread();
      RngPtr rng = get_vp_specific_rng( target_thread );
      Position< D > target_pos = target.get_position( ( *tgt_it ).lid );

      // We create a source pos vector here that can be updated with the
      // source position. This is done to avoid creating and destroying
      // unnecessarily many vectors.
      std::vector< double > source_pos_vector( D );
      const std::vector< double > target_pos_vector = target_pos.get_vector();

      // Get (position,node ID) pairs for sources inside mask
      positions.resize( std::distance( masked_source.begin( target_pos ), masked_source_end ) );
      std::copy( masked_source.begin( target_pos ), masked_source_end, positions.begin() );

      // We will select `number_of_connections_` sources within the mask.
      // If there is no kernel, we can just draw uniform random numbers,
      // but with a kernel we have to set up a probability distribution
      // function using a discrete_distribution.
      if ( kernel_.get() )
      {

        std::vector< double > probabilities;
        probabilities.reserve( positions.size() );

        // Collect probabilities for the sources
        for ( typename std::vector< std::pair< Position< D >, index > >::iterator iter = positions.begin();
              iter != positions.end();
              ++iter )
        {
          iter->first.get_vector( source_pos_vector );
          probabilities.push_back( kernel_->value( rng, source_pos_vector, target_pos_vector, source, tgt ) );
        }

        if ( positions.empty()
          or ( ( not allow_autapses_ ) and ( positions.size() == 1 ) and ( positions[ 0 ].second == target_id ) )
          or ( ( not allow_multapses_ ) and ( positions.size() < number_of_connections_ ) ) )
        {
          std::string msg = String::compose( "Global target ID %1: Not enough sources found inside mask", target_id );
          throw KernelException( msg.c_str() );
        }

        // A discrete_distribution draws random integers with a non-uniform
        // distribution.
        discrete_distribution lottery;
        const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() );
        lottery.param( param );

        // If multapses are not allowed, we must keep track of which
        // sources have been selected already.
        std::vector< bool > is_selected( positions.size() );

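        // A draw that violates the multapse or autapse constraint is
        // discarded and repeated (--i below), so the loop continues until
        // number_of_connections_ valid sources have been drawn.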
        // Draw `number_of_connections_` sources
        for ( int i = 0; i < ( int ) number_of_connections_; ++i )
        {
          index random_id = lottery( rng );
          if ( ( not allow_multapses_ ) and ( is_selected[ random_id ] ) )
          {
            --i;
            continue;
          }

          index source_id = positions[ random_id ].second;
          if ( ( not allow_autapses_ ) and ( source_id == target_id ) )
          {
            --i;
            continue;
          }
          positions[ random_id ].first.get_vector( source_pos_vector );
          for ( size_t indx = 0; indx < synapse_model_.size(); ++indx )
          {
            const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt );
            const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt );
            kernel().connection_manager.connect(
              source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w );
          }

          is_selected[ random_id ] = true;
        }
      }
      else
      {

        // no kernel

        if ( positions.empty()
          or ( ( not allow_autapses_ ) and ( positions.size() == 1 ) and ( positions[ 0 ].second == target_id ) )
          or ( ( not allow_multapses_ ) and ( positions.size() < number_of_connections_ ) ) )
        {
          std::string msg = String::compose( "Global target ID %1: Not enough sources found inside mask", target_id );
          throw KernelException( msg.c_str() );
        }

        // If multapses are not allowed, we must keep track of which
        // sources have been selected already.
        std::vector< bool > is_selected( positions.size() );

        // Draw `number_of_connections_` sources
        for ( int i = 0; i < ( int ) number_of_connections_; ++i )
        {
          index random_id = rng->ulrand( positions.size() );
          if ( ( not allow_multapses_ ) and ( is_selected[ random_id ] ) )
          {
            --i;
            continue;
          }
          positions[ random_id ].first.get_vector( source_pos_vector );
          index source_id = positions[ random_id ].second;
          for ( size_t indx = 0; indx < synapse_model_.size(); ++indx )
          {
            const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt );
            const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt );
            kernel().connection_manager.connect(
              source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w );
          }

          is_selected[ random_id ] = true;
        }
      }
    }
  }
  else
  {
    // no mask

    // Get (position,node ID) pairs for all nodes in source layer
    std::vector< std::pair< Position< D >, index > >* positions = source.get_global_positions_vector( source_nc );

    for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it )
    {
      index target_id = ( *tgt_it ).node_id;
      Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id );
      thread target_thread = tgt->get_thread();
      RngPtr rng = get_vp_specific_rng( target_thread );
      Position< D > target_pos = target.get_position( ( *tgt_it ).lid );

      std::vector< double > source_pos_vector( D );
      const std::vector< double > target_pos_vector = target_pos.get_vector();

      if ( ( positions->size() == 0 )
        or ( ( not allow_autapses_ ) and ( positions->size() == 1 ) and ( ( *positions )[ 0 ].second == target_id ) )
        or ( ( not allow_multapses_ ) and ( positions->size() < number_of_connections_ ) ) )
      {
        std::string msg = String::compose( "Global target ID %1: Not enough sources found", target_id );
        throw KernelException( msg.c_str() );
      }

      // We will select `number_of_connections_` sources within the mask.
      // If there is no kernel, we can just draw uniform random numbers,
      // but with a kernel we have to set up a probability distribution
      // function using a discrete_distribution.
      if ( kernel_.get() )
      {

        std::vector< double > probabilities;
        probabilities.reserve( positions->size() );

        // Collect probabilities for the sources
        for ( typename std::vector< std::pair< Position< D >, index > >::iterator iter = positions->begin();
              iter != positions->end();
              ++iter )
        {
          iter->first.get_vector( source_pos_vector );
          probabilities.push_back( kernel_->value( rng, source_pos_vector, target_pos_vector, source, tgt ) );
        }

        // A discrete_distribution draws random integers with a non-uniform
        // distribution.
        discrete_distribution lottery;
        const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() );
        lottery.param( param );

        // If multapses are not allowed, we must keep track of which
        // sources have been selected already.
        std::vector< bool > is_selected( positions->size() );

        // Draw `number_of_connections_` sources
        for ( int i = 0; i < ( int ) number_of_connections_; ++i )
        {
          index random_id = lottery( rng );
          if ( ( not allow_multapses_ ) and ( is_selected[ random_id ] ) )
          {
            --i;
            continue;
          }

          index source_id = ( *positions )[ random_id ].second;
          if ( ( not allow_autapses_ ) and ( source_id == target_id ) )
          {
            --i;
            continue;
          }

          ( *positions )[ random_id ].first.get_vector( source_pos_vector );
          for ( size_t indx = 0; indx < synapse_model_.size(); ++indx )
          {
            const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt );
            const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt );
            kernel().connection_manager.connect(
              source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w );
          }

          is_selected[ random_id ] = true;
        }
      }
      else
      {

        // no kernel

        // If multapses are not allowed, we must keep track of which
        // sources have been selected already.
        std::vector< bool > is_selected( positions->size() );

        // Draw `number_of_connections_` sources
        for ( int i = 0; i < ( int ) number_of_connections_; ++i )
        {
          index random_id = rng->ulrand( positions->size() );
          if ( ( not allow_multapses_ ) and ( is_selected[ random_id ] ) )
          {
            --i;
            continue;
          }

          index source_id = ( *positions )[ random_id ].second;
          if ( ( not allow_autapses_ ) and ( source_id == target_id ) )
          {
            --i;
            continue;
          }

          ( *positions )[ random_id ].first.get_vector( source_pos_vector );
          for ( size_t indx = 0; indx < synapse_model_.size(); ++indx )
          {
            const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt );
            const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt );
            kernel().connection_manager.connect(
              source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w );
          }

          is_selected[ random_id ] = true;
        }
      }
    }
  }
}


template < int D >
void
ConnectionCreator::fixed_outdegree_( Layer< D >& source,
  NodeCollectionPTR source_nc,
  Layer< D >& target,
  NodeCollectionPTR target_nc )
{
  if ( number_of_connections_ < 1 )
  {
    return;
  }

  // protect against connecting to devices without proxies
  // we need to do this before creating the first connection to leave
  // the network untouched if any target does not have proxies

  // We only need to check the first in the NodeCollection
  Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) );
  if ( not first_in_tgt->has_proxies() )
  {
    throw IllegalConnection( "Spatial Connect with fixed_outdegree to devices is not possible." );
  }

  NodeCollection::const_iterator target_begin = target_nc->MPI_local_begin();
  NodeCollection::const_iterator target_end = target_nc->end();

  for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it )
  {
    Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id );

    assert( not tgt->is_proxy() );
  }

  // Fixed_outdegree connections (fixed fan out)
  //
  // For each (global) source: (All connections made on all mpi procs)
  // 1. Apply mask to global targets
  // 2. If using kernel: Compute connection probability for each global target
  // 3. Draw connections to make using global rng

  MaskedLayer< D > masked_target( target, mask_, allow_oversized_, target_nc );
  const auto masked_target_end = masked_target.end();

  // We create a target positions vector here that can be updated with the
  // position and node ID pairs. This is done to avoid creating and destroying
  // unnecessarily many vectors.
  std::vector< std::pair< Position< D >, index > > target_pos_node_id_pairs;
  std::vector< std::pair< Position< D >, index > > source_pos_node_id_pairs =
    *source.get_global_positions_vector( source_nc );

  for ( const auto& source_pos_node_id_pair : source_pos_node_id_pairs )
  {
    const Position< D > source_pos = source_pos_node_id_pair.first;
    const index source_id = source_pos_node_id_pair.second;
    const std::vector< double > source_pos_vector = source_pos.get_vector();

    // We create a target pos vector here that can be updated with the
    // target position. This is done to avoid creating and destroying
    // unnecessarily many vectors.
    std::vector< double > target_pos_vector( D );
    std::vector< double > probabilities;

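    // The rank-synchronized RNG makes every MPI process draw the same
    // sequence of targets; the connection itself is created only on the
    // process that owns the drawn target (see the locality check below).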
    // Find potential targets and probabilities
    RngPtr grng = get_rank_synced_rng();
    target_pos_node_id_pairs.resize( std::distance( masked_target.begin( source_pos ), masked_target_end ) );
    std::copy( masked_target.begin( source_pos ), masked_target_end, target_pos_node_id_pairs.begin() );

    probabilities.reserve( target_pos_node_id_pairs.size() );
    if ( kernel_.get() )
    {
      for ( const auto& target_pos_node_id_pair : target_pos_node_id_pairs )
      {
        // TODO: Why is probability calculated in source layer, but weight and delay in target layer?
        target_pos_node_id_pair.first.get_vector( target_pos_vector );
        const auto tgt = kernel().node_manager.get_node_or_proxy( target_pos_node_id_pair.second );
        probabilities.push_back( kernel_->value( grng, source_pos_vector, target_pos_vector, source, tgt ) );
      }
    }
    else
    {
      probabilities.resize( target_pos_node_id_pairs.size(), 1.0 );
    }

    if ( target_pos_node_id_pairs.empty()
      or ( ( not allow_multapses_ ) and ( target_pos_node_id_pairs.size() < number_of_connections_ ) ) )
    {
      std::string msg = String::compose( "Global source ID %1: Not enough targets found", source_id );
      throw KernelException( msg.c_str() );
    }

    // Draw targets.  A discrete_distribution draws random integers with a
    // non-uniform distribution.
    discrete_distribution lottery;
    const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() );
    lottery.param( param );

    // If multapses are not allowed, we must keep track of which
    // targets have been selected already.
    std::vector< bool > is_selected( target_pos_node_id_pairs.size() );

    // Draw `number_of_connections_` targets
    for ( long i = 0; i < ( long ) number_of_connections_; ++i )
    {
      index random_id = lottery( get_rank_synced_rng() );
      if ( ( not allow_multapses_ ) and ( is_selected[ random_id ] ) )
      {
        --i;
        continue;
      }
      index target_id = target_pos_node_id_pairs[ random_id ].second;
      if ( ( not allow_autapses_ ) and ( source_id == target_id ) )
      {
        --i;
        continue;
      }

      is_selected[ random_id ] = true;

      target_pos_node_id_pairs[ random_id ].first.get_vector( target_pos_vector );

      std::vector< double > rng_weight_vec;
      std::vector< double > rng_delay_vec;
      for ( size_t indx = 0; indx < weight_.size(); ++indx )
      {
        const auto tgt = kernel().node_manager.get_node_or_proxy( target_pos_node_id_pairs[ indx ].second );
        rng_weight_vec.push_back( weight_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) );
        rng_delay_vec.push_back( delay_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) );
      }

      // We bail out for non-local neurons only now after all possible
      // random numbers have been drawn. Bailing out any earlier may lead
      // to desynchronized global rngs.
      if ( not kernel().node_manager.is_local_node_id( target_id ) )
      {
        continue;
      }

      Node* target_ptr = kernel().node_manager.get_node_or_proxy( target_id );
      thread target_thread = target_ptr->get_thread();

      for ( size_t indx = 0; indx < synapse_model_.size(); ++indx )
      {
        kernel().connection_manager.connect( source_id,
          target_ptr,
          target_thread,
          synapse_model_[ indx ],
          param_dicts_[ indx ][ target_thread ],
          rng_delay_vec[ indx ],
          rng_weight_vec[ indx ] );
      }
    }
  }
}

} // namespace nest

#endif