/*
 *  event_delivery_manager_impl.h
 *
 *  This file is part of NEST.
 *
 *  Copyright (C) 2004 The NEST Initiative
 *
 *  NEST is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation, either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  NEST is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with NEST.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef EVENT_DELIVERY_MANAGER_IMPL_H
#define EVENT_DELIVERY_MANAGER_IMPL_H

#include "event_delivery_manager.h"

// Includes from nestkernel:
#include "connection_manager_impl.h"
#include "kernel_manager.h"

namespace nest
{

35 template < class EventT >
36 inline void
send_local_(Node & source,EventT & e,const long lag)37 EventDeliveryManager::send_local_( Node& source, EventT& e, const long lag )
38 {
39 assert( not source.has_proxies() );
40 e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( lag + 1 ) );
41 e.set_sender( source );
42 const thread t = source.get_thread();
43 const index ldid = source.get_local_device_id();
44 kernel().connection_manager.send_from_device( t, ldid, e );
45 }
46
47 inline void
send_local_(Node & source,SecondaryEvent & e,const long)48 EventDeliveryManager::send_local_( Node& source, SecondaryEvent& e, const long )
49 {
50 assert( not source.has_proxies() );
51 e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( 1 ) );
52 e.set_sender( source );
53 const thread t = source.get_thread();
54 const index ldid = source.get_local_device_id();
55 kernel().connection_manager.send_from_device( t, ldid, e );
56 }
57
58 template < class EventT >
59 inline void
send(Node & source,EventT & e,const long lag)60 EventDeliveryManager::send( Node& source, EventT& e, const long lag )
61 {
62 send_local_( source, e, lag );
63 }
64
65 template <>
66 inline void
67 EventDeliveryManager::send< SpikeEvent >( Node& source, SpikeEvent& e, const long lag )
68 {
69 const thread tid = source.get_thread();
70 const index source_node_id = source.get_node_id();
71 e.set_sender_node_id( source_node_id );
72 if ( source.has_proxies() )
73 {
74 local_spike_counter_[ tid ] += e.get_multiplicity();
75
76 e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( lag + 1 ) );
77 e.set_sender( source );
78
79 if ( source.is_off_grid() )
80 {
81 send_off_grid_remote( tid, e, lag );
82 }
83 else
84 {
85 send_remote( tid, e, lag );
86 }
87 kernel().connection_manager.send_to_devices( tid, source_node_id, e );
88 }
89 else
90 {
91 send_local_( source, e, lag );
92 }
93 }
94
95 template <>
96 inline void
97 EventDeliveryManager::send< DSSpikeEvent >( Node& source, DSSpikeEvent& e, const long lag )
98 {
99 e.set_sender_node_id( source.get_node_id() );
100 send_local_( source, e, lag );
101 }
102
103 inline void
send_remote(thread tid,SpikeEvent & e,const long lag)104 EventDeliveryManager::send_remote( thread tid, SpikeEvent& e, const long lag )
105 {
106 // Put the spike in a buffer for the remote machines
107 const index lid = kernel().vp_manager.node_id_to_lid( e.get_sender().get_node_id() );
108 const std::vector< Target >& targets = kernel().connection_manager.get_remote_targets_of_local_node( tid, lid );
109
110 for ( std::vector< Target >::const_iterator it = targets.begin(); it != targets.end(); ++it )
111 {
112 const thread assigned_tid = ( *it ).get_rank() / kernel().vp_manager.get_num_assigned_ranks_per_thread();
113
114 // Unroll spike multiplicity as plastic synapses only handle individual spikes.
115 for ( int i = 0; i < e.get_multiplicity(); ++i )
116 {
117 spike_register_[ tid ][ assigned_tid ][ lag ].push_back( *it );
118 }
119 }
120 }
121
122 inline void
send_off_grid_remote(thread tid,SpikeEvent & e,const long lag)123 EventDeliveryManager::send_off_grid_remote( thread tid, SpikeEvent& e, const long lag )
124 {
125 // Put the spike in a buffer for the remote machines
126 const index lid = kernel().vp_manager.node_id_to_lid( e.get_sender().get_node_id() );
127 const std::vector< Target >& targets = kernel().connection_manager.get_remote_targets_of_local_node( tid, lid );
128
129 for ( std::vector< Target >::const_iterator it = targets.begin(); it != targets.end(); ++it )
130 {
131 const thread assigned_tid = ( *it ).get_rank() / kernel().vp_manager.get_num_assigned_ranks_per_thread();
132
133 // Unroll spike multiplicity as plastic synapses only handle individual spikes.
134 for ( int i = 0; i < e.get_multiplicity(); ++i )
135 {
136 off_grid_spike_register_[ tid ][ assigned_tid ][ lag ].push_back( OffGridTarget( *it, e.get_offset() ) );
137 }
138 }
139 }
140
141 inline void
send_secondary(Node & source,SecondaryEvent & e)142 EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e )
143 {
144 const thread tid = kernel().vp_manager.get_thread_id();
145 const index source_node_id = source.get_node_id();
146 const index lid = kernel().vp_manager.node_id_to_lid( source_node_id );
147
148 if ( source.has_proxies() )
149 {
150
151 // We need to consider every synapse type this event supports to
152 // make sure also labeled and connection created by CopyModel are
153 // considered.
154 const std::vector< synindex >& supported_syn_ids = e.get_supported_syn_ids();
155 for ( std::vector< synindex >::const_iterator cit = supported_syn_ids.begin(); cit != supported_syn_ids.end();
156 ++cit )
157 {
158 const std::vector< size_t >& positions =
159 kernel().connection_manager.get_secondary_send_buffer_positions( tid, lid, *cit );
160
161 for ( size_t i = 0; i < positions.size(); ++i )
162 {
163 std::vector< unsigned int >::iterator it = send_buffer_secondary_events_.begin() + positions[ i ];
164 e >> it;
165 }
166 }
167 kernel().connection_manager.send_to_devices( tid, source_node_id, e );
168 }
169 else
170 {
171 send_local_( source, e, 0 ); // need to pass lag (last argument), but not
172 // used in template specialization, so pass
173 // zero as dummy value
174 }
175 }
176
177 inline size_t
write_toggle()178 EventDeliveryManager::write_toggle() const
179 {
180 return kernel().simulation_manager.get_slice() % 2;
181 }
182

} // of namespace nest

#endif