/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/**
 * \file
 * cub::GridQueue is a descriptor utility for dynamic queue management.
 */

#pragma once

#include "../util_namespace.cuh"
#include "../util_debug.cuh"

/// Optional outer namespace(s)
CUB_NS_PREFIX

/// CUB namespace
namespace cub {


/**
 * \addtogroup GridModule
 * @{
 */

/**
 * \brief GridQueue is a descriptor utility for dynamic queue management.
 *
 * \par Overview
 * GridQueue descriptors provide abstractions for "filling" or
 * "draining" globally-shared vectors.
 *
 * \par
 * A "filling" GridQueue works by atomically-adding to a zero-initialized counter,
 * returning a unique offset for the calling thread to write its items.
 * The GridQueue maintains the total "fill-size".  The fill counter must be reset
 * using GridQueue::ResetFill by the host or kernel instance prior to the kernel instance that
 * will be filling.
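 *
 * \par
 * A minimal sketch of a filling kernel (the kernel and buffer names below are
 * illustrative, not part of CUB; the fill counter is assumed to have been reset
 * beforehand and \p d_out is assumed to be large enough for all items):
 * \code
 * __global__ void FillKernel(cub::GridQueue<int> queue, int *d_out)
 * {
 *     // Each thread atomically reserves one slot and writes its item there
 *     int offset = queue.Fill(1);
 *     d_out[offset] = blockIdx.x * blockDim.x + threadIdx.x;
 * }
 * \endcode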
 *
 * \par
 * Similarly, a "draining" GridQueue works by atomically-incrementing a
 * zero-initialized counter, returning a unique offset for the calling thread to
 * read its items.  Threads can safely drain until the array's logical fill-size is
 * exceeded.  The drain counter must be reset using GridQueue::ResetDrain or
 * GridQueue::FillAndResetDrain by the host or kernel instance prior to the kernel instance that
 * will be draining.  (For dynamic work distribution of existing data, the corresponding fill-size
 * is simply the number of elements in the array.)
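 *
 * \par
 * A minimal sketch of a draining kernel in which each thread block repeatedly
 * grabs a tile of work (again, the kernel and buffer names are illustrative;
 * GridQueue::FillAndResetDrain is assumed to have been called beforehand):
 * \code
 * __global__ void DrainKernel(cub::GridQueue<int> queue, int *d_in, int num_items)
 * {
 *     __shared__ int block_offset;
 *
 *     while (true)
 *     {
 *         // One thread per block atomically grabs a tile of work
 *         if (threadIdx.x == 0)
 *             block_offset = queue.Drain((int) blockDim.x);
 *         __syncthreads();
 *
 *         if (block_offset >= num_items)
 *             break;                              // queue exhausted
 *
 *         // Guard reads against the logical fill-size
 *         int offset = block_offset + threadIdx.x;
 *         if (offset < num_items)
 *         {
 *             int item = d_in[offset];
 *             // ... process item
 *         }
 *         __syncthreads();                        // before block_offset is overwritten
 *     }
 * }
 * \endcode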
 *
 * \par
 * Iterative work management can be implemented simply with a pair of flip-flopping
 * work buffers, each with an associated set of fill and drain GridQueue descriptors.
 *
 * \tparam OffsetT Signed integer type for global offsets
 */
template <typename OffsetT>
class GridQueue
{
private:

    /// Counter indices
    enum
    {
        FILL    = 0,
        DRAIN   = 1,
    };

    /// Pair of counters
    OffsetT *d_counters;

public:

    /// Returns the device allocation size in bytes needed to construct a GridQueue instance
    __host__ __device__ __forceinline__
    static size_t AllocationSize()
    {
        return sizeof(OffsetT) * 2;
    }


    /// Constructs an invalid GridQueue descriptor
    __host__ __device__ __forceinline__ GridQueue()
    :
        d_counters(NULL)
    {}


    /// Constructs a GridQueue descriptor around the device storage allocation
    __host__ __device__ __forceinline__ GridQueue(
        void *d_storage)    ///< Device allocation to back the GridQueue.  Must be at least as big as <tt>AllocationSize()</tt>.
    :
        d_counters((OffsetT*) d_storage)
    {}


    /// This operation sets the fill-size and resets the drain counter, preparing the GridQueue for draining in the next kernel instance.  To be called by the host or by a kernel prior to that which will be draining.
    __host__ __device__ __forceinline__ cudaError_t FillAndResetDrain(
        OffsetT fill_size,
        cudaStream_t stream = 0)
    {
#if (CUB_PTX_ARCH > 0)
        (void)stream;
        d_counters[FILL] = fill_size;
        d_counters[DRAIN] = 0;
        return cudaSuccess;
#else
        OffsetT counters[2];
        counters[FILL] = fill_size;
        counters[DRAIN] = 0;
        return CubDebug(cudaMemcpyAsync(d_counters, counters, sizeof(OffsetT) * 2, cudaMemcpyHostToDevice, stream));
#endif
    }


    /// This operation resets the drain so that it may advance to meet the existing fill-size.  To be called by the host or by a kernel prior to that which will be draining.
    __host__ __device__ __forceinline__ cudaError_t ResetDrain(cudaStream_t stream = 0)
    {
#if (CUB_PTX_ARCH > 0)
        (void)stream;
        d_counters[DRAIN] = 0;
        return cudaSuccess;
#else
        return CubDebug(cudaMemsetAsync(d_counters + DRAIN, 0, sizeof(OffsetT), stream));
#endif
    }


    /// This operation resets the fill counter.  To be called by the host or by a kernel prior to that which will be filling.
    __host__ __device__ __forceinline__ cudaError_t ResetFill(cudaStream_t stream = 0)
    {
#if (CUB_PTX_ARCH > 0)
        (void)stream;
        d_counters[FILL] = 0;
        return cudaSuccess;
#else
        return CubDebug(cudaMemsetAsync(d_counters + FILL, 0, sizeof(OffsetT), stream));
#endif
    }


    /// Returns the fill-size established by the parent or by the previous kernel.
    __host__ __device__ __forceinline__ cudaError_t FillSize(
        OffsetT &fill_size,
        cudaStream_t stream = 0)
    {
#if (CUB_PTX_ARCH > 0)
        (void)stream;
        fill_size = d_counters[FILL];
        return cudaSuccess;
#else
        return CubDebug(cudaMemcpyAsync(&fill_size, d_counters + FILL, sizeof(OffsetT), cudaMemcpyDeviceToHost, stream));
#endif
    }


    /// Drain \p num_items from the queue.  Returns offset from which to read items.  To be called from CUDA kernel.
    __device__ __forceinline__ OffsetT Drain(OffsetT num_items)
    {
        return atomicAdd(d_counters + DRAIN, num_items);
    }


    /// Fill \p num_items into the queue.  Returns offset from which to write items.  To be called from CUDA kernel.
    __device__ __forceinline__ OffsetT Fill(OffsetT num_items)
    {
        return atomicAdd(d_counters + FILL, num_items);
    }
};


#ifndef DOXYGEN_SHOULD_SKIP_THIS    // Do not document


/**
 * Reset grid queue (call with 1 block of 1 thread)
 */
template <typename OffsetT>
__global__ void FillAndResetDrainKernel(
    GridQueue<OffsetT>  grid_queue,
    OffsetT             num_items)
{
    grid_queue.FillAndResetDrain(num_items);
}
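
// A minimal host-side sketch of resetting the queue on the device (the stream
// and variable names are illustrative; per the comment above, the kernel is
// launched with a single block of a single thread):
//
//     FillAndResetDrainKernel<<<1, 1, 0, stream>>>(grid_queue, num_items);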



#endif  // DOXYGEN_SHOULD_SKIP_THIS


/** @} */  // end group GridModule

}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)