
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/**
 * \file
 * cub::DeviceRunLengthEncode provides device-wide, parallel operations for computing a run-length encoding across a sequence of data items residing within device-accessible memory.
 */

#pragma once

#include <stdio.h>
#include <iterator>

#include "dispatch/dispatch_rle.cuh"
#include "dispatch/dispatch_reduce_by_key.cuh"
#include "../util_namespace.cuh"

/// Optional outer namespace(s)
CUB_NS_PREFIX

/// CUB namespace
namespace cub {

/**
 * \brief DeviceRunLengthEncode provides device-wide, parallel operations for demarcating "runs" of same-valued items within a sequence residing within device-accessible memory. ![](run_length_encode_logo.png)
 * \ingroup SingleModule
 *
 * \par Overview
 * A <a href="http://en.wikipedia.org/wiki/Run-length_encoding"><em>run-length encoding</em></a>
 * computes a simple compressed representation of a sequence of input elements such that each
 * maximal "run" of consecutive same-valued data items is encoded as a single data value along with a
 * count of the elements in that run.
 *
 * \par Usage Considerations
 * \cdp_class{DeviceRunLengthEncode}
 *
 * \par Performance
 * \linear_performance{run-length encode}
 *
 * \par
 * The following chart illustrates DeviceRunLengthEncode::RunLengthEncode performance across
 * different CUDA architectures for \p int32 items.
 * Segments have lengths uniformly sampled from [1,1000].
 *
 * \image html rle_int32_len_500.png
 *
 * \par
 * \plots_below
 *
 */
struct DeviceRunLengthEncode
{

    /**
     * \brief Computes a run-length encoding of the sequence \p d_in.
     *
     * \par
     * - For the <em>i</em><sup>th</sup> run encountered, the first key of the run and its length are written to
     *   <tt>d_unique_out[<em>i</em>]</tt> and <tt>d_counts_out[<em>i</em>]</tt>,
     *   respectively.
     * - The total number of runs encountered is written to \p d_num_runs_out.
     * - The <tt>==</tt> equality operator is used to determine whether values are equivalent
     * - \devicestorage
     *
     * \par Performance
     * The following charts illustrate saturated encode performance across different
     * CUDA architectures for \p int32 and \p int64 items, respectively.  Segments have
     * lengths uniformly sampled from [1,1000].
     *
     * \image html rle_int32_len_500.png
     * \image html rle_int64_len_500.png
     *
     * \par
     * The following charts are similar, but with segment lengths uniformly sampled from [1,10]:
     *
     * \image html rle_int32_len_5.png
     * \image html rle_int64_len_5.png
     *
     * \par Snippet
     * The code snippet below illustrates the run-length encoding of a sequence of \p int values.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/device/device_run_length_encode.cuh>
     *
     * // Declare, allocate, and initialize device-accessible pointers for input and output
     * int          num_items;          // e.g., 8
     * int          *d_in;              // e.g., [0, 2, 2, 9, 5, 5, 5, 8]
     * int          *d_unique_out;      // e.g., [ ,  ,  ,  ,  ,  ,  ,  ]
     * int          *d_counts_out;      // e.g., [ ,  ,  ,  ,  ,  ,  ,  ]
     * int          *d_num_runs_out;    // e.g., [ ]
     * ...
     *
     * // Determine temporary device storage requirements
     * void     *d_temp_storage = NULL;
     * size_t   temp_storage_bytes = 0;
     * cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items);
     *
     * // Allocate temporary storage
     * cudaMalloc(&d_temp_storage, temp_storage_bytes);
     *
     * // Run encoding
     * cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items);
     *
     * // d_unique_out      <-- [0, 2, 9, 5, 8]
     * // d_counts_out      <-- [1, 2, 1, 3, 1]
     * // d_num_runs_out    <-- [5]
     *
     * \endcode
     *
     * \tparam InputIteratorT           <b>[inferred]</b> Random-access input iterator type for reading input items \iterator
     * \tparam UniqueOutputIteratorT    <b>[inferred]</b> Random-access output iterator type for writing unique output items \iterator
     * \tparam LengthsOutputIteratorT   <b>[inferred]</b> Random-access output iterator type for writing output counts \iterator
     * \tparam NumRunsOutputIteratorT   <b>[inferred]</b> Output iterator type for recording the number of runs encountered \iterator
     */
    template <
        typename                    InputIteratorT,
        typename                    UniqueOutputIteratorT,
        typename                    LengthsOutputIteratorT,
        typename                    NumRunsOutputIteratorT>
    CUB_RUNTIME_FUNCTION __forceinline__
    static cudaError_t Encode(
        void*                       d_temp_storage,                 ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
        size_t                      &temp_storage_bytes,            ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
        InputIteratorT              d_in,                           ///< [in] Pointer to the input sequence of keys
        UniqueOutputIteratorT       d_unique_out,                   ///< [out] Pointer to the output sequence of unique keys (one key per run)
        LengthsOutputIteratorT      d_counts_out,                   ///< [out] Pointer to the output sequence of run-lengths (one count per run)
        NumRunsOutputIteratorT      d_num_runs_out,                 ///< [out] Pointer to total number of runs
        int                         num_items,                      ///< [in] Total number of input items (i.e., the length of \p d_in)
        cudaStream_t                stream             = 0,         ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
        bool                        debug_synchronous  = false)     ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  May cause significant slowdown.  Default is \p false.
    {
        typedef int         OffsetT;                    // Signed integer type for global offsets
        typedef NullType*   FlagIterator;               // FlagT iterator type (not used)
        typedef NullType    SelectOp;                   // Selection op (not used)
        typedef Equality    EqualityOp;                 // Default == operator
        typedef cub::Sum    ReductionOp;                // Value reduction operator

        // The lengths output value type
        typedef typename If<(Equals<typename std::iterator_traits<LengthsOutputIteratorT>::value_type, void>::VALUE),   // LengthT =  (if output iterator's value type is void) ?
            OffsetT,                                                                                                    // ... then the OffsetT type,
            typename std::iterator_traits<LengthsOutputIteratorT>::value_type>::Type LengthT;                           // ... else the output iterator's value type

        // Generator type for providing 1s values for run-length reduction
        typedef ConstantInputIterator<LengthT, OffsetT> LengthsInputIteratorT;

        // Run-length encode is implemented as a reduce-by-key over a constant
        // stream of 1s: equal adjacent keys are segmented, and summing the 1s
        // within each segment yields the run length.
        return DispatchReduceByKey<InputIteratorT, UniqueOutputIteratorT, LengthsInputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT, EqualityOp, ReductionOp, OffsetT>::Dispatch(
            d_temp_storage,
            temp_storage_bytes,
            d_in,
            d_unique_out,
            LengthsInputIteratorT((LengthT) 1),
            d_counts_out,
            d_num_runs_out,
            EqualityOp(),
            ReductionOp(),
            num_items,
            stream,
            debug_synchronous);
    }


    /**
     * \brief Enumerates the starting offsets and lengths of all non-trivial runs (of length > 1) of same-valued keys in the sequence \p d_in.
     *
     * \par
     * - For the <em>i</em><sup>th</sup> non-trivial run, the run's starting offset
     *   and its length are written to <tt>d_offsets_out[<em>i</em>]</tt> and
     *   <tt>d_lengths_out[<em>i</em>]</tt>, respectively.
     * - The total number of runs encountered is written to \p d_num_runs_out.
     * - The <tt>==</tt> equality operator is used to determine whether values are equivalent
     * - \devicestorage
     *
     * \par Performance
     *
     * \par Snippet
     * The code snippet below illustrates the identification of non-trivial runs within a sequence of \p int values.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/device/device_run_length_encode.cuh>
     *
     * // Declare, allocate, and initialize device-accessible pointers for input and output
     * int          num_items;          // e.g., 8
     * int          *d_in;              // e.g., [0, 2, 2, 9, 5, 5, 5, 8]
     * int          *d_offsets_out;     // e.g., [ ,  ,  ,  ,  ,  ,  ,  ]
     * int          *d_lengths_out;     // e.g., [ ,  ,  ,  ,  ,  ,  ,  ]
     * int          *d_num_runs_out;    // e.g., [ ]
     * ...
     *
     * // Determine temporary device storage requirements
     * void     *d_temp_storage = NULL;
     * size_t   temp_storage_bytes = 0;
     * cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items);
     *
     * // Allocate temporary storage
     * cudaMalloc(&d_temp_storage, temp_storage_bytes);
     *
     * // Run encoding
     * cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items);
     *
     * // d_offsets_out         <-- [1, 4]
     * // d_lengths_out         <-- [2, 3]
     * // d_num_runs_out        <-- [2]
     *
     * \endcode
     *
     * \tparam InputIteratorT           <b>[inferred]</b> Random-access input iterator type for reading input items \iterator
     * \tparam OffsetsOutputIteratorT   <b>[inferred]</b> Random-access output iterator type for writing run-offset values \iterator
     * \tparam LengthsOutputIteratorT   <b>[inferred]</b> Random-access output iterator type for writing run-length values \iterator
     * \tparam NumRunsOutputIteratorT   <b>[inferred]</b> Output iterator type for recording the number of runs encountered \iterator
     */
    template <
        typename                InputIteratorT,
        typename                OffsetsOutputIteratorT,
        typename                LengthsOutputIteratorT,
        typename                NumRunsOutputIteratorT>
    CUB_RUNTIME_FUNCTION __forceinline__
    static cudaError_t NonTrivialRuns(
        void*                   d_temp_storage,                 ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
        size_t                  &temp_storage_bytes,            ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
        InputIteratorT          d_in,                           ///< [in] Pointer to input sequence of data items
        OffsetsOutputIteratorT  d_offsets_out,                  ///< [out] Pointer to output sequence of run-offsets (one offset per non-trivial run)
        LengthsOutputIteratorT  d_lengths_out,                  ///< [out] Pointer to output sequence of run-lengths (one count per non-trivial run)
        NumRunsOutputIteratorT  d_num_runs_out,                 ///< [out] Pointer to total number of runs (i.e., length of \p d_offsets_out)
        int                     num_items,                      ///< [in] Total number of input items (i.e., the length of \p d_in)
        cudaStream_t            stream             = 0,         ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
        bool                    debug_synchronous  = false)     ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  May cause significant slowdown.  Default is \p false.
    {
        typedef int         OffsetT;                    // Signed integer type for global offsets
        typedef Equality    EqualityOp;                 // Default == operator

        return DeviceRleDispatch<InputIteratorT, OffsetsOutputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT, EqualityOp, OffsetT>::Dispatch(
            d_temp_storage,
            temp_storage_bytes,
            d_in,
            d_offsets_out,
            d_lengths_out,
            d_num_runs_out,
            EqualityOp(),
            num_items,
            stream,
            debug_synchronous);
    }


};


}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)
