/******************************************************************************
 * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

#include <test/test_util.h>

namespace histogram_gmem_atomics
{
    // Decode float4 pixel into bins
    template <int NUM_BINS, int ACTIVE_CHANNELS>
    __device__ __forceinline__ void DecodePixel(float4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS])
    {
        float* samples = reinterpret_cast<float*>(&pixel);

        // Map a sample in [0, 1] to a bin index, clamping so that a sample of
        // exactly 1.0f falls into the last bin instead of one past the end
        #pragma unroll
        for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
            bins[CHANNEL] = min((unsigned int) (samples[CHANNEL] * float(NUM_BINS)), (unsigned int) NUM_BINS - 1);
    }

    // Decode uchar4 pixel into bins (assumes the 8-bit sample value is a
    // valid bin index, i.e. NUM_BINS >= 256)
    template <int NUM_BINS, int ACTIVE_CHANNELS>
    __device__ __forceinline__ void DecodePixel(uchar4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS])
    {
        unsigned char* samples = reinterpret_cast<unsigned char*>(&pixel);

        #pragma unroll
        for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
            bins[CHANNEL] = (unsigned int) (samples[CHANNEL]);
    }

    // Decode uchar1 pixel into bins
    template <int NUM_BINS, int ACTIVE_CHANNELS>
    __device__ __forceinline__ void DecodePixel(uchar1 pixel, unsigned int (&bins)[ACTIVE_CHANNELS])
    {
        bins[0] = (unsigned int) pixel.x;
    }

    // First-pass histogram kernel (binning into privatized counters)
    template <
        int         NUM_PARTS,
        int         ACTIVE_CHANNELS,
        int         NUM_BINS,
        typename    PixelType>
    __global__ void histogram_gmem_atomics(
        const PixelType *in,
        int width,
        int height,
        unsigned int *out)
    {
        // global position and size
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        int y = blockIdx.y * blockDim.y + threadIdx.y;
        int nx = blockDim.x * gridDim.x;
        int ny = blockDim.y * gridDim.y;

        // threads in workgroup
        int t = threadIdx.x + threadIdx.y * blockDim.x; // thread index in workgroup, linear in 0..nt-1
        int nt = blockDim.x * blockDim.y; // total threads in workgroup

        // group index in 0..ngroups-1
        int g = blockIdx.x + blockIdx.y * gridDim.x;

        // zero this block's partial histogram in global memory
        unsigned int *gmem = out + g * NUM_PARTS;
        for (int i = t; i < ACTIVE_CHANNELS * NUM_BINS; i += nt)
            gmem[i] = 0;
        __syncthreads();

        // process pixels (updates our group's partial histogram in gmem)
        for (int col = x; col < width; col += nx)
        {
            for (int row = y; row < height; row += ny)
            {
                PixelType pixel = in[row * width + col];

                unsigned int bins[ACTIVE_CHANNELS];
                DecodePixel<NUM_BINS>(pixel, bins);

                #pragma unroll
                for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
                    atomicAdd(&gmem[(NUM_BINS * CHANNEL) + bins[CHANNEL]], 1);
            }
        }
    }

    // Second-pass histogram kernel (accumulation). Block g's partial
    // histogram occupies in[g * NUM_PARTS .. g * NUM_PARTS + ACTIVE_CHANNELS * NUM_BINS),
    // so output bin i is the sum of in[i + NUM_PARTS * j] over all n blocks.
    template <
        int         NUM_PARTS,
        int         ACTIVE_CHANNELS,
        int         NUM_BINS>
    __global__ void histogram_gmem_accum(
        const unsigned int *in,
        int n,
        unsigned int *out)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i >= ACTIVE_CHANNELS * NUM_BINS)
            return; // out of range

        unsigned int total = 0;
        for (int j = 0; j < n; j++)
            total += in[i + NUM_PARTS * j];

        out[i] = total;
    }


}   // namespace histogram_gmem_atomics


template <
    int         ACTIVE_CHANNELS,
    int         NUM_BINS,
    typename    PixelType>
double run_gmem_atomics(
    PixelType *d_image,
    int width,
    int height,
    unsigned int *d_hist,
    bool warmup)
{
    enum
    {
        NUM_PARTS = 1024
    };

    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);

    dim3 block(32, 4);
    dim3 grid(16, 16);
    int total_blocks = grid.x * grid.y;

    // allocate partial histogram
    unsigned int *d_part_hist;
    cudaMalloc(&d_part_hist, total_blocks * NUM_PARTS * sizeof(unsigned int));

    dim3 block2(128);
    dim3 grid2((ACTIVE_CHANNELS * NUM_BINS + block2.x - 1) / block2.x);  // one thread per output bin

    GpuTimer gpu_timer;
    gpu_timer.Start();

    histogram_gmem_atomics::histogram_gmem_atomics<NUM_PARTS, ACTIVE_CHANNELS, NUM_BINS><<<grid, block>>>(
        d_image,
        width,
        height,
        d_part_hist);

    histogram_gmem_atomics::histogram_gmem_accum<NUM_PARTS, ACTIVE_CHANNELS, NUM_BINS><<<grid2, block2>>>(
        d_part_hist,
        total_blocks,
        d_hist);

    gpu_timer.Stop();
    float elapsed_millis = gpu_timer.ElapsedMillis();

    cudaFree(d_part_hist);

    return elapsed_millis;
}
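
// ---------------------------------------------------------------------------
// Minimal usage sketch, not part of the original harness: a hypothetical host
// routine that histograms a zero-filled uchar4 image with 3 active channels
// and 256 bins. The function name, image dimensions, and the cudaMemset
// stand-in for real image data are assumptions made for illustration; error
// checking is omitted for brevity.
// ---------------------------------------------------------------------------
inline void example_histogram()
{
    const int width = 1920, height = 1080;

    uchar4 *d_image;
    unsigned int *d_hist;
    cudaMalloc(&d_image, width * height * sizeof(uchar4));
    cudaMalloc(&d_hist, 3 * 256 * sizeof(unsigned int));
    cudaMemset(d_image, 0, width * height * sizeof(uchar4));   // stand-in for uploading real pixels

    // Time the two-pass histogram (privatized binning + accumulation)
    double ms = run_gmem_atomics<3, 256, uchar4>(d_image, width, height, d_hist, false);

    // Copy the final 3 x 256 histogram back to the host
    unsigned int h_hist[3 * 256];
    cudaMemcpy(h_hist, d_hist, sizeof(h_hist), cudaMemcpyDeviceToHost);
    printf("histogram took %.3f ms; channel 0, bin 0 = %u\n", ms, h_hist[0]);

    cudaFree(d_hist);
    cudaFree(d_image);
}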