OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
block_histogram.cuh
Go to the documentation of this file.
1 /******************************************************************************
2  * Copyright (c) 2011, Duane Merrill. All rights reserved.
3  * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  * * Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * * Redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution.
12  * * Neither the name of the NVIDIA CORPORATION nor the
13  * names of its contributors may be used to endorse or promote products
14  * derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  ******************************************************************************/
28 
34 #pragma once
35 
38 #include "../util_ptx.cuh"
39 #include "../util_arch.cuh"
40 #include "../util_namespace.cuh"
41 
43 CUB_NS_PREFIX
44 
46 namespace cub {
47 
48 
49 /******************************************************************************
50  * Algorithmic variants
51  ******************************************************************************/
52 
57 {
58 
69 
70 
82 };
83 
84 
85 
86 /******************************************************************************
87  * Block histogram
88  ******************************************************************************/
89 
90 
148 template <
149  typename T,
150  int BLOCK_DIM_X,
151  int ITEMS_PER_THREAD,
152  int BINS,
154  int BLOCK_DIM_Y = 1,
155  int BLOCK_DIM_Z = 1,
156  int PTX_ARCH = CUB_PTX_ARCH>
158 {
159 private:
160 
161  /******************************************************************************
162  * Constants and type definitions
163  ******************************************************************************/
164 
166  enum
167  {
169  BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
170  };
171 
179  ((ALGORITHM == BLOCK_HISTO_ATOMIC) && (PTX_ARCH < 120)) ?
181  ALGORITHM;
182 
184  typedef typename If<(SAFE_ALGORITHM == BLOCK_HISTO_SORT),
187 
190 
191 
192  /******************************************************************************
193  * Thread fields
194  ******************************************************************************/
195 
198 
200  unsigned int linear_tid;
201 
202 
203  /******************************************************************************
204  * Utility methods
205  ******************************************************************************/
206 
// Internal storage allocator: lazily declares a function-scope __shared__
// instance of the internal temp-storage layout and returns a reference to it.
// Returning a reference to __shared__ data is safe here (unlike a stack
// local): __shared__ variables live for the lifetime of the thread block,
// and every thread in the block receives a reference to the same object.
208  __device__ __forceinline__ _TempStorage& PrivateStorage()
209  {
210  __shared__ _TempStorage private_storage;
211  return private_storage;
212  }
213 
214 
215 public:
216 
// Opaque shared-memory storage type that callers allocate (typically as
// __shared__) and pass to the collective constructor. The Uninitialized<>
// wrapper (util_type.cuh) lets a type with a non-trivial constructor be
// placed in shared memory / unioned without running that constructor.
218  struct TempStorage : Uninitialized<_TempStorage> {};
219 
220 
221  /******************************************************************/
225 
// Collective constructor using a private static allocation of shared memory
// as temporary storage; computes this thread's row-major linear id from the
// 3D thread-block shape.
// NOTE(review): this Doxygen listing elides original line 231 between the
// ':' and the linear_tid initializer — presumably the temp_storage member
// initialization (via PrivateStorage()) — TODO confirm against the actual
// block_histogram.cuh source.
229  __device__ __forceinline__ BlockHistogram()
230  :
232  linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
233  {}
234 
235 
// Collective constructor using a caller-supplied TempStorage allocation as
// temporary storage (aliased to the internal layout via .Alias()).
// NOTE(review): this Doxygen listing elides original line 240, the parameter
// list — per the signature shown in this page's tooltip text it is
// "TempStorage &temp_storage".
239  __device__ __forceinline__ BlockHistogram(
241  :
242  temp_storage(temp_storage.Alias()),
243  linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
244  {}
245 
246 
248  /******************************************************************/
252 
253 
// Zero-initialize the shared histogram bin counters. Collective: must be
// invoked by all BLOCK_THREADS threads of the block. Each thread clears the
// bins at (stride * BLOCK_THREADS + linear_tid); a final guarded store
// handles the tail when BINS is not a multiple of BLOCK_THREADS. No barrier
// is issued here — callers that read the bins afterwards must synchronize
// (as Histogram() does with CTA_SYNC()).
290  template <typename CounterT >
291  __device__ __forceinline__ void InitHistogram(CounterT histogram[BINS])
292  {
293  // Initialize histogram bin counts to zeros
294  int histo_offset = 0;
295 
296  #pragma unroll
297  for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS)
298  {
299  histogram[histo_offset + linear_tid] = 0;
300  }
301  // Finish up with guarded initialization if necessary
302  if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS))
303  {
304  histogram[histo_offset + linear_tid] = 0;
305  }
306  }
307 
308 
// Construct a block-wide histogram from scratch: zero the bins, issue a
// block-wide barrier (CTA_SYNC) so every thread sees the cleared counters,
// then composite each thread's ITEMS_PER_THREAD samples via the selected
// internal implementation (sort-based or atomic-based, per SAFE_ALGORITHM).
// Collective: must be invoked by all threads of the block.
345  template <
346  typename CounterT >
347  __device__ __forceinline__ void Histogram(
348  T (&items)[ITEMS_PER_THREAD],
349  CounterT histogram[BINS])
350  {
351  // Initialize histogram bin counts to zeros
352  InitHistogram(histogram);
353 
354  CTA_SYNC();
355 
356  // Composite the histogram
357  InternalBlockHistogram(temp_storage).Composite(items, histogram);
358  }
359 
360 
361 
// Accumulate each thread's ITEMS_PER_THREAD samples into an EXISTING
// block-wide histogram (bins are not cleared and no barrier is issued here;
// the caller is responsible for prior initialization/synchronization).
// Delegates to the selected internal implementation. Collective: must be
// invoked by all threads of the block.
402  template <
403  typename CounterT >
404  __device__ __forceinline__ void Composite(
405  T (&items)[ITEMS_PER_THREAD],
406  CounterT histogram[BINS])
407  {
408  InternalBlockHistogram(temp_storage).Composite(items, histogram);
409  }
410 
411 };
412 
413 } // CUB namespace
414 CUB_NS_POSTFIX // Optional outer namespace(s)
415 
static const BlockHistogramAlgorithm SAFE_ALGORITHM
The BlockHistogram class provides collective methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block.
_TempStorage & temp_storage
Shared storage reference.
__device__ __forceinline__ void Histogram(T(&items)[ITEMS_PER_THREAD], CounterT histogram[BINS])
Constructs a block-wide histogram in shared/device-accessible memory. Each thread contributes an array of input elements.
\smemstorage{BlockHistogram}
Optional outer namespace(s)
The BlockHistogramSort class provides sorting-based methods for constructing block-wide histograms fr...
#define CUB_PTX_ARCH
CUB_PTX_ARCH reflects the PTX version targeted by the active compiler pass (or zero during the host p...
Definition: util_arch.cuh:53
__device__ __forceinline__ void Composite(T(&items)[ITEMS_PER_THREAD], CounterT histogram[BINS])
Updates an existing block-wide histogram in shared/device-accessible memory. Each thread composites an array of input elements.
CTA_SYNC()
Definition: util_ptx.cuh:255
__device__ __forceinline__ BlockHistogram(TempStorage &temp_storage)
Collective constructor using the specified memory allocation as temporary storage.
BlockHistogramAlgorithm
BlockHistogramAlgorithm enumerates alternative algorithms for the parallel construction of block-wide histograms.
BlockRadixRank provides operations for ranking unsigned integer types within a CUDA thread block.
__device__ __forceinline__ void InitHistogram(CounterT histogram[BINS])
Initialize the shared histogram counters to zero.
If<(SAFE_ALGORITHM==BLOCK_HISTO_SORT), BlockHistogramSort< T, BLOCK_DIM_X, ITEMS_PER_THREAD, BINS, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH >, BlockHistogramAtomic< BINS > >::Type InternalBlockHistogram
Internal specialization.
__device__ __forceinline__ int RowMajorTid(int block_dim_x, int block_dim_y, int block_dim_z)
Returns the row-major linear thread identifier for a multidimensional thread block.
Definition: util_ptx.cuh:409
InternalBlockHistogram::TempStorage _TempStorage
Shared memory storage layout type for BlockHistogram.
A storage-backing wrapper that allows types with non-trivial constructors to be aliased in unions.
Definition: util_type.cuh:634
__device__ __forceinline__ _TempStorage & PrivateStorage()
Internal storage allocator.
Alias wrapper allowing storage to be unioned.
The thread block size in threads.
Type selection (IF ? ThenType : ElseType)
Definition: util_type.cuh:72
__device__ __forceinline__ BlockHistogram()
Collective constructor using a private static allocation of shared memory as temporary storage.
The BlockHistogramAtomic class provides atomic-based methods for constructing block-wide histograms f...
unsigned int linear_tid
Linear thread-id.