OpenFPM_pdata 4.1.0
Project that contains the implementation of distributed structures
block_reduce_raking.cuh
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/**
 * \file
 * cub::BlockReduceRaking provides raking-based methods of parallel reduction across a CUDA thread block.  Supports non-commutative reduction operators.
 */

#pragma once

#include "../../block/block_raking_layout.cuh"
#include "../../warp/warp_reduce.cuh"
#include "../../thread/thread_reduce.cuh"
#include "../../util_ptx.cuh"
#include "../../util_namespace.cuh"

/// Optional outer namespace(s)
CUB_NS_PREFIX

/// CUB namespace
namespace cub {

/**
 * \brief BlockReduceRaking provides raking-based methods of parallel reduction across a CUDA thread block.  Supports non-commutative reduction operators.
 */
template <
    typename    T,              ///< Data type being reduced
    int         BLOCK_DIM_X,    ///< The thread block length in threads along the X dimension
    int         BLOCK_DIM_Y,    ///< The thread block length in threads along the Y dimension
    int         BLOCK_DIM_Z,    ///< The thread block length in threads along the Z dimension
    int         PTX_ARCH>       ///< The PTX compute capability for which to specialize this collective
struct BlockReduceRaking
{
    /// Constants
    enum
    {
        /// The thread block size in threads
        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
    };

    /// Layout type for padded thread block raking grid
    typedef BlockRakingLayout<T, BLOCK_THREADS, PTX_ARCH> BlockRakingLayout;

    /// WarpReduce utility type
    typedef typename WarpReduce<T, BlockRakingLayout::RAKING_THREADS, PTX_ARCH>::InternalWarpReduce WarpReduce;

    /// Constants
    enum
    {
        /// Number of raking threads
        RAKING_THREADS = BlockRakingLayout::RAKING_THREADS,

        /// Number of raking elements per warp-synchronous raking thread
        SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH,

        /// Cooperative work can be entirely warp synchronous
        WARP_SYNCHRONOUS = (RAKING_THREADS == BLOCK_THREADS),

        /// Whether or not warp-synchronous reduction should be unguarded (i.e., the number of warp-reduction elements is a power of two)
        WARP_SYNCHRONOUS_UNGUARDED = PowerOfTwo<RAKING_THREADS>::VALUE,

        /// Whether or not accesses into smem are unguarded
        RAKING_UNGUARDED = BlockRakingLayout::UNGUARDED,
    };

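    // Illustrative note (not from the original source): with a 32-thread warp
    // and BLOCK_THREADS = 128, BlockRakingLayout typically yields
    // RAKING_THREADS = 32 and SEGMENT_LENGTH = 4, so WARP_SYNCHRONOUS is
    // false and a single warp rakes four grid entries per lane. With
    // BLOCK_THREADS = 32, RAKING_THREADS == BLOCK_THREADS and Reduce()
    // short-circuits to the warp-synchronous path.
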
    /// Shared memory storage layout type
    union _TempStorage
    {
        typename WarpReduce::TempStorage            warp_storage;   ///< Storage for warp-synchronous reduction
        typename BlockRakingLayout::TempStorage     raking_grid;    ///< Padded thread block raking grid
    };

    /// Alias wrapper allowing storage to be unioned
    struct TempStorage : Uninitialized<_TempStorage> {};


    // Thread fields
    _TempStorage    &temp_storage;
    unsigned int    linear_tid;


    /// Constructor
    __device__ __forceinline__ BlockReduceRaking(
        TempStorage &temp_storage)
    :
        temp_storage(temp_storage.Alias()),
        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
    {}


    /// Raking reduction step: folds raking_segment[ITERATION] into partial, then recurses on ITERATION + 1
    template <bool IS_FULL_TILE, typename ReductionOp, int ITERATION>
    __device__ __forceinline__ T RakingReduction(
        ReductionOp         reduction_op,       ///< [in] Binary reduction operator
        T                   *raking_segment,
        T                   partial,
        int                 num_valid,          ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
        Int2Type<ITERATION> /*iteration*/)
    {
        // Update partial if addend is in range
        if ((IS_FULL_TILE && RAKING_UNGUARDED) || ((linear_tid * SEGMENT_LENGTH) + ITERATION < num_valid))
        {
            T addend = raking_segment[ITERATION];
            partial = reduction_op(partial, addend);
        }
        return RakingReduction<IS_FULL_TILE>(reduction_op, raking_segment, partial, num_valid, Int2Type<ITERATION + 1>());
    }

    /// Raking reduction termination: ITERATION has reached SEGMENT_LENGTH, so the segment is exhausted
    template <bool IS_FULL_TILE, typename ReductionOp>
    __device__ __forceinline__ T RakingReduction(
        ReductionOp                 /*reduction_op*/,
        T *                         /*raking_segment*/,
        T                           partial,
        int                         /*num_valid*/,
        Int2Type<SEGMENT_LENGTH>    /*iteration*/)
    {
        return partial;
    }
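
    // Illustrative note (not from the original source): for SEGMENT_LENGTH == 4
    // the pair of overloads above unrolls at compile time into the equivalent of
    //
    //     partial = reduction_op(partial, raking_segment[1]);
    //     partial = reduction_op(partial, raking_segment[2]);
    //     partial = reduction_op(partial, raking_segment[3]);
    //
    // with each step guarded by the num_valid bounds check unless the tile is
    // full and RAKING_UNGUARDED holds. Int2Type<ITERATION> dispatches each step
    // to the first overload; Int2Type<SEGMENT_LENGTH> selects the terminating
    // overload, so the recursion costs nothing at run time.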


    /// Computes a thread block-wide reduction using the specified reduction operator.  The first num_valid threads each contribute one reduction partial.  The return value is only valid for thread0.
    template <
        bool                IS_FULL_TILE,
        typename            ReductionOp>
    __device__ __forceinline__ T Reduce(
        T                   partial,        ///< [in] Calling thread's input partial reductions
        int                 num_valid,      ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
        ReductionOp         reduction_op)   ///< [in] Binary reduction operator
    {
        if (WARP_SYNCHRONOUS)
        {
            // Short-circuit directly to warp synchronous reduction (unguarded if active threads is a power-of-two)
            partial = WarpReduce(temp_storage.warp_storage).template Reduce<IS_FULL_TILE>(
                partial,
                num_valid,
                reduction_op);
        }
        else
        {
            // Place partial into shared memory grid.
            *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid) = partial;

            CTA_SYNC();

            // Reduce parallelism to one warp
            if (linear_tid < RAKING_THREADS)
            {
                // Raking reduction in grid
                T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid);
                partial = raking_segment[0];

                partial = RakingReduction<IS_FULL_TILE>(reduction_op, raking_segment, partial, num_valid, Int2Type<1>());

                // Number of raking threads holding valid data
                int valid_raking_threads = (IS_FULL_TILE) ?
                    RAKING_THREADS :
                    (num_valid + SEGMENT_LENGTH - 1) / SEGMENT_LENGTH;

                partial = WarpReduce(temp_storage.warp_storage).template Reduce<IS_FULL_TILE && RAKING_UNGUARDED>(
                    partial,
                    valid_raking_threads,
                    reduction_op);
            }
        }

        return partial;
    }
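
    // Illustrative trace (not from the original source), assuming
    // BLOCK_THREADS = 128, RAKING_THREADS = 32, SEGMENT_LENGTH = 4, and a
    // partial tile with num_valid = 100: all threads deposit a partial into
    // the raking grid, lanes 0..31 each rake up to four entries (the bounds
    // check skips entries at grid rank >= 100), and valid_raking_threads =
    // (100 + 3) / 4 = 26 lanes then carry live values into the final
    // warp-synchronous reduction.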


    /// Computes a thread block-wide reduction using addition (+) as the reduction operator.  The first num_valid threads each contribute one reduction partial.  The return value is only valid for thread0.
    template <bool IS_FULL_TILE>
    __device__ __forceinline__ T Sum(
        T                   partial,        ///< [in] Calling thread's input partial reductions
        int                 num_valid)      ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
    {
        cub::Sum reduction_op;

        return Reduce<IS_FULL_TILE>(partial, num_valid, reduction_op);
    }

};

}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)

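Usage note (illustrative, not part of the header above): BlockReduceRaking is an internal specialization that is normally reached through the public cub::BlockReduce front-end, which forwards its Sum() and Reduce() calls here when the BLOCK_REDUCE_RAKING algorithm is selected. A minimal kernel sketch, with hypothetical kernel name and launch shape, might look as follows:

#include <cub/block/block_reduce.cuh>

// Each 128-thread block computes one block-wide sum of its inputs.
__global__ void BlockSumKernel(const int *d_in, int *d_out)
{
    // Specialize the public wrapper for 128 threads and the raking algorithm;
    // internally this instantiates the BlockReduceRaking specialization above.
    typedef cub::BlockReduce<int, 128, cub::BLOCK_REDUCE_RAKING> BlockReduce;

    // Collective shared-memory storage (wraps this file's TempStorage union)
    __shared__ typename BlockReduce::TempStorage temp_storage;

    int thread_data = d_in[blockIdx.x * 128 + threadIdx.x];

    // Block-wide sum; the result is only valid in thread 0
    int block_sum = BlockReduce(temp_storage).Sum(thread_data);

    if (threadIdx.x == 0)
        d_out[blockIdx.x] = block_sum;
}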