OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
block_reduce_warp_reductions.cuh
Go to the documentation of this file.
1 /******************************************************************************
2  * Copyright (c) 2011, Duane Merrill. All rights reserved.
3  * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  * * Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * * Redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution.
12  * * Neither the name of the NVIDIA CORPORATION nor the
13  * names of its contributors may be used to endorse or promote products
14  * derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  ******************************************************************************/
28 
34 #pragma once
35 
36 #include "../../warp/warp_reduce.cuh"
37 #include "../../util_ptx.cuh"
38 #include "../../util_arch.cuh"
39 #include "../../util_namespace.cuh"
40 
42 CUB_NS_PREFIX
43 
45 namespace cub {
46 
47 
/**
 * \brief BlockReduceWarpReductions provides variants of warp-reduction-based
 * parallel reduction across a CUDA thread block.  Supports non-commutative
 * reduction operators.
 *
 * The block is partitioned into logical warps; each warp reduces its own
 * segment with WarpReduce, lane 0 of each warp publishes its partial into
 * shared memory, and thread 0 of the block folds the warp partials together.
 * The block-wide aggregate is therefore only valid in thread 0.
 */
template <
    typename    T,              ///< Data type being reduced
    int         BLOCK_DIM_X,    ///< The thread block length in threads along the X dimension
    int         BLOCK_DIM_Y,    ///< The thread block length in threads along the Y dimension
    int         BLOCK_DIM_Z,    ///< The thread block length in threads along the Z dimension
    int         PTX_ARCH>       ///< The PTX compute capability for which to specialize this collective
struct BlockReduceWarpReductions
{
    /// Constants
    enum
    {
        /// The thread block size in threads
        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,

        /// Number of warp threads
        WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH),

        /// Number of active warps (ceiling division so a partial warp still counts)
        WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,

        /// The logical warp size for warp reductions
        LOGICAL_WARP_SIZE = CUB_MIN(BLOCK_THREADS, WARP_THREADS),

        /// Whether or not the logical warp size evenly divides the thread block size
        EVEN_WARP_MULTIPLE = (BLOCK_THREADS % LOGICAL_WARP_SIZE == 0)
    };


    ///  WarpReduce utility type
    typedef typename WarpReduce<T, LOGICAL_WARP_SIZE, PTX_ARCH>::InternalWarpReduce WarpReduce;


    /// Shared memory storage layout type
    struct _TempStorage
    {
        typename WarpReduce::TempStorage    warp_reduce[WARPS];     ///< Buffer for warp-synchronous reduction
        T                                   warp_aggregates[WARPS]; ///< Shared totals from each warp-synchronous reduction
        T                                   block_prefix;           ///< Shared prefix for the entire thread block
    };

    /// Alias wrapper allowing storage to be unioned
    struct TempStorage : Uninitialized<_TempStorage> {};


    // Thread fields
    _TempStorage &temp_storage;     ///< Reference to shared storage (aliased from the caller's TempStorage)
    int linear_tid;                 ///< Row-major linear thread id within the block
    int warp_id;                    ///< Which logical warp this thread belongs to
    int lane_id;                    ///< Lane id of this thread within its warp


    /// Constructor
    __device__ __forceinline__ BlockReduceWarpReductions(
        TempStorage &temp_storage)
    :
        temp_storage(temp_storage.Alias()),
        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)),
        warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS),
        lane_id(LaneId())
    {}


    /// Recursively folds successor warps' aggregates into \p warp_aggregate.
    /// Unrolled at compile time via Int2Type<SUCCESSOR_WARP>; recursion
    /// terminates at the Int2Type<WARPS> overload below.
    template <bool FULL_TILE, typename ReductionOp, int SUCCESSOR_WARP>
    __device__ __forceinline__ T ApplyWarpAggregates(
        ReductionOp                 reduction_op,       ///< [in] Binary reduction operator
        T                           warp_aggregate,     ///< [in] [lane0 only] Warp-wide aggregate reduction of input items
        int                         num_valid,          ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
        Int2Type<SUCCESSOR_WARP>    /*successor_warp*/)
    {
        // Skip warps that held no valid items on a partial tile
        if (FULL_TILE || (SUCCESSOR_WARP * LOGICAL_WARP_SIZE < num_valid))
        {
            T addend = temp_storage.warp_aggregates[SUCCESSOR_WARP];
            warp_aggregate = reduction_op(warp_aggregate, addend);
        }
        return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid, Int2Type<SUCCESSOR_WARP + 1>());
    }

    /// Recursion terminator: all WARPS aggregates have been applied.
    template <bool FULL_TILE, typename ReductionOp>
    __device__ __forceinline__ T ApplyWarpAggregates(
        ReductionOp     /*reduction_op*/,
        T               warp_aggregate,
        int             /*num_valid*/,
        Int2Type<WARPS> /*successor_warp*/)
    {
        return warp_aggregate;
    }


    /// Shares warp aggregates through shared memory and combines them.
    /// Returns the block-wide aggregate in thread0 (other threads' return
    /// values are not the block aggregate).
    template <
        bool        FULL_TILE,
        typename    ReductionOp>
    __device__ __forceinline__ T ApplyWarpAggregates(
        ReductionOp     reduction_op,       ///< [in] Binary reduction operator
        T               warp_aggregate,     ///< [in] [lane0 only] Warp-wide aggregate
        int             num_valid)          ///< [in] Number of valid elements
    {
        // Share lane aggregates
        if (lane_id == 0)
        {
            temp_storage.warp_aggregates[warp_id] = warp_aggregate;
        }

        CTA_SYNC();

        // Update total aggregate in warp 0, lane 0
        if (linear_tid == 0)
        {
            warp_aggregate = ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid, Int2Type<1>());
        }

        return warp_aggregate;
    }


    /// Computes a thread block-wide reduction using addition (+) as the
    /// reduction operator.  The first \p num_valid threads each contribute one
    /// reduction partial.  The block-wide aggregate is returned in thread0.
    template <bool FULL_TILE>
    __device__ __forceinline__ T Sum(
        T   input,                      ///< [in] Calling thread's input partial reduction
        int num_valid)                  ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
    {
        cub::Sum    reduction_op;
        int         warp_offset = (warp_id * LOGICAL_WARP_SIZE);
        // A warp is "full" when the tile is full-and-even or the warp lies
        // entirely below num_valid; otherwise only its leading lanes are valid.
        int         warp_num_valid = ((FULL_TILE && EVEN_WARP_MULTIPLE) || (warp_offset + LOGICAL_WARP_SIZE <= num_valid)) ?
                        LOGICAL_WARP_SIZE :
                        num_valid - warp_offset;

        // Warp reduction in every warp
        T warp_aggregate = WarpReduce(temp_storage.warp_reduce[warp_id]).template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE)>(
            input,
            warp_num_valid,
            cub::Sum());

        // Update outputs and block_aggregate with warp-wide aggregates from lane-0s
        return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid);
    }


    /// Computes a thread block-wide reduction using the specified reduction
    /// operator.  The first \p num_valid threads each contribute one reduction
    /// partial.  The block-wide aggregate is returned in thread0.
    template <
        bool        FULL_TILE,
        typename    ReductionOp>
    __device__ __forceinline__ T Reduce(
        T           input,              ///< [in] Calling thread's input partial reduction
        int         num_valid,          ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
        ReductionOp reduction_op)       ///< [in] Binary reduction operator
    {
        int warp_offset = warp_id * LOGICAL_WARP_SIZE;
        // A warp is "full" when the tile is full-and-even or the warp lies
        // entirely below num_valid; otherwise only its leading lanes are valid.
        int warp_num_valid = ((FULL_TILE && EVEN_WARP_MULTIPLE) || (warp_offset + LOGICAL_WARP_SIZE <= num_valid)) ?
            LOGICAL_WARP_SIZE :
            num_valid - warp_offset;

        // Warp reduction in every warp
        T warp_aggregate = WarpReduce(temp_storage.warp_reduce[warp_id]).template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE)>(
            input,
            warp_num_valid,
            reduction_op);

        // Update outputs and block_aggregate with warp-wide aggregates from lane-0s
        return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid);
    }

};
214 
215 
216 } // CUB namespace
217 CUB_NS_POSTFIX // Optional outer namespace(s)
218 
__device__ __forceinline__ BlockReduceWarpReductions(TempStorage &temp_storage)
Constructor.
Optional outer namespace(s)
Alias wrapper allowing storage to be unioned.
BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction across a CUDA thread block. Supports non-commutative reduction operators.
T block_prefix
Shared prefix for the entire thread block.
CTA_SYNC()
Definition: util_ptx.cuh:255
__device__ __forceinline__ unsigned int LaneId()
Returns the warp lane ID of the calling thread.
Definition: util_ptx.cuh:420
WarpReduce::TempStorage warp_reduce[WARPS]
Buffer for warp-synchronous scan.
__device__ __forceinline__ T Reduce(T input, int num_valid, ReductionOp reduction_op)
Computes a thread block-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The block-wide aggregate is returned in thread0.
__device__ __forceinline__ T Sum(T input, int num_valid)
Computes a thread block-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The block-wide aggregate is returned in thread0.
Whether or not the logical warp size evenly divides the thread block size.
If<(PTX_ARCH >=300) &&(IS_POW_OF_TWO), WarpReduceShfl< T, LOGICAL_WARP_THREADS, PTX_ARCH >, WarpReduceSmem< T, LOGICAL_WARP_THREADS, PTX_ARCH > >::Type InternalWarpReduce
Internal specialization. Use SHFL-based reduction if (architecture is >= SM30) and (LOGICAL_WARP_THREADS is a power-of-two).
OutputIteratorT OffsetT GridEvenShare< OffsetT > ReductionOpT reduction_op
< [in] Binary reduction functor
Allows for the treatment of an integral constant as a type at compile-time (e.g., to achieve static c...
Definition: util_type.cuh:275
__device__ __forceinline__ int RowMajorTid(int block_dim_x, int block_dim_y, int block_dim_z)
Returns the row-major linear thread identifier for a multidimensional thread block.
Definition: util_ptx.cuh:409
__device__ __forceinline__ T ApplyWarpAggregates(ReductionOp reduction_op, T warp_aggregate, int num_valid)
Returns block-wide aggregate in thread0.
A storage-backing wrapper that allows types with non-trivial constructors to be aliased in unions.
Definition: util_type.cuh:634
__device__ __forceinline__ T ApplyWarpAggregates(ReductionOp reduction_op, T warp_aggregate, int num_valid, Int2Type< SUCCESSOR_WARP >)
\smemstorage{WarpReduce}
#define CUB_MIN(a, b)
Select minimum(a, b)
Definition: util_macro.cuh:66
WarpReduce< T, LOGICAL_WARP_SIZE, PTX_ARCH >::InternalWarpReduce WarpReduce
WarpReduce utility type.
T warp_aggregates[WARPS]
Shared totals from each warp-synchronous scan.
Default sum functor.
__device__ __forceinline__ T ApplyWarpAggregates(ReductionOp, T warp_aggregate, int, Int2Type< WARPS >)