OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
block_reduce_raking_commutative_only.cuh
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/**
 * \file
 * cub::BlockReduceRakingCommutativeOnly provides raking-based methods of parallel reduction across a CUDA thread block. Does not support non-commutative reduction operators.
 */

#pragma once

#include "block_reduce_raking.cuh"
#include "../../warp/warp_reduce.cuh"
#include "../../thread/thread_reduce.cuh"
#include "../../util_ptx.cuh"
#include "../../util_namespace.cuh"

/// Optional outer namespace(s)
CUB_NS_PREFIX

/// CUB namespace
namespace cub {

/**
 * \brief BlockReduceRakingCommutativeOnly provides raking-based methods of parallel reduction across a CUDA thread block. Does not support non-commutative reduction operators. Does not support block sizes that are not a multiple of the warp size.
 */
template <
    typename    T,
    int         BLOCK_DIM_X,
    int         BLOCK_DIM_Y,
    int         BLOCK_DIM_Z,
    int         PTX_ARCH>
struct BlockReduceRakingCommutativeOnly
{
    /// Constants
    enum
    {
        /// The thread block size in threads
        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
    };

    // The fall-back implementation to use when BLOCK_THREADS is not a multiple of the warp size or not all threads have valid values
    typedef BlockReduceRaking<T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> FallBack;

    /// Constants
    enum
    {
        /// Number of warp threads
        WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH),

        /// Whether or not to use the fall-back implementation
        USE_FALLBACK = ((BLOCK_THREADS % WARP_THREADS != 0) || (BLOCK_THREADS <= WARP_THREADS)),

        /// Number of raking threads
        RAKING_THREADS = WARP_THREADS,

        /// Number of threads actually sharing items with the raking threads
        SHARING_THREADS = CUB_MAX(1, BLOCK_THREADS - RAKING_THREADS),

        /// Number of raking elements per warp-synchronous raking thread
        SEGMENT_LENGTH = SHARING_THREADS / WARP_THREADS,
    };

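    // Worked example (added commentary; the concrete block size is an assumption,
    // not part of the original source): with BLOCK_THREADS = 128 and
    // WARP_THREADS = 32, USE_FALLBACK is false, RAKING_THREADS = 32,
    // SHARING_THREADS = 96, and SEGMENT_LENGTH = 3. The upper 96 threads each
    // deposit one partial into the raking grid, and each of the 32 raking
    // threads sequentially reduces a segment of 3 deposited partials.
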
    /// WarpReduce utility type
    typedef WarpReduce<T, RAKING_THREADS, PTX_ARCH> WarpReduce;

    /// Layout type for padded thread block raking grid
    typedef BlockRakingLayout<T, SHARING_THREADS, PTX_ARCH> BlockRakingLayout;

    /// Shared memory storage layout type
    union _TempStorage
    {
        struct
        {
            typename WarpReduce::TempStorage        warp_storage;       ///< Storage for warp-synchronous reduction
            typename BlockRakingLayout::TempStorage raking_grid;        ///< Padded thread block raking grid
        };
        typename FallBack::TempStorage              fallback_storage;   ///< Fall-back storage for the fall-back reduction
    };


    /// Alias wrapper allowing storage to be unioned
    struct TempStorage : Uninitialized<_TempStorage> {};

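    // Note (added commentary): because _TempStorage is a union, the fall-back
    // path and the raking path alias the same shared memory; only one of the
    // two layouts is live during any given call, so no extra storage is used.
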
    // Thread fields
    _TempStorage &temp_storage;
    unsigned int linear_tid;


    /// Constructor
    __device__ __forceinline__ BlockReduceRakingCommutativeOnly(
        TempStorage &temp_storage)
    :
        temp_storage(temp_storage.Alias()),
        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
    {}


    /// Computes a thread block-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread 0.
    template <bool FULL_TILE>
    __device__ __forceinline__ T Sum(
        T   partial,        ///< [in] Calling thread's input partial reduction
        int num_valid)      ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
    {
        if (USE_FALLBACK || !FULL_TILE)
        {
            return FallBack(temp_storage.fallback_storage).template Sum<FULL_TILE>(partial, num_valid);
        }
        else
        {
            // Place partial into shared memory grid
            if (linear_tid >= RAKING_THREADS)
                *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid - RAKING_THREADS) = partial;

            CTA_SYNC();

            // Reduce parallelism to one warp
            if (linear_tid < RAKING_THREADS)
            {
                // Raking reduction in grid
                T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid);
                partial = internal::ThreadReduce<SEGMENT_LENGTH>(raking_segment, cub::Sum(), partial);

                // Warp-synchronous reduction
                partial = WarpReduce(temp_storage.warp_storage).Sum(partial);
            }
        }

        return partial;
    }
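
    // Note (added commentary): each raking thread folds its own register-resident
    // partial into the grid segment it rakes, so operands are combined in an
    // order unrelated to thread rank. This reordering is why this variant
    // requires a commutative reduction operator.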

    /// Computes a thread block-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread 0.
    template <
        bool        FULL_TILE,
        typename    ReductionOp>
    __device__ __forceinline__ T Reduce(
        T           partial,        ///< [in] Calling thread's input partial reduction
        int         num_valid,      ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
        ReductionOp reduction_op)   ///< [in] Binary reduction functor
    {
        if (USE_FALLBACK || !FULL_TILE)
        {
            return FallBack(temp_storage.fallback_storage).template Reduce<FULL_TILE>(partial, num_valid, reduction_op);
        }
        else
        {
            // Place partial into shared memory grid
            if (linear_tid >= RAKING_THREADS)
                *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid - RAKING_THREADS) = partial;

            CTA_SYNC();

            // Reduce parallelism to one warp
            if (linear_tid < RAKING_THREADS)
            {
                // Raking reduction in grid
                T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid);
                partial = internal::ThreadReduce<SEGMENT_LENGTH>(raking_segment, reduction_op, partial);

                // Warp-synchronous reduction
                partial = WarpReduce(temp_storage.warp_storage).Reduce(partial, reduction_op);
            }
        }

        return partial;
    }

};

}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)
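This internal specialization is normally reached through the public cub::BlockReduce interface by selecting the BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY algorithm. A minimal usage sketch follows; the kernel name, the block size of 128, and the int data type are illustrative assumptions, not part of this file:

#include <cub/block/block_reduce.cuh>

// Illustrative kernel (hypothetical name): sums 128 ints per thread block
// using the raking commutative-only algorithm implemented in this file.
__global__ void BlockSumKernel(const int *d_in, int *d_out)
{
    typedef cub::BlockReduce<int, 128, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY> BlockReduce;

    // Shared memory backing the collective
    __shared__ typename BlockReduce::TempStorage temp_storage;

    int thread_data = d_in[blockIdx.x * 128 + threadIdx.x];

    // Block-wide sum; the result is only valid in thread 0
    int block_sum = BlockReduce(temp_storage).Sum(thread_data);

    if (threadIdx.x == 0)
        d_out[blockIdx.x] = block_sum;
}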