OpenFPM_pdata 4.1.0
Project that contains the implementation of distributed structures
 
block_histogram_sort.cuh
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/**
 * \file
 * The cub::BlockHistogramSort class provides sorting-based methods for
 * constructing block-wide histograms from data samples partitioned across
 * a CUDA thread block.
 */

#pragma once

#include "../../block/block_radix_sort.cuh"
#include "../../block/block_discontinuity.cuh"
#include "../../util_ptx.cuh"
#include "../../util_namespace.cuh"

/// Optional outer namespace(s)
CUB_NS_PREFIX

/// CUB namespace
namespace cub {

/**
 * \brief The BlockHistogramSort class provides sorting-based methods for
 *        constructing block-wide histograms from data samples partitioned
 *        across a CUDA thread block.
 */
template <
    typename    T,
    int         BLOCK_DIM_X,
    int         ITEMS_PER_THREAD,
    int         BINS,
    int         BLOCK_DIM_Y,
    int         BLOCK_DIM_Z,
    int         PTX_ARCH>
struct BlockHistogramSort
{
    /// Constants
    enum
    {
        /// The thread block size in threads
        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
    };

    // Parameterize BlockRadixSort type for our thread block
    typedef BlockRadixSort<
            T,
            BLOCK_DIM_X,
            ITEMS_PER_THREAD,
            NullType,
            4,
            (PTX_ARCH >= 350) ? true : false,
            BLOCK_SCAN_WARP_SCANS,
            cudaSharedMemBankSizeFourByte,
            BLOCK_DIM_Y,
            BLOCK_DIM_Z,
            PTX_ARCH>
        BlockRadixSortT;
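
    // (Arguments above, per BlockRadixSort's parameter list: keys-only sorting
    // via the NullType value type, 4 radix bits per digit pass, outer-scan
    // memoization on sm_35+, warp-scans block scan, and four-byte
    // shared-memory banking.)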

    // Parameterize BlockDiscontinuity type for our thread block
    typedef BlockDiscontinuity<
            T,
            BLOCK_DIM_X,
            BLOCK_DIM_Y,
            BLOCK_DIM_Z,
            PTX_ARCH>
        BlockDiscontinuityT;

    /// Shared memory
    struct _TempStorage
    {
        // Storage for sorting bin values
        typename BlockRadixSortT::TempStorage sort;

        struct
        {
            // Storage for detecting discontinuities in the tile of sorted bin values
            typename BlockDiscontinuityT::TempStorage flag;

            // Storage for noting begin/end offsets of bin runs in the tile of sorted bin values
            unsigned int run_begin[BINS];
            unsigned int run_end[BINS];
        };
    };
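
    // Note: cub::Uninitialized (used just below) is a storage-backing wrapper
    // that allows a type with non-trivial members to be aliased in unions and
    // raw __shared__ allocations without running constructors.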

    /// Alias wrapper allowing storage to be unioned
    struct TempStorage : Uninitialized<_TempStorage> {};

    // Thread fields
    _TempStorage &temp_storage;
    unsigned int linear_tid;

    /// Constructor
    __device__ __forceinline__ BlockHistogramSort(
        TempStorage &temp_storage)
    :
        temp_storage(temp_storage.Alias()),
        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
    {}

    // Discontinuity functor
    struct DiscontinuityOp
    {
        // Reference to temp_storage
        _TempStorage &temp_storage;

        // Constructor
        __device__ __forceinline__ DiscontinuityOp(_TempStorage &temp_storage) :
            temp_storage(temp_storage)
        {}

        // Discontinuity predicate
        __device__ __forceinline__ bool operator()(const T &a, const T &b, int b_index)
        {
            if (a != b)
            {
                // Note the begin/end offsets in shared storage
                temp_storage.run_begin[b] = b_index;
                temp_storage.run_end[a] = b_index;

                return true;
            }
            else
            {
                return false;
            }
        }
    };
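
    // How the predicate above yields bin counts: after the tile is sorted, all
    // samples of a given bin occupy one contiguous run. At each boundary
    // between differing samples, the run for bin b begins and the run for bin
    // a ends, so run_end[bin] - run_begin[bin] is that bin's sample count.
    // Bins absent from the tile keep run_begin == run_end == TILE_SIZE and
    // contribute zero; the final run's end is the TILE_SIZE initial value.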

    // Composite data onto an existing histogram
    template <
        typename CounterT >
    __device__ __forceinline__ void Composite(
        T           (&items)[ITEMS_PER_THREAD],
        CounterT    histogram[BINS])
    {
        enum { TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD };

        // Sort bytes in blocked arrangement
        BlockRadixSortT(temp_storage.sort).Sort(items);

        CTA_SYNC();

        // Initialize the shared memory's run_begin and run_end for each bin
        int histo_offset = 0;

        #pragma unroll
        for (; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS)
        {
            temp_storage.run_begin[histo_offset + linear_tid] = TILE_SIZE;
            temp_storage.run_end[histo_offset + linear_tid] = TILE_SIZE;
        }

        // Finish up with guarded initialization if necessary
        if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS))
        {
            temp_storage.run_begin[histo_offset + linear_tid] = TILE_SIZE;
            temp_storage.run_end[histo_offset + linear_tid] = TILE_SIZE;
        }

        CTA_SYNC();

        int flags[ITEMS_PER_THREAD];    // unused

        // Compute head flags to demarcate contiguous runs of the same bin in the sorted tile
        DiscontinuityOp flag_op(temp_storage);
        BlockDiscontinuityT(temp_storage.flag).FlagHeads(flags, items, flag_op);

        // Update begin for first item
        if (linear_tid == 0) temp_storage.run_begin[items[0]] = 0;

        CTA_SYNC();

        // Composite into histogram
        histo_offset = 0;

        #pragma unroll
        for (; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS)
        {
            int thread_offset = histo_offset + linear_tid;
            CounterT count = temp_storage.run_end[thread_offset] - temp_storage.run_begin[thread_offset];
            histogram[thread_offset] += count;
        }

        // Finish up with guarded composition if necessary
        if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS))
        {
            int thread_offset = histo_offset + linear_tid;
            CounterT count = temp_storage.run_end[thread_offset] - temp_storage.run_begin[thread_offset];
            histogram[thread_offset] += count;
        }
    }

};

}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)
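
For orientation, here is a minimal usage sketch (not part of this header). In CUB this specialization is normally reached through cub::BlockHistogram with the BLOCK_HISTO_SORT algorithm rather than instantiated directly; the kernel name, block shape (128 threads, 4 samples per thread, 256 bins), and the d_samples/d_histogram buffers below are illustrative assumptions.

#include <cub/cub.cuh>

__global__ void BlockHistogramKernel(const unsigned char *d_samples,
                                     unsigned int *d_histogram)
{
    // Specialize BlockHistogram for this block shape; BLOCK_HISTO_SORT routes
    // to the BlockHistogramSort specialization defined in this file
    typedef cub::BlockHistogram<unsigned char, 128, 4, 256,
                                cub::BLOCK_HISTO_SORT> BlockHistogramT;

    __shared__ typename BlockHistogramT::TempStorage temp_storage;
    __shared__ unsigned int block_histogram[256];

    // Load a blocked arrangement of four samples per thread
    unsigned char samples[4];
    int base = (blockIdx.x * 128 + threadIdx.x) * 4;
    for (int i = 0; i < 4; ++i)
        samples[i] = d_samples[base + i];

    // Zero the shared-memory histogram, then composite this tile into it
    BlockHistogramT(temp_storage).InitHistogram(block_histogram);
    __syncthreads();
    BlockHistogramT(temp_storage).Composite(samples, block_histogram);
    __syncthreads();

    // Publish the block-wide histogram to global memory
    for (int bin = threadIdx.x; bin < 256; bin += 128)
        atomicAdd(&d_histogram[bin], block_histogram[bin]);
}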