OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
 
block_radix_sort.cuh
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill. All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

#pragma once

#include "block_exchange.cuh"
#include "block_radix_rank.cuh"
#include "../util_ptx.cuh"
#include "../util_arch.cuh"
#include "../util_type.cuh"
#include "../util_namespace.cuh"

CUB_NS_PREFIX

namespace cub {

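/**
 * \brief The BlockRadixSort class provides collective methods for sorting items
 * partitioned across a CUDA thread block using a radix sorting method.
 *
 * The sketch below is added for illustration and is not part of the original header:
 * the block size (128), items per thread (4), and the kernel name are arbitrary
 * choices. It shows the typical usage pattern: specialize the type, reserve its
 * TempStorage in shared memory, and have every thread of the block call Sort()
 * with its blocked segment of keys.
 *
 *     __global__ void ExampleSortKernel(int *d_keys)
 *     {
 *         // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each
 *         typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSortT;
 *
 *         // Shared memory needed by the collective
 *         __shared__ typename BlockRadixSortT::TempStorage temp_storage;
 *
 *         // Obtain a blocked arrangement: thread t owns keys [4*t, 4*t + 4)
 *         int thread_keys[4];
 *         for (int ITEM = 0; ITEM < 4; ++ITEM)
 *             thread_keys[ITEM] = d_keys[threadIdx.x * 4 + ITEM];
 *
 *         // Collectively sort the tile of 512 keys
 *         BlockRadixSortT(temp_storage).Sort(thread_keys);
 *
 *         // Write the sorted blocked arrangement back to global memory
 *         for (int ITEM = 0; ITEM < 4; ++ITEM)
 *             d_keys[threadIdx.x * 4 + ITEM] = thread_keys[ITEM];
 *     }
 */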
template <
    typename KeyT,
    int BLOCK_DIM_X,
    int ITEMS_PER_THREAD,
    typename ValueT = NullType,
    int RADIX_BITS = 4,
    bool MEMOIZE_OUTER_SCAN = (CUB_PTX_ARCH >= 350) ? true : false,
    BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS,
    cudaSharedMemConfig SMEM_CONFIG = cudaSharedMemBankSizeFourByte,
    int BLOCK_DIM_Y = 1,
    int BLOCK_DIM_Z = 1,
    int PTX_ARCH = CUB_PTX_ARCH>
class BlockRadixSort
{
private:

    /******************************************************************************
     * Constants and type definitions
     ******************************************************************************/

    enum
    {
        // The thread block size in threads
        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,

        // Whether or not there are values to be trucked along with keys
        KEYS_ONLY = Equals<ValueT, NullType>::VALUE,
    };

    // KeyT traits and unsigned bits type
    typedef Traits<KeyT> KeyTraits;
    typedef typename KeyTraits::UnsignedBits UnsignedBits;

    /// Ascending BlockRadixRank utility type
    typedef BlockRadixRank<
        BLOCK_DIM_X,
        RADIX_BITS,
        false,
        MEMOIZE_OUTER_SCAN,
        INNER_SCAN_ALGORITHM,
        SMEM_CONFIG,
        BLOCK_DIM_Y,
        BLOCK_DIM_Z,
        PTX_ARCH>
    AscendingBlockRadixRank;

    /// Descending BlockRadixRank utility type
    typedef BlockRadixRank<
        BLOCK_DIM_X,
        RADIX_BITS,
        true,
        MEMOIZE_OUTER_SCAN,
        INNER_SCAN_ALGORITHM,
        SMEM_CONFIG,
        BLOCK_DIM_Y,
        BLOCK_DIM_Z,
        PTX_ARCH>
    DescendingBlockRadixRank;

    /// BlockExchange utility type for keys
    typedef BlockExchange<KeyT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchangeKeys;

    /// BlockExchange utility type for values
    typedef BlockExchange<ValueT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchangeValues;

    /// Shared memory storage layout type
    union _TempStorage
    {
        typename AscendingBlockRadixRank::TempStorage asending_ranking_storage;
        typename DescendingBlockRadixRank::TempStorage descending_ranking_storage;
        typename BlockExchangeKeys::TempStorage exchange_keys;
        typename BlockExchangeValues::TempStorage exchange_values;
    };


    /******************************************************************************
     * Thread fields
     ******************************************************************************/

    /// Shared storage reference
    _TempStorage &temp_storage;

    /// Linear thread-id
    unsigned int linear_tid;

    /******************************************************************************
     * Utility methods
     ******************************************************************************/

    /// Internal storage allocator
    __device__ __forceinline__ _TempStorage& PrivateStorage()
    {
        __shared__ _TempStorage private_storage;
        return private_storage;
    }

    /// Rank keys (specialized for ascending sort)
    __device__ __forceinline__ void RankKeys(
        UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD],
        int (&ranks)[ITEMS_PER_THREAD],
        int begin_bit,
        int pass_bits,
        Int2Type<false> /*is_descending*/)
    {
        AscendingBlockRadixRank(temp_storage.asending_ranking_storage).RankKeys(
            unsigned_keys,
            ranks,
            begin_bit,
            pass_bits);
    }

    /// Rank keys (specialized for descending sort)
    __device__ __forceinline__ void RankKeys(
        UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD],
        int (&ranks)[ITEMS_PER_THREAD],
        int begin_bit,
        int pass_bits,
        Int2Type<true> /*is_descending*/)
    {
        DescendingBlockRadixRank(temp_storage.descending_ranking_storage).RankKeys(
            unsigned_keys,
            ranks,
            begin_bit,
            pass_bits);
    }

    /// ExchangeValues (specialized for key-value sort, to-blocked arrangement)
    __device__ __forceinline__ void ExchangeValues(
        ValueT (&values)[ITEMS_PER_THREAD],
        int (&ranks)[ITEMS_PER_THREAD],
        Int2Type<false> /*is_keys_only*/,
        Int2Type<true> /*is_blocked*/)
    {
        CTA_SYNC();

        // Exchange values through shared memory in blocked arrangement
        BlockExchangeValues(temp_storage.exchange_values).ScatterToBlocked(values, ranks);
    }

    /// ExchangeValues (specialized for key-value sort, to-striped arrangement)
    __device__ __forceinline__ void ExchangeValues(
        ValueT (&values)[ITEMS_PER_THREAD],
        int (&ranks)[ITEMS_PER_THREAD],
        Int2Type<false> /*is_keys_only*/,
        Int2Type<false> /*is_blocked*/)
    {
        CTA_SYNC();

        // Exchange values through shared memory in striped arrangement
        BlockExchangeValues(temp_storage.exchange_values).ScatterToStriped(values, ranks);
    }

    /// ExchangeValues (specialized for keys-only sort)
    template <int IS_BLOCKED>
    __device__ __forceinline__ void ExchangeValues(
        ValueT (&/*values*/)[ITEMS_PER_THREAD],
        int (&/*ranks*/)[ITEMS_PER_THREAD],
        Int2Type<true> /*is_keys_only*/,
        Int2Type<IS_BLOCKED> /*is_blocked*/)
    {}

    /// Sort blocked arrangement
    template <int DESCENDING, int KEYS_ONLY>
    __device__ __forceinline__ void SortBlocked(
        KeyT (&keys)[ITEMS_PER_THREAD],
        ValueT (&values)[ITEMS_PER_THREAD],
        int begin_bit,
        int end_bit,
        Int2Type<DESCENDING> is_descending,
        Int2Type<KEYS_ONLY> is_keys_only)
    {
        UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD] =
            reinterpret_cast<UnsignedBits (&)[ITEMS_PER_THREAD]>(keys);

        // Twiddle bits if necessary
        #pragma unroll
        for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
        {
            unsigned_keys[KEY] = KeyTraits::TwiddleIn(unsigned_keys[KEY]);
        }

        // Radix sorting passes
        while (true)
        {
            int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit);

            // Rank the blocked keys
            int ranks[ITEMS_PER_THREAD];
            RankKeys(unsigned_keys, ranks, begin_bit, pass_bits, is_descending);
            begin_bit += RADIX_BITS;

            CTA_SYNC();

            // Exchange keys through shared memory in blocked arrangement
            BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks);

            // Exchange values through shared memory in blocked arrangement
            ExchangeValues(values, ranks, is_keys_only, Int2Type<true>());

            // Quit if done
            if (begin_bit >= end_bit) break;

            CTA_SYNC();
        }

        // Untwiddle bits if necessary
        #pragma unroll
        for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
        {
            unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]);
        }
    }

public:

#ifndef DOXYGEN_SHOULD_SKIP_THIS    // Do not document

    /// Sort blocked -> striped arrangement
    template <int DESCENDING, int KEYS_ONLY>
    __device__ __forceinline__ void SortBlockedToStriped(
        KeyT (&keys)[ITEMS_PER_THREAD],
        ValueT (&values)[ITEMS_PER_THREAD],
        int begin_bit,
        int end_bit,
        Int2Type<DESCENDING> is_descending,
        Int2Type<KEYS_ONLY> is_keys_only)
    {
        UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD] =
            reinterpret_cast<UnsignedBits (&)[ITEMS_PER_THREAD]>(keys);

        // Twiddle bits if necessary
        #pragma unroll
        for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
        {
            unsigned_keys[KEY] = KeyTraits::TwiddleIn(unsigned_keys[KEY]);
        }

        // Radix sorting passes
        while (true)
        {
            int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit);

            // Rank the blocked keys
            int ranks[ITEMS_PER_THREAD];
            RankKeys(unsigned_keys, ranks, begin_bit, pass_bits, is_descending);
            begin_bit += RADIX_BITS;

            CTA_SYNC();

            // Check if this is the last pass
            if (begin_bit >= end_bit)
            {
                // Last pass exchanges keys through shared memory in striped arrangement
                BlockExchangeKeys(temp_storage.exchange_keys).ScatterToStriped(keys, ranks);

                // Last pass exchanges values through shared memory in striped arrangement
                ExchangeValues(values, ranks, is_keys_only, Int2Type<false>());

                // Quit
                break;
            }

            // Exchange keys through shared memory in blocked arrangement
            BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks);

            // Exchange values through shared memory in blocked arrangement
            ExchangeValues(values, ranks, is_keys_only, Int2Type<true>());

            CTA_SYNC();
        }

        // Untwiddle bits if necessary
        #pragma unroll
        for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++)
        {
            unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]);
        }
    }

#endif // DOXYGEN_SHOULD_SKIP_THIS

    /// \smemstorage{BlockRadixSort}
    struct TempStorage : Uninitialized<_TempStorage> {};


    /******************************************************************
     * Collective constructors
     ******************************************************************/

    /// Collective constructor using a private static allocation of shared memory as temporary storage.
    __device__ __forceinline__ BlockRadixSort()
    :
        temp_storage(PrivateStorage()),
        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
    {}


    /// Collective constructor using the specified memory allocation as temporary storage.
    __device__ __forceinline__ BlockRadixSort(
        TempStorage &temp_storage)
    :
        temp_storage(temp_storage.Alias()),
        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
    {}

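    // Construction sketch (added for illustration, not part of the original header):
    // the two constructors above differ only in where the temporary storage lives.
    // The default constructor allocates a private __shared__ _TempStorage internally,
    // while the second lets the caller provide the allocation and reuse that shared
    // memory for other collectives once the sort has completed and the block has
    // synchronized. Block size and key type below are arbitrary choices.
    //
    //     // (1) Private static shared-memory allocation:
    //     cub::BlockRadixSort<int, 128, 4>().Sort(thread_keys);
    //
    //     // (2) Caller-provided temporary storage:
    //     __shared__ typename cub::BlockRadixSort<int, 128, 4>::TempStorage temp_storage;
    //     cub::BlockRadixSort<int, 128, 4>(temp_storage).Sort(thread_keys);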

    /******************************************************************
     * Sorting (blocked arrangements)
     ******************************************************************/

    /// Performs an ascending block-wide radix sort over a blocked arrangement of keys.
    __device__ __forceinline__ void Sort(
        KeyT (&keys)[ITEMS_PER_THREAD],
        int begin_bit = 0,
        int end_bit = sizeof(KeyT) * 8)
    {
        NullType values[ITEMS_PER_THREAD];

        SortBlocked(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>());
    }


    /// Performs an ascending block-wide radix sort across a blocked arrangement of keys and values.
    __device__ __forceinline__ void Sort(
        KeyT (&keys)[ITEMS_PER_THREAD],
        ValueT (&values)[ITEMS_PER_THREAD],
        int begin_bit = 0,
        int end_bit = sizeof(KeyT) * 8)
    {
        SortBlocked(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>());
    }

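    // Key-value sketch (added for illustration; the float value type and the sizes
    // are arbitrary choices): to sort values along with keys, the ValueT template
    // parameter must be given explicitly, and each thread passes matching blocked
    // segments of keys and values.
    //
    //     typedef cub::BlockRadixSort<int, 128, 4, float> BlockRadixSortT;
    //     __shared__ typename BlockRadixSortT::TempStorage temp_storage;
    //
    //     int   thread_keys[4];
    //     float thread_values[4];
    //     // ... load blocked arrangements of keys and values ...
    //
    //     BlockRadixSortT(temp_storage).Sort(thread_keys, thread_values);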
    /// Performs a descending block-wide radix sort over a blocked arrangement of keys.
    __device__ __forceinline__ void SortDescending(
        KeyT (&keys)[ITEMS_PER_THREAD],
        int begin_bit = 0,
        int end_bit = sizeof(KeyT) * 8)
    {
        NullType values[ITEMS_PER_THREAD];

        SortBlocked(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>());
    }

    /// Performs a descending block-wide radix sort across a blocked arrangement of keys and values.
    __device__ __forceinline__ void SortDescending(
        KeyT (&keys)[ITEMS_PER_THREAD],
        ValueT (&values)[ITEMS_PER_THREAD],
        int begin_bit = 0,
        int end_bit = sizeof(KeyT) * 8)
    {
        SortBlocked(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>());
    }


    /******************************************************************
     * Sorting (blocked arrangement -> striped arrangement)
     ******************************************************************/


    /// Performs an ascending radix sort across a blocked arrangement of keys, leaving them in a striped arrangement.
    __device__ __forceinline__ void SortBlockedToStriped(
        KeyT (&keys)[ITEMS_PER_THREAD],
        int begin_bit = 0,
        int end_bit = sizeof(KeyT) * 8)
    {
        NullType values[ITEMS_PER_THREAD];

        SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>());
    }

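    // Blocked -> striped sketch (added for illustration; block size and item count
    // are arbitrary choices): because the results come back in a striped arrangement
    // (thread t owns items t, t + 128, t + 256, ...), the write-back to global
    // memory below is naturally coalesced.
    //
    //     typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSortT;
    //     __shared__ typename BlockRadixSortT::TempStorage temp_storage;
    //
    //     int thread_keys[4];
    //     for (int ITEM = 0; ITEM < 4; ++ITEM)                   // blocked load
    //         thread_keys[ITEM] = d_keys[threadIdx.x * 4 + ITEM];
    //
    //     BlockRadixSortT(temp_storage).SortBlockedToStriped(thread_keys);
    //
    //     for (int ITEM = 0; ITEM < 4; ++ITEM)                   // striped (coalesced) store
    //         d_keys[ITEM * 128 + threadIdx.x] = thread_keys[ITEM];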
    /// Performs an ascending radix sort across a blocked arrangement of keys and values, leaving them in a striped arrangement.
    __device__ __forceinline__ void SortBlockedToStriped(
        KeyT (&keys)[ITEMS_PER_THREAD],
        ValueT (&values)[ITEMS_PER_THREAD],
        int begin_bit = 0,
        int end_bit = sizeof(KeyT) * 8)
    {
        SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>());
    }

    /// Performs a descending radix sort across a blocked arrangement of keys, leaving them in a striped arrangement.
    __device__ __forceinline__ void SortDescendingBlockedToStriped(
        KeyT (&keys)[ITEMS_PER_THREAD],
        int begin_bit = 0,
        int end_bit = sizeof(KeyT) * 8)
    {
        NullType values[ITEMS_PER_THREAD];

        SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>());
    }

    /// Performs a descending radix sort across a blocked arrangement of keys and values, leaving them in a striped arrangement.
    __device__ __forceinline__ void SortDescendingBlockedToStriped(
        KeyT (&keys)[ITEMS_PER_THREAD],
        ValueT (&values)[ITEMS_PER_THREAD],
        int begin_bit = 0,
        int end_bit = sizeof(KeyT) * 8)
    {
        SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>());
    }

};

}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)

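Every public entry point accepts optional begin_bit and end_bit arguments, so callers can restrict the sort to a sub-range of key bits and skip radix passes entirely. The following is a minimal sketch, assuming the standard CUB include path and an illustrative 128-thread block with 4 keys per thread (the kernel name and sizes are not part of this header):

    #include <cub/block/block_radix_sort.cuh>

    // Sort 32-bit keys on their low 16 bits only: with the default RADIX_BITS = 4,
    // this performs 4 radix passes instead of the 8 needed for the full key width.
    __global__ void PartialBitSortKernel(unsigned int *d_keys)
    {
        typedef cub::BlockRadixSort<unsigned int, 128, 4> BlockRadixSortT;
        __shared__ typename BlockRadixSortT::TempStorage temp_storage;

        unsigned int thread_keys[4];
        for (int ITEM = 0; ITEM < 4; ++ITEM)
            thread_keys[ITEM] = d_keys[threadIdx.x * 4 + ITEM];

        BlockRadixSortT(temp_storage).Sort(thread_keys, 0, 16);   // begin_bit = 0, end_bit = 16

        for (int ITEM = 0; ITEM < 4; ++ITEM)
            d_keys[threadIdx.x * 4 + ITEM] = thread_keys[ITEM];
    }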