OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
 
map_grid.hpp
1#ifndef MAP_HPP_
2#define MAP_HPP_
3
4
5#include "config.h"
6#include "util/cuda_launch.hpp"
7#include "util/object_util.hpp"
8#include "Grid/util.hpp"
9#include "Vector/vect_isel.hpp"
10#include "Vector/util.hpp"
11#include "Vector/map_vector_grow_p.hpp"
12#include "memory/ExtPreAlloc.hpp"
13#include "util/util_debug.hpp"
14#include "util/Pack_stat.hpp"
15#include <boost/fusion/include/mpl.hpp>
16#include <boost/fusion/sequence/intrinsic/at_c.hpp>
17#include <boost/fusion/include/at_c.hpp>
18#include <boost/fusion/include/for_each.hpp>
19#include <boost/fusion/container/vector.hpp>
20#include <boost/fusion/include/vector.hpp>
21#include <boost/fusion/container/vector/vector_fwd.hpp>
22#include <boost/fusion/include/vector_fwd.hpp>
23#include <boost/type_traits.hpp>
24#include <boost/fusion/include/for_each.hpp>
25#include <boost/mpl/range_c.hpp>
26#include <boost/mpl/for_each.hpp>
27#include "memory_ly/memory_conf.hpp"
28#include "util/copy_compare/meta_copy.hpp"
29#include "util/for_each_ref.hpp"
30#include "util.hpp"
31#include <utility>
32#ifdef CUDA_GPU
33#include "memory/CudaMemory.cuh"
34#endif
35#include "grid_sm.hpp"
36#include "grid_zm.hpp"
37#include "memory_ly/Encap.hpp"
38#include "memory_ly/memory_array.hpp"
39#include "memory_ly/memory_c.hpp"
40#include <vector>
41#include "se_grid.hpp"
42#include "memory/HeapMemory.hpp"
43#include "memory/PtrMemory.hpp"
44#include "grid_common.hpp"
45#include "util/se_util.hpp"
46#include "iterators/grid_key_dx_iterator.hpp"
47#include "iterators/grid_key_dx_iterator_sub.hpp"
48#include "iterators/grid_key_dx_iterator_sp.hpp"
49#include "iterators/grid_key_dx_iterator_sub_bc.hpp"
50#include "Packer_Unpacker/Packer_util.hpp"
51#include "Packer_Unpacker/has_pack_agg.hpp"
52#include "cuda/cuda_grid_gpu_funcs.cuh"
53#include "grid_base_implementation.hpp"
54#include "util/for_each_ref.hpp"
55#include "Geometry/grid_smb.hpp"
56#include "Geometry/grid_zmb.hpp"
57
58#ifndef CUDA_GPU
60#endif
61
66template<typename SGridGpu>
67struct GetSetBlockType
68{
69 typedef typename SGridGpu::device_grid_type::container type;
70};
71
75template<unsigned int dim, typename T, typename S=HeapMemory, typename layout = typename memory_traits_lin<T>::type, typename linearizer = grid_sm<dim,void> >
76class grid_base
77{
78};
79
111template<unsigned int dim, typename T, typename S, typename linearizer>
112class grid_base<dim,T,S,typename memory_traits_lin<T>::type, linearizer> : public grid_base_impl<dim,T,S, memory_traits_lin,linearizer>
113{
114 typedef typename apply_transform<memory_traits_lin,T>::type T_;
115
116 T background;
117
118public:
119
121 typedef typename memory_traits_lin<T>::type layout;
122
124 // you can access all the properties of T
125 typedef typename grid_base_impl<dim,T,S, memory_traits_lin>::container container;
126
128 typedef void grow_policy;
129
131 typedef grid_key_dx<dim> base_key;
132
134 typedef grid_key_dx_iterator_sub<dim> sub_grid_iterator_type;
135
137 typedef typename grid_base_impl<dim,T,S, memory_traits_lin>::linearizer_type linearizer_type;
138
140 inline grid_base() THROW
141 :grid_base_impl<dim,T,S,memory_traits_lin, linearizer>()
142 {}
143
152 inline grid_base(const grid_base<dim,T,S,typename memory_traits_lin<T>::type> & g) THROW
153 :grid_base_impl<dim,T,S,memory_traits_lin,linearizer>(g)
154 {
155 }
156
162 inline grid_base(const size_t & sz) THROW
163 :grid_base_impl<dim,T,S,memory_traits_lin,linearizer>(sz)
164 {
165 }
166
172 inline grid_base(const size_t (& sz)[dim]) THROW
173 :grid_base_impl<dim,T,S,memory_traits_lin,linearizer>(sz)
174 {
175 }
176
180 template<typename pointers_type,
181 typename headers_type,
182 typename result_type,
183 unsigned int ... prp >
184 static void unpack_headers(pointers_type & pointers, headers_type & headers, result_type & result, int n_slot)
185 {}
186
187 template<unsigned int ... prp, typename S2, typename header_type, typename ite_type, typename context_type>
188 void unpack_with_headers(ExtPreAlloc<S2> & mem,
189 ite_type & sub_it,
190 header_type & headers,
191 int ih,
192 Unpack_stat & ps,
193 context_type &context,
194 rem_copy_opt opt = rem_copy_opt::NONE_OPT)
195 {}
196
197#if defined(__HIP__)
198
204 __device__ grid_base<dim,T,S> & operator=(const grid_base<dim,T,S> & g)
205 {
206 printf("Error grid_base operator= is not defined in device code\n");
207
208 return *this;
209 }
210
211#endif
212
218 __host__ grid_base<dim,T,S> & operator=(const grid_base<dim,T,S> & g)
219 {
220 (static_cast<grid_base_impl<dim,T,S, memory_traits_lin> *>(this))->swap(g.duplicate());
221
222 meta_copy<T>::meta_copy_(g.background,background);
223
224 return *this;
225 }
226
232 grid_base<dim,T,S,typename memory_traits_lin<T>::type> & operator=(grid_base<dim,T,S,typename memory_traits_lin<T>::type> && g)
233 {
234 (static_cast<grid_base_impl<dim,T,S, memory_traits_lin> *>(this))->swap(g);
235
236 meta_copy<T>::meta_copy_(g.background,background);
237
238 return *this;
239 }
240
246 static bool noPointers()
247 {
248 return false;
249 }
250
258 template<unsigned int id> void * getDeviceBuffer()
259 {
260 return this->data_.mem->getDevicePointer();
261 }
262
268 template <typename stencil = no_stencil>
269 static grid_key_dx_iterator_sub<dim,stencil> type_of_subiterator()
270 {
271 return grid_key_dx_iterator_sub<dim,stencil>();
272 }
273
279 static constexpr bool isCompressed()
280 {
281 return false;
282 }
283
289 static grid_key_dx_iterator<dim> type_of_iterator()
290 {
291 return grid_key_dx_iterator<dim>();
292 }
293
300 void convert_key(grid_key_dx<dim> & key_out, const grid_key_dx<dim> & key_in) const
301 {
302 for (size_t i = 0 ; i < dim ; i++)
303 {key_out.set_d(i,key_in.get(i));}
304 }
305
314 {
315 return background;
316 }
317
326 {
327 return background;
328 }
329
335 template<unsigned int p>
336 void setBackgroundValue(const typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type & val)
337 {
338 meta_copy<typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type>::meta_copy_(val,background.template get<p>());
339 }
340
341
347 grid_base<dim,T,S,typename memory_traits_lin<T>::type> & operator=(const grid_base_impl<dim,T,S,memory_traits_lin> & base)
348 {
349 grid_base_impl<dim,T,S,memory_traits_lin>::operator=(base);
350
351 return *this;
352 }
353
359 grid_base<dim,T,S,typename memory_traits_lin<T>::type> & operator=(grid_base_impl<dim,T,S,memory_traits_lin> && base)
360 {
362
363 return *this;
364 }
365};
366
367
368
379template<typename T_type, unsigned int ... prp>
380struct switch_copy_host_to_device
381{
383 typename memory_traits_inte<T_type>::type & dst;
384
386 typedef typename to_boost_vmpl<prp...>::type v_prp;
387
394 switch_copy_host_to_device(typename memory_traits_inte<T_type>::type & dst)
395 :dst(dst)
396 {
397 };
398
399
401 template<typename T>
402 inline void operator()(T& t) const
403 {
404 boost::fusion::at_c<boost::mpl::at<v_prp,boost::mpl::int_<T::value>>::type::value>(dst).switchToDevicePtr();
405 }
406};
407
408
419template<typename T_type, template<typename> class layout_base , typename Memory>
420struct deconstruct_impl
421{
423 typename memory_traits_inte<T_type>::type & dst;
424
431 deconstruct_impl(typename memory_traits_inte<T_type>::type & dst)
432 :dst(dst)
433 {};
434
435
437 template<typename T>
438 inline void operator()(T& t) const
439 {
440 typedef decltype(boost::fusion::at_c<T::value>(dst).mem_r) mem_r_type;
441
442 typedef typename boost::mpl::at<typename T_type::type,T>::type type_prp;
443
444 typedef typename toKernel_transform<layout_base,typename mem_r_type::value_type>::type kernel_type;
445
446 typedef boost::mpl::int_<(is_vector<typename mem_r_type::value_type>::value ||
447 is_vector_dist<typename mem_r_type::value_type>::value ||
448 is_gpu_celllist<typename mem_r_type::value_type>::value) + 2*std::is_array<type_prp>::value + std::rank<type_prp>::value> crh_cond;
449
450 call_recursive_destructor_if_vector<typename mem_r_type::value_type,
451 kernel_type,
452 type_prp,
453 layout_base,
454 crh_cond::value>
455 ::template destruct<Memory,mem_r_type>(static_cast<Memory *>(boost::fusion::at_c<T::value>(dst).mem),
456 boost::fusion::at_c<T::value>(dst).mem_r);
457 }
458};
459
460
461
462struct dim3_
463{
465 unsigned int x;
466
468 unsigned int y;
469
471 unsigned int z;
472};
473
474template<unsigned int dim>
476{
478 dim3_ threads;
479
481 dim3_ grids;
482};
483
484
501template<unsigned int dim, typename T, typename S, typename linearizer>
502class grid_base<dim,T,S,typename memory_traits_inte<T>::type,linearizer> : public grid_base_impl<dim,T,S, memory_traits_inte,linearizer>
503{
504 typedef typename apply_transform<memory_traits_inte,T>::type T_;
505
506 T background;
507
508public:
509
512
514 // you can access all the properties of T
515 typedef typename grid_base_impl<dim,T,S, memory_traits_inte,linearizer>::container container;
516
518 typedef typename grid_base_impl<dim,T,S, memory_traits_inte,linearizer>::linearizer_type linearizer_type;
519
521 inline grid_base() THROW
522 :grid_base_impl<dim,T,S,memory_traits_inte,linearizer>()
523 {
524 }
525
531 inline grid_base(const grid_base & g) THROW
532 :grid_base_impl<dim,T,S,memory_traits_inte,linearizer>(g)
533 {
534 }
535
541 inline grid_base(grid_base && g) THROW
543 {
544 }
545
551 inline grid_base(const size_t & sz) THROW
552 :grid_base_impl<dim,T,S,memory_traits_inte,linearizer>(sz)
553 {
554 }
555
557 inline grid_base(const size_t (& sz)[dim]) THROW
558 :grid_base_impl<dim,T,S,memory_traits_inte,linearizer>(sz)
559 {
560 }
561
565 static void unpack_headers()
566 {}
567
571 template<unsigned int id> void fill(unsigned char c)
572 {
573 boost::fusion::at_c<id>(this->data_).mem->fill(c);
574 }
575
583 template<unsigned int id> void * getDeviceBuffer()
584 {
585 return boost::fusion::at_c<id>(this->data_).mem->getDevicePointer();
586 }
587
593 template <typename stencil = no_stencil>
594 static grid_key_dx_iterator_sub<dim,stencil> type_of_subiterator()
595 {
596 return grid_key_dx_iterator_sub<dim,stencil>();
597 }
598
604 static constexpr bool isCompressed()
605 {
606 return false;
607 }
608
614 static grid_key_dx_iterator<dim> type_of_iterator()
615 {
616 return grid_key_dx_iterator<dim>();
617 }
618
625 void convert_key(grid_key_dx<dim> & key_out, const grid_key_dx<dim> & key_in) const
626 {
627 for (size_t i = 0 ; i < dim ; i++)
628 {key_out.set_d(i,key_in.get(i));}
629 }
630
639 {
640 return background;
641 }
642
651 {
652 return background;
653 }
654
660 grid_base<dim,T,S,typename memory_traits_inte<T>::type,linearizer> & operator=(const grid_base_impl<dim,T,S,memory_traits_inte,linearizer> & base)
661 {
662 grid_base_impl<dim,T,S,memory_traits_inte,linearizer>::operator=(base);
663
664 return *this;
665 }
666
672 grid_base<dim,T,S,typename memory_traits_inte<T>::type,linearizer> & operator=(grid_base_impl<dim,T,S,memory_traits_inte,linearizer> && base)
673 {
675
676 return *this;
677 }
678
679 ~grid_base()
680 {
681 deconstruct_impl<T,memory_traits_inte,S> dth(this->data_);
682
683 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(dth);
684 }
685};
686
688template <unsigned int dim, typename T, typename linearizer = grid_sm<dim,void> > using grid_gpu = grid_base<dim,T,CudaMemory,typename memory_traits_inte<T>::type,linearizer>;
689
691template <unsigned int dim, typename T, typename linearizer = grid_sm<dim,void> > using grid_cpu = grid_base<dim,T,HeapMemory,typename memory_traits_lin<T>::type,linearizer>;
692
693
694#endif
695
696
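A minimal usage sketch may help orient readers of this listing. It is not part of map_grid.hpp: the aggregate<> helper and the setMemory(), getIterator() and get<p>() members inherited from grid_base_impl are declared in other OpenFPM_data headers and are assumed here; only the grid_cpu alias and setBackgroundValue<p>() come from this file.

// Hedged sketch (not from this header): typical use of the grid_cpu alias.
#include "Grid/map_grid.hpp"

int main()
{
    size_t sz[2] = {16,16};

    // 2D CPU grid, each point stores a scalar (property 0) and a 3-component vector (property 1)
    grid_cpu<2, aggregate<float, float[3]>> g(sz);
    g.setMemory();

    // setBackgroundValue<p>() is declared in this header (memory_traits_lin specialization)
    g.setBackgroundValue<0>(0.0f);

    auto it = g.getIterator();
    while (it.isNext())
    {
        auto key = it.get();
        g.get<0>(key) = 1.0f;   // write property 0 at this grid point
        ++it;
    }

    return 0;
}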
This class allocates and destroys CPU memory.
Unpacking status object.
Definition Pack_stat.hpp:16
grid_base(grid_base &&g) THROW
create a grid from another grid
Definition map_grid.hpp:541
grid_base(const grid_base &g) THROW
create a grid from another grid
Definition map_grid.hpp:531
void fill(unsigned char c)
Fill the memory with a byte.
Definition map_grid.hpp:571
grid_base< dim, T, S, typename memory_traits_inte< T >::type, linearizer > & operator=(const grid_base_impl< dim, T, S, memory_traits_inte, linearizer > &base)
assign operator
Definition map_grid.hpp:660
static grid_key_dx_iterator_sub< dim, stencil > type_of_subiterator()
This is a meta-function returning which type of sub-iterator a grid produces.
Definition map_grid.hpp:594
grid_base(const size_t(&sz)[dim]) THROW
Constructor: allocate memory and give it a representation.
Definition map_grid.hpp:557
grid_base_impl< dim, T, S, memory_traits_inte, linearizer >::container container
Object container for T; it is the return type of get_o, which returns an object type through which you can access all the properties of T.
Definition map_grid.hpp:515
grid_base(const size_t &sz) THROW
create a grid of size sz in each direction
Definition map_grid.hpp:551
grid_base< dim, T, S, typename memory_traits_inte< T >::type, linearizer > & operator=(grid_base_impl< dim, T, S, memory_traits_inte, linearizer > &&base)
assign operator
Definition map_grid.hpp:672
void convert_key(grid_key_dx< dim > &key_out, const grid_key_dx< dim > &key_in) const
In this case it just copies key_in into key_out.
Definition map_grid.hpp:625
static constexpr bool isCompressed()
Return whether in this representation data are stored in a compressed way.
Definition map_grid.hpp:604
grid_base_impl< dim, T, S, memory_traits_inte, linearizer >::linearizer_type linearizer_type
linearizer type (Z-morton, Hilbert curve, normal striding)
Definition map_grid.hpp:518
static grid_key_dx_iterator< dim > type_of_iterator()
This is a meta-function returning which type of iterator a grid produces.
Definition map_grid.hpp:614
static grid_key_dx_iterator_sub< dim, stencil > type_of_subiterator()
This is a meta-function returning which type of sub-iterator a grid produces.
Definition map_grid.hpp:269
grid_base_impl< dim, T, S, memory_traits_lin >::container container
Object container for T; it is the return type of get_o, which returns an object type through which you can access all the properties of T.
Definition map_grid.hpp:125
grid_base(const grid_base< dim, T, S, typename memory_traits_lin< T >::type > &g) THROW
create a grid from another grid
Definition map_grid.hpp:152
static void unpack_headers(pointers_type &pointers, headers_type &headers, result_type &result, int n_slot)
Stub does not do anything.
Definition map_grid.hpp:184
memory_traits_lin< T >::type layout
type of layout of the structure
Definition map_grid.hpp:121
grid_base_impl< dim, T, S, memory_traits_lin >::linearizer_type linearizer_type
linearizer type (Z-morton, Hilbert curve, normal striding)
Definition map_grid.hpp:137
grid_key_dx_iterator_sub< dim > sub_grid_iterator_type
sub-grid iterator type
Definition map_grid.hpp:134
grid_base< dim, T, S, typename memory_traits_lin< T >::type > & operator=(grid_base< dim, T, S, typename memory_traits_lin< T >::type > &&g)
It copies a grid.
Definition map_grid.hpp:232
grid_key_dx< dim > base_key
type that identifies one point in the grid
Definition map_grid.hpp:131
void convert_key(grid_key_dx< dim > &key_out, const grid_key_dx< dim > &key_in) const
In this case it just copies key_in into key_out.
Definition map_grid.hpp:300
grid_base(const size_t &sz) THROW
create a grid of size sz in each direction
Definition map_grid.hpp:162
static grid_key_dx_iterator< dim > type_of_iterator()
This is a meta-function returning which type of iterator a grid produces.
Definition map_grid.hpp:289
grid_base< dim, T, S, typename memory_traits_lin< T >::type > & operator=(const grid_base_impl< dim, T, S, memory_traits_lin > &base)
assign operator
Definition map_grid.hpp:347
void setBackgroundValue(const typename boost::mpl::at< typename T::type, boost::mpl::int_< p > >::type &val)
Set the background value.
Definition map_grid.hpp:336
static constexpr bool isCompressed()
Return whether in this representation data are stored in a compressed way.
Definition map_grid.hpp:279
grid_base(const size_t(&sz)[dim]) THROW
Constructor: allocate memory.
Definition map_grid.hpp:172
grid_base< dim, T, S, typename memory_traits_lin< T >::type > & operator=(grid_base_impl< dim, T, S, memory_traits_lin > &&base)
assign operator
Definition map_grid.hpp:359
__host__ grid_base< dim, T, S > & operator=(const grid_base< dim, T, S > &g)
It copies a grid.
Definition map_grid.hpp:218
Implementation of an N-dimensional grid.
grid_base_impl< dim, T, S, layout_base > & operator=(const grid_base_impl< dim, T, S, layout_base > &g)
It copies a grid.
Declaration of grid_key_dx_iterator_sub.
grid_key_dx is the key to access any element in the grid
Definition grid_key.hpp:19
__device__ __host__ void set_d(index_type i, index_type id)
Set the i index.
Definition grid_key.hpp:516
__device__ __host__ index_type get(index_type i) const
Get the i index.
Definition grid_key.hpp:503
Declaration of grid_sm.
Definition grid_sm.hpp:167
get the type of the SetBlock
Definition map_grid.hpp:68
this class is a functor for the "for_each" algorithm
Definition map_grid.hpp:421
memory_traits_inte< T_type >::type & dst
object to destruct
Definition map_grid.hpp:423
void operator()(T &t) const
It calls the copy function for each property.
Definition map_grid.hpp:438
deconstruct_impl(typename memory_traits_inte< T_type >::type &dst)
constructor
Definition map_grid.hpp:431
dim3_ grids
number of grids for the kernel execution
Definition map_grid.hpp:481
dim3_ threads
number of threads in each block
Definition map_grid.hpp:478
unsigned int z
size in z dimension
Definition map_grid.hpp:471
unsigned int x
size in x dimension
Definition map_grid.hpp:465
unsigned int y
size in y dimension
Definition map_grid.hpp:468
Check whether this is a gpu or cpu type cell-list.
Definition util.hpp:74
Transform the boost::fusion::vector into memory specification (memory_traits)
inter_memc< typename T::type >::type type
for each element in the vector interleave memory_c
Transform the boost::fusion::vector into memory specification (memory_traits)
This class copies general objects.
Definition meta_copy.hpp:53
__device__ static __host__ void meta_copy_(const T &src, T &dst)
copy an object from src to dst
Definition meta_copy.hpp:60
this class is a functor for the "for_each" algorithm
Definition map_grid.hpp:381
switch_copy_host_to_device(typename memory_traits_inte< T_type >::type &dst)
constructor
Definition map_grid.hpp:394
to_boost_vmpl< prp... >::type v_prp
Convert the packed properties into an MPL vector.
Definition map_grid.hpp:386
void operator()(T &t) const
It calls the copy function for each property.
Definition map_grid.hpp:402
memory_traits_inte< T_type >::type & dst
encapsulated destination object
Definition map_grid.hpp:383
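Both functors documented here (switch_copy_host_to_device and deconstruct_impl) follow the same pattern used in the ~grid_base() destructor: a functor with a templated operator() is applied to every property index via boost::mpl::for_each_ref over boost::mpl::range_c<int,0,T::max_prop>. The self-contained sketch below shows the pattern with plain boost::mpl::for_each and a hypothetical print_prop_index functor; it is illustrative only, not code from this file.

#include <iostream>
#include <boost/mpl/range_c.hpp>
#include <boost/mpl/for_each.hpp>

// Hypothetical functor: visits one compile-time property index per call,
// mirroring the shape of switch_copy_host_to_device / deconstruct_impl.
struct print_prop_index
{
    template<typename T>
    void operator()(T&) const
    {
        std::cout << "visiting property " << T::value << std::endl;
    }
};

int main()
{
    // Visit indices 0..2, as the destructor visits 0..T::max_prop
    boost::mpl::for_each< boost::mpl::range_c<int,0,3> >(print_prop_index());
    return 0;
}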