OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
grid_base_implementation.hpp
1 /*
2  * grid_base_implementation.hpp
3  *
4  * Created on: May 2, 2016
5  * Author: i-bird
6  */
7 
8 #ifndef OPENFPM_DATA_SRC_GRID_GRID_BASE_IMPLEMENTATION_HPP_
9 #define OPENFPM_DATA_SRC_GRID_GRID_BASE_IMPLEMENTATION_HPP_
10 
11 #include "grid_base_impl_layout.hpp"
12 #include "util/cuda_util.hpp"
13 #include "cuda/cuda_grid_gpu_funcs.cuh"
14 #include "util/create_vmpl_sequence.hpp"
15 #include "util/cuda_launch.hpp"
16 #include "util/object_si_di.hpp"
17 
18 constexpr int DATA_ON_HOST = 32;
19 constexpr int DATA_ON_DEVICE = 64;
20 constexpr int EXACT_RESIZE = 128;
21 
22 template<bool np,typename T>
23 struct skip_init
24 {
25  static bool skip_()
26  {
27  return true;
28  }
29 };
30 
31 template<typename T>
32 struct skip_init<true,T>
33 {
34  static bool skip_()
35  {
36  return T::noPointers();
37  }
38 };
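// ---------------------------------------------------------------------------
// Illustrative sketch (editorial example, not part of the original header):
// skip_init is a compile-time switch. When has_noPointers<T> detects a
// noPointers() member, the specialization above asks T itself; otherwise
// skip_() defaults to true. The struct example_no_ptr below is hypothetical.
#if 0 // example only, excluded from compilation
struct example_no_ptr
{
 static bool noPointers() { return true; }
};

inline bool example_skip()
{
 // resolves to example_no_ptr::noPointers()
 return skip_init<has_noPointers<example_no_ptr>::value,example_no_ptr>::skip_();
}
#endif
// ---------------------------------------------------------------------------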
39 
40 #ifdef __NVCC__
41 
42 template<bool active>
43 struct copy_ndim_grid_device_active_impl
44  {
45  template<typename grid_type1, typename grid_type2, typename ite_gpu_type>
46  static inline void copy(grid_type1 & g1, grid_type2 & g2, ite_gpu_type & ite)
47  {
48 
49  }
50 
51  template<typename grid_type1, typename grid_type2, typename ite_gpu_type>
52  static inline void copy_block(grid_type1 & g1, grid_type2 & g2, ite_gpu_type & ite)
53  {
54  }
55 };
56 
57 template<>
58 struct copy_ndim_grid_device_active_impl<true>
59 {
60  template<typename grid_type1, typename grid_type2, typename ite_gpu_type>
61  static inline void copy(grid_type1 & g1, grid_type2 & g2, ite_gpu_type & ite)
62  {
63  CUDA_LAUNCH((copy_ndim_grid_device<grid_type1::dims,decltype(g1.toKernel())>),ite,g2.toKernel(),g1.toKernel());
64  }
65 
66  template<typename grid_type1, typename grid_type2, typename ite_gpu_type>
67  static inline void copy_block(grid_type1 & g1, grid_type2 & g2, ite_gpu_type & ite)
68  {
69  CUDA_LAUNCH((copy_ndim_grid_block_device<grid_type1::dims,decltype(g1.toKernel())>),ite,g2.toKernel(),g1.toKernel());
70  }
71 };
72 
73 template<typename S,typename grid_dst_type, typename grid_src_type>
74 void copy_grid_to_grid(grid_dst_type & gdst, const grid_src_type & gsrc,
75  grid_key_dx<grid_dst_type::dims> & start, grid_key_dx<grid_dst_type::dims> & stop,
76  int blockSize)
77 {
78  if (grid_dst_type::dims <= 3)
79  {
80  auto ite = gsrc.getGPUIterator(start,stop);
81  bool has_work = has_work_gpu(ite);
82 
83  if (has_work == true)
84  {
85  if (blockSize == 1)
86  {
87  copy_ndim_grid_device_active_impl<S::isDeviceHostSame() == false>::copy(gdst,gsrc,ite);
88  }
89  else
90  {
91  move_work_to_blocks(ite);
92 
93  ite.thr.x = blockSize;
94 
95  copy_ndim_grid_device_active_impl<S::isDeviceHostSame() == false>::copy_block(gdst,gsrc,ite);
96  }
97  }
98  }
99  else
100  {
101  grid_key_dx<1> start;
102  start.set_d(0,0);
103  grid_key_dx<1> stop;
104  stop.set_d(0,gsrc.getGrid().size());
105 
106  size_t sz[1];
107  sz[0]= gsrc.getGrid().size();
108 
109  grid_sm<1,void> g_sm_copy(sz);
110 
111  auto ite = getGPUIterator_impl<1>(g_sm_copy,start,stop);
112 
113  copy_ndim_grid_device_active_impl<S::isDeviceHostSame() == false>::copy(gdst,gsrc,ite);
114  }
115 }
116 
117 #endif
118 
119 template<typename dest_type, typename src_type, unsigned int ... prp>
120 void copy_with_openmp_prp(const dest_type & dst, const src_type & src, ite_gpu<dest_type::dims> ite)
121 {
122  #ifdef CUDIFY_USE_OPENMP
123  auto lamb = [&dst,&src,&ite] __device__ (dim3 & blockIdx, dim3 & threadIdx)
124  {
125  grid_key_dx<dest_type::dims> i;
126 
127  if (dest_type::dims == 1)
128  {
129  i.set_d(0,blockIdx.x*blockDim.x + threadIdx.x + ite.start.get(0));
130  if (i.get(0) >= src.size(0)) {return;}
131  }
132  else if (dest_type::dims == 2)
133  {
134  i.set_d(0,blockIdx.x*blockDim.x + threadIdx.x + ite.start.get(0));
135  i.set_d(1,blockIdx.y*blockDim.y + threadIdx.y + ite.start.get(1));
136  if (i.get(0) >= src.size(0) || i.get(1) >= src.size(1)) {return;}
137  }
138  else if (dest_type::dims == 3)
139  {
140  i.set_d(0,blockIdx.x*blockDim.x + threadIdx.x + ite.start.get(0));
141  i.set_d(1,blockIdx.y*blockDim.y + threadIdx.y + ite.start.get(1));
142  i.set_d(2,blockIdx.z*blockDim.z + threadIdx.z + ite.start.get(2));
143  if (i.get(0) >= src.size(0) || i.get(1) >= src.size(1) || i.get(2) >= src.size(2)) {return;}
144  }
145 
146  object_si_di<decltype(src.get_o(i)),decltype(dst.get_o(i)),OBJ_ENCAP,prp ...>(src.get_o(i),dst.get_o(i));
147  };
148 
149  CUDA_LAUNCH_LAMBDA(ite,lamb);
150  #else
151  std::cout << __FILE__ << ":" << __LINE__ << " error: the CUDA backend is disabled" << std::endl;
152  #endif
153 }
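// ---------------------------------------------------------------------------
// Usage sketch (editorial example): this is how hostToDeviceNUMA() further
// down drives copy_with_openmp_prp, copying properties 0 and 1 so that each
// OpenMP thread touches the pages it will later compute on. The grid type is
// assumed to expose toKernel() and a dims constant.
#if 0 // example only, excluded from compilation
template<typename grid>
void example_numa_copy(grid & g, ite_gpu<grid::dims> & ite)
{
 copy_with_openmp_prp<decltype(g.toKernel()),
                      typename std::remove_reference<decltype(g)>::type,
                      0,1>(g.toKernel(),g,ite);
}
#endif
// ---------------------------------------------------------------------------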
154 
155 
156 #ifdef CUDA_GPU
157 
158 #define GRID_ID_3_RAW(start,stop) int x[3] = {threadIdx.x + blockIdx.x * blockDim.x + start.get(0),\
159  threadIdx.y + blockIdx.y * blockDim.y + start.get(1),\
160  threadIdx.z + blockIdx.z * blockDim.z + start.get(2)};\
161  \
162  if (x[0] > stop.get(0) || x[1] > stop.get(1) || x[2] > stop.get(2))\
163  {return;}
164 
165 #define GRID_ID_3_TRAW(start,stop) int tx = threadIdx.x + blockIdx.x * blockDim.x + start.get(0);\
166  int ty = threadIdx.y + blockIdx.y * blockDim.y + start.get(1);\
167  int tz = threadIdx.z + blockIdx.z * blockDim.z + start.get(2);\
168  \
169  if (tx > stop.get(0) || ty > stop.get(1) || tz > stop.get(2))\
170  {return;}
171 
172 #define GRID_ID_3(ite_gpu) grid_key_dx<3,int> key;\
173  key.set_d(0,threadIdx.x + blockIdx.x * blockDim.x + ite_gpu.start.get(0));\
174  key.set_d(1,threadIdx.y + blockIdx.y * blockDim.y + ite_gpu.start.get(1));\
175  key.set_d(2,threadIdx.z + blockIdx.z * blockDim.z + ite_gpu.start.get(2));\
176  \
177  if (key.get(0) > ite_gpu.stop.get(0) || key.get(1) > ite_gpu.stop.get(1) || key.get(2) > ite_gpu.stop.get(2))\
178  {return;}
179 
180 #define GRID_ID_2(ite_gpu) grid_key_dx<2,int> key;\
181  key.set_d(0,threadIdx.x + blockIdx.x * blockDim.x + ite_gpu.start.get(0));\
182  key.set_d(1,threadIdx.y + blockIdx.y * blockDim.y + ite_gpu.start.get(1));\
183  \
184  if (key.get(0) > ite_gpu.stop.get(0) || key.get(1) > ite_gpu.stop.get(1))\
185  {return;}
186 
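// ---------------------------------------------------------------------------
// Kernel sketch (editorial example): GRID_ID_3 expands to a bounds-checked
// grid_key_dx<3,int> named key, computed from the CUDA launch coordinates
// plus ite_gpu.start; threads past ite_gpu.stop return early. The kernel
// example_set and the property index 0 are hypothetical.
#if 0 // example only, requires NVCC
template<typename grid_ker>
__global__ void example_set(ite_gpu<3> ite, grid_ker g, float value)
{
 GRID_ID_3(ite);

 g.template get<0>(key) = value;
}
#endif
// ---------------------------------------------------------------------------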
187 #ifdef __NVCC__
188 
189 
190 template<unsigned int dim, typename ids_type = int>
191 struct grid_p
192 {
193  __device__ static inline grid_key_dx<dim,ids_type> get_grid_point(const grid_sm<dim,void> & g)
194  {
195  grid_key_dx<dim,ids_type> key;
196 
197  key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
198  key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);
199 
200  unsigned int bz = blockIdx.z * blockDim.z + threadIdx.z;
201  key.set_d(2,bz % g.size(2));
202 
203  for (unsigned int i = 3 ; i < dim ; i++)
204  {
205  bz /= g.size(i);
206  key.set_d(i,bz % g.size(i));
207  }
208 
209  return key;
210  }
211 
212  __device__ static inline grid_key_dx<dim,ids_type> get_grid_point(const openfpm::array<ids_type,dim,unsigned int> & g)
213  {
214  grid_key_dx<dim,ids_type> key;
215 
216  key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
217  key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);
218 
219  unsigned int bz = blockIdx.z * blockDim.z + threadIdx.z;
220  key.set_d(2,bz % g[2]);
221 
222  for (unsigned int i = 3 ; i < dim ; i++)
223  {
224  bz /= g[i];
225  key.set_d(i,bz % g[i]);
226  }
227 
228  return key;
229  }
230 };
231 
232 template<typename ids_type>
233 struct grid_p<3,ids_type>
234 {
235  __device__ static inline grid_key_dx<3,ids_type> get_grid_point(const grid_sm<3,void> & g)
236  {
237  grid_key_dx<3,ids_type> key;
238 
239  key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
240  key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);
241  key.set_d(2,blockIdx.z * blockDim.z + threadIdx.z);
242 
243  return key;
244  }
245 
246  __device__ static inline grid_key_dx<3,ids_type> get_grid_point(const openfpm::array<ids_type,3,unsigned int> & g)
247  {
248  grid_key_dx<3,ids_type> key;
249 
250  key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
251  key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);
252  key.set_d(2,blockIdx.z * blockDim.z + threadIdx.z);
253 
254  return key;
255  }
256 };
257 
258 template<typename ids_type>
259 struct grid_p<2,ids_type>
260 {
261  __device__ static inline grid_key_dx<2,ids_type> get_grid_point(const grid_sm<2,void> & g)
262  {
263  grid_key_dx<2,ids_type> key;
264 
265  key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
266  key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);
267 
268  return key;
269  }
270 
271  __device__ static inline grid_key_dx<2,ids_type> get_grid_point(const openfpm::array<ids_type,2,unsigned int> & g)
272  {
273  grid_key_dx<2,ids_type> key;
274 
275  key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
276  key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);
277 
278  return key;
279  }
280 };
281 
282 template<typename ids_type>
283 struct grid_p<1,ids_type>
284 {
285  __device__ static inline grid_key_dx<1,unsigned int> get_grid_point(const grid_sm<1,void> & g)
286  {
287  grid_key_dx<1,unsigned int> key;
288 
289  key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
290 
291  return key;
292  }
293 };
294 
295 #endif
296 
297 
298 template<unsigned int dim>
299 void move_work_to_blocks(ite_gpu<dim> & ite)
300 {
301  if (dim == 1)
302  {
303  ite.wthr.x = ite.wthr.x * ite.thr.x;
304  ite.thr.x = 1;
305  }
306  else if(dim == 2)
307  {
308  ite.wthr.x = ite.wthr.x * ite.thr.x;
309  ite.wthr.y = ite.wthr.y * ite.thr.y;
310  ite.thr.x = 1;
311  ite.thr.y = 1;
312 
313  }
314  else
315  {
316  ite.wthr.x = ite.wthr.x * ite.thr.x;
317  ite.wthr.y = ite.wthr.y * ite.thr.y;
318  ite.wthr.z = ite.wthr.z * ite.thr.z;
319  ite.thr.x = 1;
320  ite.thr.y = 1;
321  ite.thr.z = 1;
322  }
323 }
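// ---------------------------------------------------------------------------
// Numeric sketch (editorial example): move_work_to_blocks flattens the
// per-block threads into the block counts; for dim == 2 a launch of
// wthr=(16,8), thr=(8,4) becomes wthr=(128,32), thr=(1,1). The caller (see
// copy_grid_to_grid above) can then re-assign blockSize threads along x.
#if 0 // example only, excluded from compilation
inline void example_move_to_blocks()
{
 ite_gpu<2> ite;
 ite.wthr = dim3(16,8,1);
 ite.thr  = dim3(8,4,1);

 move_work_to_blocks(ite);   // now wthr=(128,32,1), thr=(1,1,1)
}
#endif
// ---------------------------------------------------------------------------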
324 
325 #endif
326 
327 #include "copy_grid_fast.hpp"
328 
329 template<typename T>
330 struct copy_grid_fast_caller
331 {
332  template<typename grid_type>
333  static void call(grid_type & gd, const grid_type & gs, const Box<grid_type::dims,size_t> & box_src, const Box<grid_type::dims,size_t> & box_dst)
334  {
335  std::cout << "Error: " << __FILE__ << ":" << __LINE__ << " copy_grid_fast_caller failure" << std::endl;
336  }
337 };
338 
339 template<int ... prp>
340 struct copy_grid_fast_caller<index_tuple_sq<prp ...>>
341 {
342  template<typename grid_type>
343  static void call(grid_type & gd, const grid_type & gs, const Box<grid_type::dims,size_t> & box_src, const Box<grid_type::dims,size_t> & box_dst)
344  {
345  grid_key_dx<grid_type::dims> cnt[1];
346  cnt[0].zero();
347 
348  typedef typename std::remove_reference<decltype(gd)>::type grid_cp;
349  typedef typename std::remove_reference<decltype(gd.getGrid())>::type grid_info_cp;
350 
352 
353  copy_grid_fast<!is_contiguos<prp...>::type::value || has_pack_gen<typename grid_type::value_type>::value,
354  grid_type::dims,
355  grid_cp,
356  grid_info_cp>::copy(gs.getGrid(),
357  gd.getGrid(),
358  box_src,
359  box_dst,
360  gs,gd,cnt);
361  }
362 };
363 
374 template<unsigned int dim,
375  typename T,
376  typename S,
377  template<typename> class layout_base,
378  typename ord_type = grid_sm<dim,void> >
379 class grid_base_impl
380 {
382  typedef typename layout_base<T>::type layout;
383 
384  typedef typename apply_transform<layout_base,T>::type T_;
385 
386 public:
387 
389  typedef layout layout_type;
390 
392  static constexpr unsigned int dims = dim;
393 
395  typedef grid_key_dx<dim> access_key;
396 
398  typedef typename T::type T_type;
399 
401  typedef layout_base<T> layout_base_;
402 
403  typedef ord_type linearizer_type;
404 
405  typedef T background_type;
406 
407 protected:
408 
410  layout data_;
411 
413  ord_type g1;
414 
415 private:
416 
418  bool is_mem_init = false;
419 
421  bool isExternal;
422 
423 #ifdef SE_CLASS1
429  inline void check_init() const
430  {
431 #ifndef __CUDA_ARCH__
432  if (is_mem_init == false)
433  {
434  std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " you must call setMemory before accessing the grid\n";
435  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
436  }
437 #endif
438  }
439 
445  inline void check_bound(const grid_key_dx<dim> & v1) const
446  {
447 #ifndef __CUDA_ARCH__
448  for (long int i = 0 ; i < dim ; i++)
449  {
450  if (v1.get(i) >= (long int)getGrid().size(i))
451  {
452  std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << v1.get(i) << " >= " << getGrid().size(i) << "\n";
453  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
454  }
455  else if (v1.get(i) < 0)
456  {
457  std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << v1.get(i) << " is negative " << "\n";
458  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
459  }
460  }
461 #endif
462  }
463 
469  inline void check_bound(size_t v1) const
470  {
471 #ifndef __CUDA_ARCH__
472  if (v1 >= getGrid().size())
473  {
474  std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << v1<< " >= " << getGrid().size() << "\n";
475  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
476  }
477 #endif
478  }
479 
488  template<typename Mem> inline void check_bound(const grid_base_impl<dim,T,Mem,layout_base, ord_type> & g,const grid_key_dx<dim> & key2) const
489  {
490 #ifndef __CUDA_ARCH__
491  for (size_t i = 0 ; i < dim ; i++)
492  {
493  if (key2.get(i) >= (long int)g.getGrid().size(i))
494  {
495  std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << key2.get(i) << " >= " << g.getGrid().size(i) << "\n";
496  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
497  }
498  else if (key2.get(i) < 0)
499  {
500  std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << key2.get(i) << " is negative " << "\n";
501  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
502  }
503  }
504 #endif
505  }
506 
515  template<typename Mem, template <typename> class layout_base2>
516  inline void check_bound(const grid_base_impl<dim,T,Mem,layout_base2,ord_type> & g,const grid_key_dx<dim> & key2) const
517  {
518 #ifndef __CUDA_ARCH__
519  for (size_t i = 0 ; i < dim ; i++)
520  {
521  if (key2.get(i) >= (long int)g.getGrid().size(i))
522  {
523  std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << key2.get(i) << " >= " << g.getGrid().size(i) << "\n";
524  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
525  }
526  else if (key2.get(i) < 0)
527  {
528  std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << key2.get(i) << " is negative " << "\n";
529  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
530  }
531  }
532 #endif
533  }
534 
543  template<typename Mem> inline void check_bound(const grid_base_impl<dim,T,Mem,layout_base> & g,const size_t & key2) const
544  {
545 #ifndef __CUDA_ARCH__
546  if (key2 >= g.getGrid().size())
547  {
548  std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << key2 << " >= " << g.getGrid().size() << "\n";
549  ACTION_ON_ERROR(GRID_ERROR_OBJECT);
550  }
551 #endif
552  }
553 
554 #endif
555 
556  void resize_impl_device(const size_t (& sz)[dim],grid_base_impl<dim,T,S,layout_base,ord_type> & grid_new, unsigned int blockSize = 1)
557  {
558 #if defined(CUDA_GPU) && defined(__NVCC__)
559 
560  // Compile-time check that it makes sense to call a GPU kernel to copy.
561 
562 
563  grid_key_dx<dim> start;
564  grid_key_dx<dim> stop;
565 
566  for (size_t i = 0 ; i < dim ; i++)
567  {
568  start.set_d(i,0);
569 
570  // take the smallest
571  if (grid_new.g1.size(i) < sz[i])
572  {stop.set_d(i,grid_new.g1.size(i)-1);}
573  else
574  {stop.set_d(i,sz[i]-1);}
575  }
576 
577  copy_grid_to_grid<S>(grid_new,*this,start,stop,blockSize);
578 
579 #else
580 
581  std::cout << __FILE__ << ":" << __LINE__ << " error: the function resize requires the launch of a kernel, but it seems that this" <<
582  " file (grid_base_implementation.hpp) has not been compiled with NVCC " << std::endl;
583 
584 #endif
585  }
586 
587  void resize_impl_host(const size_t (& sz)[dim], grid_base_impl<dim,T,S,layout_base,ord_type> & grid_new)
588  {
589  size_t sz_c[dim];
590  for (size_t i = 0 ; i < dim ; i++)
591  {sz_c[i] = (g1.size(i) < sz[i])?g1.size(i):sz[i];}
592 
593  grid_sm<dim,void> g1_c(sz_c);
594 
596  grid_key_dx_iterator<dim> it(g1_c);
597 
598  while(it.isNext())
599  {
600  // get the grid key
601  grid_key_dx<dim> key = it.get();
602 
603  // create a copy element
604 
605  grid_new.get_o(key) = this->get_o(key);
606 
607  ++it;
608  }
609  }
610 
611  void resize_impl_memset(grid_base_impl<dim,T,S,layout_base,ord_type> & grid_new)
612  {
614  if (isExternal == true)
615  {
616  mem_setext<typename std::remove_reference<decltype(grid_new)>::type,S,layout_base<T>,decltype(data_)>::set(grid_new,*this,this->data_);
617  }
618  else
619  grid_new.setMemory();
620 
621 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
622 
624 
625 #endif
626  }
627 
628 public:
629 
630  // Implementation of packer and unpacker for grid
631  #include "grid_pack_unpack.ipp"
632 
634  typedef int yes_i_am_grid;
635 
637  typedef typename memory_traits_lin<typename T::type>::type memory_lin;
638 
640  // you can access all the properties of T
641  typedef encapc<dim,T,layout> container;
642 
644  typedef T value_type;
645 
647  grid_base_impl() THROW
648  :g1(0),isExternal(false)
649  {
650  // Add this pointer
651  }
652 
659  grid_base_impl(const grid_base_impl & g) THROW
660  :isExternal(false)
661  {
662  this->operator=(g);
663  }
664 
670  grid_base_impl(const size_t & sz) THROW
671  :g1(sz),isExternal(false)
672  {
673  // Add this pointer
674  }
675 
683  grid_base_impl(const size_t (& sz)[dim]) THROW
684  :g1(sz),isExternal(false)
685  {
686  // Add this pointer
687  }
688 
690  ~grid_base_impl() THROW
691  {
692  // delete this pointer
693  }
694 
702  grid_base_impl<dim,T,S,layout_base> & operator=(const grid_base_impl<dim,T,S,layout_base> & g)
703  {
704  swap(g.duplicate());
705 
706 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
707 
710 
711 #endif
712 
713  return *this;
714  }
715 
723  grid_base_impl<dim,T,S,layout_base> & operator=(grid_base_impl<dim,T,S,layout_base> && g)
724  {
725  swap(g);
726 
727 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
728 
730 
731 #endif
732 
733  return *this;
734  }
735 
743  bool operator==(const grid_base_impl<dim,T,S,layout_base> & g)
744  {
745  // check if the have the same size
746  if (g1 != g.g1)
747  return false;
748 
749  auto it = getIterator();
750 
751  while (it.isNext())
752  {
753  auto key = it.get();
754 
755  if (this->get_o(key) != g.get_o(key))
756  return false;
757 
758  ++it;
759  }
760 
761  return true;
762  }
763 
769  grid_base_impl<dim,T,S,layout_base> duplicate() const THROW
770  {
772 
773  grid_base_impl<dim,T,S,layout_base> grid_new(g1.getSize());
774 
776  grid_new.setMemory();
777 
778  // We know that, if it is 1D we can safely copy the memory
779 // if (dim == 1)
780 // {
782 // grid_new.data_.mem->copy(*data_.mem);
783 // }
784 // else
785 // {
787 
789  grid_key_dx_iterator<dim> it(g1);
790 
791  while(it.isNext())
792  {
793  grid_new.set(it.get(),*this,it.get());
794 
795  ++it;
796  }
797 // }
798 
799  // copy grid_new to the base
800 
801  return grid_new;
802  }
803 
804 #ifdef CUDA_GPU
805 
812  struct ite_gpu<dim> getGPUIterator(const grid_key_dx<dim,long int> & key1, const grid_key_dx<dim,long int> & key2, size_t n_thr = default_kernel_wg_threads_) const
813  {
814  return getGPUIterator_impl<dim>(g1,key1,key2,n_thr);
815  }
816 #endif
817 
821  int size(int i) const
822  {
823  return g1.size(i);
824  }
825 
834  const ord_type & getGrid() const
835  {
836  return g1;
837  }
838 
847  void setMemory()
848  {
849  mem_setm<S,layout_base<T>,decltype(this->data_),decltype(this->g1)>::setMemory(data_,g1,is_mem_init);
850 
851 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
852 
854 
855 #endif
856 
857  }
858 
866  template<unsigned int p>
867  auto getMemory() -> decltype(boost::fusion::at_c<p>(data_).getMemory())
868  {
869  return boost::fusion::at_c<p>(data_).getMemory();
870  }
871 
883  template<unsigned int p = 0> void setMemory(S & m)
884  {
886  isExternal = true;
887 
888  bool skip_ini = skip_init<has_noPointers<T>::value,T>::skip_();
889 
890  mem_setmemory<decltype(data_),S,layout_base<T>>::template setMemory<p>(data_,m,g1.size(),skip_ini);
891 
892  is_mem_init = true;
893 
894 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
895 
897 
898 #endif
899  }
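// ---------------------------------------------------------------------------
// Usage sketch (editorial example, shown inline next to the member it
// illustrates and excluded from compilation): attaching caller-owned memory.
// HeapMemory and aggregate<float> are standard OpenFPM types; sizes are
// arbitrary. The external memory must outlive the grid, which will not free it.
#if 0 // example only
inline void example_external_memory()
{
 HeapMemory mem;     // owned by the caller, must outlive the grid

 size_t sz[2] = {64,64};
 grid_base_impl<2,aggregate<float>,HeapMemory,memory_traits_lin> g(sz);
 g.setMemory(mem);   // the grid marks itself external

 grid_key_dx<2> k;
 k.set_d(0,0); k.set_d(1,0);
 g.template get<0>(k) = 1.0f;
}
#endif
// ---------------------------------------------------------------------------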
900 
912  void setMemoryArray(S * m)
913  {
915  isExternal = true;
916 
917  bool skip_ini = skip_init<has_noPointers<T>::value,T>::skip_();
918 
919  mem_setmemory<decltype(data_),S,layout_base<T>>::template setMemoryArray(*this,m,g1.size(),skip_ini);
920 
921  is_mem_init = true;
922 
923 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
924 
926 
927 #endif
928  }
929 
938  template<unsigned int p = 0> void * getPointer()
939  {
940  return mem_getpointer<decltype(data_),layout_base_>::template getPointer<p>(data_);
941  }
942 
951  template<unsigned int p = 0> const void * getPointer() const
952  {
953  return mem_getpointer<decltype(data_),layout_base_>::template getPointer<p>(data_);
954  }
955 
956 
964  template <unsigned int p, typename r_type=decltype(layout_base<T>::template get<p>(data_,g1,grid_key_dx<dim>()))>
965  inline r_type insert(const grid_key_dx<dim> & v1)
966  {
967 #ifdef SE_CLASS1
968  check_init();
969  check_bound(v1);
970 #endif
971  return this->get<p>(v1);
972  }
973 
974 
982  template <unsigned int p, typename r_type=decltype(layout_base<T>::template get<p>(data_,g1,grid_key_dx<dim>()))>
983  __device__ __host__ inline r_type get_usafe(const grid_key_dx<dim> & v1)
984  {
985 #ifdef SE_CLASS1
986  check_init();
987 #endif
988  return layout_base<T>::template get<p>(data_,g1,v1);
989  }
990 
995  int getBlockEdgeSize()
996  {
997  return 1;
998  }
999 
1006  void setGPUInsertBuffer(unsigned int nb, unsigned int nt)
1007  {}
1008 
1016  template <unsigned int p, typename r_type=decltype(layout_base<T>::template get_c<p>(data_,g1,grid_key_dx<dim>()))>
1017  __device__ __host__ inline r_type get_unsafe(const grid_key_dx<dim> & v1) const
1018  {
1019 #ifdef SE_CLASS1
1020  check_init();
1021 #endif
1022  return layout_base<T>::template get_c<p>(data_,g1,v1);
1023  }
1024 
1032  template <unsigned int p, typename r_type=decltype(layout_base<T>::template get<p>(data_,g1,grid_key_dx<dim>()))>
1033  __device__ __host__ inline r_type get(const grid_key_dx<dim> & v1)
1034  {
1035 #ifdef SE_CLASS1
1036  check_init();
1037  check_bound(v1);
1038 #endif
1039  return layout_base<T>::template get<p>(data_,g1,v1);
1040  }
1041 
1049  __device__ __host__ inline unsigned char getFlag(const grid_key_dx<dim> & v1) const
1050  {
1051  return 0;
1052  }
1053 
1061  template <unsigned int p, typename r_type=decltype(layout_base<T>::template get_c<p>(data_,g1,grid_key_dx<dim>()))>
1062  __device__ __host__ inline r_type get(const grid_key_dx<dim> & v1) const
1063  {
1064 #ifdef SE_CLASS1
1065  check_init();
1066  check_bound(v1);
1067 #endif
1068  return layout_base<T>::template get_c<p>(data_,g1,v1);
1069  }
1070 
1078  template <unsigned int p, typename r_type=decltype(layout_base<T>::template get_lin<p>(data_,g1,0))>
1079  __device__ __host__ inline r_type get(const size_t lin_id)
1080  {
1081 #ifdef SE_CLASS1
1082  check_init();
1083  check_bound(lin_id);
1084 #endif
1085  return layout_base<T>::template get_lin<p>(data_,g1,lin_id);
1086  }
1087 
1095  template <unsigned int p, typename r_type=decltype(layout_base<T>::template get_lin<p>(data_,g1,0))>
1096  __device__ __host__ inline const r_type get(size_t lin_id) const
1097  {
1098 #ifdef SE_CLASS1
1099  check_init();
1100  check_bound(lin_id);
1101 #endif
1102  return layout_base<T>::template get_lin_const(data_,g1,lin_id);
1103  }
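// ---------------------------------------------------------------------------
// Access sketch (editorial example, excluded from compilation): get<p>
// returns a reference to property p at the given key; an array property
// returns the whole array, which can then be indexed per component.
#if 0 // example only
inline void example_access()
{
 size_t sz[2] = {8,8};
 grid_base_impl<2,aggregate<float,float[3]>,HeapMemory,memory_traits_lin> g(sz);
 g.setMemory();

 grid_key_dx<2> k;
 k.set_d(0,1); k.set_d(1,2);

 g.template get<0>(k) = 3.0f;      // scalar property
 g.template get<1>(k)[0] = 1.0f;   // x component of the vector property
}
#endif
// ---------------------------------------------------------------------------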
1104 
1105 
1117  inline encapc<dim,T,layout> get_o(const grid_key_dx<dim> & v1)
1118  {
1119 #ifdef SE_CLASS1
1120  check_init();
1121  check_bound(v1);
1122 #endif
1123  return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get(data_,g1,v1);
1124  }
1125 
1137  inline const encapc<dim,T,layout> get_o(const grid_key_dx<dim> & v1) const
1138  {
1139 #ifdef SE_CLASS1
1140  check_init();
1141  check_bound(v1);
1142 #endif
1143  return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>
1144  ::get(const_cast<typename std::add_lvalue_reference<decltype(this->data_)>::type>(data_),
1145  g1,v1);
1146  }
1147 
1148 
1160  inline encapc<dim,T,layout> insert_o(const grid_key_dx<dim> & v1)
1161  {
1162 #ifdef SE_CLASS1
1163  check_init();
1164  check_bound(v1);
1165 #endif
1166  return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get(data_,g1,v1);
1167  }
1168 
1180  inline encapc<dim,T,layout> get_o(size_t v1)
1181  {
1182 #ifdef SE_CLASS1
1183  check_init();
1184  check_bound(v1);
1185 #endif
1186  return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get_lin(data_,v1);
1187  }
1188 
1200  inline const encapc<dim,T,layout> get_o(size_t v1) const
1201  {
1202 #ifdef SE_CLASS1
1203  check_init();
1204  check_bound(v1);
1205 #endif
1206  return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>
1207  ::get_lin(const_cast<typename std::add_lvalue_reference<decltype(this->data_)>::type>(data_),v1);
1208  }
1209 
1217  template<int prp>
1218  void fill(unsigned char fl)
1219  {
1220  if (prp != 0 || is_layout_mlin<layout_base<T>>::type::value == false)
1221  {
1222  std::cout << "Error: " << __FILE__ << ":" << __LINE__ << " unsupported fill operation " << std::endl;
1223  }
1224 
1225  memset(getPointer(),fl,size() * sizeof(T));
1226  }
1227 
1235  void remove(Box<dim,long int> & section_to_delete)
1236  {}
1237 
1243  void copyRemoveReset()
1244  {
1245  }
1246 
1252  bool isSkipLabellingPossible()
1253  {
1254  return false;
1255  }
1256 
1269  void copy_to(const grid_base_impl<dim,T,S,layout_base> & grid_src,
1270  const Box<dim,long int> & box_src,
1271  const Box<dim,long int> & box_dst)
1272  {
1273  // fix box_dst
1274 
1275  Box<dim,size_t> box_src_;
1276  Box<dim,size_t> box_dst_;
1277 
1278  for (size_t i = 0 ; i < dim ; i++)
1279  {
1280  if (box_dst.getHigh(i) >= (long int)g1.size(i))
1281  {
1282  long int shift = box_dst.getHigh(i) - g1.size(i) + 1;
1283  box_dst_.setHigh(i,box_dst.getHigh(i) - shift);
1284  box_src_.setHigh(i,box_src.getHigh(i) - shift);
1285  }
1286  else
1287  {
1288  box_dst_.setHigh(i,box_dst.getHigh(i));
1289  box_src_.setHigh(i,box_src.getHigh(i));
1290  }
1291 
1292  if (box_dst.getLow(i) < 0)
1293  {
1294  long int shift = -box_dst.getLow(i);
1295  box_dst_.setLow(i,box_dst.getLow(i) + shift);
1296  box_src_.setLow(i,box_src.getLow(i) + shift);
1297  }
1298  else
1299  {
1300  box_dst_.setLow(i,box_dst.getLow(i));
1301  box_src_.setLow(i,box_src.getLow(i));
1302  }
1303  }
1304 
1305  typedef typename to_int_sequence<0,T::max_prop>::type result;
1306 
1307  copy_grid_fast_caller<result>::call(*this,grid_src,box_src_,box_dst_);
1308 
1309 /* copy_grid_fast<!is_contiguos<prp...>::type::value || has_pack_gen<typename device_grid::value_type>::value,
1310  dim,
1311  grid_cp,
1312  grid_info_cp>::copy(grid_src.getGrid(),
1313  this->getGrid(),
1314  box_src,
1315  box_dst,
1316  grid_src,*this,cnt);*/
1317 
1319  }
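// ---------------------------------------------------------------------------
// Clamping sketch (editorial note): on an axis with g1.size(i) == 10, a
// destination box [-2,12] is pulled inside the grid. High side: the overflow
// is shift = 12 - 10 + 1 = 3, so the destination high becomes 9 and the
// source high shrinks by 3. Low side: shift = 2, so the destination low is
// raised to 0 and the source low is raised by the same amount.
// ---------------------------------------------------------------------------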
1320 
1331  template<unsigned int ... prp>
1332  void copy_to_prp(const grid_base_impl<dim,T,S,layout_base> & grid_src,
1333  const Box<dim,size_t> & box_src,
1334  const Box<dim,size_t> & box_dst)
1335  {
1336  typedef typename std::remove_reference<decltype(grid_src)>::type grid_cp;
1337  typedef typename std::remove_reference<decltype(grid_src.getGrid())>::type grid_info_cp;
1338 
1339  grid_key_dx<dim> cnt[1];
1340  cnt[0].zero();
1341 
1342  copy_grid_fast<!is_contiguos<prp...>::type::value || has_pack_gen<T>::value,
1343  dim,
1344  grid_cp,
1345  grid_info_cp>::copy(grid_src.getGrid(),
1346  this->getGrid(),
1347  box_src,
1348  box_dst,
1349  grid_src,*this,cnt);
1350  }
1351 
1357  void clear()
1358  {}
1359 
1370  template<template<typename,typename> class op, unsigned int ... prp>
1371  void copy_to_op(const grid_base_impl<dim,T,S,layout_base> & gs,
1372  const Box<dim,size_t> & bx_src,
1373  const Box<dim,size_t> & bx_dst)
1374  {
1375  grid_key_dx_iterator_sub<dim> sub_src(gs.getGrid(),bx_src.getKP1(),bx_src.getKP2());
1376  grid_key_dx_iterator_sub<dim> sub_dst(this->getGrid(),bx_dst.getKP1(),bx_dst.getKP2());
1377 
1378  while (sub_src.isNext())
1379  {
1380  // write the object in the last element
1381  object_si_di_op<op,decltype(gs.get_o(sub_src.get())),decltype(this->get_o(sub_dst.get())),OBJ_ENCAP,prp...>(gs.get_o(sub_src.get()),this->get_o(sub_dst.get()));
1382 
1383  ++sub_src;
1384  ++sub_dst;
1385  }
1386  }
1387 
1393  static bool is_unpack_header_supported()
1394  {return false;}
1395 
1410  void resize(const size_t (& sz)[dim], size_t opt = DATA_ON_HOST | DATA_ON_DEVICE, unsigned int blockSize = 1)
1411  {
1413 
1414  grid_base_impl<dim,T,S,layout_base,ord_type> grid_new(sz);
1415 
1416  resize_impl_memset(grid_new);
1417 
1419 
1420  if (opt & DATA_ON_HOST)
1421  {resize_impl_host(sz,grid_new);}
1422 
1423  if (opt & DATA_ON_DEVICE && S::isDeviceHostSame() == false)
1424  {resize_impl_device(sz,grid_new,blockSize);}
1425 
1426 // }
1427 
1428  // copy grid_new to the base
1429 
1430  this->swap(grid_new);
1431  }
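// ---------------------------------------------------------------------------
// Resize sketch (editorial example, excluded from compilation): opt selects
// which copies of the old content survive the reallocation;
// DATA_ON_HOST | DATA_ON_DEVICE is the default. blockSize only affects the
// device-side copy kernel.
#if 0 // example only
inline void example_resize()
{
 size_t sz[2]  = {16,16};
 size_t sz2[2] = {32,32};
 grid_base_impl<2,aggregate<float>,HeapMemory,memory_traits_lin> g(sz);
 g.setMemory();

 g.resize(sz2,DATA_ON_HOST);                   // preserve the host copy only
 g.resize(sz,DATA_ON_HOST | DATA_ON_DEVICE);   // preserve both copies
}
#endif
// ---------------------------------------------------------------------------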
1432 
1444  void resize_no_device(const size_t (& sz)[dim])
1445  {
1447 
1448  grid_base_impl<dim,T,S,layout_base,ord_type> grid_new(sz);
1449 
1450  resize_impl_memset(grid_new);
1451  resize_impl_host(sz,grid_new);
1452 
1453  this->swap(grid_new);
1454  }
1455 
1461  void remove(size_t key)
1462  {
1463  if (dim != 1)
1464  {
1465 #ifdef SE_CLASS1
1466  std::cerr << "Error: " << __FILE__ << " " << __LINE__ << " remove works only on dimension == 1 " << "\n";
1467 #endif
1468  return;
1469  }
1470 
1471  // It is safe to do a memory copy
1472 
1473  data_.move(&this->template get<0>());
1474  }
1475 
1508  void swap_nomode(grid_base_impl<dim,T,S,layout_base> & grid)
1509  {
1510  mem_swap<T,layout_base<T>,decltype(data_),decltype(grid)>::template swap_nomode<S>(data_,grid.data_);
1511 
1512  // exchange the grid info
1513  g1.swap(grid.g1);
1514 
1515  // exchange the init status
1516  bool exg = is_mem_init;
1517  is_mem_init = grid.is_mem_init;
1518  grid.is_mem_init = exg;
1519 
1520 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1521 
1523 
1524 #endif
1525  }
1526 
1533  void swap(grid_base_impl<dim,T,S,layout_base,ord_type> & grid)
1534  {
1535  mem_swap<T,layout_base<T>,decltype(data_),decltype(grid)>::swap(data_,grid.data_);
1536 
1537  // exchange the grid info
1538  g1.swap(grid.g1);
1539 
1540  // exchange the init status
1541  bool exg = is_mem_init;
1542  is_mem_init = grid.is_mem_init;
1543  grid.is_mem_init = exg;
1544 
1545  // exchange the is external status
1546  exg = isExternal;
1547  isExternal = grid.isExternal;
1548  grid.isExternal = exg;
1549 
1550 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1551 
1554 
1555 #endif
1556  }
1557 
1567  void swap(grid_base_impl<dim,T,S,layout_base> && grid)
1568  {
1569  swap(grid);
1570  }
1571 
1578  template<unsigned int ... prp>
1579  __device__ __host__ inline void set(const grid_key_dx<dim> & key1,const grid_base_impl & g, const grid_key_dx<dim> & key2)
1580  {
1581  auto edest = this->get_o(key1);
1582  auto esrc = g.get_o(key2);
1583 
1584  copy_cpu_encap_encap_prp<decltype(g.get_o(key2)),decltype(this->get_o(key1)),prp...> ec(esrc,edest);
1585 
1586  boost::mpl::for_each_ref<boost::mpl::range_c<int,0,sizeof...(prp)>>(ec);
1587  }
1588 
1597  template<typename Memory> inline void set(grid_key_dx<dim> dx, const encapc<1,T,Memory> & obj)
1598  {
1599 #ifdef SE_CLASS1
1600  check_init();
1601  check_bound(dx);
1602 #endif
1603 
1604  // create the object to copy the properties
1605  copy_cpu_encap<dim,grid_base_impl<dim,T,S,layout_base>,layout> cp(dx,*this,obj);
1606 
1607  // copy each property
1608  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);
1609  }
1610 
1620  inline void set(grid_key_dx<dim> dx, const T & obj)
1621  {
1622 #ifdef SE_CLASS1
1623  check_init();
1624  check_bound(dx);
1625 #endif
1626 
1627  this->get_o(dx) = obj;
1628  }
1629 
1630 
1639  inline void set(const grid_key_dx<dim> & key1,
1640  const grid_base_impl<dim,T,S,layout_base> & g,
1641  const grid_key_dx<dim> & key2)
1642  {
1643 #ifdef SE_CLASS1
1644  check_init();
1645  check_bound(key1);
1646  check_bound(g,key2);
1647 #endif
1648 
1649  this->get_o(key1) = g.get_o(key2);
1650  }
1651 
1660  inline void set(const size_t key1,
1661  const grid_base_impl<dim,T,S,layout_base> & g,
1662  const size_t key2)
1663  {
1664 #ifdef SE_CLASS1
1665  check_init();
1666  check_bound(key1);
1667  check_bound(g,key2);
1668 #endif
1669 
1670  this->get_o(key1) = g.get_o(key2);
1671  }
1672 
1681  template<typename Mem> inline void set(const grid_key_dx<dim> & key1,const grid_base_impl<dim,T,Mem,layout_base> & g, const grid_key_dx<dim> & key2)
1682  {
1683 #ifdef SE_CLASS1
1684  check_init();
1685  check_bound(key1);
1686  check_bound(g,key2);
1687 #endif
1688 
1689  this->get_o(key1) = g.get_o(key2);
1690  }
1691 
1700  template<typename Mem, template <typename> class layout_base2> inline void set_general(const grid_key_dx<dim> & key1,
1701  const grid_base_impl<dim,T,Mem,layout_base2> & g,
1702  const grid_key_dx<dim> & key2)
1703  {
1704 #ifdef SE_CLASS1
1705  check_init();
1706  check_bound(key1);
1707  check_bound(g,key2);
1708 #endif
1709 
1710  this->get_o(key1) = g.get_o(key2);
1711  }
1712 
1718  inline size_t size() const
1719  {
1720  return g1.size();
1721  }
1722 
1733  inline grid_key_dx_iterator_sub<dim> getSubIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop) const
1734  {
1735  return g1.getSubIterator(start,stop);
1736  }
1737 
1747  inline grid_key_dx_iterator_sub<dim> getSubIterator(size_t m)
1748  {
1749  return grid_key_dx_iterator_sub<dim>(g1,m);
1750  }
1751 
1759  inline grid_key_dx_iterator<dim> getIterator() const
1760  {
1761  size_t sz[dim];
1762 
1763  for (int i = 0 ; i < dim ; i++)
1764  {sz[i] = g1.size(i);}
1765 
1766  grid_sm<dim,void> gvoid(sz);
1767 
1768  return grid_key_dx_iterator<dim>(gvoid);
1769  }
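// ---------------------------------------------------------------------------
// Iteration sketch (editorial example, excluded from compilation): the
// canonical visit-every-point loop used throughout this file (see
// duplicate() and resize_impl_host()).
#if 0 // example only
template<typename grid>
void example_zero(grid & g)
{
 auto it = g.getIterator();

 while (it.isNext())
 {
  auto key = it.get();

  g.template get<0>(key) = 0.0f;

  ++it;
 }
}
#endif
// ---------------------------------------------------------------------------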
1770 
1775  template<unsigned int ... prp> void deviceToHost()
1776  {
1777  layout_base<T>::template deviceToHost<decltype(data_), prp ...>(data_,0,this->getGrid().size() - 1);
1778  }
1779 
1784  template<unsigned int ... prp> void deviceToHost(size_t start, size_t stop)
1785  {
1786  layout_base<T>::template deviceToHost<decltype(data_), prp ...>(data_,start,stop);
1787  }
1788 
1793  template<unsigned int ... prp> void hostToDeviceNUMA(size_t start, size_t stop)
1794  {
1795  #ifdef CUDIFY_USE_OPENMP
1796  grid_key_dx<dim> start_;
1797  grid_key_dx<dim> stop_;
1798 
1799  for (size_t i = 0 ; i < dim ; i++)
1800  {
1801  start_.set_d(i,start);
1802  stop_.set_d(i,stop);
1803  }
1804 
1805  auto ite = this->getGPUIterator(start_,stop_);
1806 
1807  // We have to be careful with OpenMP: multiple threads can end up in different NUMA domains
1808  copy_with_openmp_prp<decltype(this->toKernel()),typename std::remove_reference<decltype(*this)>::type,prp ...>(this->toKernel(),*this,ite);
1809  #else
1810  this->template hostToDevice<prp ...>(start,stop);
1811  #endif
1812  }
1813 
1818  template<unsigned int ... prp> void hostToDeviceNUMA()
1819  {
1820  #ifdef CUDIFY_USE_OPENMP
1821 
1822  grid_key_dx<dim> start_;
1823  grid_key_dx<dim> stop_;
1824 
1825  for (size_t i = 0 ; i < dim ; i++)
1826  {
1827  start_.set_d(i,0);
1828  stop_.set_d(i,this->g1.size() - 1);
1829  }
1830 
1831  auto ite = this->getGPUIterator(start_,stop_);
1832 
1833  // We have to be careful with OpenMP: multiple threads can end up in different NUMA domains
1834  copy_with_openmp_prp<decltype(this->toKernel()),typename std::remove_reference<decltype(*this)>::type,prp ...>(this->toKernel(),*this,ite);
1835  #else
1836  this->template hostToDevice<prp ...>();
1837  #endif
1838  }
1839 
1844  template<unsigned int ... prp> void hostToDevice(size_t start, size_t stop)
1845  {
1846  layout_base<T>::template hostToDevice<S, decltype(data_),prp ...>(data_,start,stop);
1847  }
1848 
1849 
1854  template<unsigned int ... prp> void hostToDevice()
1855  {
1856  layout_base<T>::template hostToDevice<S,decltype(data_),prp ...>(data_,0,this->getGrid().size() - 1);
1857  }
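// ---------------------------------------------------------------------------
// Synchronization sketch (editorial example, requires a device backend,
// excluded from compilation): the usual round trip when a kernel updates the
// grid. CudaMemory and memory_traits_inte are standard OpenFPM types; the
// kernel launch itself is elided.
#if 0 // example only
inline void example_sync(grid_base_impl<2,aggregate<float>,CudaMemory,memory_traits_inte> & g)
{
 g.template hostToDevice<0>();   // push property 0 to the device

 // ... launch kernels operating on g.toKernel() ...

 g.template deviceToHost<0>();   // pull the results back
}
#endif
// ---------------------------------------------------------------------------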
1858 
1859 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1860 
1862 
1868  grid_gpu_ker<dim,T_,layout_base,linearizer_type> toKernel()
1869  {
1871  }
1872 
1878  const grid_gpu_ker<dim,T_,layout_base,linearizer_type> toKernel() const
1879  {
1881  }
1882 
1883 #else
1884 
1890  grid_gpu_ker<dim,T_,layout_base,linearizer_type> toKernel()
1891  {
1893  }
1894 
1900  const grid_gpu_ker<dim,T_,layout_base,linearizer_type> toKernel() const
1901  {
1903  }
1904 
1905 #endif
1906 
1914  template<unsigned int Np>
1915  inline grid_key_dx_iterator<dim,stencil_offset_compute<dim,Np>>
1916  getIteratorStencil(const grid_key_dx<dim> (& stencil_pnt)[Np]) const
1917  {
1918  return grid_key_dx_iterator<dim,stencil_offset_compute<dim,Np>>(g1,stencil_pnt);
1919  }
1920 
1933  inline grid_key_dx_iterator_sub<dim> getIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop, bool to_init = false) const
1934  {
1935  size_t sz[dim];
1936 
1937  for (int i = 0 ; i < dim ; i++)
1938  {sz[i] = g1.size(i);}
1939 
1940  grid_sm<dim,void> gvoid(sz);
1941 
1942  // get the starting point and the end point of the real domain
1943  return grid_key_dx_iterator_sub<dim>(gvoid,start,stop);
1944  }
1945 
1951  layout & get_internal_data_()
1952  {
1953  return data_;
1954  }
1955 
1961  const layout & get_internal_data_() const
1962  {
1963  return data_;
1964  }
1965 
1971  void removeAddUnpackReset()
1972  {}
1973 
1981  template<unsigned int ... prp, typename context_type>
1982  void removeAddUnpackFinalize(const context_type & ctx, int opt)
1983  {}
1984 
1992  template<unsigned int ... prp, typename context_type>
1993  void removeCopyToFinalize(const context_type & ctx, int opt)
1994  {}
1995 
2000  void resetFlush()
2001  {}
2002 };
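// ---------------------------------------------------------------------------
// End-to-end sketch (editorial example, excluded from compilation):
// construct, allocate, fill, duplicate and compare a small 3D grid using the
// API defined above.
#if 0 // example only
inline void example_roundtrip()
{
 size_t sz[3] = {4,4,4};
 grid_base_impl<3,aggregate<float>,HeapMemory,memory_traits_lin> g(sz);
 g.setMemory();

 auto it = g.getIterator();
 while (it.isNext())
 {
  g.template get<0>(it.get()) = 1.0f;
  ++it;
 }

 grid_base_impl<3,aggregate<float>,HeapMemory,memory_traits_lin> g2 = g.duplicate();

 bool same = (g2 == g);   // element-wise comparison
 (void)same;
}
#endif
// ---------------------------------------------------------------------------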
2003 
2004 
2005 #endif /* OPENFPM_DATA_SRC_GRID_GRID_BASE_IMPLEMENTATION_HPP_ */