OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
grid_dist_amr.hpp
/*
 * grid_dist_amr.hpp
 *
 *  Created on: Sep 21, 2017
 *      Author: i-bird
 */

#ifndef AMR_GRID_AMR_DIST_HPP_
#define AMR_GRID_AMR_DIST_HPP_

#define OPENFPM_DATA_ENABLE_IO_MODULE

#include "Grid/grid_dist_id.hpp"
#include "Amr/grid_dist_amr_key_iterator.hpp"

#ifdef __NVCC__
#include "SparseGridGpu/SparseGridGpu.hpp"
#endif

#define AMR_IMPL_TRIVIAL 1
#define AMR_IMPL_PATCHES 2
#define AMR_IMPL_OPENVDB 3

template<typename Decomposition, typename garray>
class Decomposition_encap
{
    Decomposition & dec;
    garray & gd_array;

public:

    Decomposition_encap(Decomposition & dec, garray & gd_array)
    :dec(dec),gd_array(gd_array)
    {}

    Decomposition & internal_dec() const
    {
        return dec;
    }

    //! Start the decomposition and propagate it to every level of the AMR hierarchy
    void decompose()
    {
        dec.decompose();

        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost();
            gd_array.get(i).getDecomposition() = dec.duplicate(gold);
        }
    }

    //! Refine the decomposition (available only for the ParMetis distribution; for Metis it is a null call)
    void refine(size_t ts)
    {
        dec.refine();

        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost();
            gd_array.get(i).getDecomposition() = dec.duplicate(gold);
        }
    }

    //! Redecompose (available only for the ParMetis distribution; for Metis it is a null call)
    void redecompose(size_t ts)
    {
        dec.redecompose();

        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost();
            gd_array.get(i).getDecomposition() = dec.duplicate(gold);
        }
    }

    auto getDistribution() -> decltype(dec.getDistribution())
    {
        return dec.getDistribution();
    }

    Decomposition_encap<Decomposition,garray> & operator=(const Decomposition_encap<Decomposition,garray> & de)
    {
        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {gd_array.get(i).getDecomposition() = de.gd_array.get(i).getDecomposition();}

        return *this;
    }

    bool write(std::string output) const
    {
        return dec.write(output);
    }
};

template<unsigned int dim,
         typename St,
         typename T,
         unsigned int impl=AMR_IMPL_TRIVIAL,
         typename Decomposition = CartDecomposition<dim,St>,
         typename Memory=HeapMemory,
         typename device_grid=grid_cpu<dim,T> >
class grid_dist_amr
{

};

/*! \brief It contains the offsets necessary to move to coarser and finer level grids
 *
 */
template<unsigned int dim>
struct offset_mv
{
    //! offset to move up to the coarser grid
    grid_key_dx<dim> up;

    //! offset to move down to the finer grid
    grid_key_dx<dim> dw;
};

template<unsigned int dim,
         typename St,
         typename T,
         typename Decomposition,
         typename Memory,
         typename device_grid >
class grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,Decomposition,Memory,device_grid>
{
    //! Simulation domain
    Box<dim,St> domain;

    //! Ghost (in grid units)
    Ghost<dim,long int> g_int;

    //! Boundary conditions
    periodicity<dim> bc;

    //! array of grids, one for each level
    openfpm::vector<grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>,
                    HeapMemory,
                    memory_traits_lin,
                    openfpm::grow_policy_identity,
                    STD_VECTOR> gd_array;

    //! Type of the domain iterator
    typedef decltype(device_grid::type_of_subiterator()) device_sub_it;

    //! Type of the domain + ghost iterator
    typedef decltype(device_grid::type_of_iterator()) device_it;

    //! Domain iterator for each distributed grid
    openfpm::vector<grid_dist_iterator<dim,device_grid,device_sub_it,FREE>> git;

    //! Domain + ghost iterator for each distributed grid
    openfpm::vector<grid_dist_iterator<dim,device_grid,device_it,FIXED>> git_g;

    //! Sub-domain iterator for each distributed grid
    openfpm::vector<grid_dist_iterator_sub<dim,device_grid>> git_sub;

    //! Offsets to move between levels, one set per level
    openfpm::vector<openfpm::vector<offset_mv<dim>>> mv_off;

    //! Background value
    T bck;

    /*! \brief Initialize all the levels finer than the coarsest one
     *
     * \param n_lvl number of levels
     * \param g_sz_lvl grid size of the coarsest level
     *
     */
    void initialize_other(size_t n_lvl, size_t (& g_sz_lvl)[dim])
    {
        for (size_t i = 0; i < n_lvl - 1 ; i++)
        {
            for (size_t j = 0 ; j < dim ; j++)
            {
                if (bc.bc[j] == NON_PERIODIC)
                {g_sz_lvl[j] = (g_sz_lvl[j]-1)*2 + 1;}
                else
                {g_sz_lvl[j] = g_sz_lvl[j]*2;}
            }

            gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(gd_array.get(0).getDecomposition(),g_sz_lvl,g_int));
            gd_array.last().setBackgroundValue(bck);

            gd_array.last().getDecomposition().free_geo_cell();
            gd_array.last().getDecomposition().getDistribution().destroy_internal_graph();
            gd_array.last().getDecomposition().free_fines();
        }

        recalculate_mvoff();
    }

public:

    /*! \brief Constructor
     *
     * \param domain simulation domain
     * \param g ghost extension
     *
     */
    grid_dist_amr(const Box<dim,St> & domain, const Ghost<dim,long int> & g)
    :domain(domain),g_int(g)
    {
        // set boundary conditions to non-periodic
        for (size_t i = 0; i < dim ; i++)
        {bc.bc[i] = NON_PERIODIC;}
    }

    /*! \brief Constructor
     *
     * \param domain simulation domain
     * \param g ghost extension
     * \param bc boundary conditions
     *
     */
    grid_dist_amr(const Box<dim,St> & domain, const Ghost<dim,long int> & g, periodicity<dim> & bc)
    :domain(domain),g_int(g),bc(bc)
    {
    }

    /*! \brief Initialize the AMR grid
     *
     * \param dec decomposition on which the coarsest level is constructed
     * \param n_lvl number of levels
     * \param g_sz coarsest grid size in each direction
     *
     */
    void initLevels(const Decomposition & dec, size_t n_lvl,const size_t (& g_sz)[dim])
    {
        size_t g_sz_lvl[dim];

        for (size_t i = 0; i < dim ; i++)
        {g_sz_lvl[i] = g_sz[i];}

        // Add the coarse level
        gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(dec,g_sz,g_int));
        gd_array.last().setBackgroundValue(bck);

        initialize_other(n_lvl,g_sz_lvl);
    }

    /*! \brief Initialize the AMR grid from an encapsulated decomposition
     *
     * \param dec decomposition encapsulation
     * \param n_lvl number of levels
     * \param g_sz coarsest grid size in each direction
     *
     */
    template<typename TT> void initLevels(const Decomposition_encap<Decomposition,TT> & dec, size_t n_lvl,const size_t (& g_sz)[dim])
    {
        initLevels(dec.internal_dec(),n_lvl,g_sz);
    }

    /*! \brief Recalculate the offset arrays used by moveLvlUp and moveLvlDw
     *
     */
    void recalculate_mvoff()
    {
        // Here we calculate the offsets needed to move one level up and one level down.
        // In global coordinates, moving one level up means multiplying the coordinates
        // by 2, and moving one level down means dividing by 2. The same holds in local
        // coordinates, except that, because of the decomposition, an offset can appear.
        // Look at the picture below:
        //
        //   (-1)     (0)
        //    *    |    *        *        coarse level
        //    *    | *  *  *  *  *        finer level
        //         |(0) (1)
        //
        //         | line of the decomposition
        //
        // The coarse-level point 0, converted to the finer level in local coordinates,
        // is not just 2*0 = 0 but 2*(0) + 1, so a formula like 2*x + offset is required.
        // Here we calculate these offsets. Moving from finer to coarse works the same
        // way; the formula is integer_round(x+1)/2 - 1.
        //
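        // A small worked example of the two formulas, using the +1 offset of the
        // picture above (illustrative numbers only):
        //
        //   down:  x = 0 on the coarse level  ->  2*0 + 1 = 1 on the finer level
        //   up:    x = 1 on the finer level   ->  (1 - 1) >> 1 = 0 on the coarse level
        //
        // This matches the shift-based code below: dw stores the additive offset
        // (d_orig_c*2 - d_orig_f) applied after the left shift, and up stores the
        // same offset, subtracted before the right shift.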
        mv_off.resize(gd_array.size());

        for (size_t i = 1 ; i < gd_array.size() ; i++)
        {
            auto & g_box_c = gd_array.get(i-1).getLocalGridsInfo();
            auto & g_box_f = gd_array.get(i).getLocalGridsInfo();

#ifdef SE_CLASS1

            if (g_box_c.size() != g_box_f.size())
            {
                std::cerr << __FILE__ << ":" << __LINE__ << " error: it seems that the AMR construction between level " <<
                             i << " and " << i-1 << " is inconsistent" << std::endl;
            }

#endif

            mv_off.get(i-1).resize(g_box_f.size());
            mv_off.get(i).resize(g_box_f.size());

            for (size_t j = 0 ; j < g_box_f.size() ; j++)
            {
                for (size_t s = 0 ; s < dim ; s++)
                {
                    size_t d_orig_c = g_box_c.get(j).origin.get(s);
                    size_t d_orig_f = g_box_f.get(j).origin.get(s);

                    mv_off.get(i-1).get(j).dw.get(s) = d_orig_c*2 - d_orig_f;
                    mv_off.get(i).get(j).up.get(s) = d_orig_c*2 - d_orig_f;
                }
            }
        }
    }

    /*! \brief Initialize the AMR grid
     *
     * \param n_lvl number of levels
     * \param g_sz coarsest grid size in each direction
     * \param opt options
     *
     */
    void initLevels(size_t n_lvl,const size_t (& g_sz)[dim], size_t opt = 0)
    {
        size_t g_sz_lvl[dim];

        for (size_t i = 0; i < dim ; i++)
        {g_sz_lvl[i] = g_sz[i];}

        // Add the coarse level
        gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(g_sz,domain,g_int,bc,opt));

        initialize_other(n_lvl,g_sz_lvl);
    }

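    // Construction sketch (a minimal, hypothetical instantiation; the aggregate
    // property list, sizes and names below are illustrative, not prescribed by
    // this header). With non-periodic boundaries each level grows as (n-1)*2+1:
    //
    //   size_t g_sz[2] = {17,17};
    //   Box<2,float> domain({0.0,0.0},{1.0,1.0});
    //   Ghost<2,long int> g(1);
    //
    //   grid_dist_amr<2,float,aggregate<float>> amr_g(domain,g);
    //   amr_g.initLevels(4,g_sz);   // levels: 17x17, 33x33, 65x65, 129x129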
    /*! \brief Add computation costs on the decomposition using a resolution function
     *
     * \param md model (how to calculate the computational cost)
     * \param ts how many time steps the decomposition must stay valid
     *
     */
    template <typename Model>inline void addComputationCosts(Model md=Model(), size_t ts = 1)
    {
        gd_array.get(0).addComputationCosts(md,ts);
    }

    /*! \brief Get the object that stores the information about the decomposition
     *
     * \return the decomposition encapsulation
     *
     */
    Decomposition_encap<Decomposition,decltype(gd_array)> getDecomposition()
    {
        Decomposition_encap<Decomposition,decltype(gd_array)> tmp(gd_array.get(0).getDecomposition(),gd_array);

        return tmp;
    }

    /*! \brief Get the internal distributed grid at level lvl
     *
     * \param lvl level
     *
     * \return the internal distributed grid
     *
     */
    grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & getDistGrid(size_t lvl)
    {
        return gd_array.get(lvl);
    }

    /*! \brief Get an iterator over the cells of the domain, across all levels
     *
     * \return an AMR cell iterator
     *
     */
    grid_dist_amr_key_iterator<dim,device_grid,
                               decltype(device_grid::type_of_subiterator()),
                               grid_dist_iterator_sub<dim,device_grid>>
    getDomainIteratorCells()
    {
        git_sub.clear();

        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            grid_key_dx<dim> start;
            grid_key_dx<dim> stop;

            for (size_t j = 0 ; j < dim ; j++)
            {
                start.set_d(j,0);
                if (bc.bc[j] == NON_PERIODIC)
                {stop.set_d(j,getGridInfoVoid(i).size(j) - 2);}
                else
                {stop.set_d(j,getGridInfoVoid(i).size(j) - 1);}
            }

            git_sub.add(gd_array.get(i).getSubDomainIterator(start,stop));
        }

        return grid_dist_amr_key_iterator<dim,device_grid,
                                          decltype(device_grid::type_of_subiterator()),
                                          grid_dist_iterator_sub<dim,device_grid>>(git_sub);
    }

    /*! \brief Get an iterator over the cells of the domain at level lvl
     *
     * \param lvl level
     *
     * \return a cell iterator for that level
     *
     */
    grid_dist_iterator_sub<dim,device_grid> getDomainIteratorCells(size_t lvl)
    {
        grid_key_dx<dim> start;
        grid_key_dx<dim> stop;

        for (size_t j = 0 ; j < dim ; j++)
        {
            start.set_d(j,0);
            if (bc.bc[j] == NON_PERIODIC)
            {stop.set_d(j,getGridInfoVoid(lvl).size(j) - 2);}
            else
            {stop.set_d(j,getGridInfoVoid(lvl).size(j) - 1);}
        }

        return gd_array.get(lvl).getSubDomainIterator(start,stop);
    }

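    // Iteration sketch (hedged example; `amr_g` and property index 0 are
    // hypothetical). The stop point is shrunk by one along non-periodic
    // directions, so each visited key identifies the cell having that point as
    // its lower corner:
    //
    //   auto it = amr_g.getDomainIteratorCells(lvl);
    //   while (it.isNext())
    //   {
    //       auto key = it.get();
    //       // ... use amr_g.template get<0>(lvl,key) ...
    //       ++it;
    //   }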
    /*! \brief Get an iterator over the grid points of level lvl, ghost included
     *
     * \param lvl level
     *
     * \return an iterator
     *
     */
    auto getGridGhostIterator(size_t lvl) -> decltype(gd_array.get(lvl).getGridGhostIterator(grid_key_dx<dim>(),grid_key_dx<dim>()))
    {
        grid_key_dx<dim> key_start;
        grid_key_dx<dim> key_stop;

        for (size_t i = 0 ; i < dim ; i++)
        {
            key_start.set_d(i,g_int.getLow(i));
            key_stop.set_d(i,g_int.getHigh(i) + getGridInfoVoid(lvl).size(i) - 1);
        }

        return gd_array.get(lvl).getGridGhostIterator(key_start,key_stop);
    }

    /*! \brief Get an iterator over the grid points of level lvl
     *
     * \param lvl level
     *
     * \return an iterator
     *
     */
    auto getGridIterator(size_t lvl) -> decltype(gd_array.get(lvl).getGridIterator())
    {
        return gd_array.get(lvl).getGridIterator();
    }

    /*! \brief Get an iterator over a sub-set of the grid points of level lvl
     *
     * \param lvl level
     * \param start starting point
     * \param stop stop point
     *
     * \return an iterator
     *
     */
    auto getGridIterator(size_t lvl, grid_key_dx<dim> & start, grid_key_dx<dim> & stop) -> decltype(gd_array.get(lvl).getGridIterator(start,stop))
    {
        return gd_array.get(lvl).getGridIterator(start,stop);
    }

#ifdef __NVCC__

    /*! \brief Get a GPU iterator over the grid points of level lvl
     *
     * \param lvl level
     *
     * \return an iterator
     *
     */
    auto getGridIteratorGPU(size_t lvl) -> decltype(gd_array.get(lvl).getGridIteratorGPU())
    {
        return gd_array.get(lvl).getGridIteratorGPU();
    }

#endif

    /*! \brief Get an iterator over the cells of the grid at level lvl
     *
     * \param lvl level
     *
     * \return an iterator
     *
     */
    auto getGridIteratorCells(size_t lvl) -> decltype(gd_array.get(lvl).getGridIterator())
    {
        grid_key_dx<dim> start;
        grid_key_dx<dim> stop;

        for (size_t j = 0 ; j < dim ; j++)
        {
            start.set_d(j,0);
            if (bc.bc[j] == NON_PERIODIC)
            {stop.set_d(j,getGridInfoVoid(lvl).size(j) - 2);}
            else
            {stop.set_d(j,getGridInfoVoid(lvl).size(j) - 1);}
        }

        return gd_array.get(lvl).getGridIterator(start,stop);
    }

    /*! \brief Return a domain iterator over the level lvl
     *
     * \param lvl level
     *
     * \return an iterator over the domain of that level
     *
     */
    grid_dist_iterator<dim,device_grid,decltype(device_grid::type_of_subiterator()),FREE>
    getDomainIterator(size_t lvl) const
    {
        return gd_array.get(lvl).getDomainIterator();
    }

    /*! \brief Return a domain + ghost iterator over the level lvl
     *
     * \param lvl level
     *
     * \return an iterator over domain and ghost of that level
     *
     */
    grid_dist_iterator<dim,device_grid,
                       decltype(device_grid::type_of_iterator()),
                       FIXED>
    getDomainGhostIterator(size_t lvl) const
    {
        return gd_array.get(lvl).getDomainGhostIterator();
    }

    /*! \brief Get a domain iterator across all levels
     *
     * \return an AMR iterator over the domain
     *
     */
    grid_dist_amr_key_iterator<dim,device_grid, decltype(device_grid::type_of_subiterator())>
    getDomainIterator()
    {
        git.clear();

        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            git.add(gd_array.get(i).getDomainIterator());
        }

        return grid_dist_amr_key_iterator<dim,device_grid,decltype(device_grid::type_of_subiterator())>(git);
    }

    /*! \brief Get a domain + ghost iterator across all levels
     *
     * \return an AMR iterator over domain and ghost
     *
     */
    grid_dist_amr_key_iterator<dim,device_grid, decltype(device_grid::type_of_iterator()),
                               grid_dist_iterator<dim,device_grid,decltype(device_grid::type_of_iterator()),FIXED>>
    getDomainGhostIterator()
    {
        git_g.clear();

        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            git_g.add(gd_array.get(i).getDomainGhostIterator());
        }

        return grid_dist_amr_key_iterator<dim,device_grid,decltype(device_grid::type_of_iterator()),
                                          grid_dist_iterator<dim,device_grid,decltype(device_grid::type_of_iterator()),FIXED>>(git_g);
    }

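    // Whole-hierarchy iteration sketch (hedged; `amr_g` is hypothetical). The
    // returned keys are grid_dist_amr_key<dim> objects, so they carry the level
    // together with the point and can be passed straight to get<p>():
    //
    //   auto it = amr_g.getDomainIterator();
    //   while (it.isNext())
    //   {
    //       auto akey = it.get();              // AMR key: level + grid key
    //       amr_g.template get<0>(akey) = 1.0;
    //       ++it;
    //   }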
    /*! \brief Get the reference of the selected element
     *
     * \param v1 AMR key (level + grid position)
     *
     * \return the reference of the element
     *
     */
    template <unsigned int p>inline auto get(const grid_dist_amr_key<dim> & v1) const -> decltype(gd_array.get(v1.getLvl()).template get<p>(v1.getKey()))
    {
#ifdef SE_CLASS2
        check_valid(this,8);
#endif
        return gd_array.get(v1.getLvl()).template get<p>(v1.getKey());
    }

    /*! \brief Get the reference of the selected element
     *
     * \param v1 AMR key (level + grid position)
     *
     * \return the reference of the element
     *
     */
    template <unsigned int p>inline auto get(const grid_dist_amr_key<dim> & v1) -> decltype(gd_array.get(v1.getLvl()).template get<p>(v1.getKey()))
    {
#ifdef SE_CLASS2
        check_valid(this,8);
#endif
        return gd_array.get(v1.getLvl()).template get<p>(v1.getKey());
    }

    /*! \brief Get the reference of the selected element on the level lvl
     *
     * \param lvl level
     * \param v1 grid position
     *
     * \return the reference of the element
     *
     */
    template <unsigned int p>inline auto get(size_t lvl, const grid_dist_key_dx<dim> & v1) const -> decltype(gd_array.get(lvl).template get<p>(v1))
    {
#ifdef SE_CLASS2
        check_valid(this,8);
#endif
        return gd_array.get(lvl).template get<p>(v1);
    }

    /*! \brief Get the reference of the selected element on the level lvl
     *
     * \param lvl level
     * \param v1 grid position
     *
     * \return the reference of the element
     *
     */
    template <unsigned int p>inline auto get(size_t lvl, const grid_dist_key_dx<dim> & v1) -> decltype(gd_array.get(lvl).template get<p>(v1))
    {
#ifdef SE_CLASS2
        check_valid(this,8);
#endif
        return gd_array.get(lvl).template get<p>(v1);
    }

    /*! \brief Insert the selected element (sparse grids only)
     *
     * \param v1 AMR key (level + grid position)
     *
     * \return the reference of the inserted element
     *
     */
    template <unsigned int p>
    inline auto insert(const grid_dist_amr_key<dim> & v1)
    -> decltype(gd_array.get(v1.getLvl()).template insert<p>(v1.getKey()))
    {
#ifdef SE_CLASS2
        check_valid(this,8);
#endif
        return gd_array.get(v1.getLvl()).template insert<p>(v1.getKey());
    }

    /*! \brief Insert the selected element on the level lvl (sparse grids only)
     *
     * \param lvl level
     * \param v1 grid position
     *
     * \return the reference of the inserted element
     *
     */
    template <unsigned int p>inline auto insert(size_t lvl, const grid_dist_key_dx<dim> & v1)
    -> decltype(gd_array.get(lvl).template insert<p>(v1))
    {
#ifdef SE_CLASS2
        check_valid(this,8);
#endif
        return gd_array.get(lvl).template insert<p>(v1);
    }

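    // Insertion sketch for sparse backends (hedged; `amr_g` and property 0 are
    // hypothetical). getGridIterator() walks the full index space of a level,
    // while insert() materializes only the visited points:
    //
    //   auto it = amr_g.getGridIterator(lvl);
    //   while (it.isNext())
    //   {
    //       auto key = it.get_dist();
    //       amr_g.template insert<0>(lvl,key) = 0.0;
    //       ++it;
    //   }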
    /*! \brief Get the underlying distributed grid at level lvl
     *
     * \param lvl level
     *
     * \return the grid at that level
     *
     */
    grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & getLevel(size_t lvl)
    {
        return gd_array.get(lvl);
    }

    /*! \brief Remove a grid point (this function makes sense only on sparse grids)
     *
     * \param v1 AMR key (level + grid position)
     *
     */
    inline void remove(const grid_dist_amr_key<dim> & v1)
    {
#ifdef SE_CLASS2
        check_valid(this,8);
#endif
        gd_array.get(v1.getLvl()).remove(v1.getKey());
    }

    /*! \brief Remove a grid point (this function makes sense only on sparse grids)
     *
     * \param lvl level
     * \param v1 grid position
     *
     */
    void remove(size_t lvl, const grid_dist_key_dx<dim> & v1)
    {
#ifdef SE_CLASS2
        check_valid(this,8);
#endif
        gd_array.get(lvl).remove(v1);
    }

    /*! \brief Construct the level connections for padding points
     *
     */
    void construct_level_connections()
    {
        // nothing to connect with a single level
        if (gd_array.size() < 2)
        {return;}

        for (size_t lvl = 0 ; lvl < gd_array.size() ; lvl++)
        {
            if (lvl == 0)
            {
                gd_array.get(lvl).construct_link_dw(gd_array.get(lvl+1),mv_off.get(lvl));
            }
            else if (lvl == gd_array.size() - 1)
            {gd_array.get(lvl).construct_link_up(gd_array.get(lvl-1),mv_off.get(lvl));}
            else
            {
                gd_array.get(lvl).construct_link_dw(gd_array.get(lvl+1),mv_off.get(lvl));
                gd_array.get(lvl).construct_link_up(gd_array.get(lvl-1),mv_off.get(lvl));
            }
        }
    }

    /*! \brief Tag the boundary points on every level
     *
     */
    template<typename stencil_type>
    void tagBoundaries()
    {
        for (size_t lvl = 0 ; lvl < gd_array.size() ; lvl++)
        {
            gd_array.get(lvl).template tagBoundaries<stencil_type>();
        }
    }

    /*! \brief Synchronize the ghost parts on every level
     *
     * \tparam prp properties to synchronize
     *
     * \param opt options
     *
     */
    template<int... prp> void ghost_get(size_t opt = 0)
    {
        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            gd_array.get(i).template ghost_get<prp...>(opt);
        }
    }

    /*! \brief Move all the grid parts that do not belong to the local processor to the respective processor
     *
     * \param opt options
     *
     */
    void map(size_t opt = 0)
    {
        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            gd_array.get(i).map();
        }

        recalculate_mvoff();
    }

    /*! \brief Apply the operation op to the ghost and write the result back on the real points
     *
     * \tparam op operation to apply
     * \tparam prp properties to write back
     *
     */
    template<template<typename,typename> class op,int... prp> void ghost_put()
    {
        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            gd_array.get(i).template ghost_put<op,prp...>();
        }
    }

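    // A typical update cycle (hedged sketch; the stencil computation is elided).
    // map() rebalances the data and, because local origins can change, also
    // recalculates the level-to-level offsets; ghost_get() then refreshes the
    // ghost layers before any stencil is applied:
    //
    //   amr_g.map();
    //   amr_g.template ghost_get<0>();
    //   // ... stencil / inter-level computation ...
    //   amr_g.template ghost_put<add_,0>();   // e.g. accumulate ghost contributions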
    /*! \brief Return the number of inserted points on the level lvl
     *
     * \param lvl level
     *
     * \return the number of inserted points
     *
     */
    size_t size_inserted(size_t lvl)
    {
        return gd_array.get(lvl).size_local_inserted();
    }

    /*! \brief Set the background value (the value a sparse grid returns for non-existing points)
     *
     * \param bv background value
     *
     */
    void setBackgroundValue(T & bv)
    {
        for (size_t i = 0 ; i < getNLvl() ; i++)
        {gd_array.get(i).setBackgroundValue(bv);}

        meta_copy<T>::meta_copy_(bv,bck);
    }

    /*! \brief Delete all the points on every level
     *
     */
    void clear()
    {
        for (size_t i = 0 ; i < getNLvl() ; i++)
        {gd_array.get(i).clear();}
    }

    /*! \brief Get an object containing the grid information for the level lvl
     *
     * \param lvl level
     *
     * \return the grid information object
     *
     */
    const grid_sm<dim,void> & getGridInfoVoid(size_t lvl) const
    {
        return gd_array.get(lvl).getGridInfoVoid();
    }

    /*! \brief Return the number of levels in the AMR hierarchy
     *
     * \return the number of levels
     *
     */
    size_t getNLvl()
    {
        return gd_array.size();
    }

    /*! \brief Move the key one level down (to the finer level)
     *
     * \param key AMR key to move
     *
     */
    void moveLvlDw(grid_dist_amr_key<dim> & key)
    {
#ifdef SE_CLASS1

        if (key.getLvl() >= getNLvl() - 1)
        {std::cerr << __FILE__ << ":" << __LINE__ << " error: we are already at the last level, we cannot go one level down" << std::endl;}

#endif

        auto & key_ref = key.getKeyRef().getKeyRef();
        size_t lvl = key.getLvl();

        for (size_t i = 0 ; i < dim ; i++)
        {
            key_ref.set_d(i,(key_ref.get(i) << 1) + mv_off.get(key.getLvl()).get(key.getKeyRef().getSub()).dw.get(i));
        }

        key.setLvl(lvl+1);
    }

    /*! \brief Move the key one level down (to the finer level)
     *
     * \param lvl level of the key
     * \param key key to move
     *
     * \return the key on the finer level
     *
     */
    grid_dist_key_dx<dim> moveDw(int lvl, const grid_dist_key_dx<dim> & key)
    {
#ifdef SE_CLASS1

        if (lvl >= getNLvl() - 1)
        {std::cerr << __FILE__ << ":" << __LINE__ << " error: we are already at the last level, we cannot go one level down" << std::endl;}

#endif

        grid_dist_key_dx<dim> out;

        for (size_t i = 0 ; i < dim ; i++)
        {
            out.getKeyRef().set_d(i,(key.getKeyRef().get(i) << 1) + mv_off.get(lvl).get(key.getSub()).dw.get(i));
        }

        out.setSub(key.getSub());

        return out;
    }

    /*! \brief From a distributed key, return an AMR key that also carries the level
     *
     * \param lvl level
     * \param key distributed key
     *
     * \return the AMR key
     *
     */
    grid_dist_amr_key<dim> getAMRKey(size_t lvl, grid_dist_key_dx<dim> key)
    {
        return grid_dist_amr_key<dim>(lvl,key);
    }

    /*! \brief Move the key one level up (to the coarser level)
     *
     * \param key AMR key to move
     *
     */
    void moveLvlUp(grid_dist_amr_key<dim> & key)
    {
#ifdef SE_CLASS1

        if (key.getLvl() == 0)
        {std::cerr << __FILE__ << ":" << __LINE__ << " error: we are already at the top level, we cannot go one level up" << std::endl;}

#endif

        auto & key_ref = key.getKeyRef().getKeyRef();
        size_t lvl = key.getLvl();

        for (size_t i = 0 ; i < dim ; i++)
        {
            key_ref.set_d(i,(key_ref.get(i) - mv_off.get(key.getLvl()).get(key.getKeyRef().getSub()).up.get(i)) >> 1);
        }

        key.setLvl(lvl-1);
    }

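    // Level-move sketch (hedged; `amr_g` is hypothetical). moveLvlDw() computes
    // 2*x + off and moveLvlUp() computes (x - off) >> 1 with the matching offset
    // (mv_off[l].dw equals mv_off[l+1].up by construction), so moving a key down
    // and back up returns the original point:
    //
    //   auto it = amr_g.getDomainIterator();
    //   auto akey = it.get();
    //   amr_g.moveLvlDw(akey);   // same point, expressed on the finer level
    //   amr_g.moveLvlUp(akey);   // back on the original level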
    /*! \brief Move the key one level up (to the coarser level)
     *
     * \param lvl level of the key
     * \param key key to move
     *
     * \return the key on the coarser level
     *
     */
    grid_dist_key_dx<dim> moveUp(int lvl, const grid_dist_key_dx<dim> & key)
    {
#ifdef SE_CLASS1

        if (lvl == 0)
        {std::cerr << __FILE__ << ":" << __LINE__ << " error: we are already at the top level, we cannot go one level up" << std::endl;}

#endif

        grid_dist_key_dx<dim> out;

        for (size_t i = 0 ; i < dim ; i++)
        {
            out.getKeyRef().set_d(i,(key.getKeyRef().get(i) - mv_off.get(lvl).get(key.getSub()).up.get(i)) >> 1);
        }

        out.setSub(key.getSub());

        return out;
    }

    /*! \brief Get the position on the grid in global coordinates
     *
     * \param v1 AMR key
     *
     * \return the global grid key
     *
     */
    grid_key_dx<dim> getGKey(const grid_dist_amr_key<dim> & v1)
    {
        return gd_array.get(v1.getLvl()).getGKey(v1.getKey());
    }

    /*! \brief Get the position on the grid in global coordinates
     *
     * \param lvl level
     * \param v1 distributed key
     *
     * \return the global grid key
     *
     */
    grid_key_dx<dim> getGKey(int lvl, const grid_dist_key_dx<dim> & v1)
    {
        return gd_array.get(lvl).getGKey(v1);
    }

    /*! \brief Get the position of the point in space
     *
     * \param v1 AMR key
     *
     * \return the position
     *
     */
    Point<dim,St> getPos(const grid_dist_amr_key<dim> & v1)
    {
        return gd_array.get(v1.getLvl()).getPos(v1.getKey());
    }

    /*! \brief Get the position of the point in space
     *
     * \param lvl level
     * \param v1 distributed key
     *
     * \return the position
     *
     */
    Point<dim,St> getPos(int lvl, const grid_dist_key_dx<dim> & v1)
    {
        return gd_array.get(lvl).getPos(v1);
    }

    /*! \brief Return the grid spacing on the level lvl
     *
     * \param lvl level
     *
     * \return the spacing
     *
     */
    Point<dim,St> getSpacing(size_t lvl)
    {
        return gd_array.get(lvl).getSpacing();
    }

    /*! \brief Check if a point exists
     *
     * \param v1 AMR key of the point to check
     *
     */
    bool existPoint(const grid_dist_amr_key<dim> & v1)
    {
        return gd_array.get(v1.getLvl()).existPoint(v1.getKey());
    }

    /*! \brief Check if a point exists
     *
     * \param lvl level
     * \param v1 distributed key of the point to check
     *
     */
    bool existPoint(int lvl, const grid_dist_key_dx<dim> & v1)
    {
        return gd_array.get(lvl).existPoint(v1);
    }

    /*! \brief Write the AMR grid on VTK files, one per level (suffixed _0, _1, ...)
     *
     * \param output filename prefix
     * \param opt writer options
     *
     * \return true if all the levels have been written correctly
     *
     */
    bool write(std::string output, size_t opt = VTK_WRITER | FORMAT_ASCII)
    {
        bool ret = true;

        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            ret &= gd_array.get(i).write(output + "_" + std::to_string(i),opt);
        }

        return ret;
    }

#ifdef __NVCC__

    /*! \brief Copy the selected properties from the device to the host, on every level
     *
     */
    template<unsigned int ... prp> void deviceToHost()
    {
        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            gd_array.get(i).template deviceToHost<prp ...>();
        }
    }

    /*! \brief Copy the selected properties from the host to the device, on every level
     *
     */
    template<unsigned int ... prp> void hostToDevice()
    {
        for (size_t i = 0 ; i < gd_array.size() ; i++)
        {
            gd_array.get(i).template hostToDevice<prp ...>();
        }
    }

#endif

};

template<unsigned int dim, typename St, typename T>
using sgrid_dist_amr = grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,CartDecomposition<dim,St>,HeapMemory,sgrid_cpu<dim,T,HeapMemory>>;

#ifdef __NVCC__

template<unsigned int dim, typename St, typename T, unsigned int blockEdgeSize = 8>
using sgrid_dist_amr_gpu = grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,CartDecomposition<dim,St,CudaMemory,memory_traits_inte>,CudaMemory,SparseGridGpu<dim,T,blockEdgeSize>>;

#endif

#endif /* AMR_GRID_AMR_DIST_HPP_ */