OpenFPM_pdata  1.1.0
Project that contains the implementation of distributed structures
grid_dist_id.hpp
1 #ifndef COM_UNIT_HPP
2 #define COM_UNIT_HPP
3 
4 #include <vector>
5 #include <unordered_map>
6 #include "Grid/map_grid.hpp"
7 #include "VCluster/VCluster.hpp"
8 #include "Space/SpaceBox.hpp"
9 #include "util/mathutil.hpp"
10 #include "Iterators/grid_dist_id_iterator_dec.hpp"
11 #include "Iterators/grid_dist_id_iterator.hpp"
12 #include "Iterators/grid_dist_id_iterator_sub.hpp"
13 #include "grid_dist_key.hpp"
14 #include "NN/CellList/CellDecomposer.hpp"
15 #include "util/object_util.hpp"
16 #include "memory/ExtPreAlloc.hpp"
17 #include "VTKWriter/VTKWriter.hpp"
18 #include "Packer_Unpacker/Packer.hpp"
19 #include "Packer_Unpacker/Unpacker.hpp"
20 #include "Decomposition/CartDecomposition.hpp"
21 #include "data_type/aggregate.hpp"
22 #include "hdf5.h"
23 #include "grid_dist_id_comm.hpp"
24 #include "HDF5_wr/HDF5_wr.hpp"
25 
27 template<unsigned int dim>
28 struct Box_fix
29 {
 Box<dim,size_t> bx;   // box in global grid units
 comb<dim> cmb;        // sector in which the box lives
 35  size_t g_id;
37  size_t r_sub;
38 };
39 
40 #define GRID_SUB_UNIT_FACTOR 64
41 
68 template<unsigned int dim, typename St, typename T, typename Decomposition = CartDecomposition<dim,St>,typename Memory=HeapMemory , typename device_grid=grid_cpu<dim,T> >
69 class grid_dist_id : public grid_dist_id_comm<dim,St,T,Decomposition,Memory,device_grid>
70 {
73 
76 
79 
82 
85 
88 
91 
94 
97 
99  size_t g_sz[dim];
100 
102  CellDecomposer_sm<dim,St,shift<dim,St>> cd_sm;
103 
106 
109 
112  std::unordered_map<size_t,size_t> g_id_to_external_ghost_box;
113 
117 
120 
123 
126 
129 
131  bool init_local_i_g_box = false;
132 
134  bool init_local_e_g_box = false;
135 
147  static void * msg_alloc_external_box(size_t msg_i ,size_t total_msg, size_t total_p, size_t i, size_t ri, void * ptr)
148  {
 grid_dist_id * g = static_cast<grid_dist_id *>(ptr);
 150 
151  g->recv_sz.resize(g->dec.getNNProcessors());
152  g->recv_mem_gg.resize(g->dec.getNNProcessors());
153 
154  // Get the local processor id
155  size_t lc_id = g->dec.ProctoID(i);
156 
157  // resize the receive buffer
158  g->recv_mem_gg.get(lc_id).resize(msg_i);
159  g->recv_sz.get(lc_id) = msg_i;
160 
161  return g->recv_mem_gg.get(lc_id).getPointer();
162  }
163 
 Box<dim,long int> flip_box(const Box<dim,long int> & box, const comb<dim> & cmb)
 173  {
174  Box<dim,long int> flp;
175 
176  for (size_t i = 0 ; i < dim; i++)
177  {
178  if (cmb[i] == 0)
179  {
180  flp.setLow(i,box.getLow(i));
181  flp.setHigh(i,box.getHigh(i));
182  }
183  else if (cmb[i] == 1)
184  {
185  flp.setLow(i,box.getLow(i) + ginfo.size(i));
186  flp.setHigh(i,box.getHigh(i) + ginfo.size(i));
187  }
188  else if (cmb[i] == -1)
189  {
190  flp.setLow(i,box.getLow(i) - ginfo.size(i));
191  flp.setHigh(i,box.getHigh(i) - ginfo.size(i));
192  }
193  }
194 
195  return flp;
196  }
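 /* Illustrative sketch (not part of the original source) of what flip_box computes.
  * Assume a 2D grid of 64x64 points: an internal ghost box {0,10}-{3,20} lying in
  * the sector cmb = {1,0} is translated by +size(0) along dimension 0 and becomes
  * the external ghost box {64,10}-{67,20} of the neighbor across the periodic border.
  *
  * \code
  * // hypothetical values, for illustration only (flip_box is an internal helper)
  * Box<2,long int> ib({0,10},{3,20});
  * comb<2> cmb;
  * cmb.c[0] = 1; cmb.c[1] = 0;
  * Box<2,long int> eb = flip_box(ib,cmb);   // -> {64,10}-{67,20} on a 64x64 grid
  * \endcode
  */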
197 
210  void set_for_adjustment(const Box<dim,long int> & sub_domain,
211  const Box<dim,St> & sub_domain_other,
212  const comb<dim> & cmb,
213  Box<dim,long int> & ib,
 Ghost<dim,long int> & g)
 215  {
216  if (g.isInvalidGhost() == true)
217  {return;}
218 
219  // Convert from SpaceBox<dim,St> to SpaceBox<dim,long int>
220  Box<dim,long int> sub_domain_other_exp = cd_sm.convertDomainSpaceIntoGridUnits(sub_domain_other,dec.periodicity());
221 
222  // translate sub_domain_other based on cmb
223  for (size_t i = 0 ; i < dim ; i++)
224  {
225  if (cmb.c[i] == 1)
226  {
227  sub_domain_other_exp.setLow(i,sub_domain_other_exp.getLow(i) - ginfo.size(i));
228  sub_domain_other_exp.setHigh(i,sub_domain_other_exp.getHigh(i) - ginfo.size(i));
229  }
230  else if (cmb.c[i] == -1)
231  {
232  sub_domain_other_exp.setLow(i,sub_domain_other_exp.getLow(i) + ginfo.size(i));
233  sub_domain_other_exp.setHigh(i,sub_domain_other_exp.getHigh(i) + ginfo.size(i));
234  }
235  }
236 
237  sub_domain_other_exp.enlarge(g);
238  if (sub_domain_other_exp.Intersect(sub_domain,ib) == false)
239  {
240  for (size_t i = 0 ; i < dim ; i++)
241  {ib.setHigh(i,ib.getLow(i) - 1);}
242  }
243  }
244 
 void create_ig_box()
 249  {
250  if (init_i_g_box == true) return;
251 
252  // Get the grid info
253  auto g = cd_sm.getGrid();
254 
255  g_id_to_internal_ghost_box.resize(dec.getNNProcessors());
256 
257  // Get the number of near processors
258  for (size_t i = 0 ; i < dec.getNNProcessors() ; i++)
259  {
260  ig_box.add();
261  auto&& pib = ig_box.last();
262 
263  pib.prc = dec.IDtoProc(i);
264  for (size_t j = 0 ; j < dec.getProcessorNIGhost(i) ; j++)
265  {
266  // Get the internal ghost boxes and transform into grid units
267  ::Box<dim,St> ib_dom = dec.getProcessorIGhostBox(i,j);
268  ::Box<dim,long int> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom,dec.periodicity());
269 
 270  // Check if ib is valid; if not, the internal ghost does not contain information, so skip it
271  if (ib.isValid() == false)
272  continue;
273 
274  size_t sub_id = dec.getProcessorIGhostSub(i,j);
275  size_t r_sub = dec.getProcessorIGhostSSub(i,j);
276 
277  auto & n_box = dec.getNearSubdomains(dec.IDtoProc(i));
278 
279  Box<dim,long int> sub = gdb_ext.get(sub_id).Dbox;
280  sub += gdb_ext.get(sub_id).origin;
281 
282  set_for_adjustment(sub,
283  n_box.get(r_sub),dec.getProcessorIGhostPos(i,j),
284  ib,ghost_int);
285 
286  if (ib.isValid() == false)
287  continue;
288 
289  // save the box and the sub-domain id (it is calculated as the linearization of P1)
290  ::Box<dim,size_t> cvt = ib;
291 
292  i_box_id<dim> bid_t;
293  bid_t.box = cvt;
294  bid_t.g_id = dec.getProcessorIGhostId(i,j);
295  bid_t.sub = dec.getProcessorIGhostSub(i,j);
296  bid_t.cmb = dec.getProcessorIGhostPos(i,j);
297  bid_t.r_sub = dec.getProcessorIGhostSSub(i,j);
298  pib.bid.add(bid_t);
299 
300  g_id_to_internal_ghost_box.get(i)[bid_t.g_id] = pib.bid.size()-1;
301  }
302  }
303 
304  init_i_g_box = true;
305  }
306 
 void create_eg_box()
 311  {
312  // Get the grid info
313  auto g = cd_sm.getGrid();
314 
315  if (init_e_g_box == true) return;
316 
 317  // Here we collect all the calculated internal ghost boxes in sectors different from 0 that this processor has
318 
320  openfpm::vector<size_t> prc_recv;
321  openfpm::vector<size_t> sz_recv;
322  openfpm::vector<openfpm::vector<Box_fix<dim>>> box_int_send(dec.getNNProcessors());
324 
325  for(size_t i = 0 ; i < dec.getNNProcessors() ; i++)
326  {
327  for (size_t j = 0 ; j < ig_box.get(i).bid.size() ; j++)
328  {
329  box_int_send.get(i).add();
330  box_int_send.get(i).last().bx = ig_box.get(i).bid.get(j).box;
331  box_int_send.get(i).last().g_id = ig_box.get(i).bid.get(j).g_id;
332  box_int_send.get(i).last().r_sub = ig_box.get(i).bid.get(j).r_sub;
333  box_int_send.get(i).last().cmb = ig_box.get(i).bid.get(j).cmb;
334  }
335  prc.add(dec.IDtoProc(i));
336  }
337 
338  v_cl.SSendRecv(box_int_send,box_int_recv,prc,prc_recv,sz_recv);
339 
340  eg_box.resize(dec.getNNProcessors());
341 
342  for (size_t i = 0 ; i < eg_box.size() ; i++)
343  eg_box.get(i).prc = dec.IDtoProc(i);
344 
345  for (size_t i = 0 ; i < box_int_recv.size() ; i++)
346  {
347  size_t p_id = dec.ProctoID(prc_recv.get(i));
348  auto&& pib = eg_box.get(p_id);
349  pib.prc = prc_recv.get(i);
350 
351  // For each received internal ghost box
352  for (size_t j = 0 ; j < box_int_recv.get(i).size() ; j++)
353  {
354  size_t send_list_id = box_int_recv.get(i).get(j).r_sub;
355 
356  // Get the list of the sent sub-domains
357  // and recover the id of the sub-domain from
358  // the sent list
359  const openfpm::vector<size_t> & s_sub = dec.getSentSubdomains(p_id);
360  size_t sub_id = s_sub.get(send_list_id);
361 
362  e_box_id<dim> bid_t;
363  bid_t.sub = sub_id;
364  bid_t.cmb = box_int_recv.get(i).get(j).cmb;
365  bid_t.cmb.sign_flip();
366  ::Box<dim,long int> ib = flip_box(box_int_recv.get(i).get(j).bx,box_int_recv.get(i).get(j).cmb);
367  bid_t.g_e_box = ib;
368  bid_t.g_id = box_int_recv.get(i).get(j).g_id;
369  // Translate in local coordinate
370  Box<dim,long int> tb = ib;
371  tb -= gdb_ext.get(sub_id).origin;
372  bid_t.l_e_box = tb;
373 
374  pib.bid.add(bid_t);
375 
376  g_id_to_external_ghost_box[bid_t.g_id] = pib.bid.size()-1;
377  }
378  }
379 
380  init_e_g_box = true;
381  }
382 
 void create_local_ig_box()
 387  {
388  // Get the grid info
389  auto g = cd_sm.getGrid();
390 
391  if (init_local_i_g_box == true) return;
392 
393  // Get the number of sub-domains
394  for (size_t i = 0 ; i < dec.getNSubDomain() ; i++)
395  {
396  loc_ig_box.add();
397  auto&& pib = loc_ig_box.last();
398 
399  for (size_t j = 0 ; j < dec.getLocalNIGhost(i) ; j++)
400  {
401  // Get the internal ghost boxes and transform into grid units
402  ::Box<dim,St> ib_dom = dec.getLocalIGhostBox(i,j);
403  ::Box<dim,long int> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom,dec.periodicity());
404 
 405  // Check if ib is valid; if not, the internal ghost does not contain information, so skip it
406  if (ib.isValid() == false)
407  continue;
408 
409  size_t sub_id = i;
410  size_t r_sub = dec.getLocalIGhostSub(i,j);
411 
412  Box<dim,long int> sub = gdb_ext.get(sub_id).Dbox;
413  sub += gdb_ext.get(sub_id).origin;
414 
415  set_for_adjustment(sub,dec.getSubDomain(r_sub),
416  dec.getLocalIGhostPos(i,j),ib,ghost_int);
417 
 418  // Check if ib is valid; if not, the internal ghost does not contain information, so skip it
419  if (ib.isValid() == false)
420  continue;
421 
422  pib.bid.add();
423  pib.bid.last().box = ib;
424  pib.bid.last().sub = dec.getLocalIGhostSub(i,j);
425  pib.bid.last().k = dec.getLocalIGhostE(i,j);
426  pib.bid.last().cmb = dec.getLocalIGhostPos(i,j);
427  }
428  }
429 
430  init_local_i_g_box = true;
431  }
432 
 void create_local_eg_box()
 437  {
438  // Get the grid info
439  auto g = cd_sm.getGrid();
440 
441  if (init_local_e_g_box == true) return;
442 
443  loc_eg_box.resize(dec.getNSubDomain());
444 
445  // Get the number of sub-domain
446  for (size_t i = 0 ; i < dec.getNSubDomain() ; i++)
447  {
448  for (size_t j = 0 ; j < loc_ig_box.get(i).bid.size() ; j++)
449  {
450  size_t k = loc_ig_box.get(i).bid.get(j).sub;
451  auto & pib = loc_eg_box.get(k);
452 
453  size_t s = loc_ig_box.get(i).bid.get(j).k;
454  pib.bid.resize(dec.getLocalNEGhost(k));
455 
456  pib.bid.get(s).box = flip_box(loc_ig_box.get(i).bid.get(j).box,loc_ig_box.get(i).bid.get(j).cmb);
457  pib.bid.get(s).sub = dec.getLocalEGhostSub(k,s);
458  pib.bid.get(s).cmb = loc_ig_box.get(i).bid.get(j).cmb;
459  pib.bid.get(s).cmb.sign_flip();
460  pib.bid.get(s).k = j;
461  pib.bid.get(s).initialized = true;
462  }
463  }
464 
465  init_local_e_g_box = true;
466  }
467 
473  inline void check_size(const size_t (& g_sz)[dim])
474  {
475  for (size_t i = 0 ; i < dim ; i++)
476  {
477  if (g_sz[i] < 2)
 478  std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " distributed grids with size smaller than 2 are not supported\n";
479  }
480  }
481 
485  void Create()
486  {
 487  // Get the number of local grids needed
488  size_t n_grid = dec.getNSubDomain();
489 
490  // create gdb
491  create_gdb_ext<dim,Decomposition>(gdb_ext,dec,cd_sm);
492 
493  // create local grids for each hyper-cube
494  loc_grid.resize(n_grid);
495 
496  // Size of the grid on each dimension
497  size_t l_res[dim];
498 
499  // Allocate the grids
500  for (size_t i = 0 ; i < n_grid ; i++)
501  {
502 
503  SpaceBox<dim,long int> sp_tg = gdb_ext.get(i).GDbox;
504 
 505  // Get the size of the local grid.
 506  // The box indicates the extent in index space; the size
 507  // is this extent + 1.
 508  // For example, a 1D box (interval) from 0 to 3 contains
 509  // the points 0,1,2,3, so a total of 4 points.
510  for (size_t j = 0 ; j < dim ; j++)
511  l_res[j] = (sp_tg.getHigh(j) >= 0)?(sp_tg.getHigh(j)+1):0;
512 
513  // Set the dimensions of the local grid
514  loc_grid.get(i).resize(l_res);
515  }
516  }
517 
 grid_dist_id(const grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & g)
 524  :v_cl(g.v_cl)
525  {
526 #ifdef SE_CLASS2
527  check_new(this,8,GRID_DIST_EVENT,4);
528 #endif
529  }
530 
537  inline void InitializeCellDecomposer(const CellDecomposer_sm<dim,St,shift<dim,St>> & cd_old, const Box<dim,size_t> & ext)
538  {
539  // Initialize the cell decomposer
540  cd_sm.setDimensions(cd_old,ext);
541  }
542 
549  inline void InitializeCellDecomposer(const size_t (& g_sz)[dim], const size_t (& bc)[dim])
550  {
551  // check that the grid has valid size
552  check_size(g_sz);
553 
554  // get the size of the cell decomposer
555  size_t c_g[dim];
556  getCellDecomposerPar<dim>(c_g,g_sz,bc);
557 
558  // Initialize the cell decomposer
559  cd_sm.setDimensions(domain,c_g,0);
560  }
561 
568  inline void InitializeDecomposition(const size_t (& g_sz)[dim], const size_t (& bc)[dim])
569  {
570  // fill the global size of the grid
571  for (size_t i = 0 ; i < dim ; i++) {this->g_sz[i] = g_sz[i];}
572 
 573  // Get the number of processors and calculate the number of sub-domains
 574  // for the decomposition
575  size_t n_proc = v_cl.getProcessingUnits();
576  size_t n_sub = n_proc * GRID_SUB_UNIT_FACTOR;
577 
 578  // Calculate the maximum number (before merging) of sub-domains on
 579  // each dimension
580  size_t div[dim];
581  for (size_t i = 0 ; i < dim ; i++)
582  {div[i] = openfpm::math::round_big_2(pow(n_sub,1.0/dim));}
583 
584  // Create the sub-domains
585  dec.setParameters(div,domain,bc,ghost);
586  dec.decompose();
587  }
588 
594  inline void InitializeStructures(const size_t (& g_sz)[dim])
595  {
596  // fill the global size of the grid
597  for (size_t i = 0 ; i < dim ; i++) {this->g_sz[i] = g_sz[i];}
598 
599  // Create local grid
600  Create();
601  }
602 
603 protected:
604 
 Box<dim,size_t> getDomain(size_t i)
 613  {
614  return gdb_ext.get(i).Dbox;
615  }
616 
625  static inline Ghost<dim,float> convert_ghost(const Ghost<dim,long int> & gd, const CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm)
626  {
627  Ghost<dim,float> gc;
628 
629  // get the grid spacing
630  Box<dim,St> sp = cd_sm.getCellBox();
631 
 632  // enlarge the cell box by a factor 1.1, keeping P1 fixed
633  sp.magnify_fix_P1(1.1);
634 
635  // set the ghost
636  for (size_t i = 0 ; i < dim ; i++)
637  {
638  gc.setLow(i,gd.getLow(i)*(sp.getHigh(i)));
639  gc.setHigh(i,gd.getHigh(i)*(sp.getHigh(i)));
640  }
641 
642  return gc;
643  }
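 /* Illustrative sketch (not part of the original source): with a cell spacing of
  * 0.1 per dimension, a ghost of 2 grid points converts to roughly
  * 2 * 0.1 * 1.1 = 0.22 in continuous space units on each side (the 1.1 factor
  * comes from the magnification applied above).
  *
  * \code
  * // hypothetical spacing of 0.1 per dimension, for illustration only
  * Ghost<2,long int> gd(2);                      // ghost expressed in grid points
  * Ghost<2,float> gc = convert_ghost(gd,cd_sm);  // each extension becomes ~0.22
  * \endcode
  */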
644 
645 public:
646 
648  typedef device_grid d_grid;
649 
652 
654  typedef T value_type;
655 
657  typedef St stype;
658 
660  typedef Memory memory_type;
661 
663  typedef device_grid device_grid_type;
664 
666  static const unsigned int dims = dim;
667 
673  inline const Box<dim,St> getDomain() const
674  {
675  return domain;
676  }
677 
 Point<dim,St> getOffset(size_t i)
 686  {
687  return pmul(Point<dim,St>(gdb_ext.get(i).origin), cd_sm.getCellBox().getP2()) + getDomain().getP1();
688  }
689 
697  inline St spacing(size_t i) const
698  {
699  return cd_sm.getCellBox().getHigh(i);
700  }
701 
707  size_t size() const
708  {
709  return ginfo_v.size();
710  }
711 
719  size_t size(size_t i) const
720  {
721  return ginfo_v.size(i);
722  }
723 
734  template<typename H>
735  grid_dist_id(const grid_dist_id<dim,St,H,typename Decomposition::base_type,Memory,grid_cpu<dim,H>> & g,
736  const Ghost<dim,long int> & gh,
737  Box<dim,size_t> ext)
738  :ghost_int(gh),dec(create_vcluster()),v_cl(create_vcluster())
739  {
740 #ifdef SE_CLASS2
741  check_new(this,8,GRID_DIST_EVENT,4);
742 #endif
743 
744  size_t ext_dim[dim];
745  for (size_t i = 0 ; i < dim ; i++) {ext_dim[i] = g.getGridInfoVoid().size(i) + ext.getKP1().get(i) + ext.getKP2().get(i);}
746 
747  // Set the grid info of the extended grid
748  ginfo.setDimensions(ext_dim);
749  ginfo_v.setDimensions(ext_dim);
750 
751  InitializeCellDecomposer(g.getCellDecomposer(),ext);
752 
753  ghost = convert_ghost(gh,cd_sm);
754 
755  // Extend the grid by the extension part and calculate the domain
756 
757  for (size_t i = 0 ; i < dim ; i++)
758  {
759  g_sz[i] = g.size(i) + ext.getLow(i) + ext.getHigh(i);
760 
761  if (g.getDecomposition().periodicity(i) == NON_PERIODIC)
762  {
763  this->domain.setLow(i,g.getDomain().getLow(i) - ext.getLow(i) * g.spacing(i) - g.spacing(i) / 2.0);
764  this->domain.setHigh(i,g.getDomain().getHigh(i) + ext.getHigh(i) * g.spacing(i) + g.spacing(i) / 2.0);
765  }
766  else
767  {
768  this->domain.setLow(i,g.getDomain().getLow(i) - ext.getLow(i) * g.spacing(i));
769  this->domain.setHigh(i,g.getDomain().getHigh(i) + ext.getHigh(i) * g.spacing(i));
770  }
771  }
772 
773  dec.setParameters(g.getDecomposition(),ghost,this->domain);
774 
775  InitializeStructures(g.getGridInfoVoid().getSize());
776  }
777 
 grid_dist_id(const Decomposition & dec,
 786  const size_t (& g_sz)[dim],
787  const Ghost<dim,St> & ghost)
788  :domain(dec.getDomain()),ghost(ghost),ghost_int(INVALID_GHOST),dec(dec),v_cl(create_vcluster()),
790  {
791 #ifdef SE_CLASS2
792  check_new(this,8,GRID_DIST_EVENT,4);
793 #endif
794 
795  InitializeCellDecomposer(g_sz,dec.periodicity());
797  }
798 
806  grid_dist_id(Decomposition && dec, const size_t (& g_sz)[dim],
807  const Ghost<dim,St> & ghost)
808  :domain(dec.getDomain()),ghost(ghost),dec(dec),ginfo(g_sz),
809  ginfo_v(g_sz),v_cl(create_vcluster()),ghost_int(INVALID_GHOST)
810  {
811 #ifdef SE_CLASS2
812  check_new(this,8,GRID_DIST_EVENT,4);
813 #endif
814 
815  InitializeCellDecomposer(g_sz,dec.periodicity());
817  }
818 
828  grid_dist_id(const Decomposition & dec, const size_t (& g_sz)[dim],
829  const Ghost<dim,long int> & g)
830  :domain(dec.getDomain()),ghost_int(g),dec(create_vcluster()),v_cl(create_vcluster()),
832  {
833 #ifdef SE_CLASS2
834  check_new(this,8,GRID_DIST_EVENT,4);
835 #endif
836 
837  InitializeCellDecomposer(g_sz,dec.periodicity());
838 
840  this->dec = dec.duplicate(ghost);
841 
842  // Initialize structures
844  }
845 
855  grid_dist_id(Decomposition && dec, const size_t (& g_sz)[dim],
856  const Ghost<dim,long int> & g)
857  :domain(dec.getDomain()),dec(dec),v_cl(create_vcluster()),ginfo(g_sz),
859  {
860 #ifdef SE_CLASS2
861  check_new(this,8,GRID_DIST_EVENT,4);
862 #endif
863  InitializeCellDecomposer(g_sz,dec.periodicity());
864 
866 
867  // Initialize structures
869  }
870 
880  grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain,
881  const Ghost<dim,St> & g)
882  :grid_dist_id(g_sz,domain,g,create_non_periodic<dim>())
883  {
884  }
885 
895  grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,long int> & g)
896  :grid_dist_id(g_sz,domain,g,create_non_periodic<dim>())
897  {
898  }
899 
910  grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain,
911  const Ghost<dim,St> & g, const periodicity<dim> & p)
912  :domain(domain),ghost(g),ghost_int(INVALID_GHOST),dec(create_vcluster()),v_cl(create_vcluster()),
914  {
915 #ifdef SE_CLASS2
916  check_new(this,8,GRID_DIST_EVENT,4);
917 #endif
918 
922  }
923 
934  grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain,
935  const Ghost<dim,long int> & g, const periodicity<dim> & p)
936  :domain(domain),ghost_int(g),dec(create_vcluster()),v_cl(create_vcluster()),ginfo(g_sz),
937  ginfo_v(g_sz)
938  {
939 #ifdef SE_CLASS2
940  check_new(this,8,GRID_DIST_EVENT,4);
941 #endif
943 
945 
947  // Initialize structures
949  }
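 /* Minimal usage sketch (illustrative, not part of the original source; it assumes
  * OpenFPM has already been initialized with openfpm_init): construct a 2D
  * distributed grid of 128x128 points with one float property, one grid point of
  * ghost and periodic boundary conditions.
  *
  * \code
  * size_t sz[2] = {128,128};                       // number of grid points
  * Box<2,float> domain({0.0,0.0},{1.0,1.0});       // physical domain
  * Ghost<2,long int> g(1);                         // ghost of one grid point
  * periodicity<2> bc = {{PERIODIC,PERIODIC}};      // boundary conditions
  * grid_dist_id<2,float,aggregate<float>> g_dist(sz,domain,g,bc);
  * \endcode
  */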
950 
956  const grid_sm<dim,T> & getGridInfo() const
957  {
958 #ifdef SE_CLASS2
959  check_valid(this,8);
960 #endif
961  return ginfo;
962  }
963 
 const grid_sm<dim,void> & getGridInfoVoid() const
 970  {
971 #ifdef SE_CLASS2
972  check_valid(this,8);
973 #endif
974  return ginfo_v;
975  }
976 
 Decomposition & getDecomposition()
 983  {
984 #ifdef SE_CLASS2
985  check_valid(this,8);
986 #endif
987  return dec;
988  }
989 
 const Decomposition & getDecomposition() const
 996  {
997 #ifdef SE_CLASS2
998  check_valid(this,8);
999 #endif
1000  return dec;
1001  }
1002 
1008  const CellDecomposer_sm<dim,St,shift<dim,St>> & getCellDecomposer() const
1009  {
1010 #ifdef SE_CLASS2
1011  check_valid(this,8);
1012 #endif
1013  return cd_sm;
1014  }
1015 
1023  bool isInside(const grid_key_dx<dim> & gk) const
1024  {
1025 #ifdef SE_CLASS2
1026  check_valid(this,8);
1027 #endif
1028  for (size_t i = 0 ; i < dim ; i++)
1029  {
1030  if (gk.get(i) < 0 || gk.get(i) >= (long int)g_sz[i])
1031  {return false;}
1032  }
1033 
1034  return true;
1035  }
1036 
1042  size_t getLocalDomainSize() const
1043  {
1044 #ifdef SE_CLASS2
1045  check_valid(this,8);
1046 #endif
1047  size_t total = 0;
1048 
1049  for (size_t i = 0 ; i < gdb_ext.size() ; i++)
1050  {
1051  total += gdb_ext.get(i).Dbox.getVolumeKey();
1052  }
1053 
1054  return total;
1055  }
1056 
 size_t getLocalDomainWithGhostSize() const
 1063  {
1064 #ifdef SE_CLASS2
1065  check_valid(this,8);
1066 #endif
1067  size_t total = 0;
1068 
1069  for (size_t i = 0 ; i < gdb_ext.size() ; i++)
1070  {
1071  total += gdb_ext.get(i).GDbox.getVolumeKey();
1072  }
1073 
1074  return total;
1075  }
1076 
1077 
 const openfpm::vector<GBoxes<device_grid::dims>> & getLocalGridsInfo()
 1084  {
1085 #ifdef SE_CLASS2
1086  check_valid(this,8);
1087 #endif
1088  return gdb_ext;
1089  }
1090 
 void getGlobalGridsInfo(openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global) const
 1097  {
1098 #ifdef SE_CLASS2
1099  check_valid(this,8);
1100 #endif
1102  v_cl.execute();
1103 
1104  size_t size_r;
1105  size_t size = gdb_ext_global.size();
1106 
1107  if (v_cl.getProcessUnitID() == 0)
1108  {
1109  for (size_t i = 1; i < v_cl.getProcessingUnits(); i++)
1110  v_cl.send(i,0,&size,sizeof(size_t));
1111 
1112  size_r = size;
1113  }
1114  else
1115  v_cl.recv(0,0,&size_r,sizeof(size_t));
1116 
1117  v_cl.execute();
1118 
1119  gdb_ext_global.resize(size_r);
1120 
1121 
1122  if (v_cl.getProcessUnitID() == 0)
1123  {
1124  for (size_t i = 1; i < v_cl.getProcessingUnits(); i++)
1125  v_cl.send(i,0,gdb_ext_global);
1126  }
1127  else
1128  v_cl.recv(0,0,gdb_ext_global);
1129 
1130  v_cl.execute();
1131  }
1132 
1133 
 grid_dist_iterator<dim,device_grid,FREE> getOldDomainIterator() const
 1140  {
1141 #ifdef SE_CLASS2
1142  check_valid(this,8);
1143 #endif
1144 
1146  grid_key_dx<dim> one;
1147  one.one();
1148  stop = stop - one;
1149 
1151 
1152  return it;
1153  }
1154 
1161  {
1162 #ifdef SE_CLASS2
1163  check_valid(this,8);
1164 #endif
1165 
1167  grid_key_dx<dim> one;
1168  one.one();
1169  stop = stop - one;
1170 
1172 
1173  return it;
1174  }
1175 
1183  template<unsigned int Np>
1185  getDomainIteratorStencil(const grid_key_dx<dim> (& stencil_pnt)[Np]) const
1186  {
1187 #ifdef SE_CLASS2
1188  check_valid(this,8);
1189 #endif
1190 
1192  grid_key_dx<dim> one;
1193  one.one();
1194  stop = stop - one;
1195 
1197 
1198  return it;
1199  }
1200 
 grid_dist_iterator<dim,device_grid,FIXED> getDomainGhostIterator() const
 1207  {
1208 #ifdef SE_CLASS2
1209  check_valid(this,8);
1210 #endif
1212 
1213  return it;
1214  }
1215 
 grid_dist_iterator_sub<dim,device_grid> getSubDomainIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop) const
 1229  {
1230 #ifdef SE_CLASS2
1231  check_valid(this,8);
1232 #endif
1234 
1235  return it;
1236  }
1237 
1250  grid_dist_iterator_sub<dim,device_grid> getSubDomainIterator(const long int (& start)[dim], const long int (& stop)[dim]) const
1251  {
1253 
1254  return it;
1255  }
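 /* Usage sketch (illustrative, not part of the original source; g_dist denotes an
  * instance of this class): iterate only the inner part of a 128x128 grid,
  * skipping a one-point border.
  *
  * \code
  * long int start[2] = {1,1};
  * long int stop[2]  = {126,126};
  * auto it = g_dist.getSubDomainIterator(start,stop);
  * while (it.isNext())
  * {
  *     auto key = it.get();
  *     // ... use g_dist.template get<0>(key) ...
  *     ++it;
  * }
  * \endcode
  */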
1256 
 ~grid_dist_id()
 1259  {
1260 #ifdef SE_CLASS2
1261  check_delete(this);
1262 #endif
1263  dec.decRef();
1264  }
1265 
 Vcluster & getVC()
 1272  {
1273 #ifdef SE_CLASS2
1274  check_valid(this,8);
1275 #endif
1276  return v_cl;
1277  }
1278 
 bool is_staggered()
 1285  {
1286  return false;
1287  }
1288 
1297  template <unsigned int p = 0>inline auto get(const grid_dist_key_dx<dim> & v1) const -> typename std::add_lvalue_reference<decltype(loc_grid.get(v1.getSub()).template get<p>(v1.getKey()))>::type
1298  {
1299 #ifdef SE_CLASS2
1300  check_valid(this,8);
1301 #endif
1302  return loc_grid.get(v1.getSub()).template get<p>(v1.getKey());
1303  }
1304 
1313  template <unsigned int p = 0>inline auto get(const grid_dist_key_dx<dim> & v1) -> typename std::add_lvalue_reference<decltype(loc_grid.get(v1.getSub()).template get<p>(v1.getKey()))>::type
1314  {
1315 #ifdef SE_CLASS2
1316  check_valid(this,8);
1317 #endif
1318  return loc_grid.get(v1.getSub()).template get<p>(v1.getKey());
1319  }
1320 
1329  template <unsigned int p = 0>inline auto get(grid_dist_g_dx<device_grid> & v1) const -> typename std::add_lvalue_reference<decltype(v1.getSub()->template get<p>(v1.getKey()))>::type
1330  {
1331 #ifdef SE_CLASS2
1332  check_valid(this,8);
1333 #endif
1334  return v1.getSub()->template get<p>(v1.getKey());
1335  }
1336 
1345  template <unsigned int p = 0>inline auto get(grid_dist_g_dx<device_grid> & v1) -> typename std::add_lvalue_reference<decltype(v1.getSub()->template get<p>(v1.getKey()))>::type
1346  {
1347 #ifdef SE_CLASS2
1348  check_valid(this,8);
1349 #endif
1350  return v1.getSub()->template get<p>(v1.getKey());
1351  }
1352 
1361  template <unsigned int p = 0>inline auto get(const grid_dist_lin_dx & v1) const -> typename std::add_lvalue_reference<decltype(loc_grid.get(v1.getSub()).template get<p>(v1.getKey()))>::type
1362  {
1363 #ifdef SE_CLASS2
1364  check_valid(this,8);
1365 #endif
1366  return loc_grid.get(v1.getSub()).template get<p>(v1.getKey());
1367  }
1368 
1377  template <unsigned int p = 0>inline auto get(const grid_dist_lin_dx & v1) -> typename std::add_lvalue_reference<decltype(loc_grid.get(v1.getSub()).template get<p>(v1.getKey()))>::type
1378  {
1379 #ifdef SE_CLASS2
1380  check_valid(this,8);
1381 #endif
1382  return loc_grid.get(v1.getSub()).template get<p>(v1.getKey());
1383  }
1384 
1393  template <unsigned int p = 0>inline auto getProp(const grid_dist_key_dx<dim> & v1) const -> decltype(this->template get<p>(v1))
1394  {
1395  return this->template get<p>(v1);
1396  }
1397 
1406  template <unsigned int p = 0>inline auto getProp(const grid_dist_key_dx<dim> & v1) -> decltype(this->template get<p>(v1))
1407  {
1408  return this->template get<p>(v1);
1409  }
1410 
1412  bool init_e_g_box = false;
1413 
1415  bool init_i_g_box = false;
1416 
1418  bool init_fix_ie_g_box = false;
1419 
1422 
1425 
1428 
1431 
1437  template<int... prp> void ghost_get()
1438  {
1439 #ifdef SE_CLASS2
1440  check_valid(this,8);
1441 #endif
1442 
1443  // Convert the ghost internal boxes into grid unit boxes
1444  create_ig_box();
1445 
1446  // Convert the ghost external boxes into grid unit boxes
1447  create_eg_box();
1448 
1449  // Convert the local ghost internal boxes into grid unit boxes
 create_local_ig_box();
 1451 
 1452  // Convert the local external ghost boxes into grid unit boxes
 create_local_eg_box();
 1454 
 grid_dist_id_comm<dim,St,T,Decomposition,Memory,device_grid>::template ghost_get_<prp...>(ig_box,
 1456  eg_box,
 1457  loc_ig_box,
 1458  loc_eg_box,
 1459  gdb_ext,
 1460  loc_grid,
 g_id_to_external_ghost_box);
 1462  }
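 /* Usage sketch (illustrative, not part of the original source; g_dist denotes an
  * instance of this class): fill property 0 on the domain part of the grid, then
  * synchronize the ghost layers.
  *
  * \code
  * auto it = g_dist.getDomainIterator();
  * while (it.isNext())
  * {
  *     auto key = it.get();
  *     g_dist.template get<0>(key) = 1.0;
  *     ++it;
  * }
  * g_dist.template ghost_get<0>();   // ghost points now hold the neighbor values
  * \endcode
  */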
1463 
1469  template<template<typename,typename> class op,int... prp> void ghost_put()
1470  {
1471 #ifdef SE_CLASS2
1472  check_valid(this,8);
1473 #endif
1474 
1475  // Convert the ghost internal boxes into grid unit boxes
1476  create_ig_box();
1477 
1478  // Convert the ghost external boxes into grid unit boxes
1479  create_eg_box();
1480 
1481  // Convert the local ghost internal boxes into grid unit boxes
 create_local_ig_box();
 1483 
 1484  // Convert the local external ghost boxes into grid unit boxes
 create_local_eg_box();
 1486 
 grid_dist_id_comm<dim,St,T,Decomposition,Memory,device_grid>::template ghost_put_<op,prp...>(ig_box,
 1488  eg_box,
 1489  loc_ig_box,
 1490  loc_eg_box,
 1491  gdb_ext,
 1492  loc_grid,
 g_id_to_internal_ghost_box);
 1494  }
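 /* Usage sketch (illustrative, not part of the original source; it assumes an
  * accumulation operator such as add_ is available for the op template parameter,
  * and g_dist denotes an instance of this class): after writing contributions into
  * the ghost part, merge them back into the owning processors.
  *
  * \code
  * g_dist.template ghost_put<add_,0>();   // sum ghost contributions of property 0
  * \endcode
  */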
1495 
1496 
 grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & copy(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & g, bool use_memcpy = true)
 1510  {
1511  if (T::noPointers() == true && use_memcpy)
1512  {
1513  for (size_t i = 0 ; i < this->getN_loc_grid() ; i++)
1514  {
1515  auto & gs_src = this->get_loc_grid(i).getGrid();
1516 
1517  long int start = gs_src.LinId(gdb_ext.get(i).Dbox.getKP1());
1518  long int stop = gs_src.LinId(gdb_ext.get(i).Dbox.getKP2());
1519 
1520  if (stop < start) {continue;}
1521 
1522  void * dst = static_cast<void *>(static_cast<char *>(this->get_loc_grid(i).getPointer()) + start*sizeof(T));
1523  void * src = static_cast<void *>(static_cast<char *>(g.get_loc_grid(i).getPointer()) + start*sizeof(T));
1524 
1525  memcpy(dst,src,sizeof(T) * (stop + 1 - start));
1526  }
1527  }
1528  else
1529  {
1530  grid_key_dx<dim> cnt[1];
1531  cnt[0].zero();
1532 
1533  for (size_t i = 0 ; i < this->getN_loc_grid() ; i++)
1534  {
1535  auto & dst = this->get_loc_grid(i);
1536  auto & src = g.get_loc_grid(i);
1537 
1538  auto it = this->get_loc_grid_iterator_stencil(i,cnt);
1539 
1540  while (it.isNext())
1541  {
1542  // center point
1543  auto Cp = it.template getStencil<0>();
1544 
1545  dst.get_o(Cp) = src.get_o(Cp);
1546 
1547  ++it;
1548  }
1549  }
1550  }
1551 
1552  return *this;
1553  }
1554 
 Point<dim,St> getSpacing()
 1561  {
1562  return cd_sm.getCellBox().getP2();
1563  }
1564 
 grid_key_dx<dim> getGKey(const grid_dist_key_dx<dim> & k)
 1576  {
1577 #ifdef SE_CLASS2
1578  check_valid(this,8);
1579 #endif
1580  // Get the sub-domain id
1581  size_t sub_id = k.getSub();
1582 
1583  grid_key_dx<dim> k_glob = k.getKey();
1584 
1585  // shift
1586  k_glob = k_glob + gdb_ext.get(sub_id).origin;
1587 
1588  return k_glob;
1589  }
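 /* Usage sketch (illustrative, not part of the original source; g_dist denotes an
  * instance of this class): use the global key to initialize a property from the
  * global grid coordinates.
  *
  * \code
  * auto it = g_dist.getDomainIterator();
  * while (it.isNext())
  * {
  *     auto key  = it.get();               // local (distributed) key
  *     auto gkey = g_dist.getGKey(key);    // global grid coordinates
  *     g_dist.template get<0>(key) = gkey.get(0) + gkey.get(1);
  *     ++it;
  * }
  * \endcode
  */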
1590 
1602  bool write(std::string output, size_t opt = VTK_WRITER | FORMAT_ASCII)
1603  {
1604 #ifdef SE_CLASS2
1605  check_valid(this,8);
1606 #endif
1607  file_type ft = file_type::ASCII;
1608 
1609  if (opt & FORMAT_BINARY)
1610  ft = file_type::BINARY;
1611 
1612  // Create a writer and write
1614  for (size_t i = 0 ; i < loc_grid.size() ; i++)
1615  {
1616  Point<dim,St> offset = getOffset(i);
1617  vtk_g.add(loc_grid.get(i),offset,cd_sm.getCellBox().getP2(),gdb_ext.get(i).Dbox);
1618  }
1619  vtk_g.write(output + "_" + std::to_string(v_cl.getProcessUnitID()) + ".vtk", prp_names, "grids", ft);
1620 
1621  return true;
1622  }
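 /* Usage sketch (illustrative, not part of the original source; g_dist denotes an
  * instance of this class): each processor writes its local grids to a separate
  * VTK file, here grid_output_<rank>.vtk.
  *
  * \code
  * g_dist.write("grid_output");                              // ASCII VTK
  * g_dist.write("grid_output",VTK_WRITER | FORMAT_BINARY);   // binary VTK
  * \endcode
  */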
1623 
1636  bool write_frame(std::string output, size_t i, size_t opt = VTK_WRITER | FORMAT_ASCII)
1637  {
1638 #ifdef SE_CLASS2
1639  check_valid(this,8);
1640 #endif
1641  file_type ft = file_type::ASCII;
1642 
1643  if (opt & FORMAT_BINARY)
1644  ft = file_type::BINARY;
1645 
1646  // Create a writer and write
1648  for (size_t i = 0 ; i < loc_grid.size() ; i++)
1649  {
1650  Point<dim,St> offset = getOffset(i);
1651  vtk_g.add(loc_grid.get(i),offset,cd_sm.getCellBox().getP2(),gdb_ext.get(i).Dbox);
1652  }
1653  vtk_g.write(output + "_" + std::to_string(v_cl.getProcessUnitID()) + "_" + std::to_string(i) + ".vtk",prp_names,"grids",ft);
1654 
1655  return true;
1656  }
1657 
1658 
1659 
1667  device_grid & get_loc_grid(size_t i)
1668  {
1669  return loc_grid.get(i);
1670  }
1671 
 grid_key_dx_iterator_sub<dim,no_stencil> get_loc_grid_iterator(size_t i)
 1680  {
1681  return grid_key_dx_iterator_sub<dim,no_stencil>(loc_grid.get(i).getGrid(),
1682  gdb_ext.get(i).Dbox.getKP1(),
1683  gdb_ext.get(i).Dbox.getKP2());
1684  }
1685 
1693  template<unsigned int Np>
 grid_key_dx_iterator_sub<dim,stencil_offset_compute<dim,Np>> get_loc_grid_iterator_stencil(size_t i, const grid_key_dx<dim> (& stencil_pnt)[Np])
 1695  {
 return grid_key_dx_iterator_sub<dim,stencil_offset_compute<dim,Np>>(loc_grid.get(i).getGrid(),
1697  gdb_ext.get(i).Dbox.getKP1(),
1698  gdb_ext.get(i).Dbox.getKP2(),
1699  stencil_pnt);
1700  }
1701 
1707  size_t getN_loc_grid()
1708  {
1709  return loc_grid.size();
1710  }
1711 
1712 
1720  long int who()
1721  {
1722 #ifdef SE_CLASS2
1723  return check_whoami(this,8);
1724 #else
1725  return -1;
1726 #endif
1727  }
1728 
1733  void debugPrint()
1734  {
1735  std::cout << "-------- External Ghost boxes ---------- " << std::endl;
1736 
1737  for (size_t i = 0 ; i < eg_box.size() ; i++)
1738  {
1739  std::cout << "Processor: " << eg_box.get(i).prc << " Boxes:" << std::endl;
1740 
1741  for (size_t j = 0; j < eg_box.get(i).bid.size() ; j++)
1742  {
1743  std::cout << " Box: " << eg_box.get(i).bid.get(j).g_e_box.toString() << " Id: " << eg_box.get(i).bid.get(j).g_id << std::endl;
1744  }
1745  }
1746 
1747  std::cout << "-------- Internal Ghost boxes ---------- " << std::endl;
1748 
1749  for (size_t i = 0 ; i < ig_box.size() ; i++)
1750  {
1751  std::cout << "Processor: " << ig_box.get(i).prc << " Boxes:" << std::endl;
1752 
1753  for (size_t j = 0 ; j < ig_box.get(i).bid.size() ; j++)
1754  {
1755  std::cout << " Box: " << ig_box.get(i).bid.get(j).box.toString() << " Id: " << ig_box.get(i).bid.get(j).g_id << std::endl;
1756  }
1757  }
1758  }
1759 
 void setPropNames(const openfpm::vector<std::string> & names)
 1768  {
1769  prp_names = names;
1770  }
1771 
1772 
1779  void map()
1780  {
1782 
1784 
1785  loc_grid_old.clear();
1786  gdb_ext_old.clear();
1787  }
1788 
1789  inline void save(const std::string & filename) const
1790  {
1792 
1793  h5s.save(filename,loc_grid,gdb_ext);
1794  }
1795 
1796  inline void load(const std::string & filename)
1797  {
1799 
1800  h5l.load<device_grid>(filename,loc_grid_old,gdb_ext_old);
1801 
1802  // Map the distributed grid
1803  map();
1804  }
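 /* Checkpoint/restart sketch (illustrative, not part of the original source;
  * g_dist and g_dist2 denote instances of this class): save the grid to an HDF5
  * file and reload it later; load() calls map() so the pieces are redistributed
  * to the current decomposition.
  *
  * \code
  * g_dist.save("grid_checkpoint.h5");
  * // ... later, on a freshly constructed grid with the same size and domain
  * g_dist2.load("grid_checkpoint.h5");
  * \endcode
  */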
1805 
1812  {
1813  return this->loc_ig_box;
1814  }
1815 
1822  {
1823  return this->ig_box;
1824  }
1825 
1827  //\cond
1829  //\endcond
1830 };
1831 
1832 
1833 
1834 #endif