OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed data structures
SparseGrid.hpp
1 /*
2  * SparseGrid.hpp
3  *
4  * Created on: Oct 22, 2017
5  * Author: i-bird
6  */
7 
8 #ifndef OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_
9 #define OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_
10 
11 #include "memory_ly/memory_array.hpp"
12 #include "memory_ly/memory_c.hpp"
13 #include "memory_ly/memory_conf.hpp"
14 #include "hash_map/hopscotch_map.h"
15 #include "hash_map/hopscotch_set.h"
16 #include "Vector/map_vector.hpp"
17 #include "util/variadic_to_vmpl.hpp"
18 #include "data_type/aggregate.hpp"
19 #include "SparseGridUtil.hpp"
20 #include "SparseGrid_iterator.hpp"
21 #include "SparseGrid_iterator_block.hpp"
22 #include "SparseGrid_conv_opt.hpp"
23 //#include "util/debug.hpp"
24 // We do not want parallel writer
25 
26 #ifdef OPENFPM_DATA_ENABLE_IO_MODULE
27 #define NO_PARALLEL
28 #include "VTKWriter/VTKWriter.hpp"
29 #endif
30 
31 
32 
33 
34 template<typename Tsrc,typename Tdst>
// Functor (boost::mpl::for_each_ref visitor): for each property index T::value,
// copy the background value from `src` into slot `pos` of the destination chunk.
// NOTE(review): the body of operator() below only declares copy_rtype — the
// actual meta_copy call (original line 57) was lost from this listing; confirm
// against the upstream source before relying on this functor.
35 class copy_bck
36 {
// reference to the source (background) aggregate
38  Tsrc & src;
39 
// reference to the destination chunk
41  Tdst & dst;
42 
// in-chunk position to write
43  size_t pos;
44 
45 public:
46 
47  copy_bck(Tsrc & src, Tdst & dst,size_t pos)
48  :src(src),dst(dst),pos(pos)
49  {}
50 
52  template<typename T>
53  inline void operator()(T& t) const
54  {
55  typedef typename std::remove_reference<decltype(src.template get<T::value>())>::type copy_rtype;
56 
58  }
59 
60 };
61 
62 template<typename Tsrc,typename Tdst>
// Functor: for each property index T::value, copy element `pos` of the source
// chunk property into the destination (vector-element) aggregate.
// NOTE(review): the class-name line (original line 63) was lost from this
// listing; the constructor below shows the class is `copy_prop_to_vector`.
64 {
// source encapsulated chunk (held by value)
66  Tsrc src;
67 
// destination element (held by value)
69  Tdst dst;
70 
// in-chunk position to read
71  size_t pos;
72 
73 public:
74 
75  copy_prop_to_vector(Tsrc src, Tdst dst,size_t pos)
76  :src(src),dst(dst),pos(pos)
77  {}
78 
80  template<typename T>
81  inline void operator()(T& t) const
82  {
83  typedef typename std::remove_reference<decltype(dst.template get<T::value>())>::type copy_rtype;
84 
85  meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>()[pos],dst.template get<T::value>());
86  }
87 
88 };
89 
90 template<unsigned int dim, typename Tsrc,typename Tdst>
// Functor: for each property index T::value, copy from position `pos_src` of a
// sparse source into position `pos_dst` of a sparse destination (via insert).
// NOTE(review): the class-name line (original 91/93), the pos_src/pos_dst
// member declarations (original 99-103) and the constructor signature
// (original 106-108) were all lost from this listing; recover them from the
// upstream source — the operator() body clearly references pos_src/pos_dst.
92 {
// reference to the source grid
94  const Tsrc & src;
95 
// reference to the destination grid
97  Tdst & dst;
98 
101 
104 
105 public:
106 
109  {}
110 
112  template<typename T>
113  inline void operator()(T& t) const
114  {
115  typedef typename std::remove_reference<decltype(dst.template insert<T::value>(pos_dst))>::type copy_rtype;
116 
117  meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>(pos_src),dst.template insert<T::value>(pos_dst));
118  }
119 
120 };
121 
122 template<typename T>
// Implementation helper: copy a single scalar property element from slot
// `pos_id_src` of a source chunk into slot `pos_id_dst` of a destination chunk.
// NOTE(review): the struct-name line (original line 123) was lost from this
// listing; the two partial specializations below (for T[N1] and T[N1][N2])
// belong to the same primary template.
124 {
125  template<unsigned int prop, typename Tsrc, typename Tdst>
126  static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
127  {
128  typedef typename std::remove_reference<decltype(dst.template get<prop>()[pos_id_dst])>::type copy_rtype;
129 
130  meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[pos_id_src],dst.template get<prop>()[pos_id_dst]);
131  }
132 };
133 
134 template<typename T, unsigned int N1>
// Partial specialization for one-dimensional array properties T[N1]: copy
// every component i of the element from pos_id_src to pos_id_dst.
// NOTE(review): the struct-name line (original line 135) was lost from this
// listing.
136 {
137  template<unsigned int prop, typename Tsrc, typename Tdst>
138  static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
139  {
140  typedef typename std::remove_reference<decltype(dst.template get<prop>()[0][pos_id_dst])>::type copy_rtype;
141 
142  for (int i = 0 ; i < N1 ; i++)
143  {
144  meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[i][pos_id_src],dst.template get<prop>()[i][pos_id_dst]);
145  }
146  }
147 };
148 
149 template<typename T, unsigned int N1, unsigned int N2>
// Partial specialization for two-dimensional array properties T[N1][N2]: copy
// every component (i,j) of the element from pos_id_src to pos_id_dst.
// NOTE(review): the struct-name line (original line 150) was lost from this
// listing.
151 {
152  template<unsigned int prop, typename Tsrc, typename Tdst>
153  static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
154  {
155  typedef typename std::remove_reference<decltype(dst.template get<prop>()[0][0][pos_id_dst])>::type copy_rtype;
156 
157  for (int i = 0 ; i < N1 ; i++)
158  {
159  for (int j = 0 ; j < N2 ; j++)
160  {
161  meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[i][j][pos_id_src],dst.template get<prop>()[i][j][pos_id_dst]);
162  }
163  }
164  }
165 };
166 
167 template<unsigned int dim, typename Tsrc,typename Tdst, typename aggrType>
// Functor: block-to-block copy of one element between two sparse grids, the
// element type being taken from the aggregate `aggrType`.
// NOTE(review): the struct-name line (original 168), the constructor
// member-initializer list (original 185) and the copy call inside operator()
// (original 198) were lost from this listing; confirm against upstream.
169 {
// source grid
171  const Tsrc & src;
172 
// destination grid
174  Tdst & dst;
175 
// in-chunk position to read
177  short int pos_id_src;
178 
// in-chunk position to write
180  short int pos_id_dst;
181 
182 public:
183 
184  copy_sparse_to_sparse_bb(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
186  {}
187 
189  template<typename T>
190  inline void operator()(T& t) const
191  {
192 /* typedef typename std::remove_reference<decltype(dst.template get<T::value>())>::type copy_rtype;
193 
194  meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>()[pos_id_src],dst.template get<T::value>()[pos_id_dst]);*/
195 
196  typedef typename boost::mpl::at<typename aggrType::type, T>::type copy_rtype;
197 
199  }
200 
201 };
202 
203 template< template<typename,typename> class op,unsigned int dim, typename Tsrc,typename Tdst, unsigned int ... prp>
// Functor: copy the property subset `prp...` of one element from a sparse
// source into a sparse destination, applying `op` when the destination point
// already exists and plain replacement when it does not.
// NOTE(review): the struct-name line (original 204), the pos_src/pos_dst
// member declarations (original 212-216) and the constructor (original
// 223-224) were lost from this listing; operator() clearly uses pos_src and
// pos_dst, so recover those members from upstream.
205 {
// source grid
207  const Tsrc & src;
208 
// destination grid
210  Tdst & dst;
211 
214 
217 
// boost::mpl vector of the selected property indexes
219  typedef typename to_boost_vmpl<prp...>::type v_prp;
220 
221 public:
222 
225  {}
226 
228  template<typename T>
229  inline void operator()(T& t) const
230  {
231  typedef typename boost::mpl::at<v_prp,boost::mpl::int_<T::value>>::type idx_type;
232  typedef typename std::remove_reference<decltype(dst.template insert<idx_type::value>(pos_dst))>::type copy_rtype;
233 
// new point: plain replace; existing point: combine with op
234  if (dst.existPoint(pos_dst) == false)
235  {meta_copy_op<replace_,copy_rtype>::meta_copy_op_(src.template get<idx_type::value>(pos_src),dst.template insert<idx_type::value>(pos_dst));}
236  else
237  {meta_copy_op<op,copy_rtype>::meta_copy_op_(src.template get<idx_type::value>(pos_src),dst.template insert<idx_type::value>(pos_dst));}
238  }
239 
240 };
241 
242 
243 
252 template<unsigned int dim, typename mpl_v>
253 struct copy_sz
254 {
256  size_t (& sz)[dim];
257 
258 
264  inline copy_sz(size_t (& sz)[dim])
265  :sz(sz)
266  {
267  };
268 
270  template<typename T>
271  inline void operator()(T& t) const
272  {
273  sz[T::value] = boost::mpl::at<mpl_v,boost::mpl::int_<T::value>>::type::value;
274  }
275 };
276 
277 
278 template<unsigned int N>
// Primary template of the mask loader: fallback for vector widths that have
// no specialization below — it only reports the unsupported size.
// NOTE(review): the struct-name line (original line 279, `struct
// load_mask_impl`) was lost from this listing; the name is established by the
// explicit specializations that follow. Note also that this fallback takes a
// single argument while the specializations take (Vc, mask_sum) — confirm the
// exact signature against upstream.
280 {
281  template<typename Vc_type>
282  static inline void load(Vc_type & Vc)
283  {
284  std::cout << __FILE__ << ":" << __LINE__ << " unknown size " << std::endl;
285  }
286 };
287 
//! declaration of the generic mask loader this specialization belongs to
template<unsigned int N> struct load_mask_impl;

/*! \brief Load 1 mask byte into the first lane of the vector Vc
 *
 * \param Vc destination vector (element-indexable)
 * \param mask_sum source mask bytes, at least 1 entry
 */
template<>
struct load_mask_impl<1>
{
	template<typename Vc_type>
	static inline void load(Vc_type & Vc, unsigned char * mask_sum)
	{
		// loop replaces the hand-unrolled per-lane assignments
		for (int i = 0 ; i < 1 ; i++)
		{Vc[i] = mask_sum[i];}
	}
};
297 
//! declaration of the generic mask loader this specialization belongs to
template<unsigned int N> struct load_mask_impl;

/*! \brief Load 2 mask bytes into the first 2 lanes of the vector Vc
 *
 * \param Vc destination vector (element-indexable)
 * \param mask_sum source mask bytes, at least 2 entries
 */
template<>
struct load_mask_impl<2>
{
	template<typename Vc_type>
	static inline void load(Vc_type & Vc, unsigned char * mask_sum)
	{
		// loop replaces the hand-unrolled per-lane assignments
		for (int i = 0 ; i < 2 ; i++)
		{Vc[i] = mask_sum[i];}
	}
};
308 
//! declaration of the generic mask loader this specialization belongs to
template<unsigned int N> struct load_mask_impl;

/*! \brief Load 4 mask bytes into the first 4 lanes of the vector Vc
 *
 * \param Vc destination vector (element-indexable)
 * \param mask_sum source mask bytes, at least 4 entries
 */
template<>
struct load_mask_impl<4>
{
	template<typename Vc_type>
	static inline void load(Vc_type & Vc, unsigned char * mask_sum)
	{
		// loop replaces the hand-unrolled per-lane assignments
		for (int i = 0 ; i < 4 ; i++)
		{Vc[i] = mask_sum[i];}
	}
};
321 
//! declaration of the generic mask loader this specialization belongs to
template<unsigned int N> struct load_mask_impl;

/*! \brief Load 8 mask bytes into the first 8 lanes of the vector Vc
 *
 * \param Vc destination vector (element-indexable)
 * \param mask_sum source mask bytes, at least 8 entries
 */
template<>
struct load_mask_impl<8>
{
	template<typename Vc_type>
	static inline void load(Vc_type & Vc, unsigned char * mask_sum)
	{
		// loop replaces the hand-unrolled per-lane assignments
		for (int i = 0 ; i < 8 ; i++)
		{Vc[i] = mask_sum[i];}
	}
};
338 
//! declaration of the generic mask loader this specialization belongs to
template<unsigned int N> struct load_mask_impl;

/*! \brief Load 16 mask bytes into the first 16 lanes of the vector Vc
 *
 * \param Vc destination vector (element-indexable)
 * \param mask_sum source mask bytes, at least 16 entries
 */
template<>
struct load_mask_impl<16>
{
	template<typename Vc_type>
	static inline void load(Vc_type & Vc, unsigned char * mask_sum)
	{
		// loop replaces the hand-unrolled per-lane assignments
		for (int i = 0 ; i < 16 ; i++)
		{Vc[i] = mask_sum[i];}
	}
};
363 
364 template<typename Vc_type>
// Build a SIMD-style vector of type Vc_type from an array of mask bytes.
// NOTE(review): the dispatch statement (original line 369 — presumably
// load_mask_impl<Vc_type::Size>::load(v,mask_sum)) was lost from this
// listing; as shown, v would be returned uninitialized. Recover the call
// from the upstream source.
365 inline Vc_type load_mask(unsigned char * mask_sum)
366 {
367  Vc_type v;
368 
370 
371  return v;
372 }
373 
374 template<unsigned int dim,
375  typename T,
376  typename S,
377  typename grid_lin,
378  typename layout,
379  template<typename> class layout_base,
380  typename chunking>
// NOTE(review): the class head line (original 381, `class sgrid_cpu`) was
// lost from this listing; the name is established by the constructors below.
// Also lost: the chunk-position map member (original 392-398, used below as
// `map`), and the header_inf / header_mask / chunks storage members
// (original 402-409). Recover them from the upstream source.
382 {
// next insertion slot in the chunk lookup cache
384  mutable size_t cache_pnt;
385 
// cached linearized chunk positions (-1 = empty slot)
387  mutable long int cache[SGRID_CACHE];
388 
// chunk index associated to each cache slot
390  mutable long int cached_id[SGRID_CACHE];
391 
394 
397 
399 
400  //Definition of the chunks
401  typedef typename v_transform_two_v2<Ft_chunk,boost::mpl::int_<chunking::size::value>,typename T::type>::type chunk_def;
402 
404  //aggregate_bfv<chunk_def> background;
405 
407 
410 
// linearizer over the full grid
412  grid_lin g_sm;
413 
// linearizer over the grid of chunks (grid size divided by chunk size)
415  grid_lin g_sm_shift;
416 
// precomputed in-chunk position of every element of a chunk
418  grid_key_dx<dim> pos_chunk[chunking::size::value];
419 
// size of a chunk in each dimension
421  size_t sz_cnk[dim];
422 
// chunks scheduled for removal (flushed by remove_empty)
423  openfpm::vector<size_t> empty_v;
424 
// true when the NN information has been constructed
426  bool findNN;
427 
430 
// Given a grid point, find the chunk containing it and the position of the
// point inside the chunk.
// NOTE(review): the key-shift statement (original 445) and the sub_id
// computation (original 449) were lost from this listing; as shown kh is
// passed unshifted and sub_id is never assigned. Recover both from upstream.
439  inline void find_active_chunk_from_point(const grid_key_dx<dim> & v1,size_t & active_cnk, short int & sub_id)
440  {
441  grid_key_dx<dim> kh = v1;
442  grid_key_dx<dim> kl;
443 
444  // shift the key
446 
447  find_active_chunk(kh,active_cnk);
448 
450  }
451 
456  template<unsigned int n_ele>
457  inline void remove_from_chunk(size_t sub_id,
458  int & nele,
459  unsigned char (& mask)[n_ele])
460  {
461  nele = (mask[sub_id])?nele-1:nele;
462 
463  mask[sub_id] = 0;
464  }
465 
// Rebuild the (linearized chunk position -> chunk index) map from the chunk
// headers. Chunk 0 is skipped because it holds the background values.
// NOTE(review): the key-shift statement (original 482) was lost from this
// listing; as shown kh would be linearized unshifted. Recover from upstream.
471  inline void reconstruct_map()
472  {
473  // reconstruct map
474 
475  map.clear();
476  for (size_t i = 1 ; i < header_inf.size() ; i++)
477  {
478  grid_key_dx<dim> kh = header_inf.get(i).pos;
479  grid_key_dx<dim> kl;
480 
481  // shift the key
483 
484  long int lin_id = g_sm_shift.LinId(kh);
485 
486  map[lin_id] = i;
487  }
488  }
489 
496  inline void remove_empty()
497  {
498  if (empty_v.size() >= FLUSH_REMOVE)
499  {
500  // eliminate double entry
501 
502  empty_v.sort();
503  empty_v.unique();
504 
505  // Because chunks can be refilled the empty list can contain chunks that are
506  // filled so before remove we have to check that they are really empty
507 
508  for (int i = empty_v.size() - 1 ; i >= 0 ; i--)
509  {
510  if (header_inf.get(empty_v.get(i)).nele != 0)
511  {empty_v.remove(i);}
512  }
513 
514  header_inf.remove(empty_v);
515  header_mask.remove(empty_v);
516  chunks.remove(empty_v);
517 
518  // reconstruct map
519 
520  reconstruct_map();
521 
522  empty_v.clear();
523 
524  // cache must be cleared
525 
526  clear_cache();
527  }
528  }
529 
536  inline void add_on_cache(size_t lin_id, size_t active_cnk) const
537  {
538  // Add on cache the chunk
539  cache[cache_pnt] = lin_id;
540  cached_id[cache_pnt] = active_cnk;
541  cache_pnt++;
542  cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
543  }
544 
549  inline void clear_cache()
550  {
551  cache_pnt = 0;
552  for (size_t i = 0 ; i < SGRID_CACHE ; i++)
553  {cache[i] = -1;}
554  }
555 
// Initialize the chunk-grid linearizer g_sm_shift from the grid size:
// each dimension is divided by the chunk size (rounded up).
// NOTE(review): the statement converting the grid size into chunk counts
// (original 569-570, presumably the chunking shift applied to cs) was lost
// from this listing; as shown, sz_i would be sz[i]+1. Recover from upstream.
562  void set_g_shift_from_size(const size_t (& sz)[dim], grid_lin & g_sm_shift)
563  {
564  grid_key_dx<dim> cs;
565  grid_key_dx<dim> unused;
566 
567  for (size_t i = 0 ; i < dim ; i++)
568  {cs.set_d(i,sz[i]);}
569 
571 
572  size_t sz_i[dim];
573 
574  for (size_t i = 0 ; i < dim ; i++)
575  {sz_i[i] = cs.get(i) + 1;}
576 
577  g_sm_shift.setDimensions(sz_i);
578  }
579 
// Common initialization: reset the cache, fill the chunk-size array and the
// precomputed in-chunk positions, and create the background chunk (index 0).
// NOTE(review): the construction of the copy_sz functor (original 593) and
// of the iterator over the chunk grid (original 595-598, defining `it`) were
// lost from this listing; `cpsz` and `it` below reference them. Recover from
// upstream.
584  void init()
585  {
586  findNN = false;
587 
588  for (size_t i = 0 ; i < SGRID_CACHE ; i++)
589  {cache[i] = -1;}
590 
591  // fill pos_g
592 
594  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(cpsz);
595 
597 
599  size_t cnt = 0;
600 
601  while (it.isNext())
602  {
603  auto key = it.get();
604 
605  for (size_t i = 0 ; i < dim ; i++)
606  {
607  pos_chunk[cnt].set_d(i,key.get(i));
608  }
609 
610  ++cnt;
611  ++it;
612  }
613 
614  // Add the bachground chunk at the begining
615 
616  chunks.add();
617  header_inf.add();
// background chunk position is a sentinel that can never match a real chunk
618  for(int i = 0 ; i < dim ; i++)
619  {header_inf.last().pos.set_d(i,std::numeric_limits<long int>::min());};
620  header_inf.last().nele = 0;
621  header_mask.add();
622 
623  // set the mask to null
624  auto & h = header_mask.last().mask;
625 
626  for (size_t i = 0 ; i < chunking::size::value ; i++)
627  {h[i] = 0;}
628 
629  // set the data to background
630  for (size_t i = 0 ; i < chunking::size::value ; i++)
631  {
632  auto c = chunks.get(0);
633 
634  //copy_bck<decltype(background),decltype(c)> cb(background,c,i);
635 
636  //boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(cb);
637  }
638  }
639 
648  inline void find_active_chunk(const grid_key_dx<dim> & kh,size_t & active_cnk,bool & exist) const
649  {
650  long int lin_id = g_sm_shift.LinId(kh);
651 
652  size_t id = 0;
653  for (size_t k = 0 ; k < SGRID_CACHE; k++)
654  {id += (cache[k] == lin_id)?k+1:0;}
655 
656  if (id == 0)
657  {
658  // we do not have it in cache we check if we have it in the map
659 
660  auto fnd = map.find(lin_id);
661  if (fnd == map.end())
662  {
663  exist = false;
664  active_cnk = 0;
665  return;
666  }
667  else
668  {active_cnk = fnd->second;}
669 
670  // Add on cache the chunk
671  cache[cache_pnt] = lin_id;
672  cached_id[cache_pnt] = active_cnk;
673  cache_pnt++;
674  cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
675  }
676  else
677  {
678  active_cnk = cached_id[id-1];
679  cache_pnt = id;
680  cache_pnt = (cache_pnt == SGRID_CACHE)?0:cache_pnt;
681  }
682 
683  exist = true;
684  }
685 
// Resolve a grid point into (chunk index, in-chunk position, existence flag).
// On a miss, find_active_chunk leaves active_cnk = 0 (the background chunk).
// NOTE(review): the key-shift statement (original 699) and the sub_id
// computation (original 703) were lost from this listing; as shown sub_id is
// never assigned. Recover both from upstream.
693  inline void pre_get(const grid_key_dx<dim> & v1, size_t & active_cnk, size_t & sub_id, bool & exist) const
694  {
695  grid_key_dx<dim> kh = v1;
696  grid_key_dx<dim> kl;
697 
698  // shift the key
700 
701  find_active_chunk(kh,active_cnk,exist);
702 
704  }
705 
// Resolve a grid point for insertion: find or create the containing chunk,
// mark the element as present, and return whether it already existed.
// NOTE(review): three statements were lost from this listing — the key shift
// (original 722), a statement after chunk creation (original 752, presumably
// invalidating findNN), and the sub_id computation (original 776). As shown
// sub_id is read below without being assigned. Recover from upstream.
713  inline bool pre_insert(const grid_key_dx<dim> & v1, size_t & active_cnk, size_t & sub_id)
714  {
715  bool exist = true;
716  active_cnk = 0;
717 
718  grid_key_dx<dim> kh = v1;
719  grid_key_dx<dim> kl;
720 
721  // shift the key
723 
724  long int lin_id = g_sm_shift.LinId(kh);
725 
// cache scan: id = (slot + 1) of the matching entry, 0 when no slot matches
726  size_t id = 0;
727  for (size_t k = 0 ; k < SGRID_CACHE; k++)
728  {id += (cache[k] == lin_id)?k+1:0;}
729 
730  if (id == 0)
731  {
732  // we do not have it in cache we check if we have it in the map
733 
734  auto fnd = map.find(lin_id);
735  if (fnd == map.end())
736  {
737  // we do not have it in the map create a chunk
738 
739  map[lin_id] = chunks.size();
740  chunks.add();
741  header_inf.add();
742  header_inf.last().pos = kh;
743  header_inf.last().nele = 0;
744  header_mask.add();
745 
746  // set the mask to null
747  auto & h = header_mask.last().mask;
748 
749  for (size_t i = 0 ; i < chunking::size::value ; i++)
750  {h[i] = 0;}
751 
753 
754  active_cnk = chunks.size() - 1;
755  }
756  else
757  {
758  // we have it in the map
759 
760  active_cnk = fnd->second;
761  }
762 
763  // Add on cache the chunk
764  cache[cache_pnt] = lin_id;
765  cached_id[cache_pnt] = active_cnk;
766  cache_pnt++;
767  cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
768  }
769  else
770  {
771  active_cnk = cached_id[id-1];
772  cache_pnt = id;
773  cache_pnt = (cache_pnt == SGRID_CACHE)?0:cache_pnt;
774  }
775 
777 
778  // the chunk is in cache, solve
779 
780  // we notify that we added one element
781  auto & hc = header_inf.get(active_cnk);
782  auto & hm = header_mask.get(active_cnk);
783 
// count the point only if it was not already present, then set its mask bit
784  exist = hm.mask[sub_id];
785  hc.nele = (exist)?hc.nele:hc.nele + 1;
786  hm.mask[sub_id] |= 1;
787 
788  return exist;
789  }
790 
791  inline void remove_point(const grid_key_dx<dim> & v1)
792  {
793  bool exist;
794  size_t active_cnk = 0;
795  size_t sub_id;
796 
797  pre_get(v1,active_cnk,sub_id,exist);
798 
799  if (exist == false)
800  {return;}
801 
802  // eliminate the element
803 
804  auto & hm = header_mask.get(active_cnk);
805  auto & hc = header_inf.get(active_cnk);
806  unsigned char swt = hm.mask[sub_id];
807 
808  hc.nele = (swt)?hc.nele-1:hc.nele;
809 
810  hm.mask[sub_id] = 0;
811 
812  if (hc.nele == 0 && swt != 0)
813  {
814  // Add the chunks in the empty list
815  empty_v.add(active_cnk);
816  }
817  }
818 
819 public:
820 
// marker typedef: identifies this type as a grid to the rest of OpenFPM
822  typedef int yes_i_am_grid;
823 
// NOTE(review): several typedef lines were lost from this listing (original
// 824-827 and 830-838), among them — judging from the uses below — a `self`
// typedef for this class and likely a background/base type. Recover from
// upstream.
826 
// number of dimensions of the grid
828  static constexpr unsigned int dims = dim;
829 
// aggregate type stored at each grid point
832  typedef T value_type;
833 
836 
839 
840  typedef layout_base<T> memory_traits;
841 
842  typedef chunking chunking_type;
843 
844  typedef grid_lin linearizer_type;
845 
// Default constructor: empty sparse grid, cache reset by init().
850  inline sgrid_cpu()
851  :cache_pnt(0)
852  {
853  init();
854  }
855 
// Copy constructor: delegates to operator=.
861  inline sgrid_cpu(const sgrid_cpu & g) THROW
862  {
863  this->operator=(g);
864  }
865 
// "Move" constructor: also delegates to copy-assignment (the const rvalue
// reference cannot actually be moved from).
871  inline sgrid_cpu(const sgrid_cpu && g) THROW
872  {
873  this->operator=(g);
874  }
875 
// Construct a sparse grid of the given size.
// NOTE(review): the statement computing the chunk grid (original 885-886,
// presumably set_g_shift_from_size(sz,g_sm_shift)) was lost from this
// listing; recover it from upstream.
881  sgrid_cpu(const size_t (& sz)[dim])
882  :cache_pnt(0),g_sm(sz)
883  {
884  // calculate the chunks grid
885 
887 
888  // fill pos_g
889 
890  init();
891  }
892 
// Set the background value for property p: every slot of chunk 0 (the
// background chunk) is overwritten with val.
// NOTE(review): the first part of the copy statement (original 903,
// presumably the `meta_copy<...>::` qualification of meta_copy_) was lost
// from this listing; recover it from upstream.
898  template<unsigned int p>
899  void setBackgroundValue(const typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type & val)
900  {
901  for (int i = 0 ; i < chunking::size::value ; i++)
902  {
904  meta_copy_(val,
905  get_selector<typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type>::template get<p>(chunks,0,i));
906  }
907  }
908 
914  sparse_grid_bck_value<typename std::remove_reference<decltype(chunks.get(0))>::type> getBackgroundValue()
915  {
916  return sparse_grid_bck_value<typename std::remove_reference<decltype(chunks.get(0))>::type>(chunks.get(0));
917  }
918 
// No-op stub on the CPU implementation: header unpacking is only meaningful
// for the GPU sparse grid; kept so the two implementations share an interface.
922  template<typename pointers_type,
923  typename headers_type,
924  typename result_type,
925  unsigned int ... prp >
926  static void unpack_headers(pointers_type & pointers, headers_type & headers, result_type & result, int n_slot)
927  {}
928 
// No-op stub: see unpack_headers above.
929  template<unsigned int ... prp, typename S2, typename header_type, typename ite_type, typename context_type>
930  void unpack_with_headers(ExtPreAlloc<S2> & mem,
931  ite_type & sub_it,
932  header_type & headers,
933  int ih,
934  Unpack_stat & ps,
935  context_type &context,
936  rem_copy_opt opt = rem_copy_opt::NONE_OPT)
937  {}
938 
// Return the raw aggregate holding the background values (chunk 0).
944  auto getBackgroundValueAggr() -> decltype(chunks.get(0))
945  {
946  return chunks.get(0);
947  }
948 
// This implementation stores data compressed (chunked sparse storage).
954  static constexpr bool isCompressed()
955  {
956  return true;
957  }
958 
// Insert the point v1 and return the encapsulated object of its chunk;
// ele_id is set to the in-chunk position of the point.
964  inline auto insert_o(const grid_key_dx<dim> & v1, size_t & ele_id) -> decltype(chunks.get_o(0))
965  {
966  size_t active_cnk;
967 
968  pre_insert(v1,active_cnk,ele_id);
969 
970  return chunks.get_o(active_cnk);
971  }
972 
980  template <unsigned int p, typename r_type=decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,0,0))>
981  inline r_type insert(const grid_key_dx<dim> & v1)
982  {
983  size_t active_cnk = 0;
984  size_t ele_id = 0;
985 
986  pre_insert(v1,active_cnk,ele_id);
987 
988  return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,active_cnk,ele_id);
989  }
990 
998  template <unsigned int p, typename r_type=decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,0,0))>
999  inline r_type insert(const grid_key_sparse_lin_dx & v1)
1000  {
1001  size_t active_cnk = v1.getChunk();
1002  size_t sub_id = v1.getPos();
1003 
1004  // the chunk is in cache, solve
1005 
1006  // we notify that we added one element
1007  auto & hm = header_mask.get(active_cnk);
1008  auto & hc = header_inf.get(active_cnk);
1009 
1010  // we set the mask
1011  hc.nele = (hm.mask[sub_id] & 1)?hc.nele:hc.nele + 1;
1012  hm.mask[sub_id] |= 1;
1013 
1014  return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,active_cnk,sub_id);
1015  }
1016 
1024  template <unsigned int p>
1025  inline auto get(const grid_key_dx<dim> & v1) const -> decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,0))
1026  {
1027  bool exist;
1028  size_t active_cnk;
1029  size_t sub_id;
1030 
1031  pre_get(v1,active_cnk,sub_id,exist);
1032 
1033  if (exist == false)
1034  {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1035 
1036  // we check the mask
1037  auto & hm = header_mask.get(active_cnk);
1038 
1039  if ((hm.mask[sub_id] & 1) == 0)
1040  {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1041 
1042  return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);
1043  }
1044 
// NOTE(review): the declaration of this member (original lines 1045-1050)
// was lost from this listing; only the body remains. It is a boolean query
// that always returns false — recover the signature from upstream.
1051  {return false;}
1052 
1060  template <unsigned int p>
1061  inline auto get(const grid_key_dx<dim> & v1) -> decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,0))
1062  {
1063  bool exist;
1064  size_t active_cnk;
1065  size_t sub_id;
1066 
1067  pre_get(v1,active_cnk,sub_id,exist);
1068 
1069  if (exist == false)
1070  {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);}
1071 
1072  // we check the mask
1073  auto & hc = header_inf.get(active_cnk);
1074  auto & hm = header_mask.get(active_cnk);
1075 
1076  if ((hm.mask[sub_id] & 1) == 0)
1077  {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1078 
1079  return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);
1080  }
1081 
1089  inline bool existPoint(const grid_key_dx<dim> & v1) const
1090  {
1091  bool exist;
1092  size_t active_cnk;
1093  size_t sub_id;
1094 
1095  pre_get(v1,active_cnk,sub_id,exist);
1096 
1097  if (exist == false)
1098  {return false;}
1099 
1100  // we check the mask
1101  auto & hm = header_mask.get(active_cnk);
1102 
1103  if ((hm.mask[sub_id] & 1) == 0)
1104  {return false;}
1105 
1106  return true;
1107  }
1108 
1116  template <unsigned int p>
1117  inline auto get(const grid_key_sparse_lin_dx & v1) -> decltype(chunks.template get<p>(0)[0])
1118  {
1119  return chunks.template get<p>(v1.getChunk())[v1.getPos()];
1120  }
1121 
// Return the whole chunk (block) addressed by a sparse key.
1129  inline auto getBlock(const grid_key_sparse_lin_dx & v1) const -> decltype(chunks.get(0))
1130  {
1131  return chunks.get(v1.getChunk());
1132  }
1133 
// Per-point flags are not stored by this implementation: always 0.
1141  inline unsigned char getFlag(const grid_key_dx<dim> & v1) const
1142  {
1143  return 0;
1144  }
1145 
// NOTE(review): the return-type lines of the two getIterator overloads
// (original 1146-1151 and 1157-1162) and their bodies (original 1154 and
// 1165, presumably constructing the sparse iterators over header_mask /
// header_inf) were lost from this listing; recover them from upstream.
1152  getIterator(size_t opt = 0) const
1153  {
1155  }
1156 
1163  getIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop, size_t opt = 0) const
1164  {
1166  }
1167 
// Block iterator over the sub-domain [start,stop].
// NOTE(review): the parameter-list line (original 1179, presumably
// `getBlockIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> &
// stop)`) was lost from this listing.
1177  template<unsigned int stencil_size = 0>
1178  grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,self,chunking>
1180  {
1181  return grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,self,chunking>(*this,start,stop);
1182  }
1183 
1191  const grid_lin & getGrid() const
1192  {
1193  return g_sm;
1194  }
1195 
1201  void remove(const grid_key_dx<dim> & v1)
1202  {
1203  remove_point(v1);
1204  remove_empty();
1205  }
1206 
// NOTE(review): the declarations of the following three members (original
// 1207-1212, 1217-1225 and 1230-1235) were lost from this listing; only the
// bodies remain. From the bodies they appear to be: a remove-without-flush
// variant taking v1, a stencil-templated no-op (GPU-interface stub), and a
// flush-style member that compacts the scheduled empty chunks. Recover the
// signatures from upstream.
1213  {
1214  remove_point(v1);
1215  }
1216 
1224  template<typename stencil_type>
1226  {
1227 
1228  }
1229 
1236  {
1237  remove_empty();
1238  }
1239 
// Resize the sparse grid. Growing only requires updating the linearizers and
// rebuilding the map; shrinking additionally crops or removes the chunks that
// fall (partially) outside the new domain.
// NOTE(review): two lines were lost from this listing — original 1265
// (presumably set_g_shift_from_size(sz,g_sm_shift)) and original 1293 (the
// declaration of `rmh`, the list of chunks to remove, used below). Recover
// both from upstream.
1248  void resize(const size_t (& sz)[dim])
1249  {
1250  bool is_bigger = true;
1251 
1252  // we check if we are resizing bigger, because if is the case we do not have to do
1253  // much
1254 
1255  for (size_t i = 0 ; i < dim ; i++)
1256  {
1257  if (sz[i] < g_sm.size(i))
1258  {is_bigger = false;}
1259  }
1260 
1261  g_sm.setDimensions(sz);
1262 
1263  // set g_sm_shift
1264 
1266 
1267  clear_cache();
1268 
1269  if (is_bigger == true)
1270  {
1271 
1272  // if we resize bigger we do not have to do anything in the headers
1273  // and in the chunks we just have to update g_sm and reset the cache
1274  // and reconstruct the map. So we reconstruct the map and we just
1275  // finish
1276 
1277  reconstruct_map();
1278 
1279  return;
1280  }
1281 
1282  // create a box that is as big as the grid
1283 
1284  Box<dim,size_t> gs_box;
1285 
1286  for (size_t i = 0 ; i < dim ; i++)
1287  {
1288  gs_box.setLow(i,0);
1289  gs_box.setHigh(i,g_sm.size(i));
1290  }
1291 
1292  // we take a list of all chunks to remove
1294 
1295  // in this case we have to crop data, we go through all the headers
1296 
1297  for (size_t i = 1 ; i < header_inf.size() ; i++)
1298  {
1299  Box<dim,size_t> cnk;
1300 
1301  for (size_t j = 0 ; j < dim ; j++)
1302  {
1303  cnk.setLow(j,header_inf.get(i).pos.get(j));
1304  cnk.setHigh(j,sz_cnk[j] + header_inf.get(i).pos.get(j));
1305  }
1306 
1307  // if the chunk is not fully contained in the new smaller sparse grid
1308  // we have to crop it
1309  if (!cnk.isContained(gs_box))
1310  {
1311  // We check if the chunks is fully out or only partially in
1312  // cheking the intersection between the new grid and the box
1313  // enclosing the chunk as it was before
1314  Box<dim,size_t> inte;
1315 
1316  if (gs_box.Intersect(cnk,inte))
1317  {
1318  // part of the chunk is in, part is out
1319 
1320  // shift P1 to the origin
1321  // this is the valid box everything out must me reset
1322  inte -= inte.getP1();
1323 
1324  int mask_nele;
1325  short unsigned int mask_it[chunking::size::value];
1326 
1327  auto & mask = header_mask.get(i).mask;
1328  auto & n_ele = header_inf.get(i).nele;
1329 
1330  // ok so the box is not fully contained so we must crop data
1331 
1332  fill_mask(mask_it,mask,mask_nele);
1333 
1334  // now we have the mask of all the filled elements
1335 
1336  for (size_t j = 0 ; j < mask_nele ; j++)
1337  {
1338  if (!inte.isInside(pos_chunk[mask_it[j]].toPoint()))
1339  {
1340  // if is not inside, the point must be deleted
1341 
1342  remove_from_chunk<chunking::size::value>(mask_it[j],n_ele,mask);
1343  }
1344  }
1345  }
1346  else
1347  {
1348  // the chunk is completely out and must be removed completely
1349  // we add it to the list of the chunks to remove
1350 
1351  rmh.add(i);
1352  }
1353  }
1354  }
1355 
1356  header_inf.remove(rmh,0);
1357  header_mask.remove(rmh,0);
1358  chunks.remove(rmh,0);
1359 
1360  reconstruct_map();
1361  }
1362 
1373  template<int ... prp> static inline size_t packMem(size_t n, size_t e)
1374  {
1375  if (sizeof...(prp) == 0)
1376  {return n * sizeof(typename T::type);}
1377 
1378  typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
1379 
1380  return n * sizeof(prp_object);
1381  }
1382 
// No-op stub: packing state reset is only meaningful on the GPU sparse grid;
// kept so the two implementations share an interface.
1388  void packReset()
1389  {}
1390 
// No-op stub: see packReset above.
1399  template<int ... prp, typename context_type> inline
1400  void packCalculate(size_t & req, const context_type & context)
1401  {}
// Accumulate into `req` the number of bytes needed to pack the whole grid:
// one size_t for the chunk count, dim size_t for the grid size, the payload
// of every filled point, and per-chunk header data (mask, position, count).
// NOTE(review): the condition line (original 1436, presumably
// `if (has_pack_agg<prp...>::result::value == false)`) was lost from this
// listing; the if/else below depends on it. Recover from upstream.
1412  template<int ... prp> inline
1413  void packRequest(size_t & req) const
1414  {
1415  grid_sm<dim,void> gs_cnk(sz_cnk);
1416 
1417  // For sure we have to pack the number of chunk we want to pack
1418 
1419  req += sizeof(size_t);
1420  req += dim*sizeof(size_t);
1421 
1422  // Here we have to calculate the number of points to pack (skip the background)
1423 
1424  for (size_t i = 1 ; i < header_inf.size() ; i++)
1425  {
1426  auto & hm = header_mask.get(i);
1427 
1428  int mask_nele;
1429  short unsigned int mask_it[chunking::size::value];
1430 
1431  fill_mask(mask_it,hm.mask,mask_nele);
1432 
1433  for (size_t j = 0 ; j < mask_nele ; j++)
1434  {
1435  // If all of the aggregate properties do not have a "pack()" member
1437  {
1438  // here we count how many chunks must be sent
1439 
1440  size_t alloc_ele = this->packMem<prp...>(1,0);
1441  req += alloc_ele;
1442  }
1443  else
1444  {
1445  //Call a pack request
1446  call_aggregatePackRequestChunking<decltype(chunks.get_o(i)),
1447  S,prp ... >
1448  ::call_packRequest(chunks.get_o(i),mask_it[j],req);
1449  }
1450  }
1451 
1452  // There are point to send. So we have to save the mask chunk
1453  req += sizeof(header_mask.get(i).mask);
1454  // the chunk position
1455  req += sizeof(header_inf.get(i).pos);
1456  // and the number of element
1457  req += sizeof(header_inf.get(i).nele);
1458  }
1459  }
1460 
// NOTE(review): the declaration of this member (original lines 1461-1466)
// was lost from this listing; only an empty body remains. Recover the
// signature from upstream.
1467  {
1468  }
1469 
// Accumulate into `req` the bytes needed to pack only the points falling in
// the sub-domain described by `sub_it`: chunk count, grid size, payload of
// every selected point, and per-chunk header data when the chunk contributes.
// NOTE(review): three lines were lost from this listing — the first line of
// the signature (original 1480, declaring the sub-iterator parameter
// `sub_it`), the creation of the iterator `sit` over `inte` (original 1527),
// and the has_pack_agg condition (original 1538). Recover from upstream.
1479  template<int ... prp> inline
1481  size_t & req) const
1482  {
1483  grid_sm<dim,void> gs_cnk(sz_cnk);
1484 
1485  // For sure we have to pack the number of chunk we want to pack
1486 
1487  req += sizeof(size_t);
1488  req += dim*sizeof(size_t);
1489 
1490  // Here we have to calculate the number of points to pack
1491 
1492  Box<dim,size_t> section_to_pack;
1493 
1494  for (size_t i = 0; i < dim ; i++)
1495  {
1496  section_to_pack.setLow(i,sub_it.getStart().get(i));
1497  section_to_pack.setHigh(i,sub_it.getStop().get(i));
1498  }
1499 
1500  for (size_t i = 0 ; i < header_inf.size() ; i++)
1501  {
1502  auto & hm = header_mask.get(i);
1503 
1504  Box<dim,size_t> bc;
1505 
1506  for (size_t j = 0 ; j < dim ; j++)
1507  {
1508  bc.setLow(j,header_inf.get(i).pos.get(j));
1509  bc.setHigh(j,header_inf.get(i).pos.get(j) + sz_cnk[j] - 1);
1510  }
1511 
1512  // now we intersect the chunk box with the box
1513 
1514  Box<dim,size_t> inte;
1515  bool stp = bc.Intersect(section_to_pack,inte);
1516 
1517  if (stp == true)
1518  {
1519  // If it is intersect ok we have to check if there are points to pack
1520  // we shift inte to be relative to the chunk origin
1521 
1522  inte -= header_inf.get(i).pos.toPoint();
1523 
1524  // we iterate all the points
1525 
1526  size_t old_req = req;
1528 
1529  while (sit.isNext())
1530  {
1531  auto key = sit.get();
1532 
1533  size_t sub_id = gs_cnk.LinId(key);
1534 
1535  if (hm.mask[sub_id] & 1)
1536  {
1537  // If all of the aggregate properties do not have a "pack()" member
1539  {
1540  // here we count how many chunks must be sent
1541 
1542  size_t alloc_ele = this->packMem<prp...>(1,0);
1543  req += alloc_ele;
1544  }
1545  //If at least one property has "pack()"
1546  else
1547  {
1548  //Call a pack request
1549  call_aggregatePackRequestChunking<decltype(chunks.get_o(i)),
1550  S,prp ... >
1551  ::call_packRequest(chunks.get_o(i),sub_id,req);
1552  }
1553  }
1554 
1555  ++sit;
1556  }
1557 
1558  if (old_req != req)
1559  {
1560  // There are point to send. So we have to save the mask chunk
1561  req += sizeof(header_mask.get(i));
1562  // the chunk position
1563  req += sizeof(header_inf.get(i).pos);
1564  // and the number of element
1565  req += sizeof(header_inf.get(i).nele);
1566  }
1567  }
1568  }
1569  }
1570 
// Serialize into `mem` the points of this grid that fall in the sub-domain
// described by `sub_it`. Layout: [number of chunks][grid size][per contributing
// chunk: mask, relative position, element count, packed point payloads].
// Headers are written retroactively (via shift_backward/shift_forward) only
// for chunks that actually contributed points.
// NOTE(review): two lines were lost from this listing — the second line of
// the signature (original 1581, declaring the sub-iterator parameter
// `sub_it`) and the creation of the iterator `sit` over `inte` (original
// 1648). Recover both from upstream.
1580  template<int ... prp> void pack(ExtPreAlloc<S> & mem,
1582  Pack_stat & sts)
1583  {
1584  grid_sm<dim,void> gs_cnk(sz_cnk);
1585 
1586  // Here we allocate a size_t that indicate the number of chunk we are packing,
1587  // because we do not know a priory, we will fill it later
1588 
1589  mem.allocate(sizeof(size_t));
1590  size_t * number_of_chunks = (size_t *)mem.getPointer();
1591 
1592  // Pack the size of the grid
1593 
1594  for (size_t i = 0 ; i < dim ; i++)
1595  {Packer<size_t,S>::pack(mem,getGrid().size(i),sts);}
1596 
1597  // Here we have to calculate the number of points to pack
1598 
1599  Box<dim,size_t> section_to_pack;
1600 
1601  for (size_t i = 0; i < dim ; i++)
1602  {
1603  section_to_pack.setLow(i,sub_it.getStart().get(i));
1604  section_to_pack.setHigh(i,sub_it.getStop().get(i));
1605  }
1606 
1607  size_t n_packed_chunk = 0;
1608 
1609  for (size_t i = 0 ; i < header_inf.size() ; i++)
1610  {
1611  auto & hc = header_inf.get(i);
1612  auto & hm = header_mask.get(i);
1613 
1614  Box<dim,size_t> bc;
1615 
1616  for (size_t j = 0 ; j < dim ; j++)
1617  {
1618  bc.setLow(j,hc.pos.get(j));
1619  bc.setHigh(j,hc.pos.get(j) + sz_cnk[j] - 1);
1620  }
1621 
1622  // now we intersect the chunk box with the box
1623 
1624  Box<dim,size_t> inte;
1625  bool stp = bc.Intersect(section_to_pack,inte);
1626 
1627  if (stp == true)
1628  {
1629  // This flag indicate if something has been packed from this chunk
1630  bool has_packed = false;
1631 
1632  unsigned char mask_to_pack[chunking::size::value];
1633  memset(mask_to_pack,0,sizeof(mask_to_pack));
1634  mem.allocate_nocheck(sizeof(header_mask.get(i)) + sizeof(header_inf.get(i).pos) + sizeof(header_inf.get(i).nele));
1635 
1636  // here we get the pointer of the memory in case we have to pack the header
1637  // and we also shift the memory pointer by an offset equal to the header
1638  // to pack
1639  unsigned char * ptr_start = (unsigned char *)mem.getPointer();
1640 
1641  // If it is intersect ok we have to check if there are points to pack
1642  // we shift inte intp the chunk origin
1643 
1644  inte -= hc.pos.toPoint();
1645 
1646  // we iterate all the points
1647 
1649 
1650  while (sit.isNext())
1651  {
1652  auto key = sit.get();
1653 
1654  size_t sub_id = gs_cnk.LinId(key);
1655 
1656  if (hm.mask[sub_id] & 1)
1657  {
1658  Packer<decltype(chunks.get_o(i)),
1659  S,
1660  PACKER_ENCAP_OBJECTS_CHUNKING>::template pack<T,prp...>(mem,chunks.get_o(i),sub_id,sts);
1661 
1662  mask_to_pack[sub_id] |= 1;
1663  has_packed = true;
1664 
1665  }
1666 
1667  ++sit;
1668  }
1669 
1670  if (has_packed == true)
1671  {
1672  unsigned char * ptr_final = (unsigned char *)mem.getPointer();
1673  unsigned char * ptr_final_for = (unsigned char *)mem.getPointerEnd();
1674 
1675  // Ok we packed something so we have to pack the header
1676  size_t shift = ptr_final - ptr_start;
1677 
1678  mem.shift_backward(shift);
1679 
1680  // The position of the chunks
1681 
1682  grid_key_dx<dim> pos = header_inf.get(i).pos - sub_it.getStart();
1683 
1684  Packer<decltype(header_mask.get(i).mask),S>::pack(mem,mask_to_pack,sts);
1685  Packer<decltype(header_inf.get(i).pos),S>::pack(mem,pos,sts);
1686  Packer<decltype(header_inf.get(i).nele),S>::pack(mem,header_inf.get(i).nele,sts);
1687 
1688  size_t shift_for = ptr_final_for - (unsigned char *)mem.getPointer();
1689 
1690  mem.shift_forward(shift_for);
1691 
1692  n_packed_chunk++;
1693  }
1694  else
1695  {
1696  // This just reset the last allocation
1697  mem.shift_backward(0);
1698  }
1699  }
1700  }
1701 
1702  // Now we fill the number of packed chunks
1703  *number_of_chunks = n_packed_chunk;
1704  }
1705 
1712  {}
1713 
	/*! \brief It does nothing
	 *
	 * Present to satisfy the common grid interface.
	 *
	 */
	void resetFlush()
	{}
1720 
	/*! \brief In this case it does nothing
	 *
	 * Present to satisfy the common grid interface.
	 *
	 * \param ctx context (unused)
	 * \param opt options (unused)
	 *
	 */
	template<unsigned int ... prp, typename context_type>
	void removeAddUnpackFinalize(const context_type & ctx, int opt)
	{}
1731 
1732 
	/*! \brief In this case it does nothing
	 *
	 * Present to satisfy the common grid interface.
	 *
	 * \param ctx context (unused)
	 * \param opt options (unused)
	 *
	 */
	template<unsigned int ... prp, typename context_type>
	void removeCopyToFinalize(const context_type & ctx, int opt)
	{}
1743 
1750  {
1751  return false;
1752  }
1753 
	/*! \brief Finalize the pack of this object. In this case it does nothing
	 *
	 * \param mem preallocated memory (unused)
	 * \param sts pack statistic (unused)
	 * \param opt options (unused)
	 * \param is_pack_remote indicate the pack is remote (unused)
	 *
	 */
	template<int ... prp> void packFinalize(ExtPreAlloc<S> & mem, Pack_stat & sts, int opt, bool is_pack_remote)
	{}
1766 
1776  template<int ... prp> void pack(ExtPreAlloc<S> & mem,
1777  Pack_stat & sts) const
1778  {
1779  grid_sm<dim,void> gs_cnk(sz_cnk);
1780 
1781  // Here we allocate a size_t that indicate the number of chunk we are packing,
1782  // because we do not know a priory, we will fill it later
1783 
1784  Packer<size_t,S>::pack(mem,header_inf.size()-1,sts);
1785 
1786  for (size_t i = 0 ; i < dim ; i++)
1787  {Packer<size_t,S>::pack(mem,getGrid().size(i),sts);}
1788 
1789  // Here we pack the memory (skip the first background chunk)
1790 
1791  for (size_t i = 1 ; i < header_inf.size() ; i++)
1792  {
1793  auto & hm = header_mask.get(i);
1794  auto & hc = header_inf.get(i);
1795 
1796  Packer<decltype(hm.mask),S>::pack(mem,hm.mask,sts);
1797  Packer<decltype(hc.pos),S>::pack(mem,hc.pos,sts);
1798  Packer<decltype(hc.nele),S>::pack(mem,hc.nele,sts);
1799 
1800  // we iterate all the points
1801 
1802  int mask_nele;
1803  short unsigned int mask_it[chunking::size::value];
1804 
1805  fill_mask(mask_it,hm.mask,mask_nele);
1806 
1807  for (size_t j = 0 ; j < mask_nele ; j++)
1808  {
1809  Packer<decltype(chunks.get_o(i)),
1810  S,
1811  PACKER_ENCAP_OBJECTS_CHUNKING>::template pack<T,prp...>(mem,chunks.get_o(i),mask_it[j],sts);
1812  };
1813  }
1814  }
1815 
	/*! \brief It does materially nothing
	 *
	 * Present to satisfy the common grid interface.
	 *
	 */
	void setMemory()
	{}
1821 
1822 
1830  size_t size() const
1831  {
1832  size_t tot = 0;
1833 
1834  for (size_t i = 1 ; i < header_inf.size() ; i++)
1835  {
1836  tot += header_inf.get(i).nele;
1837  }
1838 
1839  return tot;
1840  }
1841 
	/*! \brief Return the total capacity across all chunks
	 *
	 * NOTE(review): the function body appears to have been lost in this copy
	 * of the file (no return statement) — presumably it returned
	 * header_inf.size()*chunking::size::value; verify against the repository.
	 *
	 */
	size_t size_all() const
	{
	}
1853 
	/*! \brief Remove all the points inside the given region
	 *
	 * NOTE(review): the declaration of the chunk-local iterator sit appears to
	 * have been lost in this copy of the file — verify against the repository.
	 *
	 * \param section_to_delete region whose points are removed
	 *
	 */
	void remove(Box<dim,long int> & section_to_delete)
	{
		grid_sm<dim,void> gs_cnk(sz_cnk);

		for (size_t i = 0 ; i < header_inf.size() ; i++)
		{
			auto & hm = header_mask.get(i);
			auto & hc = header_inf.get(i);

			// bounding box of chunk i in grid coordinates
			Box<dim,size_t> bc;

			for (size_t j = 0 ; j < dim ; j++)
			{
				bc.setLow(j,hc.pos.get(j));
				bc.setHigh(j,hc.pos.get(j) + sz_cnk[j] - 1);
			}

			// now we intersect the chunk box with the box

			Box<dim,size_t> inte;
			bool stp = bc.Intersect(section_to_delete,inte);

			if (stp == true)
			{
				// If it is intersect ok we have to check if there are points to remove
				// we shift inte into the chunk origin

				inte -= header_inf.get(i).pos.toPoint();

				// we iterate all the points

				while (sit.isNext())
				{
					auto key = sit.get();

					size_t sub_id = gs_cnk.LinId(key);

					// whether the point existed before the removal
					unsigned char swt = header_mask.get(i).mask[sub_id];

					// decrement the element counter only for points that existed
					hc.nele = (swt)?hc.nele-1:hc.nele;
					hm.mask[sub_id] = 0;

					if (hc.nele == 0 && swt != 0)
					{
						// Add the chunks in the empty list
						empty_v.add(i);
					}

					++sit;
				}
			}
		}

		// deallocate the chunks that became empty
		remove_empty();
	}
1916 
1917  void copy_to(const self & grid_src,
1918  const Box<dim,size_t> & box_src,
1919  const Box<dim,size_t> & box_dst)
1920  {
1921 /* auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
1922 
1923  while (it.isNext())
1924  {
1925  auto key_src = it.get();
1926  grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
1927  key_dst -= box_src.getKP1();
1928 
1929  typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
1930 
1931  copy_sparse_to_sparse<dim,gcopy,gcopy> caps(grid_src,*this,key_src,key_dst);
1932  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(caps);
1933 
1934  ++it;
1935  }*/
1936 
1937  auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
1938 
1939  while (it.isNext())
1940  {
1941  auto key_src = it.get();
1942  grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
1943  key_dst -= box_src.getKP1();
1944  auto key_src_s = it.getKeyF();
1945 
1946  typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
1947 
1948  size_t pos_src_id = key_src_s.getPos();
1949  size_t pos_dst_id;
1950 
1953  auto block_dst = this->insert_o(key_dst,pos_dst_id);
1954 
1955  auto block_src = grid_src.getBlock(key_src_s);
1956 
1957  copy_sparse_to_sparse_bb<dim,decltype(block_src),decltype(block_dst),T> caps(block_src,block_dst,pos_src_id,pos_dst_id);
1958  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(caps);
1959 
1960  ++it;
1961  }
1962 
1963 // copy_remove_to_impl(grid_src,*this,box_src,box_dst);
1964  }
1965 
1966  template<template <typename,typename> class op, unsigned int ... prp >
1967  void copy_to_op(const self & grid_src,
1968  const Box<dim,size_t> & box_src,
1969  const Box<dim,size_t> & box_dst)
1970  {
1971  auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
1972 
1973  while (it.isNext())
1974  {
1975  auto key_src = it.get();
1976  grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
1977  key_dst -= box_src.getKP1();
1978 
1979  typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
1980 
1981  copy_sparse_to_sparse_op<op,dim,gcopy,gcopy,prp ...> caps(grid_src,*this,key_src,key_dst);
1982  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,sizeof...(prp)> >(caps);
1983 
1984  ++it;
1985  }
1986  }
1987 
1997  size_t getChunk(grid_key_dx<dim> & v1, bool & exist)
1998  {
1999  size_t act_cnk = chunks.size()-1;
2000 
2001  find_active_chunk(v1,act_cnk,exist);
2002 
2003  return act_cnk;
2004  }
2005 
2014  {
2015  grid_key_dx<dim> kl;
2016  grid_key_dx<dim> kh = header_inf.get(chunk_id).pos;
2017 
2018  // shift the key
2020 
2021  return kh;
2022  }
2023 
	/*! \brief apply a convolution using the stencil N
	 *
	 * \param stencil stencil offsets
	 * \param start convolution start point
	 * \param stop convolution stop point
	 * \param func lambda applied on the stencil values
	 * \param args additional arguments (not forwarded to conv_impl — TODO confirm)
	 *
	 */
	template<unsigned int prop_src, unsigned int prop_dst, unsigned int stencil_size, unsigned int N, typename lambda_f, typename ... ArgsT >
	void conv(int (& stencil)[N][dim], grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
	{
		// one neighbor slot per star neighbor of each chunk
		NNlist.resize(NNStar_c<dim>::nNN * chunks.size());

		// the first call builds the neighbor list (findNN == false),
		// subsequent calls reuse it
		if (findNN == false)
		{conv_impl<dim>::template conv<false,NNStar_c<dim>,prop_src,prop_dst,stencil_size>(stencil,start,stop,*this,func);}
		else
		{conv_impl<dim>::template conv<true,NNStar_c<dim>,prop_src,prop_dst,stencil_size>(stencil,start,stop,*this,func);}

		findNN = true;
	}
2040 
	/*! \brief apply a cross-shaped convolution from start to stop
	 *
	 * \param start convolution start point
	 * \param stop convolution stop point
	 * \param func lambda applied on the stencil values
	 * \param args additional arguments (not forwarded to conv_impl — TODO confirm)
	 *
	 */
	template<unsigned int prop_src, unsigned int prop_dst, unsigned int stencil_size, typename lambda_f, typename ... ArgsT >
	void conv_cross(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
	{
		// a cross stencil has 2*dim neighbors per chunk
		NNlist.resize(2*dim * chunks.size());

		// the first call builds the neighbor list, subsequent calls reuse it
		if (findNN == false)
		{conv_impl<dim>::template conv_cross<false,prop_src,prop_dst,stencil_size>(start,stop,*this,func);}
		else
		{conv_impl<dim>::template conv_cross<true,prop_src,prop_dst,stencil_size>(start,stop,*this,func);}

		findNN = true;
	}
2061 
	/*! \brief apply a cross-shaped convolution (ids variant) from start to stop
	 *
	 * Only usable with the SOA layout; with any other layout an error message
	 * is printed (note: execution still continues — no exception is thrown).
	 *
	 * \param start convolution start point
	 * \param stop convolution stop point
	 * \param func lambda applied on the stencil values
	 * \param args additional arguments (not forwarded to conv_impl — TODO confirm)
	 *
	 */
	template<unsigned int stencil_size, typename prop_type, typename lambda_f, typename ... ArgsT >
	void conv_cross_ids(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
	{
		// guard: this kernel requires the SOA data layout
		if (layout_base<aggregate<int>>::type_value::value != SOA_layout_IA)
		{
			std::cout << __FILE__ << ":" << __LINE__ << " Error this function can be only used with the SOA version of the data-structure" << std::endl;
		}

		// a cross stencil has 2*dim neighbors per chunk
		NNlist.resize(2*dim * chunks.size());

		// the first call builds the neighbor list, subsequent calls reuse it
		if (findNN == false)
		{conv_impl<dim>::template conv_cross_ids<false,stencil_size,prop_type>(start,stop,*this,func);}
		else
		{conv_impl<dim>::template conv_cross_ids<true,stencil_size,prop_type>(start,stop,*this,func);}

		findNN = true;
	}
2087 
	/*! \brief apply a convolution on two properties at once using the stencil N
	 *
	 * \param stencil stencil offsets
	 * \param start convolution start point
	 * \param stop convolution stop point
	 * \param func lambda applied on the stencil values
	 * \param args additional arguments (not forwarded to conv_impl — TODO confirm)
	 *
	 */
	template<unsigned int prop_src1, unsigned int prop_src2 ,unsigned int prop_dst1, unsigned int prop_dst2 ,unsigned int stencil_size, unsigned int N, typename lambda_f, typename ... ArgsT >
	void conv2(int (& stencil)[N][dim], grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
	{
		// one neighbor slot per star neighbor of each chunk
		NNlist.resize(NNStar_c<dim>::nNN * chunks.size());

		// the first call builds the neighbor list, subsequent calls reuse it
		if (findNN == false)
		{conv_impl<dim>::template conv2<false,NNStar_c<dim>,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(stencil,start,stop,*this,func);}
		else
		{conv_impl<dim>::template conv2<true,NNStar_c<dim>,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(stencil,start,stop,*this,func);}

		findNN = true;
	}
2104 
	/*! \brief apply a cross-shaped convolution on two properties at once
	 *
	 * NOTE(review): the neighbor list is sized NNStar_c<dim>::nNN per chunk here,
	 * while conv_cross uses 2*dim per chunk — confirm this difference is intended.
	 *
	 * \param start convolution start point
	 * \param stop convolution stop point
	 * \param func lambda applied on the stencil values
	 * \param args additional arguments (not forwarded to conv_impl — TODO confirm)
	 *
	 */
	template<unsigned int prop_src1, unsigned int prop_src2 ,unsigned int prop_dst1, unsigned int prop_dst2 ,unsigned int stencil_size, typename lambda_f, typename ... ArgsT >
	void conv_cross2(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
	{
		NNlist.resize(NNStar_c<dim>::nNN * chunks.size());

		// the first call builds the neighbor list, subsequent calls reuse it
		if (findNN == false)
		{conv_impl<dim>::template conv_cross2<false,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(start,stop,*this,func);}
		else
		{conv_impl<dim>::template conv_cross2<true,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(start,stop,*this,func);}

		findNN = true;
	}
2121 
	/*! \brief unpack the sub-grid object
	 *
	 * NOTE(review): the first declaration lines (presumably
	 * `void unpack(ExtPreAlloc<S2> & mem,` plus the sub-grid iterator parameter
	 * referenced below as sub_it) and the declaration of header_mask_tmp appear
	 * to have been lost in this copy of the file — verify against the repository.
	 *
	 * \tparam prp properties to unpack
	 *
	 * \param ps unpack status
	 * \param context unused on CPU
	 * \param opt remove/copy options
	 *
	 */
	template<unsigned int ... prp, typename S2,typename context_type>
			Unpack_stat & ps,
			context_type & context,
			rem_copy_opt opt)
	{
		short unsigned int mask_it[chunking::size::value];

		// first we unpack the number of chunks

		size_t n_chunks;

		Unpacker<size_t,S2>::unpack(mem,n_chunks,ps);

		// grid size on each dimension
		size_t sz[dim];
		for (size_t i = 0 ; i < dim ; i++)
		{Unpacker<size_t,S2>::unpack(mem,sz[i],ps);}

		openfpm::vector<cheader<dim>> header_inf_tmp;
		openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks_tmp;

		header_inf_tmp.resize(n_chunks);
		header_mask_tmp.resize(n_chunks);
		chunks_tmp.resize(n_chunks);

		for (size_t i = 0 ; i < n_chunks ; i++)
		{
			auto & hc = header_inf_tmp.get(i);
			auto & hm = header_mask_tmp.get(i);

			// chunk header: existence mask, chunk position, number of elements
			Unpacker<typename std::remove_reference<decltype(header_mask.get(i).mask)>::type ,S2>::unpack(mem,hm.mask,ps);
			Unpacker<typename std::remove_reference<decltype(header_inf.get(i).pos)>::type ,S2>::unpack(mem,hc.pos,ps);
			Unpacker<typename std::remove_reference<decltype(header_inf.get(i).nele)>::type ,S2>::unpack(mem,hc.nele,ps);

			// fill the mask_it

			fill_mask(mask_it,hm.mask,hc.nele);

			// now we unpack the information
			size_t active_cnk;
			size_t ele_id;

			for (size_t k = 0 ; k < hc.nele ; k++)
			{
				// construct v1 (the inner index i shadows the outer chunk index)
				grid_key_dx<dim> v1;
				for (size_t i = 0 ; i < dim ; i++)
				{v1.set_d(i,hc.pos.get(i) + pos_chunk[mask_it[k]].get(i) + sub_it.getStart().get(i));}

				// reserve the slot in the destination grid
				pre_insert(v1,active_cnk,ele_id);

				Unpacker<decltype(chunks.get_o(mask_it[k])),
						 S2,
						 PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack<T,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);

			}
		}
	}
2191 
	/*! \brief unpack the whole grid object
	 *
	 * NOTE(review): the first declaration line (presumably
	 * `void unpack(ExtPreAlloc<S2> & mem,`) appears to have been lost in this
	 * copy of the file — verify against the repository.
	 *
	 * \tparam prp properties to unpack
	 *
	 * \param ps unpack status
	 *
	 */
	template<unsigned int ... prp, typename S2>
			Unpack_stat & ps)
	{
		// the grid content is fully replaced by the unpacked data
		this->clear();

		grid_key_dx<dim> start;
		grid_key_dx<dim> stop;

		// We preunpack some data
		Unpack_stat ps_tmp = ps;

		// number of chunks, re-read later by the sub-grid unpack
		size_t unused;
		Unpacker<size_t,S2>::unpack(mem,unused,ps_tmp);

		size_t sz[dim];
		for (size_t i = 0 ; i < dim ; i++)
		{Unpacker<size_t,S2>::unpack(mem,sz[i],ps_tmp);}

		// resize this grid to the unpacked dimensions and span it entirely
		g_sm.setDimensions(sz);
		for (size_t i = 0 ; i < dim ; i++)
		{
			start.set_d(i,0);
			stop.set_d(i,getGrid().size(i)-1);
		}

		auto sub_it = this->getIterator(start,stop);

		// delegate to the sub-grid unpack over the whole domain
		int ctx;
		unpack<prp...>(mem,sub_it,ps,ctx,rem_copy_opt::NONE_OPT);
	}
2234 
	/*! \brief unpack the sub-grid object applying an operation
	 *
	 * NOTE(review): the first declaration lines (presumably
	 * `void unpack_with_op(ExtPreAlloc<S2> & mem,` plus the sub-grid iterator
	 * referenced below as sub2) and the declarations of header_mask_tmp and
	 * chunks_tmp appear to have been lost in this copy of the file — verify
	 * against the repository.
	 *
	 * \tparam op operation applied on points that already exist
	 * \tparam prp properties to unpack
	 *
	 * \param ps unpack status
	 *
	 */
	template<template<typename,typename> class op, typename S2, unsigned int ... prp>
			Unpack_stat & ps)
	{
		short unsigned int mask_it[chunking::size::value];

		// first we unpack the number of chunks

		size_t n_chunks;

		Unpacker<size_t,S2>::unpack(mem,n_chunks,ps);

		// grid size on each dimension
		size_t sz[dim];
		for (size_t i = 0 ; i < dim ; i++)
		{Unpacker<size_t,S2>::unpack(mem,sz[i],ps);}

		openfpm::vector<cheader<dim>> header_inf_tmp;

		header_inf_tmp.resize(n_chunks);
		header_mask_tmp.resize(n_chunks);
		chunks_tmp.resize(n_chunks);

		for (size_t i = 0 ; i < n_chunks ; i++)
		{
			auto & hc = header_inf_tmp.get(i);
			auto & hm = header_mask_tmp.get(i);

			// chunk header: existence mask, chunk position, number of elements
			Unpacker<decltype(hm.mask),S2>::unpack(mem,hm.mask,ps);
			Unpacker<decltype(hc.pos),S2>::unpack(mem,hc.pos,ps);
			Unpacker<decltype(hc.nele),S2>::unpack(mem,hc.nele,ps);

			// fill the mask_it

			fill_mask(mask_it,hm.mask,hc.nele);

			// now we unpack the information
			size_t active_cnk;
			size_t ele_id;

			for (size_t k = 0 ; k < hc.nele ; k++)
			{
				// construct v1
				grid_key_dx<dim> v1;
				for (size_t i = 0 ; i < dim ; i++)
				{v1.set_d(i,hc.pos.get(i) + pos_chunk[mask_it[k]].get(i) + sub2.getStart().get(i));}

				bool exist = pre_insert(v1,active_cnk,ele_id);

				// fresh points are written with replace_, pre-existing points
				// are combined with op
				if (exist == false)
				{
					Unpacker<decltype(chunks.get_o(mask_it[k])),
							 S2,
							 PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack_op<replace_,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);
				}
				else
				{
					Unpacker<decltype(chunks.get_o(mask_it[k])),
							 S2,
							 PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack_op<op,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);
				}

			}
		}
	}
2312 
2318  template <typename stencil = no_stencil>
2321  {
2323  }
2324 
2332  {
2334  }
2335 
2342  void convert_key(grid_key_dx<dim> & key_out, const grid_key_sparse_lin_dx & key_in) const
2343  {
2344  auto & ph = header_inf.get(key_in.getChunk()).pos;
2345  auto & pos_h = pos_chunk[key_in.getPos()];
2346 
2347  for (size_t i = 0 ; i < dim ; i++)
2348  {
2349  key_out.set_d(i,ph.get(i) + pos_h.get(i));
2350  }
2351  }
2352 
2359  {
2360  cache_pnt = sg.cache_pnt;
2361 
2362  for (size_t i = 0 ; i < SGRID_CACHE ; i++)
2363  {
2364  cache[i] = sg.cache[i];
2365  cached_id[i] = sg.cached_id[i];
2366  }
2367 
2369  map = sg.map;
2370  header_inf = sg.header_inf;
2371  header_mask = sg.header_mask;
2372  chunks = sg.chunks;
2373  g_sm = sg.g_sm;
2374  g_sm_shift = sg.g_sm_shift;
2375 
2376  for (size_t i = 0 ; i < chunking::size::value ; i++)
2377  {
2378  pos_chunk[i] = sg.pos_chunk[i];
2379  }
2380 
2381 
2382  for (size_t i = 0 ; i < dim ; i++)
2383  {sz_cnk[i] = sg.sz_cnk[i];}
2384 
2385  empty_v = sg.empty_v;
2386 
2387  return *this;
2388  }
2389 
	/*! \brief Reorder the chunks by their linearized (shifted) grid index
	 *
	 * NOTE(review): the declarations of header_mask_tmp and srt, and the
	 * statement that shifts kh/kl, appear to have been lost in this copy of
	 * the file — verify against the repository.
	 *
	 */
	void reorder()
	{
		openfpm::vector<cheader<dim>,S> header_inf_tmp;
		openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks_tmp;

		header_inf_tmp.resize(header_inf.size());
		header_mask_tmp.resize(header_mask.size());
		chunks_tmp.resize(chunks.size());

		// (linearized chunk id, original position), ordered by id
		struct pair_int
		{
			int id;
			int pos;

			bool operator<(const pair_int & tmp) const
			{
				return id < tmp.id;
			}
		};

		srt.resize(header_inf.size());

		for (int i = 0 ; i < header_inf.size() ; i++)
		{
			grid_key_dx<dim> kh = header_inf.get(i).pos;
			grid_key_dx<dim> kl;

			// shift the key

			long int lin_id = g_sm_shift.LinId(kh);

			srt.get(i).id = lin_id;
			srt.get(i).pos = i;
		}

		srt.sort();

		// now reorder: move each chunk (and its headers) to its sorted slot

		for (int i = 0 ; i < srt.size() ; i++)
		{
			chunks_tmp.get(i) = chunks.get(srt.get(i).pos);
			header_inf_tmp.get(i) = header_inf.get(srt.get(i).pos);
			header_mask_tmp.get(i) = header_mask.get(srt.get(i).pos);
		}

		chunks_tmp.swap(chunks);
		header_inf_tmp.swap(header_inf);
		header_mask_tmp.swap(header_mask);

		// cache and map refer to the old ordering: rebuild them
		clear_cache();
		reconstruct_map();

		// the neighbor list is invalidated by the reordering
		empty_v.clear();
		findNN = false;
		NNlist.clear();
	}
2454 
2461  {
2462  cache_pnt = sg.cache_pnt;
2463 
2464  for (size_t i = 0 ; i < SGRID_CACHE ; i++)
2465  {
2466  cache[i] = sg.cache[i];
2467  cached_id[i] = sg.cached_id[i];
2468  }
2469 
2471  map.swap(sg.map);
2472  header_inf.swap(sg.header_inf);
2473  header_mask.swap(sg.header_mask);
2474  chunks.swap(sg.chunks);
2475  g_sm = sg.g_sm;
2476  g_sm_shift = sg.g_sm_shift;
2477 
2478  for (size_t i = 0 ; i < chunking::size::value ; i++)
2479  {
2480  pos_chunk[i] = sg.pos_chunk[i];
2481  }
2482 
2483 
2484  for (size_t i = 0 ; i < dim ; i++)
2485  {sz_cnk[i] = sg.sz_cnk[i];}
2486 
2487  empty_v = sg.empty_v;
2488 
2489  return *this;
2490  }
2491 
	/*! \brief Get the number of inserted points
	 *
	 * \return the number of inserted points (same as size())
	 *
	 */
	size_t size_inserted()
	{
		return size();
	}
2501 
	/*! \brief delete all the points
	 *
	 * Only the background chunk (index 0) is kept; the cache and the chunk
	 * map are rebuilt accordingly.
	 *
	 */
	void clear()
	{
		header_inf.resize(1);
		header_mask.resize(1);
		chunks.resize(1);

		clear_cache();
		reconstruct_map();
	}
2515 
2522  {
2523  clear_cache();
2524  }
2525 
2526 #ifdef OPENFPM_DATA_ENABLE_IO_MODULE
2527 
	/*! \brief write the sparse grid into a VTK file as a set of points
	 *
	 * NOTE(review): the declarations of tmp_pos and vtk_writer appear to have
	 * been lost in this copy of the file — verify against the repository.
	 *
	 * \tparam Tw type used for the point coordinates in the output
	 *
	 * \param output output file name
	 *
	 * \return true if the write succeed
	 *
	 */
	template<typename Tw = float> bool write(const std::string & output)
	{
		file_type ft = file_type::BINARY;

		openfpm::vector<T> tmp_prp;

		// copy position and properties

		auto it = getIterator();

		while(it.isNext())
		{
			auto key = it.getKey();
			auto keyg = it.getKeyF();

			// the grid coordinates become the point position
			Point<dim,Tw> p;

			for (size_t i = 0 ; i < dim ; i++)
			{p.get(i) = keyg.get(i);}

			tmp_pos.add(p);

			// copy all the properties of the point into tmp_prp
			tmp_prp.add();
			copy_prop_to_vector<decltype(chunks.get_o(key.getChunk())),decltype(tmp_prp.last())>
			cp(chunks.get_o(key.getChunk()),tmp_prp.last(),key.getPos());

			boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);

			++it;
		}

		// VTKWriter for a set of points
		vtk_writer.add(tmp_pos,tmp_prp,tmp_pos.size());

		// empty property names: default names are generated by the writer
		openfpm::vector<std::string> prp_names;

		// Write the VTK file
		return vtk_writer.write(output,prp_names,"sparse_grid",ft);
	}
2574 
2575 #endif
2576 
	//Functions to check if the packing object is complex

	/*! \brief Indicate that the packing procedure is not trivial (memcpy-able)
	 *
	 * \return false
	 *
	 */
	static bool pack()
	{
		return false;
	}
2582 
	/*! \brief Indicate that the pack-request procedure is not trivial
	 *
	 * \return false
	 *
	 */
	static bool packRequest()
	{
		return false;
	}
2587 
	/*! \brief Indicate that the pack-memory calculation is not trivial
	 *
	 * \return false
	 *
	 */
	static bool packMem()
	{
		return false;
	}
2592 
2599  {
2600  return header_inf;
2601  }
2602 
2609  {
2610  return header_mask;
2611  }
2612 
2619  {
2620  return NNlist;
2621  }
2622 
2629  {
2630  return chunks;
2631  }
2632 
2639  {
2640  return header_inf;
2641  }
2642 
2649  {
2650  return header_mask;
2651  }
2652 
2659  {
2660  return chunks;
2661  }
2662 
2668  {
2669  size_t tot = 0;
2670  for (int i = 1 ; i < header_mask.size() ; i++)
2671  {
2672  auto & m = header_mask.get(i);
2673 
2674  size_t np_mask = 0;
2675 
2676  for (int j = 0 ; j < chunking::size::value ; j++)
2677  {
2678  if (m.mask[j] & 0x1)
2679  {np_mask++;}
2680  }
2681 
2682  if (header_inf.get(i).nele != np_mask)
2683  {
2684  std::cout << __FILE__ << ":" << __LINE__ << " error chunk: " << i << " has " << np_mask << " points but header report " << header_inf.get(i).nele << std::endl;
2685  }
2686  tot += np_mask;
2687  }
2688 
2689  if (size() != tot)
2690  {
2691  std::cout << __FILE__ << ":" << __LINE__ << " Total point is inconsistent: " << size() << " " << tot << std::endl;
2692  }
2693  }
2694 };
2695 
2696 template<unsigned int dim,
2697  typename T,
2698  typename S,
2699  typename grid_lin = grid_zm<dim,void>,
2700  typename layout = typename memory_traits_inte<T>::type,
2701  template<typename> class layout_base = memory_traits_inte,
2702  typename chunking = default_chunking<dim>>
2704 
2705 
2706 #endif /* OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_ */
to_boost_vmpl< prp... >::type v_prp
Convert the packed properties into an MPL vector.
Definition: SparseGrid.hpp:219
void set_g_shift_from_size(const size_t(&sz)[dim], grid_lin &g_sm_shift)
set the grid shift from size
Definition: SparseGrid.hpp:562
openfpm::vector< int > & private_get_nnlist()
return the NN list for each block
grid_key_dx< dim > getChunkPos(size_t chunk_id)
Get the position of a chunk.
bool findNN
bool that indicate if the NNlist is filled
Definition: SparseGrid.hpp:426
bool isContained(const Box< dim, T > &b) const
Check if the box is contained.
Definition: Box.hpp:986
short int pos_id_src
source position
Definition: SparseGrid.hpp:177
void flush_remove()
Remove the point.
void setBackgroundValue(const typename boost::mpl::at< typename T::type, boost::mpl::int_< p >>::type &val)
Set the background value for the property p.
Definition: SparseGrid.hpp:899
grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > getIterator(const grid_key_dx< dim > &start, const grid_key_dx< dim > &stop, size_t opt=0) const
Return an iterator over a sub-grid.
__device__ static __host__ void meta_copy_(const T &src, T &dst)
copy and object from src to dst
Definition: meta_copy.hpp:60
Unpacker class.
Definition: Packer_util.hpp:20
void conv_cross2(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
bool isSkipLabellingPossible()
This function check if keep geometry is possible for this grid.
void packFinalize(ExtPreAlloc< S > &mem, Pack_stat &sts, int opt, bool is_pack_remote)
Pack finalize Finalize the pack of this object. In this case it does nothing.
void conv2(int(&stencil)[N][dim], grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
void remove_empty()
Eliminate empty chunks.
Definition: SparseGrid.hpp:496
grid_key_dx< dim > & pos_dst
destination position
Definition: SparseGrid.hpp:103
const openfpm::vector< aggregate_bfv< chunk_def > > & private_get_data() const
return the data of the blocks
static bool is_unpack_header_supported()
Indicate that unpacking the header is supported.
sgrid_cpu(const sgrid_cpu &&g) THROW
create a sparse grid from another grid
Definition: SparseGrid.hpp:871
void resize(const size_t(&sz)[dim])
Resize the grid.
auto insert_o(const grid_key_dx< dim > &v1, size_t &ele_id) -> decltype(chunks.get_o(0))
Insert a full element (with all properties)
Definition: SparseGrid.hpp:964
const Tsrc & src
source
Definition: SparseGrid.hpp:207
openfpm::vector< aggregate_bfv< chunk_def >, S, layout_base > & private_get_data()
return the data of the blocks
sgrid_cpu()
Trivial constructor.
Definition: SparseGrid.hpp:850
static void meta_copy_op_(const T &src, T &dst)
Meta-copy applying an operation.
Definition: meta_copy.hpp:710
auto getBlock(const grid_key_sparse_lin_dx &v1) const -> decltype(chunks.get(0))
Get the reference of the selected block.
void clear_cache()
reset the cache
Definition: SparseGrid.hpp:549
virtual void * getPointer()
Return the pointer of the last allocation.
const grid_key_dx< dim > & getStop() const
Return the stop point for the iteration.
static constexpr bool isCompressed()
This is a multiresolution sparse grid so is a compressed format.
Definition: SparseGrid.hpp:954
void remove_from_chunk(size_t sub_id, int &nele, unsigned char(&mask)[n_ele])
Remove.
Definition: SparseGrid.hpp:457
static grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > type_of_subiterator()
This is a meta-function return which type of sub iterator a grid produce.
openfpm::vector< aggregate_bfv< chunk_def >, S, layout_base > chunks
vector of chunks
Definition: SparseGrid.hpp:409
__host__ __device__ Point< dim, typeT > toPoint() const
Convert to a point the grid_key_dx.
Definition: grid_key.hpp:457
short int pos_id_dst
destination position
Definition: SparseGrid.hpp:180
void packRequest(grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > &sub_it, size_t &req) const
Insert an allocation request.
__device__ __host__ index_type get(index_type i) const
Get the i index.
Definition: grid_key.hpp:503
size_t cache_pnt
cache pointer
Definition: SparseGrid.hpp:384
Grid key sparse iterator.
void shift_forward(size_t sz)
shift the pointer forward
size_t sz_cnk[dim]
size of the chunk
Definition: SparseGrid.hpp:421
grid_key_sparse_dx_iterator_block_sub< dim, stencil_size, self, chunking > getBlockIterator(const grid_key_dx< dim > &start, const grid_key_dx< dim > &stop)
Return an iterator over a sub-grid.
const Tsrc & src
source
Definition: SparseGrid.hpp:171
This class implement the point shape in an N-dimensional space.
Definition: Point.hpp:27
class that store the information of the grid like number of point on each direction and define the in...
Definition: grid_zm.hpp:21
sparse_grid_bck_value< typename std::remove_reference< decltype(chunks.get(0))>::type > getBackgroundValue()
Get the background value.
Definition: SparseGrid.hpp:914
size_t size_all() const
__host__ __device__ bool isInside(const Point< dim, T > &p) const
Check if the point is inside the box.
Definition: Box.hpp:1004
size_t size()
Stub size.
Definition: map_vector.hpp:211
static void unpack_headers(pointers_type &pointers, headers_type &headers, result_type &result, int n_slot)
Stub does not do anything.
Definition: SparseGrid.hpp:926
const grid_key_dx< dim > & getStart() const
Return the starting point for the iteration.
void shift_backward(size_t sz)
shift the pointer backward
grid_lin g_sm
grid size information
Definition: SparseGrid.hpp:412
void unpack(ExtPreAlloc< S2 > &mem, grid_key_sparse_dx_iterator_sub< dims, chunking::size::value > &sub_it, Unpack_stat &ps, context_type &context, rem_copy_opt opt)
unpack the sub-grid object
void init()
initialize
Definition: SparseGrid.hpp:584
void packCalculate(size_t &req, const context_type &context)
Calculate the size of the information to pack.
auto get(const grid_key_sparse_lin_dx &v1) -> decltype(chunks.template get< p >(0)[0])
Get the reference of the selected element.
void operator()(T &t) const
It call the copy function for each property.
Definition: SparseGrid.hpp:190
void operator()(T &t) const
It call the copy function for each property.
Definition: SparseGrid.hpp:113
void conv_cross_ids(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution from start to stop point using the function func and arguments args
void operator()(T &t) const
It call the copy function for each property.
Definition: SparseGrid.hpp:81
void unpack_with_op(ExtPreAlloc< S2 > &mem, grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > &sub2, Unpack_stat &ps)
unpack the sub-grid object applying an operation
static size_t packMem(size_t n, size_t e)
Calculate the memory size required to pack n elements.
virtual bool allocate(size_t sz)
Allocate a chunk of memory.
Transform the boost::fusion::vector into memory specification (memory_traits)
Definition: memory_conf.hpp:83
void conv_cross(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution from start to stop point using the function func and arguments args
size_t(& sz)[dim]
sz site_t
Definition: SparseGrid.hpp:256
__device__ __host__ void setHigh(int i, T val)
set the high interval of the box
Definition: Box.hpp:544
This class copy general objects.
grid_key_dx< dim > & pos_dst
destination position
Definition: SparseGrid.hpp:216
mem_id LinId(const grid_key_dx< N, ids_type > &gk, const char sum_id[N]) const
Linearization of the grid_key_dx with a specified shift.
Definition: grid_sm.hpp:434
void unpack(ExtPreAlloc< S2 > &mem, Unpack_stat &ps)
unpack the sub-grid object
Tsrc & src
source
Definition: SparseGrid.hpp:38
sgrid_cpu(const size_t(&sz)[dim])
Constructor for sparse grid.
Definition: SparseGrid.hpp:881
Tdst & dst
destination
Definition: SparseGrid.hpp:97
void operator()(T &t) const
It call the copy function for each property.
Definition: SparseGrid.hpp:229
grid_key_dx< dim > getKP2() const
Get the point p12 as grid_key_dx.
Definition: Box.hpp:669
inter_memc< typename T::type >::type type
for each element in the vector interleave memory_c
Definition: memory_conf.hpp:86
void pack(ExtPreAlloc< S > &mem, grid_key_sparse_dx_iterator_sub< dims, chunking::size::value > &sub_it, Pack_stat &sts)
Pack the object into the memory given an iterator.
grid_key_sparse_dx_iterator< dim, chunking::size::value > getIterator(size_t opt=0) const
Return a Domain iterator.
size_t getPos() const
Return the linearized position in the chunk.
void packRequest(size_t &req) const
Insert an allocation request.
size_t size() const
static grid_key_sparse_dx_iterator< dim, chunking::size::value > type_of_iterator()
This is a meta-function return which type of sub iterator a grid produce.
void clear()
delete all the points
Tdst & dst
destination
Definition: SparseGrid.hpp:41
grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > sub_grid_iterator_type
sub-grid iterator type
Definition: SparseGrid.hpp:835
void pre_get(const grid_key_dx< dim > &v1, size_t &active_cnk, size_t &sub_id, bool &exist) const
Definition: SparseGrid.hpp:693
aggregate_bfv< chunk_def > background_type
Background type.
Definition: SparseGrid.hpp:838
long int cached_id[SGRID_CACHE]
cached id
Definition: SparseGrid.hpp:390
bool existPoint(const grid_key_dx< dim > &v1) const
Check if the point exist.
__device__ __host__ const T & get(unsigned int i) const
Get coordinate.
Definition: Point.hpp:172
Tdst & dst
destination
Definition: SparseGrid.hpp:210
const grid_key_dx< dim > & get() const
Get the actual key.
copy_sz(size_t(&sz)[dim])
constructor
Definition: SparseGrid.hpp:264
sgrid_cpu(const sgrid_cpu &g) THROW
create a sparse grid from another grid
Definition: SparseGrid.hpp:861
grid_key_dx< dim > get() const
Return the actual grid key iterator.
void setMemory()
It does materially nothing.
void convert_key(grid_key_dx< dim > &key_out, const grid_key_sparse_lin_dx &key_in) const
Here we convert the linearized sparse key into the grid_key_dx.
__device__ __host__ void setLow(int i, T val)
set the low interval of the box
Definition: Box.hpp:533
Packing class.
Definition: Packer.hpp:49
void remove(const grid_key_dx< dim > &v1)
Remove the point.
void expandAndTagBoundaries(grid_key_dx< dim > &start, grid_key_dx< dim > &stop)
Expand and tag boundaries.
void remove_no_flush(const grid_key_dx< dim > &v1)
Remove the point but do not flush the remove.
KeyT const ValueT ValueT OffsetIteratorT OffsetIteratorT int
[in] The number of segments that comprise the sorting data
void resetFlush()
It does nothing.
bool allocate_nocheck(size_t sz)
Allocate a chunk of memory.
grid_key_dx< dim > base_key
base_key for the grid
Definition: SparseGrid.hpp:825
Unpacking status object.
Definition: Pack_stat.hpp:15
int yes_i_am_grid
it defines that this data-structure is a grid
Definition: SparseGrid.hpp:822
bool pre_insert(const grid_key_dx< dim > &v1, size_t &active_cnk, size_t &sub_id)
Before insert data you have to do this.
Definition: SparseGrid.hpp:713
void internal_clear_cache()
This is an internal function to clear the cache.
grid_lin g_sm_shift
grid size information with shift
Definition: SparseGrid.hpp:415
openfpm::vector< mheader< chunking::size::value > > & private_get_header_mask()
return the header section of the blocks
void copyRemoveReset()
Reset the queue to remove and copy section of grids.
openfpm::vector< cheader< dim >, S > header_inf
indicate which element in the chunk are really filled
Definition: SparseGrid.hpp:396
void reorder()
Reorder based on index.
const Tsrc & src
source
Definition: SparseGrid.hpp:94
void operator()(T &t) const
It calls the copy function for each property.
Definition: SparseGrid.hpp:53
void reconstruct_map()
reconstruct the map
Definition: SparseGrid.hpp:471
void removeAddUnpackFinalize(const context_type &ctx, int opt)
In this case it does nothing.
void pack(ExtPreAlloc< S > &mem, Pack_stat &sts) const
Pack the object into the memory given an iterator.
static void unpack(ExtPreAlloc< Mem >, T &obj)
Error, no implementation.
Definition: Unpacker.hpp:40
unsigned char getFlag(const grid_key_dx< dim > &v1) const
Get the point flag (in this case it always return 0)
size_t getChunk(grid_key_dx< dim > &v1, bool &exist)
Given a grid point, it returns the chunk containing that point. In case the point does not exist it retu...
return if true the aggregate type T has a property that has a complex packing(serialization) method
const openfpm::vector< cheader< dim > > & private_get_header_inf() const
return the header section of the blocks
size_t getChunk() const
Return the chunk id.
void consistency()
This function check the consistency of the sparse grid.
const grid_lin & getGrid() const
Return the internal grid information.
sgrid_cpu & operator=(sgrid_cpu &&sg)
copy a sparse grid
long int cache[SGRID_CACHE]
cache
Definition: SparseGrid.hpp:387
__device__ __host__ bool Intersect(const Box< dim, T > &b, Box< dim, T > &b_out) const
Intersect.
Definition: Box.hpp:95
auto get(const grid_key_dx< dim > &v1) -> decltype(get_selector< typename boost::mpl::at< typename T::type, boost::mpl::int_< p >>::type >::template get_const< p >(chunks, 0, 0))
Get the reference of the selected element.
Declaration grid_key_dx_iterator_sub.
Definition: grid_sm.hpp:156
Tdst & dst
destination
Definition: SparseGrid.hpp:174
Grid key sparse iterator on a sub-part of the domain.
void removeCopyToFinalize(const context_type &ctx, int opt)
In this case it does nothing.
const openfpm::vector< mheader< chunking::size::value > > & private_get_header_mask() const
return the header section of the blocks
openfpm::vector< int > NNlist
for each chunk store the neighborhood chunks
Definition: SparseGrid.hpp:429
auto getBackgroundValueAggr() -> decltype(chunks.get(0))
Get the background value.
Definition: SparseGrid.hpp:944
tsl::hopscotch_map< size_t, size_t > map
Map to convert from grid coordinates to chunk.
Definition: SparseGrid.hpp:393
It create a boost::fusion vector with the selected properties.
grid_key_dx< dim > getKP1() const
Get the point p1 as grid_key_dx.
Definition: Box.hpp:656
sgrid_cpu & operator=(const sgrid_cpu &sg)
copy a sparse grid
r_type insert(const grid_key_dx< dim > &v1)
Get the reference of the selected element.
Definition: SparseGrid.hpp:981
It store the position in space of the sparse grid.
grid_key_dx< dim > & pos_src
source position
Definition: SparseGrid.hpp:100
void remove(Box< dim, long int > &section_to_delete)
Remove all the points in this region.
__device__ __host__ void set_d(index_type i, index_type id)
Set the i index.
Definition: grid_key.hpp:516
openfpm::vector< cheader< dim > > & private_get_header_inf()
return the header section of the blocks
aggregate of properties; from a list of objects it creates a struct that follows the OPENFPM native stru...
Definition: aggregate.hpp:214
void find_active_chunk_from_point(const grid_key_dx< dim > &v1, size_t &active_cnk, short int &sub_id)
Given a key, return the chunk that contains that key; in case that chunk does not exist return the key ...
Definition: SparseGrid.hpp:439
void removeAddUnpackReset()
In this case it does nothing.
void add_on_cache(size_t lin_id, size_t active_cnk) const
add on cache
Definition: SparseGrid.hpp:536
r_type insert(const grid_key_sparse_lin_dx &v1)
Get the reference of the selected element.
Definition: SparseGrid.hpp:999
auto get(const grid_key_dx< dim > &v1) const -> decltype(get_selector< typename boost::mpl::at< typename T::type, boost::mpl::int_< p >>::type >::template get_const< p >(chunks, 0, 0))
Get the reference of the selected element.
Implementation of 1-D std::vector like structure.
Definition: map_vector.hpp:202
Packing status object.
Definition: Pack_stat.hpp:60
void packReset()
Reset the pack calculation.
this class is a functor for "for_each" algorithm
Definition: SparseGrid.hpp:253
static void pack(ExtPreAlloc< Mem >, const T &obj)
Error, no implementation.
Definition: Packer.hpp:56
void conv(int(&stencil)[N][dim], grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
bool isNext()
Check if there is the next element.
size_t size_inserted()
Get the number of inserted points.
Tdst dst
destination
Definition: SparseGrid.hpp:69
Point< dim, T > getP1() const
Get the point p1.
Definition: Box.hpp:708
void operator()(T &t) const
It calls the copy function for each property.
Definition: SparseGrid.hpp:271
void * getPointerEnd()
Return the end pointer of the previous allocated memory.
grid_key_dx< dim > pos_chunk[chunking::size::value]
conversion position in the chunks
Definition: SparseGrid.hpp:418
static constexpr unsigned int dims
expose the dimensionality as a static const
Definition: SparseGrid.hpp:828
void find_active_chunk(const grid_key_dx< dim > &kh, size_t &active_cnk, bool &exist) const
Given a key, return the chunk that contains that key; in case that chunk does not exist return the key ...
Definition: SparseGrid.hpp:648
An aggregate that accept a boost fusion vector as type.
Definition: aggregate.hpp:172
grid_key_dx< dim > & pos_src
source position
Definition: SparseGrid.hpp:213