OpenFPM 5.2.0
Project that contains the implementation of distributed structures
SparseGrid.hpp
1 /*
2  * SparseGrid.hpp
3  *
4  * Created on: Oct 22, 2017
5  * Author: i-bird
6  */
7 
8 #ifndef OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_
9 #define OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_
10 
11 #include "memory_ly/memory_array.hpp"
12 #include "memory_ly/memory_c.hpp"
13 #include "memory_ly/memory_conf.hpp"
14 #include "hash_map/hopscotch_map.h"
15 #include "hash_map/hopscotch_set.h"
16 #include "Vector/map_vector.hpp"
17 #include "util/variadic_to_vmpl.hpp"
18 #include "data_type/aggregate.hpp"
19 #include "SparseGridUtil.hpp"
20 #include "SparseGrid_iterator.hpp"
21 #include "SparseGrid_iterator_block.hpp"
22 #include "SparseGrid_conv_opt.hpp"
23 //#include "util/debug.hpp"
24 // We do not want parallel writer
25 
26 #ifdef OPENFPM_DATA_ENABLE_IO_MODULE
27 #define NO_PARALLEL
28 #include "VTKWriter/VTKWriter.hpp"
29 #endif
30 
31 
32 
33 
34 template<typename Tsrc,typename Tdst>
35 class copy_bck
36 {
38  Tsrc & src;
39 
41  Tdst & dst;
42 
43  size_t pos;
44 
45 public:
46 
47  copy_bck(Tsrc & src, Tdst & dst,size_t pos)
48  :src(src),dst(dst),pos(pos)
49  {}
50 
52  template<typename T>
53  inline void operator()(T& t) const
54  {
55  typedef typename std::remove_reference<decltype(src.template get<T::value>())>::type copy_rtype;
56 
57  meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>(),dst.template get<T::value>()[pos]);
58  }
59 
60 };
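/* The small functor classes in this file (copy_bck, copy_prop_to_vector, ...) all
   follow the same pattern: boost::mpl::for_each_ref runs their operator() once per
   property index, with T::value carrying the index at compile time. Illustrative
   sketch of how such a functor is driven (it mirrors the usage in write() below;
   `chunk`, `out` and `pos` are assumed to exist):

   \code
   copy_prop_to_vector<decltype(chunk),decltype(out)> cp(chunk,out,pos);
   // invokes cp.operator()(boost::mpl::int_<i>) for i = 0 ... T::max_prop-1
   boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);
   \endcode
*/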
61 
62 template<typename Tsrc,typename Tdst>
63 class copy_prop_to_vector
64 {
66  Tsrc src;
67 
69  Tdst dst;
70 
71  size_t pos;
72 
73 public:
74 
75  copy_prop_to_vector(Tsrc src, Tdst dst,size_t pos)
76  :src(src),dst(dst),pos(pos)
77  {}
78 
80  template<typename T>
81  inline void operator()(T& t) const
82  {
83  typedef typename std::remove_reference<decltype(dst.template get<T::value>())>::type copy_rtype;
84 
85  meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>()[pos],dst.template get<T::value>());
86  }
87 
88 };
89 
90 template<unsigned int dim, typename Tsrc,typename Tdst>
91 class copy_sparse_to_sparse
92 {
94  const Tsrc & src;
95 
97  Tdst & dst;
98 
99  //! source position
100  grid_key_dx<dim> & pos_src;
101 
102  //! destination position
103  grid_key_dx<dim> & pos_dst;
104 
105 public:
106 
107  copy_sparse_to_sparse(const Tsrc & src, Tdst & dst,grid_key_dx<dim> & pos_src, grid_key_dx<dim> & pos_dst)
108  :src(src),dst(dst),pos_src(pos_src),pos_dst(pos_dst)
109  {}
110 
112  template<typename T>
113  inline void operator()(T& t) const
114  {
115  typedef typename std::remove_reference<decltype(dst.template insert<T::value>(pos_dst))>::type copy_rtype;
116 
117  meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>(pos_src),dst.template insert<T::value>(pos_dst));
118  }
119 
120 };
121 
122 template<typename T>
123 struct copy_sparse_to_sparse_bb_impl
124 {
125  template<unsigned int prop, typename Tsrc, typename Tdst>
126  static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
127  {
128  typedef typename std::remove_reference<decltype(dst.template get<prop>()[pos_id_dst])>::type copy_rtype;
129 
130  meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[pos_id_src],dst.template get<prop>()[pos_id_dst]);
131  }
132 };
133 
134 template<typename T, unsigned int N1>
135 struct copy_sparse_to_sparse_bb_impl<T[N1]>
136 {
137  template<unsigned int prop, typename Tsrc, typename Tdst>
138  static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
139  {
140  typedef typename std::remove_reference<decltype(dst.template get<prop>()[0][pos_id_dst])>::type copy_rtype;
141 
142  for (int i = 0 ; i < N1 ; i++)
143  {
144  meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[i][pos_id_src],dst.template get<prop>()[i][pos_id_dst]);
145  }
146  }
147 };
148 
149 template<typename T, unsigned int N1, unsigned int N2>
150 struct copy_sparse_to_sparse_bb_impl<T[N1][N2]>
151 {
152  template<unsigned int prop, typename Tsrc, typename Tdst>
153  static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
154  {
155  typedef typename std::remove_reference<decltype(dst.template get<prop>()[0][0][pos_id_dst])>::type copy_rtype;
156 
157  for (int i = 0 ; i < N1 ; i++)
158  {
159  for (int j = 0 ; j < N2 ; j++)
160  {
161  meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[i][j][pos_id_src],dst.template get<prop>()[i][j][pos_id_dst]);
162  }
163  }
164  }
165 };
166 
167 template<unsigned int dim, typename Tsrc,typename Tdst, typename aggrType>
168 class copy_sparse_to_sparse_bb
169 {
171  const Tsrc & src;
172 
174  Tdst & dst;
175 
177  short int pos_id_src;
178 
180  short int pos_id_dst;
181 
182 public:
183 
184  copy_sparse_to_sparse_bb(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
185  :src(src),dst(dst),pos_id_src(pos_id_src),pos_id_dst(pos_id_dst)
186  {}
187 
189  template<typename T>
190  inline void operator()(T& t) const
191  {
192 /* typedef typename std::remove_reference<decltype(dst.template get<T::value>())>::type copy_rtype;
193 
194  meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>()[pos_id_src],dst.template get<T::value>()[pos_id_dst]);*/
195 
196  typedef typename boost::mpl::at<typename aggrType::type, T>::type copy_rtype;
197 
198  copy_sparse_to_sparse_bb_impl<copy_rtype>::template copy<T::value>(src,dst,pos_id_src,pos_id_dst);
199  }
200 
201 };
202 
203 template< template<typename,typename> class op,unsigned int dim, typename Tsrc,typename Tdst, unsigned int ... prp>
204 class copy_sparse_to_sparse_op
205 {
207  const Tsrc & src;
208 
210  Tdst & dst;
211 
212  //! source position
213  grid_key_dx<dim> & pos_src;
214 
215  //! destination position
216  grid_key_dx<dim> & pos_dst;
217 
219  typedef typename to_boost_vmpl<prp...>::type v_prp;
220 
221 public:
222 
223  copy_sparse_to_sparse_op(const Tsrc & src, Tdst & dst,grid_key_dx<dim> & pos_src, grid_key_dx<dim> & pos_dst)
224  :src(src),dst(dst),pos_src(pos_src),pos_dst(pos_dst)
225  {}
226 
228  template<typename T>
229  inline void operator()(T& t) const
230  {
231  typedef typename boost::mpl::at<v_prp,boost::mpl::int_<T::value>>::type idx_type;
232  typedef typename std::remove_reference<decltype(dst.template insert<idx_type::value>(pos_dst))>::type copy_rtype;
233 
234  if (dst.existPoint(pos_dst) == false)
235  {meta_copy_op<replace_,copy_rtype>::meta_copy_op_(src.template get<idx_type::value>(pos_src),dst.template insert<idx_type::value>(pos_dst));}
236  else
237  {meta_copy_op<op,copy_rtype>::meta_copy_op_(src.template get<idx_type::value>(pos_src),dst.template insert<idx_type::value>(pos_dst));}
238  }
239 
240 };
241 
242 
243 
252 template<unsigned int dim, typename mpl_v>
253 struct copy_sz
254 {
256  size_t (& sz)[dim];
257 
258 
264  inline copy_sz(size_t (& sz)[dim])
265  :sz(sz)
266  {
267  };
268 
270  template<typename T>
271  inline void operator()(T& t) const
272  {
273  sz[T::value] = boost::mpl::at<mpl_v,boost::mpl::int_<T::value>>::type::value;
274  }
275 };
276 
277 
278 template<unsigned int N>
279 struct load_mask_impl
280 {
281  template<typename Vc_type>
282  static inline void load(Vc_type & Vc)
283  {
284  std::cout << __FILE__ << ":" << __LINE__ << " unknown size " << std::endl;
285  }
286 };
287 
288 template<>
289 struct load_mask_impl<1>
290 {
291  template<typename Vc_type>
292  static inline void load(Vc_type & Vc, unsigned char * mask_sum)
293  {
294  Vc[0] = mask_sum[0];
295  }
296 };
297 
298 template<>
299 struct load_mask_impl<2>
300 {
301  template<typename Vc_type>
302  static inline void load(Vc_type & Vc, unsigned char * mask_sum)
303  {
304  Vc[0] = mask_sum[0];
305  Vc[1] = mask_sum[1];
306  }
307 };
308 
309 template<>
310 struct load_mask_impl<4>
311 {
312  template<typename Vc_type>
313  static inline void load(Vc_type & Vc, unsigned char * mask_sum)
314  {
315  Vc[0] = mask_sum[0];
316  Vc[1] = mask_sum[1];
317  Vc[2] = mask_sum[2];
318  Vc[3] = mask_sum[3];
319  }
320 };
321 
322 template<>
323 struct load_mask_impl<8>
324 {
325  template<typename Vc_type>
326  static inline void load(Vc_type & Vc, unsigned char * mask_sum)
327  {
328  Vc[0] = mask_sum[0];
329  Vc[1] = mask_sum[1];
330  Vc[2] = mask_sum[2];
331  Vc[3] = mask_sum[3];
332  Vc[4] = mask_sum[4];
333  Vc[5] = mask_sum[5];
334  Vc[6] = mask_sum[6];
335  Vc[7] = mask_sum[7];
336  }
337 };
338 
339 template<>
340 struct load_mask_impl<16>
341 {
342  template<typename Vc_type>
343  static inline void load(Vc_type & Vc, unsigned char * mask_sum)
344  {
345  Vc[0] = mask_sum[0];
346  Vc[1] = mask_sum[1];
347  Vc[2] = mask_sum[2];
348  Vc[3] = mask_sum[3];
349  Vc[4] = mask_sum[4];
350  Vc[5] = mask_sum[5];
351  Vc[6] = mask_sum[6];
352  Vc[7] = mask_sum[7];
353  Vc[8] = mask_sum[8];
354  Vc[9] = mask_sum[9];
355  Vc[10] = mask_sum[10];
356  Vc[11] = mask_sum[11];
357  Vc[12] = mask_sum[12];
358  Vc[13] = mask_sum[13];
359  Vc[14] = mask_sum[14];
360  Vc[15] = mask_sum[15];
361  }
362 };
363 
364 template<typename Vc_type>
365 inline Vc_type load_mask(unsigned char * mask_sum)
366 {
367  Vc_type v;
368 
369  load_mask_impl<Vc_type::Size>::load(v,mask_sum);
370 
371  return v;
372 }
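/* load_mask fills one SIMD lane per mask byte; only the specializations for
   1, 2, 4, 8 and 16 lanes are implemented, the generic case prints an error.
   Minimal usage sketch, assuming the Vc library vector types (hypothetical,
   not from the original source):

   \code
   unsigned char mask_sum[Vc::uchar_v::Size];
   // ... fill mask_sum from the chunk masks ...
   Vc::uchar_v m = load_mask<Vc::uchar_v>(mask_sum);
   \endcode
*/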
373 
374 template<typename T,typename aggr>
375 struct set_bck
376 {
377  template<unsigned int p, typename chunks_type>
378  static void set(const T & val, chunks_type & chunks, unsigned int i)
379  {
380  meta_copy<typename boost::mpl::at<typename aggr::type,boost::mpl::int_<p>>::type>::meta_copy_(val,chunks.get(0).template get<p>()[i]);
381  }
382 };
383 
384 template<typename T, unsigned int N1, typename aggr>
385 struct set_bck<T[N1],aggr>
386 {
387  template<unsigned int p, typename chunks_type>
388  static void set(const T (& val)[N1], chunks_type & chunks, unsigned int i)
389  {
390  for (int i1 = 0 ; i1 < N1; i1++)
391  {meta_copy<T>::meta_copy_(val[i1],chunks.get(0).template get<p>()[i1][i]);}
392  }
393 };
394 
395 template<typename T, unsigned int N1, unsigned int N2, typename aggr>
396 struct set_bck<T[N1][N2],aggr>
397 {
398  template<unsigned int p, typename chunks_type>
399  static void set(const T (& val)[N1][N2], chunks_type & chunks, unsigned int i)
400  {
401  for (int i1 = 0 ; i1 < N1; i1++)
402  {
403  for (int i2 = 0 ; i2 < N2; i2++)
404  {
405  meta_copy<T>::meta_copy_(val[i1][i2],chunks.get(0).template get<p>()[i1][i2][i]);
406  }
407  }
408  }
409 };
410 
411 template<unsigned int dim,
412  typename T,
413  typename S,
414  typename grid_lin,
415  typename layout,
416  template<typename> class layout_base,
417  typename chunking>
418 class sgrid_cpu
419 {
421  mutable size_t cache_pnt;
422 
424  mutable long int cache[SGRID_CACHE];
425 
427  mutable long int cached_id[SGRID_CACHE];
428 
429  //! Map from linearized chunk position to chunk id
430  tsl::hopscotch_map<size_t,size_t> map;
431 
432  //! Position and number of filled elements of each chunk
433  openfpm::vector<cheader<dim>,S> header_inf;
434 
435  openfpm::vector<mheader<chunking::size::value>,S> header_mask;
436 
437  //Definition of the chunks
438  typedef typename v_transform_two_v2<Ft_chunk,boost::mpl::int_<chunking::size::value>,typename T::type>::type chunk_def;
439 
441  //aggregate_bfv<chunk_def> background;
442 
443  openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks;
444 
445  //! Neighborhood list of the chunks (built by the conv functions)
446  openfpm::vector<int> NNlist;
447 
449  grid_lin g_sm;
450 
452  grid_lin g_sm_shift;
453 
455  grid_key_dx<dim> pos_chunk[chunking::size::value];
456 
458  size_t sz_cnk[dim];
459 
460  openfpm::vector<size_t> empty_v;
461 
463  bool findNN;
464 
467 
476  inline void find_active_chunk_from_point(const grid_key_dx<dim> & v1,size_t & active_cnk, short int & sub_id)
477  {
478  grid_key_dx<dim> kh = v1;
479  grid_key_dx<dim> kl;
480 
481  // shift the key
482  key_shift<dim,chunking>::shift(kh,kl);
483 
484  find_active_chunk(kh,active_cnk);
485 
486  sub_id = sublin<dim,typename chunking::shift_c>::lin(kl);
487  }
488 
489 
494  template<unsigned int n_ele>
495  inline void remove_from_chunk(size_t sub_id,
496  int & nele,
497  unsigned char (& mask)[n_ele])
498  {
499  nele = (mask[sub_id])?nele-1:nele;
500 
501  mask[sub_id] = 0;
502  }
503 
509  inline void reconstruct_map()
510  {
511  // reconstruct map
512 
513  map.clear();
514  for (size_t i = 1 ; i < header_inf.size() ; i++)
515  {
516  grid_key_dx<dim> kh = header_inf.get(i).pos;
517  grid_key_dx<dim> kl;
518 
519  // shift the key
520  key_shift<dim,chunking>::shift(kh,kl);
521 
522  long int lin_id = g_sm_shift.LinId(kh);
523 
524  map[lin_id] = i;
525  }
526  }
527 
534  inline void remove_empty()
535  {
536  if (empty_v.size() >= FLUSH_REMOVE)
537  {
538  // eliminate duplicate entries
539 
540  empty_v.sort();
541  empty_v.unique();
542 
543  // Because chunks can be refilled, the empty list can contain chunks that are
544  // filled again; before removing we have to check that they are really empty
545 
546  for (int i = empty_v.size() - 1 ; i >= 0 ; i--)
547  {
548  if (header_inf.get(empty_v.get(i)).nele != 0)
549  {empty_v.remove(i);}
550  }
551 
552  header_inf.remove(empty_v);
553  header_mask.remove(empty_v);
554  chunks.remove(empty_v);
555 
556  // reconstruct map
557 
558  reconstruct_map();
559 
560  empty_v.clear();
561 
562  // cache must be cleared
563 
564  clear_cache();
565  }
566  }
567 
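/* Removal is deferred: remove_point() only clears the mask bit of a point and,
   when a chunk reaches nele == 0, queues the chunk id in empty_v. remove_empty()
   then compacts in batches of FLUSH_REMOVE and re-checks nele because a queued
   chunk can be refilled in the meantime. Sketch of the life cycle:

   \code
   grid.remove(key);   // mask bit cleared; a chunk that becomes empty is queued
   // ... later inserts can refill a queued chunk ...
   // once empty_v.size() >= FLUSH_REMOVE the still-empty chunks are erased,
   // then map is rebuilt and the cache is cleared
   \endcode
*/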
574  inline void add_on_cache(size_t lin_id, size_t active_cnk) const
575  {
576  // Add on cache the chunk
577  cache[cache_pnt] = lin_id;
578  cached_id[cache_pnt] = active_cnk;
579  cache_pnt++;
580  cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
581  }
582 
587  inline void clear_cache()
588  {
589  cache_pnt = 0;
590  for (size_t i = 0 ; i < SGRID_CACHE ; i++)
591  {cache[i] = -1;}
592  }
593 
600  void set_g_shift_from_size(const size_t (& sz)[dim], grid_lin & g_sm_shift)
601  {
602  grid_key_dx<dim> cs;
603  grid_key_dx<dim> unused;
604 
605  for (size_t i = 0 ; i < dim ; i++)
606  {cs.set_d(i,sz[i]);}
607 
608  key_shift<dim,chunking>::shift(cs,unused);
609 
610  size_t sz_i[dim];
611 
612  for (size_t i = 0 ; i < dim ; i++)
613  {sz_i[i] = cs.get(i) + 1;}
614 
615  g_sm_shift.setDimensions(sz_i);
616  }
617 
622  void init()
623  {
624  findNN = false;
625 
626  for (size_t i = 0 ; i < SGRID_CACHE ; i++)
627  {cache[i] = -1;}
628 
629  // fill pos_g
630 
631  copy_sz<dim,typename chunking::type> cpsz(sz_cnk);
632  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(cpsz);
633 
634  grid_sm<dim,void> gs(sz_cnk);
635 
636  grid_key_dx_iterator<dim> it(gs);
637  size_t cnt = 0;
638 
639  while (it.isNext())
640  {
641  auto key = it.get();
642 
643  for (size_t i = 0 ; i < dim ; i++)
644  {
645  pos_chunk[cnt].set_d(i,key.get(i));
646  }
647 
648  ++cnt;
649  ++it;
650  }
651 
652  // Add the background chunk at the beginning
653 
654  chunks.add();
655  header_inf.add();
656  for(int i = 0 ; i < dim ; i++)
657  {header_inf.last().pos.set_d(i,std::numeric_limits<long int>::min());};
658  header_inf.last().nele = 0;
659  header_mask.add();
660 
661  // set the mask to null
662  auto & h = header_mask.last().mask;
663 
664  for (size_t i = 0 ; i < chunking::size::value ; i++)
665  {h[i] = 0;}
666 
667  // set the data to background
668  for (size_t i = 0 ; i < chunking::size::value ; i++)
669  {
670  auto c = chunks.get(0);
671 
672  //copy_bck<decltype(background),decltype(c)> cb(background,c,i);
673 
674  //boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(cb);
675  }
676  }
677 
686  inline void find_active_chunk(const grid_key_dx<dim> & kh,size_t & active_cnk,bool & exist) const
687  {
688  long int lin_id = g_sm_shift.LinId(kh);
689 
690  size_t id = 0;
691  for (size_t k = 0 ; k < SGRID_CACHE; k++)
692  {id += (cache[k] == lin_id)?k+1:0;}
693 
694  if (id == 0)
695  {
696  // we do not have it in the cache, so we check whether it is in the map
697 
698  auto fnd = map.find(lin_id);
699  if (fnd == map.end())
700  {
701  exist = false;
702  active_cnk = 0;
703  return;
704  }
705  else
706  {active_cnk = fnd->second;}
707 
708  // Add on cache the chunk
709  cache[cache_pnt] = lin_id;
710  cached_id[cache_pnt] = active_cnk;
711  cache_pnt++;
712  cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
713  }
714  else
715  {
716  active_cnk = cached_id[id-1];
717  cache_pnt = id;
718  cache_pnt = (cache_pnt == SGRID_CACHE)?0:cache_pnt;
719  }
720 
721  exist = true;
722  }
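/* The function above resolves a chunk through a small software cache of the
   last SGRID_CACHE look-ups. The probe encodes hit/miss and slot in a single
   integer: a match at slot k contributes k+1, so id == 0 means miss and
   id-1 is the hit slot. Isolated sketch of the trick:

   \code
   size_t id = 0;
   for (size_t k = 0 ; k < SGRID_CACHE ; k++)
   {id += (cache[k] == lin_id)?k+1:0;}   // id == 0 -> miss, else hit at slot id-1
   \endcode
*/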
723 
731  inline void pre_get(const grid_key_dx<dim> & v1, size_t & active_cnk, size_t & sub_id, bool & exist) const
732  {
733  grid_key_dx<dim> kh = v1;
734  grid_key_dx<dim> kl;
735 
736  // shift the key
737  key_shift<dim,chunking>::shift(kh,kl);
738 
739  find_active_chunk(kh,active_cnk,exist);
740 
741  sub_id = sublin<dim,typename chunking::shift_c>::lin(kl);
742  }
743 
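/* pre_get decomposes a grid point into a chunk coordinate kh and an intra-chunk
   coordinate kl; sub_id is the linearization of kl inside the chunk. With a
   (power-of-two) chunk size of 8 along one axis the split is (illustrative):

   \code
   // component 21, chunk size 8:  kh = 21 >> 3 = 2 (which chunk)
   //                              kl = 21 &  7 = 5 (offset inside the chunk)
   \endcode
*/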
751  inline bool pre_insert(const grid_key_dx<dim> & v1, size_t & active_cnk, size_t & sub_id)
752  {
753  bool exist = true;
754  active_cnk = 0;
755 
756  grid_key_dx<dim> kh = v1;
757  grid_key_dx<dim> kl;
758 
759  // shift the key
760  key_shift<dim,chunking>::shift(kh,kl);
761 
762  long int lin_id = g_sm_shift.LinId(kh);
763 
764  size_t id = 0;
765  for (size_t k = 0 ; k < SGRID_CACHE; k++)
766  {id += (cache[k] == lin_id)?k+1:0;}
767 
768  if (id == 0)
769  {
770  // we do not have it in the cache, so we check whether it is in the map
771 
772  auto fnd = map.find(lin_id);
773  if (fnd == map.end())
774  {
775  // we do not have it in the map, create a chunk
776 
777  map[lin_id] = chunks.size();
778  chunks.add();
779  header_inf.add();
780  header_inf.last().pos = kh;
781  header_inf.last().nele = 0;
782  header_mask.add();
783 
784  // set the mask to null
785  auto & h = header_mask.last().mask;
786 
787  for (size_t i = 0 ; i < chunking::size::value ; i++)
788  {h[i] = 0;}
789 
791 
792  active_cnk = chunks.size() - 1;
793  }
794  else
795  {
796  // we have it in the map
797 
798  active_cnk = fnd->second;
799  }
800 
801  // Add on cache the chunk
802  cache[cache_pnt] = lin_id;
803  cached_id[cache_pnt] = active_cnk;
804  cache_pnt++;
805  cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
806  }
807  else
808  {
809  active_cnk = cached_id[id-1];
810  cache_pnt = id;
811  cache_pnt = (cache_pnt == SGRID_CACHE)?0:cache_pnt;
812  }
813 
814  sub_id = sublin<dim,typename chunking::shift_c>::lin(kl);
815 
816  // the chunk is now in the cache; resolve the element inside it
817 
818  // we notify that we added one element
819  auto & hc = header_inf.get(active_cnk);
820  auto & hm = header_mask.get(active_cnk);
821 
822  exist = hm.mask[sub_id];
823  hc.nele = (exist)?hc.nele:hc.nele + 1;
824  hm.mask[sub_id] |= 1;
825 
826  return exist;
827  }
828 
829  inline void remove_point(const grid_key_dx<dim> & v1)
830  {
831  bool exist;
832  size_t active_cnk = 0;
833  size_t sub_id;
834 
835  pre_get(v1,active_cnk,sub_id,exist);
836 
837  if (exist == false)
838  {return;}
839 
840  // eliminate the element
841 
842  auto & hm = header_mask.get(active_cnk);
843  auto & hc = header_inf.get(active_cnk);
844  unsigned char swt = hm.mask[sub_id];
845 
846  hc.nele = (swt)?hc.nele-1:hc.nele;
847 
848  hm.mask[sub_id] = 0;
849 
850  if (hc.nele == 0 && swt != 0)
851  {
852  // Add the chunks in the empty list
853  empty_v.add(active_cnk);
854  }
855  }
856 
857 public:
858 
860  typedef int yes_i_am_grid;
861 
864 
866  static constexpr unsigned int dims = dim;
867 
870  typedef T value_type;
871 
874 
877 
878  typedef layout_base<T> memory_traits;
879 
880  typedef chunking chunking_type;
881 
882  typedef grid_lin linearizer_type;
883 
888  inline sgrid_cpu()
889  :cache_pnt(0)
890  {
891  init();
892  }
893 
899  inline sgrid_cpu(const sgrid_cpu & g) THROW
900  {
901  this->operator=(g);
902  }
903 
909  inline sgrid_cpu(const sgrid_cpu && g) THROW
910  {
911  this->operator=(g);
912  }
913 
919  sgrid_cpu(const size_t (& sz)[dim])
920  :cache_pnt(0),g_sm(sz)
921  {
922  // calculate the chunks grid
923 
924  set_g_shift_from_size(sz,g_sm_shift);
925 
926  // fill pos_g
927 
928  init();
929  }
930 
936  template<unsigned int p>
937  void setBackgroundValue(const typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type & val)
938  {
939  for (int i = 0 ; i < chunking::size::value ; i++)
940  {
941  set_bck<typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type,T>::template set<p>(val,chunks,i);
942 
943  }
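/* Usage sketch for the background value (illustrative; assumes a 3D grid
   storing one float property and HeapMemory as the memory type):

   \code
   sgrid_cpu<3,aggregate<float>,HeapMemory> grid({128,128,128});
   grid.template setBackgroundValue<0>(0.0f);   // returned by get() for absent points
   \endcode
*/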
944 
950  sparse_grid_bck_value<typename std::remove_reference<decltype(chunks.get(0))>::type> getBackgroundValue()
951  {
952  return sparse_grid_bck_value<typename std::remove_reference<decltype(chunks.get(0))>::type>(chunks.get(0));
953  }
954 
958  template<typename pointers_type,
959  typename headers_type,
960  typename result_type,
961  unsigned int ... prp >
962  static void unpack_headers(pointers_type & pointers, headers_type & headers, result_type & result, int n_slot)
963  {}
964 
965  template<unsigned int ... prp, typename S2, typename header_type, typename ite_type, typename context_type>
966  void unpack_with_headers(ExtPreAlloc<S2> & mem,
967  ite_type & sub_it,
968  header_type & headers,
969  int ih,
970  Unpack_stat & ps,
971  context_type& gpuContext,
972  rem_copy_opt opt = rem_copy_opt::NONE_OPT)
973  {}
974 
980  auto getBackgroundValueAggr() -> decltype(chunks.get(0))
981  {
982  return chunks.get(0);
983  }
984 
990  static constexpr bool isCompressed()
991  {
992  return true;
993  }
994 
1000  inline auto insert_o(const grid_key_dx<dim> & v1, size_t & ele_id) -> decltype(chunks.get_o(0))
1001  {
1002  size_t active_cnk;
1003 
1004  pre_insert(v1,active_cnk,ele_id);
1005 
1006  return chunks.get_o(active_cnk);
1007  }
1008 
1016  template <unsigned int p, typename r_type=decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,0,0))>
1017  inline r_type insert(const grid_key_dx<dim> & v1)
1018  {
1019  size_t active_cnk = 0;
1020  size_t ele_id = 0;
1021 
1022  pre_insert(v1,active_cnk,ele_id);
1023 
1024  return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,active_cnk,ele_id);
1025  }
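/* Minimal insert/get round trip (illustrative sketch, continuing the
   hypothetical grid declared in the note above):

   \code
   grid_key_dx<3> key(10,10,10);
   grid.template insert<0>(key) = 1.0f;   // creates the point (and its chunk)
   float v = grid.template get<0>(key);   // reads it back
   bool  e = grid.existPoint(key);        // true after the insert
   \endcode
*/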
1026 
1034  template <unsigned int p, typename r_type=decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,0,0))>
1035  inline r_type insert(const grid_key_sparse_lin_dx & v1)
1036  {
1037  size_t active_cnk = v1.getChunk();
1038  size_t sub_id = v1.getPos();
1039 
1040  // the chunk is in cache, solve
1041 
1042  // we notify that we added one element
1043  auto & hm = header_mask.get(active_cnk);
1044  auto & hc = header_inf.get(active_cnk);
1045 
1046  // we set the mask
1047  hc.nele = (hm.mask[sub_id] & 1)?hc.nele:hc.nele + 1;
1048  hm.mask[sub_id] |= 1;
1049 
1050  return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,active_cnk,sub_id);
1051  }
1052 
1060  template <unsigned int p>
1061  inline auto get(const grid_key_dx<dim> & v1) const -> decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,0))
1062  {
1063  bool exist;
1064  size_t active_cnk;
1065  size_t sub_id;
1066 
1067  pre_get(v1,active_cnk,sub_id,exist);
1068 
1069  if (exist == false)
1070  {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1071 
1072  // we check the mask
1073  auto & hm = header_mask.get(active_cnk);
1074 
1075  if ((hm.mask[sub_id] & 1) == 0)
1076  {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1077 
1078  return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);
1079  }
1080 
1086  static bool is_unpack_header_supported()
1087  {return false;}
1088 
1096  template <unsigned int p>
1097  inline auto get(const grid_key_dx<dim> & v1) -> decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,0))
1098  {
1099  bool exist;
1100  size_t active_cnk;
1101  size_t sub_id;
1102 
1103  pre_get(v1,active_cnk,sub_id,exist);
1104 
1105  if (exist == false)
1106  {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);}
1107 
1108  // we check the mask
1109  auto & hc = header_inf.get(active_cnk);
1110  auto & hm = header_mask.get(active_cnk);
1111 
1112  if ((hm.mask[sub_id] & 1) == 0)
1113  {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1114 
1115  return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);
1116  }
1117 
1125  inline bool existPoint(const grid_key_dx<dim> & v1) const
1126  {
1127  bool exist;
1128  size_t active_cnk;
1129  size_t sub_id;
1130 
1131  pre_get(v1,active_cnk,sub_id,exist);
1132 
1133  if (exist == false)
1134  {return false;}
1135 
1136  // we check the mask
1137  auto & hm = header_mask.get(active_cnk);
1138 
1139  if ((hm.mask[sub_id] & 1) == 0)
1140  {return false;}
1141 
1142  return true;
1143  }
1144 
1152  template <unsigned int p>
1153  inline auto get(const grid_key_sparse_lin_dx & v1) -> decltype(chunks.template get<p>(0)[0])
1154  {
1155  return chunks.template get<p>(v1.getChunk())[v1.getPos()];
1156  }
1157 
1165  inline auto getBlock(const grid_key_sparse_lin_dx & v1) const -> decltype(chunks.get(0))
1166  {
1167  return chunks.get(v1.getChunk());
1168  }
1169 
1177  inline unsigned char getFlag(const grid_key_dx<dim> & v1) const
1178  {
1179  return 0;
1180  }
1181 
1187  grid_key_sparse_dx_iterator<dim,chunking::size::value>
1188  getIterator(size_t opt = 0) const
1189  {
1190  return grid_key_sparse_dx_iterator<dim,chunking::size::value>(&header_mask,&pos_chunk);
1191  }
1192 
1198  grid_key_sparse_dx_iterator_sub<dim,chunking::size::value>
1199  getIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop, size_t opt = 0) const
1200  {
1201  return grid_key_sparse_dx_iterator_sub<dim,chunking::size::value>(&header_mask,&pos_chunk,start,stop,sz_cnk);
1202  }
1203 
1213  template<unsigned int stencil_size = 0>
1214  grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,self,chunking>
1215  getBlockIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop)
1216  {
1217  return grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,self,chunking>(*this,start,stop);
1218  }
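/* Sketch of sparse iteration over the inserted points only; the iterator walks
   the chunk headers and skips entries whose mask bit is not set (the pattern is
   taken from write() below):

   \code
   auto it = grid.getIterator();
   while (it.isNext())
   {
       auto key = it.getKey();            // chunk id + position inside the chunk
       // ... grid.template get<0>(key) ...
       ++it;
   }
   \endcode
*/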
1219 
1227  const grid_lin & getGrid() const
1228  {
1229  return g_sm;
1230  }
1231 
1237  void remove(const grid_key_dx<dim> & v1)
1238  {
1239  remove_point(v1);
1240  remove_empty();
1241  }
1242 
1248  void remove_no_flush(const grid_key_dx<dim> & v1)
1249  {
1250  remove_point(v1);
1251  }
1252 
1260  template<typename stencil_type>
1262  {
1263 
1264  }
1265 
1271  void flush_remove()
1272  {
1273  remove_empty();
1274  }
1275 
1284  void resize(const size_t (& sz)[dim])
1285  {
1286  bool is_bigger = true;
1287 
1288  // we check if we are resizing bigger, because if that is the case we do not
1289  // have to do much
1290 
1291  for (size_t i = 0 ; i < dim ; i++)
1292  {
1293  if (sz[i] < g_sm.size(i))
1294  {is_bigger = false;}
1295  }
1296 
1297  g_sm.setDimensions(sz);
1298 
1299  // set g_sm_shift
1300 
1301  set_g_shift_from_size(sz,g_sm_shift);
1302 
1303  clear_cache();
1304 
1305  if (is_bigger == true)
1306  {
1307 
1308  // if we resize bigger we do not have to do anything in the headers
1309  // and in the chunks we just have to update g_sm and reset the cache
1310  // and reconstruct the map. So we reconstruct the map and we just
1311  // finish
1312 
1313  reconstruct_map();
1314 
1315  return;
1316  }
1317 
1318  // create a box that is as big as the grid
1319 
1320  Box<dim,size_t> gs_box;
1321 
1322  for (size_t i = 0 ; i < dim ; i++)
1323  {
1324  gs_box.setLow(i,0);
1325  gs_box.setHigh(i,g_sm.size(i));
1326  }
1327 
1328  // we take a list of all chunks to remove
1329  openfpm::vector<size_t> rmh;
1330 
1331  // in this case we have to crop data, we go through all the headers
1332 
1333  for (size_t i = 1 ; i < header_inf.size() ; i++)
1334  {
1335  Box<dim,size_t> cnk;
1336 
1337  for (size_t j = 0 ; j < dim ; j++)
1338  {
1339  cnk.setLow(j,header_inf.get(i).pos.get(j));
1340  cnk.setHigh(j,sz_cnk[j] + header_inf.get(i).pos.get(j));
1341  }
1342 
1343  // if the chunk is not fully contained in the new smaller sparse grid
1344  // we have to crop it
1345  if (!cnk.isContained(gs_box))
1346  {
1347  // We check if the chunks is fully out or only partially in
1348  // checking the intersection between the new grid and the box
1349  // enclosing the chunk as it was before
1350  Box<dim,size_t> inte;
1351 
1352  if (gs_box.Intersect(cnk,inte))
1353  {
1354  // part of the chunk is in, part is out
1355 
1356  // shift P1 to the origin
1357  // this is the valid box; everything outside must be reset
1358  inte -= inte.getP1();
1359 
1360  int mask_nele;
1361  short unsigned int mask_it[chunking::size::value];
1362 
1363  auto & mask = header_mask.get(i).mask;
1364  auto & n_ele = header_inf.get(i).nele;
1365 
1366  // ok so the box is not fully contained so we must crop data
1367 
1368  fill_mask(mask_it,mask,mask_nele);
1369 
1370  // now we have the mask of all the filled elements
1371 
1372  for (size_t j = 0 ; j < mask_nele ; j++)
1373  {
1374  if (!inte.isInside(pos_chunk[mask_it[j]].toPoint()))
1375  {
1376  // if is not inside, the point must be deleted
1377 
1378  remove_from_chunk<chunking::size::value>(mask_it[j],n_ele,mask);
1379  }
1380  }
1381  }
1382  else
1383  {
1384  // the chunk is completely out and must be removed completely
1385  // we add it to the list of the chunks to remove
1386 
1387  rmh.add(i);
1388  }
1389  }
1390  }
1391 
1392  header_inf.remove(rmh,0);
1393  header_mask.remove(rmh,0);
1394  chunks.remove(rmh,0);
1395 
1396  reconstruct_map();
1397  }
1398 
1409  template<int ... prp> static inline size_t packMem(size_t n, size_t e)
1410  {
1411  if (sizeof...(prp) == 0)
1412  {return n * sizeof(typename T::type);}
1413 
1414  typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
1415 
1416  return n * sizeof(prp_object);
1417  }
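/* packMem sizes the serialized payload per element: with an empty property
   list it charges sizeof(typename T::type), otherwise the size of the reduced
   object holding only the selected properties. Example (illustrative, for
   T = aggregate<float,double>):

   \code
   // packMem<>(10,0)  -> 10 * sizeof(aggregate<float,double>::type)
   // packMem<0>(10,0) -> 10 * (size of an object holding only the float)
   \endcode
*/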
1418 
1424  void packReset()
1425  {}
1426 
1435  template<int ... prp, typename context_type> inline
1436  void packCalculate(size_t & req, const context_type& gpuContext)
1437  {}
1438 
1448  template<int ... prp> inline
1449  void packRequest(size_t & req) const
1450  {
1451  grid_sm<dim,void> gs_cnk(sz_cnk);
1452 
1453  // For sure we have to pack the number of chunks we want to pack
1454 
1455  req += sizeof(size_t);
1456  req += dim*sizeof(size_t);
1457 
1458  // Here we have to calculate the number of points to pack (skip the background)
1459 
1460  for (size_t i = 1 ; i < header_inf.size() ; i++)
1461  {
1462  auto & hm = header_mask.get(i);
1463 
1464  int mask_nele;
1465  short unsigned int mask_it[chunking::size::value];
1466 
1467  fill_mask(mask_it,hm.mask,mask_nele);
1468 
1469  for (size_t j = 0 ; j < mask_nele ; j++)
1470  {
1471  // If all of the aggregate properties do not have a "pack()" member
1472  if (has_pack_agg<T,prp...>::result::value == false)
1473  {
1474  // here we account the space to send one point
1475 
1476  size_t alloc_ele = this->packMem<prp...>(1,0);
1477  req += alloc_ele;
1478  }
1479  else
1480  {
1481  //Call a pack request
1482  call_aggregatePackRequestChunking<decltype(chunks.get_o(i)),
1483  S,prp ... >
1484  ::call_packRequest(chunks.get_o(i),mask_it[j],req);
1485  }
1486  }
1487 
1488  // There are points to send, so we have to save the mask chunk
1489  req += sizeof(header_mask.get(i).mask);
1490  // the chunk position
1491  req += sizeof(header_inf.get(i).pos);
1492  // and the number of element
1493  req += sizeof(header_inf.get(i).nele);
1494  }
1495  }
1496 
1503  {
1504  }
1505 
1515  template<int ... prp> inline
1516  void packRequest(grid_key_sparse_dx_iterator_sub<dim,chunking::size::value> & sub_it,
1517  size_t & req) const
1518  {
1519  grid_sm<dim,void> gs_cnk(sz_cnk);
1520 
1521  // For sure we have to pack the number of chunks we want to pack
1522 
1523  req += sizeof(size_t);
1524  req += dim*sizeof(size_t);
1525 
1526  // Here we have to calculate the number of points to pack
1527 
1528  Box<dim,size_t> section_to_pack;
1529 
1530  for (size_t i = 0; i < dim ; i++)
1531  {
1532  section_to_pack.setLow(i,sub_it.getStart().get(i));
1533  section_to_pack.setHigh(i,sub_it.getStop().get(i));
1534  }
1535 
1536  for (size_t i = 0 ; i < header_inf.size() ; i++)
1537  {
1538  auto & hm = header_mask.get(i);
1539 
1540  Box<dim,size_t> bc;
1541 
1542  for (size_t j = 0 ; j < dim ; j++)
1543  {
1544  bc.setLow(j,header_inf.get(i).pos.get(j));
1545  bc.setHigh(j,header_inf.get(i).pos.get(j) + sz_cnk[j] - 1);
1546  }
1547 
1548  // now we intersect the chunk box with the box
1549 
1550  Box<dim,size_t> inte;
1551  bool stp = bc.Intersect(section_to_pack,inte);
1552 
1553  if (stp == true)
1554  {
1555  // If they intersect we have to check whether there are points to pack;
1556  // we shift inte to be relative to the chunk origin
1557 
1558  inte -= header_inf.get(i).pos.toPoint();
1559 
1560  // we iterate all the points
1561 
1562  size_t old_req = req;
1563  grid_key_dx_iterator_sub<dim> sit(gs_cnk,inte.getKP1(),inte.getKP2());
1564 
1565  while (sit.isNext())
1566  {
1567  auto key = sit.get();
1568 
1569  size_t sub_id = gs_cnk.LinId(key);
1570 
1571  if (hm.mask[sub_id] & 1)
1572  {
1573  // If all of the aggregate properties do not have a "pack()" member
1574  if (has_pack_agg<T,prp...>::result::value == false)
1575  {
1576  // here we account the space to send one point
1577 
1578  size_t alloc_ele = this->packMem<prp...>(1,0);
1579  req += alloc_ele;
1580  }
1581  //If at least one property has "pack()"
1582  else
1583  {
1584  //Call a pack request
1585  call_aggregatePackRequestChunking<decltype(chunks.get_o(i)),
1586  S,prp ... >
1587  ::call_packRequest(chunks.get_o(i),sub_id,req);
1588  }
1589  }
1590 
1591  ++sit;
1592  }
1593 
1594  if (old_req != req)
1595  {
1596  // There are points to send, so we have to save the mask chunk
1597  req += sizeof(header_mask.get(i));
1598  // the chunk position
1599  req += sizeof(header_inf.get(i).pos);
1600  // and the number of element
1601  req += sizeof(header_inf.get(i).nele);
1602  }
1603  }
1604  }
1605  }
1606 
1616  template<int ... prp> void pack(ExtPreAlloc<S> & mem,
1617  grid_key_sparse_dx_iterator_sub<dim,chunking::size::value> & sub_it,
1618  Pack_stat & sts)
1619  {
1620  grid_sm<dim,void> gs_cnk(sz_cnk);
1621 
1622  // Here we allocate a size_t that indicates the number of chunks we are packing;
1623  // because we do not know it a priori, we will fill it later
1624 
1625  mem.allocate(sizeof(size_t));
1626  size_t * number_of_chunks = (size_t *)mem.getPointer();
1627 
1628  // Pack the size of the grid
1629 
1630  for (size_t i = 0 ; i < dim ; i++)
1631  {Packer<size_t,S>::pack(mem,getGrid().size(i),sts);}
1632 
1633  // Here we have to calculate the number of points to pack
1634 
1635  Box<dim,size_t> section_to_pack;
1636 
1637  for (size_t i = 0; i < dim ; i++)
1638  {
1639  section_to_pack.setLow(i,sub_it.getStart().get(i));
1640  section_to_pack.setHigh(i,sub_it.getStop().get(i));
1641  }
1642 
1643  size_t n_packed_chunk = 0;
1644 
1645  for (size_t i = 0 ; i < header_inf.size() ; i++)
1646  {
1647  auto & hc = header_inf.get(i);
1648  auto & hm = header_mask.get(i);
1649 
1650  Box<dim,size_t> bc;
1651 
1652  for (size_t j = 0 ; j < dim ; j++)
1653  {
1654  bc.setLow(j,hc.pos.get(j));
1655  bc.setHigh(j,hc.pos.get(j) + sz_cnk[j] - 1);
1656  }
1657 
1658  // now we intersect the chunk box with the box
1659 
1660  Box<dim,size_t> inte;
1661  bool stp = bc.Intersect(section_to_pack,inte);
1662 
1663  if (stp == true)
1664  {
1665  // This flag indicates whether something has been packed from this chunk
1666  bool has_packed = false;
1667 
1668  unsigned char mask_to_pack[chunking::size::value];
1669  memset(mask_to_pack,0,sizeof(mask_to_pack));
1670  mem.allocate_nocheck(sizeof(header_mask.get(i)) + sizeof(header_inf.get(i).pos) + sizeof(header_inf.get(i).nele));
1671 
1672  // here we get the pointer of the memory in case we have to pack the header
1673  // and we also shift the memory pointer by an offset equal to the header
1674  // to pack
1675  unsigned char * ptr_start = (unsigned char *)mem.getPointer();
1676 
1677  // If they intersect we have to check whether there are points to pack;
1678  // we shift inte into the chunk origin
1679 
1680  inte -= hc.pos.toPoint();
1681 
1682  // we iterate all the points
1683 
1684  grid_key_dx_iterator_sub<dim> sit(gs_cnk,inte.getKP1(),inte.getKP2());
1685 
1686  while (sit.isNext())
1687  {
1688  auto key = sit.get();
1689 
1690  size_t sub_id = gs_cnk.LinId(key);
1691 
1692  if (hm.mask[sub_id] & 1)
1693  {
1694  Packer<decltype(chunks.get_o(i)),
1695  S,
1696  PACKER_ENCAP_OBJECTS_CHUNKING>::template pack<T,prp...>(mem,chunks.get_o(i),sub_id,sts);
1697 
1698  mask_to_pack[sub_id] |= 1;
1699  has_packed = true;
1700 
1701  }
1702 
1703  ++sit;
1704  }
1705 
1706  if (has_packed == true)
1707  {
1708  unsigned char * ptr_final = (unsigned char *)mem.getPointer();
1709  unsigned char * ptr_final_for = (unsigned char *)mem.getPointerEnd();
1710 
1711  // Ok we packed something so we have to pack the header
1712  size_t shift = ptr_final - ptr_start;
1713 
1714  mem.shift_backward(shift);
1715 
1716  // The position of the chunks
1717 
1718  grid_key_dx<dim> pos = header_inf.get(i).pos - sub_it.getStart();
1719 
1720  Packer<decltype(header_mask.get(i).mask),S>::pack(mem,mask_to_pack,sts);
1721  Packer<decltype(header_inf.get(i).pos),S>::pack(mem,pos,sts);
1722  Packer<decltype(header_inf.get(i).nele),S>::pack(mem,header_inf.get(i).nele,sts);
1723 
1724  size_t shift_for = ptr_final_for - (unsigned char *)mem.getPointer();
1725 
1726  mem.shift_forward(shift_for);
1727 
1728  n_packed_chunk++;
1729  }
1730  else
1731  {
1732  // This just reset the last allocation
1733  mem.shift_backward(0);
1734  }
1735  }
1736  }
1737 
1738  // Now we fill the number of packed chunks
1739  *number_of_chunks = n_packed_chunk;
1740  }
1741 
1747  void removeAddUnpackReset()
1748  {}
1749 
1754  void resetFlush()
1755  {}
1756 
1764  template<unsigned int ... prp, typename context_type>
1765  void removeAddUnpackFinalize(const context_type& gpuContext, int opt)
1766  {}
1767 
1768 
1776  template<unsigned int ... prp, typename context_type>
1777  void removeCopyToFinalize(const context_type& gpuContext, int opt)
1778  {}
1779 
1786  {
1787  return false;
1788  }
1789 
1800  template<int ... prp> void packFinalize(ExtPreAlloc<S> & mem, Pack_stat & sts, int opt, bool is_pack_remote)
1801  {}
1802 
1812  template<int ... prp> void pack(ExtPreAlloc<S> & mem,
1813  Pack_stat & sts) const
1814  {
1815  grid_sm<dim,void> gs_cnk(sz_cnk);
1816 
1817  // Here we pack the number of chunks we are packing
1818  // (the background chunk at index 0 is skipped)
1819 
1820  Packer<size_t,S>::pack(mem,header_inf.size()-1,sts);
1821 
1822  for (size_t i = 0 ; i < dim ; i++)
1823  {Packer<size_t,S>::pack(mem,getGrid().size(i),sts);}
1824 
1825  // Here we pack the memory (skip the first background chunk)
1826 
1827  for (size_t i = 1 ; i < header_inf.size() ; i++)
1828  {
1829  auto & hm = header_mask.get(i);
1830  auto & hc = header_inf.get(i);
1831 
1832  Packer<decltype(hm.mask),S>::pack(mem,hm.mask,sts);
1833  Packer<decltype(hc.pos),S>::pack(mem,hc.pos,sts);
1834  Packer<decltype(hc.nele),S>::pack(mem,hc.nele,sts);
1835 
1836  // we iterate all the points
1837 
1838  int mask_nele;
1839  short unsigned int mask_it[chunking::size::value];
1840 
1841  fill_mask(mask_it,hm.mask,mask_nele);
1842 
1843  for (size_t j = 0 ; j < mask_nele ; j++)
1844  {
1845  Packer<decltype(chunks.get_o(i)),
1846  S,
1847  PACKER_ENCAP_OBJECTS_CHUNKING>::template pack<T,prp...>(mem,chunks.get_o(i),mask_it[j],sts);
1848  };
1849  }
1850  }
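/* The two pack() overloads and unpack() are symmetric: the stream holds the
   number of chunks, the grid size, then per chunk mask/pos/nele followed by the
   selected properties of every set point. Round-trip sketch (hypothetical;
   assumes HeapMemory and property 0):

   \code
   size_t req = 0;
   grid.template packRequest<0>(req);         // 1. size the buffer
   HeapMemory pmem;
   ExtPreAlloc<HeapMemory> mem(req,pmem);     // 2. pre-allocate it
   Pack_stat sts;
   grid.template pack<0>(mem,sts);            // 3. serialize
   Unpack_stat ps;
   grid2.template unpack<0>(mem,ps);          // 4. rebuild into another grid
   \endcode
*/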
1851 
1855  void setMemory()
1856  {}
1857 
1858 
1866  size_t size() const
1867  {
1868  size_t tot = 0;
1869 
1870  for (size_t i = 1 ; i < header_inf.size() ; i++)
1871  {
1872  tot += header_inf.get(i).nele;
1873  }
1874 
1875  return tot;
1876  }
1877 
1885  size_t size_all() const
1886  {
1888  }
1889 
1895  void remove(Box<dim,long int> & section_to_delete)
1896  {
1897  grid_sm<dim,void> gs_cnk(sz_cnk);
1898 
1899  for (size_t i = 0 ; i < header_inf.size() ; i++)
1900  {
1901  auto & hm = header_mask.get(i);
1902  auto & hc = header_inf.get(i);
1903 
1904  Box<dim,size_t> bc;
1905 
1906  for (size_t j = 0 ; j < dim ; j++)
1907  {
1908  bc.setLow(j,hc.pos.get(j));
1909  bc.setHigh(j,hc.pos.get(j) + sz_cnk[j] - 1);
1910  }
1911 
1912  // now we intersect the chunk box with the box
1913 
1914  Box<dim,size_t> inte;
1915  bool stp = bc.Intersect(section_to_delete,inte);
1916 
1917  if (stp == true)
1918  {
1919  // If they intersect we have to check whether there are points to remove;
1920  // we shift inte into the chunk origin
1921 
1922  inte -= header_inf.get(i).pos.toPoint();
1923 
1924  // we iterate all the points
1925 
1926  grid_key_dx_iterator_sub<dim> sit(gs_cnk,inte.getKP1(),inte.getKP2());
1927 
1928  while (sit.isNext())
1929  {
1930  auto key = sit.get();
1931 
1932  size_t sub_id = gs_cnk.LinId(key);
1933 
1934  unsigned char swt = header_mask.get(i).mask[sub_id];
1935 
1936  hc.nele = (swt)?hc.nele-1:hc.nele;
1937  hm.mask[sub_id] = 0;
1938 
1939  if (hc.nele == 0 && swt != 0)
1940  {
1941  // Add the chunks in the empty list
1942  empty_v.add(i);
1943  }
1944 
1945  ++sit;
1946  }
1947  }
1948  }
1949 
1950  remove_empty();
1951  }
1952 
1953  void copy_to(const self & grid_src,
1954  const Box<dim,size_t> & box_src,
1955  const Box<dim,size_t> & box_dst)
1956  {
1957 /* auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
1958 
1959  while (it.isNext())
1960  {
1961  auto key_src = it.get();
1962  grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
1963  key_dst -= box_src.getKP1();
1964 
1965  typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
1966 
1967  copy_sparse_to_sparse<dim,gcopy,gcopy> caps(grid_src,*this,key_src,key_dst);
1968  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(caps);
1969 
1970  ++it;
1971  }*/
1972 
1973  auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
1974 
1975  while (it.isNext())
1976  {
1977  auto key_src = it.get();
1978  grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
1979  key_dst -= box_src.getKP1();
1980  auto key_src_s = it.getKeyF();
1981 
1982  typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
1983 
1984  size_t pos_src_id = key_src_s.getPos();
1985  size_t pos_dst_id;
1986 
1989  auto block_dst = this->insert_o(key_dst,pos_dst_id);
1990 
1991  auto block_src = grid_src.getBlock(key_src_s);
1992 
1993  copy_sparse_to_sparse_bb<dim,decltype(block_src),decltype(block_dst),T> caps(block_src,block_dst,pos_src_id,pos_dst_id);
1994  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(caps);
1995 
1996  ++it;
1997  }
1998 
1999 // copy_remove_to_impl(grid_src,*this,box_src,box_dst);
2000  }
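/* copy_to transfers the points of grid_src that fall inside box_src into this
   grid, translated so that box_src maps onto box_dst (the two boxes must have
   the same extent). Illustrative sketch for 3D grids:

   \code
   Box<3,size_t> src;  Box<3,size_t> dst;
   for (int i = 0 ; i < 3 ; i++)
   {src.setLow(i,0); src.setHigh(i,9); dst.setLow(i,20); dst.setHigh(i,29);}

   grid2.copy_to(grid,src,dst);   // points of grid in src appear shifted in grid2
   \endcode
*/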
2001 
2002  template<template <typename,typename> class op, unsigned int ... prp >
2003  void copy_to_op(const self & grid_src,
2004  const Box<dim,size_t> & box_src,
2005  const Box<dim,size_t> & box_dst)
2006  {
2007  auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
2008 
2009  while (it.isNext())
2010  {
2011  auto key_src = it.get();
2012  grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
2013  key_dst -= box_src.getKP1();
2014 
2015  typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
2016 
2017  copy_sparse_to_sparse_op<op,dim,gcopy,gcopy,prp ...> caps(grid_src,*this,key_src,key_dst);
2018  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,sizeof...(prp)> >(caps);
2019 
2020  ++it;
2021  }
2022  }
2023 
2033  size_t getChunk(grid_key_dx<dim> & v1, bool & exist)
2034  {
2035  size_t act_cnk = chunks.size()-1;
2036 
2037  find_active_chunk(v1,act_cnk,exist);
2038 
2039  return act_cnk;
2040  }
2041 
2049  inline grid_key_dx<dim> getChunkPos(size_t chunk_id)
2050  {
2051  grid_key_dx<dim> kl;
2052  grid_key_dx<dim> kh = header_inf.get(chunk_id).pos;
2053 
2054  // shift the key
2055  key_shift<dim,chunking>::shift(kh,kl);
2056 
2057  return kh;
2058  }
2059 
2064  template<unsigned int prop_src, unsigned int prop_dst, unsigned int stencil_size, unsigned int N, typename lambda_f, typename ... ArgsT >
2065  void conv(int (& stencil)[N][dim], grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2066  {
2067  NNlist.resize(NNStar_c<dim>::nNN * chunks.size());
2068 
2069  if (findNN == false)
2070  {conv_impl<dim>::template conv<false,NNStar_c<dim>,prop_src,prop_dst,stencil_size>(stencil,start,stop,*this,func);}
2071  else
2072  {conv_impl<dim>::template conv<true,NNStar_c<dim>,prop_src,prop_dst,stencil_size>(stencil,start,stop,*this,func);}
2073 
2074  findNN = true;
2075  }
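/* conv applies a user functor over the points selected by an N-point stencil;
   the chunk neighborhood list (NNlist) is built on the first call and reused
   afterwards through the findNN flag. Call sketch (illustrative; the exact
   functor signature is defined by conv_impl in SparseGrid_conv_opt.hpp):

   \code
   int stencil[1][3] = {{0,0,0}};
   grid_key_dx<3> start(1,1,1);
   grid_key_dx<3> stop(126,126,126);
   // prop_src = 0, prop_dst = 1, stencil_size = 1; func is a user lambda
   grid.template conv<0,1,1>(stencil,start,stop,func);
   \endcode
*/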
2076 
2085  template<unsigned int prop_src, unsigned int prop_dst, unsigned int stencil_size, typename lambda_f, typename ... ArgsT >
2086  void conv_cross(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2087  {
2088  NNlist.resize(2*dim * chunks.size());
2089 
2090  if (findNN == false)
2091  {conv_impl<dim>::template conv_cross<false,prop_src,prop_dst,stencil_size>(start,stop,*this,func);}
2092  else
2093  {conv_impl<dim>::template conv_cross<true,prop_src,prop_dst,stencil_size>(start,stop,*this,func);}
2094 
2095  findNN = true;
2096  }
2097 
2106  template<unsigned int stencil_size, typename prop_type, typename lambda_f, typename ... ArgsT >
2107  void conv_cross_ids(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2108  {
2109  if (layout_base<aggregate<int>>::type_value::value != SOA_layout_IA)
2110  {
2111  std::cout << __FILE__ << ":" << __LINE__ << " Error: this function can only be used with the SOA version of the data-structure" << std::endl;
2112  }
2113 
2114  NNlist.resize(2*dim * chunks.size());
2115 
2116  if (findNN == false)
2117  {conv_impl<dim>::template conv_cross_ids<false,stencil_size,prop_type>(start,stop,*this,func);}
2118  else
2119  {conv_impl<dim>::template conv_cross_ids<true,stencil_size,prop_type>(start,stop,*this,func);}
2120 
2121  findNN = true;
2122  }
2123 
2128  template<unsigned int prop_src1, unsigned int prop_src2 ,unsigned int prop_dst1, unsigned int prop_dst2 ,unsigned int stencil_size, unsigned int N, typename lambda_f, typename ... ArgsT >
2129  void conv2(int (& stencil)[N][dim], grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2130  {
2131  NNlist.resize(NNStar_c<dim>::nNN * chunks.size());
2132 
2133  if (findNN == false)
2134  {conv_impl<dim>::template conv2<false,NNStar_c<dim>,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(stencil,start,stop,*this,func);}
2135  else
2136  {conv_impl<dim>::template conv2<true,NNStar_c<dim>,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(stencil,start,stop,*this,func);}
2137 
2138  findNN = true;
2139  }
2140 
2145  template<unsigned int prop_src1, unsigned int prop_src2 ,unsigned int prop_dst1, unsigned int prop_dst2 ,unsigned int stencil_size, typename lambda_f, typename ... ArgsT >
2146  void conv_cross2(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2147  {
2148  NNlist.resize(NNStar_c<dim>::nNN * chunks.size());
2149 
2150  if (findNN == false)
2151  {conv_impl<dim>::template conv_cross2<false,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(start,stop,*this,func);}
2152  else
2153  {conv_impl<dim>::template conv_cross2<true,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(start,stop,*this,func);}
2154 
2155  findNN = true;
2156  }
2157 
2167  template<unsigned int ... prp, typename S2, typename context_type>
2168  void unpack(ExtPreAlloc<S2> & mem,
2169  grid_key_sparse_dx_iterator_sub<dim,chunking::size::value> & sub_it,
2170  Unpack_stat & ps,
2171  context_type& gpuContext,
2172  rem_copy_opt opt)
2173  {
2174  short unsigned int mask_it[chunking::size::value];
2175 
2176  // first we unpack the number of chunks
2177 
2178  size_t n_chunks;
2179 
2180  Unpacker<size_t,S2>::unpack(mem,n_chunks,ps);
2181 
2182  size_t sz[dim];
2183  for (size_t i = 0 ; i < dim ; i++)
2184  {Unpacker<size_t,S2>::unpack(mem,sz[i],ps);}
2185 
2186  openfpm::vector<cheader<dim>> header_inf_tmp;
2187  openfpm::vector<mheader<chunking::size::value>> header_mask_tmp;
2188  openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks_tmp;
2189 
2190  header_inf_tmp.resize(n_chunks);
2191  header_mask_tmp.resize(n_chunks);
2192  chunks_tmp.resize(n_chunks);
2193 
2194  for (size_t i = 0 ; i < n_chunks ; i++)
2195  {
2196  auto & hc = header_inf_tmp.get(i);
2197  auto & hm = header_mask_tmp.get(i);
2198 
2199  Unpacker<typename std::remove_reference<decltype(header_mask.get(i).mask)>::type ,S2>::unpack(mem,hm.mask,ps);
2200  Unpacker<typename std::remove_reference<decltype(header_inf.get(i).pos)>::type ,S2>::unpack(mem,hc.pos,ps);
2201  Unpacker<typename std::remove_reference<decltype(header_inf.get(i).nele)>::type ,S2>::unpack(mem,hc.nele,ps);
2202 
2203  // fill the mask_it
2204 
2205  fill_mask(mask_it,hm.mask,hc.nele);
2206 
2207  // now we unpack the information
2208  size_t active_cnk;
2209  size_t ele_id;
2210 
2211  for (size_t k = 0 ; k < hc.nele ; k++)
2212  {
2213  // construct v1
2214  grid_key_dx<dim> v1;
2215  for (size_t i = 0 ; i < dim ; i++)
2216  {v1.set_d(i,hc.pos.get(i) + pos_chunk[mask_it[k]].get(i) + sub_it.getStart().get(i));}
2217 
2218  pre_insert(v1,active_cnk,ele_id);
2219 
2220  Unpacker<decltype(chunks.get_o(mask_it[k])),
2221  S2,
2222  PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack<T,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);
2223 
2224  }
2225  }
2226  }
2227 
2237  template<unsigned int ... prp, typename S2>
2238  void unpack(ExtPreAlloc<S2> & mem,
2239  Unpack_stat & ps)
2240  {
2241  this->clear();
2242 
2243  grid_key_dx<dim> start;
2244  grid_key_dx<dim> stop;
2245 
2246  // We preunpack some data
2247  Unpack_stat ps_tmp = ps;
2248 
2249  size_t unused;
2250  Unpacker<size_t,S2>::unpack(mem,unused,ps_tmp);
2251 
2252  size_t sz[dim];
2253  for (size_t i = 0 ; i < dim ; i++)
2254  {Unpacker<size_t,S2>::unpack(mem,sz[i],ps_tmp);}
2255 
2256  g_sm.setDimensions(sz);
2257  for (size_t i = 0 ; i < dim ; i++)
2258  {
2259  start.set_d(i,0);
2260  stop.set_d(i,getGrid().size(i)-1);
2261  }
2262 
2263  set_g_shift_from_size(sz,g_sm_shift);
2264 
2265  auto sub_it = this->getIterator(start,stop);
2266 
2267  // the context is not used in SparseGrid
2268  // kept for interface compatibility with SparseGridGpu
2269  int gpuContext;
2270 
2271  unpack<prp...>(mem,sub_it,ps,gpuContext,rem_copy_opt::NONE_OPT);
2272  }
2273 
2284  template<template<typename,typename> class op, typename S2, unsigned int ... prp>
2285  void unpack_with_op(ExtPreAlloc<S2> & mem,
2286  grid_key_sparse_dx_iterator_sub<dim,chunking::size::value> & sub2,
2287  Unpack_stat & ps)
2288  {
2289  short unsigned int mask_it[chunking::size::value];
2290 
2291  // first we unpack the number of chunks
2292 
2293  size_t n_chunks;
2294 
2295  Unpacker<size_t,S2>::unpack(mem,n_chunks,ps);
2296 
2297  size_t sz[dim];
2298  for (size_t i = 0 ; i < dim ; i++)
2299  {Unpacker<size_t,S2>::unpack(mem,sz[i],ps);}
2300 
2301  openfpm::vector<cheader<dim>> header_inf_tmp;
2302  openfpm::vector<mheader<chunking::size::value>> header_mask_tmp;
2303  openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks_tmp;
2304 
2305  header_inf_tmp.resize(n_chunks);
2306  header_mask_tmp.resize(n_chunks);
2307  chunks_tmp.resize(n_chunks);
2308 
2309  for (size_t i = 0 ; i < n_chunks ; i++)
2310  {
2311  auto & hc = header_inf_tmp.get(i);
2312  auto & hm = header_mask_tmp.get(i);
2313 
2314  Unpacker<decltype(hm.mask),S2>::unpack(mem,hm.mask,ps);
2315  Unpacker<decltype(hc.pos),S2>::unpack(mem,hc.pos,ps);
2316  Unpacker<decltype(hc.nele),S2>::unpack(mem,hc.nele,ps);
2317 
2318  // fill the mask_it
2319 
2320  fill_mask(mask_it,hm.mask,hc.nele);
2321 
2322  // now we unpack the information
2323  size_t active_cnk;
2324  size_t ele_id;
2325 
2326  for (size_t k = 0 ; k < hc.nele ; k++)
2327  {
2328  // construct v1
2329  grid_key_dx<dim> v1;
2330  for (size_t i = 0 ; i < dim ; i++)
2331  {v1.set_d(i,hc.pos.get(i) + pos_chunk[mask_it[k]].get(i) + sub2.getStart().get(i));}
2332 
2333  bool exist = pre_insert(v1,active_cnk,ele_id);
2334 
2335  if (exist == false)
2336  {
2337  Unpacker<decltype(chunks.get_o(mask_it[k])),
2338  S2,
2339  PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack_op<replace_,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);
2340  }
2341  else
2342  {
2343  Unpacker<decltype(chunks.get_o(mask_it[k])),
2344  S2,
2345  PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack_op<op,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);
2346  }
2347 
2348  }
2349  }
2350  }
2351 
2357  template <typename stencil = no_stencil>
2360  {
2362  }
2363 
2371  {
2373  }
2374 
2381  void convert_key(grid_key_dx<dim> & key_out, const grid_key_sparse_lin_dx & key_in) const
2382  {
2383  auto & ph = header_inf.get(key_in.getChunk()).pos;
2384  auto & pos_h = pos_chunk[key_in.getPos()];
2385 
2386  for (size_t i = 0 ; i < dim ; i++)
2387  {
2388  key_out.set_d(i,ph.get(i) + pos_h.get(i));
2389  }
2390  }
2391 
2397  sgrid_cpu & operator=(const sgrid_cpu & sg)
2398  {
2399  cache_pnt = sg.cache_pnt;
2400 
2401  for (size_t i = 0 ; i < SGRID_CACHE ; i++)
2402  {
2403  cache[i] = sg.cache[i];
2404  cached_id[i] = sg.cached_id[i];
2405  }
2406 
2408  map = sg.map;
2409  header_inf = sg.header_inf;
2410  header_mask = sg.header_mask;
2411  chunks = sg.chunks;
2412  g_sm = sg.g_sm;
2413  g_sm_shift = sg.g_sm_shift;
2414 
2415  for (size_t i = 0 ; i < chunking::size::value ; i++)
2416  {
2417  pos_chunk[i] = sg.pos_chunk[i];
2418  }
2419 
2420 
2421  for (size_t i = 0 ; i < dim ; i++)
2422  {sz_cnk[i] = sg.sz_cnk[i];}
2423 
2424  empty_v = sg.empty_v;
2425 
2426  return *this;
2427  }
2428 
2433  void reorder()
2434  {
2435  openfpm::vector<cheader<dim>,S> header_inf_tmp;
2436  openfpm::vector<mheader<chunking::size::value>,S> header_mask_tmp;
2437  openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks_tmp;
2438 
2439  header_inf_tmp.resize(header_inf.size());
2440  header_mask_tmp.resize(header_mask.size());
2441  chunks_tmp.resize(chunks.size());
2442 
2443  struct pair_int
2444  {
2445  int id;
2446  int pos;
2447 
2448  bool operator<(const pair_int & tmp) const
2449  {
2450  return id < tmp.id;
2451  }
2452  };
2453 
2454  openfpm::vector<pair_int> srt;
2455  srt.resize(header_inf.size());
2456 
2457  for (int i = 0 ; i < header_inf.size() ; i++)
2458  {
2459  grid_key_dx<dim> kh = header_inf.get(i).pos;
2460  grid_key_dx<dim> kl;
2461 
2462  // shift the key
2463  key_shift<dim,chunking>::shift(kh,kl);
2464 
2465  long int lin_id = g_sm_shift.LinId(kh);
2466 
2467  srt.get(i).id = lin_id;
2468  srt.get(i).pos = i;
2469  }
2470 
2471  srt.sort();
2472 
2473  // now reorder
2474 
2475  for (int i = 0 ; i < srt.size() ; i++)
2476  {
2477  chunks_tmp.get(i) = chunks.get(srt.get(i).pos);
2478  header_inf_tmp.get(i) = header_inf.get(srt.get(i).pos);
2479  header_mask_tmp.get(i) = header_mask.get(srt.get(i).pos);
2480  }
2481 
2482  chunks_tmp.swap(chunks);
2483  header_inf_tmp.swap(header_inf);
2484  header_mask_tmp.swap(header_mask);
2485 
2486  clear_cache();
2487  reconstruct_map();
2488 
2489  empty_v.clear();
2490  findNN = false;
2491  NNlist.clear();
2492  }
2493 
2499  sgrid_cpu & operator=(sgrid_cpu && sg)
2500  {
2501  cache_pnt = sg.cache_pnt;
2502 
2503  for (size_t i = 0 ; i < SGRID_CACHE ; i++)
2504  {
2505  cache[i] = sg.cache[i];
2506  cached_id[i] = sg.cached_id[i];
2507  }
2508 
2510  map.swap(sg.map);
2511  header_inf.swap(sg.header_inf);
2512  header_mask.swap(sg.header_mask);
2513  chunks.swap(sg.chunks);
2514  g_sm = sg.g_sm;
2515  g_sm_shift = sg.g_sm_shift;
2516 
2517  for (size_t i = 0 ; i < chunking::size::value ; i++)
2518  {
2519  pos_chunk[i] = sg.pos_chunk[i];
2520  }
2521 
2522 
2523  for (size_t i = 0 ; i < dim ; i++)
2524  {sz_cnk[i] = sg.sz_cnk[i];}
2525 
2526  empty_v = sg.empty_v;
2527 
2528  return *this;
2529  }
2530 
2536  size_t size_inserted()
2537  {
2538  return size();
2539  }
2540 
2545  void clear()
2546  {
2547  header_inf.resize(1);
2548  header_mask.resize(1);
2549  chunks.resize(1);
2550 
2551  clear_cache();
2552  reconstruct_map();
2553  }
2554 
2560  void internal_clear_cache()
2561  {
2562  clear_cache();
2563  }
2564 
2565 #ifdef OPENFPM_DATA_ENABLE_IO_MODULE
2566 
2572  template<typename Tw = float> bool write(const std::string & output)
2573  {
2574  file_type ft = file_type::BINARY;
2575 
2576  openfpm::vector<Point<dim,Tw>> tmp_pos;
2577  openfpm::vector<T> tmp_prp;
2578 
2579  // copy position and properties
2580 
2581  auto it = getIterator();
2582 
2583  while(it.isNext())
2584  {
2585  auto key = it.getKey();
2586  auto keyg = it.getKeyF();
2587 
2588  Point<dim,Tw> p;
2589 
2590  for (size_t i = 0 ; i < dim ; i++)
2591  {p.get(i) = keyg.get(i);}
2592 
2593  tmp_pos.add(p);
2594 
2595  tmp_prp.add();
2596  copy_prop_to_vector<decltype(chunks.get_o(key.getChunk())),decltype(tmp_prp.last())>
2597  cp(chunks.get_o(key.getChunk()),tmp_prp.last(),key.getPos());
2598 
2599  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);
2600 
2601  ++it;
2602  }
2603 
2604  // VTKWriter for a set of points
2605  VTKWriter<boost::mpl::pair<openfpm::vector<Point<dim,Tw>>,openfpm::vector<T>>,VECTOR_POINTS> vtk_writer;
2606  vtk_writer.add(tmp_pos,tmp_prp,tmp_pos.size());
2607 
2608  openfpm::vector<std::string> prp_names;
2609 
2610  // Write the VTK file
2611  return vtk_writer.write(output,prp_names,"sparse_grid",ft);
2612  }
2613 
2614 #endif
2615 
2616  //Functions to check if the packing object is complex
2617  static bool pack()
2618  {
2619  return false;
2620  }
2621 
2622  static bool packRequest()
2623  {
2624  return false;
2625  }
2626 
2627  static bool packMem()
2628  {
2629  return false;
2630  }
2631 
2637  openfpm::vector<cheader<dim>,S> & private_get_header_inf()
2638  {
2639  return header_inf;
2640  }
2641 
2647  openfpm::vector<mheader<chunking::size::value>,S> & private_get_header_mask()
2648  {
2649  return header_mask;
2650  }
2651 
2657  openfpm::vector<int> & private_get_nnlist()
2658  {
2659  return NNlist;
2660  }
2661 
2667  openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base> & private_get_data()
2668  {
2669  return chunks;
2670  }
2671 
2677  const openfpm::vector<cheader<dim>,S> & private_get_header_inf() const
2678  {
2679  return header_inf;
2680  }
2681 
2687  const openfpm::vector<mheader<chunking::size::value>,S> & private_get_header_mask() const
2688  {
2689  return header_mask;
2690  }
2691 
2697  const openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base> & private_get_data() const
2698  {
2699  return chunks;
2700  }
2701 
2706  void consistency()
2707  {
2708  size_t tot = 0;
2709  for (int i = 1 ; i < header_mask.size() ; i++)
2710  {
2711  auto & m = header_mask.get(i);
2712 
2713  size_t np_mask = 0;
2714 
2715  for (int j = 0 ; j < chunking::size::value ; j++)
2716  {
2717  if (m.mask[j] & 0x1)
2718  {np_mask++;}
2719  }
2720 
2721  if (header_inf.get(i).nele != np_mask)
2722  {
2723  std::cout << __FILE__ << ":" << __LINE__ << " error chunk: " << i << " has " << np_mask << " points but the header reports " << header_inf.get(i).nele << std::endl;
2724  }
2725  tot += np_mask;
2726  }
2727 
2728  if (size() != tot)
2729  {
2730  std::cout << __FILE__ << ":" << __LINE__ << " Total number of points is inconsistent: " << size() << " " << tot << std::endl;
2731  }
2732  }
2733 };
2734 
2735 template<unsigned int dim,
2736  typename T,
2737  typename S,
2738  typename grid_lin = grid_zm<dim,void>,
2739  typename layout = typename memory_traits_inte<T>::type,
2740  template<typename> class layout_base = memory_traits_inte,
2741  typename chunking = default_chunking<dim>>
2742 using sgrid_soa = sgrid_cpu<dim,T,S,grid_lin,layout,layout_base,chunking>;
2743 
2744 
2745 #endif /* OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_ */
Get the reference of the selected element.
void find_active_chunk(const grid_key_dx< dim > &kh, size_t &active_cnk, bool &exist) const
Given a key return the chunk than contain that key, in case that chunk does not exist return the key ...
Definition: SparseGrid.hpp:686
sgrid_cpu & operator=(const sgrid_cpu &sg)
copy an sparse grid
void clear_cache()
reset the cache
Definition: SparseGrid.hpp:587
void pack(ExtPreAlloc< S > &mem, Pack_stat &sts) const
Pack the object into the memory given an iterator.
bool pre_insert(const grid_key_dx< dim > &v1, size_t &active_cnk, size_t &sub_id)
Before insert data you have to do this.
Definition: SparseGrid.hpp:751
void consistency()
This function check the consistency of the sparse grid.
void init()
initialize
Definition: SparseGrid.hpp:622
sgrid_cpu()
Trivial constructor.
Definition: SparseGrid.hpp:888
grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > getIterator(const grid_key_dx< dim > &start, const grid_key_dx< dim > &stop, size_t opt=0) const
Return an iterator over a sub-grid.
void remove_empty()
Eliminate empty chunks.
Definition: SparseGrid.hpp:534
openfpm::vector< cheader< dim > > & private_get_header_inf()
return the header section of the blocks
void conv(int(&stencil)[N][dim], grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
void resetFlush()
It does nothing.
void pre_get(const grid_key_dx< dim > &v1, size_t &active_cnk, size_t &sub_id, bool &exist) const
Definition: SparseGrid.hpp:731
void removeAddUnpackFinalize(const context_type &gpuContext, int opt)
In this case it does nothing.
void set_g_shift_from_size(const size_t(&sz)[dim], grid_lin &g_sm_shift)
set the grid shift from size
Definition: SparseGrid.hpp:600
long int cached_id[SGRID_CACHE]
cached id
Definition: SparseGrid.hpp:427
size_t size_all() const
void setBackgroundValue(const typename boost::mpl::at< typename T::type, boost::mpl::int_< p >>::type &val)
Set the background value for the property p.
Definition: SparseGrid.hpp:937
r_type insert(const grid_key_sparse_lin_dx &v1)
Get the reference of the selected element.
bool isSkipLabellingPossible()
This function check if keep geometry is possible for this grid.
long int cache[SGRID_CACHE]
cache
Definition: SparseGrid.hpp:424
sparse_grid_bck_value< typename std::remove_reference< decltype(chunks.get(0))>::type > getBackgroundValue()
Get the background value.
Definition: SparseGrid.hpp:950
tsl::hopscotch_map< size_t, size_t > map
Map to convert from grid coordinates to chunk.
Definition: SparseGrid.hpp:430
static constexpr unsigned int dims
expose the dimansionality as a static const
Definition: SparseGrid.hpp:866
grid_key_sparse_dx_iterator< dim, chunking::size::value > getIterator(size_t opt=0) const
Return a Domain iterator.
void remove(const grid_key_dx< dim > &v1)
Remove the point.
void find_active_chunk_from_point(const grid_key_dx< dim > &v1, size_t &active_cnk, short int &sub_id)
Given a key return the chunk than contain that key, in case that chunk does not exist return the key ...
Definition: SparseGrid.hpp:476
unsigned char getFlag(const grid_key_dx< dim > &v1) const
Get the point flag (in this case it always return 0)
grid_key_dx< dim > pos_chunk[chunking::size::value]
conversion position in the chunks
Definition: SparseGrid.hpp:455
void unpack(ExtPreAlloc< S2 > &mem, Unpack_stat &ps)
unpack the sub-grid object
openfpm::vector< int > & private_get_nnlist()
return the NN list for each block
void removeAddUnpackReset()
In this case it does nothing.
auto getBlock(const grid_key_sparse_lin_dx &v1) const -> decltype(chunks.get(0))
Get the reference of the selected block.
openfpm::vector< aggregate_bfv< chunk_def >, S, layout_base > chunks
vector of chunks
Definition: SparseGrid.hpp:446
size_t size_inserted()
Get the number of inserted points.
static grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > type_of_subiterator()
This is a meta-function return which type of sub iterator a grid produce.
static constexpr bool isCompressed()
This is a multiresolution sparse grid so is a compressed format.
Definition: SparseGrid.hpp:990
void convert_key(grid_key_dx< dim > &key_out, const grid_key_sparse_lin_dx &key_in) const
Here we convert the linearized sparse key into the grid_key_dx.
grid_key_sparse_dx_iterator_block_sub< dim, stencil_size, self, chunking > getBlockIterator(const grid_key_dx< dim > &start, const grid_key_dx< dim > &stop)
Return an iterator over a sub-grid.
void clear()
delete all the points
grid_lin g_sm
grid size information
Definition: SparseGrid.hpp:449
bool findNN
bool that indicate if the NNlist is filled
Definition: SparseGrid.hpp:463
grid_lin g_sm_shift
grid size information with shift
Definition: SparseGrid.hpp:452
void removeCopyToFinalize(const context_type &gpuContext, int opt)
In this case it does nothing.
void internal_clear_cache()
This is an internal function to clear the cache.
void setMemory()
It does materially nothing.
void packFinalize(ExtPreAlloc< S > &mem, Pack_stat &sts, int opt, bool is_pack_remote)
Pack finalize Finalize the pack of this object. In this case it does nothing.
void copyRemoveReset()
Reset the queue to remove and copy section of grids.
const grid_lin & getGrid() const
Return the internal grid information.
void conv_cross_ids(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution from start to stop point using the function func and arguments args
void packRequest(size_t &req) const
Insert an allocation request.
void unpack_with_op(ExtPreAlloc< S2 > &mem, grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > &sub2, Unpack_stat &ps)
unpack the sub-grid object applying an operation
sgrid_cpu(const sgrid_cpu &&g) THROW
create a sparse grid from another grid
Definition: SparseGrid.hpp:909
void pack(ExtPreAlloc< S > &mem, grid_key_sparse_dx_iterator_sub< dims, chunking::size::value > &sub_it, Pack_stat &sts)
Pack the object into the memory given an iterator.
void reconstruct_map()
reconstruct the map
Definition: SparseGrid.hpp:509
size_t size() const
openfpm::vector< aggregate_bfv< chunk_def >, S, layout_base > & private_get_data()
return the data of the blocks
void expandAndTagBoundaries(grid_key_dx< dim > &start, grid_key_dx< dim > &stop)
Expand and tag boundaries.
void conv2(int(&stencil)[N][dim], grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
void conv_cross(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution from start to stop point using the function func and arguments args
const openfpm::vector< cheader< dim > > & private_get_header_inf() const
return the header section of the blocks
void conv_cross2(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
void flush_remove()
Remove the point.
void remove(Box< dim, long int > &section_to_delete)
Remove all the points in this region.
grid_key_dx< dim > base_key
base_key for the grid
Definition: SparseGrid.hpp:863
int yes_i_am_grid
it define that this data-structure is a grid
Definition: SparseGrid.hpp:860
sgrid_cpu(const size_t(&sz)[dim])
Constructor for sparse grid.
Definition: SparseGrid.hpp:919
aggregate_bfv< chunk_def > background_type
Background type.
Definition: SparseGrid.hpp:876
void packRequest(grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > &sub_it, size_t &req) const
Insert an allocation request.
void reorder()
Reorder based on index.
size_t cache_pnt
cache pointer
Definition: SparseGrid.hpp:421
void remove_from_chunk(size_t sub_id, int &nele, unsigned char(&mask)[n_ele])
Remove.
Definition: SparseGrid.hpp:495
void remove_no_flush(const grid_key_dx< dim > &v1)
Remove the point but do not flush the remove.
bool existPoint(const grid_key_dx< dim > &v1) const
Check if the point exist.
auto get(const grid_key_sparse_lin_dx &v1) -> decltype(chunks.template get< p >(0)[0])
Get the reference of the selected element.
static grid_key_sparse_dx_iterator< dim, chunking::size::value > type_of_iterator()
This is a meta-function return which type of sub iterator a grid produce.
void packReset()
Reset the pack calculation.
void add_on_cache(size_t lin_id, size_t active_cnk) const
add on cache
Definition: SparseGrid.hpp:574
openfpm::vector< int > NNlist
for each chunk store the neighborhood chunks
Definition: SparseGrid.hpp:466
openfpm::vector< mheader< chunking::size::value > > & private_get_header_mask()
return the header section of the blocks
size_t getChunk(grid_key_dx< dim > &v1, bool &exist)
Give a grid point it return the chunk containing that point. In case the point does not exist it retu...
const openfpm::vector< mheader< chunking::size::value > > & private_get_header_mask() const
return the header section of the blocks
static void unpack_headers(pointers_type &pointers, headers_type &headers, result_type &result, int n_slot)
Stub does not do anything.
Definition: SparseGrid.hpp:962
const openfpm::vector< aggregate_bfv< chunk_def > > & private_get_data() const
return the data of the blocks
auto get(const grid_key_dx< dim > &v1) -> decltype(get_selector< typename boost::mpl::at< typename T::type, boost::mpl::int_< p >>::type >::template get_const< p >(chunks, 0, 0))
Get the reference of the selected element.
static size_t packMem(size_t n, size_t e)
Calculate the memory size required to pack n elements.
auto insert_o(const grid_key_dx< dim > &v1, size_t &ele_id) -> decltype(chunks.get_o(0))
Insert a full element (with all properties)
void unpack(ExtPreAlloc< S2 > &mem, grid_key_sparse_dx_iterator_sub< dims, chunking::size::value > &sub_it, Unpack_stat &ps, context_type &gpuContext, rem_copy_opt opt)
unpack the sub-grid object
void packCalculate(size_t &req, const context_type &gpuContext)
Calculate the size of the information to pack.
sgrid_cpu(const sgrid_cpu &g) THROW
create a sparse grid from another grid
Definition: SparseGrid.hpp:899
auto getBackgroundValueAggr() -> decltype(chunks.get(0))
Get the background value.
Definition: SparseGrid.hpp:980
openfpm::vector< cheader< dim >, S > header_inf
indicate which element in the chunk are really filled
Definition: SparseGrid.hpp:433
grid_key_dx< dim > getChunkPos(size_t chunk_id)
Get the position of a chunk.
void resize(const size_t(&sz)[dim])
Resize the grid.
r_type insert(const grid_key_dx< dim > &v1)
Get the reference of the selected element.
grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > sub_grid_iterator_type
sub-grid iterator type
Definition: SparseGrid.hpp:873
size_t sz_cnk[dim]
size of the chunk
Definition: SparseGrid.hpp:458
KeyT const ValueT ValueT OffsetIteratorT OffsetIteratorT int
[in] The number of segments that comprise the sorting data
An aggregate that accept a boost fusion vector as type.
Definition: aggregate.hpp:179
aggregate of properties, from a list of object if create a struct that follow the OPENFPM native stru...
Definition: aggregate.hpp:221
this class is a functor for "for_each" algorithm
Definition: SparseGrid.hpp:254
copy_sz(size_t(&sz)[dim])
constructor
Definition: SparseGrid.hpp:264
size_t(& sz)[dim]
sz site_t
Definition: SparseGrid.hpp:256
void operator()(T &t) const
It call the copy function for each property.
Definition: SparseGrid.hpp:271
return if true the aggregate type T has a property that has a complex packing(serialization) method
Transform the boost::fusion::vector into memory specification (memory_traits)
Definition: memory_conf.hpp:84
inter_memc< typename T::type >::type type
for each element in the vector interleave memory_c
Definition: memory_conf.hpp:86
static void meta_copy_op_(const T &src, T &dst)
Meta-copy applying an operation.
Definition: meta_copy.hpp:808
This class copy general objects.
Definition: meta_copy.hpp:53
__device__ static __host__ void meta_copy_(const T &src, T &dst)
copy and object from src to dst
Definition: meta_copy.hpp:60
It create a boost::fusion vector with the selected properties.
This structure define the operation add to use with copy general.