8 #ifndef OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_
9 #define OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_
11 #include "memory_ly/memory_array.hpp"
12 #include "memory_ly/memory_c.hpp"
13 #include "memory_ly/memory_conf.hpp"
14 #include "hash_map/hopscotch_map.h"
15 #include "hash_map/hopscotch_set.h"
16 #include "Vector/map_vector.hpp"
17 #include "util/variadic_to_vmpl.hpp"
18 #include "data_type/aggregate.hpp"
19 #include "SparseGridUtil.hpp"
20 #include "SparseGrid_iterator.hpp"
21 #include "SparseGrid_iterator_block.hpp"
22 #include "SparseGrid_conv_opt.hpp"
26 #ifdef OPENFPM_DATA_ENABLE_IO_MODULE
28 #include "VTKWriter/VTKWriter.hpp"
34 template<
typename Tsrc,
typename Tdst>
55 typedef typename std::remove_reference<decltype(
src.template get<T::value>())>::type copy_rtype;
62 template<
typename Tsrc,
typename Tdst>
83 typedef typename std::remove_reference<decltype(
dst.template get<T::value>())>::type copy_rtype;
90 template<
unsigned int dim,
typename Tsrc,
typename Tdst>
115 typedef typename std::remove_reference<decltype(
dst.template insert<T::value>(
pos_dst))>::type copy_rtype;
125 template<
unsigned int prop,
typename Tsrc,
typename Tdst>
126 static void copy(
const Tsrc & src, Tdst & dst,
short int pos_id_src,
short int pos_id_dst)
128 typedef typename std::remove_reference<decltype(dst.template get<prop>()[pos_id_dst])>::type copy_rtype;
134 template<
typename T,
unsigned int N1>
137 template<
unsigned int prop,
typename Tsrc,
typename Tdst>
138 static void copy(
const Tsrc & src, Tdst & dst,
short int pos_id_src,
short int pos_id_dst)
140 typedef typename std::remove_reference<decltype(dst.template get<prop>()[0][pos_id_dst])>::type copy_rtype;
142 for (
int i = 0 ; i < N1 ; i++)
149 template<
typename T,
unsigned int N1,
unsigned int N2>
152 template<
unsigned int prop,
typename Tsrc,
typename Tdst>
153 static void copy(
const Tsrc & src, Tdst & dst,
short int pos_id_src,
short int pos_id_dst)
155 typedef typename std::remove_reference<decltype(dst.template get<prop>()[0][0][pos_id_dst])>::type copy_rtype;
157 for (
int i = 0 ; i < N1 ; i++)
159 for (
int j = 0 ; j < N2 ; j++)
167 template<
unsigned int dim,
typename Tsrc,
typename Tdst,
typename aggrType>
196 typedef typename boost::mpl::at<typename aggrType::type, T>::type copy_rtype;
203 template<
template<
typename,
typename>
class op,
unsigned int dim,
typename Tsrc,
typename Tdst,
unsigned int ... prp>
231 typedef typename boost::mpl::at<v_prp,boost::mpl::int_<T::value>>::type idx_type;
232 typedef typename std::remove_reference<decltype(
dst.template insert<idx_type::value>(
pos_dst))>::type copy_rtype;
252 template<
unsigned int dim,
typename mpl_v>
273 sz[T::value] = boost::mpl::at<mpl_v,boost::mpl::int_<T::value>>::type::value;
278 template<
unsigned int N>
281 template<
typename Vc_type>
282 static inline void load(Vc_type & Vc)
284 std::cout << __FILE__ <<
":" << __LINE__ <<
" unknown size " << std::endl;
291 template<
typename Vc_type>
292 static inline void load(Vc_type & Vc,
unsigned char * mask_sum)
301 template<
typename Vc_type>
302 static inline void load(Vc_type & Vc,
unsigned char * mask_sum)
312 template<
typename Vc_type>
313 static inline void load(Vc_type & Vc,
unsigned char * mask_sum)
325 template<
typename Vc_type>
326 static inline void load(Vc_type & Vc,
unsigned char * mask_sum)
342 template<
typename Vc_type>
343 static inline void load(Vc_type & Vc,
unsigned char * mask_sum)
355 Vc[10] = mask_sum[10];
356 Vc[11] = mask_sum[11];
357 Vc[12] = mask_sum[12];
358 Vc[13] = mask_sum[13];
359 Vc[14] = mask_sum[14];
360 Vc[15] = mask_sum[15];
364 template<
typename Vc_type>
365 inline Vc_type load_mask(
unsigned char * mask_sum)
374 template<
typename T,
typename aggr>
377 template<
unsigned int p,
typename chunks_type>
378 static void set(
const T & val, chunks_type & chunks,
unsigned int i)
384 template<
typename T,
unsigned int N1,
typename aggr>
387 template<
unsigned int p,
typename chunks_type>
388 static void set(
const T (& val)[N1], chunks_type & chunks,
unsigned int i)
390 for (
int i1 = 0 ; i1 < N1; i1++)
395 template<
typename T,
unsigned int N1,
unsigned int N2,
typename aggr>
398 template<
unsigned int p,
typename chunks_type>
399 static void set(
const T (& val)[N1][N2], chunks_type & chunks,
unsigned int i)
401 for (
int i1 = 0 ; i1 < N1; i1++)
403 for (
int i2 = 0 ; i2 < N2; i2++)
411 template<
unsigned int dim,
416 template<
typename>
class layout_base,
424 mutable long int cache[SGRID_CACHE];
494 template<
unsigned int n_ele>
497 unsigned char (& mask)[n_ele])
499 nele = (mask[sub_id])?nele-1:nele;
536 if (empty_v.
size() >= FLUSH_REMOVE)
546 for (
int i = empty_v.
size() - 1 ; i >= 0 ; i--)
553 header_mask.remove(empty_v);
590 for (
size_t i = 0 ; i < SGRID_CACHE ; i++)
605 for (
size_t i = 0 ; i < dim ; i++)
612 for (
size_t i = 0 ; i < dim ; i++)
613 {sz_i[i] = cs.
get(i) + 1;}
626 for (
size_t i = 0 ; i < SGRID_CACHE ; i++)
632 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(cpsz);
643 for (
size_t i = 0 ; i < dim ; i++)
656 for(
int i = 0 ; i < dim ; i++)
657 {
header_inf.last().pos.set_d(i,std::numeric_limits<long int>::min());};
662 auto & h = header_mask.last().mask;
664 for (
size_t i = 0 ; i < chunking::size::value ; i++)
668 for (
size_t i = 0 ; i < chunking::size::value ; i++)
691 for (
size_t k = 0 ; k < SGRID_CACHE; k++)
692 {
id += (
cache[k] == lin_id)?k+1:0;}
698 auto fnd =
map.find(lin_id);
699 if (fnd ==
map.end())
706 {active_cnk = fnd->second;}
765 for (
size_t k = 0 ; k < SGRID_CACHE; k++)
766 {
id += (
cache[k] == lin_id)?k+1:0;}
772 auto fnd =
map.find(lin_id);
773 if (fnd ==
map.end())
785 auto & h = header_mask.last().mask;
787 for (
size_t i = 0 ; i < chunking::size::value ; i++)
798 active_cnk = fnd->second;
820 auto & hm = header_mask.get(active_cnk);
822 exist = hm.mask[sub_id];
823 hc.nele = (exist)?hc.nele:hc.nele + 1;
824 hm.mask[sub_id] |= 1;
832 size_t active_cnk = 0;
835 pre_get(v1,active_cnk,sub_id,exist);
842 auto & hm = header_mask.get(active_cnk);
844 unsigned char swt = hm.mask[sub_id];
846 hc.nele = (swt)?hc.nele-1:hc.nele;
850 if (hc.nele == 0 && swt != 0)
853 empty_v.add(active_cnk);
866 static constexpr
unsigned int dims = dim;
878 typedef layout_base<T> memory_traits;
880 typedef chunking chunking_type;
882 typedef grid_lin linearizer_type;
936 template<
unsigned int p>
937 void setBackgroundValue(
const typename boost::mpl::at<
typename T::type,boost::mpl::int_<p>>::type & val)
939 for (
int i = 0 ; i < chunking::size::value ; i++)
958 template<
typename pointers_type,
959 typename headers_type,
960 typename result_type,
961 unsigned int ... prp >
962 static void unpack_headers(pointers_type & pointers, headers_type & headers, result_type & result,
int n_slot)
965 template<
unsigned int ... prp,
typename S2,
typename header_type,
typename ite_type,
typename context_type>
968 header_type & headers,
971 context_type& gpuContext,
972 rem_copy_opt opt = rem_copy_opt::NONE_OPT)
1006 return chunks.get_o(active_cnk);
1016 template <
unsigned int p,
typename r_type=decltype(get_selector<
typename boost::mpl::at<
typename T::type,boost::mpl::
int_<p>>::type >::
template get<p>(
chunks,0,0))>
1019 size_t active_cnk = 0;
1034 template <
unsigned int p,
typename r_type=decltype(get_selector<
typename boost::mpl::at<
typename T::type,boost::mpl::
int_<p>>::type >::
template get<p>(
chunks,0,0))>
1038 size_t sub_id = v1.
getPos();
1043 auto & hm = header_mask.get(active_cnk);
1047 hc.nele = (hm.mask[sub_id] & 1)?hc.nele:hc.nele + 1;
1048 hm.mask[sub_id] |= 1;
1050 return get_selector<
typename boost::mpl::at<
typename T::type,boost::mpl::int_<p>>::type >::
template get<p>(
chunks,active_cnk,sub_id);
1060 template <
unsigned int p>
1067 pre_get(v1,active_cnk,sub_id,exist);
1073 auto & hm = header_mask.get(active_cnk);
1075 if ((hm.mask[sub_id] & 1) == 0)
1096 template <
unsigned int p>
1103 pre_get(v1,active_cnk,sub_id,exist);
1110 auto & hm = header_mask.get(active_cnk);
1112 if ((hm.mask[sub_id] & 1) == 0)
1131 pre_get(v1,active_cnk,sub_id,exist);
1137 auto & hm = header_mask.get(active_cnk);
1139 if ((hm.mask[sub_id] & 1) == 0)
1152 template <
unsigned int p>
1155 return chunks.template get<p>(v1.getChunk())[v1.getPos()];
1167 return chunks.get(v1.getChunk());
1213 template<
unsigned int stencil_size = 0>
1214 grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,self,chunking>
1217 return grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,self,chunking>(*
this,start,stop);
1260 template<
typename stencil_type>
1286 bool is_bigger =
true;
1291 for (
size_t i = 0 ; i < dim ; i++)
1293 if (sz[i] <
g_sm.size(i))
1294 {is_bigger =
false;}
1297 g_sm.setDimensions(sz);
1305 if (is_bigger ==
true)
1322 for (
size_t i = 0 ; i < dim ; i++)
1337 for (
size_t j = 0 ; j < dim ; j++)
1358 inte -= inte.
getP1();
1361 short unsigned int mask_it[chunking::size::value];
1363 auto & mask = header_mask.get(i).mask;
1368 fill_mask(mask_it,mask,mask_nele);
1372 for (
size_t j = 0 ; j < mask_nele ; j++)
1378 remove_from_chunk<chunking::size::value>(mask_it[j],n_ele,mask);
1393 header_mask.remove(rmh,0);
1409 template<
int ... prp>
static inline size_t packMem(
size_t n,
size_t e)
1411 if (
sizeof...(prp) == 0)
1412 {
return n *
sizeof(
typename T::type);}
1414 typedef object<
typename object_creator<
typename T::type,prp...>::type> prp_object;
1416 return n *
sizeof(prp_object);
1435 template<
int ... prp,
typename context_type>
inline
1448 template<
int ... prp>
inline
1455 req +=
sizeof(size_t);
1456 req += dim*
sizeof(size_t);
1462 auto & hm = header_mask.get(i);
1465 short unsigned int mask_it[chunking::size::value];
1467 fill_mask(mask_it,hm.mask,mask_nele);
1469 for (
size_t j = 0 ; j < mask_nele ; j++)
1476 size_t alloc_ele = this->
packMem<prp...>(1,0);
1484 ::call_packRequest(
chunks.get_o(i),mask_it[j],req);
1489 req +=
sizeof(header_mask.get(i).mask);
1515 template<
int ... prp>
inline
1523 req +=
sizeof(size_t);
1524 req += dim*
sizeof(size_t);
1530 for (
size_t i = 0; i < dim ; i++)
1533 section_to_pack.setHigh(i,sub_it.
getStop().
get(i));
1538 auto & hm = header_mask.get(i);
1542 for (
size_t j = 0 ; j < dim ; j++)
1551 bool stp = bc.
Intersect(section_to_pack,inte);
1562 size_t old_req = req;
1567 auto key = sit.
get();
1569 size_t sub_id = gs_cnk.
LinId(key);
1571 if (hm.mask[sub_id] & 1)
1578 size_t alloc_ele = this->
packMem<prp...>(1,0);
1587 ::call_packRequest(
chunks.get_o(i),sub_id,req);
1597 req +=
sizeof(header_mask.get(i));
1626 size_t * number_of_chunks = (
size_t *)mem.
getPointer();
1630 for (
size_t i = 0 ; i < dim ; i++)
1637 for (
size_t i = 0; i < dim ; i++)
1643 size_t n_packed_chunk = 0;
1648 auto & hm = header_mask.get(i);
1652 for (
size_t j = 0 ; j < dim ; j++)
1654 bc.
setLow(j,hc.pos.get(j));
1661 bool stp = bc.
Intersect(section_to_pack,inte);
1666 bool has_packed =
false;
1668 unsigned char mask_to_pack[chunking::size::value];
1669 memset(mask_to_pack,0,
sizeof(mask_to_pack));
1675 unsigned char * ptr_start = (
unsigned char *)mem.
getPointer();
1680 inte -= hc.pos.toPoint();
1688 auto key = sit.
get();
1690 size_t sub_id = gs_cnk.
LinId(key);
1692 if (hm.mask[sub_id] & 1)
1696 PACKER_ENCAP_OBJECTS_CHUNKING>::template
pack<T,prp...>(mem,
chunks.get_o(i),sub_id,sts);
1698 mask_to_pack[sub_id] |= 1;
1706 if (has_packed ==
true)
1708 unsigned char * ptr_final = (
unsigned char *)mem.
getPointer();
1709 unsigned char * ptr_final_for = (
unsigned char *)mem.
getPointerEnd();
1712 size_t shift = ptr_final - ptr_start;
1720 Packer<decltype(header_mask.get(i).mask),S>::pack(mem,mask_to_pack,sts);
1724 size_t shift_for = ptr_final_for - (
unsigned char *)mem.
getPointer();
1739 *number_of_chunks = n_packed_chunk;
1764 template<
unsigned int ... prp,
typename context_type>
1776 template<
unsigned int ... prp,
typename context_type>
1822 for (
size_t i = 0 ; i < dim ; i++)
1829 auto & hm = header_mask.get(i);
1832 Packer<decltype(hm.mask),S>::pack(mem,hm.mask,sts);
1833 Packer<decltype(hc.pos),S>::pack(mem,hc.pos,sts);
1834 Packer<decltype(hc.nele),S>::pack(mem,hc.nele,sts);
1839 short unsigned int mask_it[chunking::size::value];
1841 fill_mask(mask_it,hm.mask,mask_nele);
1843 for (
size_t j = 0 ; j < mask_nele ; j++)
1847 PACKER_ENCAP_OBJECTS_CHUNKING>::template
pack<T,prp...>(mem,
chunks.get_o(i),mask_it[j],sts);
1901 auto & hm = header_mask.get(i);
1906 for (
size_t j = 0 ; j < dim ; j++)
1908 bc.
setLow(j,hc.pos.get(j));
1915 bool stp = bc.
Intersect(section_to_delete,inte);
1930 auto key = sit.
get();
1932 size_t sub_id = gs_cnk.
LinId(key);
1934 unsigned char swt = header_mask.get(i).mask[sub_id];
1936 hc.nele = (swt)?hc.nele-1:hc.nele;
1937 hm.mask[sub_id] = 0;
1939 if (hc.nele == 0 && swt != 0)
1953 void copy_to(
const self & grid_src,
1973 auto it = grid_src.getIterator(box_src.
getKP1(),box_src.
getKP2());
1977 auto key_src = it.get();
1979 key_dst -= box_src.
getKP1();
1980 auto key_src_s = it.getKeyF();
1982 typedef typename std::remove_const<
typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
1984 size_t pos_src_id = key_src_s.getPos();
1989 auto block_dst = this->
insert_o(key_dst,pos_dst_id);
1991 auto block_src = grid_src.getBlock(key_src_s);
1993 copy_sparse_to_sparse_bb<dim,decltype(block_src),decltype(block_dst),T> caps(block_src,block_dst,pos_src_id,pos_dst_id);
1994 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(caps);
2002 template<
template <
typename,
typename>
class op,
unsigned int ... prp >
2003 void copy_to_op(
const self & grid_src,
2007 auto it = grid_src.getIterator(box_src.
getKP1(),box_src.
getKP2());
2011 auto key_src = it.get();
2013 key_dst -= box_src.
getKP1();
2015 typedef typename std::remove_const<
typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
2018 boost::mpl::for_each_ref< boost::mpl::range_c<
int,0,
sizeof...(prp)> >(caps);
2064 template<
unsigned int prop_src,
unsigned int prop_dst,
unsigned int stencil_size,
unsigned int N,
typename lambda_f,
typename ... ArgsT >
2070 {
conv_impl<dim>::template conv<false,NNStar_c<dim>,prop_src,prop_dst,stencil_size>(stencil,start,stop,*
this,func);}
2085 template<
unsigned int prop_src,
unsigned int prop_dst,
unsigned int stencil_size,
typename lambda_f,
typename ... ArgsT >
2106 template<
unsigned int stencil_size,
typename prop_type,
typename lambda_f,
typename ... ArgsT >
2109 if (layout_base<
aggregate<int>>::type_value::value != SOA_layout_IA)
2111 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error this function can be only used with the SOA version of the data-structure" << std::endl;
2128 template<
unsigned int prop_src1,
unsigned int prop_src2 ,
unsigned int prop_dst1,
unsigned int prop_dst2 ,
unsigned int stencil_size,
unsigned int N,
typename lambda_f,
typename ... ArgsT >
2134 {
conv_impl<dim>::template conv2<false,NNStar_c<dim>,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(stencil,start,stop,*
this,func);}
2136 {
conv_impl<dim>::template conv2<true,NNStar_c<dim>,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(stencil,start,stop,*
this,func);}
2145 template<
unsigned int prop_src1,
unsigned int prop_src2 ,
unsigned int prop_dst1,
unsigned int prop_dst2 ,
unsigned int stencil_size,
typename lambda_f,
typename ... ArgsT >
2151 {
conv_impl<dim>::template conv_cross2<false,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(start,stop,*
this,func);}
2153 {
conv_impl<dim>::template conv_cross2<true,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(start,stop,*
this,func);}
2167 template<
unsigned int ... prp,
typename S2,
typename context_type>
2171 context_type& gpuContext,
2174 short unsigned int mask_it[chunking::size::value];
2183 for (
size_t i = 0 ; i < dim ; i++)
2190 header_inf_tmp.resize(n_chunks);
2191 header_mask_tmp.resize(n_chunks);
2192 chunks_tmp.resize(n_chunks);
2194 for (
size_t i = 0 ; i < n_chunks ; i++)
2196 auto & hc = header_inf_tmp.get(i);
2197 auto & hm = header_mask_tmp.get(i);
2199 Unpacker<
typename std::remove_reference<decltype(header_mask.get(i).mask)>::type ,S2>
::unpack(mem,hm.mask,ps);
2205 fill_mask(mask_it,hm.mask,hc.nele);
2211 for (
size_t k = 0 ; k < hc.nele ; k++)
2215 for (
size_t i = 0 ; i < dim ; i++)
2222 PACKER_ENCAP_OBJECTS_CHUNKING>::template
unpack<T,prp...>(mem,
chunks.get_o(active_cnk),ele_id,ps);
2237 template<
unsigned int ... prp,
typename S2>
2253 for (
size_t i = 0 ; i < dim ; i++)
2256 g_sm.setDimensions(sz);
2257 for (
size_t i = 0 ; i < dim ; i++)
2271 unpack<prp...>(mem,sub_it,ps,gpuContext,rem_copy_opt::NONE_OPT);
2284 template<
template<
typename,
typename>
class op,
typename S2,
unsigned int ... prp>
2289 short unsigned int mask_it[chunking::size::value];
2298 for (
size_t i = 0 ; i < dim ; i++)
2305 header_inf_tmp.resize(n_chunks);
2306 header_mask_tmp.resize(n_chunks);
2307 chunks_tmp.resize(n_chunks);
2309 for (
size_t i = 0 ; i < n_chunks ; i++)
2311 auto & hc = header_inf_tmp.get(i);
2312 auto & hm = header_mask_tmp.get(i);
2320 fill_mask(mask_it,hm.mask,hc.nele);
2326 for (
size_t k = 0 ; k < hc.nele ; k++)
2330 for (
size_t i = 0 ; i < dim ; i++)
2333 bool exist =
pre_insert(v1,active_cnk,ele_id);
2339 PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack_op<
replace_,prp...>(mem,
chunks.get_o(active_cnk),ele_id,ps);
2345 PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack_op<op,prp...>(mem,
chunks.get_o(active_cnk),ele_id,ps);
2357 template <
typename stencil = no_stencil>
2386 for (
size_t i = 0 ; i < dim ; i++)
2388 key_out.
set_d(i,ph.get(i) + pos_h.get(i));
2401 for (
size_t i = 0 ; i < SGRID_CACHE ; i++)
2410 header_mask = sg.header_mask;
2415 for (
size_t i = 0 ; i < chunking::size::value ; i++)
2421 for (
size_t i = 0 ; i < dim ; i++)
2424 empty_v = sg.empty_v;
2440 header_mask_tmp.resize(header_mask.
size());
2448 bool operator<(
const pair_int & tmp)
const
2467 srt.get(i).id = lin_id;
2475 for (
int i = 0 ; i < srt.
size() ; i++)
2477 chunks_tmp.get(i) =
chunks.get(srt.get(i).pos);
2478 header_inf_tmp.get(i) =
header_inf.get(srt.get(i).pos);
2479 header_mask_tmp.get(i) = header_mask.get(srt.get(i).pos);
2484 header_mask_tmp.swap(header_mask);
2503 for (
size_t i = 0 ; i < SGRID_CACHE ; i++)
2505 cache[i] = sg.cache[i];
2512 header_mask.swap(sg.header_mask);
2517 for (
size_t i = 0 ; i < chunking::size::value ; i++)
2523 for (
size_t i = 0 ; i < dim ; i++)
2524 {
sz_cnk[i] = sg.sz_cnk[i];}
2526 empty_v = sg.empty_v;
2548 header_mask.resize(1);
2565 #ifdef OPENFPM_DATA_ENABLE_IO_MODULE
2572 template<
typename Tw =
float>
bool write(
const std::string & output)
2574 file_type ft = file_type::BINARY;
2585 auto key = it.getKey();
2586 auto keyg = it.getKeyF();
2590 for (
size_t i = 0 ; i < dim ; i++)
2591 {p.get(i) = keyg.get(i);}
2597 cp(
chunks.get_o(key.getChunk()),tmp_prp.last(),key.getPos());
2599 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);
2606 vtk_writer.add(tmp_pos,tmp_prp,tmp_pos.
size());
2611 return vtk_writer.write(output,prp_names,
"sparse_grid",ft);
2709 for (
int i = 1 ; i < header_mask.
size() ; i++)
2711 auto & m = header_mask.get(i);
2715 for (
int j = 0 ; j < chunking::size::value ; j++)
2717 if (m.mask[j] & 0x1)
2723 std::cout << __FILE__ <<
":" << __LINE__ <<
" error chunk: " << i <<
" has " << np_mask <<
" points but header report " <<
header_inf.get(i).nele << std::endl;
2730 std::cout << __FILE__ <<
":" << __LINE__ <<
" Total point is inconsistent: " <<
size() <<
" " << tot << std::endl;
2735 template<
unsigned int dim,
Point< dim, T > getP1() const
Get the point p1.
__device__ __host__ bool Intersect(const Box< dim, T > &b, Box< dim, T > &b_out) const
Intersect.
__host__ __device__ bool isInside(const Point< dim, T > &p) const
Check if the point is inside the box.
grid_key_dx< dim > getKP1() const
Get the point p1 as grid_key_dx.
grid_key_dx< dim > getKP2() const
Get the point p2 as grid_key_dx.
__device__ __host__ void setHigh(int i, T val)
set the high interval of the box
__device__ __host__ void setLow(int i, T val)
set the low interval of the box
bool isContained(const Box< dim, T > &b) const
Check if the box is contained.
void shift_backward(size_t sz)
shift the pointer backward
void shift_forward(size_t sz)
shift the pointer forward
virtual void * getPointer()
Return the pointer of the last allocation.
virtual bool allocate(size_t sz)
Allocate a chunk of memory.
bool allocate_nocheck(size_t sz)
Allocate a chunk of memory.
void * getPointerEnd()
Return the end pointer of the previous allocated memory.
static void pack(ExtPreAlloc< Mem >, const T &obj)
Error, no implementation.
This class implement the point shape in an N-dimensional space.
static void unpack(ExtPreAlloc< Mem >, T &obj)
Error, no implementation.
void operator()(T &t) const
It call the copy function for each property.
void operator()(T &t) const
It call the copy function for each property.
short int pos_id_src
source position
short int pos_id_dst
destination position
void operator()(T &t) const
It call the copy function for each property.
grid_key_dx< dim > & pos_src
source position
void operator()(T &t) const
It call the copy function for each property.
to_boost_vmpl< prp... >::type v_prp
Convert the packed properties into an MPL vector.
grid_key_dx< dim > & pos_dst
destination position
grid_key_dx< dim > & pos_src
source position
grid_key_dx< dim > & pos_dst
destination position
void operator()(T &t) const
It call the copy function for each property.
Declaration grid_key_dx_iterator_sub.
bool isNext()
Check if there is the next element.
grid_key_dx< dim > get() const
Return the actual grid key iterator.
const grid_key_dx< dim > & get() const
Get the actual key.
bool isNext()
Check if there is the next element.
__host__ __device__ Point< dim, typeT > toPoint() const
Convert to a point the grid_key_dx.
__device__ __host__ void set_d(index_type i, index_type id)
Set the i index.
__device__ __host__ index_type get(index_type i) const
Get the i index.
Grid key sparse iterator on a sub-part of the domain.
const grid_key_dx< dim > & getStop() const
Return the stop point for the iteration.
const grid_key_dx< dim > & getStart() const
Return the starting point for the iteration.
Grid key sparse iterator.
It store the position in space of the sparse grid.
size_t getPos() const
Return the linearized position in the chunk.
size_t getChunk() const
Return the chunk id.
mem_id LinId(const grid_key_dx< N, ids_type > &gk, const signed char sum_id[N]) const
Linearization of the grid_key_dx with a specified shift.
class that stores the information of the grid, like the number of points in each direction, and defines the in...
Implementation of 1-D std::vector like structure.
sgrid_cpu & operator=(sgrid_cpu &&sg)
copy a sparse grid
static bool is_unpack_header_supported()
Indicate that unpacking the header is supported.
auto get(const grid_key_dx< dim > &v1) const -> decltype(get_selector< typename boost::mpl::at< typename T::type, boost::mpl::int_< p >>::type >::template get_const< p >(chunks, 0, 0))
Get the reference of the selected element.
void find_active_chunk(const grid_key_dx< dim > &kh, size_t &active_cnk, bool &exist) const
Given a key, return the chunk that contains that key; in case that chunk does not exist return the key ...
sgrid_cpu & operator=(const sgrid_cpu &sg)
copy a sparse grid
void clear_cache()
reset the cache
void pack(ExtPreAlloc< S > &mem, Pack_stat &sts) const
Pack the object into the memory given an iterator.
bool pre_insert(const grid_key_dx< dim > &v1, size_t &active_cnk, size_t &sub_id)
Before insert data you have to do this.
void consistency()
This function check the consistency of the sparse grid.
sgrid_cpu()
Trivial constructor.
grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > getIterator(const grid_key_dx< dim > &start, const grid_key_dx< dim > &stop, size_t opt=0) const
Return an iterator over a sub-grid.
void remove_empty()
Eliminate empty chunks.
openfpm::vector< cheader< dim > > & private_get_header_inf()
return the header section of the blocks
void conv(int(&stencil)[N][dim], grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
void resetFlush()
It does nothing.
void pre_get(const grid_key_dx< dim > &v1, size_t &active_cnk, size_t &sub_id, bool &exist) const
void removeAddUnpackFinalize(const context_type &gpuContext, int opt)
In this case it does nothing.
void set_g_shift_from_size(const size_t(&sz)[dim], grid_lin &g_sm_shift)
set the grid shift from size
long int cached_id[SGRID_CACHE]
cached id
void setBackgroundValue(const typename boost::mpl::at< typename T::type, boost::mpl::int_< p >>::type &val)
Set the background value for the property p.
r_type insert(const grid_key_sparse_lin_dx &v1)
Get the reference of the selected element.
bool isSkipLabellingPossible()
This function check if keep geometry is possible for this grid.
long int cache[SGRID_CACHE]
cache
sparse_grid_bck_value< typename std::remove_reference< decltype(chunks.get(0))>::type > getBackgroundValue()
Get the background value.
tsl::hopscotch_map< size_t, size_t > map
Map to convert from grid coordinates to chunk.
static constexpr unsigned int dims
expose the dimensionality as a static constant
grid_key_sparse_dx_iterator< dim, chunking::size::value > getIterator(size_t opt=0) const
Return a Domain iterator.
void remove(const grid_key_dx< dim > &v1)
Remove the point.
void find_active_chunk_from_point(const grid_key_dx< dim > &v1, size_t &active_cnk, short int &sub_id)
Given a key, return the chunk that contains that key; in case that chunk does not exist return the key ...
unsigned char getFlag(const grid_key_dx< dim > &v1) const
Get the point flag (in this case it always returns 0)
grid_key_dx< dim > pos_chunk[chunking::size::value]
conversion position in the chunks
void unpack(ExtPreAlloc< S2 > &mem, Unpack_stat &ps)
unpack the sub-grid object
openfpm::vector< int > & private_get_nnlist()
return the NN list for each block
void removeAddUnpackReset()
In this case it does nothing.
auto getBlock(const grid_key_sparse_lin_dx &v1) const -> decltype(chunks.get(0))
Get the reference of the selected block.
openfpm::vector< aggregate_bfv< chunk_def >, S, layout_base > chunks
vector of chunks
size_t size_inserted()
Get the number of inserted points.
static grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > type_of_subiterator()
This is a meta-function return which type of sub iterator a grid produce.
static constexpr bool isCompressed()
This is a multiresolution sparse grid so is a compressed format.
void convert_key(grid_key_dx< dim > &key_out, const grid_key_sparse_lin_dx &key_in) const
Here we convert the linearized sparse key into the grid_key_dx.
grid_key_sparse_dx_iterator_block_sub< dim, stencil_size, self, chunking > getBlockIterator(const grid_key_dx< dim > &start, const grid_key_dx< dim > &stop)
Return an iterator over a sub-grid.
void clear()
delete all the points
grid_lin g_sm
grid size information
bool findNN
bool that indicate if the NNlist is filled
grid_lin g_sm_shift
grid size information with shift
void removeCopyToFinalize(const context_type &gpuContext, int opt)
In this case it does nothing.
void internal_clear_cache()
This is an internal function to clear the cache.
void setMemory()
It does materially nothing.
void packFinalize(ExtPreAlloc< S > &mem, Pack_stat &sts, int opt, bool is_pack_remote)
Pack finalize Finalize the pack of this object. In this case it does nothing.
void copyRemoveReset()
Reset the queue to remove and copy section of grids.
const grid_lin & getGrid() const
Return the internal grid information.
void conv_cross_ids(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution from start to stop point using the function func and arguments args
void packRequest(size_t &req) const
Insert an allocation request.
void unpack_with_op(ExtPreAlloc< S2 > &mem, grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > &sub2, Unpack_stat &ps)
unpack the sub-grid object applying an operation
sgrid_cpu(const sgrid_cpu &&g) THROW
create a sparse grid from another grid
void pack(ExtPreAlloc< S > &mem, grid_key_sparse_dx_iterator_sub< dims, chunking::size::value > &sub_it, Pack_stat &sts)
Pack the object into the memory given an iterator.
void reconstruct_map()
reconstruct the map
openfpm::vector< aggregate_bfv< chunk_def >, S, layout_base > & private_get_data()
return the data of the blocks
void expandAndTagBoundaries(grid_key_dx< dim > &start, grid_key_dx< dim > &stop)
Expand and tag boundaries.
void conv2(int(&stencil)[N][dim], grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
void conv_cross(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution from start to stop point using the function func and arguments args
const openfpm::vector< cheader< dim > > & private_get_header_inf() const
return the header section of the blocks
void conv_cross2(grid_key_dx< 3 > start, grid_key_dx< 3 > stop, lambda_f func, ArgsT ... args)
apply a convolution using the stencil N
void flush_remove()
Remove the point.
void remove(Box< dim, long int > §ion_to_delete)
Remove all the points in this region.
grid_key_dx< dim > base_key
base_key for the grid
int yes_i_am_grid
it define that this data-structure is a grid
sgrid_cpu(const size_t(&sz)[dim])
Constructor for sparse grid.
aggregate_bfv< chunk_def > background_type
Background type.
void packRequest(grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > &sub_it, size_t &req) const
Insert an allocation request.
void reorder()
Reorder based on index.
size_t cache_pnt
cache pointer
void remove_from_chunk(size_t sub_id, int &nele, unsigned char(&mask)[n_ele])
Remove.
void remove_no_flush(const grid_key_dx< dim > &v1)
Remove the point but do not flush the remove.
bool existPoint(const grid_key_dx< dim > &v1) const
Check if the point exist.
auto get(const grid_key_sparse_lin_dx &v1) -> decltype(chunks.template get< p >(0)[0])
Get the reference of the selected element.
static grid_key_sparse_dx_iterator< dim, chunking::size::value > type_of_iterator()
This is a meta-function return which type of sub iterator a grid produce.
void packReset()
Reset the pack calculation.
void add_on_cache(size_t lin_id, size_t active_cnk) const
add on cache
openfpm::vector< int > NNlist
for each chunk store the neighborhood chunks
openfpm::vector< mheader< chunking::size::value > > & private_get_header_mask()
return the header section of the blocks
size_t getChunk(grid_key_dx< dim > &v1, bool &exist)
Given a grid point it returns the chunk containing that point. In case the point does not exist it retu...
const openfpm::vector< mheader< chunking::size::value > > & private_get_header_mask() const
return the header section of the blocks
static void unpack_headers(pointers_type &pointers, headers_type &headers, result_type &result, int n_slot)
Stub does not do anything.
const openfpm::vector< aggregate_bfv< chunk_def > > & private_get_data() const
return the data of the blocks
auto get(const grid_key_dx< dim > &v1) -> decltype(get_selector< typename boost::mpl::at< typename T::type, boost::mpl::int_< p >>::type >::template get_const< p >(chunks, 0, 0))
Get the reference of the selected element.
static size_t packMem(size_t n, size_t e)
Calculate the memory size required to pack n elements.
auto insert_o(const grid_key_dx< dim > &v1, size_t &ele_id) -> decltype(chunks.get_o(0))
Insert a full element (with all properties)
void unpack(ExtPreAlloc< S2 > &mem, grid_key_sparse_dx_iterator_sub< dims, chunking::size::value > &sub_it, Unpack_stat &ps, context_type &gpuContext, rem_copy_opt opt)
unpack the sub-grid object
void packCalculate(size_t &req, const context_type &gpuContext)
Calculate the size of the information to pack.
sgrid_cpu(const sgrid_cpu &g) THROW
create a sparse grid from another grid
auto getBackgroundValueAggr() -> decltype(chunks.get(0))
Get the background value.
openfpm::vector< cheader< dim >, S > header_inf
indicate which element in the chunk are really filled
grid_key_dx< dim > getChunkPos(size_t chunk_id)
Get the position of a chunk.
void resize(const size_t(&sz)[dim])
Resize the grid.
r_type insert(const grid_key_dx< dim > &v1)
Get the reference of the selected element.
grid_key_sparse_dx_iterator_sub< dim, chunking::size::value > sub_grid_iterator_type
sub-grid iterator type
size_t sz_cnk[dim]
size of the chunk
KeyT const ValueT ValueT OffsetIteratorT OffsetIteratorT int
[in] The number of segments that comprise the sorting data
An aggregate that accept a boost fusion vector as type.
aggregate of properties; from a list of objects it creates a struct that follows the OpenFPM native stru...
this class is a functor for "for_each" algorithm
copy_sz(size_t(&sz)[dim])
constructor
size_t(& sz)[dim]
sz size_t
void operator()(T &t) const
It call the copy function for each property.
return true if the aggregate type T has a property that has a complex packing (serialization) method
Transform the boost::fusion::vector into memory specification (memory_traits)
inter_memc< typename T::type >::type type
for each element in the vector interleave memory_c
It create a boost::fusion vector with the selected properties.
This structure define the operation add to use with copy general.