13 #include "util/common.hpp"
14 #include "memory/PtrMemory.hpp"
15 #include "util/object_util.hpp"
16 #include "Grid/util.hpp"
17 #include "Vector/util.hpp"
18 #include "Vector/map_vector_grow_p.hpp"
19 #include "memory/ExtPreAlloc.hpp"
20 #include "util/util_debug.hpp"
21 #include "util/Pack_stat.hpp"
22 #include "Grid/map_grid.hpp"
23 #include "memory/HeapMemory.hpp"
24 #include "vect_isel.hpp"
25 #include "util/object_s_di.hpp"
27 #include "util/Pack_stat.hpp"
28 #include "memory/ExtPreAlloc.hpp"
30 #include "Packer_Unpacker/Unpacker.hpp"
31 #include "Packer_Unpacker/Packer.hpp"
33 #include "Packer_Unpacker/Packer_util.hpp"
34 #include "Packer_Unpacker/has_pack_agg.hpp"
36 #include "map_vector_std_util.hpp"
37 #include "data_type/aggregate.hpp"
38 #include "vector_map_iterator.hpp"
39 #include "util/cuda_util.hpp"
40 #include "cuda/map_vector_cuda_ker.cuh"
41 #include "map_vector_printers.hpp"
51 template<
typename vector_type1,
typename vector_type2>
57 template<
typename vector_type1,
typename vector_type2>
67 template<
typename vector_type1,
typename vector_type2>
73 auto it = v1.getGPUIterator();
74 CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
79 template<
typename vector_type1,
typename vector_type2>
85 auto it = v1.getGPUIterator();
86 CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
92 template<
bool is_ok_cuda,
typename T,
typename Memory,
93 template<
typename>
class layout_base,
101 template <
typename>
class layout_base2,
102 unsigned int ...args>
105 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error the function add_prp_device only work with cuda enabled vector" << std::endl;
109 template<
bool is_ok_cuda,
typename T,
typename Memory,
110 template<
typename>
class layout_base,
114 template <
typename S,
118 template <
typename>
class layout_base2,
119 unsigned int ...args>
124 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error the function merge_prp_device only work with cuda enabled vector" << std::endl;
128 template<
typename T,
typename Memory,
129 template<
typename>
class layout_base,
133 template <
typename S,
137 template <
typename>
class layout_base2,
138 unsigned int ...args>
143 #if defined(CUDA_GPU) && defined(__NVCC__)
145 size_t old_sz = this_.
size();
146 this_.resize(this_.
size() + v.size(),DATA_ON_DEVICE);
148 auto ite = v.getGPUIterator();
150 CUDA_LAUNCH((merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(
unsigned int)old_sz);
153 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error the function add_prp_device only work when map_vector is compiled with nvcc" << std::endl;
158 template<
typename T,
typename Memory,
159 template<
typename>
class layout_base,
163 template <
typename S,
167 template <
typename>
class layout_base2,
168 unsigned int ...args>
175 #if defined(CUDA_GPU) && defined(__NVCC__)
177 auto ite = v.getGPUIterator();
179 CUDA_LAUNCH((merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(
unsigned int)offset);
182 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error the function merge_prp_device only work when map_vector is compiled with nvcc" << std::endl;
202 template<
typename T,
typename Memory,
template<
typename>
class layout_base,
typename grow_p,
unsigned int impl>
214 std::cerr << __FILE__ <<
":" << __LINE__ <<
" Error stub vector created" << std::endl;
219 #include "map_vector_std.hpp"
220 #include "map_vector_std_ptr.hpp"
223 #include "cuda/map_vector_std_cuda.hpp"
241 template<
typename T,
typename Memory,
template <
typename>
class layout_base,
typename grow_p>
242 class vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE>
276 void check_overflow(
size_t id)
const
280 std::cerr <<
"Error " << __FILE__ <<
":" << __LINE__ <<
" overflow id: " <<
id <<
"\n";
281 throw std::invalid_argument(
"stacktrace" );
282 ACTION_ON_ERROR(VECTOR_ERROR_OBJECT);
318 template<
typename Tobj>
321 typedef layout_base<Tobj> type;
325 #include "vector_pack_unpack.ipp"
368 if (sp > base.size())
375 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
377 base_gpu.constructor_impl(v_size,this->base.toKernel());
399 size_t sz[1] = {
size()};
402 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
404 base_gpu.constructor_impl(v_size,this->base.toKernel());
421 void resize(
size_t slot,
size_t opt = DATA_ON_DEVICE | DATA_ON_HOST,
unsigned int blockSize = 1)
425 if (slot > base.size())
430 if (slot - base.size() == 1 && opt && (opt & EXACT_RESIZE) == 0)
432 gr = grow_p::grow(base.size(),slot);
438 base.resize(sz,opt,blockSize);
444 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
446 base_gpu.constructor_impl(v_size,this->base.toKernel());
464 if (slot > base.size())
466 size_t gr = grow_p::grow(base.size(),slot);
470 base.resize_no_device(sz);
476 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
478 base_gpu.constructor_impl(v_size,this->base.toKernel());
496 if (v_size >= base.size())
500 non_zero_one(sz,2*base.size());
507 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
509 base_gpu.constructor_impl(v_size,this->base.toKernel());
524 if (v_size >= base.size())
528 non_zero_one(sz,2*base.size());
529 base.resize_no_device(sz);
535 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
537 base_gpu.constructor_impl(v_size,this->base.toKernel());
554 if (v_size >= base.size())
558 non_zero_one(sz,2*base.size());
568 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
570 base_gpu.constructor_impl(v_size,this->base.toKernel());
588 if (v_size >= base.size())
592 non_zero_one(sz,2*base.size());
602 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
604 base_gpu.constructor_impl(v_size,this->base.toKernel());
617 for (
size_t i = 0 ; i < v.size() ; i++)
656 template <
template<
typename,
typename>
class op,
typename S,
typename M,
typename gp,
unsigned int ...args>
662 if (v.size() != opart.
size())
663 std::cerr << __FILE__ <<
":" << __LINE__ <<
" error merge_prp: v.size()=" << v.size() <<
" must be the same as o_part.size()" << opart.
size() << std::endl;
667 for (
size_t i = 0 ; i < v.size() ; i++)
671 if (opart.get(i) >
size())
672 std::cerr <<
"Error: " << __FILE__ <<
":" << __LINE__ <<
" try to access element " << opart.get(i) <<
" but the vector has size " <<
size() << std::endl;
676 object_s_di_op<op,decltype(v.get(i)),decltype(get(
size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.get(i)));
715 template <
template<
typename,
typename>
class op,
typename S,
typename M,
typename gp,
unsigned int ...args>
720 ::template run<S,M,gp,OPENFPM_NATIVE,layout_base,args...>(*
this,v,start);
759 template <
template<
typename,
typename>
class op,
763 template <
typename>
class layout_base2,
764 typename vector_opart_type,
765 unsigned int ...args>
767 const vector_opart_type & opart)
771 if (v.size() != opart.size())
772 std::cerr << __FILE__ <<
":" << __LINE__ <<
" error merge_prp: v.size()=" << v.size() <<
" must be the same as o_part.size()" << opart.size() << std::endl;
776 for (
size_t i = 0 ; i < v.size() ; i++)
780 if (i >= opart.size())
781 std::cerr <<
"Error: " << __FILE__ <<
":" << __LINE__ <<
" try to access element " << opart.template get<0>(i) <<
" but the vector has size " <<
size() << std::endl;
785 object_s_di_op<op,decltype(v.get(i)),decltype(get(
size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.template get<0>(i)));
825 template <
template<
typename,
typename>
class op,
829 template <
typename>
class layout_base2,
830 typename vector_opart_type,
831 unsigned int ...args>
834 const vector_opart_type & opart)
838 for (
size_t i = offset ; i < v.size() ; i++)
840 auto dst = v.get(opart.template get<0>(i2));
844 boost::mpl::for_each_ref< boost::mpl::range_c<
int,0,
sizeof...(args)> >(cp);
886 template <
template<
typename,
typename>
class op,
890 template <
typename>
class layout_base2,
891 typename vector_opart_type,
892 unsigned int ...args>
894 const vector_opart_type & opart,
900 if (v.size() != stop - start)
901 std::cerr << __FILE__ <<
":" << __LINE__ <<
" error merge_prp: v.size()=" << v.size() <<
" must be the same as stop - start" << stop - start << std::endl;
907 size_t sz[1] = {stop - start};
910 auto ite = nm.getGPUIterator();
913 CUDA_LAUNCH((merge_add_prp_device_impl_src_dst_opar_offset<op,
914 decltype(v.toKernel()),
915 decltype(this->toKernel()),
916 decltype(opart.toKernel()),
917 args...>),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);
921 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error you have to compile map_vector.hpp with nvcc to make GPU code working" << std::endl;
962 template <
template<
typename,
typename>
class op,
966 template <
typename>
class layout_base2,
967 typename vector_opart_type,
968 unsigned int ...args>
971 const vector_opart_type & opart)
975 if (v.size() < opart.size() + start)
976 std::cerr << __FILE__ <<
":" << __LINE__ <<
" error merge_prp: v.size()=" << v.size() <<
" must be snaller than o_part.size() + start " << opart.size() + start << std::endl;
982 auto ite = opart.getGPUIterator();
985 CUDA_LAUNCH((merge_add_prp_device_impl_src_offset_dst_opar<op,
986 decltype(v.toKernel()),
987 decltype(this->toKernel()),
988 decltype(opart.toKernel()),
989 args... >),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);
993 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error you have to compile map_vector.hpp with nvcc to make GPU code working" << std::endl;
1033 template <
template<
typename,
typename>
class op,
1037 template <
typename>
class layout_base2,
1038 unsigned int ...args>
1043 for (
size_t i = 0 ; i < v.size() ; i++)
1047 if (start + i >= v_size)
1048 std::cerr <<
"Error: " << __FILE__ <<
":" << __LINE__ <<
" try to access element " << start+i <<
" but the vector has size " <<
size() << std::endl;
1052 object_s_di_op<op,decltype(v.get(0)),decltype(get(0)),OBJ_ENCAP,args...>(v.get(i),get(start+i));
1070 template <
typename S,
1074 template <
typename>
class layout_base2,
1075 unsigned int ...args>
1079 for (
size_t i = 0 ; i < v.size() ; i++)
1085 object_s_di<decltype(v.get(i)),decltype(get(
size()-1)),OBJ_ENCAP,args...>(v.get(i),get(
size()-1));
1103 template <
typename S,
1107 template <
typename>
class layout_base2,
1108 unsigned int ...args>
1112 ::template run<S,M,gp,impl,layout_base2,args...>(*
this,v);
1124 long int d_k = (
long int)
size()-1;
1125 long int s_k = (
long int)
size()-2;
1128 while (s_k >= (
long int)key)
1145 size_t s_k = key + 1;
1148 while (s_k <
size())
1171 if (keys.size() <= start )
1174 size_t a_key = start;
1175 size_t d_k = keys.get(a_key);
1176 size_t s_k = keys.get(a_key) + 1;
1179 while (s_k <
size())
1182 while (a_key+1 < keys.size() && s_k == keys.get(a_key+1))
1185 s_k = keys.get(a_key) + 1;
1199 v_size -= keys.size() - start;
1213 if (keys.size() <= start )
1216 size_t a_key = start;
1217 size_t d_k = keys.template get<0>(a_key);
1218 size_t s_k = keys.template get<0>(a_key) + 1;
1221 while (s_k <
size())
1224 while (a_key+1 < keys.size() && s_k == keys.template get<0>(a_key+1))
1227 s_k = keys.template get<0>(a_key) + 1;
1241 v_size -= keys.size() - start;
1255 template <
unsigned int p>
1258 #if defined(SE_CLASS1) && !defined(__NVCC__)
1264 return base.template get<p>(key);
1288 #if defined(SE_CLASS1) && !defined(__NVCC__)
1293 return base.get_o(key);
1310 #if defined(SE_CLASS1) && !defined(__NVCC__)
1315 return base.get_o(key);
1323 template<
unsigned int id>
void fill(
unsigned char c)
1325 base.template fill<id>(c);
1336 return base.template getDeviceBuffer<id>();
1349 return base.template getDeviceBuffer<id>();
1362 return base.get_o(key);
1374 template <
unsigned int p>
1377 return this->
template get<p>(
id);
1389 template <
unsigned int p,
typename KeyType>
1393 return this->
template get<p>(
id.getKey());
1406 template <
unsigned int p,
typename keyType>
1409 return this->
template get<p>(
id.getKey());
1423 template <
unsigned int p>
1426 #if defined(SE_CLASS1) && !defined(__NVCC__)
1431 return base.template get<p>(key);
1446 check_valid(
this,8);
1448 #if defined(SE_CLASS1) && !defined(__NVCC__)
1453 return base.get_o(key);
1466 return base.get_o(key);
1485 dup.
base.swap(base.duplicate());
1487 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1489 dup.base_gpu.constructor_impl(v_size,dup.
base.toKernel());
1508 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1510 base_gpu.constructor_impl(v_size,this->base.toKernel());
1523 swap(v.duplicate());
1525 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1527 base_gpu.constructor_impl(v_size,this->base.toKernel());
1538 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1540 base_gpu.constructor_impl(v_size,this->base.toKernel());
1547 :v_size(sz),base(sz)
1551 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1553 base_gpu.constructor_impl(v_size,this->base.toKernel());
1588 template <
typename encap_S,
unsigned int ...args>
void set_o(
size_t i,
const encap_S & obj)
1591 object_s_di<encap_S,decltype(get(i)),OBJ_ENCAP,args...>(obj,get(i));
1600 void set(
size_t id,
const T & obj)
1621 base.set(
id,v.base,src);
1638 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1640 base_gpu.constructor_impl(v_size,this->base.toKernel());
1659 size_t rsz[1] = {v_size};
1660 if(rsz[0]>base.size()) {
1664 for (
size_t i = 0 ; i < v_size ; i++ )
1667 base.set(key,mv.base,key);
1670 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1672 base_gpu.constructor_impl(v_size,this->base.toKernel());
1695 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1697 base_gpu.constructor_impl(v_size,this->base.toKernel());
1715 v_size = mv.getInternal_v_size();
1716 size_t rsz[1] = {v_size};
1720 for (
size_t i = 0 ; i < v_size ; i++ )
1723 base.set(key,mv.getInternal_base(),key);
1726 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1728 base_gpu.constructor_impl(v_size,this->base.toKernel());
1746 template<
typename Mem,
template <
typename>
class layout_base2>
1752 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1754 base_gpu.constructor_impl(v_size,this->base.toKernel());
1770 template<
typename Mem,
1771 template <
typename>
class layout_base2,
1772 typename check =
typename std::enable_if<!std::is_same<typename layout_base2<T>::type,
typename layout_base<T>::type>::value >::type>
1776 v_size = mv.getInternal_v_size();
1777 size_t rsz[1] = {v_size};
1781 for (
size_t i = 0 ; i < v_size ; i++ )
1784 base.set_general(key,mv.getInternal_base(),key);
1787 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1789 base_gpu.constructor_impl(v_size,this->base.toKernel());
1805 return !this->operator==(v);
1815 if (v_size != v.v_size)
1819 for (
size_t i = 0 ; i < v_size ; i++ )
1823 if (base.get_o(key) != v.base.get_o(key))
1840 size_t sz_sp = v_size;
1845 base.swap_nomode(v.base);
1847 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1849 base_gpu.constructor_impl(v_size,this->base.toKernel());
1863 size_t sz_sp = v_size;
1872 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1874 base_gpu.constructor_impl(v_size,this->base.toKernel());
1875 v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());
1887 size_t sz_sp = v_size;
1895 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1897 base_gpu.constructor_impl(v_size,this->base.toKernel());
1898 v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());
1935 ite_gpu<1> getGPUIteratorTo(
long int stop,
size_t n_thr = default_kernel_wg_threads_)
const
1940 return base.getGPUIterator(start,stop_,n_thr);
1956 check_valid(
this,8);
1958 return getIterator();
1979 template<
unsigned int p>
1983 check_valid(
this,8);
1994 ite_gpu<1> getGPUIterator(
size_t n_thr = default_kernel_wg_threads_)
const
1999 return base.getGPUIterator(start,stop,n_thr);
2006 ite_gpu<1> getDomainIteratorGPU(
size_t n_thr = default_kernel_wg_threads_)
const
2008 return getGPUIterator(n_thr);
2020 return base.packObjectSize();
2032 return base.packObject(mem);
2045 template<
int ... prp>
static inline size_t calculateMem(
size_t n,
size_t e)
2053 if (
sizeof...(prp) == 0)
2054 return grow_p::grow(0,n) *
sizeof(
typename T::type);
2056 typedef object<
typename object_creator<
typename T::type,prp...>::type> prp_object;
2058 return grow_p::grow(0,n) *
sizeof(prp_object);
2072 template<
int ... prp>
static inline size_t packMem(
size_t n,
size_t e)
2074 if (
sizeof...(prp) == 0)
2075 return n *
sizeof(
typename T::type);
2077 typedef object<
typename object_creator<
typename T::type,prp...>::type> prp_object;
2079 return n *
sizeof(prp_object);
2101 template<
unsigned int p>
2104 return base.template getMemory<p>();
2114 base.template setMemory<p>(mem);
2116 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2118 base_gpu.constructor_impl(v_size,this->base.toKernel());
2130 base.setMemoryArray(mem);
2132 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2134 base_gpu.constructor_impl(v_size,this->base.toKernel());
2148 return base.template getPointer<p>();
2158 return base.getPointer();
2197 base.template hostToDevice<prp ...>();
2206 base.template deviceToHost<prp ...>();
2214 template<
unsigned int ... prp>
void deviceToHost(
size_t start,
size_t stop)
2216 base.template deviceToHost<prp ...>(start,stop);
2223 template<
unsigned int ... prp>
void hostToDevice(
size_t start,
size_t stop)
2225 base.template hostToDevice<prp ...>(start,stop);
2234 base.template hostToDeviceNUMA<prp ...>(start,stop);
2243 base.template hostToDeviceNUMA<prp ...>();
2246 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2271 inline const vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base> toKernel()
const
2275 return vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base>(base_gpu);
2303 if (base.size() == 0)
2304 {std::cout << __FILE__ <<
":" << __LINE__ <<
" Warning you are off-loading with toGPU a vector that seem to be empty or not initialized" << std::endl; }
2320 template<
unsigned int ... prps>
2321 const std::string
toString(std::string prefix = std::string())
2323 std::stringstream ss;
2324 auto it = getIterator();
2332 ss << prefix <<
" element[" << p <<
"]" <<
" ";
2335 boost::mpl::for_each_ref<boost::mpl::range_c<
int,0,
sizeof...(prps)>>(vp);
2345 void * internal_get_size_pointer() {
return &v_size;}
2349 #ifndef DISABLE_ALL_RTTI
2350 std::cout <<
"the size of: " << demangle(
typeid(self_type).name()) <<
" is " <<
sizeof(self_type) << std::endl;
2351 std::cout <<
" " << demangle(
typeid(decltype(v_size)).name()) <<
":" <<
sizeof(decltype(v_size)) << std::endl;
2352 std::cout <<
" " << demangle(
typeid(decltype(base)).name()) <<
":" <<
sizeof(decltype(base)) << std::endl;
2358 template <
typename T>
using vector_std = vector<T, HeapMemory, memory_traits_lin, openfpm::grow_policy_double, STD_VECTOR>;
2363 template<
typename T>
using vector_custd = vector<T, CudaMemory, memory_traits_inte, openfpm::grow_policy_double, STD_VECTOR>;
grid_key_dx is the key to access any element in the grid
Implementation of 1-D std::vector like structure.
const size_t & getInternal_v_size() const
Internal function.
void remove(openfpm::vector< size_t > &keys, size_t start=0)
Remove several entries from the vector.
static bool noPointers()
This class has pointer inside.
void remove(openfpm::vector< aggregate< int >> &keys, size_t start=0)
Remove several entries from the vector.
void hostToDevice()
Copy the memory from host to device.
size_t size_local() const
Return the size of the vector.
void set_o(size_t i, const encap_S &obj)
It set an element of the vector from a object that is a subset of the vector properties.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, unsigned int offset, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
void swap(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&v)
Swap the memory with another vector.
void add(const T &v)
It insert a new object on the vector, eventually it reallocate the grid.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&mv)
Assignment operator.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, size_t start)
It merge the elements of a source vector to this vector.
void non_zero_one(size_t sz[1], size_t arg)
If the argument is zero return 1 otherwise return the argument.
void add(const typename grid_base< 1, T, Memory, typename layout_base< T >::type >::container &v)
It insert a new object on the vector, eventually it reallocate the vector.
void * getDeviceBufferCopy()
It return the properties arrays.
void deviceToHost(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host.
bool operator==(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) const
Check that two vectors are equal.
void setMemory(Memory &mem)
Set the memory of the base structure using an object.
void clear()
Clear the vector.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > duplicate() const
It duplicate the vector.
const std::string toString(std::string prefix=std::string())
vector(vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&v)
Constructor from another temporal vector.
void setMemoryArray(Memory *mem)
Set the memory of the base structure using an object.
void hostToDevice(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host.
void add_no_device()
It insert a new empty object on the vector, eventually it reallocate the grid.
void remove(size_t key)
Remove one entry from the vector.
static size_t calculateNMem(size_t n)
How many allocation are required to create n-elements.
vector_gpu_ker< typename apply_transform< layout_base, T >::type, layout_base > toKernel()
Convert the grid into a data-structure compatible for computing into GPU.
auto getProp(const unsigned int &id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
static size_t calculateMem(size_t n, size_t e)
Calculate the memory size required to allocate n elements.
void swap_nomode(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v)
Swap the memory with another vector.
void swap(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v)
Swap the memory with another vector.
const void * getPointer() const
Return the pointer that store the data.
vector(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) THROW
Constructor from another constant vector.
static size_t packMem(size_t n, size_t e)
Calculate the memory size required to pack n elements.
size_t packObjectSize()
Return the size of the message needed to pack this object.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Mem, layout_base, gp, OPENFPM_NATIVE > &mv)
Assignment operator.
layout_base< T >::type layout_type
Type of the encapsulation memory parameter.
void set(size_t id, const T &obj)
Set the object id to obj.
vector_key_iterator getIteratorTo(size_t stop) const
Get iterator over the particles from 0 until a particular index.
void insert(size_t key)
Insert an entry in the vector.
vector(size_t sz) THROW
Constructor, vector of size sz.
size_t capacity()
return the maximum capacity of the vector before reallocation
void resize_no_device(size_t slot)
Resize the vector ()
const vector_gpu_ker< typename apply_transform< layout_base, T >::type, layout_base > toKernel() const
Convert the grid into a data-structure compatible for computing into GPU.
void merge_prp(const vector< S, M, layout_base, gp, OPENFPM_NATIVE > &v, const openfpm::vector< size_t > &opart)
It merge the elements of a source vector to this vector.
auto get(size_t id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
Memory Memory_type
Type of memory this vector use.
void add()
It insert a new empty object on the vector, eventually it reallocate the grid.
void set(size_t id, const typename grid_base< 1, T, Memory, typename layout_base< T >::type >::container &obj)
Set the object id to obj.
vector_key_iterator getIterator() const
Get the vector elements iterator.
int yes_i_am_vector
it define that it is a vector
void reserve(size_t sp)
Reserve slots in the vector to avoid reallocation.
size_t access_key
Access key for the vector.
vector_key_iterator iterator_key
iterator for the vector
T value_type
Type of the value the vector is storing.
bool isSubset() const
Indicate that this class is not a subset.
void resize(size_t slot, size_t opt=DATA_ON_DEVICE|DATA_ON_HOST, unsigned int blockSize=1)
Resize the vector.
vector_key_iterator getDomainIterator() const
Get the vector elements iterator.
auto get(size_t id) const -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
size_t packObject(void *mem)
Pack the object into the given pointer.
grid_base< 1, T, Memory, typename layout_base< T >::type >::container last()
Get the last element of the vector.
auto getProp(const KeyType &id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
auto get(size_t id) const -> const decltype(base.get_o(grid_key_dx< 1 >(id)))
Get an element of the vector.
vector_key_iterator_ele< p, self_type > getIteratorElements() const
Get the vector elements iterator.
grid_base< 1, T, Memory, typename layout_base< T >::type > base
1-D static grid
auto getProp(const keyType &id) const -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
~vector() THROW
Destructor.
void hostToDeviceNUMA()
Synchronize the memory buffer in the device with the memory in the host respecting NUMA domains.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Mem, layout_base, gp, OPENFPM_NATIVE > &&mv)
Assignment operator.
void add_prp_device(const vector< S, M, layout_base2, gp, impl > &v)
It add the element of a source vector to this vector.
const grid_base< 1, T, Memory, layout_type > & getInternal_base() const
Internal function.
void merge_prp_device(const vector< S, M, layout_base, gp, OPENFPM_NATIVE > &v, unsigned int start)
It merge the elements of a source vector to this vector (on device)
void * getPointer()
Return the pointer that store the data.
void fill(unsigned char c)
Fill the buffer with a byte.
int yes_i_am_vector_native
it define that it is a vector
const grid_base< 1, T, Memory, layout_type >::container last() const
Get the last element of the vector.
const grid_base< 1, T, Memory, typename layout_base< T >::type >::container get_o(size_t id) const
Get an element of the vector.
auto getMemory() -> decltype(base.template getMemory< p >())
Return the memory object.
void add(const vector< T, M, layout_base, gp, OPENFPM_NATIVE > &v)
It add the element of another vector to this vector.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &mv)
Assignment operator.
vector() THROW
Constructor, vector of size 0.
layout_base< T > layout_base_
Type of the encapsulation memory parameter.
grid_base< 1, T, Memory, typename layout_base< T >::type >::container container
Object container for T, it is the return type of get_o it return a object type trough.
void add_prp(const vector< S, M, layout_base2, gp, impl > &v)
It add the element of a source vector to this vector.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
vector< T, Memory, layout_base2, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Mem, layout_base2, grow_p, OPENFPM_NATIVE > &&mv)
Assignment operator.
vector_key_iterator getIteratorFrom(size_t start) const
Get iterator over the particles from a particular index.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Mem, layout_base2, grow_p, OPENFPM_NATIVE > &mv)
Assignment operator.
void * getDeviceBuffer()
It return the properties arrays.
void merge_prp_v_device(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, unsigned int start, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
void hostToDeviceNUMA(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host respecting NUMA domains.
void merge_prp_v_device(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, const vector_opart_type &opart, unsigned int start, unsigned int stop)
It merge the elements of a source vector to this vector.
grow_p grow_policy
growing policy of this vector
void shrink_to_fit()
Shrink the memory allocated by the vector to fit its current size.
size_t size() const
Return the size of the vector.
void set(size_t id, vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v, size_t src)
Set the element of the vector v from another element of another vector.
auto get(size_t id) -> decltype(base.get_o(grid_key_dx< 1 >(id)))
Get an element of the vector.
bool operator!=(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) const
Check that two vectors are not equal.
void deviceToHost()
Synchronize the memory buffer in the device with the memory in the host.
Implementation of 1-D std::vector like structure.
KeyT const ValueT ValueT OffsetIteratorT OffsetIteratorT int
[in] The number of segments that comprise the sorting data
convert a type into constant type
aggregate of properties, from a list of object it create a struct that follow the OPENFPM native stru...
It copy two encap object.
It create a boost::fusion vector with the selected properties.
It copy the properties from one object to another applying an operation.
It copy the properties from one object to another.
grid interface available when on gpu
grid interface available when on gpu
this class is a functor for "for_each" algorithm