11#include "util/cuda_launch.hpp"
14#include "util/common.hpp"
15#include "memory/PtrMemory.hpp"
16#include "util/object_util.hpp"
17#include "Grid/util.hpp"
18#include "Vector/util.hpp"
19#include "Vector/map_vector_grow_p.hpp"
20#include "memory/ExtPreAlloc.hpp"
21#include "util/util_debug.hpp"
22#include "util/Pack_stat.hpp"
23#include "Grid/map_grid.hpp"
24#include "memory/HeapMemory.hpp"
25#include "vect_isel.hpp"
26#include "util/object_s_di.hpp"
28#include "util/Pack_stat.hpp"
29#include "memory/ExtPreAlloc.hpp"
31#include "Packer_Unpacker/Unpacker.hpp"
32#include "Packer_Unpacker/Packer.hpp"
34#include "Packer_Unpacker/Packer_util.hpp"
35#include "Packer_Unpacker/has_pack_agg.hpp"
37#include "map_vector_std_util.hpp"
38#include "data_type/aggregate.hpp"
39#include "vector_map_iterator.hpp"
40#include "util/cuda_util.hpp"
41#include "cuda/map_vector_cuda_ker.cuh"
42#include "map_vector_printers.hpp"
50 template<
typename vector_type1,
typename vector_type2>
56 template<
typename vector_type1,
typename vector_type2>
66 template<
typename vector_type1,
typename vector_type2>
72 auto it = v1.getGPUIterator();
73 CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
78 template<
typename vector_type1,
typename vector_type2>
84 auto it = v1.getGPUIterator();
85 CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
91 template<
bool is_ok_cuda,
typename T,
typename Memory,
92 template<
typename>
class layout_base,
100 template <
typename>
class layout_base2,
101 unsigned int ...args>
104 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error the function add_prp_device only work with cuda enabled vector" << std::endl;
108 template<
bool is_ok_cuda,
typename T,
typename Memory,
109 template<
typename>
class layout_base,
113 template <
typename S,
117 template <
typename>
class layout_base2,
118 unsigned int ...args>
123 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error the function merge_prp_device only work with cuda enabled vector" << std::endl;
127 template<
typename T,
typename Memory,
128 template<
typename>
class layout_base,
132 template <
typename S,
136 template <
typename>
class layout_base2,
137 unsigned int ...args>
142 #if defined(CUDA_GPU) && defined(__NVCC__)
144 size_t old_sz = this_.
size();
145 this_.resize(this_.
size() + v.size(),DATA_ON_DEVICE);
147 auto ite = v.getGPUIterator();
149 CUDA_LAUNCH((merge_add_prp_device_impl<
decltype(v.toKernel()),
decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(
unsigned int)old_sz);
152 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error the function add_prp_device only work when map_vector is compiled with nvcc" << std::endl;
157 template<
typename T,
typename Memory,
158 template<
typename>
class layout_base,
162 template <
typename S,
166 template <
typename>
class layout_base2,
167 unsigned int ...args>
174 #if defined(CUDA_GPU) && defined(__NVCC__)
176 auto ite = v.getGPUIterator();
178 CUDA_LAUNCH((merge_add_prp_device_impl<
decltype(v.toKernel()),
decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(
unsigned int)offset);
181 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error the function merge_prp_device only work when map_vector is compiled with nvcc" << std::endl;
201 template<
typename T,
typename Memory,
template<
typename>
class layout_base,
typename grow_p,
unsigned int impl>
213 std::cerr << __FILE__ <<
":" << __LINE__ <<
" Error stub vector created" << std::endl;
218 #include "map_vector_std.hpp"
219 #include "map_vector_std_ptr.hpp"
222 #include "cuda/map_vector_std_cuda.hpp"
240 template<
typename T,
typename Memory,
template <
typename>
class layout_base,
typename grow_p>
241 class vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE>
275 void check_overflow(
size_t id)
const
279 std::cerr <<
"Error " << __FILE__ <<
":" << __LINE__ <<
" overflow id: " <<
id <<
"\n";
280 ACTION_ON_ERROR(VECTOR_ERROR_OBJECT);
316 template<
typename Tobj>
319 typedef layout_base<Tobj> type;
323#include "vector_pack_unpack.ipp"
366 if (sp > base.size())
373#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
375 base_gpu.constructor_impl(v_size,this->base.toKernel());
397 size_t sz[1] = {
size()};
400#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
402 base_gpu.constructor_impl(v_size,this->base.toKernel());
419 void resize(
size_t slot,
size_t opt = DATA_ON_DEVICE | DATA_ON_HOST,
unsigned int blockSize = 1)
423 if (slot > base.size())
428 if (slot - base.size() == 1 && opt && (opt & EXACT_RESIZE) == 0)
430 gr = grow_p::grow(base.size(),slot);
436 base.resize(sz,opt,blockSize);
442#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
444 base_gpu.constructor_impl(v_size,this->base.toKernel());
462 if (slot > base.size())
464 size_t gr = grow_p::grow(base.size(),slot);
468 base.resize_no_device(sz);
474#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
476 base_gpu.constructor_impl(v_size,this->base.toKernel());
494 if (v_size >= base.size())
498 non_zero_one(sz,2*base.size());
505#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
507 base_gpu.constructor_impl(v_size,this->base.toKernel());
522 if (v_size >= base.size())
526 non_zero_one(sz,2*base.size());
527 base.resize_no_device(sz);
533#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
535 base_gpu.constructor_impl(v_size,this->base.toKernel());
552 if (v_size >= base.size())
556 non_zero_one(sz,2*base.size());
566#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
568 base_gpu.constructor_impl(v_size,this->base.toKernel());
582 void add(
const typename grid_base<1,T,Memory,
typename layout_base<T>::type>::container & v)
586 if (v_size >= base.size())
590 non_zero_one(sz,2*base.size());
600#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
602 base_gpu.constructor_impl(v_size,this->base.toKernel());
615 for (
size_t i = 0 ; i < v.size() ; i++)
654 template <
template<
typename,
typename>
class op,
typename S,
typename M,
typename gp,
unsigned int ...args>
660 if (v.size() != opart.
size())
661 std::cerr << __FILE__ <<
":" << __LINE__ <<
" error merge_prp: v.size()=" << v.size() <<
" must be the same as o_part.size()" << opart.
size() << std::endl;
665 for (
size_t i = 0 ; i < v.size() ; i++)
669 if (opart.get(i) >
size())
670 std::cerr <<
"Error: " << __FILE__ <<
":" << __LINE__ <<
" try to access element " << opart.get(i) <<
" but the vector has size " <<
size() << std::endl;
674 object_s_di_op<op,
decltype(v.get(i)),
decltype(get(
size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.get(i)));
713 template <
template<
typename,
typename>
class op,
typename S,
typename M,
typename gp,
unsigned int ...args>
718 ::template run<S,M,gp,OPENFPM_NATIVE,layout_base,args...>(*
this,v,start);
757 template <
template<
typename,
typename>
class op,
761 template <
typename>
class layout_base2,
762 typename vector_opart_type,
763 unsigned int ...args>
765 const vector_opart_type & opart)
769 if (v.size() != opart.size())
770 std::cerr << __FILE__ <<
":" << __LINE__ <<
" error merge_prp: v.size()=" << v.size() <<
" must be the same as o_part.size()" << opart.size() << std::endl;
774 for (
size_t i = 0 ; i < v.size() ; i++)
778 if (i >= opart.size())
779 std::cerr <<
"Error: " << __FILE__ <<
":" << __LINE__ <<
" try to access element " << opart.template get<0>(i) <<
" but the vector has size " <<
size() << std::endl;
783 object_s_di_op<op,
decltype(v.get(i)),
decltype(get(
size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.template get<0>(i)));
823 template <
template<
typename,
typename>
class op,
827 template <
typename>
class layout_base2,
828 typename vector_opart_type,
829 unsigned int ...args>
832 const vector_opart_type & opart)
836 for (
size_t i = offset ; i < v.size() ; i++)
838 auto dst = v.get(opart.template get<0>(i2));
842 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,
sizeof...(args)> >(cp);
884 template <
template<
typename,
typename>
class op,
888 template <
typename>
class layout_base2,
889 typename vector_opart_type,
890 unsigned int ...args>
892 const vector_opart_type & opart,
898 if (v.size() != stop - start)
899 std::cerr << __FILE__ <<
":" << __LINE__ <<
" error merge_prp: v.size()=" << v.size() <<
" must be the same as stop - start" << stop - start << std::endl;
905 size_t sz[1] = {stop - start};
908 auto ite = nm.getGPUIterator();
911 CUDA_LAUNCH((merge_add_prp_device_impl_src_dst_opar_offset<op,
912 decltype(v.toKernel()),
913 decltype(this->toKernel()),
914 decltype(opart.toKernel()),
915 args...>),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);
919 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error you have to compile map_vector.hpp with nvcc to make GPU code working" << std::endl;
960 template <
template<
typename,
typename>
class op,
964 template <
typename>
class layout_base2,
965 typename vector_opart_type,
966 unsigned int ...args>
969 const vector_opart_type & opart)
973 if (v.size() < opart.size() + start)
974 std::cerr << __FILE__ <<
":" << __LINE__ <<
" error merge_prp: v.size()=" << v.size() <<
" must be snaller than o_part.size() + start " << opart.size() + start << std::endl;
980 auto ite = opart.getGPUIterator();
983 CUDA_LAUNCH((merge_add_prp_device_impl_src_offset_dst_opar<op,
984 decltype(v.toKernel()),
985 decltype(this->toKernel()),
986 decltype(opart.toKernel()),
987 args... >),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);
991 std::cout << __FILE__ <<
":" << __LINE__ <<
" Error you have to compile map_vector.hpp with nvcc to make GPU code working" << std::endl;
1031 template <
template<
typename,
typename>
class op,
1035 template <
typename>
class layout_base2,
1036 unsigned int ...args>
1041 for (
size_t i = 0 ; i < v.size() ; i++)
1045 if (start + i >= v_size)
1046 std::cerr <<
"Error: " << __FILE__ <<
":" << __LINE__ <<
" try to access element " << start+i <<
" but the vector has size " <<
size() << std::endl;
1050 object_s_di_op<op,
decltype(v.get(0)),
decltype(get(0)),OBJ_ENCAP,args...>(v.get(i),get(start+i));
1068 template <
typename S,
1072 template <
typename>
class layout_base2,
1073 unsigned int ...args>
1077 for (
size_t i = 0 ; i < v.size() ; i++)
1083 object_s_di<
decltype(v.get(i)),
decltype(get(
size()-1)),OBJ_ENCAP,args...>(v.get(i),get(
size()-1));
1101 template <
typename S,
1105 template <
typename>
class layout_base2,
1106 unsigned int ...args>
1110 ::template run<S,M,gp,impl,layout_base2,args...>(*
this,v);
1122 long int d_k = (
long int)
size()-1;
1123 long int s_k = (
long int)
size()-2;
1126 while (s_k >= (
long int)key)
1143 size_t s_k = key + 1;
1146 while (s_k <
size())
1169 if (keys.size() <= start )
1172 size_t a_key = start;
1173 size_t d_k = keys.get(a_key);
1174 size_t s_k = keys.get(a_key) + 1;
1177 while (s_k <
size())
1180 while (a_key+1 < keys.size() && s_k == keys.get(a_key+1))
1183 s_k = keys.get(a_key) + 1;
1197 v_size -= keys.size() - start;
1211 if (keys.size() <= start )
1214 size_t a_key = start;
1215 size_t d_k = keys.template get<0>(a_key);
1216 size_t s_k = keys.template get<0>(a_key) + 1;
1219 while (s_k <
size())
1222 while (a_key+1 < keys.size() && s_k == keys.template get<0>(a_key+1))
1225 s_k = keys.template get<0>(a_key) + 1;
1239 v_size -= keys.size() - start;
1253 template <
unsigned int p>
1256#if defined(SE_CLASS1) && !defined(__NVCC__)
1262 return base.template get<p>(key);
1286#if defined(SE_CLASS1) && !defined(__NVCC__)
1291 return base.get_o(key);
1308#if defined(SE_CLASS1) && !defined(__NVCC__)
1313 return base.get_o(key);
1321 template<
unsigned int id>
void fill(
unsigned char c)
1323 base.template fill<id>(c);
1334 return base.template getDeviceBuffer<id>();
1347 return base.template getDeviceBuffer<id>();
1360 return base.get_o(key);
1372 template <
unsigned int p>
1375 return this->
template get<p>(
id);
1387 template <
unsigned int p,
typename KeyType>
1391 return this->
template get<p>(
id.getKey());
1404 template <
unsigned int p,
typename keyType>
1407 return this->
template get<p>(
id.getKey());
1421 template <
unsigned int p>
1424#if defined(SE_CLASS1) && !defined(__NVCC__)
1429 return base.template get<p>(key);
1444 check_valid(
this,8);
1446#if defined(SE_CLASS1) && !defined(__NVCC__)
1451 return base.get_o(key);
1464 return base.get_o(key);
1482 dup.v_size = v_size;
1483 dup.base.swap(base.duplicate());
1485#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1487 dup.base_gpu.constructor_impl(v_size,dup.base.toKernel());
1506#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1508 base_gpu.constructor_impl(v_size,this->base.toKernel());
1521 swap(v.duplicate());
1523#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1525 base_gpu.constructor_impl(v_size,this->base.toKernel());
1536#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1538 base_gpu.constructor_impl(v_size,this->base.toKernel());
1545 :v_size(sz),base(sz)
1549#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1551 base_gpu.constructor_impl(v_size,this->base.toKernel());
1562 void set(
size_t id,
const typename grid_base<1,T,Memory,
typename layout_base<T>::type>::container & obj)
1586 template <
typename encap_S,
unsigned int ...args>
void set_o(
size_t i,
const encap_S & obj)
1589 object_s_di<encap_S,
decltype(get(i)),OBJ_ENCAP,args...>(obj,get(i));
1598 void set(
size_t id,
const T & obj)
1619 base.set(
id,v.base,src);
1622 template<
typename key_type>
1623 key_type getOriginKey(key_type vec_key)
1643#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1645 base_gpu.constructor_impl(v_size,this->base.toKernel());
1664 size_t rsz[1] = {v_size};
1665 if(rsz[0]>base.size()) {
1669 for (
size_t i = 0 ; i < v_size ; i++ )
1672 base.set(key,mv.base,key);
1675#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1677 base_gpu.constructor_impl(v_size,this->base.toKernel());
1700#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1702 base_gpu.constructor_impl(v_size,this->base.toKernel());
1720 v_size = mv.getInternal_v_size();
1721 size_t rsz[1] = {v_size};
1725 for (
size_t i = 0 ; i < v_size ; i++ )
1728 base.set(key,mv.getInternal_base(),key);
1731#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1733 base_gpu.constructor_impl(v_size,this->base.toKernel());
1751 template<
typename Mem,
template <
typename>
class layout_base2>
1757#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1759 base_gpu.constructor_impl(v_size,this->base.toKernel());
1775 template<
typename Mem,
1776 template <
typename>
class layout_base2,
1777 typename check =
typename std::enable_if<!std::is_same<typename layout_base2<T>::type,
typename layout_base<T>::type>::value >::type>
1781 v_size = mv.getInternal_v_size();
1782 size_t rsz[1] = {v_size};
1786 for (
size_t i = 0 ; i < v_size ; i++ )
1789 base.set_general(key,mv.getInternal_base(),key);
1792#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1794 base_gpu.constructor_impl(v_size,this->base.toKernel());
1810 return !this->operator==(v);
1820 if (v_size != v.v_size)
1824 for (
size_t i = 0 ; i < v_size ; i++ )
1828 if (base.get_o(key) != v.base.get_o(key))
1845 size_t sz_sp = v_size;
1850 base.swap_nomode(v.base);
1852#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1854 base_gpu.constructor_impl(v_size,this->base.toKernel());
1868 size_t sz_sp = v_size;
1877#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1879 base_gpu.constructor_impl(v_size,this->base.toKernel());
1880 v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());
1892 size_t sz_sp = v_size;
1900#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1902 base_gpu.constructor_impl(v_size,this->base.toKernel());
1903 v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());
1940 ite_gpu<1> getGPUIteratorTo(
long int stop,
size_t n_thr = default_kernel_wg_threads_)
const
1945 return base.getGPUIterator(start,stop_,n_thr);
1961 check_valid(
this,8);
1963 return getIterator();
1984 template<
unsigned int p>
1988 check_valid(
this,8);
1999 ite_gpu<1> getGPUIterator(
size_t n_thr = default_kernel_wg_threads_)
const
2004 return base.getGPUIterator(start,stop,n_thr);
2010 ite_gpu<1> getDomainIteratorGPU(
size_t n_thr = default_kernel_wg_threads_)
const
2012 return getGPUIterator(n_thr);
2024 return base.packObjectSize();
2036 return base.packObject(mem);
2049 template<
int ... prp>
static inline size_t calculateMem(
size_t n,
size_t e)
2057 if (
sizeof...(prp) == 0)
2058 return grow_p::grow(0,n) *
sizeof(
typename T::type);
2060 typedef object<
typename object_creator<
typename T::type,prp...>::type> prp_object;
2062 return grow_p::grow(0,n) *
sizeof(prp_object);
2076 template<
int ... prp>
static inline size_t packMem(
size_t n,
size_t e)
2078 if (
sizeof...(prp) == 0)
2079 return n *
sizeof(
typename T::type);
2081 typedef object<
typename object_creator<
typename T::type,prp...>::type> prp_object;
2083 return n *
sizeof(prp_object);
2105 template<
unsigned int p>
2108 return base.template getMemory<p>();
2118 base.template setMemory<p>(mem);
2120#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2122 base_gpu.constructor_impl(v_size,this->base.toKernel());
2134 base.setMemoryArray(mem);
2136#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2138 base_gpu.constructor_impl(v_size,this->base.toKernel());
2152 return base.template getPointer<p>();
2162 return base.getPointer();
2201 base.template hostToDevice<prp ...>();
2210 base.template deviceToHost<prp ...>();
2218 template<
unsigned int ... prp>
void deviceToHost(
size_t start,
size_t stop)
2220 base.template deviceToHost<prp ...>(start,stop);
2227 template<
unsigned int ... prp>
void hostToDevice(
size_t start,
size_t stop)
2229 base.template hostToDevice<prp ...>(start,stop);
2238 base.template hostToDeviceNUMA<prp ...>(start,stop);
2247 base.template hostToDeviceNUMA<prp ...>();
2250#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2275 inline const vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base> toKernel()
const
2279 return vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base>(base_gpu);
2307 if (base.size() == 0)
2308 {std::cout << __FILE__ <<
":" << __LINE__ <<
" Warning you are off-loading with toGPU a vector that seem to be empty or not initialized" << std::endl; }
2324 template<
unsigned int ... prps>
2325 const std::string
toString(std::string prefix = std::string())
2327 std::stringstream ss;
2328 auto it = getIterator();
2336 ss << prefix <<
" element[" << p <<
"]" <<
" ";
2339 boost::mpl::for_each_ref<boost::mpl::range_c<int,0,
sizeof...(prps)>>(vp);
2349 void * internal_get_size_pointer() {
return &v_size;}
2353#ifndef DISABLE_ALL_RTTI
2354 std::cout <<
"the size of: " << demangle(
typeid(self_type).name()) <<
" is " <<
sizeof(self_type) << std::endl;
2355 std::cout <<
" " << demangle(
typeid(
decltype(v_size)).name()) <<
":" <<
sizeof(
decltype(v_size)) << std::endl;
2356 std::cout <<
" " << demangle(
typeid(
decltype(base)).name()) <<
":" <<
sizeof(
decltype(base)) << std::endl;
2362 template <
typename T>
using vector_std = vector<T, HeapMemory, memory_traits_lin, openfpm::grow_policy_double, STD_VECTOR>;
2366 template<
typename T>
using vector_custd = vector<T, CudaMemory, memory_traits_inte, openfpm::grow_policy_double, STD_VECTOR>;
grid_key_dx is the key to access any element in the grid
Implementation of 1-D std::vector like structure.
void remove(openfpm::vector< size_t > &keys, size_t start=0)
Remove several entries from the vector.
static bool noPointers()
This class has pointer inside.
void hostToDevice()
Copy the memory from host to device.
size_t size_local() const
Return the size of the vector.
void set_o(size_t i, const encap_S &obj)
It sets an element of the vector from an object that is a subset of the vector properties.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, unsigned int offset, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
void swap(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&v)
Swap the memory with another vector.
void add(const T &v)
It inserts a new object into the vector, eventually reallocating the grid.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, size_t start)
It merge the elements of a source vector to this vector.
void non_zero_one(size_t sz[1], size_t arg)
If the argument is zero return 1 otherwise return the argument.
void add(const typename grid_base< 1, T, Memory, typename layout_base< T >::type >::container &v)
It inserts a new object into the vector, eventually reallocating the vector.
vector_key_iterator_ele< p, self_type > getIteratorElements() const
Get the vector elements iterator.
void deviceToHost(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host.
bool operator==(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) const
Check that two vectors are equal.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Mem, layout_base, gp, OPENFPM_NATIVE > &mv)
Assignment operator.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&mv)
Assignment operator.
void setMemory(Memory &mem)
Set the memory of the base structure using an object.
const void * getPointer() const
Return the pointer that store the data.
void clear()
Clear the vector.
void remove(openfpm::vector< aggregate< int > > &keys, size_t start=0)
Remove several entries from the vector.
const std::string toString(std::string prefix=std::string())
vector(vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&v)
Constructor from another temporal vector.
void * getDeviceBuffer()
It return the properties arrays.
void setMemoryArray(Memory *mem)
Set the memory of the base structure using an object.
void hostToDevice(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host.
const grid_base< 1, T, Memory, layout_type > & getInternal_base() const
Internal function.
void add_no_device()
It inserts a new empty object into the vector, eventually reallocating the grid.
void remove(size_t key)
Remove one entry from the vector.
void * getDeviceBufferCopy()
It return the properties arrays.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &mv)
Assignment operator.
static size_t calculateNMem(size_t n)
How many allocation are required to create n-elements.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Mem, layout_base2, grow_p, OPENFPM_NATIVE > &mv)
Assignment operator.
vector_gpu_ker< typename apply_transform< layout_base, T >::type, layout_base > toKernel()
Convert the grid into a data-structure compatible for computing into GPU.
auto getProp(const unsigned int &id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
static size_t calculateMem(size_t n, size_t e)
Calculate the memory size required to allocate n elements.
void swap_nomode(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v)
Swap the memory with another vector.
void swap(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v)
Swap the memory with another vector.
vector(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) THROW
Constructor from another constant vector.
const size_t & getInternal_v_size() const
Internal function.
static size_t packMem(size_t n, size_t e)
Calculate the memory size required to pack n elements.
size_t packObjectSize()
Return the size of the message needed to pack this object.
layout_base< T >::type layout_type
Type of the encapsulation memory parameter.
void set(size_t id, const T &obj)
Set the object id to obj.
vector_key_iterator getIteratorTo(size_t stop) const
Get iterator over the particles from 0 until a particular index.
void insert(size_t key)
Insert an entry in the vector.
vector(size_t sz) THROW
Constructor, vector of size sz.
size_t capacity()
return the maximum capacity of the vector before reallocation
void * getPointer()
Return the pointer that store the data.
void resize_no_device(size_t slot)
Resize the vector ()
void merge_prp(const vector< S, M, layout_base, gp, OPENFPM_NATIVE > &v, const openfpm::vector< size_t > &opart)
It merge the elements of a source vector to this vector.
auto get(size_t id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
Memory Memory_type
Type of memory this vector use.
void add()
It inserts a new empty object into the vector, eventually reallocating the grid.
void set(size_t id, const typename grid_base< 1, T, Memory, typename layout_base< T >::type >::container &obj)
Set the object id to obj.
vector< T, Memory, layout_base2, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Mem, layout_base2, grow_p, OPENFPM_NATIVE > &&mv)
Assignment operator.
const vector_gpu_ker< typename apply_transform< layout_base, T >::type, layout_base > toKernel() const
Convert the grid into a data-structure compatible for computing into GPU.
vector_key_iterator getIterator() const
Get the vector elements iterator.
int yes_i_am_vector
it define that it is a vector
const grid_base< 1, T, Memory, layout_type >::container last() const
Get the last element of the vector.
void reserve(size_t sp)
Reserve slots in the vector to avoid reallocation.
size_t access_key
Access key for the vector.
grid_base< 1, T, Memory, typename layout_base< T >::type >::container last()
Get the last element of the vector.
vector_key_iterator iterator_key
iterator for the vector
T value_type
Type of the value the vector is storing.
bool isSubset() const
Indicate that this class is not a subset.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > duplicate() const
It duplicate the vector.
void resize(size_t slot, size_t opt=DATA_ON_DEVICE|DATA_ON_HOST, unsigned int blockSize=1)
Resize the vector.
vector_key_iterator getDomainIterator() const
Get the vector elements iterator.
auto get(size_t id) const -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
size_t packObject(void *mem)
Pack the object into the given pointer.
auto getProp(const KeyType &id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
auto get(size_t id) const -> const decltype(base.get_o(grid_key_dx< 1 >(id)))
Get an element of the vector.
grid_base< 1, T, Memory, typename layout_base< T >::type > base
1-D static grid
auto getProp(const keyType &id) const -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
~vector() THROW
Destructor.
void hostToDeviceNUMA()
Synchronize the memory buffer in the device with the memory in the host respecting NUMA domains.
grid_base< 1, T, Memory, typename layout_base< T >::type >::container container
Object container for T; it is the return type of get_o, which returns an object type through encapsulation.
void add_prp_device(const vector< S, M, layout_base2, gp, impl > &v)
It add the element of a source vector to this vector.
void merge_prp_device(const vector< S, M, layout_base, gp, OPENFPM_NATIVE > &v, unsigned int start)
It merge the elements of a source vector to this vector (on device)
void fill(unsigned char c)
Fill the buffer with a byte.
int yes_i_am_vector_native
it define that it is a vector
auto getMemory() -> decltype(base.template getMemory< p >())
Return the memory object.
void add(const vector< T, M, layout_base, gp, OPENFPM_NATIVE > &v)
It add the element of another vector to this vector.
vector() THROW
Constructor, vector of size 0.
layout_base< T > layout_base_
Type of the encapsulation memory parameter.
void add_prp(const vector< S, M, layout_base2, gp, impl > &v)
It add the element of a source vector to this vector.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Mem, layout_base, gp, OPENFPM_NATIVE > &&mv)
Assignment operator.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
vector_key_iterator getIteratorFrom(size_t start) const
Get iterator over the particles from a particular index.
const grid_base< 1, T, Memory, typename layout_base< T >::type >::container get_o(size_t id) const
Get an element of the vector.
void merge_prp_v_device(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, unsigned int start, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
void hostToDeviceNUMA(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host respecting NUMA domains.
void merge_prp_v_device(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, const vector_opart_type &opart, unsigned int start, unsigned int stop)
It merge the elements of a source vector to this vector.
grow_p grow_policy
growing policy of this vector
void shrink_to_fit()
Clear the vector.
size_t size() const
Return the size of the vector.
void set(size_t id, vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v, size_t src)
Set the element of the vector v from another element of another vector.
auto get(size_t id) -> decltype(base.get_o(grid_key_dx< 1 >(id)))
Get an element of the vector.
bool operator!=(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) const
Check that two vectors are not equal.
void deviceToHost()
Synchronize the memory buffer in the device with the memory in the host.
Implementation of 1-D std::vector like structure.
convert a type into constant type
aggregate of properties; from a list of objects it creates a struct that follows the OPENFPM native structure...
It copy two encap object.
It create a boost::fusion vector with the selected properties.
It copy the properties from one object to another applying an operation.
It copy the properties from one object to another.
grid interface available when on gpu
grid interface available when on gpu
this class is a functor for "for_each" algorithm