OpenFPM_pdata  4.1.0
Project that contain the implementation of distributed structures
 
Loading...
Searching...
No Matches
map_vector.hpp
1/*
2 * map_vector.hpp
3 *
4 * Created on: Aug 30, 2014
5 * Author: Pietro Incardona
6 */
7
8#ifndef MAP_VECTOR_HPP
9#define MAP_VECTOR_HPP
10
11#include "util/cuda_launch.hpp"
12#include <iostream>
13#include <typeinfo>
14#include "util/common.hpp"
15#include "memory/PtrMemory.hpp"
16#include "util/object_util.hpp"
17#include "Grid/util.hpp"
18#include "Vector/util.hpp"
19#include "Vector/map_vector_grow_p.hpp"
20#include "memory/ExtPreAlloc.hpp"
21#include "util/util_debug.hpp"
22#include "util/Pack_stat.hpp"
23#include "Grid/map_grid.hpp"
24#include "memory/HeapMemory.hpp"
25#include "vect_isel.hpp"
26#include "util/object_s_di.hpp"
27#include "util.hpp"
28#include "util/Pack_stat.hpp"
29#include "memory/ExtPreAlloc.hpp"
30#include <string.h>
31#include "Packer_Unpacker/Unpacker.hpp"
32#include "Packer_Unpacker/Packer.hpp"
33#include <fstream>
34#include "Packer_Unpacker/Packer_util.hpp"
35#include "Packer_Unpacker/has_pack_agg.hpp"
36#include "timer.hpp"
37#include "map_vector_std_util.hpp"
38#include "data_type/aggregate.hpp"
39#include "vector_map_iterator.hpp"
40#include "util/cuda_util.hpp"
41#include "cuda/map_vector_cuda_ker.cuh"
42#include "map_vector_printers.hpp"
43
44namespace openfpm
45{
46
47 template<bool active>
49 {
50 template<typename vector_type1, typename vector_type2>
51 static void copy(vector_type1 & v1, vector_type2 & v2)
52 {
53
54 }
55
56 template<typename vector_type1, typename vector_type2>
57 static void copy2(vector_type1 & v1, vector_type2 & v2)
58 {
59
60 }
61 };
62
63 template<>
65 {
66 template<typename vector_type1, typename vector_type2>
67 static void copy(vector_type1 & v1, vector_type2 & v2)
68 {
69#ifdef __NVCC__
70 if (v1.size() != 0)
71 {
72 auto it = v1.getGPUIterator();
73 CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
74 }
75#endif
76 }
77
78 template<typename vector_type1, typename vector_type2>
79 static void copy2(vector_type1 & v1, vector_type2 & v2)
80 {
81#ifdef __NVCC__
82 if (v2.size() != 0)
83 {
84 auto it = v1.getGPUIterator();
85 CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
86 }
87#endif
88 }
89 };
90
91 template<bool is_ok_cuda,typename T, typename Memory,
92 template<typename> class layout_base,
93 typename grow_p>
95 {
96 template <typename S,
97 typename M,
98 typename gp,
99 unsigned int impl,
100 template <typename> class layout_base2,
101 unsigned int ...args>
103 {
104 std::cout << __FILE__ << ":" << __LINE__ << " Error the function add_prp_device only work with cuda enabled vector" << std::endl;
105 }
106 };
107
108 template<bool is_ok_cuda,typename T, typename Memory,
109 template<typename> class layout_base,
110 typename grow_p>
112 {
113 template <typename S,
114 typename M,
115 typename gp,
116 unsigned int impl,
117 template <typename> class layout_base2,
118 unsigned int ...args>
121 unsigned int offset)
122 {
123 std::cout << __FILE__ << ":" << __LINE__ << " Error the function merge_prp_device only work with cuda enabled vector" << std::endl;
124 }
125 };
126
127 template<typename T, typename Memory,
128 template<typename> class layout_base,
129 typename grow_p>
130 struct add_prp_device_impl<true,T,Memory,layout_base,grow_p>
131 {
132 template <typename S,
133 typename M,
134 typename gp,
135 unsigned int impl,
136 template <typename> class layout_base2,
137 unsigned int ...args>
139 {
140 // merge the data on device
141
142 #if defined(CUDA_GPU) && defined(__NVCC__)
143
144 size_t old_sz = this_.size();
145 this_.resize(this_.size() + v.size(),DATA_ON_DEVICE);
146
147 auto ite = v.getGPUIterator();
148
149 CUDA_LAUNCH((merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(unsigned int)old_sz);
150
151 #else
152 std::cout << __FILE__ << ":" << __LINE__ << " Error the function add_prp_device only work when map_vector is compiled with nvcc" << std::endl;
153 #endif
154 }
155 };
156
157 template<typename T, typename Memory,
158 template<typename> class layout_base,
159 typename grow_p>
160 struct merge_prp_device_impl<true,T,Memory,layout_base,grow_p>
161 {
162 template <typename S,
163 typename M,
164 typename gp,
165 unsigned int impl,
166 template <typename> class layout_base2,
167 unsigned int ...args>
168 static void run(vector<T,Memory,layout_base,grow_p,impl> & this_ ,
170 unsigned int offset)
171 {
172 // merge the data on device
173
174 #if defined(CUDA_GPU) && defined(__NVCC__)
175
176 auto ite = v.getGPUIterator();
177
178 CUDA_LAUNCH((merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(unsigned int)offset);
179
180 #else
181 std::cout << __FILE__ << ":" << __LINE__ << " Error the function merge_prp_device only work when map_vector is compiled with nvcc" << std::endl;
182 #endif
183 }
184 };
185
186
201 template<typename T, typename Memory, template<typename> class layout_base, typename grow_p, unsigned int impl>
202 class vector
203 {
211 size_t size()
212 {
213 std::cerr << __FILE__ << ":" << __LINE__ << " Error stub vector created" << std::endl;
214 return 0;
215 }
216 };
217
218 #include "map_vector_std.hpp"
219 #include "map_vector_std_ptr.hpp"
220
221#ifdef CUDA_GPU
222 #include "cuda/map_vector_std_cuda.hpp"
223#endif
224
240 template<typename T,typename Memory, template <typename> class layout_base, typename grow_p>
241 class vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE>
242 {
244
248 size_t v_size;
249
252
/*! \brief If the argument is zero write 1, otherwise write the argument
 *
 * Used when growing the vector: it guarantees that the requested
 * allocation is never zero elements.
 *
 * \param sz one-element array receiving the clamped size
 * \param arg requested size
 */
void non_zero_one(size_t sz[1], size_t arg)
{
    sz[0] = (arg == 0) ? 1 : arg;
}
266
267#ifdef SE_CLASS1
268
/*! \brief Check that the index is not out of bound (compiled only with SE_CLASS1)
 *
 * Prints an error on std::cerr and triggers the configured error action
 * when id is past the end of the vector.
 *
 * \param id element index to validate against v_size
 */
void check_overflow(size_t id) const
{
    if (id >= v_size)
    {
        std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " overflow id: " << id << "\n";
        ACTION_ON_ERROR(VECTOR_ERROR_OBJECT);
    }
}
283
284#endif
285
286 public:
287
289 typedef int yes_i_am_vector;
290
293
295 typedef typename layout_base<T>::type layout_type;
296
298 typedef layout_base<T> layout_base_;
299
302
304 // you can access all the properties of T
306
308 typedef T value_type;
309
311 typedef Memory Memory_type;
312
314 typedef grow_p grow_policy;
315
316 template<typename Tobj>
317 struct layout_base__
318 {
319 typedef layout_base<Tobj> type;
320 };
321
322 // Implementation of packer and unpacker for vector
323#include "vector_pack_unpack.ipp"
324
/*! \brief Return the number of elements stored in the vector
 *
 * \return the vector size (not the allocated capacity)
 */
size_t size() const
{
    return v_size;
}
334 //remove host device
/*! \brief Return the size of the vector
 *
 * Same as size(); kept for interface compatibility with distributed
 * containers that distinguish local from global size.
 *
 * \return the vector size
 */
size_t size_local() const
{
    return v_size;
}
344
/*! \brief Return how many elements the vector can hold before reallocating
 *
 * \return the allocated capacity (size of the underlying grid)
 */
size_t capacity()
{
    return base.size();
}
355
/*! \brief Reserve slots in the vector to avoid reallocation on growth
 *
 * Grows the underlying storage to at least sp elements; never shrinks
 * and does not change the logical size v_size.
 *
 * \param sp number of elements to reserve
 */
void reserve(size_t sp)
{
    if (sp > base.size())
    {
        size_t sz[1] = {sp};
        base.resize(sz);
    }

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

    // keep the cached kernel view in sync with the (possibly moved) storage
    base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
}
379
/*! \brief Remove all the elements from the vector
 *
 * Sets the size to zero; the allocated memory is kept (capacity
 * is unchanged).
 */
void clear()
{
    resize(0);
}
389
396 {
397 size_t sz[1] = {size()};
398 base.resize(sz);
399
400#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
401
402 base_gpu.constructor_impl(v_size,this->base.toKernel());
403
404#endif
405 }
406
/*! \brief Resize the vector to contain slot elements
 *
 * If more space is needed the underlying storage is reallocated,
 * otherwise only the logical size is updated (no shrink).
 *
 * \param slot new number of elements
 * \param opt resize options (DATA_ON_DEVICE / DATA_ON_HOST / EXACT_RESIZE)
 * \param blockSize forwarded to the base-grid resize
 */
void resize(size_t slot, size_t opt = DATA_ON_DEVICE | DATA_ON_HOST, unsigned int blockSize = 1)
{
    // If we need more space than what we allocated, allocate new memory

    if (slot > base.size())
    {
        size_t gr = slot;
        // If you increase by one we smartly resize the internal capacity more than 1
        // This is to make faster patterns like resize(size()+1)
        // NOTE(review): the "opt &&" term means the growth policy is skipped when
        // opt == 0 — presumably intentional (no-flags = exact resize); confirm
        if (slot - base.size() == 1 && opt && (opt & EXACT_RESIZE) == 0)
        {
            gr = grow_p::grow(base.size(),slot);
        }

        size_t sz[1] = {gr};

        base.resize(sz,opt,blockSize);
    }

    // update the vector size
    v_size = slot;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

    // refresh the cached kernel view after a possible reallocation
    base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
}
448
449
/*! \brief Resize the vector touching only the host buffers
 *
 * Same as resize() but the reallocation does not involve the device
 * memory (uses resize_no_device on the base grid).
 *
 * \param slot new number of elements
 */
void resize_no_device(size_t slot)
{
    // If we need more space than what we allocated, allocate new memory

    if (slot > base.size())
    {
        size_t gr = grow_p::grow(base.size(),slot);

        size_t sz[1] = {gr};
        base.resize_no_device(sz);
    }

    // update the vector size
    v_size = slot;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

    base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
}
480
482 typedef size_t access_key;
483
490 void add()
491 {
493
494 if (v_size >= base.size())
495 {
497 size_t sz[1];
498 non_zero_one(sz,2*base.size());
499 base.resize(sz);
500 }
501
503 v_size++;
504
505#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
506
507 base_gpu.constructor_impl(v_size,this->base.toKernel());
508
509#endif
510 }
511
519 {
521
522 if (v_size >= base.size())
523 {
525 size_t sz[1];
526 non_zero_one(sz,2*base.size());
527 base.resize_no_device(sz);
528 }
529
531 v_size++;
532
533#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
534
535 base_gpu.constructor_impl(v_size,this->base.toKernel());
536
537#endif
538 }
539
548 void add(const T & v)
549 {
551
552 if (v_size >= base.size())
553 {
555 size_t sz[1];
556 non_zero_one(sz,2*base.size());
557 base.resize(sz);
558 }
559
561 base.set(v_size,v);
562
564 v_size++;
565
566#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
567
568 base_gpu.constructor_impl(v_size,this->base.toKernel());
569
570#endif
571 }
572
582 void add(const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container & v)
583 {
585
586 if (v_size >= base.size())
587 {
589 size_t sz[1];
590 non_zero_one(sz,2*base.size());
591 base.resize(sz);
592 }
593
595 base.set(v_size,v);
596
598 v_size++;
599
600#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
601
602 base_gpu.constructor_impl(v_size,this->base.toKernel());
603
604#endif
605 }
606
612 template <typename M, typename gp> void add(const vector<T, M, layout_base,gp,OPENFPM_NATIVE> & v)
613 {
615 for (size_t i = 0 ; i < v.size() ; i++)
616 add(v.get(i));
617 }
618
654 template <template<typename,typename> class op, typename S, typename M, typename gp, unsigned int ...args>
656 const openfpm::vector<size_t> & opart)
657 {
658#ifdef SE_CLASS1
659
660 if (v.size() != opart.size())
661 std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as o_part.size()" << opart.size() << std::endl;
662
663#endif
665 for (size_t i = 0 ; i < v.size() ; i++)
666 {
667#ifdef SE_CLASS1
668
669 if (opart.get(i) > size())
670 std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " try to access element " << opart.get(i) << " but the vector has size " << size() << std::endl;
671
672#endif
673 // write the object in the last element
674 object_s_di_op<op,decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.get(i)));
675 }
676 }
677
713 template <template<typename,typename> class op, typename S, typename M, typename gp, unsigned int ...args>
715 unsigned int start)
716 {
718 ::template run<S,M,gp,OPENFPM_NATIVE,layout_base,args...>(*this,v,start);
719 }
720
721
757 template <template<typename,typename> class op,
758 typename S,
759 typename M,
760 typename gp,
761 template <typename> class layout_base2,
762 typename vector_opart_type,
763 unsigned int ...args>
765 const vector_opart_type & opart)
766 {
767#ifdef SE_CLASS1
768
769 if (v.size() != opart.size())
770 std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as o_part.size()" << opart.size() << std::endl;
771
772#endif
774 for (size_t i = 0 ; i < v.size() ; i++)
775 {
776#ifdef SE_CLASS1
777
778 if (i >= opart.size())
779 std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " try to access element " << opart.template get<0>(i) << " but the vector has size " << size() << std::endl;
780
781#endif
782 // write the object in the last element
783 object_s_di_op<op,decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.template get<0>(i)));
784 }
785 }
786
823 template <template<typename,typename> class op,
824 typename S,
825 typename M,
826 typename gp,
827 template <typename> class layout_base2,
828 typename vector_opart_type,
829 unsigned int ...args>
831 unsigned int offset,
832 const vector_opart_type & opart)
833 {
834 size_t i2 = 0;
835
836 for (size_t i = offset ; i < v.size() ; i++)
837 {
838 auto dst = v.get(opart.template get<0>(i2));
839 auto src = v.get(i);
840 copy_cpu_encap_encap_op_prp<op,decltype(v.get(0)),decltype(v.get(0)),args...> cp(src,dst);
841
842 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,sizeof...(args)> >(cp);
843 i2++;
844 }
845 }
846
884 template <template<typename,typename> class op,
885 typename S,
886 typename M,
887 typename gp,
888 template <typename> class layout_base2,
889 typename vector_opart_type,
890 unsigned int ...args>
892 const vector_opart_type & opart,
893 unsigned int start,
894 unsigned int stop)
895 {
896#ifdef SE_CLASS1
897
898 if (v.size() != stop - start)
899 std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as stop - start" << stop - start << std::endl;
900
901#endif
902
903#ifdef __NVCC__
904
905 size_t sz[1] = {stop - start};
906 grid_sm<1,void> nm(sz);
907
908 auto ite = nm.getGPUIterator();
909
910 // write the object in the last element
911 CUDA_LAUNCH((merge_add_prp_device_impl_src_dst_opar_offset<op,
912 decltype(v.toKernel()),
913 decltype(this->toKernel()),
914 decltype(opart.toKernel()),
915 args...>),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);
916
917 // calculate
918#else
919 std::cout << __FILE__ << ":" << __LINE__ << " Error you have to compile map_vector.hpp with nvcc to make GPU code working" << std::endl;
920
921#endif
922 }
923
960 template <template<typename,typename> class op,
961 typename S,
962 typename M,
963 typename gp,
964 template <typename> class layout_base2,
965 typename vector_opart_type,
966 unsigned int ...args>
968 unsigned int start,
969 const vector_opart_type & opart)
970 {
971#ifdef SE_CLASS1
972
973 if (v.size() < opart.size() + start)
974 std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be snaller than o_part.size() + start " << opart.size() + start << std::endl;
975
976#endif
977
978#ifdef __NVCC__
979
980 auto ite = opart.getGPUIterator();
981
982 // write the object in the last element
983 CUDA_LAUNCH((merge_add_prp_device_impl_src_offset_dst_opar<op,
984 decltype(v.toKernel()),
985 decltype(this->toKernel()),
986 decltype(opart.toKernel()),
987 args... >),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);
988
989 // calculate
990#else
991 std::cout << __FILE__ << ":" << __LINE__ << " Error you have to compile map_vector.hpp with nvcc to make GPU code working" << std::endl;
992
993#endif
994 }
995
1031 template <template<typename,typename> class op,
1032 typename S,
1033 typename M,
1034 typename gp,
1035 template <typename> class layout_base2,
1036 unsigned int ...args>
1038 size_t start)
1039 {
1041 for (size_t i = 0 ; i < v.size() ; i++)
1042 {
1043#ifdef SE_CLASS1
1044
1045 if (start + i >= v_size)
1046 std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " try to access element " << start+i << " but the vector has size " << size() << std::endl;
1047
1048#endif
1049 // write the object in the last element
1050 object_s_di_op<op,decltype(v.get(0)),decltype(get(0)),OBJ_ENCAP,args...>(v.get(i),get(start+i));
1051 }
1052 }
1053
1068 template <typename S,
1069 typename M,
1070 typename gp,
1071 unsigned int impl,
1072 template <typename> class layout_base2,
1073 unsigned int ...args>
1075 {
1077 for (size_t i = 0 ; i < v.size() ; i++)
1078 {
1079 // Add a new element
1080 add();
1081
1082 // write the object in the last element
1083 object_s_di<decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(size()-1));
1084 }
1085 }
1086
1101 template <typename S,
1102 typename M,
1103 typename gp,
1104 unsigned int impl,
1105 template <typename> class layout_base2,
1106 unsigned int ...args>
1108 {
1110 ::template run<S,M,gp,impl,layout_base2,args...>(*this,v);
1111 }
1112
1118 void insert(size_t key)
1119 {
1120 add();
1121
1122 long int d_k = (long int)size()-1;
1123 long int s_k = (long int)size()-2;
1124
1125 // keys
1126 while (s_k >= (long int)key)
1127 {
1128 set(d_k,get(s_k));
1129 d_k--;
1130 s_k--;
1131 }
1132 }
1133
1134
1140 void remove(size_t key)
1141 {
1142 size_t d_k = key;
1143 size_t s_k = key + 1;
1144
1145 // keys
1146 while (s_k < size())
1147 {
1148 set(d_k,get(s_k));
1149 d_k++;
1150 s_k++;
1151 }
1152
1153 // re-calculate the vector size
1154
1155 v_size--;
1156 }
1157
/*! \brief Remove several entries from the vector
 *
 * Compacts the vector in a single pass, skipping every index listed
 * in keys.
 *
 * \warning keys must be sorted in ascending order and contain valid,
 *          distinct indices — presumably guaranteed by callers; the
 *          size adjustment below assumes all of them are removed
 *
 * \param keys sorted list of indices to remove
 * \param start from where to start reading keys
 */
void remove(openfpm::vector<size_t> & keys, size_t start = 0)
{
    // Nothing to remove return
    if (keys.size() <= start )
        return;

    size_t a_key = start;
    size_t d_k = keys.get(a_key);
    size_t s_k = keys.get(a_key) + 1;

    // keys
    while (s_k < size())
    {
        // s_k should always point to a key that is not going to be deleted
        while (a_key+1 < keys.size() && s_k == keys.get(a_key+1))
        {
            a_key++;
            s_k = keys.get(a_key) + 1;
        }

        // In case of overflow
        if (s_k >= size())
            break;

        set(d_k,get(s_k));
        d_k++;
        s_k++;
    }

    // re-calculate the vector size

    v_size -= keys.size() - start;
}
1199
/*! \brief Remove several entries from the vector
 *
 * Same single-pass compaction as remove(vector<size_t>&,size_t) but the
 * indices are stored in property 0 of an aggregate<int> vector.
 *
 * \warning keys must be sorted in ascending order and contain valid,
 *          distinct indices — presumably guaranteed by callers; the
 *          size adjustment below assumes all of them are removed
 *
 * \param keys sorted list of indices to remove (property 0)
 * \param start from where to start reading keys
 */
void remove(openfpm::vector<aggregate<int>> & keys, size_t start = 0)
{
    // Nothing to remove return
    if (keys.size() <= start )
        return;

    size_t a_key = start;
    size_t d_k = keys.template get<0>(a_key);
    size_t s_k = keys.template get<0>(a_key) + 1;

    // keys
    while (s_k < size())
    {
        // s_k should always point to a key that is not going to be deleted
        while (a_key+1 < keys.size() && s_k == keys.template get<0>(a_key+1))
        {
            a_key++;
            s_k = keys.template get<0>(a_key) + 1;
        }

        // In case of overflow
        if (s_k >= size())
            break;

        set(d_k,get(s_k));
        d_k++;
        s_k++;
    }

    // re-calculate the vector size

    v_size -= keys.size() - start;
}
1241
1253 template <unsigned int p>
1254 inline auto get(size_t id) const -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1255 {
1256#if defined(SE_CLASS1) && !defined(__NVCC__)
1257 check_overflow(id);
1258#endif
1259 grid_key_dx<1> key(id);
1260
1261
1262 return base.template get<p>(key);
1263 }
1264
1270 bool isSubset() const
1271 {
1272 return false;
1273 }
1274
/*! \brief Get the encapsulated object at position id
 *
 * \param id element index
 *
 * \return the encapsulated element (all properties)
 */
inline auto get(size_t id) -> decltype(base.get_o(grid_key_dx<1>(id)))
{
#if defined(SE_CLASS1) && !defined(__NVCC__)
    check_overflow(id);
#endif
    grid_key_dx<1> key(id);

    return base.get_o(key);
}
1293
/*! \brief Get the encapsulated object at position id (const)
 *
 * \param id element index
 *
 * \return the encapsulated element (all properties)
 */
inline const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container get_o(size_t id) const
{
#if defined(SE_CLASS1) && !defined(__NVCC__)
    check_overflow(id);
#endif
    grid_key_dx<1> key(id);

    return base.get_o(key);
}
1315
1321 template<unsigned int id> void fill(unsigned char c)
1322 {
1323 base.template fill<id>(c);
1324 }
1325
/*! \brief Return the device memory buffer for property id
 *
 * NOTE(review): despite the name, no copy is visible here — this body is
 * identical to getDeviceBuffer(); confirm whether a device-to-host copy
 * was intended before relying on the name.
 *
 * \tparam id property index
 * \return pointer to the device buffer
 */
template<unsigned int id> void * getDeviceBufferCopy()
{
    return base.template getDeviceBuffer<id>();
}
1336
1345 template<unsigned int id> void * getDeviceBuffer()
1346 {
1347 return base.template getDeviceBuffer<id>();
1348 }
1349
1350
1357 {
1358 grid_key_dx<1> key(size()-1);
1359
1360 return base.get_o(key);
1361 } //remove device host
1372 template <unsigned int p>
1373 inline auto getProp(const unsigned int & id) -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1374 { //uncomment this
1375 return this->template get<p>(id);
1376 }//remove host device
1387 template <unsigned int p,typename KeyType>
1388 inline auto getProp(const KeyType & id) -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1389 {
1390 //uncomment this
1391 return this->template get<p>(id.getKey());
1392 }
1393 //remove device host
1404 template <unsigned int p, typename keyType>
1405 inline auto getProp(const keyType & id) const -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1406 { //uncomment this
1407 return this->template get<p>(id.getKey());
1408 }
1409
1421 template <unsigned int p>
1422 inline auto get(size_t id) -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1423 {
1424#if defined(SE_CLASS1) && !defined(__NVCC__)
1425 check_overflow(id);
1426#endif
1427 grid_key_dx<1> key(id);
1428
1429 return base.template get<p>(key);
1430 }
1431
1441 inline auto get(size_t id) const -> const decltype(base.get_o(grid_key_dx<1>(id)))
1442 {
1443#ifdef SE_CLASS2
1444 check_valid(this,8);
1445#endif
1446#if defined(SE_CLASS1) && !defined(__NVCC__)
1447 check_overflow(id);
1448#endif
1449 grid_key_dx<1> key(id);
1450
1451 return base.get_o(key);
1452 }
1453
1461 {
1462 grid_key_dx<1> key(size()-1);
1463
1464 return base.get_o(key);
1465 }
1466
1468 ~vector() THROW
1469 {
1470 // Eliminate the pointer
1471 }
1472
1479 {
1481
1482 dup.v_size = v_size;
1483 dup.base.swap(base.duplicate());
1484
1485#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1486
1487 dup.base_gpu.constructor_impl(v_size,dup.base.toKernel());
1488
1489#endif
1490
1491 copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false>::copy(dup,*this);
1492
1493 return dup;
1494 }
1495
1502 :v_size(0)
1503 {
1504 swap(v);
1505
1506#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1507
1508 base_gpu.constructor_impl(v_size,this->base.toKernel());
1509
1510#endif
1511 }
1512
1519 :v_size(0)
1520 {
1521 swap(v.duplicate());
1522
1523#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1524
1525 base_gpu.constructor_impl(v_size,this->base.toKernel());
1526
1527#endif
1528 }
1529
/*! \brief Default constructor: create an empty vector (size 0) and
 *         allocate the base storage
 */
vector() THROW
:v_size(0),base(0)
{
    base.setMemory();

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

    base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
}
1542
/*! \brief Construct a vector with sz elements
 *
 * \param sz initial number of elements (and initial capacity)
 */
vector(size_t sz) THROW
:v_size(sz),base(sz)
{
    base.setMemory();

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

    base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
}
1555
1562 void set(size_t id, const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container & obj)
1563 {
1564#ifdef SE_CLASS1
1565 check_overflow(id);
1566#endif
1568 base.set(id,obj);
1569 }
1570
1586 template <typename encap_S, unsigned int ...args> void set_o(size_t i, const encap_S & obj)
1587 {
1588 // write the object in the last element
1589 object_s_di<encap_S,decltype(get(i)),OBJ_ENCAP,args...>(obj,get(i));
1590 }
1591
/*! \brief Set the element at position id to a copy of obj
 *
 * \param id element index (checked with SE_CLASS1)
 * \param obj value to store
 */
void set(size_t id, const T & obj)
{
#ifdef SE_CLASS1
    check_overflow(id);
#endif
    base.set(id,obj);
}
1606
1615 {
1616#ifdef SE_CLASS1
1617 check_overflow(id);
1618#endif
1619 base.set(id,v.base,src);
1620 }
1621
1622 template<typename key_type>
1623 key_type getOriginKey(key_type vec_key)
1624 {
1625 return vec_key;
1626 }
1627
1628
1639 {
1640 v_size = mv.v_size;
1641 base.swap(mv.base);
1642
1643#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1644
1645 base_gpu.constructor_impl(v_size,this->base.toKernel());
1646
1647#endif
1648
1649 return *this;
1650 }
1651
1662 {
1663 v_size = mv.v_size;
1664 size_t rsz[1] = {v_size};
1665 if(rsz[0]>base.size()) {
1666 base.resize(rsz);
1667 }
1668 // copy the object on cpu
1669 for (size_t i = 0 ; i < v_size ; i++ )
1670 {
1671 grid_key_dx<1> key(i);
1672 base.set(key,mv.base,key);
1673 }
1674
1675#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1676
1677 base_gpu.constructor_impl(v_size,this->base.toKernel());
1678
1679#endif
1680
1681 copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false>::copy2(*this,mv);
1682
1683 return *this;
1684 }
1685
1696 {
1697 v_size = mv.v_size;
1698 base.swap(mv.base);
1699
1700#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1701
1702 base_gpu.constructor_impl(v_size,this->base.toKernel());
1703
1704#endif
1705
1706 return *this;
1707 }
1708
1719 {
1720 v_size = mv.getInternal_v_size();
1721 size_t rsz[1] = {v_size};
1722 base.resize(rsz);
1723
1724 // copy the object
1725 for (size_t i = 0 ; i < v_size ; i++ )
1726 {
1727 grid_key_dx<1> key(i);
1728 base.set(key,mv.getInternal_base(),key);
1729 }
1730
1731#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1732
1733 base_gpu.constructor_impl(v_size,this->base.toKernel());
1734
1735#endif
1736
1737 copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false && Mem::isDeviceHostSame() == false>::copy2(*this,mv);
1738
1739 return *this;
1740 }
1741
1751 template<typename Mem, template <typename> class layout_base2>
1753 {
1754 v_size = mv.v_size;
1755 base.swap(mv.base);
1756
1757#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1758
1759 base_gpu.constructor_impl(v_size,this->base.toKernel());
1760
1761#endif
1762
1763 return *this;
1764 }
1765
1775 template<typename Mem,
1776 template <typename> class layout_base2,
1777 typename check = typename std::enable_if<!std::is_same<typename layout_base2<T>::type,typename layout_base<T>::type>::value >::type>
1780 {
1781 v_size = mv.getInternal_v_size();
1782 size_t rsz[1] = {v_size};
1783 base.resize(rsz);
1784
1785 // copy the object
1786 for (size_t i = 0 ; i < v_size ; i++ )
1787 {
1788 grid_key_dx<1> key(i);
1789 base.set_general(key,mv.getInternal_base(),key);
1790 }
1791
1792#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1793
1794 base_gpu.constructor_impl(v_size,this->base.toKernel());
1795
1796#endif
1797
1798 copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false && Mem::isDeviceHostSame() == false>::copy2(*this,mv);
1799
1800 return *this;
1801 }
1802
1809 {
1810 return !this->operator==(v);
1811 }
1812
1819 {
1820 if (v_size != v.v_size)
1821 return false;
1822
1823 // check object by object
1824 for (size_t i = 0 ; i < v_size ; i++ )
1825 {
1826 grid_key_dx<1> key(i);
1827
1828 if (base.get_o(key) != v.base.get_o(key))
1829 return false;
1830 }
1831
1832 return true;
1833 }
1834
1844 {
1845 size_t sz_sp = v_size;
1846
1847 // swap the v_size
1848 v_size = v.v_size;
1849
1850 base.swap_nomode(v.base);
1851
1852#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1853
1854 base_gpu.constructor_impl(v_size,this->base.toKernel());
1855
1856#endif
1857
1858 v.v_size = sz_sp;
1859 }
1860
1867 {
1868 size_t sz_sp = v_size;
1869
1870 // swap the v_size
1871 v_size = v.v_size;
1872
1873 base.swap(v.base);
1874 v.v_size = sz_sp;
1875
1876
1877#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1878
1879 base_gpu.constructor_impl(v_size,this->base.toKernel());
1880 v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());
1881
1882#endif
1883 }
1884
1891 {
1892 size_t sz_sp = v_size;
1893
1894 // swap the v_size
1895 v_size = v.v_size;
1896
1897 base.swap(v.base);
1898 v.v_size = sz_sp;
1899
1900#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1901
1902 base_gpu.constructor_impl(v_size,this->base.toKernel());
1903 v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());
1904
1905#endif
1906 }
1907
1916 {
1917 return vector_key_iterator(v_size,start);
1918 }
1919
1930 {
1931 return vector_key_iterator(stop,0);
1932 }
1933
1934#ifdef CUDA_GPU
1935
1940 ite_gpu<1> getGPUIteratorTo(long int stop, size_t n_thr = default_kernel_wg_threads_) const
1941 {
1942 grid_key_dx<1,long int> start(0);
1943 grid_key_dx<1,long int> stop_(stop);
1944
1945 return base.getGPUIterator(start,stop_,n_thr);
1946 }
1947
1948
1949#endif
1950
1959 {
1960#ifdef SE_CLASS2
1961 check_valid(this,8);
1962#endif
1963 return getIterator();
1964 }
1965
1974 {
1975 return vector_key_iterator(v_size);
1976 }
1977
1984 template<unsigned int p>
1986 {
1987#ifdef SE_CLASS2
1988 check_valid(this,8);
1989#endif
1991 }
1992
1993#ifdef CUDA_GPU
1994
1999 ite_gpu<1> getGPUIterator(size_t n_thr = default_kernel_wg_threads_) const
2000 {
2001 grid_key_dx<1> start(0);
2002 grid_key_dx<1> stop(size()-1);
2003
2004 return base.getGPUIterator(start,stop,n_thr);
2005 }
2010 ite_gpu<1> getDomainIteratorGPU(size_t n_thr = default_kernel_wg_threads_) const
2011 {
2012 return getGPUIterator(n_thr);
2013 }
2014
2015#endif
2023 {
2024 return base.packObjectSize();
2025 }
2026
2034 size_t packObject(void * mem)
2035 {
2036 return base.packObject(mem);
2037 }
2038
2049 template<int ... prp> static inline size_t calculateMem(size_t n, size_t e)
2050 {
2051 if (n == 0)
2052 {
2053 return 0;
2054 }
2055 else
2056 {
2057 if (sizeof...(prp) == 0)
2058 return grow_p::grow(0,n) * sizeof(typename T::type);
2059
2060 typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
2061
2062 return grow_p::grow(0,n) * sizeof(prp_object);
2063 }
2064 }
2065
2076 template<int ... prp> static inline size_t packMem(size_t n, size_t e)
2077 {
2078 if (sizeof...(prp) == 0)
2079 return n * sizeof(typename T::type);
2080
2081 typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
2082
2083 return n * sizeof(prp_object);
2084 }
2085
2093 inline static size_t calculateNMem(size_t n)
2094 {
2095 return 1;
2096 }
2097
2105 template<unsigned int p>
2106 auto getMemory() -> decltype(base.template getMemory<p>())
2107 {
2108 return base.template getMemory<p>();
2109 }
2110
2116 template<unsigned int p = 0> void setMemory(Memory & mem)
2117 {
2118 base.template setMemory<p>(mem);
2119
2120#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2121
2122 base_gpu.constructor_impl(v_size,this->base.toKernel());
2123
2124#endif
2125 }
2126
2132 void setMemoryArray(Memory * mem)
2133 {
2134 base.setMemoryArray(mem);
2135
2136#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2137
2138 base_gpu.constructor_impl(v_size,this->base.toKernel());
2139
2140#endif
2141 }
2142
2150 template<unsigned int p = 0> void * getPointer()
2151 {
2152 return base.template getPointer<p>();
2153 }
2154
2160 template<unsigned int p = 0> const void * getPointer() const
2161 {
2162 return base.getPointer();
2163 }
2164
2170 static bool noPointers()
2171 {
2172 return false;
2173 }
2174
2180 const size_t & getInternal_v_size() const
2181 {
2182 return v_size;
2183 }
2184
2191 {
2192 return base;
2193 }
2194
2199 template<unsigned int ... prp> void hostToDevice()
2200 {
2201 base.template hostToDevice<prp ...>();
2202 }
2203
2208 template<unsigned int ... prp> void deviceToHost()
2209 {
2210 base.template deviceToHost<prp ...>();
2211 }
2212
2213
2218 template<unsigned int ... prp> void deviceToHost(size_t start, size_t stop)
2219 {
2220 base.template deviceToHost<prp ...>(start,stop);
2221 }
2222
2227 template<unsigned int ... prp> void hostToDevice(size_t start, size_t stop)
2228 {
2229 base.template hostToDevice<prp ...>(start,stop);
2230 }
2231
2236 template<unsigned int ... prp> void hostToDeviceNUMA(size_t start, size_t stop)
2237 {
2238 base.template hostToDeviceNUMA<prp ...>(start,stop);
2239 }
2240
2245 template<unsigned int ... prp> void hostToDeviceNUMA()
2246 {
2247 base.template hostToDeviceNUMA<prp ...>();
2248 }
2249
2250#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2251
2253
2262 {
2263// base_gpu.constructor_impl(v_size,this->base.toKernel());
2264
2266 }
2267
2275 inline const vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base> toKernel() const
2276 {
2277// base_gpu.constructor_impl(v_size,this->base.toKernel());
2278
2279 return vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base>(base_gpu);
2280 }
2281
2282#else
2283
2292 {
2293 vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> v(v_size,this->base.toKernel());
2294
2295 return v;
2296 }
2297
2306 {
2307 if (base.size() == 0)
2308 {std::cout << __FILE__ << ":" << __LINE__ << " Warning you are off-loading with toGPU a vector that seem to be empty or not initialized" << std::endl; }
2309
2310 vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> v(v_size,this->base.toKernel());
2311
2312 return v;
2313 }
2314
2315#endif
2316
/*! \brief Dump the selected properties of every element into a string
 *
 * One line per element, each prefixed with prefix and labelled with
 * its index.
 *
 * NOTE(review): prefix is streamed twice per element (two consecutive
 * inserts below) — looks accidental; confirm before changing the output
 * format.
 *
 * \tparam prps properties to print
 * \param prefix string prepended to every line
 *
 * \return the formatted dump
 */
template<unsigned int ... prps>
const std::string toString(std::string prefix = std::string())
{
    std::stringstream ss;
    auto it = getIterator();

    while (it.isNext())
    {
        auto p = it.get();

        ss << prefix;

        ss << prefix << " element[" << p << "]" << " ";

        // print each selected property via the compile-time property loop
        vector_printer<self_type,prps ...> vp(*this,p,ss);
        boost::mpl::for_each_ref<boost::mpl::range_c<int,0,sizeof...(prps)>>(vp);

        ss << std::endl;

        ++it;
    }

    return ss.str();
}
2348
2349 void * internal_get_size_pointer() {return &v_size;}
2350
/*! \brief Print (to stdout) the byte size of this vector type and of
 *         its members, with demangled type names (needs RTTI)
 */
void print_size()
{
#ifndef DISABLE_ALL_RTTI
    std::cout << "the size of: " << demangle(typeid(self_type).name()) << " is " << sizeof(self_type) << std::endl;
    std::cout << "    " << demangle(typeid(decltype(v_size)).name()) << ":" << sizeof(decltype(v_size)) << std::endl;
    std::cout << "    " << demangle(typeid(decltype(base)).name()) << ":" << sizeof(decltype(base)) << std::endl;
#endif
}
2359
2360 };
2361
2362 template <typename T> using vector_std = vector<T, HeapMemory, memory_traits_lin, openfpm::grow_policy_double, STD_VECTOR>;
2363 template<typename T> using vector_gpu = openfpm::vector<T,CudaMemory,memory_traits_inte>;
2364 template<typename T> using vector_gpu_lin = openfpm::vector<T,CudaMemory,memory_traits_lin>;
2365 template<typename T> using vector_gpu_single = openfpm::vector<T,CudaMemory,memory_traits_inte,openfpm::grow_policy_identity>;
2366 template<typename T> using vector_custd = vector<T, CudaMemory, memory_traits_inte, openfpm::grow_policy_double, STD_VECTOR>;
2367}
2368
2369#endif
grid_key_dx is the key to access any element in the grid
Definition grid_key.hpp:19
Declaration grid_sm.
Definition grid_sm.hpp:167
Implementation of 1-D std::vector like structure.
void remove(openfpm::vector< size_t > &keys, size_t start=0)
Remove several entries from the vector.
static bool noPointers()
This class has pointer inside.
void hostToDevice()
Copy the memory from host to device.
size_t size_local() const
Return the size of the vector.
void set_o(size_t i, const encap_S &obj)
It set an element of the vector from a object that is a subset of the vector properties.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, unsigned int offset, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
void swap(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&v)
Swap the memory with another vector.
void add(const T &v)
It inserts a new object in the vector; eventually it reallocates the grid.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, size_t start)
It merge the elements of a source vector to this vector.
void non_zero_one(size_t sz[1], size_t arg)
If the argument is zero return 1 otherwise return the argument.
void add(const typename grid_base< 1, T, Memory, typename layout_base< T >::type >::container &v)
It inserts a new object in the vector; eventually it reallocates the vector.
vector_key_iterator_ele< p, self_type > getIteratorElements() const
Get the vector elements iterator.
void deviceToHost(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host.
bool operator==(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) const
Check that two vectors are equal.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Mem, layout_base, gp, OPENFPM_NATIVE > &mv)
Assignment operator.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&mv)
Assignment operator.
void setMemory(Memory &mem)
Set the memory of the base structure using an object.
const void * getPointer() const
Return the pointer that store the data.
void remove(openfpm::vector< aggregate< int > > &keys, size_t start=0)
Remove several entries from the vector.
const std::string toString(std::string prefix=std::string())
vector(vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&v)
Constructor from another temporal vector.
void setMemoryArray(Memory *mem)
Set the memory of the base structure using an object.
void hostToDevice(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host.
const grid_base< 1, T, Memory, layout_type > & getInternal_base() const
Internal function.
void add_no_device()
It inserts a new empty object in the vector; eventually it reallocates the grid.
void remove(size_t key)
Remove one entry from the vector.
void * getDeviceBufferCopy()
It return the properties arrays.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &mv)
Assignment operator.
static size_t calculateNMem(size_t n)
How many allocation are required to create n-elements.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Mem, layout_base2, grow_p, OPENFPM_NATIVE > &mv)
Assignment operator.
vector_gpu_ker< typename apply_transform< layout_base, T >::type, layout_base > toKernel()
Convert the grid into a data-structure compatible for computing into GPU.
auto getProp(const unsigned int &id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
static size_t calculateMem(size_t n, size_t e)
Calculate the memory size required to allocate n elements.
void swap_nomode(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v)
Swap the memory with another vector.
void swap(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v)
Swap the memory with another vector.
vector(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) THROW
Constructor from another constant vector.
const size_t & getInternal_v_size() const
Internal function.
static size_t packMem(size_t n, size_t e)
Calculate the memory size required to pack n elements.
size_t packObjectSize()
Return the size of the message needed to pack this object.
layout_base< T >::type layout_type
Type of the encapsulation memory parameter.
void set(size_t id, const T &obj)
Set the object id to obj.
vector_key_iterator getIteratorTo(size_t stop) const
Get iterator over the particles from 0 until a particular index.
void insert(size_t key)
Insert an entry in the vector.
vector(size_t sz) THROW
Constructor, vector of size sz.
size_t capacity()
return the maximum capacity of the vector before reallocation
void * getPointer()
Return the pointer that store the data.
void merge_prp(const vector< S, M, layout_base, gp, OPENFPM_NATIVE > &v, const openfpm::vector< size_t > &opart)
It merge the elements of a source vector to this vector.
auto get(size_t id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
void add()
It inserts a new empty object in the vector; eventually it reallocates the grid.
void set(size_t id, const typename grid_base< 1, T, Memory, typename layout_base< T >::type >::container &obj)
Set the object id to obj.
vector< T, Memory, layout_base2, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Mem, layout_base2, grow_p, OPENFPM_NATIVE > &&mv)
Assignment operator.
const vector_gpu_ker< typename apply_transform< layout_base, T >::type, layout_base > toKernel() const
Convert the grid into a data-structure compatible for computing into GPU.
vector_key_iterator getIterator() const
Get the vector elements iterator.
const grid_base< 1, T, Memory, layout_type >::container last() const
Get the last element of the vector.
void reserve(size_t sp)
Reserve slots in the vector to avoid reallocation.
grid_base< 1, T, Memory, typename layout_base< T >::type >::container last()
Get the last element of the vector.
vector_key_iterator iterator_key
iterator for the vector
bool isSubset() const
Indicate that this class is not a subset.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > duplicate() const
It duplicate the vector.
void resize(size_t slot, size_t opt=DATA_ON_DEVICE|DATA_ON_HOST, unsigned int blockSize=1)
Resize the vector.
vector_key_iterator getDomainIterator() const
Get the vector elements iterator.
auto get(size_t id) const -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
size_t packObject(void *mem)
Pack the object into the given pointer.
auto getProp(const KeyType &id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
auto get(size_t id) const -> const decltype(base.get_o(grid_key_dx< 1 >(id)))
Get an element of the vector.
grid_base< 1, T, Memory, typename layout_base< T >::type > base
1-D static grid
auto getProp(const keyType &id) const -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
void hostToDeviceNUMA()
Synchronize the memory buffer in the device with the memory in the host respecting NUMA domains.
grid_base< 1, T, Memory, typename layout_base< T >::type >::container container
Object container for T; it is the return type of get_o, which returns an object type through the encapsulation.
void add_prp_device(const vector< S, M, layout_base2, gp, impl > &v)
It add the element of a source vector to this vector.
void merge_prp_device(const vector< S, M, layout_base, gp, OPENFPM_NATIVE > &v, unsigned int start)
It merge the elements of a source vector to this vector (on device)
void fill(unsigned char c)
Fill the buffer with a byte.
auto getMemory() -> decltype(base.template getMemory< p >())
Return the memory object.
void add(const vector< T, M, layout_base, gp, OPENFPM_NATIVE > &v)
It add the element of another vector to this vector.
layout_base< T > layout_base_
Type of the encapsulation memory parameter.
void add_prp(const vector< S, M, layout_base2, gp, impl > &v)
It add the element of a source vector to this vector.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Mem, layout_base, gp, OPENFPM_NATIVE > &&mv)
Assignment operator.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
vector_key_iterator getIteratorFrom(size_t start) const
Get iterator over the particles from a particular index.
const grid_base< 1, T, Memory, typename layout_base< T >::type >::container get_o(size_t id) const
Get an element of the vector.
void merge_prp_v_device(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, unsigned int start, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
void hostToDeviceNUMA(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host respecting NUMA domains.
void merge_prp_v_device(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, const vector_opart_type &opart, unsigned int start, unsigned int stop)
It merge the elements of a source vector to this vector.
size_t size() const
Return the size of the vector.
void set(size_t id, vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v, size_t src)
Set the element of the vector v from another element of another vector.
auto get(size_t id) -> decltype(base.get_o(grid_key_dx< 1 >(id)))
Get an element of the vector.
bool operator!=(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) const
Check that two vectors are not equal.
void deviceToHost()
Synchronize the memory buffer in the device with the memory in the host.
Implementation of 1-D std::vector like structure.
size_t size()
Stub size.
convert a type into constant type
aggregate of properties, from a list of object if create a struct that follow the OPENFPM native stru...
It copy two encap object.
Definition Encap.hpp:276
It create a boost::fusion vector with the selected properties.
It copy the properties from one object to another applying an operation.
It copy the properties from one object to another.
grid interface available when on gpu
grid interface available when on gpu
this class is a functor for "for_each" algorithm