OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
map_vector.hpp
1 /*
2  * map_vector.hpp
3  *
4  * Created on: Aug 30, 2014
5  * Author: Pietro Incardona
6  */
7 
8 #ifndef MAP_VECTOR_HPP
9 #define MAP_VECTOR_HPP
10 
11 #include "util/cuda_launch.hpp"
12 #include <iostream>
13 #include <typeinfo>
14 #include "util/common.hpp"
15 #include "memory/PtrMemory.hpp"
16 #include "util/object_util.hpp"
17 #include "Grid/util.hpp"
18 #include "Vector/util.hpp"
19 #include "Vector/map_vector_grow_p.hpp"
20 #include "memory/ExtPreAlloc.hpp"
21 #include "util/util_debug.hpp"
22 #include "util/Pack_stat.hpp"
23 #include "Grid/map_grid.hpp"
24 #include "memory/HeapMemory.hpp"
25 #include "vect_isel.hpp"
26 #include "util/object_s_di.hpp"
27 #include "util.hpp"
28 #include "util/Pack_stat.hpp"
29 #include "memory/ExtPreAlloc.hpp"
30 #include <string.h>
31 #include "Packer_Unpacker/Unpacker.hpp"
32 #include "Packer_Unpacker/Packer.hpp"
33 #include <fstream>
34 #include "Packer_Unpacker/Packer_util.hpp"
35 #include "Packer_Unpacker/has_pack_agg.hpp"
36 #include "timer.hpp"
37 #include "map_vector_std_util.hpp"
38 #include "data_type/aggregate.hpp"
39 #include "vector_map_iterator.hpp"
40 #include "util/cuda_util.hpp"
41 #include "cuda/map_vector_cuda_ker.cuh"
42 #include "map_vector_printers.hpp"
43 
44 namespace openfpm
45 {
46 
// Primary template (active == false): device<->host mirroring is not needed,
// so both hooks are deliberate no-ops.  Used at the call sites
// "copy_two_vectors_activate_impl<...>::copy(...)" later in this file.
// NOTE(review): the struct declaration line (original line 48, presumably
// "struct copy_two_vectors_activate_impl") is missing from this listing —
// confirm against the upstream map_vector.hpp.
47  template<bool active>
49  {
// No-op: nothing to copy when the activation flag is false.
50  template<typename vector_type1, typename vector_type2>
51  static void copy(vector_type1 & v1, vector_type2 & v2)
52  {
53 
54  }
55 
// No-op counterpart of copy(); see the active specialization below.
56  template<typename vector_type1, typename vector_type2>
57  static void copy2(vector_type1 & v1, vector_type2 & v2)
58  {
59 
60  }
61  };
62 
62 
// Specialization for the "active" case: launches a device kernel to copy one
// vector into the other.  Only does anything when compiled with nvcc
// (__NVCC__); otherwise the bodies compile to nothing.
// NOTE(review): the specialization header line (original line 64) is missing
// from this listing — confirm against the upstream source.
63  template<>
65  {
// Copy v1's elements into v2 on the device, iterating over v1.
66  template<typename vector_type1, typename vector_type2>
67  static void copy(vector_type1 & v1, vector_type2 & v2)
68  {
69 #ifdef __NVCC__
// Guard: a zero-sized vector would produce an empty/invalid GPU iterator.
70  if (v1.size() != 0)
71  {
72  auto it = v1.getGPUIterator();
73  CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
74  }
75 #endif
76  }
77 
// Variant used after *this has been resized to v2's size.
// NOTE(review): this overload tests v2.size() but builds the iterator from
// v1 — presumably both vectors have equal size at every call site; verify.
78  template<typename vector_type1, typename vector_type2>
79  static void copy2(vector_type1 & v1, vector_type2 & v2)
80  {
81 #ifdef __NVCC__
82  if (v2.size() != 0)
83  {
84  auto it = v1.getGPUIterator();
85  CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
86  }
87 #endif
88  }
89  };
90 
// Primary template of add_prp_device_impl (fallback when is_ok_cuda == false):
// merely reports that add_prp_device requires a CUDA-enabled vector.
// The <true,...> specialization further below does the real device merge.
// NOTE(review): the struct declaration line (original line 94) and the
// "static void run(...)" signature line (original line 102) are missing from
// this listing — confirm against the upstream source.
91  template<bool is_ok_cuda,typename T, typename Memory,
92  template<typename> class layout_base,
93  typename grow_p>
95  {
96  template <typename S,
97  typename M,
98  typename gp,
99  unsigned int impl,
100  template <typename> class layout_base2,
101  unsigned int ...args>
103  {
// Diagnostic only — this fallback intentionally performs no merge.
104  std::cout << __FILE__ << ":" << __LINE__ << " Error the function add_prp_device only work with cuda enabled vector" << std::endl;
105  }
106  };
107 
// Primary template of merge_prp_device_impl (fallback when is_ok_cuda ==
// false): reports that merge_prp_device requires a CUDA-enabled vector.
// NOTE(review): the struct declaration line (original line 111) and the
// parameter line carrying the source vector "v" (original line 120) are
// missing from this listing — confirm against the upstream source.
108  template<bool is_ok_cuda,typename T, typename Memory,
109  template<typename> class layout_base,
110  typename grow_p>
112  {
113  template <typename S,
114  typename M,
115  typename gp,
116  unsigned int impl,
117  template <typename> class layout_base2,
118  unsigned int ...args>
119  static void run(openfpm::vector<T,Memory,layout_base,grow_p,impl> & this_ ,
121  unsigned int offset)
122  {
// Diagnostic only — this fallback intentionally performs no merge.
123  std::cout << __FILE__ << ":" << __LINE__ << " Error the function merge_prp_device only work with cuda enabled vector" << std::endl;
124  }
125  };
126 
// CUDA-enabled specialization: appends v's elements to this_ on the device.
// Grows this_ by v.size() (device-side data only) and launches a kernel that
// copies the selected properties (args...) starting at the old size.
// NOTE(review): the "static void run(...)" signature line (original line 138)
// is missing from this listing — confirm against the upstream source.
127  template<typename T, typename Memory,
128  template<typename> class layout_base,
129  typename grow_p>
130  struct add_prp_device_impl<true,T,Memory,layout_base,grow_p>
131  {
132  template <typename S,
133  typename M,
134  typename gp,
135  unsigned int impl,
136  template <typename> class layout_base2,
137  unsigned int ...args>
139  {
140  // merge the data on device
141 
142  #if defined(CUDA_GPU) && defined(__NVCC__)
143 
// Remember the insertion point before growing the destination.
144  size_t old_sz = this_.size();
145  this_.resize(this_.size() + v.size(),DATA_ON_DEVICE);
146  auto ite = v.getGPUIterator();
148 
// One thread per source element; properties args... are copied to
// this_[old_sz + i].
149  CUDA_LAUNCH((merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(unsigned int)old_sz);
151  #else
152  std::cout << __FILE__ << ":" << __LINE__ << " Error the function add_prp_device only work when map_vector is compiled with nvcc" << std::endl;
153  #endif
154  }
155  };
156 
// CUDA-enabled specialization: merges v's properties (args...) into this_
// starting at the given element offset.  Unlike add_prp_device_impl, the
// destination is NOT resized here — the caller must guarantee capacity.
// NOTE(review): the parameter line carrying the source vector "v" (original
// line 169) is missing from this listing — confirm against upstream.
157  template<typename T, typename Memory,
158  template<typename> class layout_base,
159  typename grow_p>
160  struct merge_prp_device_impl<true,T,Memory,layout_base,grow_p>
161  {
162  template <typename S,
163  typename M,
164  typename gp,
165  unsigned int impl,
166  template <typename> class layout_base2,
167  unsigned int ...args>
168  static void run(vector<T,Memory,layout_base,grow_p,impl> & this_ ,
170  unsigned int offset)
171  {
172  // merge the data on device
173 
174  #if defined(CUDA_GPU) && defined(__NVCC__)
175 
// One thread per source element; writes land at this_[offset + i].
176  auto ite = v.getGPUIterator();
178  CUDA_LAUNCH((merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(unsigned int)offset);
179 
180  #else
181  std::cout << __FILE__ << ":" << __LINE__ << " Error the function merge_prp_device only work when map_vector is compiled with nvcc" << std::endl;
182  #endif
183  }
184  };
185 
// Stub primary template of openfpm::vector: instantiated only when no
// specialization matches the chosen impl tag (see the STD_VECTOR headers
// included below and the OPENFPM_NATIVE specialization that follows).
// Any use of it at runtime reports an error and behaves as an empty vector.
201  template<typename T, typename Memory, template<typename> class layout_base, typename grow_p, unsigned int impl>
202  class vector
203  {
// Always reports the stub-instantiation error and returns size 0.
211  size_t size()
212  {
213  std::cerr << __FILE__ << ":" << __LINE__ << " Error stub vector created" << std::endl;
214  return 0;
215  }
216  };
217 
218  #include "map_vector_std.hpp"
219  #include "map_vector_std_ptr.hpp"
220 
221 #ifdef CUDA_GPU
222  #include "cuda/map_vector_std_cuda.hpp"
223 #endif
224 
240  template<typename T,typename Memory, template <typename> class layout_base, typename grow_p>
241  class vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE>
242  {
244 
248  size_t v_size;
249 
252 
/*! \brief Write arg into sz[0], substituting 1 when arg is zero
 *
 * Used by the add() family so that doubling the capacity of an empty
 * vector (2 * 0) still yields a usable allocation size.
 *
 * \param sz one-element output array receiving the adjusted size
 * \param arg requested size (may be zero)
 */
void non_zero_one(size_t sz[1], size_t arg)
{
	sz[0] = (arg == 0) ? 1 : arg;
}
266 
267 #ifdef SE_CLASS1
268 
// Bounds check used only in SE_CLASS1 (debug) builds: reports and triggers
// the configured error action when id is past the logical size v_size
// (note: checked against v_size, not the allocated capacity base.size()).
275  void check_overflow(size_t id) const
276  {
277  if (id >= v_size)
278  {
279  std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " overflow id: " << id << "\n";
280  ACTION_ON_ERROR(VECTOR_ERROR_OBJECT);
281  }
282  }
283 
284 #endif
285 
286  public:
289  typedef int yes_i_am_vector;
293 
295  typedef typename layout_base<T>::type layout_type;
296 
298  typedef layout_base<T> layout_base_;
299 
302 
304  // you can access all the properties of T
308  typedef T value_type;
309 
311  typedef Memory Memory_type;
314  typedef grow_p grow_policy;
316  template<typename Tobj>
317  struct layout_base__
318  {
319  typedef layout_base<Tobj> type;
320  };
321 
322  // Implementation of packer and unpacker for vector
323 #include "vector_pack_unpack.ipp"
324 
// Logical number of stored elements (v_size), not the allocated capacity.
330  size_t size() const
331  {
332  return v_size;
333  }
334 
// Same as size(); provided for interface compatibility with distributed
// containers that distinguish local from global size.
340  size_t size_local() const
341  {
342  return v_size;
343  }
344 
// Allocated capacity: the size of the underlying 1D grid, which can exceed
// the logical size because of the grow policy.
351  size_t capacity()
352  {
353  return base.size();
354  }
355 
364  void reserve(size_t sp)
365  {
366  if (sp > base.size())
367  {
369  size_t sz[1] = {sp};
370  base.resize(sz);
371  }
372 
373 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
374 
375  base_gpu.constructor_impl(v_size,this->base.toKernel());
376 
377 #endif
378  }
379 
385  void clear()
386  {
387  resize(0);
388  }
389 
396  {
397  size_t sz[1] = {size()};
398  base.resize(sz);
399 
400 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
401 
402  base_gpu.constructor_impl(v_size,this->base.toKernel());
403 
404 #endif
405  }
// Resize the vector to slot elements, reallocating when the capacity is
// exceeded.  opt selects where data live/are preserved (DATA_ON_DEVICE /
// DATA_ON_HOST) and blockSize is forwarded to the underlying grid resize.
419  void resize(size_t slot, size_t opt = DATA_ON_DEVICE | DATA_ON_HOST, unsigned int blockSize = 1)
420  {
421  // If we need more space than what we allocated, allocate new memory
423  if (slot > base.size())
424  {
425  size_t gr = slot;
426  // If you increase by one we smartly resize the internal capacity more than 1
427  // This is to make faster patterns like resize(size()+1)
// Grow-policy over-allocation applies only for the +1 pattern, and is
// skipped when opt is 0 or the caller requested EXACT_RESIZE.
428  if (slot - base.size() == 1 && opt && (opt & EXACT_RESIZE) == 0)
429  {
430  gr = grow_p::grow(base.size(),slot);
431  }
434  size_t sz[1] = {gr};
435 
436  base.resize(sz,opt,blockSize);
437  }
438 
439  // update the vector size
440  v_size = slot;
441 
442 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
443 
// Re-sync the sequential/OpenMP kernel mirror after any reallocation.
444  base_gpu.constructor_impl(v_size,this->base.toKernel());
445 
446 #endif
447  }
448 
// Host-only resize: like resize() but never touches device buffers, and
// always applies the grow policy when reallocating.
458  void resize_no_device(size_t slot)
459  {
460  // If we need more space than what we allocated, allocate new memory
461 
462  if (slot > base.size())
463  {
464  size_t gr = grow_p::grow(base.size(),slot);
465 
467  size_t sz[1] = {gr};
468  base.resize_no_device(sz);
469  }
470 
471  // update the vector size
472  v_size = slot;
473 
474 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
475 
// Re-sync the sequential/OpenMP kernel mirror after any reallocation.
476  base_gpu.constructor_impl(v_size,this->base.toKernel());
477 
478 #endif
479  }
482  typedef size_t access_key;
483 
// Append one default slot at the end, doubling the capacity when full
// (non_zero_one turns 2*0 into 1 for the empty-vector case).  The new
// element is left uninitialized beyond what the grid resize provides.
490  void add()
491  {
493 
494  if (v_size >= base.size())
495  {
497  size_t sz[1];
498  non_zero_one(sz,2*base.size());
499  base.resize(sz);
500  }
501 
503  v_size++;
504 
505 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
506 
// Re-sync the sequential/OpenMP kernel mirror after a possible realloc.
507  base_gpu.constructor_impl(v_size,this->base.toKernel());
508 
509 #endif
510  }
519  {
521 
522  if (v_size >= base.size())
523  {
525  size_t sz[1];
526  non_zero_one(sz,2*base.size());
527  base.resize_no_device(sz);
528  }
529 
531  v_size++;
532 
533 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
534 
535  base_gpu.constructor_impl(v_size,this->base.toKernel());
536 
537 #endif
538  }
539 
// Append a copy of v at the end, doubling the capacity when full
// (same growth scheme as add()).
548  void add(const T & v)
549  {
551 
552  if (v_size >= base.size())
553  {
555  size_t sz[1];
556  non_zero_one(sz,2*base.size());
557  base.resize(sz);
558  }
559 
// Write the new element before bumping the logical size.
561  base.set(v_size,v);
562 
564  v_size++;
566 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
567 
568  base_gpu.constructor_impl(v_size,this->base.toKernel());
569 
570 #endif
571  }
572 
582  void add(const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container & v)
583  {
585 
586  if (v_size >= base.size())
587  {
589  size_t sz[1];
590  non_zero_one(sz,2*base.size());
591  base.resize(sz);
592  }
593 
595  base.set(v_size,v);
598  v_size++;
599 
600 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
601 
602  base_gpu.constructor_impl(v_size,this->base.toKernel());
603 
604 #endif
605  }
612  template <typename M, typename gp> void add(const vector<T, M, layout_base,gp,OPENFPM_NATIVE> & v)
613  {
615  for (size_t i = 0 ; i < v.size() ; i++)
616  add(v.get(i));
617  }
654  template <template<typename,typename> class op, typename S, typename M, typename gp, unsigned int ...args>
656  const openfpm::vector<size_t> & opart)
657  {
658 #ifdef SE_CLASS1
659 
660  if (v.size() != opart.size())
661  std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as o_part.size()" << opart.size() << std::endl;
662 
663 #endif
664  for (size_t i = 0 ; i < v.size() ; i++)
666  {
667 #ifdef SE_CLASS1
668 
669  if (opart.get(i) > size())
670  std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " try to access element " << opart.get(i) << " but the vector has size " << size() << std::endl;
671 
672 #endif
673  // write the object in the last element
674  object_s_di_op<op,decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.get(i)));
675  }
676  }
677 
713  template <template<typename,typename> class op, typename S, typename M, typename gp, unsigned int ...args>
715  unsigned int start)
716  {
718  ::template run<S,M,gp,OPENFPM_NATIVE,layout_base,args...>(*this,v,start);
719  }
720 
757  template <template<typename,typename> class op,
758  typename S,
759  typename M,
760  typename gp,
761  template <typename> class layout_base2,
762  typename vector_opart_type,
763  unsigned int ...args>
765  const vector_opart_type & opart)
766  {
767 #ifdef SE_CLASS1
768 
769  if (v.size() != opart.size())
770  std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as o_part.size()" << opart.size() << std::endl;
771 
772 #endif
773  for (size_t i = 0 ; i < v.size() ; i++)
775  {
776 #ifdef SE_CLASS1
777 
778  if (i >= opart.size())
779  std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " try to access element " << opart.template get<0>(i) << " but the vector has size " << size() << std::endl;
780 
781 #endif
782  // write the object in the last element
783  object_s_di_op<op,decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.template get<0>(i)));
784  }
785  }
786 
823  template <template<typename,typename> class op,
824  typename S,
825  typename M,
826  typename gp,
827  template <typename> class layout_base2,
828  typename vector_opart_type,
829  unsigned int ...args>
831  unsigned int offset,
832  const vector_opart_type & opart)
833  {
834  size_t i2 = 0;
835 
836  for (size_t i = offset ; i < v.size() ; i++)
837  {
838  auto dst = v.get(opart.template get<0>(i2));
839  auto src = v.get(i);
840  copy_cpu_encap_encap_op_prp<op,decltype(v.get(0)),decltype(v.get(0)),args...> cp(src,dst);
841 
842  boost::mpl::for_each_ref< boost::mpl::range_c<int,0,sizeof...(args)> >(cp);
843  i2++;
844  }
845  }
846 
884  template <template<typename,typename> class op,
885  typename S,
886  typename M,
887  typename gp,
888  template <typename> class layout_base2,
889  typename vector_opart_type,
890  unsigned int ...args>
892  const vector_opart_type & opart,
893  unsigned int start,
894  unsigned int stop)
895  {
896 #ifdef SE_CLASS1
897 
898  if (v.size() != stop - start)
899  std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as stop - start" << stop - start << std::endl;
900 
901 #endif
902 
903 #ifdef __NVCC__
904 
905  size_t sz[1] = {stop - start};
906  grid_sm<1,void> nm(sz);
907 
908  auto ite = nm.getGPUIterator();
909 
910  // write the object in the last element
911  CUDA_LAUNCH((merge_add_prp_device_impl_src_dst_opar_offset<op,
912  decltype(v.toKernel()),
913  decltype(this->toKernel()),
914  decltype(opart.toKernel()),
915  args...>),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);
916 
917  // calculate
918 #else
919  std::cout << __FILE__ << ":" << __LINE__ << " Error you have to compile map_vector.hpp with nvcc to make GPU code working" << std::endl;
921 #endif
922  }
923 
960  template <template<typename,typename> class op,
961  typename S,
962  typename M,
963  typename gp,
964  template <typename> class layout_base2,
965  typename vector_opart_type,
966  unsigned int ...args>
968  unsigned int start,
969  const vector_opart_type & opart)
970  {
971 #ifdef SE_CLASS1
973  if (v.size() < opart.size() + start)
974  std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be snaller than o_part.size() + start " << opart.size() + start << std::endl;
975 
976 #endif
977 
978 #ifdef __NVCC__
979 
980  auto ite = opart.getGPUIterator();
982  // write the object in the last element
983  CUDA_LAUNCH((merge_add_prp_device_impl_src_offset_dst_opar<op,
984  decltype(v.toKernel()),
985  decltype(this->toKernel()),
986  decltype(opart.toKernel()),
987  args... >),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);
988 
989  // calculate
990 #else
991  std::cout << __FILE__ << ":" << __LINE__ << " Error you have to compile map_vector.hpp with nvcc to make GPU code working" << std::endl;
992 
993 #endif
994  }
995 
1031  template <template<typename,typename> class op,
1032  typename S,
1033  typename M,
1034  typename gp,
1035  template <typename> class layout_base2,
1036  unsigned int ...args>
1038  size_t start)
1039  {
1041  for (size_t i = 0 ; i < v.size() ; i++)
1042  {
1043 #ifdef SE_CLASS1
1044 
1045  if (start + i >= v_size)
1046  std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " try to access element " << start+i << " but the vector has size " << size() << std::endl;
1047 
1048 #endif
1049  // write the object in the last element
1050  object_s_di_op<op,decltype(v.get(0)),decltype(get(0)),OBJ_ENCAP,args...>(v.get(i),get(start+i));
1051  }
1052  }
1053 
1068  template <typename S,
1069  typename M,
1070  typename gp,
1071  unsigned int impl,
1072  template <typename> class layout_base2,
1073  unsigned int ...args>
1075  {
1077  for (size_t i = 0 ; i < v.size() ; i++)
1078  {
1079  // Add a new element
1080  add();
1081 
1082  // write the object in the last element
1083  object_s_di<decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(size()-1));
1084  }
1085  }
1086 
1101  template <typename S,
1102  typename M,
1103  typename gp,
1104  unsigned int impl,
1105  template <typename> class layout_base2,
1106  unsigned int ...args>
1108  {
1110  ::template run<S,M,gp,impl,layout_base2,args...>(*this,v);
1111  }
1112 
1118  void insert(size_t key)
1119  {
1120  add();
1121 
1122  long int d_k = (long int)size()-1;
1123  long int s_k = (long int)size()-2;
1124 
1125  // keys
1126  while (s_k >= (long int)key)
1127  {
1128  set(d_k,get(s_k));
1129  d_k--;
1130  s_k--;
1131  }
1132  }
1133 
1134 
1140  void remove(size_t key)
1141  {
1142  size_t d_k = key;
1143  size_t s_k = key + 1;
1144 
1145  // keys
1146  while (s_k < size())
1147  {
1148  set(d_k,get(s_k));
1149  d_k++;
1150  s_k++;
1151  }
1152 
1153  // re-calculate the vector size
1154 
1155  v_size--;
1156  }
1157 
// Remove several entries in one left-compaction pass.  keys must be sorted
// ascending; entries keys[start..] are dropped and the survivors are packed
// toward the front.  NOTE(review): v_size is reduced by the full count
// keys.size()-start — presumably keys are unique and in-range; verify at
// the call sites.
1166  void remove(openfpm::vector<size_t> & keys, size_t start = 0)
1167  {
1168  // Nothing to remove return
1169  if (keys.size() <= start )
1170  return;
1171 
// d_k: next write position (first removed slot); s_k: next read position.
1172  size_t a_key = start;
1173  size_t d_k = keys.get(a_key);
1174  size_t s_k = keys.get(a_key) + 1;
1175 
1176  // keys
1177  while (s_k < size())
1178  {
1179  // s_k should always point to a key that is not going to be deleted
1180  while (a_key+1 < keys.size() && s_k == keys.get(a_key+1))
1181  {
1182  a_key++;
1183  s_k = keys.get(a_key) + 1;
1184  }
1185 
1186  // In case of overflow
1187  if (s_k >= size())
1188  break;
1189 
1190  set(d_k,get(s_k));
1191  d_k++;
1192  s_k++;
1193  }
1194 
1195  // re-calculate the vector size
1196 
1197  v_size -= keys.size() - start;
1198  }
1199 
// Same multi-key compaction as remove(vector<size_t>&, size_t) above, but
// the indices arrive as property 0 of an aggregate<int> vector (the format
// produced by GPU sort/scan stages).  keys must be sorted ascending.
1208  void remove(openfpm::vector<aggregate<int>> & keys, size_t start = 0)
1209  {
1210  // Nothing to remove return
1211  if (keys.size() <= start )
1212  return;
1213 
// d_k: next write position (first removed slot); s_k: next read position.
1214  size_t a_key = start;
1215  size_t d_k = keys.template get<0>(a_key);
1216  size_t s_k = keys.template get<0>(a_key) + 1;
1217 
1218  // keys
1219  while (s_k < size())
1220  {
1221  // s_k should always point to a key that is not going to be deleted
1222  while (a_key+1 < keys.size() && s_k == keys.template get<0>(a_key+1))
1223  {
1224  a_key++;
1225  s_k = keys.template get<0>(a_key) + 1;
1226  }
1227 
1228  // In case of overflow
1229  if (s_k >= size())
1230  break;
1231 
1232  set(d_k,get(s_k));
1233  d_k++;
1234  s_k++;
1235  }
1236 
1237  // re-calculate the vector size
1238 
1239  v_size -= keys.size() - start;
1240  }
1241 
1253  template <unsigned int p>
1254  inline auto get(size_t id) const -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1255  {
1256 #if defined(SE_CLASS1) && !defined(__NVCC__)
1257  check_overflow(id);
1258 #endif
1259  grid_key_dx<1> key(id);
1260 
1261 
1262  return base.template get<p>(key);
1263  }
1264 
1270  bool isSubset() const
1271  {
1272  return false;
1273  }
1274 
1284  inline auto get(size_t id) -> decltype(base.get_o(grid_key_dx<1>(id)))
1285  {
1286 #if defined(SE_CLASS1) && !defined(__NVCC__)
1287  check_overflow(id);
1288 #endif
1289  grid_key_dx<1> key(id);
1290 
1291  return base.get_o(key);
1292  }
1293 
1307  {
1308 #if defined(SE_CLASS1) && !defined(__NVCC__)
1309  check_overflow(id);
1310 #endif
1311  grid_key_dx<1> key(id);
1312 
1313  return base.get_o(key);
1314  }
1315 
1321  template<unsigned int id> void fill(unsigned char c)
1322  {
1323  base.template fill<id>(c);
1324  }
1325 
1332  template<unsigned int id> void * getDeviceBufferCopy()
1333  {
1334  return base.template getDeviceBuffer<id>();
1335  }
1336 
1345  template<unsigned int id> void * getDeviceBuffer()
1346  {
1347  return base.template getDeviceBuffer<id>();
1348  }
1349 
1350 
1357  {
1358  grid_key_dx<1> key(size()-1);
1359 
1360  return base.get_o(key);
1361  }
1362 
1374  template <unsigned int p,typename KeyType>
1375  inline auto getProp(const KeyType & id) -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1376  {
1377  return this->template get<p>(id.getKey());
1378  }
1379 
1390  template <unsigned int p, typename keyType>
1391  inline auto getProp(const keyType & id) const -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1392  {
1393  return this->template get<p>(id.getKey());
1394  }
1395 
1407  template <unsigned int p>
1408  inline auto get(size_t id) -> decltype(base.template get<p>(grid_key_dx<1>(0)))
1409  {
1410 #if defined(SE_CLASS1) && !defined(__NVCC__)
1411  check_overflow(id);
1412 #endif
1413  grid_key_dx<1> key(id);
1414 
1415  return base.template get<p>(key);
1416  }
1417 
1427  inline auto get(size_t id) const -> const decltype(base.get_o(grid_key_dx<1>(id)))
1428  {
1429 #ifdef SE_CLASS2
1430  check_valid(this,8);
1431 #endif
1432 #if defined(SE_CLASS1) && !defined(__NVCC__)
1433  check_overflow(id);
1434 #endif
1435  grid_key_dx<1> key(id);
1436 
1437  return base.get_o(key);
1438  }
1439 
1447  {
1448  grid_key_dx<1> key(size()-1);
1449 
1450  return base.get_o(key);
1451  }
1452 
1454  ~vector() THROW
1455  {
1456  // Eliminate the pointer
1457  }
1458 
1465  {
1467 
1468  dup.v_size = v_size;
1469  dup.base.swap(base.duplicate());
1470 
1471 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1472 
1473  dup.base_gpu.constructor_impl(v_size,dup.base.toKernel());
1474 
1475 #endif
1476 
1477  copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false>::copy(dup,*this);
1478 
1479  return dup;
1480  }
1481 
1488  :v_size(0)
1489  {
1490  swap(v);
1491 
1492 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1493 
1494  base_gpu.constructor_impl(v_size,this->base.toKernel());
1495 
1496 #endif
1497  }
1498 
1505  :v_size(0)
1506  {
1507  swap(v.duplicate());
1508 
1509 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1510 
1511  base_gpu.constructor_impl(v_size,this->base.toKernel());
1512 
1513 #endif
1514  }
1515 
1517  vector() THROW
1518  :v_size(0),base(0)
1519  {
1520  base.setMemory();
1521 
1522 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1523 
1524  base_gpu.constructor_impl(v_size,this->base.toKernel());
1525 
1526 #endif
1527  }
1528 
1530  vector(size_t sz) THROW
1531  :v_size(sz),base(sz)
1532  {
1533  base.setMemory();
1534 
1535 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1536 
1537  base_gpu.constructor_impl(v_size,this->base.toKernel());
1538 
1539 #endif
1540  }
1541 
1548  void set(size_t id, const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container & obj)
1549  {
1550 #ifdef SE_CLASS1
1551  check_overflow(id);
1552 #endif
1553  base.set(id,obj);
1555  }
1556 
1572  template <typename encap_S, unsigned int ...args> void set_o(size_t i, const encap_S & obj)
1573  {
1574  // write the object in the last element
1575  object_s_di<encap_S,decltype(get(i)),OBJ_ENCAP,args...>(obj,get(i));
1576  }
1577 
1584  void set(size_t id, const T & obj)
1585  {
1586 #ifdef SE_CLASS1
1587  check_overflow(id);
1588 #endif
1589  base.set(id,obj);
1591  }
1592 
1601  {
1602 #ifdef SE_CLASS1
1603  check_overflow(id);
1604 #endif
1605  base.set(id,v.base,src);
1606  }
1607 
1608  template<typename key_type>
1609  key_type getOriginKey(key_type vec_key)
1610  {
1611  return vec_key;
1612  }
1613 
1614 
1625  {
1626  v_size = mv.v_size;
1627  base.swap(mv.base);
1628 
1629 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1630 
1631  base_gpu.constructor_impl(v_size,this->base.toKernel());
1632 
1633 #endif
1634 
1635  return *this;
1636  }
1637 
1648  {
1649  v_size = mv.v_size;
1650  size_t rsz[1] = {v_size};
1651  base.resize(rsz);
1652 
1653  // copy the object on cpu
1654  for (size_t i = 0 ; i < v_size ; i++ )
1655  {
1656  grid_key_dx<1> key(i);
1657  base.set(key,mv.base,key);
1658  }
1659 
1660 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1661 
1662  base_gpu.constructor_impl(v_size,this->base.toKernel());
1663 
1664 #endif
1665 
1666  copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false>::copy2(*this,mv);
1667 
1668  return *this;
1669  }
1670 
1681  {
1682  v_size = mv.v_size;
1683  base.swap(mv.base);
1684 
1685 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1686 
1687  base_gpu.constructor_impl(v_size,this->base.toKernel());
1688 
1689 #endif
1690 
1691  return *this;
1692  }
1693 
1704  {
1705  v_size = mv.getInternal_v_size();
1706  size_t rsz[1] = {v_size};
1707  base.resize(rsz);
1708 
1709  // copy the object
1710  for (size_t i = 0 ; i < v_size ; i++ )
1711  {
1712  grid_key_dx<1> key(i);
1713  base.set(key,mv.getInternal_base(),key);
1714  }
1715 
1716 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1717 
1718  base_gpu.constructor_impl(v_size,this->base.toKernel());
1719 
1720 #endif
1721 
1722  copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false && Mem::isDeviceHostSame() == false>::copy2(*this,mv);
1723 
1724  return *this;
1725  }
1726 
1736  template<typename Mem, template <typename> class layout_base2>
1738  {
1739  v_size = mv.v_size;
1740  base.swap(mv.base);
1741 
1742 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1743 
1744  base_gpu.constructor_impl(v_size,this->base.toKernel());
1745 
1746 #endif
1747 
1748  return *this;
1749  }
1750 
1760  template<typename Mem,
1761  template <typename> class layout_base2,
1762  typename check = typename std::enable_if<!std::is_same<typename layout_base2<T>::type,typename layout_base<T>::type>::value >::type>
1765  {
1766  v_size = mv.getInternal_v_size();
1767  size_t rsz[1] = {v_size};
1768  base.resize(rsz);
1769 
1770  // copy the object
1771  for (size_t i = 0 ; i < v_size ; i++ )
1772  {
1773  grid_key_dx<1> key(i);
1774  base.set_general(key,mv.getInternal_base(),key);
1775  }
1776 
1777 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1778 
1779  base_gpu.constructor_impl(v_size,this->base.toKernel());
1780 
1781 #endif
1782 
1783  copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false && Mem::isDeviceHostSame() == false>::copy2(*this,mv);
1784 
1785  return *this;
1786  }
1787 
1794  {
1795  return !this->operator==(v);
1796  }
1797 
1804  {
1805  if (v_size != v.v_size)
1806  return false;
1807 
1808  // check object by object
1809  for (size_t i = 0 ; i < v_size ; i++ )
1810  {
1811  grid_key_dx<1> key(i);
1812 
1813  if (base.get_o(key) != v.base.get_o(key))
1814  return false;
1815  }
1816 
1817  return true;
1818  }
1819 
1829  {
1830  size_t sz_sp = v_size;
1831 
1832  // swap the v_size
1833  v_size = v.v_size;
1834 
1835  base.swap_nomode(v.base);
1836 
1837 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1838 
1839  base_gpu.constructor_impl(v_size,this->base.toKernel());
1840 
1841 #endif
1842 
1843  v.v_size = sz_sp;
1844  }
1845 
1852  {
1853  size_t sz_sp = v_size;
1854 
1855  // swap the v_size
1856  v_size = v.v_size;
1857 
1858  base.swap(v.base);
1859  v.v_size = sz_sp;
1860 
1861 
1862 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1863 
1864  base_gpu.constructor_impl(v_size,this->base.toKernel());
1865  v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());
1866 
1867 #endif
1868  }
1869 
1876  {
1877  size_t sz_sp = v_size;
1878 
1879  // swap the v_size
1880  v_size = v.v_size;
1881 
1882  base.swap(v.base);
1883  v.v_size = sz_sp;
1884 
1885 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
1886 
1887  base_gpu.constructor_impl(v_size,this->base.toKernel());
1888  v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());
1889 
1890 #endif
1891  }
1892 
1901  {
1902  return vector_key_iterator(v_size,start);
1903  }
1904 
1915  {
1916  return vector_key_iterator(stop,0);
1917  }
1918 
1919 #ifdef CUDA_GPU
1920 
1925  ite_gpu<1> getGPUIteratorTo(long int stop, size_t n_thr = default_kernel_wg_threads_) const
1926  {
1927  grid_key_dx<1,long int> start(0);
1928  grid_key_dx<1,long int> stop_(stop);
1929 
1930  return base.getGPUIterator(start,stop_,n_thr);
1931  }
1932 
1933 #endif
1934 
1943  {
1944 #ifdef SE_CLASS2
1945  check_valid(this,8);
1946 #endif
1947  return getIterator();
1948  }
1949 
1958  {
1959  return vector_key_iterator(v_size);
1960  }
1961 
1968  template<unsigned int p>
1970  {
1971 #ifdef SE_CLASS2
1972  check_valid(this,8);
1973 #endif
1975  }
1976 
1977 #ifdef CUDA_GPU
1978 
1983  ite_gpu<1> getGPUIterator(size_t n_thr = default_kernel_wg_threads_) const
1984  {
1985  grid_key_dx<1> start(0);
1986  grid_key_dx<1> stop(size()-1);
1987 
1988  return base.getGPUIterator(start,stop,n_thr);
1989  }
1990 
1991 #endif
1992 
1999  {
2000  return base.packObjectSize();
2001  }
2002 
2010  size_t packObject(void * mem)
2011  {
2012  return base.packObject(mem);
2013  }
2014 
2025  template<int ... prp> static inline size_t calculateMem(size_t n, size_t e)
2026  {
2027  if (n == 0)
2028  {
2029  return 0;
2030  }
2031  else
2032  {
2033  if (sizeof...(prp) == 0)
2034  return grow_p::grow(0,n) * sizeof(typename T::type);
2035 
2036  typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
2037 
2038  return grow_p::grow(0,n) * sizeof(prp_object);
2039  }
2040  }
2041 
2052  template<int ... prp> static inline size_t packMem(size_t n, size_t e)
2053  {
2054  if (sizeof...(prp) == 0)
2055  return n * sizeof(typename T::type);
2056 
2057  typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
2058 
2059  return n * sizeof(prp_object);
2060  }
2061 
2069  inline static size_t calculateNMem(size_t n)
2070  {
2071  return 1;
2072  }
2073 
2081  template<unsigned int p>
2082  auto getMemory() -> decltype(base.template getMemory<p>())
2083  {
2084  return base.template getMemory<p>();
2085  }
2086 
2092  template<unsigned int p = 0> void setMemory(Memory & mem)
2093  {
2094  base.template setMemory<p>(mem);
2095 
2096 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2097 
2098  base_gpu.constructor_impl(v_size,this->base.toKernel());
2099 
2100 #endif
2101  }
2102 
2108  void setMemoryArray(Memory * mem)
2109  {
2110  base.setMemoryArray(mem);
2111 
2112 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2113 
2114  base_gpu.constructor_impl(v_size,this->base.toKernel());
2115 
2116 #endif
2117  }
2118 
2126  template<unsigned int p = 0> void * getPointer()
2127  {
2128  return base.template getPointer<p>();
2129  }
2130 
2136  template<unsigned int p = 0> const void * getPointer() const
2137  {
2138  return base.getPointer();
2139  }
2140 
2146  static bool noPointers()
2147  {
2148  return false;
2149  }
2150 
2156  const size_t & getInternal_v_size() const
2157  {
2158  return v_size;
2159  }
2160 
2167  {
2168  return base;
2169  }
2170 
2175  template<unsigned int ... prp> void hostToDevice()
2176  {
2177  base.template hostToDevice<prp ...>();
2178  }
2179 
2184  template<unsigned int ... prp> void deviceToHost()
2185  {
2186  base.template deviceToHost<prp ...>();
2187  }
2188 
2189 
2194  template<unsigned int ... prp> void deviceToHost(size_t start, size_t stop)
2195  {
2196  base.template deviceToHost<prp ...>(start,stop);
2197  }
2198 
2203  template<unsigned int ... prp> void hostToDevice(size_t start, size_t stop)
2204  {
2205  base.template hostToDevice<prp ...>(start,stop);
2206  }
2207 
2212  template<unsigned int ... prp> void hostToDeviceNUMA(size_t start, size_t stop)
2213  {
2214  base.template hostToDeviceNUMA<prp ...>(start,stop);
2215  }
2216 
2221  template<unsigned int ... prp> void hostToDeviceNUMA()
2222  {
2223  base.template hostToDeviceNUMA<prp ...>();
2224  }
2225 
2226 #if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)
2227 
2229 
2238  {
2239 // base_gpu.constructor_impl(v_size,this->base.toKernel());
2240 
2242  }
2243 
2251  inline const vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base> toKernel() const
2252  {
2253 // base_gpu.constructor_impl(v_size,this->base.toKernel());
2254 
2255  return vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base>(base_gpu);
2256  }
2257 
2258 #else
2259 
2268  {
2269  vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> v(v_size,this->base.toKernel());
2270 
2271  return v;
2272  }
2273 
2282  {
2283  if (base.size() == 0)
2284  {std::cout << __FILE__ << ":" << __LINE__ << " Warning you are off-loading with toGPU a vector that seem to be empty or not initialized" << std::endl; }
2285 
2286  vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> v(v_size,this->base.toKernel());
2287 
2288  return v;
2289  }
2290 
2291 #endif
2292 
2300  template<unsigned int ... prps>
2301  const std::string toString(std::string prefix = std::string())
2302  {
2303  std::stringstream ss;
2304  auto it = getIterator();
2305 
2306  while (it.isNext())
2307  {
2308  auto p = it.get();
2309 
2310  ss << prefix;
2311 
2312  ss << prefix << " element[" << p << "]" << " ";
2313 
2314  vector_printer<self_type,prps ...> vp(*this,p,ss);
2315  boost::mpl::for_each_ref<boost::mpl::range_c<int,0,sizeof...(prps)>>(vp);
2316 
2317  ss << std::endl;
2318 
2319  ++it;
2320  }
2321 
2322  return ss.str();
2323  }
2324 
2325  void * internal_get_size_pointer() {return &v_size;}
2326 
2327  void print_size()
2328  {
2329 #ifndef DISABLE_ALL_RTTI
2330  std::cout << "the size of: " << demangle(typeid(self_type).name()) << " is " << sizeof(self_type) << std::endl;
2331  std::cout << " " << demangle(typeid(decltype(v_size)).name()) << ":" << sizeof(decltype(v_size)) << std::endl;
2332  std::cout << " " << demangle(typeid(decltype(base)).name()) << ":" << sizeof(decltype(base)) << std::endl;
2333 #endif
2334  }
2335 
2336  };
2337 
2338  template <typename T> using vector_std = vector<T, HeapMemory, memory_traits_lin, openfpm::grow_policy_double, STD_VECTOR>;
2339  template<typename T> using vector_gpu = openfpm::vector<T,CudaMemory,memory_traits_inte>;
2340  template<typename T> using vector_gpu_lin = openfpm::vector<T,CudaMemory,memory_traits_lin>;
2341  template<typename T> using vector_gpu_single = openfpm::vector<T,CudaMemory,memory_traits_inte,openfpm::grow_policy_identity>;
2342  template<typename T> using vector_custd = vector<T, CudaMemory, memory_traits_inte, openfpm::grow_policy_double, STD_VECTOR>;
2343 }
2344 
2345 #endif
void swap(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&v)
Swap the memory with another vector.
Convert a type into a constant type.
Definition: aggregate.hpp:292
void * getPointer()
Return the pointer that store the data.
It copy the properties from one object to another.
vector(size_t sz) THROW
Constructor, vector of size sz.
void set(size_t id, const typename grid_base< 1, T, Memory, typename layout_base< T >::type >::container &obj)
Set the object id to obj.
size_t size_local() const
Return the size of the vector.
Definition: map_vector.hpp:340
void insert(size_t key)
Insert an entry in the vector.
void non_zero_one(size_t sz[1], size_t arg)
If the argument is zero return 1 otherwise return the argument.
Definition: map_vector.hpp:259
Transform the boost::fusion::vector into memory specification (memory_traits)
void remove(size_t key)
Remove one entry from the vector.
T value_type
Type of the value the vector is storing.
Definition: map_vector.hpp:308
void set(size_t id, vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v, size_t src)
Set the element of the vector v from another element of another vector.
static bool noPointers()
This class has pointer inside.
vector_key_iterator getIterator() const
Get the vector elements iterator.
grid_key_dx is the key to access any element in the grid
Definition: grid_key.hpp:18
void reserve(size_t sp)
Reserve slots in the vector to avoid reallocation.
Definition: map_vector.hpp:364
void remove(openfpm::vector< aggregate< int >> &keys, size_t start=0)
Remove several entries from the vector.
void set_o(size_t i, const encap_S &obj)
It set an element of the vector from a object that is a subset of the vector properties.
grid interface available when on gpu
vector_key_iterator getIteratorFrom(size_t start) const
Get iterator over the particles from a particular index.
void setMemory(Memory &mem)
Set the memory of the base structure using an object.
void add(const T &v)
It inserts a new object into the vector; eventually it reallocates the grid.
Definition: map_vector.hpp:548
auto get(size_t id) const -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
vector(vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&v)
Constructor from another temporal vector.
void swap_nomode(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v)
Swap the memory with another vector.
size_t capacity()
return the maximum capacity of the vector before reallocation
Definition: map_vector.hpp:351
static size_t calculateNMem(size_t n)
How many allocation are required to create n-elements.
void merge_prp_v_device(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, unsigned int start, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
Definition: map_vector.hpp:967
void add_prp(const vector< S, M, layout_base2, gp, impl > &v)
It add the element of a source vector to this vector.
bool operator!=(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) const
Check that two vectors are not equal.
auto getProp(const KeyType &id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
auto get(size_t id) const -> const decltype(base.get_o(grid_key_dx< 1 >(id)))
Get an element of the vector.
size_t size()
Stub size.
Definition: map_vector.hpp:211
static size_t calculateMem(size_t n, size_t e)
Calculate the memory size required to allocate n elements.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Mem, layout_base2, grow_p, OPENFPM_NATIVE > &mv)
Assignment operator.
void set(size_t id, const T &obj)
Set the object id to obj.
void setMemoryArray(Memory *mem)
Set the memory of the base structure using an object.
vector_gpu_ker< typename apply_transform< layout_base, T >::type, layout_base > toKernel()
Convert the grid into a data-structure compatible for computing into GPU.
const void * getPointer() const
Return the pointer that store the data.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &&mv)
Assignment operator.
grid_base< 1, T, Memory, typename layout_base< T >::type > base
1-D static grid
Definition: map_vector.hpp:251
const grid_base< 1, T, Memory, layout_type >::container last() const
Get the last element of the vector.
void * getDeviceBufferCopy()
It return the properties arrays.
this class is a functor for "for_each" algorithm
const size_t & getInternal_v_size() const
Internal function.
void resize(size_t slot, size_t opt=DATA_ON_DEVICE|DATA_ON_HOST, unsigned int blockSize=1)
Resize the vector.
Definition: map_vector.hpp:419
auto getProp(const keyType &id) const -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Mem, layout_base, gp, OPENFPM_NATIVE > &&mv)
Assignment operator.
void fill(unsigned char c)
Fill the buffer with a byte.
layout_base< T > layout_base_
Type of the encapsulation memory parameter.
Definition: map_vector.hpp:298
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &mv)
Assignment operator.
vector_key_iterator getIteratorTo(size_t stop) const
Get iterator over the particles from 0 until a particular index.
const grid_base< 1, T, Memory, layout_type > & getInternal_base() const
Internal function.
auto get(size_t id) -> decltype(base.template get< p >(grid_key_dx< 1 >(0)))
Get an element of the vector.
It copy the properties from one object to another applying an operation.
void deviceToHost(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host.
Implementation of 1-D std::vector like structure.
Definition: map_vector.hpp:241
void merge_prp(const vector< S, M, layout_base, gp, OPENFPM_NATIVE > &v, const openfpm::vector< size_t > &opart)
It merge the elements of a source vector to this vector.
Definition: map_vector.hpp:655
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
Definition: map_vector.hpp:764
vector_key_iterator_ele< p, self_type > getIteratorElements() const
Get the vector elements iterator.
void hostToDeviceNUMA(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host respecting NUMA domains.
void hostToDeviceNUMA()
Synchronize the memory buffer in the device with the memory in the host respecting NUMA domains.
size_t packObjectSize()
Return the size of the message needed to pack this object.
grid interface available when on gpu
KeyT const ValueT ValueT OffsetIteratorT OffsetIteratorT int
[in] The number of segments that comprise the sorting data
void * getDeviceBuffer()
It return the properties arrays.
void hostToDevice()
Copy the memory from host to device.
void remove(openfpm::vector< size_t > &keys, size_t start=0)
Remove several entries from the vector.
vector_key_iterator getDomainIterator() const
Get the vector elements iterator.
const grid_base< 1, T, Memory, typename layout_base< T >::type >::container get_o(size_t id) const
Get an element of the vector.
layout_base< T >::type layout_type
Type of the encapsulation memory parameter.
Definition: map_vector.hpp:295
void add_no_device()
It inserts a new empty object into the vector; eventually it reallocates the grid.
Definition: map_vector.hpp:518
It copy two encap object.
Definition: Encap.hpp:275
grid_base< 1, T, Memory, typename layout_base< T >::type >::container container
Object container for T; it is the return type of get_o, which returns an object type through this container.
Definition: map_vector.hpp:305
size_t packObject(void *mem)
Pack the object into the given pointer.
void swap(openfpm::vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v)
Swap the memory with another vector.
void deviceToHost()
Synchronize the memory buffer in the device with the memory in the host.
bool operator==(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) const
Check that two vectors are equal.
static size_t packMem(size_t n, size_t e)
Calculate the memory size required to pack n elements.
void add_prp_device(const vector< S, M, layout_base2, gp, impl > &v)
It add the element of a source vector to this vector.
It create a boost::fusion vector with the selected properties.
bool isSubset() const
Indicate that this class is not a subset.
void merge_prp_v_device(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, const vector_opart_type &opart, unsigned int start, unsigned int stop)
It merge the elements of a source vector to this vector.
Definition: map_vector.hpp:891
auto get(size_t id) -> decltype(base.get_o(grid_key_dx< 1 >(id)))
Get an element of the vector.
size_t size() const
Return the size of the vector.
Definition: map_vector.hpp:330
aggregate of properties, from a list of object if create a struct that follow the OPENFPM native stru...
Definition: aggregate.hpp:214
void hostToDevice(size_t start, size_t stop)
Synchronize the memory buffer in the device with the memory in the host.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > & operator=(const vector< T, Mem, layout_base, gp, OPENFPM_NATIVE > &mv)
Assignment operator.
Implementation of 1-D std::vector like structure.
Definition: map_vector.hpp:202
auto getMemory() -> decltype(base.template getMemory< p >())
Return the memory object.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, size_t start)
It merge the elements of a source vector to this vector.
vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > duplicate() const
It duplicate the vector.
vector< T, Memory, layout_base2, grow_p, OPENFPM_NATIVE > & operator=(vector< T, Mem, layout_base2, grow_p, OPENFPM_NATIVE > &&mv)
Assignment operator.
void merge_prp_v(const vector< S, M, layout_base2, gp, OPENFPM_NATIVE > &v, unsigned int offset, const vector_opart_type &opart)
It merge the elements of a source vector to this vector.
Definition: map_vector.hpp:830
const vector_gpu_ker< typename apply_transform< layout_base, T >::type, layout_base > toKernel() const
Convert the grid into a data-structure compatible for computing into GPU.
grid_base< 1, T, Memory, typename layout_base< T >::type >::container last()
Get the last element of the vector.
void merge_prp_device(const vector< S, M, layout_base, gp, OPENFPM_NATIVE > &v, unsigned int start)
It merge the elements of a source vector to this vector (on device)
Definition: map_vector.hpp:714
vector(const vector< T, Memory, layout_base, grow_p, OPENFPM_NATIVE > &v) THROW
Constructor from another constant vector.
const std::string toString(std::string prefix=std::string())
void add(const typename grid_base< 1, T, Memory, typename layout_base< T >::type >::container &v)
It inserts a new object into the vector; eventually it reallocates the vector.
Definition: map_vector.hpp:582