OpenFPM  5.2.0
Project that contains the implementation of distributed structures
map_vector.hpp
/*
 * map_vector.hpp
 *
 *  Created on: Aug 30, 2014
 *      Author: Pietro Incardona
 */

#ifndef MAP_VECTOR_HPP
#define MAP_VECTOR_HPP

#include <iostream>
#include <typeinfo>
#include "util/common.hpp"
#include "memory/PtrMemory.hpp"
#include "util/object_util.hpp"
#include "Grid/util.hpp"
#include "Vector/util.hpp"
#include "Vector/map_vector_grow_p.hpp"
#include "memory/ExtPreAlloc.hpp"
#include "util/util_debug.hpp"
#include "util/Pack_stat.hpp"
#include "Grid/map_grid.hpp"
#include "memory/HeapMemory.hpp"
#include "vect_isel.hpp"
#include "util/object_s_di.hpp"
#include "util.hpp"
#include <string.h>
#include "Packer_Unpacker/Unpacker.hpp"
#include "Packer_Unpacker/Packer.hpp"
#include <fstream>
#include "Packer_Unpacker/Packer_util.hpp"
#include "Packer_Unpacker/has_pack_agg.hpp"
#include "timer.hpp"
#include "map_vector_std_util.hpp"
#include "data_type/aggregate.hpp"
#include "vector_map_iterator.hpp"
#include "util/cuda_util.hpp"
#include "cuda/map_vector_cuda_ker.cuh"
#include "map_vector_printers.hpp"

#include <stdexcept>

namespace openfpm
{

    /*! \brief Copy the content of one vector into another on the device side,
     * active only when the memory is not host-device coherent
     */
    template<bool active>
    struct copy_two_vectors_activate_impl
    {
        template<typename vector_type1, typename vector_type2>
        static void copy(vector_type1 & v1, vector_type2 & v2)
        {
        }

        template<typename vector_type1, typename vector_type2>
        static void copy2(vector_type1 & v1, vector_type2 & v2)
        {
        }
    };

    template<>
    struct copy_two_vectors_activate_impl<true>
    {
        template<typename vector_type1, typename vector_type2>
        static void copy(vector_type1 & v1, vector_type2 & v2)
        {
#ifdef __NVCC__
            if (v1.size() != 0)
            {
                auto it = v1.getGPUIterator();
                CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
            }
#endif
        }

        template<typename vector_type1, typename vector_type2>
        static void copy2(vector_type1 & v1, vector_type2 & v2)
        {
#ifdef __NVCC__
            if (v2.size() != 0)
            {
                // iterate over v2, whose size was just checked
                auto it = v2.getGPUIterator();
                CUDA_LAUNCH(copy_two_vectors,it,v1.toKernel(),v2.toKernel());
            }
#endif
        }
    };

    template<bool is_ok_cuda,typename T, typename Memory,
             template<typename> class layout_base,
             typename grow_p>
    struct add_prp_device_impl
    {
        template <typename S,
                  typename M,
                  typename gp,
                  unsigned int impl,
                  template <typename> class layout_base2,
                  unsigned int ...args>
        static void run(vector<T,Memory,layout_base,grow_p,impl> & this_,
                        const vector<S,M,layout_base2,gp,impl> & v)
        {
            std::cout << __FILE__ << ":" << __LINE__ << " Error: the function add_prp_device only works with a CUDA-enabled vector" << std::endl;
        }
    };

    template<bool is_ok_cuda,typename T, typename Memory,
             template<typename> class layout_base,
             typename grow_p>
    struct merge_prp_device_impl
    {
        template <typename S,
                  typename M,
                  typename gp,
                  unsigned int impl,
                  template <typename> class layout_base2,
                  unsigned int ...args>
        static void run(openfpm::vector<T,Memory,layout_base,grow_p,impl> & this_,
                        const openfpm::vector<S,M,layout_base2,gp,impl> & v,
                        unsigned int offset)
        {
            std::cout << __FILE__ << ":" << __LINE__ << " Error: the function merge_prp_device only works with a CUDA-enabled vector" << std::endl;
        }
    };

    template<typename T, typename Memory,
             template<typename> class layout_base,
             typename grow_p>
    struct add_prp_device_impl<true,T,Memory,layout_base,grow_p>
    {
        template <typename S,
                  typename M,
                  typename gp,
                  unsigned int impl,
                  template <typename> class layout_base2,
                  unsigned int ...args>
        static void run(vector<T,Memory,layout_base,grow_p,impl> & this_,
                        const vector<S,M,layout_base2,gp,impl> & v)
        {
            // merge the data on device

#if defined(CUDA_GPU) && defined(__NVCC__)

            size_t old_sz = this_.size();
            this_.resize(this_.size() + v.size(),DATA_ON_DEVICE);

            auto ite = v.getGPUIterator();

            CUDA_LAUNCH((merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(unsigned int)old_sz);

#else
            std::cout << __FILE__ << ":" << __LINE__ << " Error: the function add_prp_device only works when map_vector is compiled with nvcc" << std::endl;
#endif
        }
    };

    template<typename T, typename Memory,
             template<typename> class layout_base,
             typename grow_p>
    struct merge_prp_device_impl<true,T,Memory,layout_base,grow_p>
    {
        template <typename S,
                  typename M,
                  typename gp,
                  unsigned int impl,
                  template <typename> class layout_base2,
                  unsigned int ...args>
        static void run(vector<T,Memory,layout_base,grow_p,impl> & this_,
                        const vector<S,M,layout_base2,gp,impl> & v,
                        unsigned int offset)
        {
            // merge the data on device

#if defined(CUDA_GPU) && defined(__NVCC__)

            auto ite = v.getGPUIterator();

            CUDA_LAUNCH((merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...>),ite,v.toKernel(),this_.toKernel(),(unsigned int)offset);

#else
            std::cout << __FILE__ << ":" << __LINE__ << " Error: the function merge_prp_device only works when map_vector is compiled with nvcc" << std::endl;
#endif
        }
    };

    /*! \brief Implementation of 1-D std::vector like structure
     *
     * Stub object, selected when no valid implementation exists for the
     * given template parameters
     *
     */
    template<typename T, typename Memory, template<typename> class layout_base, typename grow_p, unsigned int impl>
    class vector
    {
        //! Stub size
        size_t size()
        {
            std::cerr << __FILE__ << ":" << __LINE__ << " Error stub vector created" << std::endl;
            return 0;
        }
    };

    #include "map_vector_std.hpp"
    #include "map_vector_std_ptr.hpp"

#ifdef CUDA_GPU
    #include "cuda/map_vector_std_cuda.hpp"
#endif

    /*! \brief Implementation of 1-D std::vector like structure
     *
     * The memory layout is defined by layout_base, the memory by Memory,
     * and the growing policy by grow_p
     *
     */
    template<typename T,typename Memory, template <typename> class layout_base, typename grow_p>
    class vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE>
    {
        //! Actual size of the vector; note that base can allocate more space than v_size
        size_t v_size;

        //! 1-D static grid that stores the data
        grid_base<1,T,Memory,typename layout_base<T>::type> base;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

        //! kernel-side view of base (reconstructed declaration: this member is
        //! referenced throughout the class under this guard)
        mutable vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> base_gpu;

#endif

        /*! \brief If the argument is zero return 1 otherwise return the argument
         *
         * \param sz output size
         * \param arg argument
         *
         */
        void non_zero_one(size_t sz[1], size_t arg)
        {
            if (arg == 0)
            {sz[0] = 1;}
            else
            {sz[0] = arg;}
        }

#ifdef SE_CLASS1

        /*! \brief Check for index overflow
         *
         * \param id index to check
         *
         */
        void check_overflow(size_t id) const
        {
            if (id >= v_size)
            {
                std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " overflow id: " << id << "\n";
                throw std::invalid_argument( "stacktrace" );
                ACTION_ON_ERROR(VECTOR_ERROR_OBJECT);
            }
        }

#endif

    public:

        //! it defines that this data-structure is a vector
        typedef int yes_i_am_vector;

        //! Self type (reconstructed typedef: self_type is used below by
        //! getIteratorElements(), toString() and print_size())
        typedef vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> self_type;

        //! Type of the encapsulation memory layout
        typedef typename layout_base<T>::type layout_type;

        //! Meta-function that produces the memory layout
        typedef layout_base<T> layout_base_;

        //! iterator for the vector
        typedef vector_key_iterator iterator_key;

        //! Object container for T; it is the return type of get_o(),
        // you can access all the properties of T
        typedef typename grid_base<1,T,Memory,typename layout_base<T>::type>::container container;

        //! Type of the value the vector is storing
        typedef T value_type;

        //! Type of memory this vector uses
        typedef Memory Memory_type;

        //! Growing policy of this vector
        typedef grow_p grow_policy;

        template<typename Tobj>
        struct layout_base__
        {
            typedef layout_base<Tobj> type;
        };

        // Implementation of packer and unpacker for vector
#include "vector_pack_unpack.ipp"

        /*! \brief Return the size of the vector
         *
         * \return the size
         *
         */
        size_t size() const
        {
            return v_size;
        }

        /*! \brief Return the size of the vector
         *
         * \return the size
         *
         */
        size_t size_local() const
        {
            return v_size;
        }

        /*! \brief return the maximum capacity of the vector before reallocation
         *
         * \return the capacity of the vector
         *
         */
        size_t capacity()
        {
            return base.size();
        }

        /*! \brief Reserve slots in the vector to avoid reallocation
         *
         * \param sp number of slots to reserve
         *
         */
        void reserve(size_t sp)
        {
            if (sp > base.size())
            {
                // resize the underlying storage
                size_t sz[1] = {sp};
                base.resize(sz);
            }

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief Clear the vector
         *
         * Eliminate all the elements
         *
         */
        void clear()
        {
            resize(0);
        }

        /*! \brief Shrink the allocated storage to fit the vector size
         *
         * (reconstructed signature: the body resizes base to exactly size())
         *
         */
        void shrink_to_fit()
        {
            size_t sz[1] = {size()};
            base.resize(sz);

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief Resize the vector
         *
         * Resize the vector, allocating more memory through grow_p if needed
         *
         * \param slot new size of the vector
         * \param opt resize options (DATA_ON_DEVICE, DATA_ON_HOST, EXACT_RESIZE)
         * \param blockSize block size for the device data
         *
         */
        void resize(size_t slot, size_t opt = DATA_ON_DEVICE | DATA_ON_HOST, unsigned int blockSize = 1)
        {
            // If we need more space than what we allocated, allocate new memory

            if (slot > base.size())
            {
                size_t gr = slot;
                // If the size increases by one, grow the internal capacity by more
                // than one slot, to make patterns like resize(size()+1) fast
                if (slot - base.size() == 1 && opt && (opt & EXACT_RESIZE) == 0)
                {
                    gr = grow_p::grow(base.size(),slot);
                }

                // resize the underlying storage
                size_t sz[1] = {gr};

                base.resize(sz,opt,blockSize);
            }

            // update the vector size
            v_size = slot;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

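        // Usage sketch (illustrative, not part of the original header). Because a
        // resize(size()+1) without EXACT_RESIZE goes through grow_p, push-back
        // style growth triggers only a few reallocations. The element type below
        // is hypothetical:
        //
        //   openfpm::vector<aggregate<float,float[3]>> v;
        //   for (size_t i = 0 ; i < 1000 ; i++)
        //   {v.resize(v.size()+1);}   // capacity grows geometrically, not by one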

        /*! \brief Resize the vector without touching the device buffers
         *
         * \param slot new size of the vector
         *
         */
        void resize_no_device(size_t slot)
        {
            // If we need more space than what we allocated, allocate new memory

            if (slot > base.size())
            {
                size_t gr = grow_p::grow(base.size(),slot);

                // resize the underlying storage
                size_t sz[1] = {gr};
                base.resize_no_device(sz);
            }

            // update the vector size
            v_size = slot;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        //! Access key for the vector
        typedef size_t access_key;

        /*! \brief It inserts a new empty object at the end of the vector,
         * reallocating the underlying grid if needed
         *
         */
        void add()
        {
            if (v_size >= base.size())
            {
                // double the underlying storage (at least one slot)
                size_t sz[1];
                non_zero_one(sz,2*base.size());
                base.resize(sz);
            }

            // increase the vector size
            v_size++;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief It inserts a new empty object at the end of the vector,
         * reallocating the grid if needed, without touching the device buffers
         *
         */
        void add_no_device()
        {
            if (v_size >= base.size())
            {
                // double the underlying storage (at least one slot)
                size_t sz[1];
                non_zero_one(sz,2*base.size());
                base.resize_no_device(sz);
            }

            // increase the vector size
            v_size++;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief It inserts a new object at the end of the vector, eventually
         * reallocating the underlying grid
         *
         * \param v object to copy in
         *
         */
        void add(const T & v)
        {
            if (v_size >= base.size())
            {
                // double the underlying storage (at least one slot)
                size_t sz[1];
                non_zero_one(sz,2*base.size());
                base.resize(sz);
            }

            // copy the object into the last slot
            base.set(v_size,v);

            // increase the vector size
            v_size++;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

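        // Usage sketch (illustrative, not part of the original header), with a
        // hypothetical aggregate element type:
        //
        //   openfpm::vector<aggregate<float>> vf;
        //   vf.add();                                 // append an empty slot ...
        //   vf.template get<0>(vf.size()-1) = 2.0f;   // ... then write property 0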
        /*! \brief It inserts a new object at the end of the vector, eventually
         * reallocating the underlying grid
         *
         * \param v encapsulated object to copy in
         *
         */
        void add(const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container & v)
        {
            if (v_size >= base.size())
            {
                // double the underlying storage (at least one slot)
                size_t sz[1];
                non_zero_one(sz,2*base.size());
                base.resize(sz);
            }

            // copy the object into the last slot
            base.set(v_size,v);

            // increase the vector size
            v_size++;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief It adds the elements of another vector to this vector
         *
         * \param v source vector
         *
         */
        template <typename M, typename gp> void add(const vector<T, M, layout_base,gp,OPENFPM_NATIVE> & v)
        {
            // copy element by element
            for (size_t i = 0 ; i < v.size() ; i++)
                add(v.get(i));
        }

        /*! \brief It merges the elements of a source vector into this vector
         *
         * Each element v[i] is written (through the operation op) into the
         * element of this vector at index opart.get(i), for the properties args...
         *
         * \param v source vector
         * \param opart destination index for each source element
         *
         */
        template <template<typename,typename> class op, typename S, typename M, typename gp, unsigned int ...args>
        void merge_prp(const vector<S,M,layout_base,gp,OPENFPM_NATIVE> & v,
                       const openfpm::vector<size_t> & opart)
        {
#ifdef SE_CLASS1

            if (v.size() != opart.size())
                std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as opart.size()=" << opart.size() << std::endl;

#endif
            for (size_t i = 0 ; i < v.size() ; i++)
            {
#ifdef SE_CLASS1

                if (opart.get(i) >= size())
                    std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " trying to access element " << opart.get(i) << " but the vector has size " << size() << std::endl;

#endif
                // write the object into the destination element
                object_s_di_op<op,decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.get(i)));
            }
        }

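        // Semantics sketch (illustrative, not part of the original header): with a
        // hypothetical replace-like op and args... = {0}, the loop above performs
        //
        //   for i in [0, v.size()):  (*this)[opart.get(i)].property<0> = v[i].property<0>
        //
        // i.e. opart acts as a scatter map from source position to destination index.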
        /*! \brief It merges the elements of a source vector into this vector (on device)
         *
         * \param v source vector
         * \param start destination offset
         *
         */
        template <template<typename,typename> class op, typename S, typename M, typename gp, unsigned int ...args>
        void merge_prp_device(const vector<S,M,layout_base,gp,OPENFPM_NATIVE> & v,
                              unsigned int start)
        {
            // dispatch to the device implementation (reconstructed line: the
            // selector is assumed to be whether Memory is CudaMemory)
            merge_prp_device_impl<std::is_same<Memory,CudaMemory>::value,T,Memory,layout_base,grow_p>
            ::template run<S,M,gp,OPENFPM_NATIVE,layout_base,args...>(*this,v,start);
        }

        /*! \brief It merges the elements of a source vector into this vector
         *
         * Each element v[i] is written (through op) into the element of this
         * vector at index opart.get<0>(i), for the properties args...
         *
         * \param v source vector
         * \param opart destination index for each source element
         *
         */
        template <template<typename,typename> class op,
                  typename S,
                  typename M,
                  typename gp,
                  template <typename> class layout_base2,
                  typename vector_opart_type,
                  unsigned int ...args>
        void merge_prp_v(const vector<S,M,layout_base2,gp,OPENFPM_NATIVE> & v,
                         const vector_opart_type & opart)
        {
#ifdef SE_CLASS1

            if (v.size() != opart.size())
                std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as opart.size()=" << opart.size() << std::endl;

#endif
            for (size_t i = 0 ; i < v.size() ; i++)
            {
#ifdef SE_CLASS1

                if (i >= opart.size())
                    std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " trying to access element " << opart.template get<0>(i) << " but the vector has size " << size() << std::endl;

#endif
                // write the object into the destination element
                object_s_di_op<op,decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(opart.template get<0>(i)));
            }
        }

        /*! \brief It merges the elements of a source vector into this vector
         *
         * Elements of v from offset onward are combined (through op) with the
         * elements of v at the indices listed in opart, for the properties args...
         *
         * \param v source vector
         * \param offset offset of the first source element
         * \param opart index map
         *
         */
        template <template<typename,typename> class op,
                  typename S,
                  typename M,
                  typename gp,
                  template <typename> class layout_base2,
                  typename vector_opart_type,
                  unsigned int ...args>
        void merge_prp_v(const vector<S,M,layout_base2,gp,OPENFPM_NATIVE> & v,
                         unsigned int offset,
                         const vector_opart_type & opart)
        {
            size_t i2 = 0;

            for (size_t i = offset ; i < v.size() ; i++)
            {
                auto dst = v.get(opart.template get<0>(i2));
                auto src = v.get(i);
                copy_cpu_encap_encap_op_prp<op,decltype(v.get(0)),decltype(v.get(0)),args...> cp(src,dst);

                boost::mpl::for_each_ref< boost::mpl::range_c<int,0,sizeof...(args)> >(cp);
                i2++;
            }
        }

        /*! \brief It merges the elements of a source vector into this vector (device version)
         *
         * \param v source vector
         * \param opart destination index for each source element
         * \param start first source element to merge
         * \param stop one past the last source element to merge
         *
         */
        template <template<typename,typename> class op,
                  typename S,
                  typename M,
                  typename gp,
                  template <typename> class layout_base2,
                  typename vector_opart_type,
                  unsigned int ...args>
        void merge_prp_v_device(const vector<S,M,layout_base2,gp,OPENFPM_NATIVE> & v,
                                const vector_opart_type & opart,
                                unsigned int start,
                                unsigned int stop)
        {
#ifdef SE_CLASS1

            if (v.size() != stop - start)
                std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be the same as stop - start = " << stop - start << std::endl;

#endif

#ifdef __NVCC__

            size_t sz[1] = {stop - start};
            grid_sm<1,void> nm(sz);

            auto ite = nm.getGPUIterator();

            // write the objects into the destination elements
            CUDA_LAUNCH((merge_add_prp_device_impl_src_dst_opar_offset<op,
                    decltype(v.toKernel()),
                    decltype(this->toKernel()),
                    decltype(opart.toKernel()),
                    args...>),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);

#else
            std::cout << __FILE__ << ":" << __LINE__ << " Error: you have to compile map_vector.hpp with nvcc to make GPU code work" << std::endl;

#endif
        }

        /*! \brief It merges the elements of a source vector into this vector (device version)
         *
         * \param v source vector
         * \param start offset of the first source element
         * \param opart destination index for each source element
         *
         */
        template <template<typename,typename> class op,
                  typename S,
                  typename M,
                  typename gp,
                  template <typename> class layout_base2,
                  typename vector_opart_type,
                  unsigned int ...args>
        void merge_prp_v_device(const vector<S,M,layout_base2,gp,OPENFPM_NATIVE> & v,
                                unsigned int start,
                                const vector_opart_type & opart)
        {
#ifdef SE_CLASS1

            if (v.size() < opart.size() + start)
                std::cerr << __FILE__ << ":" << __LINE__ << " error merge_prp: v.size()=" << v.size() << " must be at least opart.size() + start = " << opart.size() + start << std::endl;

#endif

#ifdef __NVCC__

            auto ite = opart.getGPUIterator();

            // write the objects into the destination elements
            CUDA_LAUNCH((merge_add_prp_device_impl_src_offset_dst_opar<op,
                    decltype(v.toKernel()),
                    decltype(this->toKernel()),
                    decltype(opart.toKernel()),
                    args... >),ite,v.toKernel(),this->toKernel(),opart.toKernel(),start);

#else
            std::cout << __FILE__ << ":" << __LINE__ << " Error: you have to compile map_vector.hpp with nvcc to make GPU code work" << std::endl;

#endif
        }

        /*! \brief It merges the elements of a source vector into this vector
         *
         * Element v[i] is written (through op) into the element start + i,
         * for the properties args...
         *
         * \param v source vector
         * \param start destination offset
         *
         */
        template <template<typename,typename> class op,
                  typename S,
                  typename M,
                  typename gp,
                  template <typename> class layout_base2,
                  unsigned int ...args>
        void merge_prp_v(const vector<S,M,layout_base2,gp,OPENFPM_NATIVE> & v,
                         size_t start)
        {
            for (size_t i = 0 ; i < v.size() ; i++)
            {
#ifdef SE_CLASS1

                if (start + i >= v_size)
                    std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " trying to access element " << start+i << " but the vector has size " << size() << std::endl;

#endif
                // write the object into the destination element
                object_s_di_op<op,decltype(v.get(0)),decltype(get(0)),OBJ_ENCAP,args...>(v.get(i),get(start+i));
            }
        }

        /*! \brief It adds the elements of a source vector to this vector,
         * copying only the properties args...
         *
         * \param v source vector
         *
         */
        template <typename S,
                  typename M,
                  typename gp,
                  unsigned int impl,
                  template <typename> class layout_base2,
                  unsigned int ...args>
        void add_prp(const vector<S,M,layout_base2,gp,impl> & v)
        {
            // copy element by element
            for (size_t i = 0 ; i < v.size() ; i++)
            {
                // add a new element
                add();

                // write the object in the last element
                object_s_di<decltype(v.get(i)),decltype(get(size()-1)),OBJ_ENCAP,args...>(v.get(i),get(size()-1));
            }
        }

        /*! \brief It adds the elements of a source vector to this vector (on device),
         * copying only the properties args...
         *
         * \param v source vector
         *
         */
        template <typename S,
                  typename M,
                  typename gp,
                  unsigned int impl,
                  template <typename> class layout_base2,
                  unsigned int ...args>
        void add_prp_device(const vector<S,M,layout_base2,gp,impl> & v)
        {
            // dispatch to the device implementation (reconstructed line: the
            // selector is assumed to be whether Memory is CudaMemory)
            add_prp_device_impl<std::is_same<Memory,CudaMemory>::value,T,Memory,layout_base,grow_p>
            ::template run<S,M,gp,impl,layout_base2,args...>(*this,v);
        }

        /*! \brief Insert an entry in the vector
         *
         * It opens a slot at position key by shifting all the elements in
         * [key, size()) one position to the right; the new slot is left for
         * the caller to fill
         *
         * \param key position where to insert
         *
         */
        void insert(size_t key)
        {
            add();

            long int d_k = (long int)size()-1;
            long int s_k = (long int)size()-2;

            // shift the elements right by one
            while (s_k >= (long int)key)
            {
                set(d_k,get(s_k));
                d_k--;
                s_k--;
            }
        }

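        // Usage sketch (illustrative, not part of the original header): insert(3)
        // opens a hole at position 3 (the old elements [3, size()) shift right by
        // one) and the caller fills it, e.g. with a hypothetical property 0:
        //
        //   v.insert(3);
        //   v.template get<0>(3) = 42.0f;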

        /*! \brief Remove one entry from the vector
         *
         * \param key element to remove
         *
         */
        void remove(size_t key)
        {
            size_t d_k = key;
            size_t s_k = key + 1;

            // shift the elements after key left by one
            while (s_k < size())
            {
                set(d_k,get(s_k));
                d_k++;
                s_k++;
            }

            // re-calculate the vector size

            v_size--;
        }

        /*! \brief Remove several entries from the vector
         *
         * \warning the keys must be sorted in ascending order
         *
         * \param keys elements to remove
         * \param start skip the first start keys
         *
         */
        void remove(openfpm::vector<size_t> & keys, size_t start = 0)
        {
            // Nothing to remove, return
            if (keys.size() <= start)
                return;

            size_t a_key = start;
            size_t d_k = keys.get(a_key);
            size_t s_k = keys.get(a_key) + 1;

            // compact the vector, skipping the keys to delete
            while (s_k < size())
            {
                // s_k should always point to a key that is not going to be deleted
                while (a_key+1 < keys.size() && s_k == keys.get(a_key+1))
                {
                    a_key++;
                    s_k = keys.get(a_key) + 1;
                }

                // In case of overflow
                if (s_k >= size())
                    break;

                set(d_k,get(s_k));
                d_k++;
                s_k++;
            }

            // re-calculate the vector size

            v_size -= keys.size() - start;
        }

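        // Usage sketch (illustrative, not part of the original header). The keys
        // must be sorted ascending and unique, because the compaction loop above
        // advances through them monotonically:
        //
        //   openfpm::vector<size_t> keys;
        //   keys.add(1); keys.add(4); keys.add(5);   // sorted, unique
        //   v.remove(keys);                          // v shrinks by 3 elements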
        /*! \brief Remove several entries from the vector
         *
         * \warning the keys must be sorted in ascending order
         *
         * \param keys elements to remove
         * \param start skip the first start keys
         *
         */
        void remove(openfpm::vector<aggregate<int>> & keys, size_t start = 0)
        {
            // Nothing to remove, return
            if (keys.size() <= start)
                return;

            size_t a_key = start;
            size_t d_k = keys.template get<0>(a_key);
            size_t s_k = keys.template get<0>(a_key) + 1;

            // compact the vector, skipping the keys to delete
            while (s_k < size())
            {
                // s_k should always point to a key that is not going to be deleted
                while (a_key+1 < keys.size() && s_k == keys.template get<0>(a_key+1))
                {
                    a_key++;
                    s_k = keys.template get<0>(a_key) + 1;
                }

                // In case of overflow
                if (s_k >= size())
                    break;

                set(d_k,get(s_k));
                d_k++;
                s_k++;
            }

            // re-calculate the vector size

            v_size -= keys.size() - start;
        }

        /*! \brief Get an element of the vector
         *
         * \tparam p property to get
         * \param id element to get
         *
         * \return the element value requested
         *
         */
        template <unsigned int p>
        inline auto get(size_t id) const -> decltype(base.template get<p>(grid_key_dx<1>(0)))
        {
#if defined(SE_CLASS1) && !defined(__NVCC__)
            check_overflow(id);
#endif
            grid_key_dx<1> key(id);

            return base.template get<p>(key);
        }

        /*! \brief Indicate that this class is not a subset
         *
         * \return false
         *
         */
        bool isSubset() const
        {
            return false;
        }

        /*! \brief Get an element of the vector
         *
         * \param id element to get
         *
         * \return the element (encapsulated)
         *
         */
        inline auto get(size_t id) -> decltype(base.get_o(grid_key_dx<1>(id)))
        {
#if defined(SE_CLASS1) && !defined(__NVCC__)
            check_overflow(id);
#endif
            grid_key_dx<1> key(id);

            return base.get_o(key);
        }

        /*! \brief Get an element of the vector
         *
         * \param id element to get
         *
         * \return the element (encapsulated)
         *
         */
        inline const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container get_o(size_t id) const
        {
#if defined(SE_CLASS1) && !defined(__NVCC__)
            check_overflow(id);
#endif
            grid_key_dx<1> key(id);

            return base.get_o(key);
        }

        /*! \brief Fill the buffer of the property id with the byte c
         *
         * \param c byte to fill with
         *
         */
        template<unsigned int id> void fill(unsigned char c)
        {
            base.template fill<id>(c);
        }

        /*! \brief It returns the device buffer of the property id
         *
         * \return the device buffer
         *
         */
        template<unsigned int id> void * getDeviceBufferCopy()
        {
            return base.template getDeviceBuffer<id>();
        }

        /*! \brief It returns the device buffer of the property id
         *
         * \return the device buffer
         *
         */
        template<unsigned int id> void * getDeviceBuffer()
        {
            return base.template getDeviceBuffer<id>();
        }

        /*! \brief Get the last element of the vector
         *
         * \return the last element (encapsulated)
         *
         */
        inline const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container last() const
        {
            grid_key_dx<1> key(size()-1);

            return base.get_o(key);
        }

        /*! \brief Get an element of the vector
         *
         * \tparam p property to get
         * \param id element to get
         *
         * \return the element value requested
         *
         */
        template <unsigned int p>
        inline auto getProp(const unsigned int & id) -> decltype(base.template get<p>(grid_key_dx<1>(0)))
        {
            return this->template get<p>(id);
        }

        /*! \brief Get an element of the vector
         *
         * \tparam p property to get
         * \param id key of the element to get
         *
         * \return the element value requested
         *
         */
        template <unsigned int p,typename KeyType>
        inline auto getProp(const KeyType & id) -> decltype(base.template get<p>(grid_key_dx<1>(0)))
        {
            return this->template get<p>(id.getKey());
        }

        /*! \brief Get an element of the vector
         *
         * \tparam p property to get
         * \param id key of the element to get
         *
         * \return the element value requested
         *
         */
        template <unsigned int p, typename keyType>
        inline auto getProp(const keyType & id) const -> decltype(base.template get<p>(grid_key_dx<1>(0)))
        {
            return this->template get<p>(id.getKey());
        }

        /*! \brief Get an element of the vector
         *
         * \tparam p property to get
         * \param id element to get
         *
         * \return the element value requested
         *
         */
        template <unsigned int p>
        inline auto get(size_t id) -> decltype(base.template get<p>(grid_key_dx<1>(0)))
        {
#if defined(SE_CLASS1) && !defined(__NVCC__)
            check_overflow(id);
#endif
            grid_key_dx<1> key(id);

            return base.template get<p>(key);
        }

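        // Usage sketch (illustrative, not part of the original header), assuming a
        // hypothetical element type aggregate<float,float[3]>:
        //
        //   auto & s = v.template get<0>(10);   // scalar property of element 10
        //   v.template get<1>(10)[2] = 0.5f;    // component 2 of the array property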
        /*! \brief Get an element of the vector
         *
         * \param id element to get
         *
         * \return the element (encapsulated)
         *
         */
        inline auto get(size_t id) const -> const decltype(base.get_o(grid_key_dx<1>(id)))
        {
#ifdef SE_CLASS2
            check_valid(this,8);
#endif
#if defined(SE_CLASS1) && !defined(__NVCC__)
            check_overflow(id);
#endif
            grid_key_dx<1> key(id);

            return base.get_o(key);
        }

        /*! \brief Get the last element of the vector
         *
         * \return the last element (encapsulated)
         *
         */
        inline typename grid_base<1,T,Memory,typename layout_base<T>::type>::container last()
        {
            grid_key_dx<1> key(size()-1);

            return base.get_o(key);
        }

        //! Destructor
        ~vector() THROW
        {
            // Eliminate the pointer
        }

        /*! \brief It duplicates the vector
         *
         * \return a duplicated vector
         *
         */
        vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> duplicate() const
        {
            vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> dup;

            dup.v_size = v_size;
            dup.base.swap(base.duplicate());

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            dup.base_gpu.constructor_impl(v_size,dup.base.toKernel());

#endif

            // if the memory is not host-device coherent, copy the device data too
            copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false>::copy(dup,*this);

            return dup;
        }

        /*! \brief Constructor from another temporal vector
         *
         * \param v the vector to move
         *
         */
        vector(vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> && v)
        :v_size(0)
        {
            swap(v);

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief Constructor from another constant vector
         *
         * \param v the vector to copy
         *
         */
        vector(const vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & v) THROW
        :v_size(0)
        {
            swap(v.duplicate());

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        //! Constructor, vector of size 0
        vector() THROW
        :v_size(0),base(0)
        {
            base.setMemory();

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        //! Constructor, vector of size sz
        vector(size_t sz) THROW
        :v_size(sz),base(sz)
        {
            base.setMemory();

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief Set the object id to obj
         *
         * \param id element to set
         * \param obj object (encapsulated)
         *
         */
        void set(size_t id, const typename grid_base<1,T,Memory,typename layout_base<T>::type>::container & obj)
        {
#ifdef SE_CLASS1
            check_overflow(id);
#endif
            // set the element
            base.set(id,obj);
        }

        /*! \brief It sets an element of the vector from an object that is a
         * subset of the vector properties
         *
         * \param i element to set
         * \param obj object carrying the properties args... to write
         *
         */
        template <typename encap_S, unsigned int ...args> void set_o(size_t i, const encap_S & obj)
        {
            // write the object in the element i
            object_s_di<encap_S,decltype(get(i)),OBJ_ENCAP,args...>(obj,get(i));
        }

        /*! \brief Set the object id to obj
         *
         * \param id element to set
         * \param obj object to set
         *
         */
        void set(size_t id, const T & obj)
        {
#ifdef SE_CLASS1
            check_overflow(id);
#endif
            // set the element
            base.set(id,obj);
        }

        /*! \brief Set the element id of this vector from the element src of the vector v
         *
         * \param id destination element
         * \param v source vector
         * \param src source element
         *
         */
        void set(size_t id, vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & v, size_t src)
        {
#ifdef SE_CLASS1
            check_overflow(id);
#endif
            base.set(id,v.base,src);
        }

        /*! \brief Assignment operator (move)
         *
         * \param mv vector to move
         *
         * \return itself
         *
         */
        vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & operator=(vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> && mv)
        {
            v_size = mv.v_size;
            base.swap(mv.base);

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif

            return *this;
        }

        /*! \brief Assignment operator (copy)
         *
         * \param mv vector to copy
         *
         * \return itself
         *
         */
        vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & operator=(const vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & mv)
        {
            v_size = mv.v_size;
            size_t rsz[1] = {v_size};
            if (rsz[0] > base.size())
            {base.resize(rsz);}

            // copy the objects on the host
            for (size_t i = 0 ; i < v_size ; i++)
            {
                grid_key_dx<1> key(i);
                base.set(key,mv.base,key);
            }

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif

            // if the memory is not host-device coherent, copy the device data too
            copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false>::copy2(*this,mv);

            return *this;
        }

        /*! \brief Assignment operator (move, different memory)
         *
         * \param mv vector to move
         *
         * \return itself
         *
         */
        template<typename Mem, typename gp>
        vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & operator=(vector<T,Mem,layout_base,gp,OPENFPM_NATIVE> && mv)
        {
            v_size = mv.v_size;
            base.swap(mv.base);

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif

            return *this;
        }

        /*! \brief Assignment operator (copy, different memory)
         *
         * \param mv vector to copy
         *
         * \return itself
         *
         */
        template<typename Mem, typename gp>
        vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & operator=(const vector<T,Mem,layout_base,gp,OPENFPM_NATIVE> & mv)
        {
            v_size = mv.getInternal_v_size();
            size_t rsz[1] = {v_size};
            base.resize(rsz);

            // copy the objects on the host
            for (size_t i = 0 ; i < v_size ; i++)
            {
                grid_key_dx<1> key(i);
                base.set(key,mv.getInternal_base(),key);
            }

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif

            // if neither memory is host-device coherent, copy the device data too
            copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false && Mem::isDeviceHostSame() == false>::copy2(*this,mv);

            return *this;
        }

        /*! \brief Assignment operator (move, different memory and layout)
         *
         * \param mv vector to move
         *
         * \return itself
         *
         */
        template<typename Mem, template <typename> class layout_base2>
        vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & operator=(vector<T,Mem,layout_base2,grow_p,OPENFPM_NATIVE> && mv)
        {
            v_size = mv.v_size;
            base.swap(mv.base);

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif

            return *this;
        }

        /*! \brief Assignment operator (copy, different layout)
         *
         * \param mv vector to copy
         *
         * \return itself
         *
         */
        template<typename Mem,
                 template <typename> class layout_base2,
                 typename check = typename std::enable_if<!std::is_same<typename layout_base2<T>::type,typename layout_base<T>::type>::value >::type>
        vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & operator=(const vector<T,Mem,layout_base2,grow_p,OPENFPM_NATIVE> & mv)
        {
            v_size = mv.getInternal_v_size();
            size_t rsz[1] = {v_size};
            base.resize(rsz);

            // copy the objects on the host, converting between layouts
            for (size_t i = 0 ; i < v_size ; i++)
            {
                grid_key_dx<1> key(i);
                base.set_general(key,mv.getInternal_base(),key);
            }

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif

            // if neither memory is host-device coherent, copy the device data too
            copy_two_vectors_activate_impl<Memory::isDeviceHostSame() == false && Mem::isDeviceHostSame() == false>::copy2(*this,mv);

            return *this;
        }

        /*! \brief Check whether two vectors differ
         *
         * \param v vector to compare with
         *
         * \return true if they differ
         *
         */
        bool operator!=(const vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & v) const
        {
            return !this->operator==(v);
        }

        /*! \brief Check whether two vectors are equal
         *
         * \param v vector to compare with
         *
         * \return true if they are equal
         *
         */
        bool operator==(const vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & v) const
        {
            if (v_size != v.v_size)
                return false;

            // check object by object
            for (size_t i = 0 ; i < v_size ; i++)
            {
                grid_key_dx<1> key(i);

                if (base.get_o(key) != v.base.get_o(key))
                    return false;
            }

            return true;
        }

        /*! \brief Swap the memory with another vector, without preserving the
         * host/device synchronization mode
         *
         * \param v vector to swap with
         *
         */
        void swap_nomode(openfpm::vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & v)
        {
            size_t sz_sp = v_size;

            // swap the v_size
            v_size = v.v_size;

            base.swap_nomode(v.base);

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif

            v.v_size = sz_sp;
        }

        /*! \brief Swap the memory with another vector
         *
         * \param v vector to swap with
         *
         */
        void swap(openfpm::vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> & v)
        {
            size_t sz_sp = v_size;

            // swap the v_size
            v_size = v.v_size;

            base.swap(v.base);
            v.v_size = sz_sp;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());
            v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());

#endif
        }

        /*! \brief Swap the memory with another temporal vector
         *
         * \param v vector to swap with
         *
         */
        void swap(openfpm::vector<T,Memory,layout_base,grow_p,OPENFPM_NATIVE> && v)
        {
            size_t sz_sp = v_size;

            // swap the v_size
            v_size = v.v_size;

            base.swap(v.base);
            v.v_size = sz_sp;

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());
            v.base_gpu.constructor_impl(v.v_size,v.base.toKernel());

#endif
        }

        /*! \brief Get an iterator over the elements from start until the end
         *
         * \param start first element of the iterator
         *
         * \return the iterator
         *
         */
        vector_key_iterator getIteratorFrom(size_t start) const
        {
            return vector_key_iterator(v_size,start);
        }

        /*! \brief Get an iterator over the elements from 0 until stop
         *
         * \param stop one past the last element of the iterator
         *
         * \return the iterator
         *
         */
        vector_key_iterator getIteratorTo(size_t stop) const
        {
            return vector_key_iterator(stop,0);
        }

#ifdef CUDA_GPU

        /*! \brief Get a GPU iterator over the elements from 0 until stop
         *
         * \param stop one past the last element
         * \param n_thr number of threads per block
         *
         * \return the GPU iterator
         *
         */
        ite_gpu<1> getGPUIteratorTo(long int stop, size_t n_thr = default_kernel_wg_threads_) const
        {
            grid_key_dx<1,long int> start(0);
            grid_key_dx<1,long int> stop_(stop);

            return base.getGPUIterator(start,stop_,n_thr);
        }

#endif

        /*! \brief Get the vector elements iterator
         *
         * \return an iterator over all the vector elements
         *
         */
        vector_key_iterator getDomainIterator() const
        {
#ifdef SE_CLASS2
            check_valid(this,8);
#endif
            return getIterator();
        }

        /*! \brief Get the vector elements iterator
         *
         * \return an iterator over all the vector elements
         *
         */
        vector_key_iterator getIterator() const
        {
            return vector_key_iterator(v_size);
        }

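        // Usage sketch (illustrative, not part of the original header):
        //
        //   auto it = v.getIterator();
        //   while (it.isNext())
        //   {
        //       auto key = it.get();
        //       v.template get<0>(key) = 0.0f;   // hypothetical property 0
        //       ++it;
        //   }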
        /*! \brief Get an iterator over the elements of the property p
         *
         * \return the iterator
         *
         */
        template<unsigned int p>
        vector_key_iterator_ele<p,self_type> getIteratorElements() const
        {
#ifdef SE_CLASS2
            check_valid(this,8);
#endif
            // reconstructed return (gap in the listing), mirroring getIterator()
            return vector_key_iterator_ele<p,self_type>(v_size);
        }

#ifdef CUDA_GPU

        /*! \brief Get a GPU iterator over all the vector elements
         *
         * \param n_thr number of threads per block
         *
         * \return the GPU iterator
         *
         */
        ite_gpu<1> getGPUIterator(size_t n_thr = default_kernel_wg_threads_) const
        {
            grid_key_dx<1> start(0);
            grid_key_dx<1> stop(size()-1);

            return base.getGPUIterator(start,stop,n_thr);
        }

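        // Usage sketch (illustrative, not part of the original header; requires
        // nvcc). "my_kernel" is a hypothetical __global__ function operating on
        // the view returned by toKernel():
        //
        //   auto ite = v.getGPUIterator();
        //   CUDA_LAUNCH(my_kernel,ite,v.toKernel());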
        /*! \brief Get a GPU iterator over all the vector elements
         *
         * \param n_thr number of threads per block
         *
         * \return the GPU iterator
         *
         */
        ite_gpu<1> getDomainIteratorGPU(size_t n_thr = default_kernel_wg_threads_) const
        {
            return getGPUIterator(n_thr);
        }

#endif

        /*! \brief Return the size of the message needed to pack this object
         *
         * \return the size of the message
         *
         */
        size_t packObjectSize()
        {
            return base.packObjectSize();
        }

        /*! \brief Pack the object into the given pointer
         *
         * \param mem destination pointer
         *
         * \return the size of the packed message
         *
         */
        size_t packObject(void * mem)
        {
            return base.packObject(mem);
        }

        /*! \brief Calculate the memory size required to allocate n elements
         *
         * \param n number of elements
         * \param e unused
         *
         * \return the size of the allocation in bytes
         *
         */
        template<int ... prp> static inline size_t calculateMem(size_t n, size_t e)
        {
            if (n == 0)
            {
                return 0;
            }
            else
            {
                if (sizeof...(prp) == 0)
                    return grow_p::grow(0,n) * sizeof(typename T::type);

                typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;

                return grow_p::grow(0,n) * sizeof(prp_object);
            }
        }

        /*! \brief Calculate the memory size required to pack n elements
         *
         * \param n number of elements
         * \param e unused
         *
         * \return the size of the packed message in bytes
         *
         */
        template<int ... prp> static inline size_t packMem(size_t n, size_t e)
        {
            if (sizeof...(prp) == 0)
                return n * sizeof(typename T::type);

            typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;

            return n * sizeof(prp_object);
        }

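        // Usage sketch (illustrative, not part of the original header): for a
        // hypothetical element type aggregate<float,double>, packMem<0>(10,0)
        // returns 10 * sizeof(an object holding only the float property), i.e.
        // the bytes needed to pack property 0 of 10 elements.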
        /*! \brief How many allocations are required to create n elements
         *
         * \param n number of elements
         *
         * \return the number of allocations
         *
         */
        inline static size_t calculateNMem(size_t n)
        {
            return 1;
        }

        /*! \brief Return the memory object of the property p
         *
         * \return the memory object
         *
         */
        template<unsigned int p>
        auto getMemory() -> decltype(base.template getMemory<p>())
        {
            return base.template getMemory<p>();
        }

        /*! \brief Set the memory of the base structure using an object
         *
         * \param mem memory object to use
         *
         */
        template<unsigned int p = 0> void setMemory(Memory & mem)
        {
            base.template setMemory<p>(mem);

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief Set the memory of the base structure using an array of objects
         *
         * \param mem memory objects to use
         *
         */
        void setMemoryArray(Memory * mem)
        {
            base.setMemoryArray(mem);

#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

            base_gpu.constructor_impl(v_size,this->base.toKernel());

#endif
        }

        /*! \brief Return the pointer that stores the data
         *
         * \return the data pointer
         *
         */
        template<unsigned int p = 0> void * getPointer()
        {
            return base.template getPointer<p>();
        }

        /*! \brief Return the pointer that stores the data
         *
         * \return the data pointer
         *
         */
        template<unsigned int p = 0> const void * getPointer() const
        {
            return base.getPointer();
        }

        /*! \brief This class has pointers inside
         *
         * \return false
         *
         */
        static bool noPointers()
        {
            return false;
        }

        /*! \brief Internal function, return the vector size
         *
         * \return the vector size
         *
         */
        const size_t & getInternal_v_size() const
        {
            return v_size;
        }

        /*! \brief Internal function, return the base grid
         *
         * \return the base grid
         *
         */
        const grid_base<1,T,Memory,layout_type> & getInternal_base() const
        {
            return base;
        }

        /*! \brief Copy the selected properties from host to device
         *
         */
        template<unsigned int ... prp> void hostToDevice()
        {
            base.template hostToDevice<prp ...>();
        }

        /*! \brief Synchronize the host memory with the device memory for the
         * selected properties
         *
         */
        template<unsigned int ... prp> void deviceToHost()
        {
            base.template deviceToHost<prp ...>();
        }

        /*! \brief Synchronize the host memory with the device memory for the
         * selected properties, in the range [start,stop)
         *
         */
        template<unsigned int ... prp> void deviceToHost(size_t start, size_t stop)
        {
            base.template deviceToHost<prp ...>(start,stop);
        }

        /*! \brief Synchronize the device memory with the host memory for the
         * selected properties, in the range [start,stop)
         *
         */
        template<unsigned int ... prp> void hostToDevice(size_t start, size_t stop)
        {
            base.template hostToDevice<prp ...>(start,stop);
        }

        /*! \brief Synchronize the device memory with the host memory for the
         * selected properties, in the range [start,stop), respecting NUMA domains
         *
         */
        template<unsigned int ... prp> void hostToDeviceNUMA(size_t start, size_t stop)
        {
            base.template hostToDeviceNUMA<prp ...>(start,stop);
        }

        /*! \brief Synchronize the device memory with the host memory for the
         * selected properties, respecting NUMA domains
         *
         */
        template<unsigned int ... prp> void hostToDeviceNUMA()
        {
            base.template hostToDeviceNUMA<prp ...>();
        }

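        // Usage sketch (illustrative, not part of the original header): a typical
        // round trip for properties 0 and 1 around a kernel launch:
        //
        //   v.template hostToDevice<0,1>();   // push host writes to the device
        //   // ... launch kernels on v.toKernel() ...
        //   v.template deviceToHost<0,1>();   // pull the results back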
#if defined(CUDIFY_USE_SEQUENTIAL) || defined(CUDIFY_USE_OPENMP)

        /*! \brief Convert the vector into a data-structure usable for computing on GPU
         *
         * \return a kernel-side reference view of the vector
         *
         */
        inline vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base> toKernel()
        {
//            base_gpu.constructor_impl(v_size,this->base.toKernel());

            return vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base>(base_gpu);
        }

        /*! \brief Convert the vector into a data-structure usable for computing on GPU
         *
         * \return a kernel-side reference view of the vector
         *
         */
        inline const vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base> toKernel() const
        {
//            base_gpu.constructor_impl(v_size,this->base.toKernel());

            return vector_gpu_ker_ref<typename apply_transform<layout_base,T>::type,layout_base>(base_gpu);
        }

#else

        /*! \brief Convert the vector into a data-structure usable for computing on GPU
         *
         * \return a kernel-side view of the vector
         *
         */
        vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> toKernel()
        {
            vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> v(v_size,this->base.toKernel());

            return v;
        }

        /*! \brief Convert the vector into a data-structure usable for computing on GPU
         *
         * \return a kernel-side view of the vector
         *
         */
        const vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> toKernel() const
        {
            if (base.size() == 0)
            {std::cout << __FILE__ << ":" << __LINE__ << " Warning: you are off-loading with toKernel() a vector that seems to be empty or not initialized" << std::endl;}

            vector_gpu_ker<typename apply_transform<layout_base,T>::type,layout_base> v(v_size,this->base.toKernel());

            return v;
        }

#endif

        /*! \brief Convert the selected properties of the vector into a string
         *
         * \param prefix string printed at the beginning of every line
         *
         * \return the string
         *
         */
        template<unsigned int ... prps>
        const std::string toString(std::string prefix = std::string())
        {
            std::stringstream ss;
            auto it = getIterator();

            while (it.isNext())
            {
                auto p = it.get();

                ss << prefix << " element[" << p << "]" << " ";

                vector_printer<self_type,prps ...> vp(*this,p,ss);
                boost::mpl::for_each_ref<boost::mpl::range_c<int,0,sizeof...(prps)>>(vp);

                ss << std::endl;

                ++it;
            }

            return ss.str();
        }

        //! Return a pointer to the internal size counter
        void * internal_get_size_pointer() {return &v_size;}

        //! Print the byte size of this structure and of its members
        void print_size()
        {
#ifndef DISABLE_ALL_RTTI
            std::cout << "the size of: " << demangle(typeid(self_type).name()) << " is " << sizeof(self_type) << std::endl;
            std::cout << " " << demangle(typeid(decltype(v_size)).name()) << ":" << sizeof(decltype(v_size)) << std::endl;
            std::cout << " " << demangle(typeid(decltype(base)).name()) << ":" << sizeof(decltype(base)) << std::endl;
#endif
        }

    };

    template <typename T> using vector_std = vector<T, HeapMemory, memory_traits_lin, openfpm::grow_policy_double, STD_VECTOR>;
    template<typename T> using vector_gpu = openfpm::vector<T,CudaMemory,memory_traits_inte>;
    template<typename T> using vector_soa = openfpm::vector<T,HeapMemory,memory_traits_inte>;
    template<typename T> using vector_gpu_lin = openfpm::vector<T,CudaMemory,memory_traits_lin>;
    template<typename T> using vector_gpu_single = openfpm::vector<T,CudaMemory,memory_traits_inte,openfpm::grow_policy_identity>;
    template<typename T> using vector_custd = vector<T, CudaMemory, memory_traits_inte, openfpm::grow_policy_double, STD_VECTOR>;
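
    // Selection sketch (illustrative, not part of the original header):
    //   vector_std<T>        - host-only std::vector-like storage (HeapMemory, linear layout)
    //   vector_gpu<T>        - CudaMemory with SoA layout; supports toKernel() off-loading
    //   vector_soa<T>        - host HeapMemory with SoA layout
    //   vector_gpu_lin<T>    - CudaMemory with linear (AoS) layout
    //   vector_gpu_single<T> - CudaMemory, SoA, capacity grows exactly to the requested size
    //   vector_custd<T>      - std::vector-like interface on CudaMemory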
}

#endif