OpenFPM_pdata  1.1.0
Project that contains the implementation of distributed structures
 All Data Structures Namespaces Functions Variables Typedefs Enumerations Friends Pages
VCluster.hpp
1 /*
2  * Vcluster.hpp
3  *
4  * Created on: Feb 8, 2016
5  * Author: Pietro Incardona
6  */
7 
8 #ifndef VCLUSTER_HPP
9 #define VCLUSTER_HPP
10 
11 #include <signal.h>
12 #include "VCluster_base.hpp"
13 #include "VCluster_meta_function.hpp"
14 
15 void bt_sighandler(int sig, siginfo_t * info, void * ctx);
16 
36 class Vcluster: public Vcluster_base
37 {
/*! \brief Helper that turns a compile-time index_tuple<prp...> back into a
 * property pack and forwards it to the Vcluster receive-buffer processing.
 *
 * Primary template is intentionally empty; only the index_tuple
 * specialization below is usable.
 */
template<typename T>
struct index_gen {};

//! Specialization that extracts the prp... indexes from index_tuple
template<int ... prp>
struct index_gen<index_tuple<prp...>>
{
	/*! \brief Process the receive buffer of vcl restricted to the properties prp...
	 *
	 * \tparam op operation applied while unpacking (e.g. add/merge)
	 * \tparam T type of the sent objects
	 * \tparam S type of the receiving container
	 * \tparam layout_base memory layout of T (default memory_traits_lin)
	 *
	 * \param vcl cluster engine whose receive buffer is processed
	 * \param recv object where the unpacked data is accumulated
	 * \param sz_recv if non-NULL, filled with a per-processor receive count
	 *        (element semantics — presumably; see process_receive_buffer_with_prp)
	 * \param sz_recv_byte if non-NULL, filled with per-processor sizes in bytes
	 * \param op_param operation object forwarded to the unpacker
	 */
	template<typename op,
	         typename T,
	         typename S,
	         template <typename> class layout_base = memory_traits_lin>
	inline static void process_recv(Vcluster & vcl, S & recv, openfpm::vector<size_t> * sz_recv, openfpm::vector<size_t> * sz_recv_byte, op & op_param)
	{
		// simply forward, re-expanding the pack as explicit template arguments
		vcl.process_receive_buffer_with_prp<op,T,S,layout_base,prp...>(recv,sz_recv,sz_recv_byte,op_param);
	}
};
55 
/*! \brief Prepare the send buffer and exchange the messages with the other processors
 *
 * \tparam op operation to perform on the receiving side
 * \tparam T type of the objects to send
 * \tparam S type of the receiving container
 * \tparam layout_base memory layout of T
 *
 * \param send vector of sending buffers, one entry per destination processor
 * \param recv object that will later accumulate the received data
 * \param prc_send list of destination processors (must match send.size())
 * \param prc_recv processors we receive from (input with RECEIVE_KNOWN, output otherwise)
 * \param sz_recv per-processor receive sizes (semantics depend on opt)
 * \param opt options: NONE, RECEIVE_KNOWN, KNOWN_ELEMENT_OR_BYTE
 *
 * NOTE(review): this listing comes from a documentation extraction and a few
 * statements were dropped; they are flagged inline below. Confirm against the
 * original repository before compiling.
 */
template<typename op, typename T, typename S, template <typename> class layout_base> void prepare_send_buffer(openfpm::vector<T> & send,
	S & recv,
	openfpm::vector<size_t> & prc_send,
	openfpm::vector<size_t> & prc_recv,
	openfpm::vector<size_t> & sz_recv,
	size_t opt)
{
	openfpm::vector<size_t> sz_recv_byte(sz_recv.size());

	// Reset the receive buffer
	// NOTE(review): the statement that resets the receive buffer (presumably
	// reset_recv_buf(); as in SGather) is missing from this extraction — confirm

	#ifdef SE_CLASS1

	// one sending buffer per destination processor is required
	if (send.size() != prc_send.size())
		std::cerr << __FILE__ << ":" << __LINE__ << " Error, the number of processor involved \"prc.size()\" must match the number of sending buffers \"send.size()\" " << std::endl;

	#endif

	// Prepare the sending buffer: ask each object how many bytes it needs
	openfpm::vector<size_t> send_sz_byte;

	size_t tot_size = 0;

	for (size_t i = 0; i < send.size() ; i++)
	{
		size_t req = 0;

		//Pack requesting
		pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op, T, S, layout_base>::packingRequest(send.get(i), req, send_sz_byte);
		tot_size += req;
	}

	// one pre-allocated chunk holds all the packed messages
	HeapMemory pmem;

	ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(tot_size,pmem));
	mem.incRef();

	for (size_t i = 0; i < send.size() ; i++)
	{
		//Packing

		Pack_stat sts;

		// send_buf collects the pointers of the packed messages
		pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value, op, T, S, layout_base>::packing(mem, send.get(i), sts, send_buf);
	}

	// receive information
	base_info bi(&recv_buf,prc_recv,sz_recv_byte);

	// Send and recv multiple messages
	if (opt & RECEIVE_KNOWN)
	{
		// We are passing the number of elements but not the bytes, calculate the bytes
		if (opt & KNOWN_ELEMENT_OR_BYTE)
		{
			// We know the number of element convert to byte (ONLY if it is possible)
			// NOTE(review): the condition of an if statement is missing here in
			// this extraction (the brace/else structure below requires one) — confirm
			{
				for (size_t i = 0 ; i < sz_recv.size() ; i++)
					sz_recv_byte.get(i) = sz_recv.get(i) * sizeof(typename T::value_type);
			}
			else
			{std::cout << __FILE__ << ":" << __LINE__ << " Error " << demangle(typeid(T).name()) << " the type does not work with the option or NO_CHANGE_ELEMENTS" << std::endl;}

			Vcluster_base::sendrecvMultipleMessagesNBX(prc_send.size(),(size_t *)send_sz_byte.getPointer(),(size_t *)prc_send.getPointer(),(void **)send_buf.getPointer(),
					prc_recv.size(),(size_t *)prc_recv.getPointer(),(size_t *)sz_recv_byte.getPointer(),msg_alloc_known,(void *)&bi);
		}
		else
		{
			Vcluster_base::sendrecvMultipleMessagesNBX(prc_send.size(),(size_t *)send_sz_byte.getPointer(),(size_t *)prc_send.getPointer(),(void **)send_buf.getPointer(),
					prc_recv.size(),(size_t *)prc_recv.getPointer(),msg_alloc_known,(void *)&bi);
			// NOTE(review): sz_recv_tmp is presumably a member filled by the
			// call above — not declared in this view, confirm
			sz_recv_byte = sz_recv_tmp;
		}
	}
	else
	{
		// the set of source processors is unknown: discover it via NBX
		prc_recv.clear();
		sendrecvMultipleMessagesNBX(prc_send.size(),(size_t *)send_sz_byte.getPointer(),(size_t *)prc_send.getPointer(),(void **)send_buf.getPointer(),msg_alloc,(void *)&bi);
	}

	// Reorder the buffer by source processor
	reorder_buffer(prc_recv,sz_recv_byte);

	mem.decRef();
	delete &mem;
}
162 
163 
169  {
170  for (size_t i = 0 ; i < recv_buf.size() ; i++)
171  recv_buf.get(i).resize(0);
172 
173  recv_buf.resize(0);
174  }
175 
183  struct base_info
184  {
191 
194  :recv_buf(recv_buf),prc(prc),sz(sz)
195  {}
196  };
197 
211  static void * msg_alloc(size_t msg_i ,size_t total_msg, size_t total_p, size_t i, size_t ri, void * ptr)
212  {
213  base_info & rinfo = *(base_info *)ptr;
214 
215  if (rinfo.recv_buf == NULL)
216  {
217  std::cerr << __FILE__ << ":" << __LINE__ << " Internal error this processor is not suppose to receive\n";
218  return NULL;
219  }
220 
221  rinfo.recv_buf->resize(ri+1);
222 
223  rinfo.recv_buf->get(ri).resize(msg_i);
224 
225  // Receive info
226  rinfo.prc.add(i);
227  rinfo.sz.add(msg_i);
228 
229  // return the pointer
230  return rinfo.recv_buf->last().getPointer();
231  }
232 
233 
247  static void * msg_alloc_known(size_t msg_i ,size_t total_msg, size_t total_p, size_t i, size_t ri, void * ptr)
248  {
249  base_info & rinfo = *(base_info *)ptr;
250 
251  if (rinfo.recv_buf == NULL)
252  {
253  std::cerr << __FILE__ << ":" << __LINE__ << " Internal error this processor is not suppose to receive\n";
254  return NULL;
255  }
256 
257  rinfo.recv_buf->resize(ri+1);
258 
259  rinfo.recv_buf->get(ri).resize(msg_i);
260 
261  // return the pointer
262  return rinfo.recv_buf->last().getPointer();
263  }
264 
278  template<typename op, typename T, typename S, template <typename> class layout_base ,unsigned int ... prp >
281  openfpm::vector<size_t> * sz_byte,
282  op & op_param)
283  {
284  if (sz != NULL)
285  sz->resize(recv_buf.size());
286 
287  pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op, T, S, layout_base, prp... >::unpacking(recv, recv_buf, sz, sz_byte, op_param);
288  }
289 
290  public:
291 
/*! \brief Constructor
 *
 * \param argc main number of arguments (forwarded to the communication base)
 * \param argv main set of arguments (forwarded to the communication base)
 */
Vcluster(int *argc, char ***argv)
:Vcluster_base(argc,argv)
{
}
302 
330  template<typename T, typename S> bool SGather(T & send, S & recv,size_t root)
331  {
334 
335  return SGather(send,recv,prc,sz,root);
336  }
337 
/*! \brief metafunction used with generate_indexes
 *
 * Maps each index to itself (identity ordering), so the generated
 * property-index list keeps the natural order 0..N-1.
 */
template<size_t index, size_t N> struct MetaFuncOrd {
	enum { value = index };
};
342 
372  template<typename T,
373  typename S,
374  template <typename> class layout_base = memory_traits_lin>
375  bool SGather(T & send,
376  S & recv,
379  size_t root)
380  {
381 #ifdef SE_CLASS1
382  if (&send == (T *)&recv)
383  {std::cerr << "Error: " << __FILE__ << ":" << __LINE__ << " using SGather in general the sending object and the receiving object must be different" << std::endl;}
384 #endif
385 
386  // Reset the receive buffer
387  reset_recv_buf();
388 
389  // If we are on master collect the information
390  if (getProcessUnitID() == root)
391  {
392  // send buffer (master does not send anything) so send req and send_buf
393  // remain buffer with size 0
394  openfpm::vector<size_t> send_req;
395 
396  // receive information
397  base_info bi(&recv_buf,prc,sz);
398 
399  // Send and recv multiple messages
400  sendrecvMultipleMessagesNBX(send_req.size(),NULL,NULL,NULL,msg_alloc,&bi);
401 
402  // we generate the list of the properties to pack
403  typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;
404 
405  // operation object
407 
408  index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,&sz,NULL,opa);
409 
410  recv.add(send);
411  prc.add(root);
412  sz.add(send.size());
413  }
414  else
415  {
416  // send buffer (master does not send anything) so send req and send_buf
417  // remain buffer with size 0
418  openfpm::vector<size_t> send_prc;
419  send_prc.add(root);
420 
422 
424 
425  //Pack requesting
426 
427  size_t tot_size = 0;
428 
429  pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op_ssend_recv_add<void>, T, S, layout_base>::packingRequest(send, tot_size, sz);
430 
431  HeapMemory pmem;
432 
433  ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(tot_size,pmem));
434  mem.incRef();
435 
436  //Packing
437 
438  Pack_stat sts;
439 
440  pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op_ssend_recv_add<void>, T, S, layout_base>::packing(mem, send, sts, send_buf);
441 
442  // receive information
443  base_info bi(NULL,prc,sz);
444 
445  // Send and recv multiple messages
446  sendrecvMultipleMessagesNBX(send_prc.size(),(size_t *)sz.getPointer(),(size_t *)send_prc.getPointer(),(void **)send_buf.getPointer(),msg_alloc,(void *)&bi,NONE);
447 
448  mem.decRef();
449  delete &mem;
450  }
451 
452  return true;
453  }
454 
481  template<typename T, typename S> bool SScatter(T & send, S & recv, openfpm::vector<size_t> & prc, openfpm::vector<size_t> & sz, size_t root)
482  {
483  // Reset the receive buffer
484  reset_recv_buf();
485 
486  // If we are on master scatter the information
487  if (getProcessUnitID() == root)
488  {
489  // Prepare the sending buffer
491 
492 
493  openfpm::vector<size_t> sz_byte;
494  sz_byte.resize(sz.size());
495 
496  size_t ptr = 0;
497 
498  for (size_t i = 0; i < sz.size() ; i++)
499  {
500  send_buf.add((char *)send.getPointer() + sizeof(typename T::value_type)*ptr );
501  sz_byte.get(i) = sz.get(i) * sizeof(typename T::value_type);
502  ptr += sz.get(i);
503  }
504 
505  // receive information
506  base_info bi(&recv_buf,prc,sz);
507 
508  // Send and recv multiple messages
509  sendrecvMultipleMessagesNBX(prc.size(),(size_t *)sz_byte.getPointer(),(size_t *)prc.getPointer(),(void **)send_buf.getPointer(),msg_alloc,(void *)&bi);
510 
511  // we generate the list of the properties to pack
512  typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;
513 
514  // operation object
516 
517  index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S>(*this,recv,NULL,NULL,opa);
518  }
519  else
520  {
521  // The non-root receive
522  openfpm::vector<size_t> send_req;
523 
524  // receive information
525  base_info bi(&recv_buf,prc,sz);
526 
527  // Send and recv multiple messages
528  sendrecvMultipleMessagesNBX(send_req.size(),NULL,NULL,NULL,msg_alloc,&bi);
529 
530  // we generate the list of the properties to pack
531  typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;
532 
533  // operation object
535 
536  index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S>(*this,recv,NULL,NULL,opa);
537  }
538 
539  return true;
540  }
541 
549  {
550 
551  struct recv_buff_reorder
552  {
554  size_t proc;
555 
557  size_t pos;
558 
560  recv_buff_reorder()
561  :proc(0),pos(0)
562  {};
563 
565  bool operator<(const recv_buff_reorder & rd) const
566  {
567  return proc < rd.proc;
568  }
569  };
570 
572 
573  rcv.resize(recv_buf.size());
574 
575  for (size_t i = 0 ; i < rcv.size() ; i++)
576  {
577  rcv.get(i).proc = prc.get(i);
578  rcv.get(i).pos = i;
579  }
580 
581  // we sort based on processor
582  rcv.sort();
583 
585  recv_ord.resize(rcv.size());
586 
587  openfpm::vector<size_t> prc_ord;
588  prc_ord.resize(rcv.size());
589 
590  openfpm::vector<size_t> sz_recv_ord;
591  sz_recv_ord.resize(rcv.size());
592 
593  // Now we reorder rcv
594  for (size_t i = 0 ; i < rcv.size() ; i++)
595  {
596  recv_ord.get(i).swap(recv_buf.get(rcv.get(i).pos));
597  prc_ord.get(i) = rcv.get(i).proc;
598  sz_recv_ord.get(i) = sz_recv.get(rcv.get(i).pos);
599  }
600 
601  // move rcv into recv
602  recv_buf.swap(recv_ord);
603  prc.swap(prc_ord);
604  sz_recv.swap(sz_recv_ord);
605 
606  // reorder prc_recv and recv_sz
607  }
608 
636  template<typename T,
637  typename S,
638  template <typename> class layout_base = memory_traits_lin>
640  S & recv,
641  openfpm::vector<size_t> & prc_send,
642  openfpm::vector<size_t> & prc_recv,
643  openfpm::vector<size_t> & sz_recv,
644  size_t opt = NONE)
645  {
646  prepare_send_buffer<op_ssend_recv_add<void>,T,S,layout_base>(send,recv,prc_send,prc_recv,sz_recv,opt);
647 
648  // we generate the list of the properties to pack
649  typedef typename ::generate_indexes<int, has_max_prop<T, has_value_type<T>::value>::number, MetaFuncOrd>::result ind_prop_to_pack;
650 
652 
653  index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,&sz_recv,NULL,opa);
654 
655  return true;
656  }
657 
658 
687  template<typename T, typename S, template <typename> class layout_base, int ... prp> bool SSendRecvP(openfpm::vector<T> & send,
688  S & recv,
689  openfpm::vector<size_t> & prc_send,
690  openfpm::vector<size_t> & prc_recv,
691  openfpm::vector<size_t> & sz_recv,
692  openfpm::vector<size_t> & sz_recv_byte,
693  size_t opt = NONE)
694  {
695  prepare_send_buffer<op_ssend_recv_add<void>,T,S,layout_base>(send,recv,prc_send,prc_recv,sz_recv,opt);
696 
697  // operation object
699 
700  // process the received information
701  process_receive_buffer_with_prp<op_ssend_recv_add<void>,T,S,layout_base,prp...>(recv,&sz_recv,&sz_recv_byte,opa);
702 
703  return true;
704  }
705 
706 
734  template<typename T, typename S, template <typename> class layout_base, int ... prp>
736  S & recv,
737  openfpm::vector<size_t> & prc_send,
738  openfpm::vector<size_t> & prc_recv,
739  openfpm::vector<size_t> & sz_recv,
740  size_t opt = NONE)
741  {
742  prepare_send_buffer<op_ssend_recv_add<void>,T,S,layout_base>(send,recv,prc_send,prc_recv,sz_recv,opt);
743 
744  // operation object
746 
747  // process the received information
748  process_receive_buffer_with_prp<op_ssend_recv_add<void>,T,S,layout_base,prp...>(recv,&sz_recv,NULL,opa);
749 
750  return true;
751  }
752 
789  template<typename op,
790  typename T,
791  typename S,
792  template <typename> class layout_base,
793  int ... prp>
795  S & recv,
796  openfpm::vector<size_t> & prc_send,
797  op & op_param,
798  openfpm::vector<size_t> & prc_recv,
799  openfpm::vector<size_t> & recv_sz,
800  size_t opt = NONE)
801  {
802  prepare_send_buffer<op,T,S,layout_base>(send,recv,prc_send,prc_recv,recv_sz,opt);
803 
804  // process the received information
805  process_receive_buffer_with_prp<op,T,S,layout_base,prp...>(recv,NULL,NULL,op_param);
806 
807  return true;
808  }
809 
810 };
811 
812 
813 
814 // Function to initialize the global VCluster //
815 
816 extern Vcluster * global_v_cluster_private;
817 
824 static inline void init_global_v_cluster_private(int *argc, char ***argv)
825 {
826  if (global_v_cluster_private == NULL)
827  global_v_cluster_private = new Vcluster(argc,argv);
828 }
829 
830 static inline void delete_global_v_cluster_private()
831 {
832  delete global_v_cluster_private;
833 }
834 
/*! \brief Obtain a reference to the global VCluster
 *
 * openfpm_init() must have been called first; otherwise the returned
 * reference is invalid (dereference of a NULL pointer). With SE_CLASS1
 * that misuse is reported on stderr.
 *
 * \return reference to the (one per process) VCluster
 */
static inline Vcluster & create_vcluster()
{
#ifdef SE_CLASS1

	if (global_v_cluster_private == NULL)
		std::cerr << __FILE__ << ":" << __LINE__ << " Error you must call openfpm_init before using any distributed data structures";

#endif

	return *global_v_cluster_private;
}
846 
847 
848 
/*! \brief Check if the library has been initialized
 *
 * \return true if openfpm_init() has been called (and openfpm_finalize() has not)
 */
static inline bool is_openfpm_init()
{
	return ofp_initialized;
}
858 
864 static inline void openfpm_init(int *argc, char ***argv)
865 {
866 #ifdef HAVE_PETSC
867 
868  PetscInitialize(argc,argv,NULL,NULL);
869 
870 #endif
871 
872  init_global_v_cluster_private(argc,argv);
873 
874 #ifdef SE_CLASS1
875  std::cout << "OpenFPM is compiled with debug mode LEVEL:1. Remember to remove SE_CLASS1 when you go in production" << std::endl;
876 #endif
877 
878 #ifdef SE_CLASS2
879  std::cout << "OpenFPM is compiled with debug mode LEVEL:2. Remember to remove SE_CLASS2 when you go in production" << std::endl;
880 #endif
881 
882 #ifdef SE_CLASS3
883  std::cout << "OpenFPM is compiled with debug mode LEVEL:3. Remember to remove SE_CLASS3 when you go in production" << std::endl;
884 #endif
885 
886  // install segmentation fault signal handler
887 
888  struct sigaction sa;
889 
890  sa.sa_sigaction = bt_sighandler;
891  sigemptyset(&sa.sa_mask);
892  sa.sa_flags = SA_RESTART;
893 
894  sigaction(SIGSEGV, &sa, NULL);
895 
896  if (*argc != 0)
897  program_name = std::string(*argv[0]);
898 
899  ofp_initialized = true;
900 }
901 
902 
/*! \brief Finalize the library
 *
 * This function must be called at the end of the program: it finalizes
 * PETSc when available, destroys the global VCluster and clears the
 * initialized flag.
 */
static inline void openfpm_finalize()
{
#ifdef HAVE_PETSC

	PetscFinalize();

#endif

	delete_global_v_cluster_private();
	ofp_initialized = false;
}
919 
920 
921 #endif
922 
void process_receive_buffer_with_prp(S &recv, openfpm::vector< size_t > *sz, openfpm::vector< size_t > *sz_byte, op &op_param)
Process the receive buffer.
Definition: VCluster.hpp:279
Transform the boost::fusion::vector into memory specification (memory_traits)
Definition: memory_conf.hpp:93
base_info(openfpm::vector< BHeapMemory > *recv_buf, openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz)
constructor
Definition: VCluster.hpp:193
size_t getProcessUnitID()
Get the process unit id.
bool SSendRecvP_op(openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, op &op_param, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &recv_sz, size_t opt=NONE)
Semantic Send and receive, send the data to processors and receive from the other processors...
Definition: VCluster.hpp:794
There is max_prop inside.
void reset_recv_buf()
Reset the receive buffer.
Definition: VCluster.hpp:168
size_t size()
Stub size.
Definition: map_vector.hpp:70
void sendrecvMultipleMessagesNBX(openfpm::vector< size_t > &prc, openfpm::vector< T > &data, openfpm::vector< size_t > prc_recv, openfpm::vector< size_t > &recv_sz, void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
Send and receive multiple messages.
static void * msg_alloc(size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, void *ptr)
Call-back to allocate buffer to receive data.
Definition: VCluster.hpp:211
void prepare_send_buffer(openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt)
Prepare the send buffer and send the message to other processors.
Definition: VCluster.hpp:74
This class allocate, and destroy CPU memory.
Definition: HeapMemory.hpp:39
bool send(size_t proc, size_t tag, const void *mem, size_t sz)
Send data to a processor.
static void * msg_alloc_known(size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, void *ptr)
Call-back to allocate buffer to receive data.
Definition: VCluster.hpp:247
bool SSendRecvP(openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, openfpm::vector< size_t > &sz_recv_byte, size_t opt=NONE)
Semantic Send and receive, send the data to processors and receive from the other processors...
Definition: VCluster.hpp:687
Implementation of VCluster class.
Definition: VCluster.hpp:36
This class virtualize the cluster of PC as a set of processes that communicate.
bool SSendRecv(openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
Semantic Send and receive, send the data to processors and receive from the other processors...
Definition: VCluster.hpp:639
openfpm::vector< BHeapMemory > * recv_buf
Receive buffer.
Definition: VCluster.hpp:186
bool SSendRecvP(openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
Semantic Send and receive, send the data to processors and receive from the other processors...
Definition: VCluster.hpp:735
bool recv(size_t proc, size_t tag, void *v, size_t sz)
Recv data from a processor.
virtual void incRef()
Increment the reference counter.
Definition: ExtPreAlloc.hpp:69
bool SGather(T &send, S &recv, size_t root)
Semantic Gather, gather the data from all processors into one node.
Definition: VCluster.hpp:330
static void process_recv(Vcluster &vcl, S &recv, openfpm::vector< size_t > *sz_recv, openfpm::vector< size_t > *sz_recv_byte, op &op_param)
Process the receive buffer.
Definition: VCluster.hpp:50
These set of classes generate an array definition at compile-time.
Definition: ct_array.hpp:25
void reorder_buffer(openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz_recv)
reorder the receiving buffer
Definition: VCluster.hpp:548
It return true if the object T require complex serialization.
openfpm::vector< size_t > & sz
size of each message
Definition: VCluster.hpp:190
bool SGather(T &send, S &recv, openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz, size_t root)
Semantic Gather, gather the data from all processors into one node.
Definition: VCluster.hpp:375
virtual void decRef()
Decrement the reference counter.
Definition: ExtPreAlloc.hpp:73
Helper class to add data.
openfpm::vector< size_t > & prc
receiving processor list
Definition: VCluster.hpp:188
bool SScatter(T &send, S &recv, openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz, size_t root)
Semantic Scatter, scatter the data from one processor to the other node.
Definition: VCluster.hpp:481
Packing status object.
Definition: Pack_stat.hpp:51
metafunction
Definition: VCluster.hpp:339
Vcluster(int *argc, char ***argv)
Constructor.
Definition: VCluster.hpp:298
openfpm::vector< MPI_Request > req
vector of MPI requests
openfpm::vector< BHeapMemory > recv_buf
Receive buffers.