OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
MPI_IBcastW.hpp
/*
 * MPI_IBcastW.hpp
 *
 *  Created on: Apr 8, 2017
 *      Author: i-bird
 */

#ifndef OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_
#define OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_

#include <mpi.h>

//! Set of wrapping classes for MPI_Ibcast
class MPI_IBcastWB
{
public:
    static inline void bcast(size_t proc, void * buf, size_t sz, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(buf, sz, MPI_BYTE, proc, MPI_COMM_WORLD, &req));
    }
};

//! General broadcast for an openfpm::vector of T, sent as raw bytes
template<typename T> class MPI_IBcastW
{
public:
    template<typename Memory> static inline void bcast(size_t proc, openfpm::vector<T,Memory> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T), MPI_BYTE, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of int
template<> class MPI_IBcastW<int>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<int> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_INT, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of unsigned int
template<> class MPI_IBcastW<unsigned int>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<unsigned int> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_UNSIGNED, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of short
template<> class MPI_IBcastW<short>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<short> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_SHORT, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of unsigned short
template<> class MPI_IBcastW<unsigned short>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<unsigned short> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_UNSIGNED_SHORT, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of char
template<> class MPI_IBcastW<char>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<char> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_CHAR, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of unsigned char
template<> class MPI_IBcastW<unsigned char>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<unsigned char> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_UNSIGNED_CHAR, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of size_t
template<> class MPI_IBcastW<size_t>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<size_t> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_UNSIGNED_LONG, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of long int
template<> class MPI_IBcastW<long int>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<long int> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_LONG, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of float
template<> class MPI_IBcastW<float>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<float> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_FLOAT, proc, MPI_COMM_WORLD, &req));
    }
};

//! Specialization for a vector of double
template<> class MPI_IBcastW<double>
{
public:
    static inline void bcast(size_t proc, openfpm::vector<double> & v, MPI_Request & req)
    {
        MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_DOUBLE, proc, MPI_COMM_WORLD, &req));
    }
};
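All of the wrappers above follow the same pattern: pick the MPI datatype that matches the element type (or fall back to MPI_BYTE) and start a non-blocking MPI_Ibcast on the vector's buffer. Inside OpenFPM they are normally driven by the VCluster communicator, but a minimal sketch of direct use could look like the following. It assumes MPI is already initialized, that the surrounding headers providing openfpm::vector and MPI_SAFE_CALL are included, and that the vector has the same size on every rank; the function and variable names are illustrative only.

void example_direct_bcast()
{
    // receive buffer: must be pre-sized identically on every rank
    openfpm::vector<double> v;
    v.resize(16);

    // start the non-blocking broadcast with processor 0 as root
    MPI_Request req;
    MPI_IBcastW<double>::bcast(0, v, req);

    // the request must be completed before v is read or modified
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}
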

//! This class is a functor for the "for_each" algorithm:
//! it broadcasts one property of an aggregate element type at a time
template<typename vect>
struct bcast_inte_impl
{
    //! vector to broadcast
    vect & send;

    //! vector of requests
    openfpm::vector<MPI_Request> & req;

    //! root processor
    size_t root;

    //! constructor
    inline bcast_inte_impl(vect & send,
                           openfpm::vector<MPI_Request> & req,
                           size_t root)
    :send(send),req(req),root(root)
    {};

    //! It calls the broadcast for each property
    template<typename T>
    inline void operator()(T& t)
    {
        typedef typename boost::mpl::at<typename vect::value_type::type,T>::type send_type;

        // Create one request
        req.add();

        // broadcast the buffer of this property
        MPI_IBcastWB::bcast(root, &send.template get<T::value>(0), send.size()*sizeof(send_type), req.last());
    }
};

//! Helper that starts the broadcast; the primary template broadcasts
//! the whole vector buffer in a single call
template<bool is_lin_or_inte>
struct b_cast_helper
{
    template<typename T, typename Mem, template<typename> class layout_base >
    static void bcast_(openfpm::vector<MPI_Request> & req,
                       openfpm::vector<T,Mem,layout_base> & v,
                       size_t root)
    {
        // Create one request
        req.add();

        // broadcast
        MPI_IBcastW<T>::bcast(root, v, req.last());
    }
};

//! Specialization that walks the properties of the element type
//! and issues one broadcast per property
template<>
struct b_cast_helper<false>
{
    template<typename T, typename Mem, template<typename> class layout_base >
    static void bcast_(openfpm::vector<MPI_Request> & req,
                       openfpm::vector<T,Mem,layout_base> & v,
                       size_t root)
    {
        bcast_inte_impl<openfpm::vector<T,Mem,layout_base>> bc(v,req,root);

        boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(bc);
    }
};

#endif /* OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_ */
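b_cast_helper is the dispatch point between the two strategies: the primary template starts a single byte-wise broadcast of the whole vector, while the <false> specialization builds a bcast_inte_impl functor and, via boost::mpl::for_each_ref over the element's properties, starts one broadcast per property. The parameter name is_lin_or_inte suggests the boolean is normally derived from the vector's memory layout (linear vs. interleaved); selecting it by hand below is only a stand-in for that trait. A hedged sketch, under the same assumptions as the previous example, of driving the helper directly for a plain float vector and completing all collected requests:

void example_helper_bcast()
{
    openfpm::vector<MPI_Request> req;
    openfpm::vector<float> v;
    v.resize(64);                        // same size on every rank

    // linear layout: a single MPI_Ibcast of the whole buffer, root = 0
    b_cast_helper<true>::bcast_(req, v, 0);

    // complete every broadcast started through the request vector
    MPI_Waitall((int)req.size(), &req.get(0), MPI_STATUSES_IGNORE);
}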