OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
MPI_IBcastW.hpp
/*
 * MPI_IBcastW.hpp
 *
 *  Created on: Apr 8, 2017
 *      Author: i-bird
 */

#ifndef OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_
#define OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_

#include <mpi.h>

//! Set of wrapping classes for MPI_Ibcast: they select the correct call based on the type to broadcast

//! General broadcast of a raw byte buffer
class MPI_IBcastWB
{
public:
	static inline void bcast(size_t proc, void * buf, size_t sz, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(buf,sz,MPI_BYTE,proc,MPI_COMM_WORLD,&req));
	}
};

//! General broadcast for an openfpm::vector of any type (sent as raw bytes)
template<typename T> class MPI_IBcastW
{
public:
	template<typename Memory> static inline void bcast(size_t proc, openfpm::vector<T,Memory> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T),MPI_BYTE,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of int
template<> class MPI_IBcastW<int>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<int> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_INT,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of unsigned int
template<> class MPI_IBcastW<unsigned int>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<unsigned int> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of short
template<> class MPI_IBcastW<short>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<short> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_SHORT,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of unsigned short
template<> class MPI_IBcastW<unsigned short>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<unsigned short> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_SHORT,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of char
template<> class MPI_IBcastW<char>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<char> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_CHAR,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of unsigned char
template<> class MPI_IBcastW<unsigned char>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<unsigned char> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_CHAR,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of size_t
template<> class MPI_IBcastW<size_t>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<size_t> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_UNSIGNED_LONG,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of long int
template<> class MPI_IBcastW<long int>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<long int> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_LONG,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of float
template<> class MPI_IBcastW<float>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<float> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_FLOAT,proc,MPI_COMM_WORLD,&req));
	}
};

//! specialization for vector of double
template<> class MPI_IBcastW<double>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<double> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(),MPI_DOUBLE,proc,MPI_COMM_WORLD,&req));
	}
};

//! Functor for the "for_each" algorithm: it broadcasts each property of an aggregate element
template<typename vect>
struct bcast_inte_impl
{
	//! vector to broadcast
	vect & send;

	//! vector of requests
	openfpm::vector<MPI_Request> & req;

	//! root processor
	size_t root;

	//! constructor
	inline bcast_inte_impl(vect & send,
			               openfpm::vector<MPI_Request> & req,
			               size_t root)
	:send(send),req(req),root(root)
	{};

	//! It starts one broadcast for each property
	template<typename T>
	inline void operator()(T& t)
	{
		typedef typename boost::mpl::at<typename vect::value_type::type,T>::type send_type;

		// Create one request
		req.add();

		// broadcast the property as raw bytes
		MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last());
	}
};

//! Broadcast helper: in the general case the whole vector is broadcast with a single call
template<bool is_lin_or_inte>
struct b_cast_helper
{
	template<typename T, typename Mem, template<typename> class layout_base >
	static void bcast_(openfpm::vector<MPI_Request> & req,
			           openfpm::vector<T,Mem,layout_base> & v,
			           size_t root)
	{
		// Create one request
		req.add();

		// broadcast the full vector
		MPI_IBcastW<T>::bcast(root,v,req.last());
	}
};

//! Specialization: broadcast each property of the element separately
template<>
struct b_cast_helper<false>
{
	template<typename T, typename Mem, template<typename> class layout_base >
	static void bcast_(openfpm::vector<MPI_Request> & req,
			           openfpm::vector<T,Mem,layout_base> & v,
			           size_t root)
	{
		bcast_inte_impl<openfpm::vector<T,Mem,layout_base>> bc(v,req,root);

		boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(bc);
	}
};

#endif /* OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_ */
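
A minimal usage sketch of these wrappers, assuming MPI has already been initialized and that openfpm::vector and MPI_SAFE_CALL are available from the OpenFPM headers. The include paths and the free function below are illustrative only; inside OpenFPM these wrappers are normally driven by the Vcluster broadcast machinery rather than called directly.

#include <mpi.h>
#include "Vector/map_vector.hpp"          // openfpm::vector (illustrative path)
#include "MPI_wrapper/MPI_IBcastW.hpp"    // this header (illustrative path)

// Non-blocking broadcast of a vector of double from processor 0.
// Every processor must resize the vector to the same size before the call.
void example_bcast(int rank)
{
	openfpm::vector<double> v;
	v.resize(16);
	if (rank == 0)
	{
		for (size_t i = 0 ; i < v.size() ; i++)
		{v.get(i) = (double)i;}
	}

	// one MPI_Request per pending broadcast
	openfpm::vector<MPI_Request> req;
	req.add();

	// start the non-blocking broadcast with root processor 0
	MPI_IBcastW<double>::bcast(0,v,req.last());

	// MPI_Ibcast is non-blocking: wait for completion before reading v
	MPI_Waitall((int)req.size(),&req.get(0),MPI_STATUSES_IGNORE);
}

Note that the wrappers only select the MPI datatype and start the operation; completion handling stays with the caller, which is why the requests are collected in a vector and waited on together.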