OpenFPM_pdata  1.1.0
Project that contains the implementation of distributed structures
Vcluster Class Reference

Implementation of VCluster class. More...

Detailed Description

Implementation of VCluster class.

This class implements communication functions such as summation, minimum and maximum across processors, as well as Dynamic Sparse Data Exchange (DSDE).
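Reduction requests are queued on the Vcluster and complete when execute() is called. A minimal sketch of a min reduction, assuming the library has already been initialized (e.g. via openfpm_init):

// Hedged sketch: min reduction across processors
Vcluster & vcl = create_vcluster();

double val = vcl.getProcessUnitID(); // each processor contributes its rank
vcl.min(val);                        // queue the reduction request
vcl.execute();                       // complete all queued requests
// val is now 0 on every processor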

Vcluster Min max sum

Vcluster & vcl = create_vcluster();
unsigned char uc = 1;
char c = 1;
short s = 1;
unsigned short us = 1;
int i = 1;
unsigned int ui = 1;
long int li = 1;
unsigned long int uli = 1;
float f = 1;
double d = 1;
unsigned char uc_max = vcl.getProcessUnitID();
char c_max = vcl.getProcessUnitID();
short s_max = vcl.getProcessUnitID();
unsigned short us_max = vcl.getProcessUnitID();
int i_max = vcl.getProcessUnitID();
unsigned int ui_max = vcl.getProcessUnitID();
long int li_max = vcl.getProcessUnitID();
unsigned long int uli_max = vcl.getProcessUnitID();
float f_max = vcl.getProcessUnitID();
double d_max = vcl.getProcessUnitID();
// Sum reductions
if ( vcl.getProcessingUnits() < 128 )
    vcl.sum(c);
if ( vcl.getProcessingUnits() < 256 )
    vcl.sum(uc);
if ( vcl.getProcessingUnits() < 32768 )
    vcl.sum(s);
if ( vcl.getProcessingUnits() < 65536 )
    vcl.sum(us);
if ( vcl.getProcessingUnits() < 2147483648 )
    vcl.sum(i);
if ( vcl.getProcessingUnits() < 4294967296 )
    vcl.sum(ui);
vcl.sum(li);
vcl.sum(uli);
vcl.sum(f);
vcl.sum(d);

// Max reduction
if ( vcl.getProcessingUnits() < 128 )
    vcl.max(c_max);
if ( vcl.getProcessingUnits() < 256 )
    vcl.max(uc_max);
if ( vcl.getProcessingUnits() < 32768 )
    vcl.max(s_max);
if ( vcl.getProcessingUnits() < 65536 )
    vcl.max(us_max);
if ( vcl.getProcessingUnits() < 2147483648 )
    vcl.max(i_max);
if ( vcl.getProcessingUnits() < 4294967296 )
    vcl.max(ui_max);
vcl.max(li_max);
vcl.max(uli_max);
vcl.max(f_max);
vcl.max(d_max);
vcl.execute();

Vcluster all gather

T data = vcl.getProcessUnitID();
vcl.allGather(data,clt);
vcl.execute();
for (size_t i = 0 ; i < vcl.getProcessingUnits() ; i++)
    BOOST_REQUIRE_EQUAL(i,(size_t)clt.get(i));
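For reference, a self-contained version of the snippet above might look as follows; clt is assumed to be an openfpm::vector<size_t> that receives one element per processor:

// Hedged sketch: gather one value from every processor
Vcluster & vcl = create_vcluster();
openfpm::vector<size_t> clt;

size_t data = vcl.getProcessUnitID(); // each rank contributes its own id
vcl.allGather(data,clt);              // queue the all-gather
vcl.execute();                        // complete the request
// now clt.get(i) == i for every i < vcl.getProcessingUnits()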

Dynamic sparse data exchange with complex objects

// A vector of vectors; we want to send each inner vector to one specified processor.
// Two empty vectors (v2 and v3) are used to receive the data.
// In this case each processor will send a message of different size to all the other
// processors, but the targets can also be a subset of the processors
v1.resize(vcl.getProcessingUnits());
// We fill the send buffer with some senseless data
for (size_t i = 0 ; i < v1.size() ; i++)
{
    // each vector is filled with a different message size
    for (size_t j = 0 ; j < i % SSCATTER_MAX ; j++)
        v1.get(i).add(j);

    // generate the sending list (in this case the sending list is all the other
    // processors, but in general it can be some of them, chosen at random)
    prc_send.add((i + vcl.getProcessUnitID()) % vcl.getProcessingUnits());
}
// Send, and receive from the other processors; v2 contains the received data.
// Because in this case v2 is an openfpm::vector<size_t>, all the received
// vectors are concatenated one after the other. For example, if the processor receives
// 3 openfpm::vector<size_t> of 3, 4 and 5 elements, v2 will be a vector of 12 elements
vcl.SSendRecv(v1,v2,prc_send,prc_recv2,sz_recv2);
// Send, and receive from the other processors; v3 contains the received data.
// Because in this case v3 is an openfpm::vector<openfpm::vector<size_t>>, the vectors from
// each processor are collected separately. For example, if the processor receives
// 3 openfpm::vector<size_t> of 3, 4 and 5 elements, v3 will be a vector of 3 elements
// (openfpm::vector), each having respectively 3, 4 and 5 elements
vcl.SSendRecv(v1,v3,prc_send,prc_recv3,sz_recv3);
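The snippet above leaves its buffers undeclared; a hedged sketch of the declarations it assumes:

// Illustrative declarations for the example above (assumed types)
openfpm::vector<openfpm::vector<size_t>> v1;  // send buffer: one inner vector per target
openfpm::vector<size_t> v2;                   // receive buffer: concatenated elements
openfpm::vector<openfpm::vector<size_t>> v3;  // receive buffer: one inner vector per source
openfpm::vector<size_t> prc_send;             // target processor for each entry of v1
openfpm::vector<size_t> prc_recv2, sz_recv2;  // outputs: source processors and sizes for v2
openfpm::vector<size_t> prc_recv3, sz_recv3;  // outputs: source processors and sizes for v3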

Dynamic sparse data exchange with buffers

// We send one message to each target processor (a message is an openfpm::vector<unsigned char>,
// i.e. an array of bytes); the received messages are openfpm::vector<unsigned char> as well.
// Each processor communicates based on a list of processors;
// here we construct that list so that each processor communicates
// with the next 8 processors (by id)
for (size_t i = 0 ; i < 8 && i < n_proc ; i++)
{
    size_t p_id = (i + 1 + vcl.getProcessUnitID()) % n_proc;

    // avoid communicating with yourself
    if (p_id != vcl.getProcessUnitID())
    {
        // Create a hello message
        prc.add(p_id);
        message.add();
        std::ostringstream msg;
        msg << "Hello from " << vcl.getProcessUnitID() << " to " << p_id;
        std::string str(msg.str());
        message.last().resize(j); // j (the message size) is assumed defined in the enclosing scope
        memset(message.last().getPointer(),0,j);
        std::copy(str.c_str(),&(str.c_str())[msg.str().size()],&(message.last().get(0)));
    }
}
// For simplicity we create the receiving buffers for all processors in advance
recv_message.resize(n_proc);
// The pattern is not really random, so we can preallocate the receive buffers
for (size_t i = 0 ; i < 8 && i < n_proc ; i++)
{
    long int p_id = vcl.getProcessUnitID() - i - 1;
    if (p_id < 0)
        p_id += n_proc;
    else
        p_id = p_id % n_proc;

    if (p_id != (long int)vcl.getProcessUnitID())
        recv_message.get(p_id).resize(j);
}
if (opt == RECEIVE_UNKNOWN)
{
    // Send and receive
    vcl.sendrecvMultipleMessagesNBX(prc,message,msg_alloc,&recv_message);
}
void * msg_alloc(size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, void * ptr)
{
    // convert the void pointer argument into a pointer to the receiving buffers
    // (the cast target is assumed from the recv_message buffer used above)
    openfpm::vector<openfpm::vector<unsigned char>> * v = static_cast<openfpm::vector<openfpm::vector<unsigned char>> *>(ptr);

    if (create_vcluster().getProcessingUnits() <= 8)
    {if (totp_check) BOOST_REQUIRE_EQUAL(total_p,create_vcluster().getProcessingUnits()-1);}
    else
    {if (totp_check) BOOST_REQUIRE_EQUAL(total_p,(size_t)8);}

    BOOST_REQUIRE_EQUAL(msg_i, global_step);

    // Create the memory to receive the message
    // msg_i contains the size of the message to receive
    // i contains the processor id
    v->get(i).resize(msg_i);

    // return the pointer to the allocated memory
    return &(v->get(i).get(0));
}
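The example above also relies on a few names declared elsewhere in the enclosing test; a hedged sketch of what they are assumed to be:

// Illustrative declarations for the buffer-exchange example (assumed types)
size_t n_proc = create_vcluster().getProcessingUnits();       // number of processors
openfpm::vector<size_t> prc;                                  // target processor ids
openfpm::vector<openfpm::vector<unsigned char>> message;      // one byte buffer per target
openfpm::vector<openfpm::vector<unsigned char>> recv_message; // one byte buffer per source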

Definition at line 36 of file VCluster.hpp.

#include <VCluster.hpp>

Inheritance diagram for Vcluster (Vcluster inherits from Vcluster_base).

Data Structures

struct  base_info
 Base info. More...
 
struct  index_gen
 
struct  index_gen< index_tuple< prp...> >
 Process the receive buffer using the specified properties (meta-function) More...
 
struct  MetaFuncOrd
 metafunction More...
 

Public Member Functions

 Vcluster (int *argc, char ***argv)
 Constructor. More...
 
template<typename T , typename S >
bool SGather (T &send, S &recv, size_t root)
 Semantic Gather, gather the data from all processors into one node. More...
 
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool SGather (T &send, S &recv, openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz, size_t root)
 Semantic Gather, gather the data from all processors into one node. More...
 
template<typename T , typename S >
bool SScatter (T &send, S &recv, openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz, size_t root)
 Semantic Scatter, scatter the data from one processor to the other node. More...
 
void reorder_buffer (openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz_recv)
 reorder the receiving buffer More...
 
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool SSendRecv (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors. More...
 
template<typename T , typename S , template< typename > class layout_base, int... prp>
bool SSendRecvP (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, openfpm::vector< size_t > &sz_recv_byte, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors. More...
 
template<typename T , typename S , template< typename > class layout_base, int... prp>
bool SSendRecvP (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors. More...
 
template<typename op , typename T , typename S , template< typename > class layout_base, int... prp>
bool SSendRecvP_op (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, op &op_param, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &recv_sz, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors. More...
 
- Public Member Functions inherited from Vcluster_base
 Vcluster_base (int *argc, char ***argv)
 Virtual cluster constructor. More...
 
MPI_Comm getMPIComm ()
 Get the MPI_Communicator (or processor group) this VCluster is using. More...
 
size_t getProcessingUnits ()
 Get the total number of processors. More...
 
size_t size ()
 Get the total number of processors. More...
 
size_t getProcessUnitID ()
 Get the process unit id. More...
 
size_t rank ()
 Get the process unit id. More...
 
template<typename T >
void sum (T &num)
 Sum the numbers across all processors and get the result. More...
 
template<typename T >
void max (T &num)
 Get the maximum number across all processors (or reduction with infinity norm) More...
 
template<typename T >
void min (T &num)
 Get the minimum number across all processors (or reduction with infinity norm) More...
 
template<typename T >
void sendrecvMultipleMessagesNBX (openfpm::vector< size_t > &prc, openfpm::vector< T > &data, openfpm::vector< size_t > prc_recv, openfpm::vector< size_t > &recv_sz, void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages. More...
 
template<typename T >
void sendrecvMultipleMessagesNBX (openfpm::vector< size_t > &prc, openfpm::vector< T > &data, void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages. More...
 
void sendrecvMultipleMessagesNBX (size_t n_send, size_t sz[], size_t prc[], void *ptr[], size_t n_recv, size_t prc_recv[], size_t sz_recv[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages. More...
 
void sendrecvMultipleMessagesNBX (size_t n_send, size_t sz[], size_t prc[], void *ptr[], size_t n_recv, size_t prc_recv[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages. More...
 
void sendrecvMultipleMessagesNBX (size_t n_send, size_t sz[], size_t prc[], void *ptr[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages. More...
 
bool send (size_t proc, size_t tag, const void *mem, size_t sz)
 Send data to a processor. More...
 
template<typename T , typename Mem , typename gr >
bool send (size_t proc, size_t tag, openfpm::vector< T, Mem, gr > &v)
 Send data to a processor. More...
 
bool recv (size_t proc, size_t tag, void *v, size_t sz)
 Recv data from a processor. More...
 
template<typename T , typename Mem , typename gr >
bool recv (size_t proc, size_t tag, openfpm::vector< T, Mem, gr > &v)
 Recv data from a processor. More...
 
template<typename T , typename Mem , typename gr >
bool allGather (T &send, openfpm::vector< T, Mem, gr > &v)
 Gather the data from all processors. More...
 
template<typename T , typename Mem , typename gr >
bool Bcast (openfpm::vector< T, Mem, gr > &v, size_t root)
 Broadcast the data to all processors. More...
 
void execute ()
 Execute all the requests. More...
 
void clear ()
 Release the buffer used for communication. More...
 

Private Member Functions

template<typename op , typename T , typename S , template< typename > class layout_base>
void prepare_send_buffer (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt)
 Prepare the send buffer and send the message to other processors. More...
 
void reset_recv_buf ()
 Reset the receive buffer. More...
 
template<typename op , typename T , typename S , template< typename > class layout_base, unsigned int... prp>
void process_receive_buffer_with_prp (S &recv, openfpm::vector< size_t > *sz, openfpm::vector< size_t > *sz_byte, op &op_param)
 Process the receive buffer. More...
 

Static Private Member Functions

static void * msg_alloc (size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, void *ptr)
 Call-back to allocate buffer to receive data. More...
 
static void * msg_alloc_known (size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, void *ptr)
 Call-back to allocate buffer to receive data. More...
 

Additional Inherited Members

- Data Fields inherited from Vcluster_base
openfpm::vector< size_t > sz_recv_tmp
 
- Protected Attributes inherited from Vcluster_base
openfpm::vector< BHeapMemory > recv_buf
 Receive buffers.
 

Constructor & Destructor Documentation

Vcluster::Vcluster ( int *  argc,
char ***  argv 
)
inline

Constructor.

Parameters
argc - main number of arguments
argv - main set of arguments

Definition at line 298 of file VCluster.hpp.
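For reference, a Vcluster is normally obtained through the library entry points rather than by calling the constructor directly. A minimal sketch, assuming the usual openfpm_init/openfpm_finalize entry points:

#include "VCluster.hpp"

int main(int argc, char* argv[])
{
    // initialize the library (this constructs the global Vcluster)
    openfpm_init(&argc,&argv);

    Vcluster & vcl = create_vcluster();
    size_t rank = vcl.getProcessUnitID();   // id of this processor
    size_t n    = vcl.getProcessingUnits(); // total number of processors

    openfpm_finalize();
    return 0;
}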

Member Function Documentation

static void* Vcluster::msg_alloc ( size_t  msg_i,
size_t  total_msg,
size_t  total_p,
size_t  i,
size_t  ri,
void *  ptr 
)
inlinestaticprivate

Call-back to allocate buffer to receive data.

Parameters
msg_i - size required to receive the message from i
total_msg - total size to receive from all the processors
total_p - the total number of processors that want to communicate with you
i - processor id
ri - request id (an id that goes from 0 to total_p, unique each time message_alloc is called)
ptr - a pointer to the vector_dist structure
Returns
the pointer where to store the message for the processor i

Definition at line 211 of file VCluster.hpp.

static void* Vcluster::msg_alloc_known ( size_t  msg_i,
size_t  total_msg,
size_t  total_p,
size_t  i,
size_t  ri,
void *  ptr 
)
inlinestaticprivate

Call-back to allocate buffer to receive data.

Parameters
msg_i - size required to receive the message from i
total_msg - total size to receive from all the processors
total_p - the total number of processors that want to communicate with you
i - processor id
ri - request id (an id that goes from 0 to total_p, unique each time message_alloc is called)
ptr - a pointer to the vector_dist structure
Returns
the pointer where to store the message for the processor i

Definition at line 247 of file VCluster.hpp.

template<typename op , typename T , typename S , template< typename > class layout_base>
void Vcluster::prepare_send_buffer ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt 
)
inlineprivate

Prepare the send buffer and send the message to other processors.

Template Parameters
op - operation to execute when merging the received data
T - sending object
S - receiving object
Note
T and S must not be the same object, but an S.operation(T) must be defined; the flexibility of the operation is given by op
Parameters
send - sending buffer
recv - receiving object
prc_send - each object T in the vector send is sent to the processor specified in this list; this means that prc_send.size() == send.size()
prc_recv - list of processors from which we receive (output); in case of RECEIVE_KNOWN it must be filled
sz_recv - size of each received message (output); in case of RECEIVE_KNOWN it must be filled
opt - options; using RECEIVE_KNOWN enables patterns with lower latency

Definition at line 74 of file VCluster.hpp.

template<typename op , typename T , typename S , template< typename > class layout_base, unsigned int... prp>
void Vcluster::process_receive_buffer_with_prp ( S &  recv,
openfpm::vector< size_t > *  sz,
openfpm::vector< size_t > *  sz_byte,
op &  op_param 
)
inlineprivate

Process the receive buffer.

Template Parameters
op - operation to perform when merging the received data
T - type of sending object
S - type of receiving object
prp - properties to receive
Parameters
recv - receive object
sz - vector storing how many elements have been added to S per processor
sz_byte - bytes received on a per-processor basis
op_param - operation to perform when merging the received information with recv

Definition at line 279 of file VCluster.hpp.

void Vcluster::reorder_buffer ( openfpm::vector< size_t > &  prc,
openfpm::vector< size_t > &  sz_recv 
)
inline

reorder the receiving buffer

Parameters
prc - list of the receiving processors
sz_recv - list of sizes of the received messages (in bytes)

Definition at line 548 of file VCluster.hpp.

void Vcluster::reset_recv_buf ( )
inlineprivate

Reset the receive buffer.

Definition at line 168 of file VCluster.hpp.

template<typename T , typename S >
bool Vcluster::SGather ( T &  send,
S &  recv,
size_t  root 
)
inline

Semantic Gather, gather the data from all processors into one node.

Semantic communications differ from normal ones; in general they follow this model:

Gather(T,S,root,op=add);

"Gather" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: send a vector of structures, and merge them all together in one vector (the root varies with the enclosing test iteration i)

v1.resize(vcl.getProcessUnitID());
for (size_t i = 0 ; i < vcl.getProcessUnitID() ; i++)
{v1.get(i) = 5;}
vcl.SGather(v1,v2,(i%vcl.getProcessingUnits()));

Example: send a vector of structures, and merge them all together in one vector, gathering on root 0

v1.resize(vcl.getProcessUnitID());
for (size_t i = 0 ; i < vcl.getProcessUnitID() ; i++)
{v1.get(i) = 5;}
vcl.SGather(v1,v2,0);
Template Parameters
T - type of sending object
S - type of receiving object
Parameters
send - Object to send
recv - Object to receive
root - which node should collect the information
Returns
true if the function completed successfully

Definition at line 330 of file VCluster.hpp.
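A self-contained version of the gather-to-root example, as a hedged sketch (v1 and v2 are illustrative openfpm::vector<size_t> buffers):

// Hedged sketch: every processor sends rank-many elements, rank 0 collects them
Vcluster & vcl = create_vcluster();
openfpm::vector<size_t> v1; // send buffer
openfpm::vector<size_t> v2; // receive buffer (filled on the root only)

v1.resize(vcl.getProcessUnitID());
for (size_t i = 0 ; i < v1.size() ; i++)
    v1.get(i) = 5;

vcl.SGather(v1,v2,0); // on rank 0, v2 collects 0+1+...+(N-1) elements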

template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool Vcluster::SGather ( T &  send,
S &  recv,
openfpm::vector< size_t > &  prc,
openfpm::vector< size_t > &  sz,
size_t  root 
)
inline

Semantic Gather, gather the data from all processors into one node.

Semantic communications differ from normal ones; in general they follow this model:

Gather(T,S,root,op=add);

"Gather" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: send a vector of structures, and merge them all together in one vector (the root varies with the enclosing test iteration i)

v1.resize(vcl.getProcessUnitID());
for (size_t i = 0 ; i < vcl.getProcessUnitID() ; i++)
{v1.get(i) = 5;}
vcl.SGather(v1,v2,(i%vcl.getProcessingUnits()));

Example: send a vector of structures, and merge them all together in one vector, gathering on root 0

v1.resize(vcl.getProcessUnitID());
for (size_t i = 0 ; i < vcl.getProcessUnitID() ; i++)
{v1.get(i) = 5;}
vcl.SGather(v1,v2,0);
Template Parameters
T - type of sending object
S - type of receiving object
Parameters
send - Object to send
recv - Object to receive
root - which node should collect the information
prc - processors from which we received the information
sz - size of the received information for each processor
Returns
true if the function completed successfully

Definition at line 375 of file VCluster.hpp.

template<typename T , typename S >
bool Vcluster::SScatter ( T &  send,
S &  recv,
openfpm::vector< size_t > &  prc,
openfpm::vector< size_t > &  sz,
size_t  root 
)
inline

Semantic Scatter, scatter the data from one processor to the other node.

Semantic communications differ from normal ones; in general they follow this model:

Scatter(T,S,...,op=add);

"Scatter" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: scatter a vector of structures to the other processors

// Scatter pattern
for (size_t i = 0 ; i < vcl.getProcessingUnits() ; i++)
{
    sz.add(i % SSCATTER_MAX);
    prc.add(i);
}
vcl.SScatter(v1,v2,prc,sz,(i%vcl.getProcessingUnits()));
Template Parameters
T - type of sending object
S - type of receiving object
Parameters
send - Object to send
recv - Object to receive
prc - processors involved in the scatter
sz - size of each chunk
root - which processor should scatter the information
Returns
true if the function completed successfully

Definition at line 481 of file VCluster.hpp.
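A self-contained sketch of the scatter call above (hedged; buffer names and the layout of v1 are illustrative assumptions):

// Hedged sketch: root 0 scatters chunks of different sizes to the processors
Vcluster & vcl = create_vcluster();
openfpm::vector<size_t> v1;      // send buffer (meaningful on the root)
openfpm::vector<size_t> v2;      // receive buffer
openfpm::vector<size_t> prc, sz; // involved processors and chunk sizes

for (size_t i = 0 ; i < vcl.getProcessingUnits() ; i++)
{
    sz.add(i % SSCATTER_MAX); // chunk size for processor i
    prc.add(i);               // processor i receives the i-th chunk
}
// on the root, v1 is assumed to hold the chunks laid out one after the other
vcl.SScatter(v1,v2,prc,sz,0);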

template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool Vcluster::SSendRecv ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors.

Semantic communications differ from normal ones; in general they follow this model:

Recv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: send a vector of vectors to the other processors, each inner vector going to one specified processor

// A vector of vectors; we want to send each inner vector to one specified processor.
// Two empty vectors (v2 and v3) are used to receive the data.
// In this case each processor will send a message of different size to all the other
// processors, but the targets can also be a subset of the processors
v1.resize(vcl.getProcessingUnits());
// We fill the send buffer with some senseless data
for (size_t i = 0 ; i < v1.size() ; i++)
{
    // each vector is filled with a different message size
    for (size_t j = 0 ; j < i % SSCATTER_MAX ; j++)
        v1.get(i).add(j);

    // generate the sending list (in this case the sending list is all the other
    // processors, but in general it can be some of them, chosen at random)
    prc_send.add((i + vcl.getProcessUnitID()) % vcl.getProcessingUnits());
}
// Send, and receive from the other processors; v2 contains the received data.
// Because in this case v2 is an openfpm::vector<size_t>, all the received
// vectors are concatenated one after the other. For example, if the processor receives
// 3 openfpm::vector<size_t> of 3, 4 and 5 elements, v2 will be a vector of 12 elements
vcl.SSendRecv(v1,v2,prc_send,prc_recv2,sz_recv2);
// Send, and receive from the other processors; v3 contains the received data.
// Because in this case v3 is an openfpm::vector<openfpm::vector<size_t>>, the vectors from
// each processor are collected separately. For example, if the processor receives
// 3 openfpm::vector<size_t> of 3, 4 and 5 elements, v3 will be a vector of 3 elements
// (openfpm::vector), each having respectively 3, 4 and 5 elements
vcl.SSendRecv(v1,v3,prc_send,prc_recv3,sz_recv3);
Template Parameters
T - type of sending object
S - type of receiving object
Parameters
send - Object to send
recv - Object to receive
prc_send - destination processors
prc_recv - list of the receiving processors
sz_recv - number of elements added
opt - options
Returns
true if the function completed successfully

Definition at line 639 of file VCluster.hpp.

template<typename T , typename S , template< typename > class layout_base, int... prp>
bool Vcluster::SSendRecvP ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
openfpm::vector< size_t > &  sz_recv_byte,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors.

Semantic communications differ from normal ones; in general they follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

// Scatter pattern
for (size_t i = 0 ; i < vcl.getProcessingUnits() ; i++)
{
    sz.add(i % SSCATTER_MAX);
    prc.add(i);
}
vcl.SScatter(v1,v2,prc,sz,(i%vcl.getProcessingUnits()));
Template Parameters
T - type of sending object
S - type of receiving object
prp - properties for merging
Parameters
send - Object to send
recv - Object to receive
prc_send - destination processors
prc_recv - processors from which we received
sz_recv - number of elements added per processor
sz_recv_byte - size of the message received from each processor, in bytes
Returns
true if the function completed successfully

Definition at line 687 of file VCluster.hpp.

template<typename T , typename S , template< typename > class layout_base, int... prp>
bool Vcluster::SSendRecvP ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors.

Semantic communications differ from normal ones; in general they follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

// Scatter pattern
for (size_t i = 0 ; i < vcl.getProcessingUnits() ; i++)
{
    sz.add(i % SSCATTER_MAX);
    prc.add(i);
}
vcl.SScatter(v1,v2,prc,sz,(i%vcl.getProcessingUnits()));
Template Parameters
T - type of sending object
S - type of receiving object
prp - properties for merging
Parameters
send - Object to send
recv - Object to receive
prc_send - destination processors
prc_recv - list of the processors from which we receive
sz_recv - number of elements added per processor
Returns
true if the function completed successfully

Definition at line 735 of file VCluster.hpp.

template<typename op , typename T , typename S , template< typename > class layout_base, int... prp>
bool Vcluster::SSendRecvP_op ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
op &  op_param,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  recv_sz,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors.

Semantic communications differ from normal ones; in general they follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

// Scatter pattern
for (size_t i = 0 ; i < vcl.getProcessingUnits() ; i++)
{
    sz.add(i % SSCATTER_MAX);
    prc.add(i);
}
vcl.SScatter(v1,v2,prc,sz,(i%vcl.getProcessingUnits()));
Template Parameters
op - type of operation
T - type of sending object
S - type of receiving object
prp - properties for merging
Parameters
send - Object to send
recv - Object to receive
prc_send - destination processors
op_param - operation object (operation to perform when merging the information)
recv_sz - size of each receiving buffer; this parameter is an output, but with RECEIVE_KNOWN you must provide it as input
prc_recv - processors from which we receive messages; this parameter is an output, but with RECEIVE_KNOWN you must provide it as input
opt - options; the default is NONE, another is RECEIVE_KNOWN. In that case each processor is assumed to know which processors it receives from and the size of each message, so prc_recv and recv_sz are no longer output parameters but must be provided as input.
Returns
true if the function completed successfully

Definition at line 794 of file VCluster.hpp.


The documentation for this class was generated from the following file:
VCluster.hpp