OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
 
Vcluster< InternalMemory > Class Template Reference

Implementation of VCluster class. More...

Detailed Description

template<typename InternalMemory = HeapMemory>
class Vcluster< InternalMemory >

Implementation of VCluster class.

This class implements communication functions such as summation, minimum, and maximum across processors, as well as Dynamic Sparse Data Exchange (DSDE).

Vcluster Min max sum
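The reduction members documented below (sum, max, min) operate on a value in place; the result is available after execute(). A minimal sketch, assuming the create_vcluster() entry point used throughout the OpenFPM examples:

// minimal reduction sketch (assumes create_vcluster() as in the OpenFPM tests)
Vcluster<> & vcl = create_vcluster();

size_t s  = vcl.getProcessUnitID();
size_t mx = vcl.getProcessUnitID();
size_t mn = vcl.getProcessUnitID();

vcl.sum(s);      // queue a sum across all processors
vcl.max(mx);     // queue a maximum reduction
vcl.min(mn);     // queue a minimum reduction
vcl.execute();   // execute all queued requests

// with N processors: s == 0+1+...+(N-1), mx == N-1, mn == 0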

Vcluster all gather

// each processor contributes its rank; afterwards every processor holds the full list of ranks (vcl and clt declared as in the OpenFPM tests)
Vcluster<> & vcl = create_vcluster();
openfpm::vector<size_t> clt;
size_t data = vcl.getProcessUnitID();
vcl.allGather(data,clt);
vcl.execute();
for (size_t i = 0 ; i < vcl.getProcessingUnits() ; i++)
	BOOST_REQUIRE_EQUAL(i,(size_t)clt.get(i));

Dynamic sparse data exchange with complex objects

Dynamic sparse data exchange with buffers

// We send one message for each target processor (one message is an
// openfpm::vector<unsigned char>, i.e. an array of bytes); each received
// message has the same type. Each processor communicates based on a list
// of processors; here we construct that list so that each processor
// communicates with the next 8 (by id) processors.
for (size_t i = 0 ; i < 8 && i < n_proc ; i++)
{
	size_t p_id = (i + 1 + vcl.getProcessUnitID()) % n_proc;

	// avoid communicating with yourself
	if (p_id != vcl.getProcessUnitID())
	{
		// create a hello message
		prc.add(p_id);
		message.add();

		std::ostringstream msg;
		msg << "Hello from " << vcl.getProcessUnitID() << " to " << p_id;
		std::string str(msg.str());

		// j is the fixed message size used by this test
		message.last().resize(j);
		memset(message.last().getPointer(),0,j);
		std::copy(str.c_str(),&(str.c_str())[msg.str().size()],&(message.last().get(0)));
	}
}
// For simplicity we create in advance a receiving buffer for all processors
recv_message.resize(n_proc);

// The communication pattern is known in advance here, so we can
// preallocate the receive buffers for the 8 expected senders
for (size_t i = 0 ; i < 8 && i < n_proc ; i++)
{
	long int p_id = vcl.getProcessUnitID() - i - 1;
	if (p_id < 0)
		p_id += n_proc;
	else
		p_id = p_id % n_proc;

	if (p_id != (long int)vcl.getProcessUnitID())
		recv_message.get(p_id).resize(j);
}

if (opt == RECEIVE_UNKNOWN)
{
	// send and receive
	commFunc<ip>(vcl,prc,message,msg_alloc,&recv_message);
}
void * msg_alloc(size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, size_t tag, void * ptr)
{
	// convert the void pointer argument into a pointer to the receiving buffers
	openfpm::vector<openfpm::vector<unsigned char>> * v = static_cast<openfpm::vector<openfpm::vector<unsigned char>> *>(ptr);

	if (create_vcluster().getProcessingUnits() <= 8)
	{if (totp_check) BOOST_REQUIRE_EQUAL(total_p,create_vcluster().getProcessingUnits()-1);}
	else
	{if (totp_check) BOOST_REQUIRE_EQUAL(total_p,(size_t)8);}

	BOOST_REQUIRE_EQUAL(msg_i, global_step);

	// allocate the memory to receive the message:
	// msg_i contains the size of the message, i the sender's processor id
	v->get(i).resize(msg_i);

	// return the pointer of the allocated memory
	return &(v->get(i).get(0));
}

Definition at line 58 of file VCluster.hpp.

#include <VCluster.hpp>

Inheritance diagram for Vcluster< InternalMemory >: inherits Vcluster_base< InternalMemory >

Data Structures

struct  base_info
 Base info. More...
 
struct  index_gen
 
struct  index_gen< index_tuple< prp... > >
 Process the receive buffer using the specified properties (meta-function) More...
 
struct  MetaFuncOrd
 metafunction More...
 

Public Member Functions

 Vcluster (int *argc, char ***argv)
 Constructor.
 
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool SGather (T &send, S &recv, size_t root)
 Semantic Gather, gather the data from all processors into one node.
 
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool SGather (T &send, S &recv, openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz, size_t root)
 Semantic Gather, gather the data from all processors into one node.
 
void barrier ()
 Just a call to MPI_Barrier.
 
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool SScatter (T &send, S &recv, openfpm::vector< size_t > &prc, openfpm::vector< size_t > &sz, size_t root)
 Semantic Scatter, scatter the data from one processor to the other nodes.
 
void reorder_buffer (openfpm::vector< size_t > &prc, const openfpm::vector< size_t > &tags, openfpm::vector< size_t > &sz_recv)
 reorder the receiving buffer
 
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool SSendRecv (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors.
 
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool SSendRecvAsync (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors asynchronous version.
 
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvP (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, openfpm::vector< size_t > &sz_recv_byte_out, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors (with properties)
 
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvPAsync (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, openfpm::vector< size_t > &sz_recv_byte_out, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors (with properties) asynchronous version.
 
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvP (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors (with properties)
 
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvPAsync (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors (with properties) asynchronous version.
 
template<typename op , typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvP_op (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, op &op_param, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &recv_sz, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors.
 
template<typename op , typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvP_opAsync (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, op &op_param, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &recv_sz, size_t opt=NONE)
 Semantic Send and receive, send the data to processors and receive from the other processors asynchronous version.
 
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool SSendRecvWait (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
 Synchronize with SSendRecv.
 
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvPWait (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, openfpm::vector< size_t > &sz_recv_byte_out, size_t opt=NONE)
 Synchronize with SSendRecvP.
 
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvPWait (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
 Synchronize with SSendRecvP.
 
template<typename op , typename T , typename S , template< typename > class layout_base, int ... prp>
bool SSendRecvP_opWait (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, op &op_param, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &recv_sz, size_t opt=NONE)
 Synchronize with SSendRecvP_op.
 
- Public Member Functions inherited from Vcluster_base< InternalMemory >
 Vcluster_base (int *argc, char ***argv)
 Virtual cluster constructor.
 
gpu::ofp_context_t & getgpuContext (bool iw=true)
 If NVIDIA CUDA is activated, return a GPU context.
 
MPI_Comm getMPIComm ()
 Get the MPI_Communicator (or processor group) this VCluster is using.
 
size_t getProcessingUnits ()
 Get the total number of processors.
 
size_t size ()
 Get the total number of processors.
 
void print_stats ()
 
void clear_stats ()
 
size_t getProcessUnitID ()
 Get the process unit id.
 
size_t rank ()
 Get the process unit id.
 
template<typename T >
void sum (T &num)
 Sum the numbers across all processors and get the result.
 
template<typename T >
void max (T &num)
 Get the maximum number across all processors (or reduction with infinity norm)
 
template<typename T >
void min (T &num)
 Get the minimum number across all processors (or reduction with infinity norm)
 
void progressCommunication ()
 In case of asynchronous communications like sendrecvMultipleMessagesNBXAsync, this function progresses the communication.
 
template<typename T >
void sendrecvMultipleMessagesNBX (openfpm::vector< size_t > &prc, openfpm::vector< T > &data, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &recv_sz, void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages.
 
template<typename T >
void sendrecvMultipleMessagesNBXAsync (openfpm::vector< size_t > &prc, openfpm::vector< T > &data, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &recv_sz, void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages asynchronous version.
 
template<typename T >
void sendrecvMultipleMessagesNBX (openfpm::vector< size_t > &prc, openfpm::vector< T > &data, void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages.
 
template<typename T >
void sendrecvMultipleMessagesNBXAsync (openfpm::vector< size_t > &prc, openfpm::vector< T > &data, void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages asynchronous version.
 
void sendrecvMultipleMessagesNBX (size_t n_send, size_t sz[], size_t prc[], void *ptr[], size_t n_recv, size_t prc_recv[], size_t sz_recv[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages.
 
void sendrecvMultipleMessagesNBXAsync (size_t n_send, size_t sz[], size_t prc[], void *ptr[], size_t n_recv, size_t prc_recv[], size_t sz_recv[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages asynchronous version.
 
void sendrecvMultipleMessagesNBX (size_t n_send, size_t sz[], size_t prc[], void *ptr[], size_t n_recv, size_t prc_recv[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages.
 
void sendrecvMultipleMessagesNBXAsync (size_t n_send, size_t sz[], size_t prc[], void *ptr[], size_t n_recv, size_t prc_recv[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages asynchronous version.
 
void sendrecvMultipleMessagesNBX (size_t n_send, size_t sz[], size_t prc[], void *ptr[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages.
 
void sendrecvMultipleMessagesNBXAsync (size_t n_send, size_t sz[], size_t prc[], void *ptr[], void *(*msg_alloc)(size_t, size_t, size_t, size_t, size_t, size_t, void *), void *ptr_arg, long int opt=NONE)
 Send and receive multiple messages asynchronous version.
 
void sendrecvMultipleMessagesNBXWait ()
 Send and receive multiple messages wait NBX communication to complete.
 
bool send (size_t proc, size_t tag, const void *mem, size_t sz)
 Send data to a processor.
 
template<typename T , typename Mem , template< typename > class gr>
bool send (size_t proc, size_t tag, openfpm::vector< T, Mem, gr > &v)
 Send data to a processor.
 
bool recv (size_t proc, size_t tag, void *v, size_t sz)
 Recv data from a processor.
 
template<typename T , typename Mem , template< typename > class gr>
bool recv (size_t proc, size_t tag, openfpm::vector< T, Mem, gr > &v)
 Recv data from a processor.
 
template<typename T , typename Mem , template< typename > class gr>
bool allGather (T &send, openfpm::vector< T, Mem, gr > &v)
 Gather the data from all processors.
 
template<typename T , typename Mem , template< typename > class layout_base>
bool Bcast (openfpm::vector< T, Mem, layout_base > &v, size_t root)
 Broadcast the data to all processors.
 
void execute ()
 Execute all the requests.
 
void clear ()
 Release the buffer used for communication.
 

Private Types

typedef Vcluster_base< InternalMemory > self_base
 

Private Member Functions

template<typename op , typename T , typename S , template< typename > class layout_base>
void prepare_send_buffer (openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt)
 Prepare the send buffer and send the message to other processors.
 
void reset_recv_buf ()
 Reset the receive buffer.
 
template<typename op , typename T , typename S , template< typename > class layout_base, unsigned int ... prp>
void process_receive_buffer_with_prp (S &recv, openfpm::vector< size_t > *sz, openfpm::vector< size_t > *sz_byte, op &op_param, size_t opt)
 Process the receive buffer.
 

Static Private Member Functions

static void * msg_alloc (size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, size_t tag, void *ptr)
 Call-back to allocate buffer to receive data.
 
static void * msg_alloc_known (size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, size_t tag, void *ptr)
 Call-back to allocate buffer to receive data.
 

Private Attributes

ExtPreAlloc< HeapMemory > * mem [NQUEUE]
 
openfpm::vector< size_t > sz_recv_byte [NQUEUE]
 
openfpm::vector< const void * > send_buf
 
openfpm::vector< size_t > send_sz_byte
 
openfpm::vector< size_t > prc_send_
 
unsigned int NBX_prc_scnt = 0
 
unsigned int NBX_prc_pcnt = 0
 
HeapMemory * pmem [NQUEUE]
 
base_info< InternalMemory > NBX_prc_bi [NQUEUE]
 

Additional Inherited Members

- Data Fields inherited from Vcluster_base< InternalMemory >
openfpm::vector< size_t > sz_recv_tmp
 
- Protected Attributes inherited from Vcluster_base< InternalMemory >
openfpm::vector_fr< BMemory< InternalMemory > > recv_buf [NQUEUE]
 Receive buffers.
 
openfpm::vector< size_t > tags [NQUEUE]
 tags receiving
 

Member Typedef Documentation

◆ self_base

template<typename InternalMemory = HeapMemory>
typedef Vcluster_base<InternalMemory> Vcluster< InternalMemory >::self_base
private

Definition at line 123 of file VCluster.hpp.

Constructor & Destructor Documentation

◆ Vcluster()

template<typename InternalMemory = HeapMemory>
Vcluster< InternalMemory >::Vcluster ( int *  argc,
char ***  argv 
)
inline

Constructor.

Parameters
argc    main number of arguments
argv    main set of arguments

Definition at line 418 of file VCluster.hpp.
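In application code a Vcluster is normally not constructed directly; it is created by openfpm_init() and retrieved through create_vcluster(), as in the OpenFPM examples. A minimal sketch, assuming those entry points:

#include <iostream>
#include "VCluster.hpp"

int main(int argc, char* argv[])
{
	// initialize the library (this also constructs the global Vcluster)
	openfpm_init(&argc,&argv);

	Vcluster<> & vcl = create_vcluster();

	if (vcl.getProcessUnitID() == 0)
		std::cout << "Running on " << vcl.getProcessingUnits() << " processors" << std::endl;

	openfpm_finalize();
	return 0;
}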

Member Function Documentation

◆ barrier()

template<typename InternalMemory = HeapMemory>
void Vcluster< InternalMemory >::barrier ( )
inline

Just a call to MPI_Barrier.

Definition at line 589 of file VCluster.hpp.

◆ msg_alloc()

template<typename InternalMemory = HeapMemory>
static void * Vcluster< InternalMemory >::msg_alloc ( size_t  msg_i,
size_t  total_msg,
size_t  total_p,
size_t  i,
size_t  ri,
size_t  tag,
void *  ptr 
)
inlinestaticprivate

Call-back to allocate buffer to receive data.

Parameters
msg_i    size required to receive the message from i
total_msg    total size to receive from all the processors
total_p    the total number of processors that want to communicate with you
i    processor id
ri    request id (an id that goes from 0 to total_p, unique every time msg_alloc is called)
ptr    a pointer to the vector_dist structure
Returns
the pointer where to store the message for the processor i

Definition at line 318 of file VCluster.hpp.

◆ msg_alloc_known()

template<typename InternalMemory = HeapMemory>
static void * Vcluster< InternalMemory >::msg_alloc_known ( size_t  msg_i,
size_t  total_msg,
size_t  total_p,
size_t  i,
size_t  ri,
size_t  tag,
void *  ptr 
)
inlinestaticprivate

Call-back to allocate buffer to receive data.

Parameters
msg_i    size required to receive the message from i
total_msg    total size to receive from all the processors
total_p    the total number of processors that want to communicate with you
i    processor id
ri    request id (an id that goes from 0 to total_p, unique every time msg_alloc is called)
ptr    a pointer to the vector_dist structure
Returns
the pointer where to store the message for the processor i

Definition at line 366 of file VCluster.hpp.

◆ prepare_send_buffer()

template<typename InternalMemory = HeapMemory>
template<typename op , typename T , typename S , template< typename > class layout_base>
void Vcluster< InternalMemory >::prepare_send_buffer ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt 
)
inlineprivate

Prepare the send buffer and send the message to other processors.

Template Parameters
op    operation to execute in merging the received data
T    sending object
S    receiving object
Note
T and S must not be the same object, but S.operation(T) must be defined; the flexibility of the operation is defined by op
Parameters
send    sending buffer
recv    receiving object
prc_send    each object T in the vector send is sent to one processor specified in this list; this means that prc_send.size() == send.size()
prc_recv    list of processors from which we receive (output); in case of RECEIVE_KNOWN it must be filled
sz_recv    size of each receiving message (output); in case of RECEIVE_KNOWN it must be filled
opt    options; RECEIVE_KNOWN enables patterns with lower latency, in which case prc_recv and sz_recv must be filled as inputs

Definition at line 171 of file VCluster.hpp.

◆ process_receive_buffer_with_prp()

template<typename InternalMemory = HeapMemory>
template<typename op , typename T , typename S , template< typename > class layout_base, unsigned int ... prp>
void Vcluster< InternalMemory >::process_receive_buffer_with_prp ( S &  recv,
openfpm::vector< size_t > *  sz,
openfpm::vector< size_t > *  sz_byte,
op &  op_param,
size_t  opt 
)
inlineprivate

Process the receive buffer.

Template Parameters
op    operation to do in merging the received data
T    type of sending object
S    type of receiving object
prp    properties to receive
Parameters
recv    receive object
sz    vector that stores how many elements have been added per processor on S
sz_byte    bytes received on a per processor basis
op_param    operation to do in merging the received information with recv

Definition at line 398 of file VCluster.hpp.

◆ reorder_buffer()

template<typename InternalMemory = HeapMemory>
void Vcluster< InternalMemory >::reorder_buffer ( openfpm::vector< size_t > &  prc,
const openfpm::vector< size_t > &  tags,
openfpm::vector< size_t > &  sz_recv 
)
inline

reorder the receiving buffer

Parameters
prc    list of the receiving processors
tags    message tags, needed to reorder
sz_recv    list of sizes of the receiving messages (in bytes)

Definition at line 692 of file VCluster.hpp.

◆ reset_recv_buf()

template<typename InternalMemory = HeapMemory>
void Vcluster< InternalMemory >::reset_recv_buf ( )
inlineprivate

Reset the receive buffer.

Definition at line 297 of file VCluster.hpp.

◆ SGather() [1/2]

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool Vcluster< InternalMemory >::SGather ( T &  send,
S &  recv,
openfpm::vector< size_t > &  prc,
openfpm::vector< size_t > &  sz,
size_t  root 
)
inline

Semantic Gather, gather the data from all processors into one node.

Semantic communications differ from normal ones; they generally follow this model:

Gather(T,S,root,op=add);

"Gather" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: send a vector of structures and merge them all together in one vector

Template Parameters
T    type of sending object
S    type of receiving object
Parameters
send    Object to send
recv    Object to receive
root    which node should collect the information
prc    processors from which we received the information
sz    size of the received information for each processor
Returns
true if the function completed successfully

Definition at line 495 of file VCluster.hpp.
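A hedged usage sketch (assuming an openfpm::vector works as both T and S, as in the OpenFPM tests): every processor contributes one element and the root receives the concatenation.

Vcluster<> & vcl = create_vcluster();

openfpm::vector<float> send;
send.add((float)vcl.getProcessUnitID());   // one element per processor

openfpm::vector<float> recv;               // S: must implement recv.add(send)
openfpm::vector<size_t> prc;               // output on root: source processors
openfpm::vector<size_t> sz;                // output on root: elements per source

vcl.SGather(send,recv,prc,sz,0);           // root = 0

if (vcl.getProcessUnitID() == 0)
{
	// recv holds one element per processor;
	// sz.get(k) elements came from processor prc.get(k)
}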

◆ SGather() [2/2]

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool Vcluster< InternalMemory >::SGather ( T &  send,
S &  recv,
size_t  root 
)
inline

Semantic Gather, gather the data from all processors into one node.

Semantic communications differ from normal ones; they generally follow this model:

Gather(T,S,root,op=add);

"Gather" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: send a vector of structures and merge them all together in one vector

Template Parameters
T    type of sending object
S    type of receiving object
Parameters
send    Object to send
recv    Object to receive
root    which node should collect the information
Returns
true if the function completed successfully

Definition at line 450 of file VCluster.hpp.

◆ SScatter()

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool Vcluster< InternalMemory >::SScatter ( T &  send,
S &  recv,
openfpm::vector< size_t > &  prc,
openfpm::vector< size_t > &  sz,
size_t  root 
)
inline

Semantic Scatter, scatter the data from one processor to the other nodes.

Semantic communications differ from normal ones; they generally follow this model:

Scatter(T,S,...,op=add);

"Scatter" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: scatter a vector of structures to the other processors

Template Parameters
T    type of sending object
S    type of receiving object
Parameters
send    Object to send
recv    Object to receive
prc    processors involved in the scatter
sz    size of each chunk
root    which processor should scatter the information
Returns
true if the function completed successfully

Definition at line 621 of file VCluster.hpp.
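A hedged sketch of the pattern described above: the root prepares one chunk per processor and each processor receives its chunk (prc and sz only need to be meaningful on the root).

Vcluster<> & vcl = create_vcluster();
size_t n_proc = vcl.getProcessingUnits();

openfpm::vector<float> send;
openfpm::vector<size_t> prc;   // destination of each chunk
openfpm::vector<size_t> sz;    // size of each chunk

if (vcl.getProcessUnitID() == 0)
{
	for (size_t p = 0 ; p < n_proc ; p++)
	{
		prc.add(p);
		sz.add(4);                        // 4 elements per processor
		for (size_t k = 0 ; k < 4 ; k++)
			send.add((float)(p*4+k));
	}
}

openfpm::vector<float> recv;
vcl.SScatter(send,recv,prc,sz,0);      // each processor gets its 4 elements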

◆ SSendRecv()

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool Vcluster< InternalMemory >::SSendRecv ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors.

Semantic communications differ from normal ones; they generally follow this model:

Recv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: scatter a vector of structures to the other processors

Template Parameters
T    type of sending object
S    type of receiving object
Parameters
send    Object to send
recv    Object to receive
prc_send    destination processors
prc_recv    list of the receiving processors
sz_recv    number of elements added
opt    options
Returns
true if the function completed successfully

Definition at line 797 of file VCluster.hpp.
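A hedged sketch of a ring exchange using this interface: each processor sends one chunk to its right neighbour; since the receive side is discovered automatically (DSDE), prc_recv and sz_recv are produced as output.

Vcluster<> & vcl = create_vcluster();
size_t rank   = vcl.getProcessUnitID();
size_t n_proc = vcl.getProcessingUnits();

openfpm::vector<openfpm::vector<float>> send;  // one object T per destination
openfpm::vector<size_t> prc_send;

send.add();
send.last().add((float)rank);                  // payload: our rank
prc_send.add((rank + 1) % n_proc);             // send to the right neighbour

openfpm::vector<float> recv;                   // merged via recv.add(...)
openfpm::vector<size_t> prc_recv;              // output: who we received from
openfpm::vector<size_t> sz_recv;               // output: elements per sender

vcl.SSendRecv(send,recv,prc_send,prc_recv,sz_recv);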

◆ SSendRecvAsync()

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool Vcluster< InternalMemory >::SSendRecvAsync ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors asynchronous version.

See also
progressCommunication to progress communications; SSendRecvWait for synchronizing

Semantic communications differ from normal ones; they generally follow this model:

Recv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add(T).

Example: scatter a vector of structures to the other processors

Template Parameters
T    type of sending object
S    type of receiving object
Parameters
send    Object to send
recv    Object to receive
prc_send    destination processors
prc_recv    list of the receiving processors
sz_recv    number of elements added
opt    options
Returns
true if the function completed successfully

Definition at line 858 of file VCluster.hpp.
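A hedged sketch of the intended call pattern, reusing the buffers from the SSendRecv sketch above: start the exchange, overlap local work, then synchronize with the matching Wait call.

// start the exchange without blocking
vcl.SSendRecvAsync(send,recv,prc_send,prc_recv,sz_recv);

// ... do local work here; optionally call vcl.progressCommunication()
//     from time to time to help the exchange make progress ...

// synchronize: recv, prc_recv and sz_recv are valid only after this returns
vcl.SSendRecvWait(send,recv,prc_send,prc_recv,sz_recv);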

◆ SSendRecvP() [1/2]

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvP ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
openfpm::vector< size_t > &  sz_recv_byte_out,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors (with properties)

Semantic communications differ from normal ones; they generally follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

Template Parameters
T    type of sending object
S    type of receiving object
prp    properties for merging
Parameters
send    Object to send
recv    Object to receive
prc_send    destination processors
prc_recv    processors from which we received
sz_recv    number of elements added per processor
sz_recv_byte_out    message received from each processor in bytes
Returns
true if the function completed successfully

Definition at line 901 of file VCluster.hpp.

◆ SSendRecvP() [2/2]

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvP ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors (with properties)

Semantic communications differ from normal ones; they generally follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

Template Parameters
T    type of sending object
S    type of receiving object
prp    properties for merging
Parameters
send    Object to send
recv    Object to receive
prc_send    destination processors
prc_recv    list of the processors from which we receive
sz_recv    number of elements added per processor
Returns
true if the function completed successfully

Definition at line 1004 of file VCluster.hpp.
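A hedged sketch of a property-wise exchange; the aggregate payload type and the explicit template arguments (typically required because the property pack cannot be deduced) are illustrative assumptions, not the only valid choices.

// hypothetical payload: property 0 is a scalar, property 1 a 3-vector
typedef aggregate<float,float[3]> part;

openfpm::vector<openfpm::vector<part>> send;   // one chunk per destination
openfpm::vector<part> recv;
openfpm::vector<size_t> prc_send, prc_recv, sz_recv;

// ... fill send and prc_send as in the SSendRecv sketch ...

// exchange only properties 0 and 1
vcl.SSendRecvP<openfpm::vector<part>,decltype(recv),memory_traits_lin,0,1>
              (send,recv,prc_send,prc_recv,sz_recv);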

◆ SSendRecvP_op()

template<typename InternalMemory = HeapMemory>
template<typename op , typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvP_op ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
op &  op_param,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  recv_sz,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors.

Semantic communications differ from normal ones; they generally follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

Template Parameters
op    type of operation
T    type of sending object
S    type of receiving object
prp    properties for merging
Parameters
send    Object to send
recv    Object to receive
prc_send    destination processors
op_param    operation object (operation to do in merging the information)
recv_sz    size of each receiving buffer; output, unless RECEIVE_KNOWN is used, in which case you must fill this parameter
prc_recv    processors from which we receive; output, unless RECEIVE_KNOWN is used, in which case you must fill this parameter
opt    options; the default is NONE, another option is RECEIVE_KNOWN. In that case each processor is assumed to know from which processors it receives and the size of each message, so prc_recv and recv_sz become inputs rather than outputs.
Returns
true if the function completed successfully

Definition at line 1117 of file VCluster.hpp.

◆ SSendRecvP_opAsync()

template<typename InternalMemory = HeapMemory>
template<typename op , typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvP_opAsync ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
op &  op_param,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  recv_sz,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors asynchronous version.

See also
progressCommunication to incrementally progress the communication; SSendRecvP_opWait to synchronize

Semantic communications differ from normal ones; they generally follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

Template Parameters
op    type of operation
T    type of sending object
S    type of receiving object
prp    properties for merging
Parameters
send    Object to send
recv    Object to receive
prc_send    destination processors
op_param    operation object (operation to do in merging the information)
recv_sz    size of each receiving buffer; output, unless RECEIVE_KNOWN is used, in which case you must fill this parameter
prc_recv    processors from which we receive; output, unless RECEIVE_KNOWN is used, in which case you must fill this parameter
opt    options; the default is NONE, another option is RECEIVE_KNOWN. In that case each processor is assumed to know from which processors it receives and the size of each message, so prc_recv and recv_sz become inputs rather than outputs.
Returns
true if the function completed successfully

Definition at line 1185 of file VCluster.hpp.

◆ SSendRecvP_opWait()

template<typename InternalMemory = HeapMemory>
template<typename op , typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvP_opWait ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
op &  op_param,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  recv_sz,
size_t  opt = NONE 
)
inline

Synchronize with SSendRecvP_op.

Note
arguments are discussed in SSendRecvP_opAsync

Definition at line 1328 of file VCluster.hpp.

◆ SSendRecvPAsync() [1/2]

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvPAsync ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
openfpm::vector< size_t > &  sz_recv_byte_out,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors (with properties) asynchronous version.

See also
progressCommunication to progress communications; SSendRecvPWait for synchronizing

Semantic communications differ from normal ones; they generally follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

Template Parameters
T    type of sending object
S    type of receiving object
prp    properties for merging
Parameters
send    Object to send
recv    Object to receive
prc_send    destination processors
prc_recv    processors from which we received
sz_recv    number of elements added per processor
sz_recv_byte_out    message received from each processor in bytes
Returns
true if the function completed successfully

Definition at line 961 of file VCluster.hpp.

◆ SSendRecvPAsync() [2/2]

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvPAsync ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt = NONE 
)
inline

Semantic Send and receive, send the data to processors and receive from the other processors (with properties) asynchronous version.

See also
progressCommunication to progress communications; SSendRecvPWait for synchronizing

Semantic communications differ from normal ones; they generally follow this model:

SSendRecv(T,S,...,op=add);

"SendRecv" indicates the communication pattern, or how the information flows. T is the object to send, S is the object that will receive the data. In order to work, S must implement the interface S.add<prp...>(T).

Example: scatter a vector of structures to the other processors

Template Parameters
T    type of sending object
S    type of receiving object
prp    properties for merging
Parameters
send    Object to send
recv    Object to receive
prc_send    destination processors
prc_recv    list of the processors from which we receive
sz_recv    number of elements added per processor
Returns
true if the function completed successfully

Definition at line 1062 of file VCluster.hpp.

◆ SSendRecvPWait() [1/2]

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvPWait ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
openfpm::vector< size_t > &  sz_recv_byte_out,
size_t  opt = NONE 
)
inline

Synchronize with SSendRecvP.

Note
arguments are discussed in SSendRecvPAsync

Definition at line 1247 of file VCluster.hpp.

◆ SSendRecvPWait() [2/2]

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base, int ... prp>
bool Vcluster< InternalMemory >::SSendRecvPWait ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt = NONE 
)
inline

Synchronize with SSendRecvP.

Note
arguments are discussed in SSendRecvPAsync

Definition at line 1286 of file VCluster.hpp.

◆ SSendRecvWait()

template<typename InternalMemory = HeapMemory>
template<typename T , typename S , template< typename > class layout_base = memory_traits_lin>
bool Vcluster< InternalMemory >::SSendRecvWait ( openfpm::vector< T > &  send,
S &  recv,
openfpm::vector< size_t > &  prc_send,
openfpm::vector< size_t > &  prc_recv,
openfpm::vector< size_t > &  sz_recv,
size_t  opt = NONE 
)
inline

Synchronize with SSendRecv.

Note
arguments are discussed in SSendRecvAsync

Definition at line 1208 of file VCluster.hpp.

Field Documentation

◆ mem

template<typename InternalMemory = HeapMemory>
ExtPreAlloc<HeapMemory>* Vcluster< InternalMemory >::mem[NQUEUE]
private

Definition at line 61 of file VCluster.hpp.

◆ NBX_prc_bi

template<typename InternalMemory = HeapMemory>
base_info<InternalMemory> Vcluster< InternalMemory >::NBX_prc_bi[NQUEUE]
private

Definition at line 121 of file VCluster.hpp.

◆ NBX_prc_pcnt

template<typename InternalMemory = HeapMemory>
unsigned int Vcluster< InternalMemory >::NBX_prc_pcnt = 0
private

Definition at line 72 of file VCluster.hpp.

◆ NBX_prc_scnt

template<typename InternalMemory = HeapMemory>
unsigned int Vcluster< InternalMemory >::NBX_prc_scnt = 0
private

Definition at line 71 of file VCluster.hpp.

◆ pmem

template<typename InternalMemory = HeapMemory>
HeapMemory* Vcluster< InternalMemory >::pmem[NQUEUE]
private

Definition at line 77 of file VCluster.hpp.

◆ prc_send_

template<typename InternalMemory = HeapMemory>
openfpm::vector<size_t> Vcluster< InternalMemory >::prc_send_
private

Definition at line 69 of file VCluster.hpp.

◆ send_buf

template<typename InternalMemory = HeapMemory>
openfpm::vector<const void *> Vcluster< InternalMemory >::send_buf
private

Definition at line 67 of file VCluster.hpp.

◆ send_sz_byte

template<typename InternalMemory = HeapMemory>
openfpm::vector<size_t> Vcluster< InternalMemory >::send_sz_byte
private

Definition at line 68 of file VCluster.hpp.

◆ sz_recv_byte

template<typename InternalMemory = HeapMemory>
openfpm::vector<size_t> Vcluster< InternalMemory >::sz_recv_byte[NQUEUE]
private

Definition at line 64 of file VCluster.hpp.


The documentation for this class was generated from the following file:

VCluster.hpp