#ifndef VCLUSTER_LOG_HPP_
#define VCLUSTER_LOG_HPP_
//! Arm the delayed-logging timer: the dump below in this file is emitted
//! only once `log_delay` seconds have elapsed (see the getwct() >= log_delay
//! check further down).
//! \param log_delay delay in seconds before log output begins
//! NOTE(review): extraction fragment — the original source line numbers
//! ("48", "50") are fused into the code text and the braces are missing.
48 void start(
size_t log_delay)
50 this->log_delay = log_delay;
//! Build the per-process log target name "vcluster_log_<host>_<rank>".
//! \param rank MPI rank, used to make the log name unique per process
//! NOTE(review): `result` (the processor-name length output argument) is
//! declared outside this visible fragment — TODO confirm. The call that
//! actually opens the stream `f` on str.str() is truncated from this view.
59 void openLog(
size_t rank)
62 char p_name[MPI_MAX_PROCESSOR_NAME];
63 MPI_Get_processor_name(p_name, &result );
65 std::stringstream str;
66 str <<
"vcluster_log_" << p_name <<
"_" << rank;
//! Record the MPI_Status of a received message for later reporting.
//! NOTE(review): the body is missing from this extraction; the dump loop
//! below reads `r_log` entries (MPI_SOURCE / MPI_TAG), so presumably this
//! appends `stat` to r_log — verify against the full source.
75 void logRecv(MPI_Status & stat)
//! Record the destination rank of a send into `s_log`.
//! \param prc destination processor rank
//! NOTE(review): heavily garbled extraction fragment — the original
//! source line numbers are fused into the code text, braces and several
//! statements are missing, and what follows the signature below belongs
//! to a separate periodic-dump member in the original file.
85 void logSend(
size_t prc)
// Consistency check: there must be exactly one outstanding request per
// logged send, otherwise the per-request report below would misalign.
100 std::cerr <<
"Error: " << __FILE__ <<
":" << __LINE__ <<
" req.size() != s_log.size() " << req.
size() <<
"!=" << s_log.
size() <<
"\n" ;
// Emit the report only after `log_delay` seconds have elapsed on the
// timer `t` armed by start().
104 if (t.
getwct() >= log_delay)
106 f <<
"=============================== NBX ==================================\n";
110 f <<
"NBX counter: " << nbx <<
"\n";
// For each outstanding send request, query its completion state without
// freeing the request, then report peer rank, tag, and state.
114 for (
size_t i = 0 ; i < req.
size() ; i++)
117 MPI_SAFE_CALL(MPI_Request_get_status(req.get(i),&flag,&stat));
119 f <<
"Send to: " << s_log.get(i) <<
" with tag " << stat.MPI_TAG <<
" completed" <<
"\n";
// (pending branch — the if/else around these two writes is truncated)
121 f <<
"Send to: " << s_log.get(i) <<
" with tag " << stat.MPI_TAG <<
" pending" <<
"\n";
// Report every message received so far: source rank and tag.
128 for (
size_t j = 0 ; j < r_log.
size() ; j++)
130 f <<
"Received from: " << r_log.get(j).MPI_SOURCE <<
" with tag " << r_log.get(j).MPI_TAG <<
"\n";
// Barrier state of the NBX exchange (condition truncated from this view).
136 f <<
"Barrier status: active\n";
138 f <<
"Barrier status: inactive\n";
142 f <<
"======================================================================\n";
//! Disabled-logging stub: ignores the delay and does nothing.
//! \param log_delay unused in this build
inline void start(size_t log_delay) {}
//! Disabled-logging stub: no log file is ever opened.
//! \param rank unused in this build
inline void openLog(size_t rank) {}
172 inline void logRecv(MPI_Status & stat) {}
//! Disabled-logging stub: send destinations are not recorded.
//! \param prc unused in this build
inline void logSend(size_t prc) {}
//! Disabled-logging stub: nothing to clear. (Stray trailing semicolon
//! after the function body removed.)
inline void clear() {}
Implementation of a 1-D std::vector-like structure.
Class for CPU time benchmarking.
void reset()
Reset the timer.
void start()
Start the timer.
double getwct()
Return the elapsed real time.