8 #ifndef DISTPARMETIS_UTIL_HPP
9 #define DISTPARMETIS_UTIL_HPP
13 #include "VTKWriter/VTKWriter.hpp"
14 #include "VCluster/VCluster.hpp"
86 #define BALANCE_CC_O(c) c+1
96 template<
typename Graph>
103 MPI_Comm
comm = (MPI_Comm)NULL;
123 Mg.
nvtxs[0] = sub_g.getNVertex();
124 Mg.
part =
new idx_t[sub_g.getNVertex()];
125 for (
size_t i = 0; i < sub_g.getNVertex(); i++)
129 Mg.
xadj =
new idx_t[sub_g.getNVertex() + 1];
130 Mg.
adjncy =
new idx_t[sub_g.getNEdge()];
131 Mg.
vwgt =
new idx_t[sub_g.getNVertex()];
132 Mg.
adjwgt =
new idx_t[sub_g.getNEdge()];
133 Mg.
vsize =
new idx_t[sub_g.getNVertex()];
141 for (
size_t i = 0, j = sub_g.firstId(); i < sub_g.getNVertex() && j <= sub_g.lastId(); i++, j++)
143 size_t idx = sub_g.nodeById(j);
146 Mg.
vwgt[i] = sub_g.vertex(idx).template get<nm_v::computation>();
147 Mg.
vsize[i] = sub_g.vertex(idx).template get<nm_v::migration>();
153 for (
size_t s = 0; s < sub_g.getNChilds(idx); s++)
155 Mg.
adjncy[prev + s] = sub_g.getChild(idx, s);
157 Mg.
adjwgt[prev + s] = sub_g.getChildEdge(idx, s).template get<nm_e::communication>();
161 prev += sub_g.getNChilds(idx);
185 MPI_Comm_dup(MPI_COMM_WORLD, &
comm);
299 Mg.
nvtxs[0] = sub_g.getNVertex();
332 Mg.
itr =
new real_t[1];
340 for (
int s = 0; s <
Mg.
nparts[0]; s++)
365 template<
unsigned int i>
371 ParMETIS_V3_PartKway((idx_t *) sub_g.getVtxdist()->getPointer(),
Mg.
xadj,
Mg.
adjncy,
Mg.
vwgt,
Mg.
adjwgt,
Mg.
wgtflag,
Mg.
numflag,
Mg.
ncon,
Mg.
nparts,
Mg.
tpwgts,
Mg.
ubvec,
Mg.
options,
Mg.
edgecut,
Mg.
part, &
comm);
379 for (
size_t id = 0, j = sub_g.firstId();
id < sub_g.getNVertex() && j <= sub_g.lastId();
id++, j++)
381 sub_g.vertex(sub_g.nodeById(j)).
template get<i>() =
Mg.
part[id];
393 template<
unsigned int i>
398 ParMETIS_V3_AdaptiveRepart((idx_t *) sub_g.getVtxdist()->getPointer(),
Mg.
xadj,
Mg.
adjncy,
Mg.
vwgt,
Mg.
vsize,
Mg.
adjwgt,
Mg.
wgtflag,
Mg.
numflag,
Mg.
ncon,
Mg.
nparts,
Mg.
tpwgts,
Mg.
ubvec,
Mg.
itr,
Mg.
options,
Mg.
edgecut,
Mg.
part, &
comm);
401 for (
size_t id = 0, j = sub_g.firstId();
id < sub_g.getNVertex() && j <= sub_g.lastId();
id++, j++)
403 sub_g.vertex(sub_g.nodeById(j)).
template get<i>() =
Mg.
part[id];
456 sub_g.deleteGhosts();
void constructAdjList(Graph &sub_g)
Construct Adjacency list.
idx_t * nparts
number of parts to partition the graph
real_t * tpwgts
Desired weight for each partition (one for each constraint)
idx_t * vsize
Array of the vertex size, basically is the total communication amount.
real_t * ubvec
For each partition, the load imbalance tolerated.
size_t getProcessUnitID()
Get the process unit id.
idx_t * nvtxs
The number of vertices in the graph.
DistParmetis(Vcluster &v_cl, size_t nc)
Constructor.
int p_id
Process rank information.
MPI_Comm comm
Communicator for OpenMPI.
idx_t * edgecut
Upon successful completion, the number of edges that are cut by the partitioning is written to this p...
~DistParmetis()
destructor
idx_t * options
Additional option for the graph partitioning.
void decompose(Graph &sub_g)
Decompose the graph.
Helper class to define Metis graph.
void reset(Graph &sub_g)
Reset graph and reconstruct it.
real_t * itr
This parameter describes the ratio of inter-processor communication time compared to data redistribution...
Implementation of VCluster class.
size_t nc
nc Number of partitions
idx_t * vwgt
Array that store the weight for each vertex.
idx_t * objval
Returns the total communication cost for each partition.
idx_t * adjwgt
The weight of the edge.
idx_t * adjncy
For each vertex it stores the list of all neighboring vertices.
idx_t * getPartition()
Get graph partition vector.
idx_t * numflag
This is used to indicate the numbering scheme that is used for the vtxdist, xadj, adjncy...
idx_t * xadj
For each vertex i it stores the start of its adjacency list.
void initSubGraph(Graph &sub_g)
Set the Sub-graph.
void refine(Graph &sub_g)
Refine the graph.
idx_t * part
Is an output vector containing the partition for each vertex.
Parmetis_dist_graph Mg
Graph in parmetis representation.