template <unsigned int j, unsigned int i, typename Graph>
void optimize(size_t start_p, Graph & graph)
bool can_expand = true;
for (int d = 0 ; d < n_wf ; d++)
bool w_can_expand = true;
for (size_t sub = 0 ; sub < wf_d.size() ; sub++)
size_t sub_w = wf_d.get<0>(sub);
size_t exp_p = graph.getChild(sub_w,d).get<j>();
if (exp_p != domain_id)
if (w_can_expand == true)
for (size_t sub = 0 ; sub < wf_d.size() ; sub++)
wf_d.get<0>(sub) = wf_d.get<0>(sub) + gh.stride(d);
SubHyperCube<dim,dim-1> sub_hyp = hyp.getSubHyperCube(d);
#ifndef PARALLEL_DECOMPOSITION
size_t Np = v_cl.getProcessingUnits();
for (size_t p_id = v_cl.getProcessUnitID(); p_id < Np ; p_id += Np)
	id_sub.push_back(p_id);
auto & dec = g_dist.getDecomposition();
Vcluster & v_cl = *global_v_cluster;
val = dec.check_consistency();
BOOST_REQUIRE_EQUAL(val,true);
size_t n_grid = dec.getNLocalHyperCube();
for (size_t i = 0 ; i < n_grid ; i++)
Box<2,size_t> g_box = g_dist.getCellDecomposer().convertDomainSpaceIntoGridUnits(sub);
BOOST_REQUIRE_EQUAL(vol,k*k);
BOOST_AUTO_TEST_CASE( grid_dist_id_poisson_test_use)
typedef scalar<float> S;
auto pos = g_it.get();
g_dist.template get<S::ele>(pos) = (g_dist.template get<S::ele>(pos.move(0,1)) +
                                    g_dist.template get<S::ele>(pos.move(0,-1)) +
                                    g_dist.template get<S::ele>(pos.move(1,1)) +
                                    g_dist.template get<S::ele>(pos.move(1,-1))) / 4.0;
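For reference, the assignment above is the standard four-point (Jacobi) average used to relax the 2-D Laplace equation: the value at a point is replaced by the mean of its four grid neighbours,

u_{i,j} \leftarrow ( u_{i+1,j} + u_{i-1,j} + u_{i,j+1} + u_{i,j-1} ) / 4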
#include "CartDecomposition.hpp"
bool borderOrBulk(neighborhood & nb)
device::grid<1,size_t> nbr = nb.next();
while (iterator_nbr.hasNext())
if (subspace.isBound(data.template get<Point::x>(key_nbr)) == false)
	ret.bord.push_back(key);
template<unsigned int dim, typename T, template<typename> class layout, typename Memory,
         template<unsigned int, typename> class Domain, template<typename, typename, typename> class data_s>
ret.bord = boost::shared_ptr<T>(new T());
ret.inte = boost::shared_ptr<T>(new T());
ret.ext = boost::shared_ptr<T>(new T());
while (iterator.hasNext())
if (borderOrBulk(nb) == true)
ret.bord.push_back(key);
ret.bulk.push_back(key);
ret.ext.push_back(key);
Al.load("debug_matrix_single_processor");
std::unordered_map<size_t,size_t> map_row;
auto it2 = g_map.getDomainGhostIterator();
auto ginfo = g_map.getGridInfoVoid();
auto key = it2.get();
auto key_g = g_map.getGKey(key);
key_g += pd.getKP1();
bool is_negative = false;
for (size_t i = 0 ; i < Sys_eqs::dims ; i++)
if (key_g.get(i) < 0)
if (is_negative == true)
if (g_map.template get<0>(key) == 7)
map_row[g_map.template get<0>(key)] = ginfo.LinId(key_g);
Vcluster & v_cl = *global_v_cluster;
std::unordered_map<long int,float> cols;
b.resize(Sys_eqs::nvar * g_map.size());
bool is_first = skip_first;
T::value(g_map,key,gs,spacing,cols,1.0);
auto g_calc_pos = g_map.getGKey(key);
g_calc_pos += pd.getKP1();
for (auto it = cols.begin(); it != cols.end(); ++it )
trpl.last().row() = g_map.template get<0>(key)*Sys_eqs::nvar + id;
trpl.last().col() = it->first;
trpl.last().value() = it->second;
auto ginfo = g_map.getGridInfoVoid();
size_t r = (trpl.last().row() / Sys_eqs::nvar);
size_t r_rest = (trpl.last().row() % Sys_eqs::nvar);
size_t c = (trpl.last().col() / Sys_eqs::nvar);
size_t c_rest = (trpl.last().col() % Sys_eqs::nvar);
double val = trpl.last().value();
size_t rf = map_row[r] * 3 + r_rest;
size_t cf = map_row[c] * 3 + c_rest;
auto position_row = ginfo.InvLinId(rf / 3);
auto position_col = ginfo.InvLinId(cf / 3);
double valf = Al.getValue(rf,cf);
b(g_map.template get<0>(key)*Sys_eqs::nvar + id) = num;
typename Sys_eqs::SparseMatrix_type A;
typename Sys_eqs::SparseMatrix_type & getA()
A.resize(g_map.size()*Sys_eqs::nvar,g_map.size()*Sys_eqs::nvar);
typename Sys_eqs::Vector_type & getB()
void link_ebox_with_ibox()
for (size_t k = 0 ; k < dim ; k++)
std::cout << "Fixing internal external" << std::endl;
else if (cmb.c[k] == 1)
if (dom_ext < ext_ibox)
bool ret = fix_box_ig(bx_src,gdb_ext.get(i).Dbox,bx_dst,loc_eg_box.get(sub_id_dst).bid.get(k).cmb);
std::cerr << "ERROR FAIL TO FIX " << std::endl;
inline void fix_ie_g_box()
if (init_fix_ie_g_box == true)
	return;
std::unordered_map<long int,std::pair<long int,long int>> iglist;
for (size_t i = 0 ; i < dec.getNNProcessors() ; i++)
for (size_t j = 0 ; j < ig_box.get(i).bid.size() ; j++)
if (ig_box.get(i).bid.get(j).cmb != zero)
auto & ele = iglist[ig_box.get(i).bid.get(j).g_id];
for (size_t i = 0 ; i < dec.getNNProcessors() ; i++)
for (size_t j = 0 ; j < eg_box.get(i).bid.size() ; j++)
if (eg_box.get(i).bid.get(j).cmb != zero)
box_ext_send.get(i).add();
box_ext_send.get(i).last().bx = eg_box.get(i).bid.get(j).l_e_box;
box_ext_send.get(i).last().g_id = eg_box.get(i).bid.get(j).g_id;
prc.add(dec.IDtoProc(i));
v_cl.SSendRecv(box_ext_send,box_ext_recv,prc,prc_recv,sz_recv);
for (size_t i = 0 ; i < box_ext_recv.size() ; i++)
for (size_t j = 0 ; j < box_ext_recv.get(i).size() ; j++)
size_t proc_id = dec.ProctoID(prc_recv.get(i));
auto it = g_id_to_internal_ghost_box.get(proc_id).find(box_ext_recv.get(i).get(j).g_id);
if (it == g_id_to_internal_ghost_box.get(proc_id).end())
std::cerr << __FILE__ << ":" << __LINE__ << " warning unlinked external ghost box" << std::endl;
size_t link = it->second;
Box<dim,long int> & box_sub_i = gdb_ext.get(ig_box.get(proc_id).bid.get(link).sub).Dbox;
comb<dim> cmb = ig_box.get(proc_id).bid.get(link).cmb;
bool ret = fix_box_ig(box_i,box_sub_i,box_ext_recv.get(i).get(j).bx,cmb);
std::cerr << __FILE__ << ":" << __LINE__ << " an inconsistency between internal and external ghost boxes has been detected. The fix is not possible; please change your ghost size by a small amount, on the order of 10^-5 if you use float, 10^-14 if you use double" << std::endl;
auto & ele = iglist[box_ext_recv.get(i).get(j).g_id];
for (auto it = iglist.begin(); it != iglist.end(); ++it )
if (it->second.first != -1)
size_t a = it->second.first;
size_t b = it->second.second;
ig_box.get(a).bid.get(b).box.invalidate();
This class represents an N-dimensional box.
__device__ __host__ T getLow(int i) const
Get the i-th coordinate of the low bound of the box.
__device__ __host__ T getHigh(int i) const
Get the i-th coordinate of the high bound of the box.
T getVolumeKey() const
Get the volume spanned by the box, with P1 and P2 interpreted as grid keys.
__device__ __host__ void setHigh(int i, T val)
Set the i-th coordinate of the high bound of the box.
__device__ __host__ void setLow(int i, T val)
Set the i-th coordinate of the low bound of the box.
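A minimal usage sketch of the Box accessors listed above; the 2-D float instantiation and the header path are assumptions chosen only for illustration:

#include "Space/Shape/Box.hpp"   // header path assumed

// build a unit square and query its bounds
Box<2,float> box;
box.setLow(0, 0.0f);  box.setHigh(0, 1.0f);   // extent along dimension 0
box.setLow(1, 0.0f);  box.setHigh(1, 1.0f);   // extent along dimension 1

float lx = box.getLow(0);    // 0.0f, lower bound along dimension 0
float hy = box.getHigh(1);   // 1.0f, upper bound along dimension 1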
This class decomposes a space into sub-sub-domains and distributes them across processors.
This class calculates the elements of the hyper-cube.
static std::vector< comb< dim > > getCombinations_R(size_t d)
static size_t getNumberOfElements_R(size_t d)
Get the number of elements of dimension d.
This class implements the point shape in an N-dimensional space.
Sparse Matrix implementation.
This represents a sub-hyper-cube of a hyper-cube, such as a face or an edge of a cube.
void execute()
Execute all the requests.
size_t getProcessUnitID()
Get the process unit id.
Implementation of the VCluster class.
bool SSendRecv(openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
Semantic send and receive: send the data to processors and receive from the other processors.
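A hedged sketch of the SSendRecv exchange pattern used in fix_ie_g_box() above; the float payload and the empty buffers are placeholders, and v_cl is obtained as in the listing:

Vcluster & v_cl = *global_v_cluster;

openfpm::vector<openfpm::vector<float>> send;   // send.get(i) is shipped to prc_send.get(i)
openfpm::vector<openfpm::vector<float>> recv;   // filled with one packet per source processor
openfpm::vector<size_t> prc_send;               // destination processor ids
openfpm::vector<size_t> prc_recv;               // filled by the call: source processor ids
openfpm::vector<size_t> sz_recv;                // filled by the call: received element counts

v_cl.SSendRecv(send,recv,prc_send,prc_recv,sz_recv);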
Class that stores the internal, external and border parts of a dataset.
boost::shared_ptr< T > inte
Internal part of your data.
boost::shared_ptr< T > ext
External part of your data.
boost::shared_ptr< T > bord
Border part of the data.
This is a distributed grid.
Distributed grid iterator.
grid_key_dx is the key to access any element in the grid.
__device__ __host__ index_type get(index_type i) const
Get the i-th index.
Implementation of a 1-D std::vector-like structure.
Position of the element of dimension d in the hyper-cube of dimension dim.
void zero()
Set all the elements to zero.
signed char c[dim]
Array that stores the combination.
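A small sketch of how a comb is typically built and inspected, mirroring the checks in link_ebox_with_ibox() and fix_ie_g_box() above; the dimension 3 is arbitrary:

comb<3> cmb;
cmb.zero();        // start from the null combination
cmb.c[0] = 1;      // +1 shift along dimension 0

comb<3> zero;
zero.zero();

if (cmb != zero)   // non-null combination: the ghost box comes from a shifted copy
{
	for (size_t k = 0 ; k < 3 ; k++)
	{
		if (cmb.c[k] == 1)
		{
			// shifted in the positive direction along dimension k
		}
	}
}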