5 #include <unordered_map>
6 #include "Grid/map_grid.hpp"
7 #include "VCluster/VCluster.hpp"
8 #include "Space/SpaceBox.hpp"
9 #include "util/mathutil.hpp"
10 #include "Iterators/grid_dist_id_iterator_dec.hpp"
11 #include "Iterators/grid_dist_id_iterator.hpp"
12 #include "Iterators/grid_dist_id_iterator_sub.hpp"
13 #include "grid_dist_key.hpp"
14 #include "NN/CellList/CellDecomposer.hpp"
15 #include "util/object_util.hpp"
16 #include "memory/ExtPreAlloc.hpp"
17 #include "VTKWriter/VTKWriter.hpp"
18 #include "Packer_Unpacker/Packer.hpp"
19 #include "Packer_Unpacker/Unpacker.hpp"
20 #include "Decomposition/CartDecomposition.hpp"
21 #include "data_type/aggregate.hpp"
23 #include "grid_dist_id_comm.hpp"
24 #include "HDF5_wr/HDF5_wr.hpp"
27 template<
unsigned int dim>
40 #define GRID_SUB_UNIT_FACTOR 64
68 template<
unsigned int dim,
typename St,
typename T,
typename Decomposition = CartDecomposition<dim,St>,
typename Memory=HeapMemory ,
typename device_gr
id=gr
id_cpu<dim,T> >
102 CellDecomposer_sm<dim,St,shift<dim,St>>
cd_sm;
149 grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> * g =
static_cast<grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> *
>(ptr);
155 size_t lc_id = g->
dec.ProctoID(i);
176 for (
size_t i = 0 ; i < dim; i++)
183 else if (cmb[i] == 1)
188 else if (cmb[i] == -1)
223 for (
size_t i = 0 ; i < dim ; i++)
230 else if (cmb.
c[i] == -1)
237 sub_domain_other_exp.
enlarge(g);
238 if (sub_domain_other_exp.
Intersect(sub_domain,ib) ==
false)
240 for (
size_t i = 0 ; i < dim ; i++)
253 auto g =
cd_sm.getGrid();
258 for (
size_t i = 0 ; i <
dec.getNNProcessors() ; i++)
261 auto&& pib =
ig_box.last();
263 pib.prc =
dec.IDtoProc(i);
264 for (
size_t j = 0 ; j <
dec.getProcessorNIGhost(i) ; j++)
274 size_t sub_id =
dec.getProcessorIGhostSub(i,j);
275 size_t r_sub =
dec.getProcessorIGhostSSub(i,j);
277 auto & n_box =
dec.getNearSubdomains(
dec.IDtoProc(i));
280 sub +=
gdb_ext.get(sub_id).origin;
283 n_box.get(r_sub),
dec.getProcessorIGhostPos(i,j),
294 bid_t.
g_id =
dec.getProcessorIGhostId(i,j);
295 bid_t.
sub =
dec.getProcessorIGhostSub(i,j);
296 bid_t.
cmb =
dec.getProcessorIGhostPos(i,j);
297 bid_t.
r_sub =
dec.getProcessorIGhostSSub(i,j);
313 auto g =
cd_sm.getGrid();
325 for(
size_t i = 0 ; i <
dec.getNNProcessors() ; i++)
327 for (
size_t j = 0 ; j <
ig_box.get(i).bid.
size() ; j++)
329 box_int_send.get(i).add();
330 box_int_send.get(i).last().bx =
ig_box.get(i).bid.get(j).box;
331 box_int_send.get(i).last().g_id =
ig_box.get(i).bid.get(j).g_id;
332 box_int_send.get(i).last().r_sub =
ig_box.get(i).bid.get(j).r_sub;
333 box_int_send.get(i).last().cmb =
ig_box.get(i).bid.get(j).cmb;
335 prc.add(
dec.IDtoProc(i));
338 v_cl.
SSendRecv(box_int_send,box_int_recv,prc,prc_recv,sz_recv);
345 for (
size_t i = 0 ; i < box_int_recv.
size() ; i++)
347 size_t p_id =
dec.ProctoID(prc_recv.get(i));
348 auto&& pib =
eg_box.get(p_id);
349 pib.prc = prc_recv.get(i);
352 for (
size_t j = 0 ; j < box_int_recv.get(i).
size() ; j++)
354 size_t send_list_id = box_int_recv.get(i).get(j).r_sub;
360 size_t sub_id = s_sub.get(send_list_id);
364 bid_t.
cmb = box_int_recv.get(i).get(j).cmb;
365 bid_t.
cmb.sign_flip();
368 bid_t.
g_id = box_int_recv.get(i).get(j).g_id;
371 tb -=
gdb_ext.get(sub_id).origin;
389 auto g =
cd_sm.getGrid();
394 for (
size_t i = 0 ; i <
dec.getNSubDomain() ; i++)
399 for (
size_t j = 0 ; j <
dec.getLocalNIGhost(i) ; j++)
410 size_t r_sub =
dec.getLocalIGhostSub(i,j);
413 sub +=
gdb_ext.get(sub_id).origin;
423 pib.bid.last().box = ib;
424 pib.bid.last().sub =
dec.getLocalIGhostSub(i,j);
425 pib.bid.last().k =
dec.getLocalIGhostE(i,j);
426 pib.bid.last().cmb =
dec.getLocalIGhostPos(i,j);
439 auto g =
cd_sm.getGrid();
446 for (
size_t i = 0 ; i <
dec.getNSubDomain() ; i++)
454 pib.bid.resize(
dec.getLocalNEGhost(k));
457 pib.bid.get(s).sub =
dec.getLocalEGhostSub(k,s);
458 pib.bid.get(s).cmb =
loc_ig_box.get(i).bid.get(j).cmb;
459 pib.bid.get(s).cmb.sign_flip();
460 pib.bid.get(s).k = j;
461 pib.bid.get(s).initialized =
true;
475 for (
size_t i = 0 ; i < dim ; i++)
478 std::cerr <<
"Error: " << __FILE__ <<
":" << __LINE__ <<
" distrobuted grids with size smaller than 2 are not supported\n";
488 size_t n_grid =
dec.getNSubDomain();
500 for (
size_t i = 0 ; i < n_grid ; i++)
510 for (
size_t j = 0 ; j < dim ; j++)
527 check_new(
this,8,GRID_DIST_EVENT,4);
540 cd_sm.setDimensions(cd_old,ext);
556 getCellDecomposerPar<dim>(c_g,
g_sz,bc);
571 for (
size_t i = 0 ; i < dim ; i++) {this->
g_sz[i] =
g_sz[i];}
576 size_t n_sub = n_proc * GRID_SUB_UNIT_FACTOR;
581 for (
size_t i = 0 ; i < dim ; i++)
582 {div[i] = openfpm::math::round_big_2(pow(n_sub,1.0/dim));}
597 for (
size_t i = 0 ; i < dim ; i++) {this->
g_sz[i] =
g_sz[i];}
636 for (
size_t i = 0 ; i < dim ; i++)
666 static const unsigned int dims = dim;
699 return cd_sm.getCellBox().getHigh(i);
741 check_new(
this,8,GRID_DIST_EVENT,4);
745 for (
size_t i = 0 ; i < dim ; i++) {ext_dim[i] = g.getGridInfoVoid().size(i) + ext.
getKP1().get(i) + ext.
getKP2().get(i);}
757 for (
size_t i = 0 ; i < dim ; i++)
761 if (g.getDecomposition().periodicity(i) == NON_PERIODIC)
763 this->
domain.
setLow(i,g.getDomain().getLow(i) - ext.
getLow(i) * g.spacing(i) - g.spacing(i) / 2.0);
764 this->
domain.
setHigh(i,g.getDomain().getHigh(i) + ext.
getHigh(i) * g.spacing(i) + g.spacing(i) / 2.0);
786 const size_t (&
g_sz)[dim],
792 check_new(
this,8,GRID_DIST_EVENT,4);
812 check_new(
this,8,GRID_DIST_EVENT,4);
834 check_new(
this,8,GRID_DIST_EVENT,4);
840 this->dec = dec.duplicate(ghost);
861 check_new(
this,8,GRID_DIST_EVENT,4);
912 :domain(domain),
ghost(g),
ghost_int(INVALID_GHOST),dec(create_vcluster()),
v_cl(create_vcluster()),
916 check_new(
this,8,GRID_DIST_EVENT,4);
940 check_new(
this,8,GRID_DIST_EVENT,4);
1011 check_valid(
this,8);
1026 check_valid(
this,8);
1028 for (
size_t i = 0 ; i < dim ; i++)
1030 if (gk.
get(i) < 0 || gk.
get(i) >= (
long int)
g_sz[i])
1045 check_valid(
this,8);
1051 total +=
gdb_ext.get(i).Dbox.getVolumeKey();
1065 check_valid(
this,8);
1071 total +=
gdb_ext.get(i).GDbox.getVolumeKey();
1086 check_valid(
this,8);
1099 check_valid(
this,8);
1110 v_cl.
send(i,0,&size,
sizeof(
size_t));
1115 v_cl.
recv(0,0,&size_r,
sizeof(
size_t));
1142 check_valid(
this,8);
1163 check_valid(
this,8);
1183 template<
unsigned int Np>
1188 check_valid(
this,8);
1209 check_valid(
this,8);
1231 check_valid(
this,8);
1274 check_valid(
this,8);
1297 template <
unsigned int p = 0>
inline auto get(
const grid_dist_key_dx<dim> & v1)
const ->
typename std::add_lvalue_reference<decltype(
loc_grid.get(v1.getSub()).
template get<p>(v1.getKey()))>::type
1300 check_valid(
this,8);
1302 return loc_grid.get(v1.getSub()).
template get<p>(v1.getKey());
1313 template <
unsigned int p = 0>
inline auto get(
const grid_dist_key_dx<dim> & v1) ->
typename std::add_lvalue_reference<decltype(
loc_grid.get(v1.getSub()).
template get<p>(v1.getKey()))>::type
1316 check_valid(
this,8);
1318 return loc_grid.get(v1.getSub()).
template get<p>(v1.getKey());
1329 template <
unsigned int p = 0>
inline auto get(
grid_dist_g_dx<device_grid> & v1)
const ->
typename std::add_lvalue_reference<decltype(v1.getSub()->template get<p>(v1.getKey()))>::type
1332 check_valid(
this,8);
1334 return v1.getSub()->template get<p>(v1.getKey());
1345 template <
unsigned int p = 0>
inline auto get(
grid_dist_g_dx<device_grid> & v1) ->
typename std::add_lvalue_reference<decltype(v1.getSub()->template get<p>(v1.getKey()))>::type
1348 check_valid(
this,8);
1350 return v1.getSub()->template get<p>(v1.getKey());
1361 template <
unsigned int p = 0>
inline auto get(
const grid_dist_lin_dx & v1)
const ->
typename std::add_lvalue_reference<decltype(
loc_grid.get(v1.getSub()).
template get<p>(v1.getKey()))>::type
1364 check_valid(
this,8);
1366 return loc_grid.get(v1.getSub()).
template get<p>(v1.getKey());
1377 template <
unsigned int p = 0>
inline auto get(
const grid_dist_lin_dx & v1) ->
typename std::add_lvalue_reference<decltype(
loc_grid.get(v1.getSub()).
template get<p>(v1.getKey()))>::type
1380 check_valid(
this,8);
1382 return loc_grid.get(v1.getSub()).
template get<p>(v1.getKey());
1395 return this->
template get<p>(v1);
1408 return this->
template get<p>(v1);
1440 check_valid(
this,8);
1469 template<
template<
typename,
typename>
class op,
int... prp>
void ghost_put()
1472 check_valid(
this,8);
1509 grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> &
copy(
grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & g,
bool use_memcpy =
true)
1511 if (T::noPointers() ==
true && use_memcpy)
1517 long int start = gs_src.LinId(
gdb_ext.get(i).Dbox.getKP1());
1518 long int stop = gs_src.LinId(
gdb_ext.get(i).Dbox.getKP2());
1520 if (stop < start) {
continue;}
1522 void * dst =
static_cast<void *
>(
static_cast<char *
>(this->
get_loc_grid(i).getPointer()) + start*
sizeof(T));
1523 void * src =
static_cast<void *
>(
static_cast<char *
>(g.
get_loc_grid(i).getPointer()) + start*
sizeof(T));
1525 memcpy(dst,src,
sizeof(T) * (stop + 1 - start));
1543 auto Cp = it.template getStencil<0>();
1545 dst.get_o(Cp) = src.get_o(Cp);
1562 return cd_sm.getCellBox().getP2();
1578 check_valid(
this,8);
1581 size_t sub_id = k.
getSub();
1586 k_glob = k_glob +
gdb_ext.get(sub_id).origin;
1602 bool write(std::string output,
size_t opt = VTK_WRITER | FORMAT_ASCII)
1605 check_valid(
this,8);
1607 file_type ft = file_type::ASCII;
1609 if (opt & FORMAT_BINARY)
1610 ft = file_type::BINARY;
1636 bool write_frame(std::string output,
size_t i,
size_t opt = VTK_WRITER | FORMAT_ASCII)
1639 check_valid(
this,8);
1641 file_type ft = file_type::ASCII;
1643 if (opt & FORMAT_BINARY)
1644 ft = file_type::BINARY;
1683 gdb_ext.get(i).Dbox.getKP2());
1693 template<
unsigned int Np>
1723 return check_whoami(
this,8);
1735 std::cout <<
"-------- External Ghost boxes ---------- " << std::endl;
1739 std::cout <<
"Processor: " <<
eg_box.get(i).prc <<
" Boxes:" << std::endl;
1741 for (
size_t j = 0; j <
eg_box.get(i).bid.
size() ; j++)
1743 std::cout <<
" Box: " <<
eg_box.get(i).bid.get(j).g_e_box.toString() <<
" Id: " <<
eg_box.get(i).bid.get(j).g_id << std::endl;
1747 std::cout <<
"-------- Internal Ghost boxes ---------- " << std::endl;
1751 std::cout <<
"Processor: " <<
ig_box.get(i).prc <<
" Boxes:" << std::endl;
1753 for (
size_t j = 0 ; j <
ig_box.get(i).bid.
size() ; j++)
1755 std::cout <<
" Box: " <<
ig_box.get(i).bid.get(j).box.toString() <<
" Id: " <<
ig_box.get(i).bid.get(j).g_id << std::endl;
1789 inline void save(
const std::string & filename)
const
1796 inline void load(
const std::string & filename)
bool init_e_g_box
Flag that indicate if the external ghost box has been initialized.
size_t g_id
Global id of the internal ghost box.
std::unordered_map< size_t, size_t > g_id_to_external_ghost_box
Decomposition decomposition
Decomposition used.
static Ghost< dim, float > convert_ghost(const Ghost< dim, long int > &gd, const CellDecomposer_sm< dim, St, shift< dim, St >> &cd_sm)
Convert a ghost from grid point units into continuous space.
openfpm::vector< size_t > recv_sz
Receiving size.
size_t g_sz[dim]
Size of the grid on each dimension.
grid_dist_id(const size_t(&g_sz)[dim], const Box< dim, St > &domain, const Ghost< dim, St > &g, const periodicity< dim > &p)
Vcluster & v_cl
Communicator class.
void magnify_fix_P1(T mg)
Magnify the box by a factor keeping fix the point P1.
openfpm::vector< e_lbox_grid< dim > > loc_eg_box
Local external ghost boxes in grid units.
This class represents an N-dimensional box.
comb< dim > cmb
In which sector live the box.
auto getProp(const grid_dist_key_dx< dim > &v1) -> decltype(this->template get< p >(v1))
Get the reference of the selected element.
grid_dist_iterator< dim, device_grid, FREE > getDomainIterator() const
It returns an iterator that spans the full grid domain (each processor spans its local domain) ...
Point< dim, St > getSpacing()
Get the spacing on each dimension.
void ghost_put_(const openfpm::vector< ip_box_grid< dim >> &ig_box, const openfpm::vector< ep_box_grid< dim >> &eg_box, const openfpm::vector< i_lbox_grid< dim >> &loc_ig_box, const openfpm::vector< e_lbox_grid< dim >> &loc_eg_box, const openfpm::vector< GBoxes< device_grid::dims >> &gdb_ext, openfpm::vector< device_grid > &loc_grid, openfpm::vector< std::unordered_map< size_t, size_t >> &g_id_to_internal_ghost_box)
It merges the information in the ghost with the real information.
comb< dim > cmb
Sector position of the external ghost.
grid_dist_id(const Decomposition &dec, const size_t(&g_sz)[dim], const Ghost< dim, long int > &g)
void getGlobalGridsInfo(openfpm::vector< GBoxes< device_grid::dims >> &gdb_ext_global) const
It gathers the information about local grids for all of the processors.
openfpm::vector< GBoxes< device_grid::dims > > gdb_ext
Extension of each grid: Domain and ghost + domain.
const openfpm::vector< i_lbox_grid< dim > > & get_ig_box()
Get the internal ghost box.
size_t getLocalDomainWithGhostSize() const
Get the total number of grid points with ghost for the calling processor.
Decomposition dec
Space Decomposition.
bool write_frame(std::string output, size_t i, size_t opt=VTK_WRITER|FORMAT_ASCII)
Write the distributed grid information.
static const unsigned int dims
Number of dimensions.
T getLow(int i) const
get the i-coordinate of the low bound interval of the box
This class is a helper for the communication of grid_dist_id.
bool isValid() const
Check if the Box is a valid box P2 >= P1.
grid_key_dx is the key to access any element in the grid
Position of the element of dimension d in the hyper-cube of dimension dim.
size_t getProcessUnitID()
Get the process unit id.
Distributed linearized key.
const grid_sm< dim, void > & getGridInfoVoid() const
Get an object containing the grid information without type.
const CellDecomposer_sm< dim, St, shift< dim, St > > & getCellDecomposer() const
Return the cell decomposer.
Ghost< dim, St > ghost
Ghost expansion.
void execute()
Execute all the requests.
const grid_sm< dim, T > & getGridInfo() const
Get an object containing the grid information.
size_t sub
sub_id of the sub-domain in which this box lives
const openfpm::vector< i_lbox_grid< dim > > & get_loc_ig_box()
Get the internal local ghost box.
bool init_local_i_g_box
Indicate if the local internal ghost box has been initialized.
grid_key_dx< dim > getKP2() const
Get the point p12 as grid_key_dx.
grid_key_dx_iterator_sub< dim, stencil_offset_compute< dim, Np > > get_loc_grid_iterator_stencil(size_t i, const grid_key_dx< dim >(&stencil_pnt)[Np])
Get the i sub-domain grid.
openfpm::vector< device_grid > loc_grid_old
Old local grids.
size_t size() const
Return the size of the grid.
const Box< dim, St > getDomain() const
Get the domain where the grid is defined.
grid_dist_id(const grid_dist_id< dim, St, T, Decomposition, Memory, device_grid > &g)
Default copy constructor on this class makes no sense and is unsafe; this definition disables it...
grid_sm< dim, T > ginfo
Grid information object.
const Decomposition & getDecomposition() const
Get the object that store the information about the decomposition.
grid_dist_id(const grid_dist_id< dim, St, H, typename Decomposition::base_type, Memory, grid_cpu< dim, H >> &g, const Ghost< dim, long int > &gh, Box< dim, size_t > ext)
This constructor is special: it constructs an expanded grid that perfectly overlaps with the previous...
void Create()
Create the grids on memory.
void create_eg_box()
Create per-processor internal ghost box list in grid units.
bool write(std::string output, size_t opt=VTK_WRITER|FORMAT_ASCII)
Write the distributed grid information.
T getHigh(int i) const
get the high interval of the box
device_grid device_grid_type
Type of device grid.
void create_local_eg_box()
Create per-processor external ghost boxes list in grid units.
size_t size() const
Return the total number of points in the grid.
Box< dim, size_t > bx
Box in global unit.
void setHigh(int i, T val)
set the high interval of the box
openfpm::vector< GBoxes< device_grid::dims > > gdb_ext_global
Global gdb_ext.
Grid key for a distributed grid.
void check_size(const size_t(&g_sz)[dim])
Check the grid has a valid size.
This class implements the point shape in an N-dimensional space.
::Box< dim, long int > box
Box.
grid_dist_id(const Decomposition &dec, const size_t(&g_sz)[dim], const Ghost< dim, St > &ghost)
comb< dim > cmb
Sector where the linked external ghost box lives.
Point< dim, T > getP1() const
Get the point p1.
grid_dist_iterator_sub< dim, device_grid > getSubDomainIterator(const grid_key_dx< dim > &start, const grid_key_dx< dim > &stop) const
It returns an iterator that spans the grid domain only in the specified part.
grid_key_dx< dim > getKP1() const
Get the point p1 as grid_key_dx.
openfpm::vector< i_lbox_grid< dim > > loc_ig_box
Local internal ghost boxes in grid units.
grid_dist_id(const size_t(&g_sz)[dim], const Box< dim, St > &domain, const Ghost< dim, long int > &g)
Internal ghost box sent to construct external ghost box into the other processors.
bool send(size_t proc, size_t tag, const void *mem, size_t sz)
Send data to a processor.
grid_dist_id(const size_t(&g_sz)[dim], const Box< dim, St > &domain, const Ghost< dim, long int > &g, const periodicity< dim > &p)
mem_id get(size_t i) const
Get the i index.
Implementation of VCluster class.
void map()
It moves all the grid parts that do not belong to the local processor to the respective processor...
This structure stores the Box that defines the domain inside the Ghost + domain box.
void InitializeStructures(const size_t(&g_sz)[dim])
Initialize the grid.
This class defines the domain decomposition interface.
bool init_i_g_box
Flag that indicate if the internal ghost box has been initialized.
device_grid d_grid
Which kind of grid the structure stores.
bool init_local_e_g_box
Indicate if the local external ghost box has been initialized.
void InitializeCellDecomposer(const size_t(&g_sz)[dim], const size_t(&bc)[dim])
Initialize the Cell decomposer of the grid.
bool Intersect(const Box< dim, T > &b, Box< dim, T > &b_out) const
Intersect.
~grid_dist_id()
Destructor.
bool SSendRecv(openfpm::vector< T > &send, S &recv, openfpm::vector< size_t > &prc_send, openfpm::vector< size_t > &prc_recv, openfpm::vector< size_t > &sz_recv, size_t opt=NONE)
Semantic Send and receive, send the data to processors and receive from the other processors...
void ghost_get_(const openfpm::vector< ip_box_grid< dim >> &ig_box, const openfpm::vector< ep_box_grid< dim >> &eg_box, const openfpm::vector< i_lbox_grid< dim >> &loc_ig_box, const openfpm::vector< e_lbox_grid< dim >> &loc_eg_box, const openfpm::vector< GBoxes< device_grid::dims >> &gdb_ext, openfpm::vector< device_grid > &loc_grid, std::unordered_map< size_t, size_t > &g_id_to_external_ghost_box)
It fills the ghost part of the grids.
auto getProp(const grid_dist_key_dx< dim > &v1) const -> decltype(this->template get< p >(v1))
Get the reference of the selected element.
grid_dist_iterator_sub< dim, device_grid > getSubDomainIterator(const long int(&start)[dim], const long int(&stop)[dim]) const
It returns an iterator that spans the grid domain only in the specified part.
grid_key_dx< dim > getGKey(const grid_dist_key_dx< dim > &k)
Convert a g_dist_key_dx into a global key.
bool isInside(const grid_key_dx< dim > &gk) const
Check that the global grid key is inside the grid domain.
This is a distributed grid.
size_t getN_loc_grid()
Return the number of local grid.
CellDecomposer_sm< dim, St, shift< dim, St > > cd_sm
Structure that divide the space into cells.
Box< dim, size_t > getDomain(size_t i)
Given a local sub-domain i with a local grid Domain + ghost return the part of the local grid that is...
grid_dist_iterator< dim, device_grid, FREE > getOldDomainIterator() const
It returns an iterator that spans the full grid domain (each processor spans its local domain) ...
grid_sm< dim, void > ginfo_v
Grid information object without type.
it stores a box, its unique id, and the sub-domain it comes from
const openfpm::vector< GBoxes< device_grid::dims > > & getLocalGridsInfo()
It returns the information about the local grids.
void set_for_adjustment(const Box< dim, long int > &sub_domain, const Box< dim, St > &sub_domain_other, const comb< dim > &cmb, Box< dim, long int > &ib, Ghost< dim, long int > &g)
this function is for optimization of the ghost size
void enlarge(const Box< dim, T > &gh)
Enlarge the box with ghost margin.
void zero()
Set to zero the key.
bool recv(size_t proc, size_t tag, void *v, size_t sz)
Recv data from a processor.
const size_t(& getSize() const)[N]
Return the size of the grid as an array.
grid_dist_id(Decomposition &&dec, const size_t(&g_sz)[dim], const Ghost< dim, long int > &g)
static void * msg_alloc_external_box(size_t msg_i, size_t total_msg, size_t total_p, size_t i, size_t ri, void *ptr)
Call-back to allocate buffer to receive incoming objects (external ghost boxes)
bool is_staggered()
Indicate that this grid is not staggered.
openfpm::vector< ip_box_grid< dim > > ig_box
Internal ghost boxes in grid units.
void ghost_put()
It synchronize the ghost parts.
Distributed grid iterator.
bool SGather(T &send, S &recv, size_t root)
Semantic Gather, gather the data from all processors into one node.
void setLow(int i, T val)
set the low interval of the box
It store the information about the external ghost box.
Ghost< dim, long int > ghost_int
Ghost expansion.
device_grid & get_loc_grid(size_t i)
Get the i sub-domain grid.
Box< dim, St > domain
Domain.
bool init_fix_ie_g_box
Flag that indicate if the internal and external ghost box has been fixed.
void setPropNames(const openfpm::vector< std::string > &names)
Set the properties names.
Memory memory_type
Type of Memory.
void InitializeCellDecomposer(const CellDecomposer_sm< dim, St, shift< dim, St >> &cd_old, const Box< dim, size_t > &ext)
Initialize the Cell decomposer of the grid enforcing perfect overlap of the cells.
void ghost_get()
It synchronize the ghost parts.
grid_dist_id< dim, St, T, Decomposition, Memory, device_grid > & copy(grid_dist_id< dim, St, T, Decomposition, Memory, device_grid > &g, bool use_memcpy=true)
Copy the give grid into this grid.
::Box< dim, long int > l_e_box
Box defining the external ghost box in local coordinates.
::Box< dim, long int > g_e_box
Box defining the external ghost box in global coordinates.
void map_(Decomposition &dec, CellDecomposer_sm< dim, St, shift< dim, St >> &cd_sm, openfpm::vector< device_grid > &loc_grid, openfpm::vector< device_grid > &loc_grid_old, openfpm::vector< GBoxes< device_grid::dims >> &gdb_ext, openfpm::vector< GBoxes< device_grid::dims >> &gdb_ext_old, openfpm::vector< GBoxes< device_grid::dims >> &gdb_ext_global)
Moves all the grids that do not belong to the local processor to the respective processor...
grid_dist_iterator< dim, device_grid, FIXED > getDomainGhostIterator() const
It returns an iterator that spans the grid domain + ghost part.
openfpm::vector< std::string > prp_names
properties names
Declaration grid_key_dx_iterator_sub.
size_t r_sub
r_sub id of the sub-domain in the sent list
size_t size(size_t i) const
Return the total number of points in the grid.
void InitializeDecomposition(const size_t(&g_sz)[dim], const size_t(&bc)[dim])
Initialize the grid.
openfpm::vector< HeapMemory > recv_mem_gg
Receiving buffer for particles ghost get.
grid_key_dx_iterator_sub< dim, no_stencil > get_loc_grid_iterator(size_t i)
Get the i sub-domain grid.
grid_dist_iterator< dim, device_grid, FREE, stencil_offset_compute< dim, Np > > getDomainIteratorStencil(const grid_key_dx< dim >(&stencil_pnt)[Np]) const
It returns an iterator that spans the full grid domain (each processor spans its local domain) ...
void debugPrint()
It prints the internal ghost boxes and external ghost boxes in global units.
size_t getLocalDomainSize() const
Get the total number of grid points for the calling processor.
void create_ig_box()
Create per-processor internal ghost boxes list in grid units and g_id_to_external_ghost_box.
grid_dist_id(Decomposition &&dec, const size_t(&g_sz)[dim], const Ghost< dim, St > &ghost)
Distributed grid iterator.
openfpm::vector< ep_box_grid< dim > > eg_box
External ghost boxes in grid units.
Box< dim, long int > flip_box(const Box< dim, long int > &box, const comb< dim > &cmb)
flip_box just converts an internal ghost box into an external ghost box
void create_local_ig_box()
Create local internal ghost box in grid units.
openfpm::vector< device_grid > loc_grid
Local grids.
void one()
Set to one the key.
grid_dist_id(const size_t(&g_sz)[dim], const Box< dim, St > &domain, const Ghost< dim, St > &g)
size_t getProcessingUnits()
Get the total number of processors.
size_t r_sub
from which sub-domain this internal ghost box is generated (or with which sub-domain it is overlapping) ...
openfpm::vector< std::unordered_map< size_t, size_t > > g_id_to_internal_ghost_box
bool isInvalidGhost()
check if the Ghost is valid
void setDimensions(const size_t(&dims)[N])
Reset the dimension of the grid.
Decomposition & getDecomposition()
Get the object that store the information about the decomposition.
size_t getSub() const
Get the local grid.
long int who()
It return the id of structure in the allocation list.
char c[dim]
Array that store the combination.
St spacing(size_t i) const
Get the spacing of the grid in direction i.
Point< dim, St > getOffset(size_t i)
Get the point where it start the origin of the grid of the sub-domain i.
Vcluster & getVC()
Get the Virtual Cluster machine.
grid_key_dx< dim > getKey() const
Get the key.
Distributed linearized key.
openfpm::vector< GBoxes< device_grid::dims > > gdb_ext_old
Extension of each old grid (old): Domain and ghost + domain.