8#ifndef AMR_GRID_AMR_DIST_HPP_ 
    9#define AMR_GRID_AMR_DIST_HPP_ 
   11#define OPENFPM_DATA_ENABLE_IO_MODULE 
   13#include "Grid/grid_dist_id.hpp" 
   14#include "Amr/grid_dist_amr_key_iterator.hpp" 
   17#include "SparseGridGpu/SparseGridGpu.hpp" 
   20#define AMR_IMPL_TRIVIAL 1 
   21#define AMR_IMPL_PATCHES 2 
   22#define AMR_IMPL_OPENVDB 3 
   24template<
typename Decomposition, 
typename garray>
 
   33    :dec(dec),gd_array(gd_array)
 
   48        for(
size_t i = 0 ; i < gd_array.size() ; i++)
 
   51            gd_array.get(i).getDecomposition() = dec.duplicate(gold);
 
   64        for(
size_t i = 0 ; i < gd_array.size() ; i++)
 
   67            gd_array.get(i).getDecomposition() = dec.duplicate(gold);
 
   80        for(
size_t i = 0 ; i < gd_array.size() ; i++)
 
   83            gd_array.get(i).getDecomposition() = dec.duplicate(gold);
 
   87    auto getDistribution() -> 
decltype(dec.getDistribution())
 
   89        return dec.getDistribution();
 
   94        for(
size_t i = 0 ; i < gd_array.size() ; i++)
 
   95        {gd_array.get(i).getDecomposition() = de.gd_array.get(i).getDecomposition();}
 
  100    bool write(std::string output)
 const 
  102        return dec.write(output);
 
  106template<
unsigned int dim,
 
  109         unsigned int impl=AMR_IMPL_TRIVIAL ,
 
  127template<
unsigned int dim,
 
  155    typedef decltype(device_grid::type_of_iterator()) 
device_it;
 
  180        for (
size_t i = 0; i < n_lvl - 1 ; i++)
 
  182            for (
size_t j = 0 ; j < dim ; j++)
 
  184                if (bc.bc[j] == NON_PERIODIC)
 
  185                {g_sz_lvl[j] = (g_sz_lvl[j]-1)*2 + 1;}
 
  187                {g_sz_lvl[j] = g_sz_lvl[j]*2;}
 
  191            gd_array.last().setBackgroundValue(bck);
 
  193            gd_array.last().getDecomposition().free_geo_cell();
 
  194            gd_array.last().getDecomposition().getDistribution().destroy_internal_graph();
 
  195            gd_array.last().getDecomposition().free_fines();
 
  211    :domain(domain),g_int(g)
 
  215        for (
size_t i = 0; i < dim ; i++)
 
  216        {bc.bc[i] = NON_PERIODIC;}
 
  227    :domain(domain),g_int(g),bc(bc)
 
  240        size_t g_sz_lvl[dim];
 
  242        for (
size_t i = 0; i < dim ; i++)
 
  243        {g_sz_lvl[i] = g_sz[i];}
 
  247        gd_array.last().setBackgroundValue(bck);
 
  249        initialize_other(n_lvl,g_sz_lvl);
 
  261        initLevels(dec.internal_dec(),n_lvl,g_sz);
 
  289        mv_off.resize(gd_array.size());
 
  291        for (
size_t i = 1 ; i < gd_array.size() ; i++)
 
  293            auto & g_box_c = gd_array.get(i-1).getLocalGridsInfo();
 
  294            auto & g_box_f = gd_array.get(i).getLocalGridsInfo();
 
  298            if (g_box_c.size() != g_box_f.size())
 
  300                std::cerr << __FILE__ << 
":" << __LINE__ << 
" error it seem that the AMR construction between level " <<
 
  301                        i << 
" and " << i-1 << 
" is inconsistent" << std::endl;
 
  306            mv_off.get(i-1).resize(g_box_f.size());
 
  307            mv_off.get(i).resize(g_box_f.size());
 
  309            for (
size_t j = 0 ; j < g_box_f.size() ; j++)
 
  311                for (
size_t s = 0 ; s < dim ; s++)
 
  313                    size_t d_orig_c = g_box_c.get(j).origin.get(s);
 
  314                    size_t d_orig_f = g_box_f.get(j).origin.get(s);
 
  316                    mv_off.get(i-1).get(j).dw.get(s) = d_orig_c*2 - d_orig_f;
 
  317                    mv_off.get(i).get(j).up.get(s) = d_orig_c*2 - d_orig_f;
 
  330    void initLevels(
size_t n_lvl,
const size_t (& g_sz)[dim], 
size_t opt = 0)
 
  332        size_t g_sz_lvl[dim];
 
  334        for (
size_t i = 0; i < dim ; i++)
 
  335        {g_sz_lvl[i] = g_sz[i];}
 
  340        initialize_other(n_lvl,g_sz_lvl);
 
  353        gd_array.get(0).addComputationCosts(md,ts);
 
  377        return gd_array.get(lvl);
 
  384    getDomainIteratorCells()
 
  388        for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
  393            for (
size_t j = 0 ; j < dim ; j++)
 
  396                if (bc.bc[j] == NON_PERIODIC)
 
  397                {stop.
set_d(j,getGridInfoVoid(i).size(j) - 2);}
 
  399                {stop.
set_d(j,getGridInfoVoid(i).size(j) - 1);}
 
  402            git_sub.add(gd_array.get(i).getSubDomainIterator(start,stop));
 
  415        for (
size_t j = 0 ; j < dim ; j++)
 
  418            if (bc.bc[j] == NON_PERIODIC)
 
  419            {stop.
set_d(j,getGridInfoVoid(lvl).size(j) - 2);}
 
  421            {stop.
set_d(j,getGridInfoVoid(lvl).size(j) - 1);}
 
  424        return gd_array.get(lvl).getSubDomainIterator(start,stop);
 
  437        for (
size_t i = 0 ; i < dim ; i++)
 
  440            key_stop.
set_d(i,g_int.
getHigh(i) + getGridInfoVoid(lvl).size(i) -1);
 
  443        return gd_array.get(lvl).getGridGhostIterator(key_start,key_stop);
 
  453        return gd_array.get(lvl).getGridIterator();
 
  463        return gd_array.get(lvl).getGridIterator(start,stop);
 
  473    auto getGridIteratorGPU(
size_t lvl) -> 
decltype(gd_array.get(lvl).getGridIteratorGPU())
 
  475        return gd_array.get(lvl).getGridIteratorGPU();
 
  490        for (
size_t j = 0 ; j < dim ; j++)
 
  493            if (bc.bc[j] == NON_PERIODIC)
 
  494            {stop.
set_d(j,getGridInfoVoid(lvl).size(j) - 2);}
 
  496            {stop.
set_d(j,getGridInfoVoid(lvl).size(j) - 1);}
 
  499        return gd_array.get(lvl).getGridIterator(start,stop);
 
  513        return gd_array.get(lvl).getDomainIterator();
 
  524    decltype(device_grid::type_of_iterator()),
 
  528            return gd_array.get(lvl).getDomainGhostIterator();
 
  541        for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
  543            git.add(gd_array.get(i).getDomainIterator());
 
  560            for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
  562                    git_g.add(gd_array.get(i).getDomainGhostIterator());
 
  577    template <
unsigned int p>
inline auto get(
const grid_dist_amr_key<dim> & v1) 
const -> 
decltype(gd_array.get(v1.getLvl()).template get<p>(v1.getKey()))
 
  582        return gd_array.get(v1.getLvl()).template get<p>(v1.getKey());
 
  593    template <
unsigned int p>
inline auto get(
const grid_dist_amr_key<dim> & v1) -> 
decltype(gd_array.get(v1.getLvl()).template get<p>(v1.getKey()))
 
  598        return gd_array.get(v1.getLvl()).template get<p>(v1.getKey());
 
  610    template <
unsigned int p>
inline auto get(
size_t lvl, 
const grid_dist_key_dx<dim> & v1) 
const -> 
decltype(gd_array.get(lvl).template get<p>(v1))
 
  615        return gd_array.get(lvl).template get<p>(v1);
 
  626    template <
unsigned int p>
inline auto get(
size_t lvl, 
const grid_dist_key_dx<dim> & v1) -> 
decltype(gd_array.get(lvl).template get<p>(v1))
 
  631        return gd_array.get(lvl).template get<p>(v1);
 
  645    template <
unsigned int p>
 
  647    -> 
decltype(gd_array.get(v1.getLvl()).template insert<p>(v1.getKey()))
 
  652        return gd_array.get(v1.getLvl()).template insert<p>(v1.getKey());
 
  666    -> 
decltype(gd_array.get(lvl).template insert<p>(v1))
 
  671        return gd_array.get(lvl).template insert<p>(v1);
 
  685        return gd_array.get(lvl);
 
  715        return gd_array.get(lvl).remove(v1);
 
  724        for (
int lvl = 0 ; lvl < gd_array.size() ; lvl++)
 
  728                gd_array.get(lvl).construct_link_dw(gd_array.get(lvl+1),mv_off.get(lvl));
 
  730            else if (lvl == gd_array.size() - 1)
 
  731            {gd_array.get(lvl).construct_link_up(gd_array.get(lvl-1),mv_off.get(lvl));}
 
  734                gd_array.get(lvl).construct_link_dw(gd_array.get(lvl+1),mv_off.get(lvl));
 
  735                gd_array.get(lvl).construct_link_up(gd_array.get(lvl-1),mv_off.get(lvl));
 
  745    template<
typename stencil_type>
 
  748        for (
int lvl = 0 ; lvl < gd_array.size() ; lvl++)
 
  750            gd_array.get(lvl).template tagBoundaries<stencil_type>();
 
  763        for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
  765            gd_array.get(i).template ghost_get<prp...>(opt);
 
  774        for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
  776            gd_array.get(i).map();
 
  787    template<
template<
typename,
typename> 
class op,
int... prp> 
void ghost_put()
 
  789        for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
  791            gd_array.get(i).template ghost_put<op,prp...>();
 
  802        return gd_array.get(lvl).size_local_inserted();
 
  813        for (
size_t i = 0 ; i < getNLvl() ; i++)
 
  814        {gd_array.get(i).setBackgroundValue(bv);}
 
  827        for (
size_t i = 0 ; i < getNLvl() ; i++)
 
  828        {gd_array.get(i).clear();}
 
  840        return gd_array.get(lvl).getGridInfoVoid();
 
  850        return gd_array.size();
 
  862        if (key.
getLvl() >= getNLvl() - 1)
 
  863        {std::cerr << __FILE__ << 
":" << __LINE__ << 
" error: we are already at the last level, we cannot go one level down" << std::endl;}
 
  868        size_t lvl = key.
getLvl();
 
  870        for (
size_t i = 0 ; i < dim ; i++)
 
  872            key_ref.set_d(i,(key_ref.get(i) << 1) + mv_off.get(key.
getLvl()).get(key.
getKeyRef().
getSub()).dw.get(i) );
 
  888        if (lvl >= getNLvl() - 1)
 
  889        {std::cerr << __FILE__ << 
":" << __LINE__ << 
" error: we are already at the last level, we cannot go one level down" << std::endl;}
 
  895        for (
size_t i = 0 ; i < dim ; i++)
 
  926        {std::cerr << __FILE__ << 
":" << __LINE__ << 
" error: we are already at the top level, we cannot go one level up" << std::endl;}
 
  931        size_t lvl = key.
getLvl();
 
  933        for (
size_t i = 0 ; i < dim ; i++)
 
  935            key_ref.set_d(i,(key_ref.get(i) - mv_off.get(key.
getLvl()).get(key.
getKeyRef().
getSub()).up.get(i)) >> 1);
 
  952        {std::cerr << __FILE__ << 
":" << __LINE__ << 
" error: we are already at the top level, we cannot go one level up" << std::endl;}
 
  958        for (
size_t i = 0 ; i < dim ; i++)
 
  990        return gd_array.
get(lvl).getGKey(v1);
 
 1014        return gd_array.get(lvl).getPos(v1);
 
 1026        return gd_array.get(lvl).getSpacing();
 
 1036        return gd_array.get(v1.
getLvl()).existPoint(v1.
getKey());
 
 1046        return gd_array.get(lvl).existPoint(v1);
 
 1055    bool write(std::string output, 
size_t opt = VTK_WRITER | FORMAT_ASCII )
 
 1059        for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
 1061            ret &= gd_array.get(i).write(output + 
"_" + std::to_string(i),opt);
 
 1072    template<
unsigned int ... prp> 
void deviceToHost()
 
 1074        for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
 1076            gd_array.get(i).template deviceToHost<prp ...>();
 
 1083    template<
unsigned int ... prp> 
void hostToDevice()
 
 1085        for (
size_t i = 0 ; i < gd_array.size() ; i++)
 
 1087            gd_array.get(i).template hostToDevice<prp ...>();
 
 1094template<
unsigned int dim, 
typename St, 
typename T>
 
 1099template<
unsigned int dim, 
typename St, 
typename T, 
unsigned int blockEdgeSize = 8>
 
 1100using sgrid_dist_amr_gpu = 
grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,CartDecomposition<dim,St,CudaMemory,memory_traits_inte>,
CudaMemory,
SparseGridGpu<dim,T,blockEdgeSize,IntPow<blockEdgeSize,dim>::value >>;
 
This class represents an N-dimensional box.
__device__ __host__ T getLow(int i) const
get the i-coordinate of the low bound interval of the box
__device__ __host__ T getHigh(int i) const
get the high interval of the box
This class decomposes a space into sub-sub-domains and distributes them across processors.
void refine(size_t ts)
Refine the decomposition, available only for ParMetis distribution, for Metis it is a null call.
void decompose()
Start decomposition.
void redecompose(size_t ts)
Refine the decomposition, available only for ParMetis distribution, for Metis it is a null call.
This class defines the domain decomposition interface.
This class allocates and destroys CPU memory.
This class implements the point shape in an N-dimensional space.
decltype(device_grid::type_of_subiterator()) device_sub_it
Type of structure sub-grid iterator.
Ghost< dim, long int > g_int
Ghost integer.
size_t getNLvl()
Return the maximum number of levels in the AMR struct.
auto get(const grid_dist_amr_key< dim > &v1) -> decltype(gd_array.get(v1.getLvl()).template get< p >(v1.getKey()))
Get the reference of the selected element.
Decomposition_encap< Decomposition, decltype(gd_array)> getDecomposition()
Get the object that store the information about the decomposition.
auto insert(size_t lvl, const grid_dist_key_dx< dim > &v1) -> decltype(gd_array.get(lvl).template insert< p >(v1))
Get the reference of the selected element.
void construct_level_connections()
construct level connections for padding particles
auto get(size_t lvl, const grid_dist_key_dx< dim > &v1) const -> decltype(gd_array.get(lvl).template get< p >(v1))
Get the reference of the selected element.
Point< dim, St > getPos(int lvl, const grid_dist_key_dx< dim > &v1)
Get the position of the point.
void recalculate_mvoff()
Recalculate the offset array for the moveLvlUp and moveLvlDw.
openfpm::vector< grid_dist_iterator< dim, device_grid, device_sub_it, FREE > > git
Domain iterator for each distributed grid.
grid_key_dx< dim > getGKey(const grid_dist_amr_key< dim > &v1)
Get the position on the grid in global coordinates.
void remove(const grid_dist_amr_key< dim > &v1)
Remove a grid point (this function makes sense only for sparse grids)
auto getGridIteratorCells(size_t lvl) -> decltype(gd_array.get(lvl).getGridIterator())
Get an iterator to the grid.
grid_dist_amr_key< dim > getAMRKey(size_t lvl, grid_dist_key_dx< dim > key)
From a distributed key, return an AMR key that also contains the grid level.
grid_dist_amr_key_iterator< dim, device_grid, decltype(device_grid::type_of_subiterator())> getDomainIterator()
Get domain iterator.
auto getGridIterator(size_t lvl) -> decltype(gd_array.get(lvl).getGridIterator())
Get an iterator to the grid.
grid_dist_id< dim, St, T, Decomposition, Memory, device_grid > & getLevel(size_t lvl)
Get the underlying grid level.
void moveLvlDw(grid_dist_amr_key< dim > &key)
Move down (to finer level) the key.
void setBackgroundValue(T &bv)
set the background value
openfpm::vector< grid_dist_iterator_sub< dim, device_grid > > git_sub
Iterator for each distributed grid.
Point< dim, St > getPos(const grid_dist_amr_key< dim > &v1)
Get the position of the point.
void ghost_get(size_t opt=0)
It synchronizes the ghost parts.
void ghost_put()
Apply the ghost put.
size_t size_inserted(size_t lvl)
Return the number of inserted points on a particular level.
void addComputationCosts(Model md=Model(), size_t ts=1)
Add the computation cost on the decomposition using a resolution function.
grid_dist_amr(const Box< dim, St > &domain, const Ghost< dim, long int > &g, periodicity< dim > &bc)
Constructor.
void moveLvlUp(grid_dist_amr_key< dim > &key)
Move up (to coarser level) the key.
auto get(size_t lvl, const grid_dist_key_dx< dim > &v1) -> decltype(gd_array.get(lvl).template get< p >(v1))
Get the reference of the selected element.
void tagBoundaries()
Tag the boundaries on every grid level (the description previously duplicated construct_level_connections — verify against the source).
grid_key_dx< dim > getGKey(int lvl, const grid_dist_key_dx< dim > &v1)
Get the position on the grid in global coordinates.
openfpm::vector< openfpm::vector< offset_mv< dim > > > mv_off
Moving offsets.
void initLevels(size_t n_lvl, const size_t(&g_sz)[dim], size_t opt=0)
Initialize the amr grid.
grid_dist_key_dx< dim > moveUp(int lvl, const grid_dist_key_dx< dim > &key)
Move up (to coarser level) the key.
auto getGridIterator(size_t lvl, grid_key_dx< dim > &start, grid_key_dx< dim > &stop) -> decltype(gd_array.get(lvl).getGridIterator(start, stop))
Get an iterator to the grid.
void initLevels(const Decomposition_encap< Decomposition, TT > &dec, size_t n_lvl, const size_t(&g_sz)[dim])
Initialize the amr grid.
void initialize_other(size_t n_lvl, size_t(&g_sz_lvl)[dim])
Initialize the other levels.
grid_dist_id< dim, St, T, Decomposition, Memory, device_grid > & getDistGrid(size_t lvl)
Get the internal distributed grid.
Box< dim, St > domain
Simulation domain.
grid_dist_key_dx< dim > moveDw(int lvl, const grid_dist_key_dx< dim > &key)
Move down (to finer level) the key.
const grid_sm< dim, void > & getGridInfoVoid(size_t lvl) const
Get an object containing the grid information for a specific level.
openfpm::vector< grid_dist_iterator< dim, device_grid, device_it, FIXED > > git_g
Domain and ghost iterator for each distributed grid.
bool write(std::string output, size_t opt=VTK_WRITER|FORMAT_ASCII)
Write on vtk file.
Point< dim, St > getSpacing(size_t lvl)
Return the spacing of the grid at level lvl.
grid_dist_amr_key_iterator< dim, device_grid, decltype(device_grid::type_of_iterator()), grid_dist_iterator< dim, device_grid, decltype(device_grid::type_of_iterator()), FIXED > > getDomainGhostIterator()
Get domain iterator.
periodicity< dim > bc
Boundary conditions of the structure.
decltype(device_grid::type_of_iterator()) device_it
Type of structure for the grid iterator.
void remove(size_t lvl, const grid_dist_key_dx< dim > &v1)
Remove a grid point (this function makes sense only for sparse grids)
openfpm::vector< grid_dist_id< dim, St, T, Decomposition, Memory, device_grid >, HeapMemory, memory_traits_lin, openfpm::grow_policy_identity, STD_VECTOR > gd_array
array of grids
void map(size_t opt=0)
It moves all the grid parts that do not belong to the local processor to the respective processor.
grid_dist_iterator< dim, device_grid, decltype(device_grid::type_of_iterator()), FIXED > getDomainGhostIterator(size_t lvl) const
Return an iterator over the level lvl.
auto get(const grid_dist_amr_key< dim > &v1) const -> decltype(gd_array.get(v1.getLvl()).template get< p >(v1.getKey()))
Get the reference of the selected element.
grid_dist_amr(const Box< dim, St > &domain, const Ghost< dim, long int > &g)
Constructor.
grid_dist_iterator< dim, device_grid, decltype(device_grid::type_of_subiterator()), FREE > getDomainIterator(size_t lvl) const
Return an iterator over the level lvl.
auto insert(const grid_dist_amr_key< dim > &v1) -> decltype(gd_array.get(v1.getLvl()).template insert< p >(v1.getKey()))
Get the reference of the selected element.
void clear()
delete all the points in the grid
auto getGridGhostIterator(size_t lvl) -> decltype(gd_array.get(lvl).getGridGhostIterator(grid_key_dx< dim >(), grid_key_dx< dim >()))
Get an iterator to the grid.
void initLevels(const Decomposition &dec, size_t n_lvl, const size_t(&g_sz)[dim])
Initialize the amr grid.
Amr grid distributed key.
grid_dist_key_dx< dim > & getKeyRef()
Return the grid key (as reference)
const grid_dist_key_dx< dim > & getKey() const
Return the grid key.
void setLvl(size_t lvl)
Set the level.
size_t getLvl() const
Return the level.
This is a distributed grid.
static grid_dist_iterator_sub< dim, device_grid > type_of_subiterator()
This is a meta-function return which type of sub iterator a grid produce.
Distributed grid iterator.
Distributed grid iterator.
Grid key for a distributed grid.
size_t getSub() const
Get the local grid.
void setSub(size_t sub)
Set the local grid.
base_key & getKeyRef()
Get the reference key.
grid_key_dx is the key to access any element in the grid
__device__ __host__ void set_d(index_type i, index_type id)
Set the i index.
__device__ __host__ index_type get(index_type i) const
Get the i index.
Grow policy define how the vector should grow every time we exceed the size.
Implementation of 1-D std::vector like structure.
Transform the boost::fusion::vector into memory specification (memory_traits)