#ifndef GRID_DIST_ID_ITERATOR_HPP_
#define GRID_DIST_ID_ITERATOR_HPP_

#define FREE 1
#define FIXED 2
#define ITERATION_ISOLATION 4

#include "Grid/grid_dist_key.hpp"
#include "VCluster/VCluster.hpp"
#include "util/GBoxes.hpp"

#ifdef __NVCC__
#include "SparseGridGpu/encap_num.hpp"
#include "Grid/cuda/grid_dist_id_kernels.cuh"
/*! \brief Insert functor for the sparse GPU grid, generic (unsupported) dimension
 *
 * The dimension-specific specializations below implement the real work.
 */
template<unsigned int dim>
struct launch_insert_sparse_lambda_call
{
	template<typename ec_type, typename lambda_t, typename coord_type>
	__device__ inline static void call(ec_type & ec, lambda_t f, coord_type coord)
	{
		printf("Not implemented for this dimension\n");
	}

	template<typename ite_type>
	__device__ inline static bool set_keys(grid_key_dx<dim,int> & key, grid_key_dx<dim,int> & keyg, ite_type & itg)
	{
		return false;
	}
};
//! 3D specialization of the sparse-grid insert functor
template<>
struct launch_insert_sparse_lambda_call<3>
{
	template<typename grid_type, typename lambda_t1, typename lambda_t2, typename itd_type, typename coord_type>
	__device__ inline static void call(grid_type & grid,
	                                   lambda_t1 f1, lambda_t2 f2,
	                                   unsigned int blockId,
	                                   itd_type itd,
	                                   coord_type & key,
	                                   coord_type & keyg,
	                                   unsigned int offset,
	                                   bool & is_block_empty,
	                                   bool is_in)
	{
		// f1 decides whether the point exists; evaluate it only for
		// threads that fall inside the iteration box
		bool is_active = false;
		if (is_in == true)
		{is_active = f1(keyg.get(0),keyg.get(1),keyg.get(2));}

		// at least one active point means the block must be inserted
		if (is_active == true)
		{is_block_empty = false;}

		__syncthreads();

		if (is_block_empty == false)
		{
			auto ec = grid.insertBlock(blockId);
			enc_num<decltype(grid.insertBlock(blockId))> ecn(ec,offset);

			if (is_active == true)
			{
				// fill the point with f2 and mark it in the block mask
				f2(ecn,keyg.get(0),keyg.get(1),keyg.get(2));
				ec.template get<grid_type::pMask>()[offset] = 1;
			}
		}
	}

	template<typename ite_type>
	__device__ inline static bool set_keys(grid_key_dx<3,int> & key, grid_key_dx<3,int> & keyg, ite_type & itg)
	{
		// local key of the thread inside this grid
		key.set_d(0,threadIdx.x + blockIdx.x * blockDim.x + itg.start.get(0));
		key.set_d(1,threadIdx.y + blockIdx.y * blockDim.y + itg.start.get(1));
		key.set_d(2,threadIdx.z + blockIdx.z * blockDim.z + itg.start.get(2));

		// global key: local key shifted by the grid origin
		keyg.set_d(0,key.get(0) + itg.origin.get(0));
		keyg.set_d(1,key.get(1) + itg.origin.get(1));
		keyg.set_d(2,key.get(2) + itg.origin.get(2));

		// return true if the thread is outside the iteration box
		if (key.get(0) > itg.stop.get(0) || key.get(1) > itg.stop.get(1) || key.get(2) > itg.stop.get(2) ||
			key.get(0) < itg.start_base.get(0) || key.get(1) < itg.start_base.get(1) || key.get(2) < itg.start_base.get(2))
		{return true;}

		return false;
	}
};
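/*! \brief Example (sketch): the pair of lambdas consumed by the 3D insert path.
 *
 * f1 is the activation predicate, f2 fills the properties of the inserted
 * point. The entry point addPoints and the variable gdist are assumptions
 * taken from the sparse-grid GPU interface, not defined in this file.
 *
 * \code
 * gdist.addPoints([] __device__ (int i, int j, int k)
 *                 {
 *                     return i == j;                       // f1: which points exist
 *                 },
 *                 [] __device__ (auto & data, int i, int j, int k)
 *                 {
 *                     data.template get<0>() = i + j + k;  // f2: fill property 0
 *                 });
 * \endcode
 */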
//! 2D specialization of the sparse-grid insert functor
template<>
struct launch_insert_sparse_lambda_call<2>
{
	template<typename grid_type, typename lambda_t1, typename lambda_t2, typename itd_type, typename coord_type>
	__device__ inline static void call(grid_type & grid,
	                                   lambda_t1 f1, lambda_t2 f2,
	                                   unsigned int blockId,
	                                   itd_type itd,
	                                   coord_type & key,
	                                   coord_type & keyg,
	                                   unsigned int offset,
	                                   bool & is_block_empty,
	                                   bool is_in)
	{
		bool is_active = false;
		if (is_in == true)
		{is_active = f1(keyg.get(0),keyg.get(1));}

		if (is_active == true)
		{is_block_empty = false;}

		__syncthreads();

		if (is_block_empty == false)
		{
			auto ec = grid.insertBlock(blockId);
			enc_num<decltype(grid.insertBlock(blockId))> ecn(ec,offset);

			if (is_active == true)
			{
				f2(ecn,keyg.get(0),keyg.get(1));
				ec.template get<grid_type::pMask>()[offset] = 1;
			}
		}
	}

	template<typename ite_type>
	__device__ inline static bool set_keys(grid_key_dx<2,int> & key, grid_key_dx<2,int> & keyg, ite_type & itg)
	{
		key.set_d(0,threadIdx.x + blockIdx.x * blockDim.x + itg.start.get(0));
		key.set_d(1,threadIdx.y + blockIdx.y * blockDim.y + itg.start.get(1));

		keyg.set_d(0,key.get(0) + itg.origin.get(0));
		keyg.set_d(1,key.get(1) + itg.origin.get(1));

		if (key.get(0) > itg.stop.get(0) || key.get(1) > itg.stop.get(1) ||
			key.get(0) < itg.start_base.get(0) || key.get(1) < itg.start_base.get(1))
		{return true;}

		return false;
	}
};
//! Functor that performs one block-wise insertion step on the sparse GPU grid
struct launch_insert_sparse
{
	template<typename grid_type, typename ite_type, typename lambda_f1, typename lambda_f2>
	__device__ void operator()(grid_type & grid, ite_type itg, bool & is_block_empty, lambda_f1 f1, lambda_f2 f2)
	{
		grid_key_dx<grid_type::dims,int> key;
		grid_key_dx<grid_type::dims,int> keyg;

		// compute the local/global keys; true means the thread is out of range
		bool not_in = launch_insert_sparse_lambda_call<grid_type::dims>::set_keys(key,keyg,itg);

		if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
		{is_block_empty = true;}

		grid.init();

		// make the cleared flag visible to the whole block
		__syncthreads();

		int offset = 0;
		grid_key_dx<grid_type::dims,int> blk;
		bool out = grid.template getInsertBlockOffset<ite_type>(itg,key,blk,offset);

		auto blockId = grid.getBlockLinId(blk);

		launch_insert_sparse_lambda_call<grid_type::dims>::call(grid,f1,f2,blockId,itg,key,keyg,offset,is_block_empty,!not_in);

		__syncthreads();

		grid.flush_block_insert();
	}
};
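/*! \brief Example (sketch): how launch_insert_sparse is driven from a kernel.
 *
 * The kernel below is a hypothetical illustration: the actual launcher lives
 * in grid_dist_id_kernels.cuh and may differ in name and signature. The key
 * point is that is_block_empty is a per-CUDA-block __shared__ flag.
 *
 * \code
 * template<typename grid_type, typename ite_type, typename lambda_f1, typename lambda_f2>
 * __global__ void insert_functor_kernel(grid_type grid, ite_type itg, lambda_f1 f1, lambda_f2 f2)
 * {
 *     __shared__ bool is_block_empty;
 *
 *     launch_insert_sparse()(grid,itg,is_block_empty,f1,f2);
 * }
 * \endcode
 */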
//! Dense-grid launcher, generic (unsupported) dimension
template<unsigned int dim>
struct launch_set_dense
{
	template<typename grid_type, typename ite_type, typename lambda_f2>
	__device__ void operator()(grid_type & grid, ite_type itg, lambda_f2 f2)
	{
		printf("grid on GPU: dimension %d not implemented yet\n",(int)dim);
	}
};
//! Dense-grid launcher, 2D specialization
template<>
struct launch_set_dense<2>
{
	template<typename grid_type, typename ite_type, typename lambda_f2>
	__device__ void operator()(grid_type & grid, ite_type itg, lambda_f2 f2)
	{
		// defines key (local) and keyg (global) and returns for
		// threads outside the iteration box
		GRID_ID_2_GLOBAL(itg);

		auto obj = grid.get_o(key);

		f2(obj,keyg.get(0),keyg.get(1));
	}
};
//! Dense-grid launcher, 3D specialization
template<>
struct launch_set_dense<3>
{
	template<typename grid_type, typename ite_type, typename lambda_f2>
	__device__ void operator()(grid_type & grid, ite_type itg, lambda_f2 f2)
	{
		GRID_ID_3_GLOBAL(itg);

		auto obj = grid.get_o(key);

		f2(obj,keyg.get(0),keyg.get(1),keyg.get(2));
	}
};

#endif // __NVCC__
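/*! \brief Example (sketch): the functor consumed by the dense launchers.
 *
 * For dense grids f2 receives the encapsulated object of the point (all its
 * properties) plus the global coordinates; the property index 0 below is an
 * arbitrary choice for illustration.
 *
 * \code
 * auto f2 = [] __device__ (auto & obj, int i, int j, int k)
 * {
 *     obj.template get<0>() = i*i + j*j + k*k;
 * };
 * \endcode
 */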
/*! \brief Select the way the sub-iterator of a local grid is initialized
 *
 * is_free == true: iterate only the domain part (Dbox) of the grid
 */
template<bool is_free>
struct selvg
{
	template<typename a_it_type, typename gdb_ext_type, typename gList_type>
	static inline void call(a_it_type & a_it, gdb_ext_type & gdb_ext, gList_type & gList, size_t & g_c)
	{
		if (gdb_ext.get(g_c).Dbox.isValid() == false)
		{g_c++;}
		else
		{
			// iterate only the domain part of the grid, not the ghost
			a_it.reinitialize(gList.get(g_c).getIterator(gdb_ext.get(g_c).Dbox.getKP1(),gdb_ext.get(g_c).Dbox.getKP2()));
			if (a_it.isNext() == false) {g_c++;}
		}
	}
};
//! is_free == false: iterate the full grid, ghost included
template<>
struct selvg<false>
{
	template<typename a_it_type, typename gdb_ext_type, typename gList_type>
	static inline void call(a_it_type & a_it, gdb_ext_type & gdb_ext, gList_type & gList, size_t & g_c)
	{
		a_it.reinitialize(gList.get(g_c).getIterator());
		if (a_it.isNext() == false) {g_c++;}
	}
};
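/*! \brief Example (sketch): how the selector advances past empty grids.
 *
 * Assuming a two-grid list where grid 0 has an invalid (empty) domain box:
 *
 * \code
 * size_t g_c = 0;
 * selvg<true>::call(a_it,gdb_ext,gList,g_c);  // Dbox of grid 0 invalid -> g_c becomes 1
 * selvg<true>::call(a_it,gdb_ext,gList,g_c);  // a_it now spans the domain part of grid 1
 * \endcode
 */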
/*! \brief Distributed grid iterator
 *
 * Iterator across the local elements of a distributed grid
 *
 * \tparam dim dimensionality \tparam device_grid local grid type
 * \tparam device_sub_it sub-iterator type of device_grid
 * \tparam impl FREE (domain only) or FIXED (domain + ghost)
 * \tparam stencil type injecting the stencil-offset calculation
 */
template<unsigned int dim, typename device_grid, typename device_sub_it, int impl, typename stencil = no_stencil>
class grid_dist_iterator
{
	//! grid list counter
	size_t g_c;
	//! List of the grids we are going to iterate
	const openfpm::vector<device_grid> & gList;
	//! Extension of each grid: domain and ghost + domain
	const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext;
	//! Actual iterator
	device_sub_it a_it;
	//! stop point (is the grid size)
	grid_key_dx<dim> stop;

	//! from g_c increment g_c until you find a valid grid (reconstructed body:
	//! selvg advances g_c past empty or exhausted grids, so retry until it stays put)
	void selectValidGrid()
	{
		while (g_c < gList.size())
		{
			size_t g_c_prev = g_c;
			selvg<impl == FREE>::call(a_it,gdb_ext,gList,g_c);
			if (g_c == g_c_prev) {break;}
		}
	}

public:

	//! Constructor of the distributed grid iterator
	grid_dist_iterator(const openfpm::vector<device_grid> & gk, const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, const grid_key_dx<dim> & stop)
	:g_c(0),gList(gk),gdb_ext(gdb_ext),stop(stop)
	{selectValidGrid();}

	//! Constructor of the distributed grid iterator with stencil support
	grid_dist_iterator(openfpm::vector<device_grid> & gk, const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, const grid_key_dx<dim> & stop,
	                   const grid_key_dx<dim> (& stencil_pnt)[stencil::nsp])
	:g_c(0),gList(gk),gdb_ext(gdb_ext),a_it(stencil_pnt),stop(stop)
	{selectValidGrid();}

	//! Copy constructor
	grid_dist_iterator(const grid_dist_iterator<dim,device_grid,device_sub_it,impl,stencil> & g)
	:g_c(g.g_c),gList(g.gList),gdb_ext(g.gdb_ext),a_it(g.a_it),stop(g.stop)
	{}

	//! Move constructor
	grid_dist_iterator(grid_dist_iterator<dim,device_grid,device_sub_it,impl,stencil> && g)
	:g_c(g.g_c),gList(g.gList),gdb_ext(g.gdb_ext),a_it(g.a_it),stop(g.stop)
	{}

	//! Destructor
	~grid_dist_iterator()	{}

	//! Get the next element (switch grid when the sub-iterator is exhausted)
	grid_dist_iterator<dim,device_grid,device_sub_it,impl,stencil> & operator++()
	{
		++a_it;
		if (a_it.isNext() == true) {return *this;}
		g_c++;
		selectValidGrid();
		return *this;
	}

	//! Check if there is the next element
	bool isNext() const
	{return g_c < gList.size();}

	//! Get the actual key
	grid_dist_key_dx<dim,typename device_grid::base_key> get() const
	{return grid_dist_key_dx<dim,typename device_grid::base_key>(g_c,a_it.get());}

	//! it return the start point of the iterator (always zero)
	grid_key_dx<dim> getStart() const
	{
		grid_key_dx<dim> start;
		start.zero();
		return start;
	}

	//! it return the stop point of the iterator (is the grid size)
	grid_key_dx<dim> getStop() const
	{return stop;}

	//! Get the boxes (domain and ghost + domain of each grid)
	const openfpm::vector<GBoxes<device_grid::dims>> & getGBoxes()
	{return gdb_ext;}

	//! Convert a grid_dist_key_dx into a global key
	grid_key_dx<dim> getGKey(const grid_dist_key_dx<dim,typename device_grid::base_key> & k)
	{
		// Get the sub-domain id
		size_t sub_id = k.getSub();
		auto k_glob = k.getKey();
		// shift the local key by the origin of its sub-domain grid
		auto k_glob2 = k_glob + gdb_ext.get(sub_id).origin;
		return k_glob2;
	}

	//! Return the stencil point offset as a distributed linearized key
	template<unsigned int id>
	grid_dist_lin_dx getStencil()
	{
		grid_dist_lin_dx ret;
		ret.setSub(g_c);
		ret.setKey(a_it.template getStencil<id>());
		return ret;
	}
};
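/*! \brief Example (sketch): typical loop over a distributed grid with this
 * iterator. g_dist is assumed to be an already constructed grid_dist_id
 * instance (not defined in this file) whose getDomainIterator() returns a
 * grid_dist_iterator.
 *
 * \code
 * auto it = g_dist.getDomainIterator();
 *
 * while (it.isNext())
 * {
 *     auto key  = it.get();        // distributed key, usable with g_dist.get<...>()
 *     auto keyg = it.getGKey(key); // global coordinates of the same point
 *
 *     g_dist.template get<0>(key) = keyg.get(0);
 *
 *     ++it;
 * }
 * \endcode
 */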
#endif /* GRID_DIST_ID_ITERATOR_HPP_ */