8 #ifndef VECTOR_DIST_GPU_HPP_ 9 #define VECTOR_DIST_GPU_HPP_ 11 constexpr
unsigned int POS_PROP = (
unsigned int)-1;
15 #define GET_PARTICLE(vd) blockDim.x*blockIdx.x + threadIdx.x; if (blockDim.x*blockIdx.x + threadIdx.x >= static_cast<unsigned int>(vd.size_local())) {return;}; 16 #define GET_PARTICLE_SORT(p,NN) if (blockDim.x*blockIdx.x + threadIdx.x >= NN.get_g_m()) {return;}\ 17 else{p = NN.getDomainSortIds().template get<0>(blockDim.x*blockIdx.x + threadIdx.x);} 20 #define GET_PARTICLE_BY_ID(p,ids) if (blockDim.x*blockIdx.x + threadIdx.x >= ids.size()) {return;}\ 21 else{p = ids.template get<0>(blockDim.x*blockIdx.x + threadIdx.x);} 29 template<
typename vector_dist_ker>
30 struct check_vector_dist_kernels
33 const vector_dist_ker & o1;
35 const vector_dist_ker & o2;
45 inline check_vector_dist_kernels(
const vector_dist_ker & o1,
const vector_dist_ker & o2)
46 :o1(o1),o2(o2),check(false)
51 __device__ __host__
inline void operator()(T& t)
53 check &= o1.template getPointer<T::value>() == o2.template getPointer<T::value>();
// NOTE(review): garbled extraction — the fragments below belong to the template
// header, member typedefs, and constructor init-list of a kernel-side
// distributed-vector class (presumably vector_dist_ker); several original
// lines are missing here, so the fragments are kept verbatim.
57 template<
unsigned int dim,
// number of spatial dimensions exposed as a compile-time constant
80 static const unsigned int dims = dim;
// tag typedefs: mark this type as a kernel-usable vector that supports
// device-pointer checking (detected via has_* traits elsewhere — TODO confirm)
83 typedef int vector_kernel;
86 typedef int yes_has_check_device_pointer;
// the aggregate of per-particle properties
89 typedef prop value_type;
// constructor init-list: stores ghost marker and the position/property vectors
97 :g_m(g_m),v_pos(v_pos),v_prp(v_prp)
105 __device__ __host__
int size_local()
const {
return g_m;}
112 __device__ __host__
int size()
const {
return v_pos.
size();}
123 __device__ __host__
inline auto getPos(
int vec_key) -> decltype(v_pos.template get<0>(vec_key))
125 return v_pos.template get<0>(vec_key);
137 __device__ __host__
inline auto getPos(
const vect_dist_key_dx & vec_key) -> decltype(v_pos.template get<0>(vec_key.getKey()))
139 return v_pos.template get<0>(vec_key.getKey());
151 __device__ __host__
inline auto getPos(
int vec_key)
const -> decltype(v_pos.template get<0>(vec_key))
153 return v_pos.template get<0>(vec_key);
165 __device__ __host__
inline auto getPos(
const vect_dist_key_dx & vec_key)
const -> decltype(v_pos.template get<0>(vec_key.getKey()))
167 return v_pos.template get<0>(vec_key.getKey());
180 template<
unsigned int id> __device__ __host__
inline auto getProp(
int vec_key) -> decltype(v_prp.template get<id>(vec_key))
182 return v_prp.template get<id>(vec_key);
195 template<
unsigned int id> __device__ __host__
inline auto getProp(
const vect_dist_key_dx & vec_key) -> decltype(v_prp.template get<id>(vec_key.getKey()))
197 return v_prp.template get<id>(vec_key.getKey());
210 template<
unsigned int id> __device__ __host__
inline auto getProp(
int vec_key)
const -> decltype(v_prp.template get<id>(vec_key))
212 return v_prp.template get<id>(vec_key);
225 template<
unsigned int id> __device__ __host__
inline auto getProp(
const vect_dist_key_dx & vec_key)
const -> decltype(v_prp.template get<id>(vec_key.getKey()))
227 return v_prp.template get<id>(vec_key.getKey());
// NOTE(review): garbled extraction — three member fragments follow; enclosing
// signatures/bodies are partially missing, so the lines are kept verbatim.
// Fragment 1: error message printed when getDomainIterator is (wrongly) called
// on the kernel-side object — host-side iteration is not supported here.
253 std::cout << __FILE__ <<
":" << __LINE__ <<
" error getDomainIterator used on a vector_dist_ker object is not allowed" << std::endl;
// Fragment 2: host-only helper returning a GPU launch iterator sized by n_thr
// (body missing from this extraction).
263 __host__
ite_gpu<1> getDomainIteratorGPU(
size_t n_thr = default_kernel_wg_threads_)
const 273 __host__
// Fragment 3: equality check — presumably compares the position pointer first,
// then sweeps all property pointers with check_vector_dist_kernels via
// boost::mpl::for_each_ref; return statements are missing here — TODO confirm.
bool operator==(
const vector_dist_ker & v)
275 if (v.v_pos.template getPointer<0>() != v_pos.template getPointer<0>())
278 check_vector_dist_kernels<openfpm::vector_gpu_ker<prop,memory_traits_inte>> cv(this->v_prp,v.v_prp);
283 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,prop::max_prop> >(cv);
// NOTE(review): garbled extraction — isSubset signature and the body of a
// device-pointer diagnostic routine; surrounding lines are missing, so the
// fragments are kept verbatim.
293 __host__
bool isSubset()
// Diagnostic: ask the position vector whether ptr falls inside its device
// allocation; on a match, prepend a human-readable context line to the report.
const 312 pc = v_pos.check_device_pointer(ptr);
314 if (pc.
match ==
true)
316 pc.
match_str = std::string(
"Particle index overflow in position (v_pos): ") +
"\n" + pc.
match_str;
// Same check against the property vector; the last positive match wins —
// presumably the result pc is returned by the enclosing function (not visible).
320 pc = v_prp.check_device_pointer(ptr);
321 if (pc.
match ==
true)
323 pc.
match_str = std::string(
"Particle index overflow in properties (v_prp): ") +
"\n" + pc.
match_str;
// NOTE(review): garbled extraction — metafunction mapping a host-side vector
// type T to its kernel-side counterpart; the enclosing struct name is not
// visible in this extraction, fragments kept verbatim.
334 template<
template <
typename>
class layout_base,
typename T>
// transform the aggregate of properties through the chosen memory layout
337 typedef typename apply_transform<layout_base,typename T::value_type>::type aggr;
// resulting kernel-side type, rebuilt from T's dimension and scalar type
339 typedef vector_dist_ker<T::dims,typename T::stype,aggr,layout_base> type;
bool match
Indicates whether the pointer matches.
grid interface available when on gpu
Iterator that iterates across particle indexes.
This class implement the point shape in an N-dimensional space.
Grid key for a distributed grid.
ite_gpu< 1 > getGPUIteratorTo(size_t stop, size_t n_thr=default_kernel_wg_threads_) const
Get an iterator for the GPU.
Transform the boost::fusion::vector into memory specification (memory_traits)
KeyT const ValueT ValueT OffsetIteratorT OffsetIteratorT int
[in] The number of segments that comprise the sorting data
__device__ __host__ unsigned int size() const
Return the size of the vector.
std::string match_str
match string