OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
BlockMapGpu_dimensionalityWrappers.cuh
//
// Created by tommaso on 24/05/19.
//

#ifndef OPENFPM_PDATA_BLOCKMAPGPU_DIMENSIONALITYWRAPPERS_CUH
#define OPENFPM_PDATA_BLOCKMAPGPU_DIMENSIONALITYWRAPPERS_CUH

#include <type_traits> // std::remove_extent / std::extent, used by MultiArrayViewGpu below

// Type management and wrapping

/**
 * Compose the array type of a block property: map a block type exposing a
 * nested scalarType typedef (or an array of such blocks) to the corresponding
 * scalar type, preserving any array extents.
 */
template<typename BaseT>
struct ComposeArrayType
{
    typedef typename BaseT::scalarType type;
};

template<typename BaseType, unsigned int N1>
struct ComposeArrayType<BaseType[N1]>
{
    typedef typename BaseType::scalarType type[N1];
};

template<typename BaseType, unsigned int N1, unsigned int N2>
struct ComposeArrayType<BaseType[N1][N2]>
{
    typedef typename BaseType::scalarType type[N1][N2];
};

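// Illustrative sketch (the Block type below is hypothetical; any type with a
// nested scalarType typedef, such as OpenFPM's DataBlock, fits the pattern):
//
//   struct Block
//   {
//       typedef float scalarType;
//       float data[64];
//       __device__ __host__ float & operator[](int i) { return data[i]; }
//   };
//
//   static_assert(std::is_same<ComposeArrayType<Block>::type, float>::value, "");
//   static_assert(std::is_same<ComposeArrayType<Block[3]>::type, float[3]>::value, "");
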
// Data management and wrapping

/**
 * View over an N-dimensional array of blocks, usable in GPU and host code:
 * each operator[] peels one array extent, down to the 0-dimensional case
 * below, which resolves to the scalars inside a block.
 */
template<typename BaseT, unsigned int Nup, unsigned int N>
class MultiArrayViewGpu
{
    typedef typename std::remove_extent<BaseT>::type array_slice;
    typedef std::extent<BaseT> ext;

    BaseT * ptr;

public:
    __device__ __host__ inline MultiArrayViewGpu(BaseT * ptr)
    : ptr(ptr)
    {}

    __device__ __host__ inline MultiArrayViewGpu<array_slice, ext::value, N-1> operator[](int i)
    {
        return MultiArrayViewGpu<array_slice, ext::value, N-1>((array_slice *)(ptr + i));
    }
};

template<typename BaseT, unsigned int Nup>
class MultiArrayViewGpu<BaseT, Nup, 0>
{
    BaseT * ptr;

public:

    __device__ __host__ MultiArrayViewGpu(BaseT * ptr)
    : ptr(ptr)
    {}

    // The offset is expressed in scalar elements, so the view points at
    // element `offset` inside the first block.
    template <typename IndexT>
    __device__ __host__ MultiArrayViewGpu(BaseT * ptr, IndexT offset)
    : ptr((BaseT *)(((typename BaseT::scalarType *)ptr) + offset))
    {}

    __device__ __host__ inline typename BaseT::scalarType & operator[](int i)
    {
        return *((typename BaseT::scalarType *)(&(ptr[i])));
    }

    __device__ __host__ inline typename BaseT::scalarType & operator[](int i) const
    {
        return *((typename BaseT::scalarType *)(&(ptr[i])));
    }

    template <typename T>
    __device__ __host__ inline MultiArrayViewGpu<BaseT, Nup, 0> & operator=(const T &other)
    {
        for (int i = 0; i < Nup; ++i)
        {
            this->operator[](i) = other[i];
        }
        return *this;
    }
};

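// Illustrative sketch (reusing the hypothetical Block above): the
// 0-dimensional view reinterprets an array of blocks as an array of scalars
// at a fixed in-block offset, so v[i] addresses element `offset` of the
// i-th block:
//
//   __device__ void exampleView(Block (&comp)[3], unsigned int offset)
//   {
//       MultiArrayViewGpu<Block, 3, 0> v(comp, offset);
//       v[1] = 0.0f; // writes element `offset` of comp[1]
//   }
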
/**
 * Wrapper around the data of one block (or array of blocks), exposing uniform
 * operator[] access to the underlying scalars.
 */
template<typename BaseT>
struct ArrayWrapper
{
    BaseT data;

    __device__ __host__ inline typename BaseT::scalarType & operator[](int i)
    {
        return data[i];
    }

    __device__ __host__ inline const typename BaseT::scalarType & operator[](int i) const
    {
        return data[i];
    }
};

template<typename BaseType, unsigned int N1>
struct ArrayWrapper<BaseType[N1]>
{
    BaseType data[N1];

    __device__ __host__ MultiArrayViewGpu<BaseType, N1, 0> operator[](int i)
    {
        return MultiArrayViewGpu<BaseType, N1, 0>((BaseType *)(((typename BaseType::scalarType *)data) + i));
    }

    __device__ __host__ const MultiArrayViewGpu<BaseType, N1, 0> operator[](int i) const
    {
        return MultiArrayViewGpu<BaseType, N1, 0>((BaseType *)(((typename BaseType::scalarType *)data) + i));
    }
};

template<typename BaseType, unsigned int N1, unsigned int N2>
struct ArrayWrapper<BaseType[N1][N2]>
{
    BaseType array[N1][N2];

    __device__ __host__ MultiArrayViewGpu<BaseType[N2], N1, 1> operator[](int i)
    {
        return MultiArrayViewGpu<BaseType[N2], N1, 1>((BaseType (*)[N2])(((typename BaseType::scalarType *)array) + i));
    }
};

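// Illustrative sketch (hypothetical Block as above): ArrayWrapper over
// Block[N1] stores one block per array component; indexing first by in-block
// offset and then by component yields a single scalar:
//
//   __device__ float exampleRead(ArrayWrapper<Block[3]> &w, unsigned int offset)
//   {
//       return w[offset][2]; // component 2 of the element at slot `offset`
//   }
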
/**
 * Wrapper presenting the value(s) of one element (at a given in-block offset)
 * of an RHS block or array of blocks: the scalar case holds a reference, the
 * array specialisations below hold copies.
 */
template<typename BlockT>
struct RhsBlockWrapper
{
    typename BlockT::scalarType &value;

    template <typename IndexT>
    __device__ __host__ RhsBlockWrapper(BlockT &block, IndexT offset) : value(block[offset]) {}
};

template<typename BlockT, unsigned int N>
struct RhsBlockWrapper<BlockT[N]>
{
    typename BlockT::scalarType value[N];

    template <typename T, typename IndexT>
    __device__ __host__ RhsBlockWrapper(T input, IndexT offset)
    {
        for (int i = 0; i < N; ++i)
        {
            value[i] = input[i][offset];
        }
    }
};

template<typename BlockT, unsigned int N1, unsigned int N2>
struct RhsBlockWrapper<BlockT[N1][N2]>
{
    typename BlockT::scalarType value[N1][N2];

    template <typename T, typename IndexT>
    __device__ __host__ RhsBlockWrapper(T input, IndexT offset)
    {
        for (int i = 0; i < N1; ++i)
        {
            for (int j = 0; j < N2; ++j)
            {
                value[i][j] = input[i][j][offset];
            }
        }
    }
};

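// Illustrative sketch (hypothetical Block as above): RhsBlockWrapper copies
// the per-element values of an RHS made of an array of blocks, so that one
// element (slot `offset`) can be handled as a plain scalar array:
//
//   __device__ void exampleRhs(Block (&src)[3], unsigned int offset)
//   {
//       RhsBlockWrapper<Block[3]> rhs(src, offset); // rhs.value[i] == src[i][offset]
//   }
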
/**
 * Functor that applies element-wise operations (assignment at an in-block
 * offset, binary reduction ops) uniformly to scalar and array types.
 */
template <typename T1>
struct generalDimensionFunctor
{
    template <typename T1b, typename T2, typename T3>
    __device__ __host__ inline static void assignWithOffsetRHS(T1b &dst, const T2 &src, T3 offset)
    {
        dst = src[offset];
    }

    template <typename T1b, typename T2, typename T3>
    __device__ __host__ inline static void assignWithOffset(T1b &dst, const T2 &src, T3 offset)
    {
        dst[offset] = src[offset];
    }

    template<typename op, typename T1b, typename T2>
    __device__ inline static void applyOp(T1b &a, const T2 &b, bool aExist, bool bExist)
    {
        op op_;
        if (aExist && bExist)
        {
            a = op_(a, b);
        }
        else if (bExist)
        {
            a = b;
        }
    }
};

template <typename T1, unsigned int N1>
struct generalDimensionFunctor<T1[N1]>
{
    template <typename T1b, typename T2, typename T3>
    __device__ __host__ inline static void assignWithOffsetRHS(T1b (& dst)[N1], const T2 &src, T3 offset)
    {
        for (int i = 0; i < N1; ++i)
        {
            // dst[i] = src[i][offset];
            generalDimensionFunctor<T1>::assignWithOffsetRHS(dst[i], src[i], offset);
        }
    }

    // dst is taken by value so that proxy/view objects (e.g. MultiArrayViewGpu)
    // returned by operator[] can be passed in, which a reference-to-array
    // parameter would not accept.
    template <typename T1b, typename T2, typename T3>
    __device__ __host__ inline static void assignWithOffset(T1b dst, const T2 &src, T3 offset)
    {
        for (int i = 0; i < N1; ++i)
        {
            // dst[i][offset] = src[i][offset];
            generalDimensionFunctor<T1>::assignWithOffset(dst[i], src[i], offset);
        }
    }

    template<typename op, typename T1b, typename T2>
    __device__ inline static void applyOp(T1b &a, const T2 &b, bool aExist, bool bExist)
    {
        for (int i = 0; i < N1; ++i)
        {
            generalDimensionFunctor<T1>::template applyOp<op>(a[i], b[i], aExist, bExist);
        }
    }

    // Overload taking `a` by const reference, so that temporary proxy/view
    // objects can also be passed.
    template<typename op, typename T1b, typename T2>
    __device__ inline static void applyOp(const T1b &a, const T2 &b, bool aExist, bool bExist)
    {
        for (int i = 0; i < N1; ++i)
        {
            generalDimensionFunctor<T1>::template applyOp<op>(a[i], b[i], aExist, bExist);
        }
    }
};

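// Illustrative sketch (hypothetical plus_ functor): applyOp combines two
// values element-wise when both exist and copies the RHS when only it exists:
//
//   struct plus_
//   {
//       template<typename T>
//       __device__ T operator()(T a, T b) const { return a + b; }
//   };
//
//   __device__ void exampleMerge(float (&a)[2], const float (&b)[2], bool aEx, bool bEx)
//   {
//       generalDimensionFunctor<float[2]>::applyOp<plus_>(a, b, aEx, bEx);
//   }
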
//template <typename T1, unsigned int N1, unsigned int N2>
//struct generalDimensionFunctor<T1[N1][N2]>
//{
//    template <typename T2, typename T3>
//    __device__ __host__ inline static void assign(T1 (& dst)[N1][N2], const T2 &src, T3 offset)
//    {
//        for (int i = 0; i < N1; ++i)
//        {
//            for (int j = 0; j < N2; ++j)
//            {
//                dst[i][j] = src[i][j][offset];
//            }
//        }
//    }
//};

#endif //OPENFPM_PDATA_BLOCKMAPGPU_DIMENSIONALITYWRAPPERS_CUH