OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
 
SparseGrid_iterator_block.hpp
/*
 * SparseGrid_iterator_block.hpp
 *
 *  Created on: Feb 25, 2020
 *      Author: i-bird
 */

#ifndef SPARSEGRID_ITERATOR_BLOCK_HPP_
#define SPARSEGRID_ITERATOR_BLOCK_HPP_

#if !defined(__NVCC__) || defined(CUDA_ON_CPU) || defined(__HIP__)
// Nvcc does not like Vc ... for some reason
#include <Vc/Vc>
#define DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR (Vc::float_v::Size *sizeof(float) >= sizeof(T))

#else

#define DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR 1

#endif


#include "Grid/iterators/grid_skin_iterator.hpp"
#include "SparseGrid_chunk_copy.hpp"

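/*! \brief Helper that maps a compile-time integer constant c to boost::mpl::int_<c>,
 *         clamping negative values to 1
 */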
template<int c,bool is_neg = c < 0>
struct fix_neg_to_one
{
	typedef boost::mpl::int_<c> type;
};

template<int c>
struct fix_neg_to_one<c,true>
{
	typedef boost::mpl::int_<1> type;
};

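/*! \brief Functor (for boost::mpl::for_each_ref) that converts a chunk coordinate into the
 *         coordinate of its first grid point, multiplying each component by the block extent
 */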
template<unsigned int dim, typename vector_blocks_ext>
struct calc_loc
{
	//! key to convert (member declaration elided in the original listing, reconstructed from its use below)
	grid_key_dx<dim> & k;

	calc_loc(grid_key_dx<dim> & k)
	:k(k)
	{};

	template<typename T>
	inline void operator()(T& val)
	{
		k.set_d(T::value,boost::mpl::at<typename vector_blocks_ext::type,boost::mpl::int_<T::value>>::type::value*k.get(T::value));
	}
};


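/*! \brief Functor (for boost::mpl::for_each_ref) that fills cnk_box with the bounding box of
 *         the chunk chunk_id, from its position in the header and the block extents
 */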
template<unsigned int dim, typename header_type, typename vector_blocks_ext>
struct fill_chunk_block
{
	Box<dim,size_t> cnk_box;

	unsigned int chunk_id;

	header_type & header;

	inline fill_chunk_block(header_type & header, unsigned int chunk_id)
	:chunk_id(chunk_id),header(header)
	{};

	template<typename T>
	inline void operator()(T& val)
	{
		cnk_box.setLow(T::value,header.get(chunk_id).pos.get(T::value));
		cnk_box.setHigh(T::value,header.get(chunk_id).pos.get(T::value) + boost::mpl::at<typename vector_blocks_ext::type,T>::type::value - 1);
	}
};

template<unsigned int prop, unsigned int stencil_size, unsigned int dim, typename vector_blocks_exts, typename vector_ext>
struct loadBlock_impl
{
	template<unsigned int N1, typename T, typename SparseGridType>
	static void loadBlock(T arr[N1], SparseGridType & sgt, int chunk_id, unsigned char mask[N1])
	{
		// (declaration of the block-sizes functor 'gbs' -- members sz_ext, sz_block, sz_tot, sz_ext_b --
		//  is elided in the original listing)

		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(gbs);

		grid_sm<dim,void> g_block(gbs.sz_ext);
		grid_sm<dim,void> g_in_block(gbs.sz_block);
		grid_sm<dim,void> g_block_arr(gbs.sz_tot);

		grid_key_dx_iterator<dim> it_in_block(g_in_block);

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		auto & h = header_mask.get(chunk_id);

		auto & ref_block = data.template get<prop>(chunk_id);

		while(it_in_block.isNext())
		{
			auto p = it_in_block.get();

			grid_key_dx<dim> arr_p;

			for (int i = 0 ; i < dim ; i++)
			{arr_p.set_d(i,p.get(i)+stencil_size);}

			size_t id = g_block_arr.LinId(arr_p);
			size_t idi = g_in_block.LinId(p);

			arr[id] = ref_block[idi];
			mask[id] = exist_sub(h,idi);

			++it_in_block;
		}
	}

	template<unsigned int N1, typename T, typename SparseGridType>
	static void loadBlock(T arr[N1], SparseGridType & sgt, int chunk_id)
	{
		// (declaration of the block-sizes functor 'gbs' elided in the original listing)

		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(gbs);

		grid_sm<dim,void> g_block(gbs.sz_ext);
		grid_sm<dim,void> g_in_block(gbs.sz_block);
		grid_sm<dim,void> g_block_arr(gbs.sz_tot);

		grid_key_dx_iterator<dim> it_in_block(g_in_block);

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		auto & ref_block = data.template get<prop>(chunk_id);

		while(it_in_block.isNext())
		{
			auto p = it_in_block.get();

			grid_key_dx<dim> arr_p;

			for (int i = 0 ; i < dim ; i++)
			{arr_p.set_d(i,p.get(i)+stencil_size);}

			arr[g_block_arr.LinId(arr_p)] = ref_block[g_in_block.LinId(p)];

			++it_in_block;
		}
	}

	template<unsigned int N1, typename T, typename SparseGridType>
	static void storeBlock(T arr[N1], SparseGridType & sgt, int chunk_id)
	{
		// (declaration of the block-sizes functor 'gbs' elided in the original listing)

		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(gbs);

		grid_sm<dim,void> g_block(gbs.sz_ext);
		grid_sm<dim,void> g_in_block(gbs.sz_block);
		grid_sm<dim,void> g_block_arr(gbs.sz_tot);

		grid_key_dx_iterator<dim> it_in_block(g_in_block);

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		auto & ref_block = data.template get<prop>(chunk_id);

		while(it_in_block.isNext())
		{
			auto p = it_in_block.get();

			ref_block[g_in_block.LinId(p)] = arr[g_in_block.LinId(p)];

			++it_in_block;
		}
	}


	template<unsigned int N1, typename T, typename SparseGridType>
	static void loadBorder(T arr[N1],
	                       SparseGridType & sgt,
	                       size_t chunk_id,
	                       /* four parameters elided in the original listing: the vectors bord, block_skin,
	                          chunk_ids and offsets, whose names can be recovered from their use below */
	                       unsigned char mask[N1],
	                       /* last parameter elided in the original listing: the maps_blk buffer */)
	{
		// (typedef of the compile-time size array 'size' used below is elided in the original listing)

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();
		auto & header_inf = sgt.private_get_header_inf();

		auto & hm = header_mask.get(chunk_id);
		auto & hc = header_inf.get(chunk_id);

		maps_blk.resize(block_skin.size());

		for (int i = 0 ; i < maps_blk.size() ; i++)
		{
			grid_key_dx<dim> p;   // (declaration elided in the original listing, reconstructed from its use)

			for (int j = 0 ; j < dim ; j++)
			{p.set_d(j,block_skin.get(i).get(j) + hc.pos.get(j) / size::data[j] - 1);}

			maps_blk.get(i) = sgt.getChunk(p);
		}

		for (int i = 0 ; i < bord.size(); i++)
		{
			size_t ac = maps_blk.get(chunk_ids.get(i));

			size_t b = bord.get(i);
			size_t off = offsets.template get<0>(i);

			auto & h = header_mask.get(ac);

			arr[b] = (ac == data.size()-1)?data.template get<prop>(0)[off]:data.template get<prop>(ac)[off];
			mask[b] = (ac == data.size()-1)?0:exist_sub(h,off);
		}
	}
};

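/*! \brief Specialization for the 3D case
 *
 * Instead of iterating point by point it uses the optimized chunk/surface copy routines
 * (copy_xyz, copy_xy_3, copy_xz_3, copy_yz_3) and the cached neighbour-chunk list (NNlist)
 * to fill the border of the block
 */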
template<unsigned int prop, unsigned int stencil_size, typename vector_blocks_exts, typename vector_ext>
struct loadBlock_impl<prop,stencil_size,3,vector_blocks_exts,vector_ext>
{
	template<unsigned int N1, typename T, typename SparseGridType>
	inline static void loadBlock(T arr[N1], SparseGridType & sgt, int chunk_id, unsigned char mask[N1])
	{
		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		auto & h = header_mask.get(chunk_id);

		// Faster version

		auto & chunk = data.template get<prop>(chunk_id);

		copy_xyz<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,false>::template copy<N1>(arr,mask,h,chunk);
	}


	template<unsigned int N1, typename T, typename SparseGridType>
	inline static void storeBlock(T arr[N1], SparseGridType & sgt, int chunk_id)
	{
		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		// Faster version

		auto & chunk = data.template get<prop>(chunk_id);

		copy_xyz<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,false>::template store<N1>(arr,chunk);
	}


	template<bool findNN, typename NNType, unsigned int N1, typename T, typename SparseGridType>
	inline static void loadBorder(T arr[N1],
	                              SparseGridType & sgt,
	                              size_t chunk_id,
	                              /* four parameters elided in the original listing: the vectors bord, block_skin,
	                                 chunk_ids and offsets (unused in this specialization) */
	                              unsigned char mask[N1],
	                              /* last parameter elided in the original listing: the maps_blk buffer */)
	{
		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();
		auto & NNlist = sgt.private_get_nnlist();

		auto & h = header_mask.get(chunk_id);

		typedef typename boost::mpl::at<typename vector_blocks_exts::type,boost::mpl::int_<0>>::type sz0;
		typedef typename boost::mpl::at<typename vector_blocks_exts::type,boost::mpl::int_<1>>::type sz1;
		typedef typename boost::mpl::at<typename vector_blocks_exts::type,boost::mpl::int_<2>>::type sz2;

		grid_key_dx<3> p;   // (declaration elided in the original listing, reconstructed from its use below)

		bool exist;
		long int r;

		// +z neighbour chunk: copy its xy surface into the border (or null the mask if it does not exist)
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({0,0,1});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_xy_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<0,stencil_size+sz2::value,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_xy_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<stencil_size+sz2::value,N1>(mask);
		}

		// -z neighbour chunk
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({0,0,-1});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+1) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+1);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_xy_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<sz2::value - stencil_size,0,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_xy_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<0,N1>(mask);
		}

		// +y neighbour chunk: copy its xz surface
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({0,1,0});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+2) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+2);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_xz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<0,stencil_size+sz1::value,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_xz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<stencil_size+sz1::value,N1>(mask);
		}

		// -y neighbour chunk
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({0,-1,0});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+3) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+3);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_xz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<sz1::value-stencil_size,0,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_xz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<0,N1>(mask);
		}

		// +x neighbour chunk: copy its yz surface
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({1,0,0});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+4) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+4);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_yz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<0,sz0::value+stencil_size,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_yz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<sz0::value+stencil_size,N1>(mask);
		}

		// -x neighbour chunk
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({-1,0,0});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+5) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+5);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_yz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<sz0::value-stencil_size,0,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_yz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value && DISABLE_VECTORIZATION_OPTIMIZATION_WHEN_VCDEVEL_IS_SCALAR ,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<0,N1>(mask);
		}
	}
};


template<unsigned dim,
         unsigned int stencil_size,
         typename SparseGridType,
         typename vector_blocks_exts,
         typename vector_ext = typename vmpl_create_constant<dim,1>::type>
class grid_key_sparse_dx_iterator_block_sub
{
	//! sparse grid on which we iterate
	SparseGridType & spg;

	//! point to the actual chunk
	size_t chunk_id;

	//! starting point
	grid_key_dx<dim> start_;

	//! stop point
	grid_key_dx<dim> stop_;

	//! Sub-grid box
	Box<dim,size_t> bx;

	// (the remaining private member declarations -- the border index vectors bord, block_skin,
	//  chunk_shifts, offsets, b_map and the mask-header pointer hm -- are elided in the original
	//  listing; their names can be recovered from their use below)

	// chunk header container
	cheader<dim> * hc;

	// temporary buffer for Load border
	// (declaration elided in the original listing; presumably the maps_blk buffer passed to loadBorder)

	//! Portion of the current chunk to iterate, in chunk-local coordinates
	Box<dim,size_t> block_it;

	//! Find the next chunk that intersects the sub-domain box bx and compute the portion
	//! of that chunk (block_it) that has to be iterated
	void SelectValid()
	{
		auto & header = spg.private_get_header_inf();
		auto & header_mask = spg.private_get_header_mask();

		while (chunk_id < header.size())
		{
			auto & mask = header_mask.get(chunk_id).mask;

			fill_chunk_block<dim,decltype(header),vector_blocks_exts> fcb(header,chunk_id);

			boost::mpl::for_each_ref<boost::mpl::range_c<int,0,dim>>(fcb);

			if (bx.Intersect(fcb.cnk_box,block_it) == true)
			{
				block_it -= header.get(chunk_id).pos.toPoint();
				break;
			}
			else
			{chunk_id += 1;}
		}
	}

public:

	// we create first a vector with the block sizes extended by the border (2*stencil_size per dimension)

	typedef typename vmpl_sum_constant<2*stencil_size,typename vector_blocks_exts::type>::type stop_border_vmpl;
	typedef typename vmpl_create_constant<dim,stencil_size>::type start_border_vmpl;

	typedef vector_blocks_exts vector_blocks_exts_type;
	typedef vector_ext vector_ext_type;

	static const int sizeBlock = vector_blocks_exts::size::value;
	static const int sizeBlockBord = vmpl_reduce_prod<stop_border_vmpl>::type::value;

	grid_key_sparse_dx_iterator_block_sub() {};

	grid_key_sparse_dx_iterator_block_sub(SparseGridType & spg,
	                                      const grid_key_dx<dim> & start,
	                                      const grid_key_dx<dim> & stop)
	:spg(spg),chunk_id(1),
	 start_(start),stop_(stop)
	{
		// Create border coefficients
		// (declaration of the block-sizes functor 'gbs' elided in the original listing)

		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(gbs);

		Box<dim,int> skinb;
		Box<dim,int> skinbb;

		size_t bc[dim];
		for (int i = 0 ; i < dim ; i ++)
		{
			skinb.setLow(i,0);
			skinb.setHigh(i,gbs.sz_tot[i]-1);
			skinbb.setLow(i,0);
			skinbb.setHigh(i,gbs.sz_ext_b[i]-1);
			bc[i] = NON_PERIODIC;
		}

		grid_sm<dim,void> g_smb(gbs.sz_ext_b);

		// Create block skin index

		grid_skin_iterator_bc<3> gsi_b(g_smb,skinbb,skinbb,bc);

		b_map.resize(g_smb.size());

		while (gsi_b.isNext())
		{
			auto p = gsi_b.get();

			block_skin.add(p);

			b_map.get(g_smb.LinId(p)) = block_skin.size() - 1;

			++gsi_b;
		}

		grid_sm<dim,void> g_sm(gbs.sz_tot);
		grid_skin_iterator_bc<3> gsi(g_sm,skinb,skinb,bc);

		while (gsi.isNext())
		{
			auto p = gsi.get();

			grid_key_dx<dim> sh;   // (declaration elided in the original listing, reconstructed from its use below)

			bord.add(g_sm.LinId(p));

			short offset = 0;
			int stride = 1;
			for (int i = 0 ; i < dim ; i++)
			{
				if (p.get(i) < stencil_size)
				{offset += (gbs.sz_block[i]-1)*stride;}
				else if (p.get(i) >= gbs.sz_tot[i] - stencil_size)
				{offset += 0;}
				else
				{offset += (p.get(i)-stencil_size)*stride;}

				sh.set_d(i,(p.get(i) + (gbs.sz_block[i] - stencil_size)) / gbs.sz_block[i]);
				stride *= gbs.sz_block[i];
			}

			offsets.add(offset);

			size_t bid = g_smb.LinId(sh);
			chunk_shifts.add(b_map.get(bid));

			++gsi;
		}

		for (size_t i = 0 ; i < dim ; i++)
		{
			bx.setLow(i,start.get(i));
			bx.setHigh(i,stop.get(i));
		}

		SelectValid();
	}

	//! Reinitialize the iterator from another sub-domain iterator
	inline void reinitialize(const grid_key_sparse_dx_iterator_sub<dim,vector_blocks_exts::size::value> & g_s_it)
	{
		spg = g_s_it.spg;
		chunk_id = g_s_it.chunk_id;
		start_ = g_s_it.start_;
		stop_ = g_s_it.stop_;
		bx = g_s_it.bx;
	}

	inline grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,SparseGridType,vector_blocks_exts> & operator++()
	{
		auto & header = spg.private_get_header_inf();

		chunk_id++;

		if (chunk_id < header.size())
		{
			SelectValid();
		}

		return *this;
	}

	//! Return true if there is a next chunk to process
	bool isNext()
	{
		auto & header = spg.private_get_header_inf();

		return chunk_id < header.size();
	}

	//! Return the starting point of the iteration domain
	const grid_key_dx<dim> & getStart() const
	{
		return start_;
	}

	//! Return the stop point of the iteration domain
	const grid_key_dx<dim> & getStop() const
	{
		return stop_;
	}


	template<unsigned int prop, typename T>
	void loadBlock(T arr[sizeBlock])
	{
		auto & header_mask = spg.private_get_header_mask();
		auto & header_inf = spg.private_get_header_inf();

		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template loadBlock<prop>(arr,spg,chunk_id);

		hm = &header_mask.get(chunk_id);
		hc = &header_inf.get(chunk_id);
	}

	template<unsigned int prop,typename T>
	void loadBlock(T arr[sizeBlock], unsigned char mask[sizeBlock])
	{
		auto & header_mask = spg.private_get_header_mask();
		auto & header_inf = spg.private_get_header_inf();

		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template loadBlock<prop>(arr,spg,chunk_id,mask);

		hm = &header_mask.get(chunk_id);
		hc = &header_inf.get(chunk_id);
	}

	template<unsigned int prop,typename T>
	void storeBlock(T arr[sizeBlock])
	{
		auto & header_mask = spg.private_get_header_mask();
		auto & header_inf = spg.private_get_header_inf();

		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template storeBlock<sizeBlock>(arr,spg,chunk_id);

		hm = &header_mask.get(chunk_id);
		hc = &header_inf.get(chunk_id);
	}


	template<unsigned int prop, typename NNtype, bool findNN, typename T>
	void loadBlockBorder(T arr[sizeBlockBord],unsigned char mask[sizeBlockBord])
	{
		auto & header_mask = spg.private_get_header_mask();
		auto & header_inf = spg.private_get_header_inf();

		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template loadBlock<sizeBlockBord>(arr,spg,chunk_id,mask);
		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template loadBorder<findNN,NNtype,sizeBlockBord>(arr,spg,chunk_id,bord,block_skin,chunk_shifts,offsets,mask,maps_blk);

		hm = &header_mask.get(chunk_id);
		hc = &header_inf.get(chunk_id);
	}


	//! Start point (included) of the inner block in the bordered array, for dimension i
	constexpr int start_b(int i) const
	{
		return block_it.getLow(i) + stencil_size;
	}

	//! Stop point (excluded) of the inner block in the bordered array, for dimension i
	constexpr int stop_b(int i) const
	{
		return block_it.getHigh(i) + 1 + stencil_size;
	}

	//! Start point (included) of the inner block in the non-bordered array, for dimension i
	constexpr int start(int i) const
	{
		return block_it.getLow(i);
	}

	//! Stop point (excluded) of the inner block in the non-bordered array, for dimension i
	constexpr int stop(int i) const
	{
		return block_it.getHigh(i) + 1;
	}

	//! Linearize a set of indexes into the block (without border)
	template<typename a, typename ...lT>
	__device__ __host__ inline size_t Lin(a v,lT...t) const
	{
#ifdef SE_CLASS1
		if (sizeof...(t)+1 > dim)
		{
			std::cerr << "Error: cannot linearize more indexes than the grid dimensionality" << "\n";
		}
#endif

		return v*vmpl_reduce_prod_stop<typename vector_blocks_exts::type,(int)dim - (int)sizeof...(t) - 2>::type::value + Lin(t...);
	}

	template<typename a> __device__ __host__ inline size_t Lin(a v) const
	{
		return v*vmpl_reduce_prod_stop<typename vector_blocks_exts::type,(int)dim - 2>::type::value;
	}

	//! Linearize a set of indexes into the block with border
	template<typename a, typename ...lT>
	__device__ __host__ inline size_t LinB(a v,lT...t) const
	{
#ifdef SE_CLASS1
		if (sizeof...(t)+1 > dim)
		{
			std::cerr << "Error: cannot linearize more indexes than the grid dimensionality" << "\n";
		}
#endif

		return v*vmpl_reduce_prod_stop<stop_border_vmpl,(int)dim - (int)sizeof...(t) - 2>::type::value + LinB(t...);
	}

	template<typename a> __device__ __host__ inline size_t LinB(a v) const
	{
		return v*vmpl_reduce_prod_stop<stop_border_vmpl,(int)dim - 2>::type::value;
	}

	//! Linearize a set of bordered-array indexes into the block without border (subtracting the stencil offset)
	template<typename a, typename ...lT>
	__device__ __host__ inline size_t LinB_off(a v,lT...t) const
	{
#ifdef SE_CLASS1
		if (sizeof...(t)+1 > dim)
		{
			std::cerr << "Error: cannot linearize more indexes than the grid dimensionality" << "\n";
		}
#endif

		return (v-stencil_size)*vmpl_reduce_prod_stop<typename vector_blocks_exts::type,(int)dim - (int)sizeof...(t) - 2>::type::value + LinB_off(t...);
	}

	template<typename a> __device__ __host__ inline size_t LinB_off(a v) const
	{
		return (v-stencil_size)*(vmpl_reduce_prod_stop<typename vector_blocks_exts::type,(int)dim - 2>::type::value);
	}

	//! Return true if the point (expressed in bordered-array coordinates) exists in the sparse grid
	template<typename ... ArgsType>
	bool exist(ArgsType ... args)
	{
		size_t l = LinB_off(args ...);

		return spg.exist_sub(*hm,l);
	}

	//! Return the id of the chunk the iterator is currently on
	int getChunkId()
	{
		return chunk_id;
	}
};


#endif /* SPARSEGRID_ITERATOR_BLOCK_HPP_ */
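
Usage sketch (not part of the header above): the block iterator is meant to be driven by stencil code that loads one chunk plus its border into a flat array, sweeps the interior with start_b/stop_b and LinB, and writes the result back with storeBlock. The snippet below illustrates that pattern with a 7-point Laplacian; it is a minimal sketch that assumes the sparse grid exposes a getBlockIterator<stencil_size>(start,stop) factory returning this iterator and that NNStar_c<3> names the star neighbourhood type, neither of which is shown in this file.

// Minimal sketch, not library code: 'getBlockIterator' and 'NNStar_c' are assumptions,
// everything else uses only the iterator interface defined above.
template<typename SGridType>
void laplacian_sketch(SGridType & grid, const grid_key_dx<3> & start, const grid_key_dx<3> & stop)
{
	auto it = grid.template getBlockIterator<1>(start,stop);     // stencil_size = 1 (assumed factory)

	unsigned char mask[decltype(it)::sizeBlockBord];
	double u[decltype(it)::sizeBlockBord];                       // input block + 1-point border
	double lap[decltype(it)::sizeBlock];                         // output block, no border

	while (it.isNext())
	{
		// load property 0 of the current chunk and its border, filling the existence mask
		it.template loadBlockBorder<0,NNStar_c<3>,false>(u,mask);

		// sweep the interior of the chunk portion selected by the iterator
		for (int k = it.start_b(2) ; k < it.stop_b(2) ; k++)
		{
			for (int j = it.start_b(1) ; j < it.stop_b(1) ; j++)
			{
				for (int i = it.start_b(0) ; i < it.stop_b(0) ; i++)
				{
					if (mask[it.LinB(i,j,k)] == 0)  {continue;}   // skip points that do not exist

					lap[it.LinB_off(i,j,k)] =
						  u[it.LinB(i+1,j,k)] + u[it.LinB(i-1,j,k)]
						+ u[it.LinB(i,j+1,k)] + u[it.LinB(i,j-1,k)]
						+ u[it.LinB(i,j,k+1)] + u[it.LinB(i,j,k-1)]
						- 6.0*u[it.LinB(i,j,k)];
				}
			}
		}

		// write the interior (border-free) result into property 1 of the same chunk
		it.template storeBlock<1>(lap);

		++it;
	}
}

Note how the two linearizers are used together: LinB indexes the bordered input array, while LinB_off maps the same bordered coordinates onto the border-free output block that storeBlock expects.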