OpenFPM_pdata  4.1.0
Project that contains the implementation of distributed structures
 
main_vic_petsc_opt.cpp
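This example implements the optimized 3D Vortex-in-Cell method: the incompressible Navier-Stokes equations are integrated in vorticity form. The vorticity is carried by particles, while the Poisson equations for the Helmholtz-Hodge projection and for the velocity are solved on a grid with PETSc; an MP4 kernel interpolates between particles and grid. The comments in main() refer to the numbered steps of this scheme.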
#include "config.h"

#ifdef HAVE_PETSC

//#define SE_CLASS1
//#define PRINT_STACKTRACE

#include "interpolation/interpolation.hpp"
#include "Grid/grid_dist_id.hpp"
#include "Vector/vector_dist.hpp"
#include "Matrix/SparseMatrix.hpp"
#include "Vector/Vector.hpp"
#include "FiniteDifference/FDScheme.hpp"
#include "Solvers/petsc_solver.hpp"
#include "interpolation/mp4_kernel.hpp"
#include "Solvers/petsc_solver_AMG_report.hpp"
#include "Decomposition/Distribution/SpaceDistribution.hpp"

constexpr int x = 0;
constexpr int y = 1;
constexpr int z = 2;
constexpr unsigned int phi = 0;
// The type of the grids (vector field: vorticity / velocity)
typedef grid_dist_id<3,float,aggregate<float[3]>,CartDecomposition<3,float,HeapMemory,memory_traits_lin,SpaceDistribution<3,float>>> grid_type;

// The type of the grids (scalar field: psi)
typedef grid_dist_id<3,float,aggregate<float>,CartDecomposition<3,float,HeapMemory,memory_traits_lin,SpaceDistribution<3,float>>> grid_type_s;

// The type of the particles
typedef vector_dist<3,
                    float,
                    aggregate<float[3],float[3],float[3],float[3],float[3]>,
                    CartDecomposition<3,float,HeapMemory,memory_traits_lin,SpaceDistribution<3,float>>,
                    HeapMemory,
                    memory_traits_lin> particles_type;

// The type of the particles used to save the vorticity magnitude
typedef vector_dist<3,
                    float,
                    aggregate<float>,
                    CartDecomposition<3,float,HeapMemory,memory_traits_lin,SpaceDistribution<3,float>>,
                    HeapMemory,
                    memory_traits_lin> particles_type_s;
// radius of the torus
float ringr1 = 1.0;
// radius of the core of the torus
float sigma = 1.0/3.523;
// Reynolds number (to use 7500.0 you need a 1600x400x400 grid)
//float tgtre = 7500.0;
float tgtre = 3000.0;

// Kinematic viscosity
float nu = 1.0/tgtre;

// Time step
// float dt = 0.0025 for Re 7500
float dt = 0.0125;

#ifdef TEST_RUN
const unsigned int nsteps = 10;
#else
const unsigned int nsteps = 10001;
#endif

// All the properties by index
constexpr unsigned int vorticity = 0;
constexpr unsigned int velocity = 0;
constexpr unsigned int p_vel = 1;
constexpr int rhs_part = 2;
constexpr unsigned int old_vort = 3;
constexpr unsigned int old_pos = 4;

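// Sanity check used throughout the run: the vorticity field must stay
// (numerically) divergence-free. The divergence is approximated with
// second-order central differences,
//
//   (div w)_ijk ~ (w_x[i+1]-w_x[i-1])/(2 dx)
//               + (w_y[j+1]-w_y[j-1])/(2 dy)
//               + (w_z[k+1]-w_z[k-1])/(2 dz),
//
// and the maximum over the grid and the integral of each component are
// reduced across all processors.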
template<typename grid> void calc_and_print_max_div_and_int(grid & g_vort)
{
    g_vort.template ghost_get<vorticity>();
    auto it5 = g_vort.getDomainIterator();

    double max_vort = 0.0;

    double int_vort[3] = {0.0,0.0,0.0};

    while (it5.isNext())
    {
        auto key = it5.get();

        double tmp;

        tmp = 1.0f/g_vort.spacing(x)/2.0f*(g_vort.template get<vorticity>(key.move(x,1))[x] - g_vort.template get<vorticity>(key.move(x,-1))[x] ) +
              1.0f/g_vort.spacing(y)/2.0f*(g_vort.template get<vorticity>(key.move(y,1))[y] - g_vort.template get<vorticity>(key.move(y,-1))[y] ) +
              1.0f/g_vort.spacing(z)/2.0f*(g_vort.template get<vorticity>(key.move(z,1))[z] - g_vort.template get<vorticity>(key.move(z,-1))[z] );

        int_vort[x] += g_vort.template get<vorticity>(key)[x];
        int_vort[y] += g_vort.template get<vorticity>(key)[y];
        int_vort[z] += g_vort.template get<vorticity>(key)[z];

        if (tmp > max_vort)
            max_vort = tmp;

        ++it5;
    }

    Vcluster<> & v_cl = create_vcluster();
    v_cl.max(max_vort);
    v_cl.sum(int_vort[0]);
    v_cl.sum(int_vort[1]);
    v_cl.sum(int_vort[2]);
    v_cl.execute();

    if (v_cl.getProcessUnitID() == 0)
    {std::cout << "Max div for vorticity " << max_vort << " Integral: " << int_vort[0] << " " << int_vort[1] << " " << int_vort[2] << std::endl;}
}

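// Vortex-ring initialization: a Gaussian vorticity profile with core
// radius sigma is laid along a torus of radius ringr1, its strength set
// by the circulation gamma = nu * Re. The random coefficients ak/bk are
// prepared to add noise to the ring but are not applied below.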
void init_ring(grid_type & gr, const Box<3,float> & domain)
{
    // To add some noise to the vortex ring we create two random vectors
    constexpr int nk = 32;

    float ak[nk];
    float bk[nk];

    for (size_t i = 0 ; i < nk ; i++)
    {
        ak[i] = (float)rand()/RAND_MAX;   // cast added: integer division would always give 0
        bk[i] = (float)rand()/RAND_MAX;
    }

    // We calculate the circulation gamma
    float gamma = nu * tgtre;
    float rinv2 = 1.0f/(sigma*sigma);
    float max_vorticity = gamma*rinv2/M_PI;

    // We go through the grid to initialize the vortex
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key_d = it.get();
        auto key = it.getGKey(key_d);

        float tx = (key.get(x)-2)*gr.spacing(x) + domain.getLow(x);
        float ty = (key.get(y)-2)*gr.spacing(y) + domain.getLow(y);
        float tz = (key.get(z)-2)*gr.spacing(z) + domain.getLow(z);
        float theta1 = atan2((ty-domain.getHigh(1)/2.0f),(tz-domain.getHigh(2)/2.0f));

        float rad1r = sqrt((ty-domain.getHigh(1)/2.0f)*(ty-domain.getHigh(1)/2.0f) + (tz-domain.getHigh(2)/2.0f)*(tz-domain.getHigh(2)/2.0f)) - ringr1;
        float rad1t = tx - 1.0f;
        float rad1sq = rad1r*rad1r + rad1t*rad1t;
        float radstr = -exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
        gr.template get<vorticity>(key_d)[x] = 0.0f;
        gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
        gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);

        // kill the axis term (note: this second block overwrites the
        // assignments above with the opposite sign of radstr)

        float rad1r_ = sqrt((ty-domain.getHigh(1)/2.0f)*(ty-domain.getHigh(1)/2.0f) + (tz-domain.getHigh(2)/2.0f)*(tz-domain.getHigh(2)/2.0f)) + ringr1;
        float rad1sqTILDA = rad1sq*rinv2;
        radstr = exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
        gr.template get<vorticity>(key_d)[x] = 0.0f;
        gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
        gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);

        ++it;
    }

    gr.write("Initialization");
    return;
}

// Definition of the system of equations solved by FDScheme
struct poisson_nn_helm
{
    //! Number of dimensions of the problem
    static const unsigned int dims = 3;
    //! We have only one scalar unknown
    static const unsigned int nvar = 1;
    //! specify the boundary conditions
    static const bool boundary[];
    //! type of the spatial coordinates
    typedef float stype;
    //! grid that stores psi
    typedef grid_type_s b_grid;
    //! Sparse matrix used to solve the linear system (we use PETSC)
    typedef SparseMatrix<double,int,PETSC_BASE> SparseMatrix_type;
    //! Vector to solve the system (PETSC)
    typedef Vector<double,PETSC_BASE> Vector_type;
    //! It is a normal grid
    static const int grid_type = NORMAL_GRID;
};

const bool poisson_nn_helm::boundary[] = {PERIODIC,PERIODIC,PERIODIC};

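// Helmholtz-Hodge projection: make the vorticity divergence-free by
// solving the Poisson equation
//
//   laplacian(psi) = div(w)
//
// and correcting the field with w <- w - grad(psi).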
void helmotz_hodge_projection(grid_type_s & psi,
                              FDScheme<poisson_nn_helm> & fd,
                              grid_type & gr,
                              const Box<3,float> & domain,
                              petsc_solver<double> & solver,
                              Vector<double,PETSC_BASE> & x_,
                              bool init)
{
    // ghost get
    gr.template ghost_get<vorticity>();

    // Calculate the divergence of the vortex field
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        psi.template get<phi>(key) = 1.0f/gr.spacing(x)/2.0f*(gr.template get<vorticity>(key.move(x,1))[x] - gr.template get<vorticity>(key.move(x,-1))[x] ) +
                                     1.0f/gr.spacing(y)/2.0f*(gr.template get<vorticity>(key.move(y,1))[y] - gr.template get<vorticity>(key.move(y,-1))[y] ) +
                                     1.0f/gr.spacing(z)/2.0f*(gr.template get<vorticity>(key.move(z,1))[z] - gr.template get<vorticity>(key.move(z,-1))[z] );

        ++it;
    }

    calc_and_print_max_div_and_int(gr);

    // Impose a new right-hand side, re-using the assembled FDScheme
    fd.new_b();
    fd.template impose_dit_b<0>(psi,psi.getDomainIterator());

    timer tm_solve;
    if (init == true)
    {
        // Use BiCGStab (KSPBCGS) as the Krylov method of the linear solver
        solver.setSolver(KSPBCGS);

        // Set the absolute tolerance to determine that the method is converged
        solver.setAbsTol(0.0001);

        // Set the maximum number of iterations
        solver.setMaxIter(500);

#ifdef USE_MULTIGRID

        solver.setPreconditioner(PCHYPRE_BOOMERAMG);
        solver.setPreconditionerAMG_nl(6);
        solver.setPreconditionerAMG_maxit(1);
        solver.setPreconditionerAMG_relax("SOR/Jacobi");
        solver.setPreconditionerAMG_cycleType("V",0,4);
        solver.setPreconditionerAMG_coarsenNodalType(0);
        solver.setPreconditionerAMG_coarsen("HMIS");

#endif

        tm_solve.start();

        // Give A and b to the solver, get back x, the solution
        solver.solve(fd.getA(),x_,fd.getB());

        tm_solve.stop();
    }
    else
    {
        tm_solve.start();
        solver.solve(x_,fd.getB());
        tm_solve.stop();
    }

    // copy the solution x into the grid psi
    fd.template copy<phi>(x_,psi);

    psi.template ghost_get<phi>();

    // Correct the vorticity to make it divergence-free

    auto it2 = gr.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        gr.template get<vorticity>(key)[x] -= 1.0f/2.0f/psi.spacing(x)*(psi.template get<phi>(key.move(x,1))-psi.template get<phi>(key.move(x,-1)));
        gr.template get<vorticity>(key)[y] -= 1.0f/2.0f/psi.spacing(y)*(psi.template get<phi>(key.move(y,1))-psi.template get<phi>(key.move(y,-1)));
        gr.template get<vorticity>(key)[z] -= 1.0f/2.0f/psi.spacing(z)*(psi.template get<phi>(key.move(z,1))-psi.template get<phi>(key.move(z,-1)));

        ++it2;
    }

    calc_and_print_max_div_and_int(gr);
}

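// Remeshing: as particles move with the flow they drift away from a
// regular arrangement, which degrades the interpolation; the particle set
// is therefore rebuilt at the grid nodes, each particle carrying the grid
// vorticity at its node.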
void remesh(particles_type & vd, grid_type & gr, Box<3,float> & domain)
{
    // Remove all particles because we reinitialize them in a grid-like position
    vd.clear();

    // Get a grid iterator
    auto git = vd.getGridIterator(gr.getGridInfo().getSize());

    // For each grid point
    while (git.isNext())
    {
        // Get the grid point in global coordinates (key). p is in local coordinates
        auto p = git.get();
        auto key = git.get_dist();

        // Add the particle
        vd.add();

        // Assign the position to the particle in a grid-like way
        vd.getLastPos()[x] = gr.spacing(x)*p.get(x) + domain.getLow(x);
        vd.getLastPos()[y] = gr.spacing(y)*p.get(y) + domain.getLow(y);
        vd.getLastPos()[z] = gr.spacing(z)*p.get(z) + domain.getLow(z);

        // Initialize the vorticity
        vd.template getLastProp<vorticity>()[x] = gr.template get<vorticity>(key)[x];
        vd.template getLastProp<vorticity>()[y] = gr.template get<vorticity>(key)[y];
        vd.template getLastProp<vorticity>()[z] = gr.template get<vorticity>(key)[z];

        // next grid point
        ++git;
    }

    // redistribute the particles
    vd.map();
}

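// Velocity from vorticity: for each component i a Poisson equation with
// the vorticity component as right-hand side is solved for the vector
// potential phi (reusing phi_s[i] from the previous time step as initial
// guess), and the velocity is then taken as the curl of phi, discretized
// with central differences.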
void comp_vel(grid_type_s & gr_ps,
              grid_type & phi_v,
              FDScheme<poisson_nn_helm> & fd,
              Box<3,float> & domain,
              grid_type & g_vort,
              grid_type & g_vel,
              Vector<double,PETSC_BASE> (& phi_s)[3],
              petsc_solver<double> & solver)
{
    // We calculate and print the maximum divergence of the vorticity
    // and its integral
    calc_and_print_max_div_and_int(g_vort);

    // For each component solve a Poisson equation
    for (size_t i = 0 ; i < 3 ; i++)
    {
        // Copy the vorticity component i into gr_ps
        auto it2 = gr_ps.getDomainIterator();

        while (it2.isNext())
        {
            auto key = it2.get();

            // copy
            gr_ps.get<vorticity>(key) = g_vort.template get<vorticity>(key)[i];

            ++it2;
        }

        // impose the Poisson equation using gr_ps = vorticity as the right-hand side (on the full domain)
        fd.new_b();
        fd.template impose_dit_b<phi>(gr_ps,gr_ps.getDomainIterator());

        solver.setAbsTol(0.01);

        // Get the vector that represents the right-hand side
        Vector<double,PETSC_BASE> & b = fd.getB();

        timer tm_solve;
        tm_solve.start();

        // Solve the system; phi_s[i], calculated in the previous
        // time step, is passed as the initial guess
        solver.solve(phi_s[i],b);

        tm_solve.stop();

        // Calculate the residual

        solError serr;
        serr = solver.get_residual_error(phi_s[i],b);

        Vcluster<> & v_cl = create_vcluster();
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Solved component " << i << " Error: " << serr.err_inf << std::endl;}

        // copy the solution to the grid
        fd.template copy<phi>(phi_s[i],gr_ps);

        auto it3 = gr_ps.getDomainIterator();

        // copy the i-th component of phi into the vector grid phi_v
        while (it3.isNext())
        {
            auto key = it3.get();

            phi_v.get<velocity>(key)[i] = gr_ps.get<phi>(key);

            ++it3;
        }
    }

    phi_v.ghost_get<phi>();

    float inv_dx = 0.5f / g_vort.spacing(0);
    float inv_dy = 0.5f / g_vort.spacing(1);
    float inv_dz = 0.5f / g_vort.spacing(2);

    auto it3 = phi_v.getDomainIterator();

    // calculate the velocity from the curl of phi
    while (it3.isNext())
    {
        auto key = it3.get();

        float phi_xy = (phi_v.get<phi>(key.move(y,1))[x] - phi_v.get<phi>(key.move(y,-1))[x])*inv_dy;
        float phi_xz = (phi_v.get<phi>(key.move(z,1))[x] - phi_v.get<phi>(key.move(z,-1))[x])*inv_dz;
        float phi_yx = (phi_v.get<phi>(key.move(x,1))[y] - phi_v.get<phi>(key.move(x,-1))[y])*inv_dx;
        float phi_yz = (phi_v.get<phi>(key.move(z,1))[y] - phi_v.get<phi>(key.move(z,-1))[y])*inv_dz;
        float phi_zx = (phi_v.get<phi>(key.move(x,1))[z] - phi_v.get<phi>(key.move(x,-1))[z])*inv_dx;
        float phi_zy = (phi_v.get<phi>(key.move(y,1))[z] - phi_v.get<phi>(key.move(y,-1))[z])*inv_dy;

        g_vel.template get<velocity>(key)[x] = phi_zy - phi_yz;
        g_vel.template get<velocity>(key)[y] = phi_xz - phi_zx;
        g_vel.template get<velocity>(key)[z] = phi_yx - phi_xy;

        ++it3;
    }
}


template<unsigned int prp> void set_zero(grid_type & gr)
{
    auto it = gr.getDomainGhostIterator();

    // zero the property prp on the domain plus the ghost part
    while (it.isNext())
    {
        auto key = it.get();

        gr.template get<prp>(key)[0] = 0.0;
        gr.template get<prp>(key)[1] = 0.0;
        gr.template get<prp>(key)[2] = 0.0;

        ++it;
    }
}


template<unsigned int prp> void set_zero(particles_type & vd)
{
    auto it = vd.getDomainIterator();

    // zero the property prp on all particles
    while (it.isNext())
    {
        auto key = it.get();

        vd.template getProp<prp>(key)[0] = 0.0;
        vd.template getProp<prp>(key)[1] = 0.0;
        vd.template getProp<prp>(key)[2] = 0.0;

        ++it;
    }
}

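// Right-hand side of the vorticity transport equation,
//
//   Dw/Dt = nu * laplacian(w) + (w . grad) u,
//
// discretized with central differences: the fac1..fac3 prefactors build
// the Laplacian stencil of the viscous term, fac4..fac6 the
// vortex-stretching term.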
template<typename grid> void calc_rhs(grid & g_vort, grid & g_vel, grid & g_dwp)
{
    // useful constant
    constexpr int rhs = 0;

    // calculate several pre-factors for the stencil finite
    // difference
    float fac1 = 2.0f*nu/(g_vort.spacing(0)*g_vort.spacing(0));
    float fac2 = 2.0f*nu/(g_vort.spacing(1)*g_vort.spacing(1));
    float fac3 = 2.0f*nu/(g_vort.spacing(2)*g_vort.spacing(2));

    float fac4 = 0.5f/(g_vort.spacing(0));
    float fac5 = 0.5f/(g_vort.spacing(1));
    float fac6 = 0.5f/(g_vort.spacing(2));

    auto it = g_dwp.getDomainIterator();

    g_vort.template ghost_get<vorticity>();

    while (it.isNext())
    {
        auto key = it.get();

        g_dwp.template get<rhs>(key)[x] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[x]+g_vort.template get<vorticity>(key.move(x,-1))[x])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[x]+g_vort.template get<vorticity>(key.move(y,-1))[x])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[x]+g_vort.template get<vorticity>(key.move(z,-1))[x])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[x]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[x] - g_vel.template get<velocity>(key.move(x,-1))[x])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[x] - g_vel.template get<velocity>(key.move(y,-1))[x])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[x] - g_vel.template get<velocity>(key.move(z,-1))[x]);

        g_dwp.template get<rhs>(key)[y] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[y]+g_vort.template get<vorticity>(key.move(x,-1))[y])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[y]+g_vort.template get<vorticity>(key.move(y,-1))[y])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[y]+g_vort.template get<vorticity>(key.move(z,-1))[y])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[y]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[y] - g_vel.template get<velocity>(key.move(x,-1))[y])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[y] - g_vel.template get<velocity>(key.move(y,-1))[y])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[y] - g_vel.template get<velocity>(key.move(z,-1))[y]);


        g_dwp.template get<rhs>(key)[z] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[z]+g_vort.template get<vorticity>(key.move(x,-1))[z])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[z]+g_vort.template get<vorticity>(key.move(y,-1))[z])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[z]+g_vort.template get<vorticity>(key.move(z,-1))[z])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[z]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[z] - g_vel.template get<velocity>(key.move(x,-1))[z])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[z] - g_vel.template get<velocity>(key.move(y,-1))[z])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[z] - g_vel.template get<velocity>(key.move(z,-1))[z]);

        ++it;
    }
}

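// Two-stage second-order Runge-Kutta (midpoint) integration: rk_step1
// saves vorticity and positions and advances them by dt/2; after the
// right-hand side is re-evaluated at the midpoint, rk_step2 completes
// the full dt step starting from the saved state.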
void rk_step1(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        // Save the old vorticity
        particles.getProp<old_vort>(key)[x] = particles.getProp<vorticity>(key)[x];
        particles.getProp<old_vort>(key)[y] = particles.getProp<vorticity>(key)[y];
        particles.getProp<old_vort>(key)[z] = particles.getProp<vorticity>(key)[z];

        particles.getProp<vorticity>(key)[x] = particles.getProp<vorticity>(key)[x] + 0.5f * dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<vorticity>(key)[y] + 0.5f * dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<vorticity>(key)[z] + 0.5f * dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        // Save the old position
        particles.getProp<old_pos>(key)[x] = particles.getPos(key)[x];
        particles.getProp<old_pos>(key)[y] = particles.getPos(key)[y];
        particles.getProp<old_pos>(key)[z] = particles.getPos(key)[z];

        particles.getPos(key)[x] = particles.getPos(key)[x] + 0.5f * dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getPos(key)[y] + 0.5f * dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getPos(key)[z] + 0.5f * dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}


void rk_step2(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        particles.getProp<vorticity>(key)[x] = particles.getProp<old_vort>(key)[x] + dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<old_vort>(key)[y] + dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<old_vort>(key)[z] + dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        particles.getPos(key)[x] = particles.getProp<old_pos>(key)[x] + dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getProp<old_pos>(key)[y] + dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getProp<old_pos>(key)[z] + dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}

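// One half of the time step (steps 4-7, respectively 9-12): interpolate
// the particle vorticity onto the grid (p2m), compute the velocity from
// the vorticity, evaluate the right-hand side of the vorticity equation,
// and interpolate both rhs and velocity back onto the particles (m2p).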
template<typename grid, typename vector> void do_step(grid_type_s & psi,
                                                      grid_type & phi_v,
                                                      FDScheme<poisson_nn_helm> & fd,
                                                      vector & particles,
                                                      grid & g_vort,
                                                      grid & g_vel,
                                                      grid & g_dvort,
                                                      Box<3,float> & domain,
                                                      interpolate<particles_type,grid_type,mp4_kernel<float>> & inte,
                                                      Vector<double,PETSC_BASE> (& phi_s)[3],
                                                      petsc_solver<double> & solver)
{
    constexpr int rhs = 0;

    // vorticity: particles -> grid
    set_zero<vorticity>(g_vort);
    inte.template p2m<vorticity,vorticity>(particles,g_vort);

    g_vort.template ghost_put<add_,vorticity>();

    // Calculate velocity from vorticity
    comp_vel(psi,phi_v,fd,domain,g_vort,g_vel,phi_s,solver);
    calc_rhs(g_vort,g_vel,g_dvort);

    // rhs and velocity: grid -> particles
    g_dvort.template ghost_get<rhs>();
    g_vel.template ghost_get<velocity>();
    inte.template m2p<rhs,rhs_part>(g_dvort,particles);
    inte.template m2p<velocity,p_vel>(g_vel,particles);
}

template<typename vector, typename grid> void check_point_and_save(vector & particles,
                                                                   grid & g_vort,
                                                                   grid & g_vel,
                                                                   grid & g_dvort,
                                                                   size_t i)
{
    Vcluster<> & v_cl = create_vcluster();

    if (v_cl.getProcessingUnits() < 24)
    {
        particles.write("part_out_" + std::to_string(i),VTK_WRITER | FORMAT_BINARY);
        g_vort.template ghost_get<vorticity>();
        g_vel.template ghost_get<velocity>();
        g_vel.write_frame("grid_velocity",i, VTK_WRITER | FORMAT_BINARY);
        g_vort.write_frame("grid_vorticity",i, VTK_WRITER | FORMAT_BINARY);
    }

    // In order to reduce the size of the saved data we apply a threshold.
    // We only save particles with vorticity higher than 0.1
    particles_type_s part_save(particles.getDecomposition(),0);

    auto it_s = particles.getDomainIterator();

    while (it_s.isNext())
    {
        auto p = it_s.get();

        float vort_magn = sqrt(particles.template getProp<vorticity>(p)[0] * particles.template getProp<vorticity>(p)[0] +
                               particles.template getProp<vorticity>(p)[1] * particles.template getProp<vorticity>(p)[1] +
                               particles.template getProp<vorticity>(p)[2] * particles.template getProp<vorticity>(p)[2]);

        if (vort_magn < 0.1)
        {
            ++it_s;
            continue;
        }

        part_save.add();

        part_save.getLastPos()[0] = particles.getPos(p)[0];
        part_save.getLastPos()[1] = particles.getPos(p)[1];
        part_save.getLastPos()[2] = particles.getPos(p)[2];

        part_save.template getLastProp<0>() = vort_magn;

        ++it_s;
    }

    part_save.map();

    // Save an HDF5 file for checkpoint restart; the name carries the step
    // number so that the restart code in main() can load it back
    particles.save("check_point_" + std::to_string(i));
}

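// Entry point. An optional command-line argument restarts the run from a
// previously saved step, e.g. (the binary name here is just an example):
//
//   mpirun -np 8 ./main_vic_petsc_opt 400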
int main(int argc, char* argv[])
{
    // Initialize
    openfpm_init(&argc,&argv);
    {
        // Domain, a rectangle
        // For the grid 1600x400x400 use
        // Box<3,float> domain({0.0,0.0,0.0},{22.0,5.57,5.57});
        Box<3,float> domain({0.0,0.0,0.0},{3.57,3.57,3.57});

        // Ghost (not important in this case but required)
        Ghost<3,long int> g(2);

        // 128 grid points in each direction
        // (for Re = 7500 use: long int sz[] = {1600,400,400};)
        long int sz[] = {128,128,128};
        size_t szu[] = {(size_t)sz[0],(size_t)sz[1],(size_t)sz[2]};

        periodicity<3> bc = {{PERIODIC,PERIODIC,PERIODIC}};

        grid_type g_vort(szu,domain,g,bc);
        grid_type g_vel(g_vort.getDecomposition(),szu,g);
        grid_type g_dvort(g_vort.getDecomposition(),szu,g);
        particles_type particles(g_vort.getDecomposition(),0);

        // Constructing an FDScheme is expensive, so we construct it once here.
        // Creating a distributed grid is also expensive, so the temporal grids
        // (the scalar psi and the vectorial phi_v) are created once outside the loop.
        grid_type_s psi(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);
        grid_type phi_v(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);

        // In order to create a matrix that represents the Poisson equation we
        // have to indicate the maximum extension of the stencil, and we need an
        // extra layer of points in case we have to impose particular boundary
        // conditions.
        Ghost<3,long int> stencil_max(2);

        // We define a field phi_f
        typedef Field<phi,poisson_nn_helm> phi_f;

        // We assemble the Poisson equation as the Laplacian of phi using the
        // central scheme (both derivatives use the central scheme, not
        // forward composed with backward as usual)
        typedef Lap<phi_f,poisson_nn_helm,CENTRAL_SYM> poisson;

        FDScheme<poisson_nn_helm> fd(stencil_max, domain, psi);

        fd.template impose_dit<0>(poisson(),psi,psi.getDomainIterator());

        // They store the solutions used to compute the velocity (phi_s) and
        // the projection (x_); they are used as initial guess every time we
        // call the solver
        Vector<double,PETSC_BASE> phi_s[3];
        Vector<double,PETSC_BASE> x_;

        // Parallel object
        Vcluster<> & v_cl = create_vcluster();

        // print out the spacing of the grid
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "SPACING " << g_vort.spacing(0) << " " << g_vort.spacing(1) << " " << g_vort.spacing(2) << std::endl;}

        // initialize the ring (step 1)
        init_ring(g_vort,domain);

        x_.resize(g_vort.size(),g_vort.getLocalDomainSize());
        x_.setZero();

        // Create a PETSc solver to get the solution x
        petsc_solver<double> solver;

        // Do the Helmholtz projection (step 2)
        helmotz_hodge_projection(psi,fd,g_vort,domain,solver,x_,true);

        // initialize the particles on a mesh (step 3)
        remesh(particles,g_vort,domain);

        // Get the total number of particles
        size_t tot_part = particles.size_local();
        v_cl.sum(tot_part);
        v_cl.execute();

        // Now we set the size of phi_s
        phi_s[0].resize(g_vort.size(),g_vort.getLocalDomainSize());
        phi_s[1].resize(g_vort.size(),g_vort.getLocalDomainSize());
        phi_s[2].resize(g_vort.size(),g_vort.getLocalDomainSize());

        // And we initially set it to zero
        phi_s[0].setZero();
        phi_s[1].setZero();
        phi_s[2].setZero();

        // We create the interpolation object outside the loop to avoid
        // recreating it at every cycle. Internally this object creates some
        // data structures to optimize the mapping from particle position to
        // sub-domain. If there is no re-balancing it is safe to reuse it.
        interpolate<particles_type,grid_type,mp4_kernel<float>> inte(particles,g_vort);

        // With more than 24 cores we rely on the HDF5 checkpoint-restart file
        if (v_cl.getProcessingUnits() < 24)
        {g_vort.write("grid_vorticity_init", VTK_WRITER | FORMAT_BINARY);}


        // Before entering the loop we check if we want to restart from
        // a previous simulation
        size_t i = 0;

        // if we have an argument
        if (argc > 1)
        {
            // take that argument
            std::string restart(argv[1]);

            // convert it into a number
            i = std::stoi(restart);

            // load the file
            particles.load("check_point_" + std::to_string(i));

            // Print to inform that we are restarting from a
            // particular position
            if (v_cl.getProcessUnitID() == 0)
            {std::cout << "Restarting from " << i << std::endl;}
        }

        // Time Integration
        for ( ; i < nsteps ; i++)
        {
            // do steps 4-5-6-7
            do_step(psi,phi_v,fd,particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

            // do step 8
            rk_step1(particles);

            // do steps 9-10-11-12
            do_step(psi,phi_v,fd,particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

            // do step 13
            rk_step2(particles);

            // do step 14
            set_zero<vorticity>(g_vort);
            inte.template p2m<vorticity,vorticity>(particles,g_vort);
            g_vort.template ghost_put<add_,vorticity>();

            // Helmholtz-Hodge projection
            helmotz_hodge_projection(psi,fd,g_vort,domain,solver,x_,false);

            remesh(particles,g_vort,domain);

            // print the step number
            if (v_cl.getProcessUnitID() == 0)
            {std::cout << "Step " << i << std::endl;}

            // every 100 steps write the output
            if (i % 100 == 0) {check_point_and_save(particles,g_vort,g_vel,g_dvort,i);}

        }
    }

    openfpm_finalize();
}

#else

int main(int argc, char* argv[])
{
    return 0;
}

#endif
Definitions of the classes and members referenced in this listing:

Box - This class represents an N-dimensional box. (Box.hpp:61)
CartDecomposition - This class decomposes a space into sub-sub-domains and distributes them across processors.
FDScheme - Finite Differences scheme. (FDScheme.hpp:127)
    getA() - produce the matrix A
    getB() - produce the vector b
    new_b() - in case we want to impose a new b re-using the FDScheme, we have to call this function (FDScheme.hpp:778)
Field - field of an equation. (eq.hpp:83)
HeapMemory - This class allocates and destroys CPU memory.
Lap - Laplacian, second order on h (spacing). (Laplacian.hpp:23)
SpaceDistribution - Class that distributes sub-sub-domains across processors using a Hilbert curve to divide the space.
SparseMatrix - Sparse matrix implementation.
Vector - Vector implementation; stub object when OpenFPM is compiled with no linear algebra support. (Vector.hpp:40)
    resize(size_t row, size_t row_n) - resize the vector (Vector.hpp:97)
Vcluster - Implementation of the VCluster class. (VCluster.hpp:59)
    execute() - execute all the requests
    sum(T & num) - sum the numbers across all processors and get the result
    max(T & num) - get the maximum across all processors (reduction with the infinity norm)
    getProcessUnitID() - get the process unit id
    getProcessingUnits() - get the total number of processors
grid_dist_id - This is a distributed grid.
    getGridInfo() - get an object containing the grid information
    ghost_get<prp>() - synchronize the ghost parts
    getDomainIterator() - iterator that spans the full grid domain (each processor spans its local domain)
    getDomainGhostIterator() - iterator that spans the grid domain plus the ghost part
    spacing(size_t i) - get the spacing of the grid in direction i
    get<p>(key) - get a reference to the selected element
    write(output, opt) - write the distributed grid information
grid_sm::getSize() - return the size of the grid as an array. (grid_sm.hpp:760)
interpolate - Main class for interpolation, particle to mesh (p2m) and mesh to particle (m2p).
petsc_solver - In case T does not match the PETSC precision, compilation creates a stub structure.
    solve(A, b) - solve the linear system
timer - Class for CPU time benchmarking. (timer.hpp:28)
    start() - start the timer (timer.hpp:90)
    stop() - stop the timer (timer.hpp:119)
vect_dist_key_dx::get() - get the actual key
vector_dist - Distributed vector.
    size_local() - return the local size of the vector
    getProp<id>(key) - get the property of an element
    getPos(key) - get the position of an element
    getGridIterator(sz) - grid-like iterator over the domain
    getDomainIterator() - iterator that traverses the particles in the domain
    map(opt) - move all the particles that do not belong to the local processor to the respective processor
    clear() - remove all the elements
    add() - add a local particle
    getLastPos() - get the position of the last element
    write(out, opt) - output particle positions and properties
    save(filename) - save the distributed vector to an HDF5 file
    load(filename) - load the distributed vector from an HDF5 file
    getDecomposition() - get the decomposition
aggregate - aggregate of properties; from a list of objects it creates a struct that follows the OpenFPM native structure.
memory_traits_lin - transforms the boost::fusion::vector into a memory specification (memory_traits).
periodicity - boundary conditions. (common.hpp:22)
poisson_nn_helm members:
    dims - number of dimensions of the problem
    nvar - we have only one scalar unknown
    boundary[] - specify the boundary conditions
    stype - type of the spatial coordinates
    b_grid - grid that stores psi
    SparseMatrix_type - SparseMatrix<double,int,PETSC_BASE>, the sparse matrix used to solve the linear system (we use PETSC)
    Vector_type - Vector<double,PETSC_BASE>, the vector used to solve the system (PETSC)
(zero meta-function) - returns a compatible zero element.
solError - It contains statistics of the error of the calculated solution.
    err_inf - infinity norm of the error