OpenFPM_pdata 1.1.0
Project that contains the implementation of distributed structures
main_vic_petsc_opt.cpp
//#define SE_CLASS1
//#define PRINT_STACKTRACE

#include "interpolation/interpolation.hpp"
#include "Grid/grid_dist_id.hpp"
#include "Vector/vector_dist.hpp"
#include "Matrix/SparseMatrix.hpp"
#include "Vector/Vector.hpp"
#include "FiniteDifference/FDScheme.hpp"
#include "Solvers/petsc_solver.hpp"
#include "interpolation/mp4_kernel.hpp"
#include "Solvers/petsc_solver_AMG_report.hpp"
#include "Decomposition/Distribution/SpaceDistribution.hpp"

constexpr int x = 0;
constexpr int y = 1;
constexpr int z = 2;
constexpr unsigned int phi = 0;

// The type of the grids (vector fields on a distributed grid).
// NOTE: the typedefs in this block are collapsed in the generated listing;
// they are reconstructed here from how the types are used in the rest of
// the example.
typedef grid_dist_id<3,float,aggregate<float[3]>,CartDecomposition<3,float,HeapMemory,SpaceDistribution<3,float>>> grid_type;

// The type of the scalar grids
typedef grid_dist_id<3,float,aggregate<float>,CartDecomposition<3,float,HeapMemory,SpaceDistribution<3,float>>> grid_type_s;

// The type of the particles (five float[3] properties: vorticity, p_vel,
// rhs_part, old_vort, old_pos)
typedef vector_dist<3,float,aggregate<float[3],float[3],float[3],float[3],float[3]>,CartDecomposition<3,float,HeapMemory,SpaceDistribution<3,float>>> particles_type;

// The type of the particles used to save the vorticity magnitude
typedef vector_dist<3,float,aggregate<float>,CartDecomposition<3,float,HeapMemory,SpaceDistribution<3,float>>> particles_type_s;

// radius of the torus
float ringr1 = 1.0;
// radius of the core of the torus
float sigma = 1.0/3.523;
// Reynolds number (to use 7500.0 you need a 1600x400x400 grid)
//float tgtre = 7500.0;
float tgtre = 3000.0;

// Kinematic viscosity
float nu = 1.0/tgtre;

// Time step
// float dt = 0.0025 for Re 7500
float dt = 0.0125;

#ifdef TEST_RUN
const unsigned int nsteps = 10;
#else
const unsigned int nsteps = 10001;
#endif

// All the properties by index
constexpr unsigned int vorticity = 0;
constexpr unsigned int velocity = 0;
constexpr unsigned int p_vel = 1;
constexpr int rhs_part = 2;
constexpr unsigned int old_vort = 3;
constexpr unsigned int old_pos = 4;

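/*! Sanity check used throughout the example (a reading of the stencil
 * below): it computes the discrete divergence of the vorticity with
 * second-order central differences,
 *
 * \f$ (\nabla \cdot w)_k = \sum_{d \in \{x,y,z\}} \frac{w_d(k + e_d) - w_d(k - e_d)}{2 h_d} \f$
 *
 * reduces the maximum across processors, and prints it together with the
 * componentwise integral (sum) of the vorticity. For a divergence-free
 * field the maximum should stay close to zero.
 */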
template<typename grid> void calc_and_print_max_div_and_int(grid & g_vort)
{
    g_vort.template ghost_get<vorticity>();
    auto it5 = g_vort.getDomainIterator();

    double max_vort = 0.0;

    double int_vort[3] = {0.0,0.0,0.0};

    while (it5.isNext())
    {
        auto key = it5.get();

        double tmp;

        tmp = 1.0f/g_vort.spacing(x)/2.0f*(g_vort.template get<vorticity>(key.move(x,1))[x] - g_vort.template get<vorticity>(key.move(x,-1))[x] ) +
              1.0f/g_vort.spacing(y)/2.0f*(g_vort.template get<vorticity>(key.move(y,1))[y] - g_vort.template get<vorticity>(key.move(y,-1))[y] ) +
              1.0f/g_vort.spacing(z)/2.0f*(g_vort.template get<vorticity>(key.move(z,1))[z] - g_vort.template get<vorticity>(key.move(z,-1))[z] );

        int_vort[x] += g_vort.template get<vorticity>(key)[x];
        int_vort[y] += g_vort.template get<vorticity>(key)[y];
        int_vort[z] += g_vort.template get<vorticity>(key)[z];

        if (tmp > max_vort)
            max_vort = tmp;

        ++it5;
    }

    Vcluster & v_cl = create_vcluster();
    v_cl.max(max_vort);
    v_cl.sum(int_vort[0]);
    v_cl.sum(int_vort[1]);
    v_cl.sum(int_vort[2]);
    v_cl.execute();

    if (v_cl.getProcessUnitID() == 0)
    {std::cout << "Max div for vorticity " << max_vort << " Integral: " << int_vort[0] << " " << int_vort[1] << " " << int_vort[2] << std::endl;}
}

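/*! Initialize a vortex ring on the grid. The loop below implements a
 * Gaussian ring profile: at squared distance \f$ r^2 \f$ from the ring
 * core the vorticity magnitude is
 *
 * \f$ |w| = \frac{\Gamma}{\pi \sigma^2} \, e^{-r^2/\sigma^2} \f$
 *
 * with circulation \f$ \Gamma = \nu \, Re \f$ and core radius
 * \f$ \sigma \f$, oriented tangentially to the ring (the cos/sin of
 * theta1 below).
 */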
void init_ring(grid_type & gr, const Box<3,float> & domain)
{
    // To add some noise to the vortex ring we create two random vectors
    constexpr int nk = 32;

    float ak[nk];
    float bk[nk];

    for (size_t i = 0 ; i < nk ; i++)
    {
        // cast to float: the integer division rand()/RAND_MAX would
        // almost always yield 0
        ak[i] = rand()/(float)RAND_MAX;
        bk[i] = rand()/(float)RAND_MAX;
    }

    // We calculate the circulation gamma
    float gamma = nu * tgtre;
    float rinv2 = 1.0f/(sigma*sigma);
    float max_vorticity = gamma*rinv2/M_PI;

    // We go through the grid to initialize the vortex
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key_d = it.get();
        auto key = it.getGKey(key_d);

        float tx = (key.get(x)-2)*gr.spacing(x) + domain.getLow(x);
        float ty = (key.get(y)-2)*gr.spacing(y) + domain.getLow(y);
        float tz = (key.get(z)-2)*gr.spacing(z) + domain.getLow(z);
        float theta1 = atan2((ty-domain.getHigh(1)/2.0f),(tz-domain.getHigh(2)/2.0f));

        float rad1r = sqrt((ty-domain.getHigh(1)/2.0f)*(ty-domain.getHigh(1)/2.0f) + (tz-domain.getHigh(2)/2.0f)*(tz-domain.getHigh(2)/2.0f)) - ringr1;
        float rad1t = tx - 1.0f;
        float rad1sq = rad1r*rad1r + rad1t*rad1t;
        float radstr = -exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
        gr.template get<vorticity>(key_d)[x] = 0.0f;
        gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
        gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);

        // kill the axis term

        float rad1r_ = sqrt((ty-domain.getHigh(1)/2.0f)*(ty-domain.getHigh(1)/2.0f) + (tz-domain.getHigh(2)/2.0f)*(tz-domain.getHigh(2)/2.0f)) + ringr1;
        float rad1sqTILDA = rad1sq*rinv2;
        radstr = exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
        gr.template get<vorticity>(key_d)[x] = 0.0f;
        gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
        gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);

        ++it;
    }
}

// Specification of the Poisson system: 3D, one scalar unknown, periodic
// boundary conditions. The three typedefs below are collapsed in the
// generated listing and are restored here from the member documentation.
struct poisson_nn_helm
{
    //! Number of dimensions of the problem
    static const unsigned int dims = 3;
    //! We have only one scalar unknown
    static const unsigned int nvar = 1;
    //! specify the boundary conditions
    static const bool boundary[];
    //! type of the spatial coordinates
    typedef float stype;
    //! grid that stores psi
    typedef grid_type_s b_grid;
    //! Sparse matrix used to solve the linear system (we use PETSC)
    typedef SparseMatrix<double,int,PETSC_BASE> SparseMatrix_type;
    //! Vector to solve the system (PETSC)
    typedef Vector<double,PETSC_BASE> Vector_type;
    //! It is a normal grid
    static const int grid_type = NORMAL_GRID;
};

const bool poisson_nn_helm::boundary[] = {PERIODIC,PERIODIC,PERIODIC};

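/*! Helmholtz-Hodge projection: make the vorticity divergence free. We
 * solve
 *
 * \f$ \nabla^2 \psi = \nabla \cdot w \f$
 *
 * on the pre-assembled FDScheme and then correct
 *
 * \f$ w \leftarrow w - \nabla \psi \f$
 *
 * which is what the two stencil loops below implement with central
 * differences.
 */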
void helmotz_hodge_projection(grid_dist_id<3,float,aggregate<float>,CartDecomposition<3,float,HeapMemory,SpaceDistribution<3,float>>> & psi,
                              FDScheme<poisson_nn_helm> & fd, // collapsed in the listing; restored from the call sites in main
                              grid_type & gr,
                              const Box<3,float> & domain,
                              petsc_solver<double> & solver,
                              Vector<double,PETSC_BASE> & x_, // collapsed in the listing; restored from the call sites in main
                              bool init)
{
    // ghost get
    gr.template ghost_get<vorticity>();

    // ghost size of the psi function
    Ghost<3,long int> g(2);

    // Calculate the divergence of the vortex field
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        psi.template get<phi>(key) = 1.0f/gr.spacing(x)/2.0f*(gr.template get<vorticity>(key.move(x,1))[x] - gr.template get<vorticity>(key.move(x,-1))[x] ) +
                                     1.0f/gr.spacing(y)/2.0f*(gr.template get<vorticity>(key.move(y,1))[y] - gr.template get<vorticity>(key.move(y,-1))[y] ) +
                                     1.0f/gr.spacing(z)/2.0f*(gr.template get<vorticity>(key.move(z,1))[z] - gr.template get<vorticity>(key.move(z,-1))[z] );

        ++it;
    }

    calc_and_print_max_div_and_int(gr);

    // Impose the divergence as the new right-hand side, re-using the
    // pre-assembled matrix
    fd.new_b();
    fd.template impose_dit_b<0>(psi,psi.getDomainIterator());

    timer tm_solve;
    if (init == true)
    {
        // Set BiCGStab as the Krylov method of the linear solver
        solver.setSolver(KSPBCGS);

        // Set the absolute tolerance used to declare convergence
        solver.setAbsTol(0.0001);

        // Set the maximum number of iterations
        solver.setMaxIter(500);

#ifdef USE_MULTIGRID

        solver.setPreconditioner(PCHYPRE_BOOMERAMG);
        solver.setPreconditionerAMG_nl(6);
        solver.setPreconditionerAMG_maxit(1);
        solver.setPreconditionerAMG_relax("SOR/Jacobi");
        solver.setPreconditionerAMG_cycleType("V",0,4);
        solver.setPreconditionerAMG_coarsen("HMIS");

#endif

        tm_solve.start();

        // Give A and b to the solver; x_ receives the solution
        solver.solve(fd.getA(),x_,fd.getB());

        tm_solve.stop();
    }
    else
    {
        tm_solve.start();
        solver.solve(x_,fd.getB());
        tm_solve.stop();
    }

    // copy the solution x to the grid psi
    fd.template copy<phi>(x_,psi);

    psi.template ghost_get<phi>();

    // Correct the vorticity to make it divergence free

    auto it2 = gr.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        gr.template get<vorticity>(key)[x] -= 1.0f/2.0f/psi.spacing(x)*(psi.template get<phi>(key.move(x,1))-psi.template get<phi>(key.move(x,-1)));
        gr.template get<vorticity>(key)[y] -= 1.0f/2.0f/psi.spacing(y)*(psi.template get<phi>(key.move(y,1))-psi.template get<phi>(key.move(y,-1)));
        gr.template get<vorticity>(key)[z] -= 1.0f/2.0f/psi.spacing(z)*(psi.template get<phi>(key.move(z,1))-psi.template get<phi>(key.move(z,-1)));

        ++it2;
    }

    calc_and_print_max_div_and_int(gr);
}

void remesh(particles_type & vd, grid_type & gr, Box<3,float> & domain)
{
    // Remove all particles because we reinitialize them in a grid-like position
    vd.clear();

    // Get a grid iterator
    auto git = vd.getGridIterator(gr.getGridInfo().getSize());

    // For each grid point
    while (git.isNext())
    {
        // p is the grid point in global coordinates; key is the
        // corresponding local (distributed) key
        auto p = git.get();
        auto key = git.get_dist();

        // Add the particle
        vd.add();

        // Assign the position to the particle in a grid-like way
        vd.getLastPos()[x] = gr.spacing(x)*p.get(x) + domain.getLow(x);
        vd.getLastPos()[y] = gr.spacing(y)*p.get(y) + domain.getLow(y);
        vd.getLastPos()[z] = gr.spacing(z)*p.get(z) + domain.getLow(z);

        // Initialize the vorticity
        vd.template getLastProp<vorticity>()[x] = gr.template get<vorticity>(key)[x];
        vd.template getLastProp<vorticity>()[y] = gr.template get<vorticity>(key)[y];
        vd.template getLastProp<vorticity>()[z] = gr.template get<vorticity>(key)[z];

        // next grid point
        ++git;
    }

    // redistribute the particles
    vd.map();
}

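/*! Compute the velocity from the vorticity. For each component i a
 * Poisson equation
 *
 * \f$ \nabla^2 \phi_i = w_i \f$
 *
 * is solved on the pre-assembled FDScheme (up to the sign convention
 * baked into the Laplacian), re-using phi_s[i] from the previous time
 * step as the initial guess. The velocity is then the curl of
 * \f$ \phi \f$, computed with central differences in the last loop:
 *
 * \f$ u = \nabla \times \phi \f$
 */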
void comp_vel(grid_type_s & gr_ps,
              grid_type & phi_v,
              FDScheme<poisson_nn_helm> & fd, // collapsed in the listing; restored from the call site in do_step
              Box<3,float> & domain,
              grid_type & g_vort,
              grid_type & g_vel,
              Vector<double,PETSC_BASE> (& phi_s)[3], // collapsed in the listing; restored from the call site in do_step
              petsc_solver<double> & solver)
{
    // We calculate and print the maximum divergence of the vorticity
    // and the integral of it
    calc_and_print_max_div_and_int(g_vort);

    // For each component solve a Poisson equation
    for (size_t i = 0 ; i < 3 ; i++)
    {
        // Copy the vorticity component i into gr_ps
        auto it2 = gr_ps.getDomainIterator();

        while (it2.isNext())
        {
            auto key = it2.get();

            // copy
            gr_ps.get<vorticity>(key) = g_vort.template get<vorticity>(key)[i];

            ++it2;
        }

        // impose the Poisson equation using gr_ps = vorticity as the right-hand side (on the full domain)
        fd.new_b();
        fd.template impose_dit_b<phi>(gr_ps,gr_ps.getDomainIterator());

        solver.setAbsTol(0.01);

        // Get the vector that represents the right-hand side
        Vector<double,PETSC_BASE> & b = fd.getB();

        timer tm_solve;
        tm_solve.start();

        // Give b to the solver; phi_s[i], calculated in the previous
        // time step, is passed as the initial guess
        solver.solve(phi_s[i],b);

        tm_solve.stop();

        // Calculate the residual

        solError serr;
        serr = solver.get_residual_error(phi_s[i],b);

        Vcluster & v_cl = create_vcluster();
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Solved component " << i << " Error: " << serr.err_inf << std::endl;}

        // copy the solution to the grid
        fd.template copy<phi>(phi_s[i],gr_ps);

        auto it3 = gr_ps.getDomainIterator();

        // store the solution for component i in the vector grid phi_v
        while (it3.isNext())
        {
            auto key = it3.get();

            phi_v.get<velocity>(key)[i] = gr_ps.get<phi>(key);

            ++it3;
        }
    }

    phi_v.ghost_get<phi>();

    float inv_dx = 0.5f / g_vort.spacing(0);
    float inv_dy = 0.5f / g_vort.spacing(1);
    float inv_dz = 0.5f / g_vort.spacing(2);

    auto it3 = phi_v.getDomainIterator();

    // calculate the velocity from the curl of phi
    while (it3.isNext())
    {
        auto key = it3.get();

        float phi_xy = (phi_v.get<phi>(key.move(y,1))[x] - phi_v.get<phi>(key.move(y,-1))[x])*inv_dy;
        float phi_xz = (phi_v.get<phi>(key.move(z,1))[x] - phi_v.get<phi>(key.move(z,-1))[x])*inv_dz;
        float phi_yx = (phi_v.get<phi>(key.move(x,1))[y] - phi_v.get<phi>(key.move(x,-1))[y])*inv_dx;
        float phi_yz = (phi_v.get<phi>(key.move(z,1))[y] - phi_v.get<phi>(key.move(z,-1))[y])*inv_dz;
        float phi_zx = (phi_v.get<phi>(key.move(x,1))[z] - phi_v.get<phi>(key.move(x,-1))[z])*inv_dx;
        float phi_zy = (phi_v.get<phi>(key.move(y,1))[z] - phi_v.get<phi>(key.move(y,-1))[z])*inv_dy;

        g_vel.template get<velocity>(key)[x] = phi_zy - phi_yz;
        g_vel.template get<velocity>(key)[y] = phi_xz - phi_zx;
        g_vel.template get<velocity>(key)[z] = phi_yx - phi_xy;

        ++it3;
    }
}


template<unsigned int prp> void set_zero(grid_type & gr)
{
    auto it = gr.getDomainGhostIterator();

    // set the property prp to zero on the grid (domain + ghost)
    while (it.isNext())
    {
        auto key = it.get();

        gr.template get<prp>(key)[0] = 0.0;
        gr.template get<prp>(key)[1] = 0.0;
        gr.template get<prp>(key)[2] = 0.0;

        ++it;
    }
}


template<unsigned int prp> void set_zero(particles_type & vd)
{
    auto it = vd.getDomainIterator();

    // set the property prp to zero on the particles
    while (it.isNext())
    {
        auto key = it.get();

        vd.template getProp<prp>(key)[0] = 0.0;
        vd.template getProp<prp>(key)[1] = 0.0;
        vd.template getProp<prp>(key)[2] = 0.0;

        ++it;
    }
}

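/*! Right-hand side of the vorticity transport equation,
 *
 * \f$ \frac{Dw}{Dt} = (w \cdot \nabla) u + \nu \nabla^2 w \f$
 *
 * discretized below with second-order central differences: fac1..fac3
 * carry the diffusion prefactors \f$ \nu / h_d^2 \f$ and fac4..fac6 the
 * stretching prefactors \f$ 1/(2 h_d) \f$.
 */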
template<typename grid> void calc_rhs(grid & g_vort, grid & g_vel, grid & g_dwp)
{
    // useful constant
    constexpr int rhs = 0;

    // calculate several pre-factors for the finite-difference stencil
    float fac1 = 1.0f*nu/(g_vort.spacing(0)*g_vort.spacing(0));
    float fac2 = 1.0f*nu/(g_vort.spacing(1)*g_vort.spacing(1));
    float fac3 = 1.0f*nu/(g_vort.spacing(2)*g_vort.spacing(2));

    float fac4 = 0.5f/(g_vort.spacing(0));
    float fac5 = 0.5f/(g_vort.spacing(1));
    float fac6 = 0.5f/(g_vort.spacing(2));

    auto it = g_dwp.getDomainIterator();

    g_vort.template ghost_get<vorticity>();

    while (it.isNext())
    {
        auto key = it.get();

        g_dwp.template get<rhs>(key)[x] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[x]+g_vort.template get<vorticity>(key.move(x,-1))[x])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[x]+g_vort.template get<vorticity>(key.move(y,-1))[x])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[x]+g_vort.template get<vorticity>(key.move(z,-1))[x])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[x]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[x] - g_vel.template get<velocity>(key.move(x,-1))[x])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[x] - g_vel.template get<velocity>(key.move(y,-1))[x])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[x] - g_vel.template get<velocity>(key.move(z,-1))[x]);

        g_dwp.template get<rhs>(key)[y] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[y]+g_vort.template get<vorticity>(key.move(x,-1))[y])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[y]+g_vort.template get<vorticity>(key.move(y,-1))[y])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[y]+g_vort.template get<vorticity>(key.move(z,-1))[y])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[y]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[y] - g_vel.template get<velocity>(key.move(x,-1))[y])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[y] - g_vel.template get<velocity>(key.move(y,-1))[y])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[y] - g_vel.template get<velocity>(key.move(z,-1))[y]);

        g_dwp.template get<rhs>(key)[z] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[z]+g_vort.template get<vorticity>(key.move(x,-1))[z])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[z]+g_vort.template get<vorticity>(key.move(y,-1))[z])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[z]+g_vort.template get<vorticity>(key.move(z,-1))[z])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[z]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[z] - g_vel.template get<velocity>(key.move(x,-1))[z])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[z] - g_vel.template get<velocity>(key.move(y,-1))[z])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[z] - g_vel.template get<velocity>(key.move(z,-1))[z]);

        ++it;
    }
}

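/*! The two-stage (midpoint) Runge-Kutta update used for both the
 * vorticity and the particle positions; with f the right-hand side
 * this is
 *
 * \f$ q^{*} = q^n + \frac{\Delta t}{2} f(q^n), \qquad
 *     q^{n+1} = q^n + \Delta t \, f(q^{*}) \f$
 *
 * rk_step1 performs the half step (saving the old state), rk_step2 the
 * full step from the saved state.
 */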
void rk_step1(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        // Save the old vorticity
        particles.getProp<old_vort>(key)[x] = particles.getProp<vorticity>(key)[x];
        particles.getProp<old_vort>(key)[y] = particles.getProp<vorticity>(key)[y];
        particles.getProp<old_vort>(key)[z] = particles.getProp<vorticity>(key)[z];

        particles.getProp<vorticity>(key)[x] = particles.getProp<vorticity>(key)[x] + 0.5f * dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<vorticity>(key)[y] + 0.5f * dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<vorticity>(key)[z] + 0.5f * dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        // Save the old position
        particles.getProp<old_pos>(key)[x] = particles.getPos(key)[x];
        particles.getProp<old_pos>(key)[y] = particles.getPos(key)[y];
        particles.getProp<old_pos>(key)[z] = particles.getPos(key)[z];

        particles.getPos(key)[x] = particles.getPos(key)[x] + 0.5f * dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getPos(key)[y] + 0.5f * dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getPos(key)[z] + 0.5f * dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}


void rk_step2(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        particles.getProp<vorticity>(key)[x] = particles.getProp<old_vort>(key)[x] + dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<old_vort>(key)[y] + dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<old_vort>(key)[z] + dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        particles.getPos(key)[x] = particles.getProp<old_pos>(key)[x] + dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getProp<old_pos>(key)[y] + dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getProp<old_pos>(key)[z] + dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}

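/*! One half of the Runge-Kutta cycle: interpolate the particle vorticity
 * to the grid (p2m), compute the velocity and the right-hand side on the
 * grid, and interpolate both back to the particles (m2p). The ghost_put
 * after p2m accumulates the contributions that fall in the ghost layer
 * of a neighboring processor.
 */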
template<typename grid, typename vector> void do_step(grid_type_s & psi,
                                                      grid_type & phi_v,
                                                      FDScheme<poisson_nn_helm> & fd, // collapsed in the listing; restored from the call in main
                                                      vector & particles,
                                                      grid & g_vort,
                                                      grid & g_vel,
                                                      grid & g_dvort,
                                                      Box<3,float> & domain,
                                                      interpolate<particles_type,grid_type,mp4_kernel<float>> & inte, // collapsed; restored
                                                      Vector<double,PETSC_BASE> (& phi_s)[3], // collapsed; restored
                                                      petsc_solver<double> & solver)
{
    constexpr int rhs = 0;

    set_zero<vorticity>(g_vort);
    inte.template p2m<vorticity,vorticity>(particles,g_vort);

    g_vort.template ghost_put<add_,vorticity>();

    // Calculate velocity from vorticity
    comp_vel(psi,phi_v,fd,domain,g_vort,g_vel,phi_s,solver);
    calc_rhs(g_vort,g_vel,g_dvort);

    g_dvort.template ghost_get<rhs>();
    g_vel.template ghost_get<velocity>();
    set_zero<rhs_part>(particles);
    set_zero<p_vel>(particles);
    inte.template m2p<rhs,rhs_part>(g_dvort,particles);
    inte.template m2p<velocity,p_vel>(g_vel,particles);
}

template<typename vector, typename grid> void check_point_and_save(vector & particles,
                                                                   grid & g_vort,
                                                                   grid & g_vel,
                                                                   grid & g_dvort,
                                                                   size_t i)
{
    Vcluster & v_cl = create_vcluster();

    if (v_cl.getProcessingUnits() < 24)
    {
        particles.write("part_out_" + std::to_string(i),VTK_WRITER | FORMAT_BINARY);
        g_vort.template ghost_get<vorticity>();
        g_vel.template ghost_get<velocity>();
        g_vel.write_frame("grid_velocity",i, VTK_WRITER | FORMAT_BINARY);
        g_vort.write_frame("grid_vorticity",i, VTK_WRITER | FORMAT_BINARY);
    }

    // In order to reduce the size of the saved data we apply a threshold:
    // we only save particles with vorticity magnitude higher than 0.1
    particles_type_s part_save(particles.getDecomposition(),0);

    auto it_s = particles.getDomainIterator();

    while (it_s.isNext())
    {
        auto p = it_s.get();

        float vort_magn = sqrt(particles.template getProp<vorticity>(p)[0] * particles.template getProp<vorticity>(p)[0] +
                               particles.template getProp<vorticity>(p)[1] * particles.template getProp<vorticity>(p)[1] +
                               particles.template getProp<vorticity>(p)[2] * particles.template getProp<vorticity>(p)[2]);

        if (vort_magn < 0.1)
        {
            ++it_s;
            continue;
        }

        part_save.add();

        part_save.getLastPos()[0] = particles.getPos(p)[0];
        part_save.getLastPos()[1] = particles.getPos(p)[1];
        part_save.getLastPos()[2] = particles.getPos(p)[2];

        part_save.template getLastProp<0>() = vort_magn;

        ++it_s;
    }

    part_save.map();

    // checkpoint-restart file; the step index is appended so that the
    // restart path in main (which loads "check_point_" + i) can find it
    particles.save("check_point_" + std::to_string(i));
}

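/*! Time stepping in main() follows the numbered steps referenced by the
 * comments below (a reconstruction from those comments; the original
 * overview is not part of this listing): initialize the vortex ring on
 * the grid (step 1), project the vorticity to be divergence free
 * (step 2), create the particles on the mesh (step 3); then, for each
 * time step, interpolate to the grid, compute the velocity and the
 * right-hand side, and interpolate back (steps 4-7 and 9-12), advance
 * with the two-stage Runge-Kutta (steps 8 and 13), and re-project and
 * remesh (step 14).
 */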
int main(int argc, char* argv[])
{
    // Initialize
    openfpm_init(&argc,&argv);
    {
    // Domain, a rectangle
    // For the grid 1600x400x400 use
    // Box<3,float> domain({0.0,0.0,0.0},{22.0,5.57,5.57});
    Box<3,float> domain({0.0,0.0,0.0},{3.57,3.57,3.57});

    // Ghost (not important in this case but required)
    Ghost<3,long int> g(2);

    // Number of grid points: 128 in each direction
    // (for Re = 7500 use: long int sz[] = {1600,400,400};)
    long int sz[] = {128,128,128};
    size_t szu[] = {(size_t)sz[0],(size_t)sz[1],(size_t)sz[2]};

    periodicity<3> bc = {{PERIODIC,PERIODIC,PERIODIC}};

    grid_type g_vort(szu,domain,g,bc);
    grid_type g_vel(g_vort.getDecomposition(),szu,g);
    grid_type g_dvort(g_vort.getDecomposition(),szu,g);
    particles_type particles(g_vort.getDecomposition(),0);

    // Constructing an FDScheme is expensive, so we construct it once
    // here. The same holds for distributed grids, so the temporary
    // scalar grid psi and the vector grid phi_v are also created once,
    // outside the time loop.
    grid_type_s psi(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);
    grid_type phi_v(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);

    // In order to create a matrix that represents the Poisson equation
    // we have to indicate the maximum extension of the stencil, plus an
    // extra layer of points in case we have to impose particular
    // boundary conditions.
    Ghost<3,long int> stencil_max(2);

    // We define a field phi_f
    typedef Field<phi,poisson_nn_helm> phi_f;

    // We assemble the Poisson equation from the Laplacian of phi using a
    // central scheme (both derivatives use the central scheme, rather
    // than the usual forward composed with backward).
    // NOTE: the typedef below is collapsed in the generated listing; it
    // is restored from the Laplacian member documentation.
    typedef Lap<phi_f,poisson_nn_helm,CENTRAL_SYM> poisson;

    FDScheme<poisson_nn_helm> fd(stencil_max, domain, psi);

    fd.template impose_dit<0>(poisson(),psi,psi.getDomainIterator());

    // It stores the solution used to compute the velocity;
    // it is used as initial guess every time we call the solver
    Vector<double,PETSC_BASE> phi_s[3];

    // Solution vector of the Helmholtz-Hodge projection
    // (collapsed in the listing; restored from its use below)
    Vector<double,PETSC_BASE> x_;

    // Parallel object
    Vcluster & v_cl = create_vcluster();

    // print out the spacing of the grid
    if (v_cl.getProcessUnitID() == 0)
    {std::cout << "SPACING " << g_vort.spacing(0) << " " << g_vort.spacing(1) << " " << g_vort.spacing(2) << std::endl;}

    // initialize the ring (step 1)
    init_ring(g_vort,domain);

    x_.resize(g_vort.size(),g_vort.getLocalDomainSize());
    x_.setZero();

    // Create a PETSC solver to get the solution x
    petsc_solver<double> solver;

    // Do the Helmholtz-Hodge projection (step 2)
    helmotz_hodge_projection(psi,fd,g_vort,domain,solver,x_,true);

    // initialize the particles on a mesh (step 3)
    remesh(particles,g_vort,domain);

    // Get the total number of particles
    size_t tot_part = particles.size_local();
    v_cl.sum(tot_part);
    v_cl.execute();

    // Now we set the size of phi_s
    phi_s[0].resize(g_vort.size(),g_vort.getLocalDomainSize());
    phi_s[1].resize(g_vort.size(),g_vort.getLocalDomainSize());
    phi_s[2].resize(g_vort.size(),g_vort.getLocalDomainSize());

    // And we initially set it to zero
    phi_s[0].setZero();
    phi_s[1].setZero();
    phi_s[2].setZero();

    // We create the interpolation object outside the loop, to avoid
    // re-creating it at every cycle. Internally this object creates some
    // data structures that optimize mapping particle positions to
    // sub-domains; as long as there is no re-balancing it is safe to
    // reuse it.
    // NOTE: the declaration below is collapsed in the generated listing;
    // it is restored from the calls to inte further down.
    interpolate<particles_type,grid_type,mp4_kernel<float>> inte(particles,g_vort);

    // With more than 24 cores we rely on the HDF5 checkpoint-restart file
    if (v_cl.getProcessingUnits() < 24)
    {g_vort.write("grid_vorticity_init", VTK_WRITER | FORMAT_BINARY);}

    // Before entering the loop we check if we want to restart from
    // a previous simulation
    size_t i = 0;

    // if we have an argument
    if (argc > 1)
    {
        // take that argument
        std::string restart(argv[1]);

        // convert it into a number
        i = std::stoi(restart);

        // load the file
        particles.load("check_point_" + std::to_string(i));

        // Print to inform that we are restarting from a
        // particular position
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Restarting from " << i << std::endl;}
    }

    // Time Integration
    for ( ; i < nsteps ; i++)
    {
        // do steps 4-5-6-7
        do_step(psi,phi_v,fd,particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

        // do step 8
        rk_step1(particles);

        // do steps 9-10-11-12
        do_step(psi,phi_v,fd,particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

        // do step 13
        rk_step2(particles);

        // do step 14
        set_zero<vorticity>(g_vort);
        inte.template p2m<vorticity,vorticity>(particles,g_vort);
        g_vort.template ghost_put<add_,vorticity>();

        // Helmholtz-Hodge projection
        helmotz_hodge_projection(psi,fd,g_vort,domain,solver,x_,false);

        remesh(particles,g_vort,domain);

        // print the step number
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Step " << i << std::endl;}

        // every 100 steps write the output
        if (i % 100 == 0) {check_point_and_save(particles,g_vort,g_vel,g_dvort,i);}
    }
    }

    openfpm_finalize();
}