OpenFPM 5.2.0
Project that contains the implementation of distributed structures
main_vic_petsc.cpp
#include "config.h"

#ifdef HAVE_PETSC

//#define SE_CLASS1
//#define PRINT_STACKTRACE

#include "interpolation/interpolation.hpp"
#include "Grid/grid_dist_id.hpp"
#include "Vector/vector_dist.hpp"
#include "Matrix/SparseMatrix.hpp"
#include "Vector/Vector.hpp"
#include "FiniteDifference/FDScheme.hpp"
#include "Solvers/petsc_solver.hpp"
#include "interpolation/mp4_kernel.hpp"
#include "Solvers/petsc_solver_AMG_report.hpp"

constexpr int x = 0;
constexpr int y = 1;
constexpr int z = 2;
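
// Vortex-in-cell (VIC) scheme in short: the vorticity is carried by particles,
// while differential operators are evaluated on a grid. Each iteration:
// interpolate particles -> mesh (p2m), recover the velocity from the vorticity
// by solving three Poisson equations, evaluate the vorticity right-hand side,
// interpolate mesh -> particles (m2p), advance particles and vorticity with a
// two-stage Runge-Kutta scheme, and remesh.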

// The type of the grids (one float[3] vector field per grid)
typedef grid_dist_id<3,float,aggregate<float[3]>> grid_type;

// The type of the particles (five float[3] properties, indexed below)
typedef vector_dist<3,float,aggregate<float[3],float[3],float[3],float[3],float[3]>> particles_type;
// radius of the torus
float ringr1 = 1.0;
// radius of the core of the torus
float sigma = 1.0/3.523;
// Reynolds number (to use 7500.0 you need a 1600x400x400 grid)
//float tgtre = 7500.0;
float tgtre = 3000.0;

// Kinematic viscosity
float nu = 1.0/tgtre;

// Time step
// float dt = 0.0025 for Re 7500
float dt = 0.0125;

#ifdef TEST_RUN
const unsigned int nsteps = 10;
#else
const unsigned int nsteps = 10001;
#endif

// All the properties by index
constexpr unsigned int vorticity = 0;
constexpr unsigned int velocity = 0;
constexpr unsigned int p_vel = 1;
constexpr int rhs_part = 2;
constexpr unsigned int old_vort = 3;
constexpr unsigned int old_pos = 4;


template<typename grid> void calc_and_print_max_div_and_int(grid & g_vort)
{
    g_vort.template ghost_get<vorticity>();
    auto it5 = g_vort.getDomainIterator();

    double max_vort = 0.0;

    double int_vort[3] = {0.0,0.0,0.0};

    while (it5.isNext())
    {
        auto key = it5.get();

        double tmp;

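        // Second-order central-difference divergence of the vorticity:
        //   (div w)(k) = sum_d [ w_d(k+e_d) - w_d(k-e_d) ] / (2*h_d)
        // where h_d = g_vort.spacing(d); the ghost_get above provides the
        // k +/- e_d neighbours across processor boundaries.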
        tmp = 1.0f/g_vort.spacing(x)/2.0f*(g_vort.template get<vorticity>(key.move(x,1))[x] - g_vort.template get<vorticity>(key.move(x,-1))[x] ) +
              1.0f/g_vort.spacing(y)/2.0f*(g_vort.template get<vorticity>(key.move(y,1))[y] - g_vort.template get<vorticity>(key.move(y,-1))[y] ) +
              1.0f/g_vort.spacing(z)/2.0f*(g_vort.template get<vorticity>(key.move(z,1))[z] - g_vort.template get<vorticity>(key.move(z,-1))[z] );

        int_vort[x] += g_vort.template get<vorticity>(key)[x];
        int_vort[y] += g_vort.template get<vorticity>(key)[y];
        int_vort[z] += g_vort.template get<vorticity>(key)[z];

        if (tmp > max_vort)
            max_vort = tmp;

        ++it5;
    }

    Vcluster<> & v_cl = create_vcluster();
    v_cl.max(max_vort);
    v_cl.sum(int_vort[0]);
    v_cl.sum(int_vort[1]);
    v_cl.sum(int_vort[2]);
    v_cl.execute();

    if (v_cl.getProcessUnitID() == 0)
    {std::cout << "Max div for vorticity " << max_vort << " Integral: " << int_vort[0] << " " << int_vort[1] << " " << int_vort[2] << std::endl;}
}


/*
 * gr is the grid where we are initializing the vortex ring
 * domain is the simulation domain
 */
void init_ring(grid_type & gr, const Box<3,float> & domain)
{
    // To add some noise to the vortex ring we create two random
    // vectors
    constexpr int nk = 32;

    float ak[nk];
    float bk[nk];

    for (size_t i = 0 ; i < nk ; i++)
    {
        // cast to float: rand()/RAND_MAX in integer arithmetic is almost always 0
        ak[i] = rand()/(float)RAND_MAX;
        bk[i] = rand()/(float)RAND_MAX;
    }

    // We calculate the circulation gamma
    float gamma = nu * tgtre;
    float rinv2 = 1.0f/(sigma*sigma);
    float max_vorticity = gamma*rinv2/M_PI;
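
    // Gaussian vortex core: the azimuthal vorticity profile is
    //   w_theta(r) = gamma/(pi*sigma^2) * exp(-r^2/sigma^2)
    // and max_vorticity above is its peak value at r = 0.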

    // We go through the grid to initialize the vortex
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key_d = it.get();
        auto key = it.getGKey(key_d);

        float tx = (key.get(x)-2)*gr.spacing(x) + domain.getLow(x);
        float ty = (key.get(y)-2)*gr.spacing(y) + domain.getLow(y);
        float tz = (key.get(z)-2)*gr.spacing(z) + domain.getLow(z);
        float theta1 = atan2((ty-domain.getHigh(1)/2.0f),(tz-domain.getHigh(2)/2.0f));

        float rad1r = sqrt((ty-domain.getHigh(1)/2.0f)*(ty-domain.getHigh(1)/2.0f) + (tz-domain.getHigh(2)/2.0f)*(tz-domain.getHigh(2)/2.0f)) - ringr1;
        float rad1t = tx - 1.0f;
        float rad1sq = rad1r*rad1r + rad1t*rad1t;
        float radstr = -exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
        gr.template get<vorticity>(key_d)[x] = 0.0f;
        gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
        gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);

        // kill the axis term

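        // Note: as written, the assignments below replace (rather than add to)
        // the ring vorticity set just above, and rad1r_ / rad1sqTILDA are
        // computed but never used.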
        float rad1r_ = sqrt((ty-domain.getHigh(1)/2.0f)*(ty-domain.getHigh(1)/2.0f) + (tz-domain.getHigh(2)/2.0f)*(tz-domain.getHigh(2)/2.0f)) + ringr1;
        float rad1sqTILDA = rad1sq*rinv2;
        radstr = exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
        gr.template get<vorticity>(key_d)[x] = 0.0f;
        gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
        gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);

        ++it;
    }
}


// Specification of the Poisson equation for the Helmholtz-Hodge projection.
// 3D (dims = 3), the field is a scalar (nvar = 1), the boundaries are periodic
// and the type of the space is float. b_grid is the grid type that stores \psi.
// The other typedefs indicate which kind of matrix to use to assemble the
// linear system and which kind of vector to build for the right-hand side:
// here a PETSc sparse matrix and a PETSc vector. NORMAL_GRID indicates a
// standard (non-staggered) grid.
struct poisson_nn_helm
{
    //! Number of dimensions of the problem
    static const unsigned int dims = 3;
    //! We have only one scalar unknown
    static const unsigned int nvar = 1;
    //! Boundary conditions
    static const bool boundary[];
    //! Type of the spatial coordinates
    typedef float stype;
    //! Grid that stores \psi
    typedef grid_dist_id<3,float,aggregate<float>> b_grid;
    //! Sparse matrix used to solve the linear system (we use PETSc)
    typedef SparseMatrix<double,int,PETSC_BASE> SparseMatrix_type;
    //! Vector to solve the system (PETSc)
    typedef Vector<double,PETSC_BASE> Vector_type;
    //! It is a normal (non-staggered) grid
    static const int grid_type = NORMAL_GRID;
};

const bool poisson_nn_helm::boundary[] = {PERIODIC,PERIODIC,PERIODIC};


/*
 * gr is the vorticity grid where we apply the correction
 * domain is the simulation domain
 */
void helmotz_hodge_projection(grid_type & gr, const Box<3,float> & domain, petsc_solver<double> & solver, petsc_solver<double>::return_type & x_ , bool init)
{

    // Convenient constant
    constexpr unsigned int phi = 0;

    // We define a field phi_f
    typedef Field<phi,poisson_nn_helm> phi_f;

    // We assemble the Poisson equation as the Laplacian of phi using the
    // symmetric central-difference scheme (both derivatives use the central
    // scheme, rather than a forward difference composed with a backward one)
    typedef Lap<phi_f,poisson_nn_helm,CENTRAL_SYM> poisson;

    // ghost get
    gr.template ghost_get<vorticity>();

    // ghost size of the psi function
    Ghost<3,long int> g(2);

    // Here we create a distributed grid to store the result of the Helmholtz projection
    grid_dist_id<3,float,aggregate<float>> psi(gr.getDecomposition(),gr.getGridInfo().getSize(),g);

    // Calculate the divergence of the vorticity field and store it in psi
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        psi.template get<phi>(key) = 1.0f/gr.spacing(x)/2.0f*(gr.template get<vorticity>(key.move(x,1))[x] - gr.template get<vorticity>(key.move(x,-1))[x] ) +
                                     1.0f/gr.spacing(y)/2.0f*(gr.template get<vorticity>(key.move(y,1))[y] - gr.template get<vorticity>(key.move(y,-1))[y] ) +
                                     1.0f/gr.spacing(z)/2.0f*(gr.template get<vorticity>(key.move(z,1))[z] - gr.template get<vorticity>(key.move(z,-1))[z] );

        ++it;
    }
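
    // Helmholtz-Hodge projection: solve the Poisson equation
    //   lap(psi) = div(w)
    // and afterwards subtract grad(psi) from w, leaving a divergence-free field.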

    calc_and_print_max_div_and_int(gr);

    // In order to create a matrix that represents the Poisson equation we have
    // to indicate the maximum extension of the stencil, and we need an extra
    // layer of points in case we have to impose particular boundary conditions.
    Ghost<3,long int> stencil_max(2);

    // Here we define our finite-difference discretization scheme object
    FDScheme<poisson_nn_helm> fd(stencil_max, domain, psi);

    // Impose the Poisson equation, using the right-hand side psi on the full
    // domain (psi.getDomainIterator()). The template parameter defines which
    // property of psi carries the right-hand side; psi has only one property,
    // so property 0 carries it.
    fd.template impose_dit<0>(poisson(),psi,psi.getDomainIterator());


    timer tm_solve;
    if (init == true)
    {
        // Set BiCGStab (KSPBCGS) as the Krylov method of the linear solver
        solver.setSolver(KSPBCGS);

        // Set the absolute tolerance used to decide that the method has converged
        solver.setAbsTol(0.0001);

        // Set the maximum number of iterations
        solver.setMaxIter(500);

#ifdef USE_MULTIGRID

        solver.setPreconditioner(PCHYPRE_BOOMERAMG);
        solver.setPreconditionerAMG_nl(6);
        solver.setPreconditionerAMG_maxit(1);
        solver.setPreconditionerAMG_relax("SOR/Jacobi");
        solver.setPreconditionerAMG_cycleType("V",0,4);
        solver.setPreconditionerAMG_coarsenNodalType(0);
        solver.setPreconditionerAMG_coarsen("HMIS");

#endif

        tm_solve.start();

        // Give A and b to the solver; it returns x, the solution
        solver.solve(fd.getA(),x_,fd.getB());

        tm_solve.stop();
    }
    else
    {
        tm_solve.start();
        solver.solve(x_,fd.getB());
        tm_solve.stop();
    }
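
    // When init == false the assembled matrix is unchanged, so the solver and
    // its preconditioner are reused and only the right-hand side is passed;
    // x_ holds the previous solution and serves as the initial guess.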

    // copy the solution x to the grid psi
    fd.template copy<phi>(x_,psi);

    psi.template ghost_get<phi>();

    // Correct the vorticity to make it divergence-free

    auto it2 = gr.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        gr.template get<vorticity>(key)[x] -= 1.0f/2.0f/psi.spacing(x)*(psi.template get<phi>(key.move(x,1))-psi.template get<phi>(key.move(x,-1)));
        gr.template get<vorticity>(key)[y] -= 1.0f/2.0f/psi.spacing(y)*(psi.template get<phi>(key.move(y,1))-psi.template get<phi>(key.move(y,-1)));
        gr.template get<vorticity>(key)[z] -= 1.0f/2.0f/psi.spacing(z)*(psi.template get<phi>(key.move(z,1))-psi.template get<phi>(key.move(z,-1)));

        ++it2;
    }

    calc_and_print_max_div_and_int(gr);
}


void remesh(particles_type & vd, grid_type & gr, Box<3,float> & domain)
{
    // Remove all particles because we reinitialize them at grid-like positions
    vd.clear();

    // Get a grid iterator
    auto git = vd.getGridIterator(gr.getGridInfo().getSize());

    // For each grid point
    while (git.isNext())
    {
        // p is the grid point in global coordinates, key the same point
        // in local (distributed) coordinates
        auto p = git.get();
        auto key = git.get_dist();

        // Add the particle
        vd.add();

        // Assign the position to the particle in a grid-like way
        vd.getLastPos()[x] = gr.spacing(x)*p.get(x) + domain.getLow(x);
        vd.getLastPos()[y] = gr.spacing(y)*p.get(y) + domain.getLow(y);
        vd.getLastPos()[z] = gr.spacing(z)*p.get(z) + domain.getLow(z);

        // Initialize the vorticity
        vd.template getLastProp<vorticity>()[x] = gr.template get<vorticity>(key)[x];
        vd.template getLastProp<vorticity>()[y] = gr.template get<vorticity>(key)[y];
        vd.template getLastProp<vorticity>()[z] = gr.template get<vorticity>(key)[z];

        // next grid point
        ++git;
    }

    // redistribute the particles
    vd.map();
}


void comp_vel(Box<3,float> & domain, grid_type & g_vort, grid_type & g_vel, petsc_solver<double>::return_type (& phi_s)[3], petsc_solver<double> & solver)
{
    // Convenient constant
    constexpr unsigned int phi = 0;

    // We define a field phi_f
    typedef Field<phi,poisson_nn_helm> phi_f;

    // We assemble the Poisson equation as the Laplacian of phi using the
    // symmetric central-difference scheme (both derivatives use the central
    // scheme, rather than a forward difference composed with a backward one)
    typedef Lap<phi_f,poisson_nn_helm,CENTRAL_SYM> poisson;

    // Maximum size of the stencil
    Ghost<3,long int> g(2);

    // Here we create distributed grids to store the results
    grid_dist_id<3,float,aggregate<float>> gr_ps(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);
    grid_dist_id<3,float,aggregate<float[3]>> phi_v(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);

    // We calculate and print the maximum divergence of the vorticity
    // and its integral
    calc_and_print_max_div_and_int(g_vort);
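
    // Each velocity component is recovered by solving a Poisson equation for
    // one component of the vector potential phi (with the corresponding
    // vorticity component as right-hand side) and then taking the curl of phi.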

    // For each component solve a Poisson equation
    for (size_t i = 0 ; i < 3 ; i++)
    {
        // Copy the vorticity component i into gr_ps
        auto it2 = gr_ps.getDomainIterator();

        while (it2.isNext())
        {
            auto key = it2.get();

            // copy
            gr_ps.get<vorticity>(key) = g_vort.template get<vorticity>(key)[i];

            ++it2;
        }

        // Maximum size of the stencil
        Ghost<3,long int> stencil_max(2);

        // Finite-difference scheme
        FDScheme<poisson_nn_helm> fd(stencil_max, domain, gr_ps);

        // Impose the Poisson equation using gr_ps (= vorticity component i)
        // as the right-hand side, on the full domain
        fd.template impose_dit<phi>(poisson(),gr_ps,gr_ps.getDomainIterator());

        solver.setAbsTol(0.01);

        // Get the vector that represents the right-hand side
        Vector<double,PETSC_BASE> & b = fd.getB();

        timer tm_solve;
        tm_solve.start();

        // Give b to the solver; phi_s[i], calculated in the
        // previous time step, is passed as the initial guess
        solver.solve(phi_s[i],b);

        tm_solve.stop();

        // Calculate the residual

        solError serr;
        serr = solver.get_residual_error(phi_s[i],b);

        Vcluster<> & v_cl = create_vcluster();
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Solved component " << i << " Error: " << serr.err_inf << std::endl;}

        // copy the solution to the grid
        fd.template copy<phi>(phi_s[i],gr_ps);

        // copy the solution of component i into the vector potential phi_v
        auto it3 = gr_ps.getDomainIterator();

        while (it3.isNext())
        {
            auto key = it3.get();

            phi_v.get<velocity>(key)[i] = gr_ps.get<phi>(key);

            ++it3;
        }
    }

    phi_v.ghost_get<phi>();

    float inv_dx = 0.5f / g_vort.spacing(0);
    float inv_dy = 0.5f / g_vort.spacing(1);
    float inv_dz = 0.5f / g_vort.spacing(2);
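
    // u = curl(phi), discretized with second-order central differences:
    //   u_x = d(phi_z)/dy - d(phi_y)/dz
    //   u_y = d(phi_x)/dz - d(phi_z)/dx
    //   u_z = d(phi_y)/dx - d(phi_x)/dy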

    auto it3 = phi_v.getDomainIterator();

    // calculate the velocity from the curl of phi
    while (it3.isNext())
    {
        auto key = it3.get();

        float phi_xy = (phi_v.get<phi>(key.move(y,1))[x] - phi_v.get<phi>(key.move(y,-1))[x])*inv_dy;
        float phi_xz = (phi_v.get<phi>(key.move(z,1))[x] - phi_v.get<phi>(key.move(z,-1))[x])*inv_dz;
        float phi_yx = (phi_v.get<phi>(key.move(x,1))[y] - phi_v.get<phi>(key.move(x,-1))[y])*inv_dx;
        float phi_yz = (phi_v.get<phi>(key.move(z,1))[y] - phi_v.get<phi>(key.move(z,-1))[y])*inv_dz;
        float phi_zx = (phi_v.get<phi>(key.move(x,1))[z] - phi_v.get<phi>(key.move(x,-1))[z])*inv_dx;
        float phi_zy = (phi_v.get<phi>(key.move(y,1))[z] - phi_v.get<phi>(key.move(y,-1))[z])*inv_dy;

        g_vel.template get<velocity>(key)[x] = phi_zy - phi_yz;
        g_vel.template get<velocity>(key)[y] = phi_xz - phi_zx;
        g_vel.template get<velocity>(key)[z] = phi_yx - phi_xy;

        ++it3;
    }
}


template<unsigned int prp> void set_zero(grid_type & gr)
{
    auto it = gr.getDomainGhostIterator();

    // zero the property prp on the domain and the ghost
    while (it.isNext())
    {
        auto key = it.get();

        gr.template get<prp>(key)[0] = 0.0;
        gr.template get<prp>(key)[1] = 0.0;
        gr.template get<prp>(key)[2] = 0.0;

        ++it;
    }
}


template<unsigned int prp> void set_zero(particles_type & vd)
{
    auto it = vd.getDomainIterator();

    // zero the property prp on all particles
    while (it.isNext())
    {
        auto key = it.get();

        vd.template getProp<prp>(key)[0] = 0.0;
        vd.template getProp<prp>(key)[1] = 0.0;
        vd.template getProp<prp>(key)[2] = 0.0;

        ++it;
    }
}


// Calculate the right-hand side of the vorticity formulation
template<typename grid> void calc_rhs(grid & g_vort, grid & g_vel, grid & g_dwp)
{
    // useful constant
    constexpr int rhs = 0;
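
    // Right-hand side of the vorticity transport equation,
    //   dw/dt = nu*lap(w) + (w . grad) u,
    // i.e. viscous diffusion plus vortex stretching; fac1..fac3 are the
    // diffusion prefactors and fac4..fac6 the 1/(2h) central-difference
    // prefactors used in the stretching term.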

    // calculate several prefactors for the finite-difference stencils
    float fac1 = 2.0f*nu/(g_vort.spacing(0)*g_vort.spacing(0));
    float fac2 = 2.0f*nu/(g_vort.spacing(1)*g_vort.spacing(1));
    float fac3 = 2.0f*nu/(g_vort.spacing(2)*g_vort.spacing(2));

    float fac4 = 0.5f/(g_vort.spacing(0));
    float fac5 = 0.5f/(g_vort.spacing(1));
    float fac6 = 0.5f/(g_vort.spacing(2));

    auto it = g_dwp.getDomainIterator();

    g_vort.template ghost_get<vorticity>();

    while (it.isNext())
    {
        auto key = it.get();

        g_dwp.template get<rhs>(key)[x] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[x]+g_vort.template get<vorticity>(key.move(x,-1))[x])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[x]+g_vort.template get<vorticity>(key.move(y,-1))[x])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[x]+g_vort.template get<vorticity>(key.move(z,-1))[x])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[x]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[x] - g_vel.template get<velocity>(key.move(x,-1))[x])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[x] - g_vel.template get<velocity>(key.move(y,-1))[x])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[x] - g_vel.template get<velocity>(key.move(z,-1))[x]);

        g_dwp.template get<rhs>(key)[y] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[y]+g_vort.template get<vorticity>(key.move(x,-1))[y])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[y]+g_vort.template get<vorticity>(key.move(y,-1))[y])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[y]+g_vort.template get<vorticity>(key.move(z,-1))[y])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[y]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[y] - g_vel.template get<velocity>(key.move(x,-1))[y])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[y] - g_vel.template get<velocity>(key.move(y,-1))[y])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[y] - g_vel.template get<velocity>(key.move(z,-1))[y]);

        g_dwp.template get<rhs>(key)[z] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[z]+g_vort.template get<vorticity>(key.move(x,-1))[z])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[z]+g_vort.template get<vorticity>(key.move(y,-1))[z])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[z]+g_vort.template get<vorticity>(key.move(z,-1))[z])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[z]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[z] - g_vel.template get<velocity>(key.move(x,-1))[z])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[z] - g_vel.template get<velocity>(key.move(y,-1))[z])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[z] - g_vel.template get<velocity>(key.move(z,-1))[z]);

        ++it;
    }
}


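// Stage 1 of the two-stage (midpoint) Runge-Kutta scheme: save the current
// vorticity and positions, then advance both by dt/2 using the current
// right-hand side and particle velocity.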
void rk_step1(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        // Save the old vorticity
        particles.getProp<old_vort>(key)[x] = particles.getProp<vorticity>(key)[x];
        particles.getProp<old_vort>(key)[y] = particles.getProp<vorticity>(key)[y];
        particles.getProp<old_vort>(key)[z] = particles.getProp<vorticity>(key)[z];

        particles.getProp<vorticity>(key)[x] = particles.getProp<vorticity>(key)[x] + 0.5f * dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<vorticity>(key)[y] + 0.5f * dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<vorticity>(key)[z] + 0.5f * dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        // Save the old position
        particles.getProp<old_pos>(key)[x] = particles.getPos(key)[x];
        particles.getProp<old_pos>(key)[y] = particles.getPos(key)[y];
        particles.getProp<old_pos>(key)[z] = particles.getPos(key)[z];

        particles.getPos(key)[x] = particles.getPos(key)[x] + 0.5f * dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getPos(key)[y] + 0.5f * dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getPos(key)[z] + 0.5f * dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}


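// Stage 2: complete the time step from the saved state using the right-hand
// side and velocity evaluated at the midpoint:
//   w^{n+1} = w^n + dt * rhs,   x^{n+1} = x^n + dt * u.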
void rk_step2(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        particles.getProp<vorticity>(key)[x] = particles.getProp<old_vort>(key)[x] + dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<old_vort>(key)[y] + dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<old_vort>(key)[z] + dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        particles.getPos(key)[x] = particles.getProp<old_pos>(key)[x] + dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getProp<old_pos>(key)[y] + dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getProp<old_pos>(key)[z] + dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}


template<typename grid, typename vector> void do_step(vector & particles,
                                                      grid & g_vort,
                                                      grid & g_vel,
                                                      grid & g_dvort,
                                                      Box<3,float> & domain,
                                                      interpolate<particles_type,grid_type,mp4_kernel<float>> & inte,
                                                      petsc_solver<double>::return_type (& phi_s)[3],
                                                      petsc_solver<double> & solver)
{
    constexpr int rhs = 0;

    set_zero<vorticity>(g_vort);
    inte.template p2m<vorticity,vorticity>(particles,g_vort);

    g_vort.template ghost_put<add_,vorticity>();
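
    // ghost_put<add_,...> accumulates the p2m contributions deposited in
    // ghost cells onto the real grid points of the processors that own them.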

    // Calculate the velocity from the vorticity, then the vorticity right-hand side
    comp_vel(domain,g_vort,g_vel,phi_s,solver);
    calc_rhs(g_vort,g_vel,g_dvort);

    // Interpolate the right-hand side and the velocity back to the particles (m2p)
    g_dvort.template ghost_get<rhs>();
    g_vel.template ghost_get<velocity>();
    inte.template m2p<rhs,rhs_part>(g_dvort,particles);
    inte.template m2p<velocity,p_vel>(g_vel,particles);
}

template<typename vector, typename grid> void check_point_and_save(vector & particles,
                                                                   grid & g_vort,
                                                                   grid & g_vel,
                                                                   grid & g_dvort,
                                                                   size_t i)
{
    Vcluster<> & v_cl = create_vcluster();

    if (v_cl.getProcessingUnits() < 24)
    {
        particles.write("part_out_" + std::to_string(i),VTK_WRITER | FORMAT_BINARY);
        g_vort.template ghost_get<vorticity>();
        g_vel.template ghost_get<velocity>();
        g_vel.write_frame("grid_velocity",i, VTK_WRITER | FORMAT_BINARY);
        g_vort.write_frame("grid_vorticity",i, VTK_WRITER | FORMAT_BINARY);
    }

    // In order to reduce the size of the saved data we apply a threshold:
    // we only keep particles with vorticity magnitude higher than 0.1
    vector_dist<3,float,aggregate<float>> part_save(particles.getDecomposition(),0);

    auto it_s = particles.getDomainIterator();

    while (it_s.isNext())
    {
        auto p = it_s.get();

        float vort_magn = sqrt(particles.template getProp<vorticity>(p)[0] * particles.template getProp<vorticity>(p)[0] +
                               particles.template getProp<vorticity>(p)[1] * particles.template getProp<vorticity>(p)[1] +
                               particles.template getProp<vorticity>(p)[2] * particles.template getProp<vorticity>(p)[2]);

        if (vort_magn < 0.1)
        {
            ++it_s;
            continue;
        }

        part_save.add();

        part_save.getLastPos()[0] = particles.getPos(p)[0];
        part_save.getLastPos()[1] = particles.getPos(p)[1];
        part_save.getLastPos()[2] = particles.getPos(p)[2];

        part_save.template getLastProp<0>() = vort_magn;

        ++it_s;
    }

    part_save.map();

    // Save an HDF5 file for checkpoint restart (named so the restart
    // code path in main can load it back by step index)
    particles.save("check_point_" + std::to_string(i));
}

int main(int argc, char* argv[])
{
    // Initialize
    openfpm_init(&argc,&argv);
    {
    // Domain, a box
    // For the grid 1600x400x400 use
    // Box<3,float> domain({0.0,0.0,0.0},{22.0,5.57,5.57});
    Box<3,float> domain({0.0,0.0,0.0},{3.57,3.57,3.57});

    // Ghost (not important in this case but required)
    Ghost<3,long int> g(2);

    // Grid size: 96x96x96
    // for Re = 7500 use
    // long int sz[] = {1600,400,400};
    long int sz[] = {96,96,96};
    size_t szu[] = {(size_t)sz[0],(size_t)sz[1],(size_t)sz[2]};

    periodicity<3> bc = {{PERIODIC,PERIODIC,PERIODIC}};

    grid_type g_vort(szu,domain,g,bc);
    grid_type g_vel(g_vort.getDecomposition(),szu,g);
    grid_type g_dvort(g_vort.getDecomposition(),szu,g);
    particles_type particles(g_vort.getDecomposition(),0);

    // phi_s stores the solutions used to compute the velocity;
    // they are used as initial guesses every time we call the solver
    Vector<double,PETSC_BASE> phi_s[3];
    Vector<double,PETSC_BASE> x_;

    // Parallel object
    Vcluster<> & v_cl = create_vcluster();

    // print out the spacing of the grid
    if (v_cl.getProcessUnitID() == 0)
    {std::cout << "SPACING " << g_vort.spacing(0) << " " << g_vort.spacing(1) << " " << g_vort.spacing(2) << std::endl;}

    // initialize the ring (step 1)
    init_ring(g_vort,domain);

    x_.resize(g_vort.size(),g_vort.getLocalDomainSize());
    x_.setZero();

    // Create a PETSc solver to get the solution x
    petsc_solver<double> solver;

    // Do the Helmholtz projection (step 2)
    helmotz_hodge_projection(g_vort,domain,solver,x_,true);

    // initialize the particles on a mesh (step 3)
    remesh(particles,g_vort,domain);

    // Get the total number of particles
    size_t tot_part = particles.size_local();
    v_cl.sum(tot_part);
    v_cl.execute();

    // Now we set the size of phi_s
    phi_s[0].resize(g_vort.size(),g_vort.getLocalDomainSize());
    phi_s[1].resize(g_vort.size(),g_vort.getLocalDomainSize());
    phi_s[2].resize(g_vort.size(),g_vort.getLocalDomainSize());

    // And we initially set it to zero
    phi_s[0].setZero();
    phi_s[1].setZero();
    phi_s[2].setZero();

    // We create the interpolation object outside the loop to avoid
    // recreating it at every cycle. Internally this object creates some
    // data structures to optimize the mapping from particle position to
    // sub-domain. If there is no re-balancing it is safe to reuse it.
    interpolate<particles_type,grid_type,mp4_kernel<float>> inte(particles,g_vort);

    // With more than 24 cores we rely on the HDF5 checkpoint-restart file
    if (v_cl.getProcessingUnits() < 24)
    {g_vort.write("grid_vorticity_init", VTK_WRITER | FORMAT_BINARY);}


    // Before entering the loop we check if we want to restart from
    // a previous simulation
    size_t i = 0;

    // if we have an argument
    if (argc > 1)
    {
        // take that argument
        std::string restart(argv[1]);

        // convert it into a number
        i = std::stoi(restart);

        // load the file
        particles.load("check_point_" + std::to_string(i));

        // Print to inform that we are restarting from a
        // particular position
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Restarting from " << i << std::endl;}
    }

    // Time integration
    for ( ; i < nsteps ; i++)
    {
        // do steps 4-5-6-7
        do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

        // do step 8
        rk_step1(particles);

        // do steps 9-10-11-12
        do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

        // do step 13
        rk_step2(particles);

        // do step 14
        set_zero<vorticity>(g_vort);
        inte.template p2m<vorticity,vorticity>(particles,g_vort);
        g_vort.template ghost_put<add_,vorticity>();

        // Helmholtz-Hodge projection
        helmotz_hodge_projection(g_vort,domain,solver,x_,false);

        remesh(particles,g_vort,domain);

        // print the step number
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Step " << i << std::endl;}

        // every 100 steps write the output
        if (i % 100 == 0) {check_point_and_save(particles,g_vort,g_vel,g_dvort,i);}
    }
    }

    openfpm_finalize();
}

#else

int main(int argc, char* argv[])
{
    return 0;
}

#endif