OpenFPM_pdata  1.1.0
Project that contains the implementation of distributed structures
main_vic_petsc.cpp
//#define SE_CLASS1
//#define PRINT_STACKTRACE

#include "interpolation/interpolation.hpp"
#include "Grid/grid_dist_id.hpp"
#include "Vector/vector_dist.hpp"
#include "Matrix/SparseMatrix.hpp"
#include "Vector/Vector.hpp"
#include "FiniteDifference/FDScheme.hpp"
#include "Solvers/petsc_solver.hpp"
#include "interpolation/mp4_kernel.hpp"
#include "Solvers/petsc_solver_AMG_report.hpp"

constexpr int x = 0;
constexpr int y = 1;
constexpr int z = 2;
// The type of the grids (a vector field of 3 floats per grid point,
// as used for vorticity and velocity throughout this example)
typedef grid_dist_id<3,float,aggregate<float[3]>> grid_type;

// The type of the particles (five float[3] properties:
// vorticity, p_vel, rhs_part, old_vort, old_pos; see the indices below)
typedef vector_dist<3,float,aggregate<float[3],float[3],float[3],float[3],float[3]>> particles_type;
// radius of the torus
float ringr1 = 1.0;
// radius of the core of the torus
float sigma = 1.0/3.523;
// Reynolds number (to use 7500.0 you need a 1600x400x400 grid)
//float tgtre = 7500.0;
float tgtre = 3000.0;

// Kinematic viscosity
float nu = 1.0/tgtre;

// Time step
// float dt = 0.0025 for Re 7500
float dt = 0.0125;

#ifdef TEST_RUN
const unsigned int nsteps = 10;
#else
const unsigned int nsteps = 10001;
#endif

// All the properties by index
constexpr unsigned int vorticity = 0;
constexpr unsigned int velocity = 0;
constexpr unsigned int p_vel = 1;
constexpr int rhs_part = 2;
constexpr unsigned int old_vort = 3;
constexpr unsigned int old_pos = 4;
template<typename grid> void calc_and_print_max_div_and_int(grid & g_vort)
{
    g_vort.template ghost_get<vorticity>();
    auto it5 = g_vort.getDomainIterator();

    double max_vort = 0.0;

    double int_vort[3] = {0.0,0.0,0.0};

    while (it5.isNext())
    {
        auto key = it5.get();

        double tmp;

        tmp = 1.0f/g_vort.spacing(x)/2.0f*(g_vort.template get<vorticity>(key.move(x,1))[x] - g_vort.template get<vorticity>(key.move(x,-1))[x] ) +
              1.0f/g_vort.spacing(y)/2.0f*(g_vort.template get<vorticity>(key.move(y,1))[y] - g_vort.template get<vorticity>(key.move(y,-1))[y] ) +
              1.0f/g_vort.spacing(z)/2.0f*(g_vort.template get<vorticity>(key.move(z,1))[z] - g_vort.template get<vorticity>(key.move(z,-1))[z] );

        int_vort[x] += g_vort.template get<vorticity>(key)[x];
        int_vort[y] += g_vort.template get<vorticity>(key)[y];
        int_vort[z] += g_vort.template get<vorticity>(key)[z];

        if (tmp > max_vort)
            max_vort = tmp;

        ++it5;
    }

    Vcluster & v_cl = create_vcluster();
    v_cl.max(max_vort);
    v_cl.sum(int_vort[0]);
    v_cl.sum(int_vort[1]);
    v_cl.sum(int_vort[2]);
    v_cl.execute();

    if (v_cl.getProcessUnitID() == 0)
    {std::cout << "Max div for vorticity " << max_vort << " Integral: " << int_vort[0] << " " << int_vort[1] << " " << int_vort[2] << std::endl;}
}
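Written out, the stencil above is the standard second-order central-difference approximation of the divergence, while int_vort accumulates the plain sum of each vorticity component as a conservation check:

\nabla \cdot \vec{\omega}\,\big|_{ijk} \;\approx\; \frac{\omega_x^{i+1,j,k} - \omega_x^{i-1,j,k}}{2\Delta x} + \frac{\omega_y^{i,j+1,k} - \omega_y^{i,j-1,k}}{2\Delta y} + \frac{\omega_z^{i,j,k+1} - \omega_z^{i,j,k-1}}{2\Delta z}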
/*
 * gr is the grid where we are initializing the vortex ring
 * domain is the simulation domain
 *
 */
void init_ring(grid_type & gr, const Box<3,float> & domain)
{
    // To add some noise to the vortex ring we create two random
    // vectors
    constexpr int nk = 32;

    float ak[nk];
    float bk[nk];

    for (size_t i = 0 ; i < nk ; i++)
    {
        // cast to float: plain rand()/RAND_MAX is integer division and would be 0
        ak[i] = rand()/(float)RAND_MAX;
        bk[i] = rand()/(float)RAND_MAX;
    }

    // We calculate the circulation gamma
    float gamma = nu * tgtre;
    float rinv2 = 1.0f/(sigma*sigma);
    float max_vorticity = gamma*rinv2/M_PI;

    // We go through the grid to initialize the vortex
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key_d = it.get();
        auto key = it.getGKey(key_d);

        float tx = (key.get(x)-2)*gr.spacing(x) + domain.getLow(x);
        float ty = (key.get(y)-2)*gr.spacing(y) + domain.getLow(y);
        float tz = (key.get(z)-2)*gr.spacing(z) + domain.getLow(z);
        float theta1 = atan2((ty-domain.getHigh(1)/2.0f),(tz-domain.getHigh(2)/2.0f));

        float rad1r = sqrt((ty-domain.getHigh(1)/2.0f)*(ty-domain.getHigh(1)/2.0f) + (tz-domain.getHigh(2)/2.0f)*(tz-domain.getHigh(2)/2.0f)) - ringr1;
        float rad1t = tx - 1.0f;
        float rad1sq = rad1r*rad1r + rad1t*rad1t;
        float radstr = -exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
        gr.template get<vorticity>(key_d)[x] = 0.0f;
        gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
        gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);

        // kill the axis term

        float rad1r_ = sqrt((ty-domain.getHigh(1)/2.0f)*(ty-domain.getHigh(1)/2.0f) + (tz-domain.getHigh(2)/2.0f)*(tz-domain.getHigh(2)/2.0f)) + ringr1;
        float rad1sqTILDA = rad1sq*rinv2;
        radstr = exp(-rad1sq*rinv2)*rinv2*gamma/M_PI;
        gr.template get<vorticity>(key_d)[x] = 0.0f;
        gr.template get<vorticity>(key_d)[y] = -radstr * cos(theta1);
        gr.template get<vorticity>(key_d)[z] = radstr * sin(theta1);

        ++it;
    }
}
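Reading the constants above together (a sketch of the profile implied by gamma, rinv2 and max_vorticity): the loop imposes a Gaussian vorticity distribution around the ring core with circulation \Gamma = \nu\,\mathrm{Re}. Writing s for the squared distance from the core (rad1sq in the code),

|\vec{\omega}|(s) = \frac{\Gamma}{\pi \sigma^2}\, e^{-s/\sigma^2}

directed azimuthally around the ring axis, with peak value \Gamma/(\pi\sigma^2), which is exactly the quantity stored in max_vorticity.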

// Specification of the Poisson equation for the Helmholtz-Hodge projection.
// 3D (dims = 3). The field is a scalar value (nvar = 1), boundaries are periodic,
// and the type of the space is float. b_grid is the grid type that stores \psi.
// The others indicate which kind of matrix to use to construct the linear system and
// which kind of vector to construct for the right-hand side. Here we use a PETSc sparse
// matrix and a PETSc vector. NORMAL_GRID indicates a standard (non-staggered) grid.
struct poisson_nn_helm
{
    //! number of dimensions of the problem
    static const unsigned int dims = 3;
    //! we have only one scalar unknown
    static const unsigned int nvar = 1;
    //! boundary conditions
    static const bool boundary[];
    //! type of the spatial coordinates
    typedef float stype;
    //! grid that stores \psi
    typedef grid_dist_id<3,float,aggregate<float>> b_grid;
    //! sparse matrix used to solve the linear system (we use PETSc)
    typedef SparseMatrix<double,int,PETSC_BASE> SparseMatrix_type;
    //! vector to solve the system (PETSc)
    typedef Vector<double,PETSC_BASE> Vector_type;
    //! it is a standard (non-staggered) grid
    static const int grid_type = NORMAL_GRID;
};

const bool poisson_nn_helm::boundary[] = {PERIODIC,PERIODIC,PERIODIC};
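The Helmholtz-Hodge projection implemented below uses this system specification to enforce a divergence-free vorticity field: a scalar \psi solves a Poisson equation whose right-hand side is the divergence of the vorticity, and its gradient is then subtracted,

\nabla^2 \psi = \nabla \cdot \vec{\omega}, \qquad \vec{\omega} \leftarrow \vec{\omega} - \nabla \psi

so that the corrected field is divergence-free up to the solver tolerance.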

/*
 * gr vorticity grid where we apply the correction
 * domain simulation domain
 *
 */
void helmotz_hodge_projection(grid_type & gr, const Box<3,float> & domain, petsc_solver<double> & solver, petsc_solver<double>::return_type & x_ , bool init)
{
    // Convenient constant
    constexpr unsigned int phi = 0;

    // We define a field phi_f
    typedef Field<phi,poisson_nn_helm> phi_f;

    // We assemble the Poisson equation as the Laplacian of phi using a
    // central scheme (both derivatives use the central scheme, rather
    // than the usual forward composed with backward)
    typedef Lap<phi_f,poisson_nn_helm,CENTRAL_SYM> poisson;

    // ghost get
    gr.template ghost_get<vorticity>();

    // ghost size of the psi function
    Ghost<3,long int> g(2);

    // Here we create a distributed grid to store the result of the Helmholtz projection
    grid_dist_id<3,float,aggregate<float>> psi(gr.getDecomposition(),gr.getGridInfo().getSize(),g);

    // Calculate the divergence of the vortex field
    auto it = gr.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        psi.template get<phi>(key) = 1.0f/gr.spacing(x)/2.0f*(gr.template get<vorticity>(key.move(x,1))[x] - gr.template get<vorticity>(key.move(x,-1))[x] ) +
                                     1.0f/gr.spacing(y)/2.0f*(gr.template get<vorticity>(key.move(y,1))[y] - gr.template get<vorticity>(key.move(y,-1))[y] ) +
                                     1.0f/gr.spacing(z)/2.0f*(gr.template get<vorticity>(key.move(z,1))[z] - gr.template get<vorticity>(key.move(z,-1))[z] );

        ++it;
    }

    calc_and_print_max_div_and_int(gr);

    // In order to create a matrix that represents the Poisson equation we have to
    // indicate the maximum extension of the stencil, and we need an extra layer
    // of points in case we have to impose particular boundary conditions.
    Ghost<3,long int> stencil_max(2);

    // Here we define our finite-difference discretization scheme object
    FDScheme<poisson_nn_helm> fd(stencil_max, domain, psi);

    // Impose the Poisson equation, using the right-hand side psi on the full domain
    // (psi.getDomainIterator). The template parameter defines which property of psi
    // carries the right-hand side; in this case psi has only one property, so
    // property 0 carries the right-hand side.
    fd.template impose_dit<0>(poisson(),psi,psi.getDomainIterator());

    timer tm_solve;
    if (init == true)
    {
        // Set BiCGStab (KSPBCGS) as the Krylov method of the linear solver
        solver.setSolver(KSPBCGS);

        // Set the absolute tolerance used as the convergence criterion
        solver.setAbsTol(0.0001);

        // Set the maximum number of iterations
        solver.setMaxIter(500);

#ifdef USE_MULTIGRID

        solver.setPreconditioner(PCHYPRE_BOOMERAMG);
        solver.setPreconditionerAMG_nl(6);
        solver.setPreconditionerAMG_maxit(1);
        solver.setPreconditionerAMG_relax("SOR/Jacobi");
        solver.setPreconditionerAMG_cycleType("V",0,4);
        solver.setPreconditionerAMG_coarsen("HMIS");

#endif

        tm_solve.start();

        // Give A and b to the solver; it returns x, the solution
        solver.solve(fd.getA(),x_,fd.getB());

        tm_solve.stop();
    }
    else
    {
        tm_solve.start();
        solver.solve(x_,fd.getB());
        tm_solve.stop();
    }

    // copy the solution x to the grid psi
    fd.template copy<phi>(x_,psi);

    psi.template ghost_get<phi>();

    // Correct the vorticity to make it divergence-free
    auto it2 = gr.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        gr.template get<vorticity>(key)[x] -= 1.0f/2.0f/psi.spacing(x)*(psi.template get<phi>(key.move(x,1))-psi.template get<phi>(key.move(x,-1)));
        gr.template get<vorticity>(key)[y] -= 1.0f/2.0f/psi.spacing(y)*(psi.template get<phi>(key.move(y,1))-psi.template get<phi>(key.move(y,-1)));
        gr.template get<vorticity>(key)[z] -= 1.0f/2.0f/psi.spacing(z)*(psi.template get<phi>(key.move(z,1))-psi.template get<phi>(key.move(z,-1)));

        ++it2;
    }

    calc_and_print_max_div_and_int(gr);
}

void remesh(particles_type & vd, grid_type & gr, Box<3,float> & domain)
{
    // Remove all particles, because we reinitialize them at grid-aligned positions
    vd.clear();

    // Get a grid iterator
    auto git = vd.getGridIterator(gr.getGridInfo().getSize());

    // For each grid point
    while (git.isNext())
    {
        // p is the grid point in global coordinates; key is the distributed (local) key
        auto p = git.get();
        auto key = git.get_dist();

        // Add the particle
        vd.add();

        // Assign the particle the position of the grid point
        vd.getLastPos()[x] = gr.spacing(x)*p.get(x) + domain.getLow(x);
        vd.getLastPos()[y] = gr.spacing(y)*p.get(y) + domain.getLow(y);
        vd.getLastPos()[z] = gr.spacing(z)*p.get(z) + domain.getLow(z);

        // Initialize the vorticity
        vd.template getLastProp<vorticity>()[x] = gr.template get<vorticity>(key)[x];
        vd.template getLastProp<vorticity>()[y] = gr.template get<vorticity>(key)[y];
        vd.template getLastProp<vorticity>()[z] = gr.template get<vorticity>(key)[z];

        // next grid point
        ++git;
    }

    // redistribute the particles
    vd.map();
}

void comp_vel(Box<3,float> & domain, grid_type & g_vort, grid_type & g_vel, petsc_solver<double>::return_type (& phi_s)[3], petsc_solver<double> & solver)
{
    // Convenient constant
    constexpr unsigned int phi = 0;

    // We define a field phi_f
    typedef Field<phi,poisson_nn_helm> phi_f;

    // We assemble the Poisson equation as the Laplacian of phi using a
    // central scheme (both derivatives use the central scheme, rather
    // than the usual forward composed with backward)
    typedef Lap<phi_f,poisson_nn_helm,CENTRAL_SYM> poisson;

    // Maximum size of the stencil
    Ghost<3,long int> g(2);

    // Here we create distributed grids to store the scalar right-hand side (gr_ps)
    // and the three components of the vector potential (phi_v)
    grid_dist_id<3,float,aggregate<float>> gr_ps(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);
    grid_dist_id<3,float,aggregate<float[3]>> phi_v(g_vort.getDecomposition(),g_vort.getGridInfo().getSize(),g);

    // We calculate and print the maximum divergence of the vorticity
    // and the integral of it
    calc_and_print_max_div_and_int(g_vort);

    // For each component solve a Poisson equation
    for (size_t i = 0 ; i < 3 ; i++)
    {
        // Copy the vorticity component i into gr_ps
        auto it2 = gr_ps.getDomainIterator();

        while (it2.isNext())
        {
            auto key = it2.get();

            // copy
            gr_ps.get<vorticity>(key) = g_vort.template get<vorticity>(key)[i];

            ++it2;
        }

        // Maximum size of the stencil
        Ghost<3,long int> stencil_max(2);

        // Finite difference scheme
        FDScheme<poisson_nn_helm> fd(stencil_max, domain, gr_ps);

        // impose the Poisson equation using gr_ps = vorticity for the right-hand side (on the full domain)
        fd.template impose_dit<phi>(poisson(),gr_ps,gr_ps.getDomainIterator());

        solver.setAbsTol(0.01);

        // Get the vector that represents the right-hand side
        Vector<double,PETSC_BASE> & b = fd.getB();

        timer tm_solve;
        tm_solve.start();

        // Give b to the solver; in this case we are giving
        // the initial guess phi_s calculated in the previous
        // time step
        solver.solve(phi_s[i],b);

        tm_solve.stop();

        // Calculate the residual
        solError serr;
        serr = solver.get_residual_error(phi_s[i],b);

        Vcluster & v_cl = create_vcluster();
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "Solved component " << i << " Error: " << serr.err_inf << std::endl;}

        // copy the solution to the grid
        fd.template copy<phi>(phi_s[i],gr_ps);

        auto it3 = gr_ps.getDomainIterator();

        // store the solution as component i of the vector potential phi_v
        while (it3.isNext())
        {
            auto key = it3.get();

            phi_v.get<velocity>(key)[i] = gr_ps.get<phi>(key);

            ++it3;
        }
    }

    phi_v.ghost_get<phi>();

    float inv_dx = 0.5f / g_vort.spacing(0);
    float inv_dy = 0.5f / g_vort.spacing(1);
    float inv_dz = 0.5f / g_vort.spacing(2);

    auto it3 = phi_v.getDomainIterator();

    // calculate the velocity from the curl of phi
    while (it3.isNext())
    {
        auto key = it3.get();

        float phi_xy = (phi_v.get<phi>(key.move(y,1))[x] - phi_v.get<phi>(key.move(y,-1))[x])*inv_dy;
        float phi_xz = (phi_v.get<phi>(key.move(z,1))[x] - phi_v.get<phi>(key.move(z,-1))[x])*inv_dz;
        float phi_yx = (phi_v.get<phi>(key.move(x,1))[y] - phi_v.get<phi>(key.move(x,-1))[y])*inv_dx;
        float phi_yz = (phi_v.get<phi>(key.move(z,1))[y] - phi_v.get<phi>(key.move(z,-1))[y])*inv_dz;
        float phi_zx = (phi_v.get<phi>(key.move(x,1))[z] - phi_v.get<phi>(key.move(x,-1))[z])*inv_dx;
        float phi_zy = (phi_v.get<phi>(key.move(y,1))[z] - phi_v.get<phi>(key.move(y,-1))[z])*inv_dy;

        g_vel.template get<velocity>(key)[x] = phi_zy - phi_yz;
        g_vel.template get<velocity>(key)[y] = phi_xz - phi_zx;
        g_vel.template get<velocity>(key)[z] = phi_yx - phi_xy;

        ++it3;
    }
}
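In continuous form, comp_vel recovers the velocity through a vector potential \vec{\phi}: each component solves a Poisson equation with the corresponding vorticity component as right-hand side (the sign convention being absorbed into the operator assembled by FDScheme), and the velocity is the curl of the potential,

\nabla^2 \phi_i = \omega_i \quad (i = x,y,z), \qquad \vec{u} = \nabla \times \vec{\phi}

with the curl discretized by the same second-order central differences (inv_dx, inv_dy, inv_dz are the 1/(2\Delta) factors).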

template<unsigned int prp> void set_zero(grid_type & gr)
{
    auto it = gr.getDomainGhostIterator();

    // zero the property on the domain + ghost
    while (it.isNext())
    {
        auto key = it.get();

        gr.template get<prp>(key)[0] = 0.0;
        gr.template get<prp>(key)[1] = 0.0;
        gr.template get<prp>(key)[2] = 0.0;

        ++it;
    }
}

template<unsigned int prp> void set_zero(particles_type & vd)
{
    auto it = vd.getDomainIterator();

    // zero the property on all particles
    while (it.isNext())
    {
        auto key = it.get();

        vd.template getProp<prp>(key)[0] = 0.0;
        vd.template getProp<prp>(key)[1] = 0.0;
        vd.template getProp<prp>(key)[2] = 0.0;

        ++it;
    }
}

// Calculate the right-hand side of the vorticity formulation
template<typename grid> void calc_rhs(grid & g_vort, grid & g_vel, grid & g_dwp)
{
    // useful constant
    constexpr int rhs = 0;

    // calculate several pre-factors for the finite-difference stencil
    float fac1 = 2.0f*nu/(g_vort.spacing(0)*g_vort.spacing(0));
    float fac2 = 2.0f*nu/(g_vort.spacing(1)*g_vort.spacing(1));
    float fac3 = 2.0f*nu/(g_vort.spacing(2)*g_vort.spacing(2));

    float fac4 = 0.5f/(g_vort.spacing(0));
    float fac5 = 0.5f/(g_vort.spacing(1));
    float fac6 = 0.5f/(g_vort.spacing(2));

    auto it = g_dwp.getDomainIterator();

    g_vort.template ghost_get<vorticity>();

    while (it.isNext())
    {
        auto key = it.get();

        g_dwp.template get<rhs>(key)[x] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[x]+g_vort.template get<vorticity>(key.move(x,-1))[x])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[x]+g_vort.template get<vorticity>(key.move(y,-1))[x])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[x]+g_vort.template get<vorticity>(key.move(z,-1))[x])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[x]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[x] - g_vel.template get<velocity>(key.move(x,-1))[x])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[x] - g_vel.template get<velocity>(key.move(y,-1))[x])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[x] - g_vel.template get<velocity>(key.move(z,-1))[x]);

        g_dwp.template get<rhs>(key)[y] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[y]+g_vort.template get<vorticity>(key.move(x,-1))[y])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[y]+g_vort.template get<vorticity>(key.move(y,-1))[y])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[y]+g_vort.template get<vorticity>(key.move(z,-1))[y])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[y]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[y] - g_vel.template get<velocity>(key.move(x,-1))[y])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[y] - g_vel.template get<velocity>(key.move(y,-1))[y])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[y] - g_vel.template get<velocity>(key.move(z,-1))[y]);

        g_dwp.template get<rhs>(key)[z] = fac1*(g_vort.template get<vorticity>(key.move(x,1))[z]+g_vort.template get<vorticity>(key.move(x,-1))[z])+
                                          fac2*(g_vort.template get<vorticity>(key.move(y,1))[z]+g_vort.template get<vorticity>(key.move(y,-1))[z])+
                                          fac3*(g_vort.template get<vorticity>(key.move(z,1))[z]+g_vort.template get<vorticity>(key.move(z,-1))[z])-
                                          2.0f*(fac1+fac2+fac3)*g_vort.template get<vorticity>(key)[z]+
                                          fac4*g_vort.template get<vorticity>(key)[x]*
                                          (g_vel.template get<velocity>(key.move(x,1))[z] - g_vel.template get<velocity>(key.move(x,-1))[z])+
                                          fac5*g_vort.template get<vorticity>(key)[y]*
                                          (g_vel.template get<velocity>(key.move(y,1))[z] - g_vel.template get<velocity>(key.move(y,-1))[z])+
                                          fac6*g_vort.template get<vorticity>(key)[z]*
                                          (g_vel.template get<velocity>(key.move(z,1))[z] - g_vel.template get<velocity>(key.move(z,-1))[z]);

        ++it;
    }
}
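This loop discretizes, with second-order central differences, the right-hand side of the vorticity transport equation in stretching plus diffusion form (advection is handled separately by moving the particles):

\frac{D\vec{\omega}}{Dt} = (\vec{\omega} \cdot \nabla)\vec{u} + \nu \nabla^2 \vec{\omega}

where the diffusive prefactors fac1..fac3 carry the 2.0f*nu scaling chosen by this example, and fac4..fac6 are the 1/(2\Delta) factors of the central first derivatives.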

void rk_step1(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        // Save the old vorticity
        particles.getProp<old_vort>(key)[x] = particles.getProp<vorticity>(key)[x];
        particles.getProp<old_vort>(key)[y] = particles.getProp<vorticity>(key)[y];
        particles.getProp<old_vort>(key)[z] = particles.getProp<vorticity>(key)[z];

        particles.getProp<vorticity>(key)[x] = particles.getProp<vorticity>(key)[x] + 0.5f * dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<vorticity>(key)[y] + 0.5f * dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<vorticity>(key)[z] + 0.5f * dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        // Save the old position
        particles.getProp<old_pos>(key)[x] = particles.getPos(key)[x];
        particles.getProp<old_pos>(key)[y] = particles.getPos(key)[y];
        particles.getProp<old_pos>(key)[z] = particles.getPos(key)[z];

        particles.getPos(key)[x] = particles.getPos(key)[x] + 0.5f * dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getPos(key)[y] + 0.5f * dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getPos(key)[z] + 0.5f * dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}

void rk_step2(particles_type & particles)
{
    auto it = particles.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        particles.getProp<vorticity>(key)[x] = particles.getProp<old_vort>(key)[x] + dt * particles.getProp<rhs_part>(key)[x];
        particles.getProp<vorticity>(key)[y] = particles.getProp<old_vort>(key)[y] + dt * particles.getProp<rhs_part>(key)[y];
        particles.getProp<vorticity>(key)[z] = particles.getProp<old_vort>(key)[z] + dt * particles.getProp<rhs_part>(key)[z];

        ++it;
    }

    auto it2 = particles.getDomainIterator();

    while (it2.isNext())
    {
        auto key = it2.get();

        particles.getPos(key)[x] = particles.getProp<old_pos>(key)[x] + dt * particles.getProp<p_vel>(key)[x];
        particles.getPos(key)[y] = particles.getProp<old_pos>(key)[y] + dt * particles.getProp<p_vel>(key)[y];
        particles.getPos(key)[z] = particles.getProp<old_pos>(key)[z] + dt * particles.getProp<p_vel>(key)[z];

        ++it2;
    }

    particles.map();
}
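Together, rk_step1 and rk_step2 implement the explicit midpoint rule (second-order Runge-Kutta) for both the particle positions and the particle vorticity; with u the interpolated velocity (p_vel) and f the interpolated right-hand side (rhs_part):

x^{n+1/2} = x^n + \frac{\Delta t}{2}\, u^n, \qquad \omega^{n+1/2} = \omega^n + \frac{\Delta t}{2}\, f^n

x^{n+1} = x^n + \Delta t\, u^{n+1/2}, \qquad \omega^{n+1} = \omega^n + \Delta t\, f^{n+1/2}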

template<typename grid, typename vector> void do_step(vector & particles,
        grid & g_vort,
        grid & g_vel,
        grid & g_dvort,
        Box<3,float> & domain,
        interpolation<vector,grid,mp4_kernel<float>> & inte,
        petsc_solver<double>::return_type (& phi_s)[3],
        petsc_solver<double> & solver)
{
    constexpr int rhs = 0;

    // Interpolate the vorticity from the particles to the mesh
    set_zero<vorticity>(g_vort);
    inte.template p2m<vorticity,vorticity>(particles,g_vort);

    g_vort.template ghost_put<add_,vorticity>();

    // Calculate velocity from vorticity
    comp_vel(domain,g_vort,g_vel,phi_s,solver);
    calc_rhs(g_vort,g_vel,g_dvort);

    // Interpolate the right-hand side and the velocity back to the particles
    g_dvort.template ghost_get<rhs>();
    g_vel.template ghost_get<velocity>();
    set_zero<rhs_part>(particles);
    set_zero<p_vel>(particles);
    inte.template m2p<rhs,rhs_part>(g_dvort,particles);
    inte.template m2p<velocity,p_vel>(g_vel,particles);
}

template<typename vector, typename grid> void check_point_and_save(vector & particles,
        grid & g_vort,
        grid & g_vel,
        grid & g_dvort,
        size_t i)
{
    Vcluster & v_cl = create_vcluster();

    if (v_cl.getProcessingUnits() < 24)
    {
        particles.write("part_out_" + std::to_string(i),VTK_WRITER | FORMAT_BINARY);
        g_vort.template ghost_get<vorticity>();
        g_vel.template ghost_get<velocity>();
        g_vel.write_frame("grid_velocity",i, VTK_WRITER | FORMAT_BINARY);
        g_vort.write_frame("grid_vorticity",i, VTK_WRITER | FORMAT_BINARY);
    }

    // In order to reduce the size of the saved data we apply a threshold.
    // We only save particles with vorticity magnitude higher than 0.1
    vector_dist<3,float,aggregate<float>> part_save(particles.getDecomposition(),0);

    auto it_s = particles.getDomainIterator();

    while (it_s.isNext())
    {
        auto p = it_s.get();

        float vort_magn = sqrt(particles.template getProp<vorticity>(p)[0] * particles.template getProp<vorticity>(p)[0] +
                               particles.template getProp<vorticity>(p)[1] * particles.template getProp<vorticity>(p)[1] +
                               particles.template getProp<vorticity>(p)[2] * particles.template getProp<vorticity>(p)[2]);

        if (vort_magn < 0.1)
        {
            ++it_s;
            continue;
        }

        part_save.add();

        part_save.getLastPos()[0] = particles.getPos(p)[0];
        part_save.getLastPos()[1] = particles.getPos(p)[1];
        part_save.getLastPos()[2] = particles.getPos(p)[2];

        part_save.template getLastProp<0>() = vort_magn;

        ++it_s;
    }

    part_save.map();

    // Save an HDF5 file for checkpoint restart; the restart code in main
    // loads "check_point_<step>", so the step index is part of the name
    particles.save("check_point_" + std::to_string(i));
}

int main(int argc, char* argv[])
{
    // Initialize
    openfpm_init(&argc,&argv);
    {
        // Domain, a rectangle
        // For the grid 1600x400x400 use
        // Box<3,float> domain({0.0,0.0,0.0},{22.0,5.57,5.57});
        Box<3,float> domain({0.0,0.0,0.0},{3.57,3.57,3.57});

        // Ghost (Not important in this case but required)
        Ghost<3,long int> g(2);

        // Number of grid points in each direction
        // (for Re = 7500 use: long int sz[] = {1600,400,400};)
        long int sz[] = {96,96,96};
        size_t szu[] = {(size_t)sz[0],(size_t)sz[1],(size_t)sz[2]};

        periodicity<3> bc = {{PERIODIC,PERIODIC,PERIODIC}};

        grid_type g_vort(szu,domain,g,bc);
        grid_type g_vel(g_vort.getDecomposition(),szu,g);
        grid_type g_dvort(g_vort.getDecomposition(),szu,g);
        particles_type particles(g_vort.getDecomposition(),0);

        // They store the solutions used to compute the velocity.
        // They are used as initial guess every time we call the solver.
        Vector<double,PETSC_BASE> phi_s[3];
        petsc_solver<double>::return_type x_;

        // Parallel object
        Vcluster & v_cl = create_vcluster();

        // print out the spacing of the grid
        if (v_cl.getProcessUnitID() == 0)
        {std::cout << "SPACING " << g_vort.spacing(0) << " " << g_vort.spacing(1) << " " << g_vort.spacing(2) << std::endl;}

        // initialize the ring (step 1)
        init_ring(g_vort,domain);

        x_.resize(g_vort.size(),g_vort.getLocalDomainSize());
        x_.setZero();

        // Create a PETSc solver to get the solution x
        petsc_solver<double> solver;

        // Do the Helmholtz projection (step 2)
        helmotz_hodge_projection(g_vort,domain,solver,x_,true);

        // initialize the particles on a mesh (step 3)
        remesh(particles,g_vort,domain);

        // Get the total number of particles
        size_t tot_part = particles.size_local();
        v_cl.sum(tot_part);
        v_cl.execute();

        // Now we set the size of phi_s
        phi_s[0].resize(g_vort.size(),g_vort.getLocalDomainSize());
        phi_s[1].resize(g_vort.size(),g_vort.getLocalDomainSize());
        phi_s[2].resize(g_vort.size(),g_vort.getLocalDomainSize());

        // And we initially set it to zero
        phi_s[0].setZero();
        phi_s[1].setZero();
        phi_s[2].setZero();

        // We create the interpolation object outside the loop cycles
        // to avoid recreating it at every cycle. Internally this object
        // creates some data structures to optimize the conversion from
        // particle position to sub-domain. If there is no re-balancing,
        // it is safe to reuse it.
        interpolation<particles_type,grid_type,mp4_kernel<float>> inte(particles,g_vort);

        // With more than 24 cores we rely on the HDF5 checkpoint restart file
        if (v_cl.getProcessingUnits() < 24)
        {g_vort.write("grid_vorticity_init", VTK_WRITER | FORMAT_BINARY);}

        // Before entering the loop we check if we want to restart from
        // a previous simulation
        size_t i = 0;

        // if we have an argument
        if (argc > 1)
        {
            // take that argument
            std::string restart(argv[1]);

            // convert it into a number
            i = std::stoi(restart);

            // load the file
            particles.load("check_point_" + std::to_string(i));

            // Print to inform that we are restarting from a
            // particular position
            if (v_cl.getProcessUnitID() == 0)
            {std::cout << "Restarting from " << i << std::endl;}
        }

        // Time Integration
        for ( ; i < nsteps ; i++)
        {
            // do steps 4-5-6-7
            do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

            // do step 8
            rk_step1(particles);

            // do steps 9-10-11-12
            do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s,solver);

            // do step 13
            rk_step2(particles);

            // do step 14
            set_zero<vorticity>(g_vort);
            inte.template p2m<vorticity,vorticity>(particles,g_vort);
            g_vort.template ghost_put<add_,vorticity>();

            // Helmholtz-Hodge projection
            helmotz_hodge_projection(g_vort,domain,solver,x_,false);

            remesh(particles,g_vort,domain);

            // print the step number
            if (v_cl.getProcessUnitID() == 0)
            {std::cout << "Step " << i << std::endl;}

            // every 100 steps write the output
            if (i % 100 == 0) {check_point_and_save(particles,g_vort,g_vel,g_dvort,i);}
        }
    }

    openfpm_finalize();
}